| .. | .. |
|---|
| 1 | 1 | /******************************************************************* |
|---|
| 2 | 2 | * This file is part of the Emulex Linux Device Driver for * |
|---|
| 3 | 3 | * Fibre Channel Host Bus Adapters. * |
|---|
| 4 | | - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
|---|
| 4 | + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * |
|---|
| 5 | 5 | * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. * |
|---|
| 6 | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
|---|
| 7 | 7 | * EMULEX and SLI are trademarks of Emulex. * |
|---|
| .. | .. |
|---|
| 36 | 36 | #include <scsi/scsi_transport_fc.h> |
|---|
| 37 | 37 | #include <scsi/fc/fc_fs.h> |
|---|
| 38 | 38 | |
|---|
| 39 | | -#include <linux/nvme.h> |
|---|
| 40 | | -#include <linux/nvme-fc-driver.h> |
|---|
| 41 | | -#include <linux/nvme-fc.h> |
|---|
| 42 | 39 | #include "lpfc_version.h" |
|---|
| 43 | 40 | #include "lpfc_hw4.h" |
|---|
| 44 | 41 | #include "lpfc_hw.h" |
|---|
| .. | .. |
|---|
| 56 | 53 | |
|---|
| 57 | 54 | /* NVME initiator-based functions */ |
|---|
| 58 | 55 | |
|---|
| 59 | | -static struct lpfc_nvme_buf * |
|---|
| 56 | +static struct lpfc_io_buf * |
|---|
| 60 | 57 | lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
|---|
| 61 | | - int expedite); |
|---|
| 58 | + int idx, int expedite); |
|---|
| 62 | 59 | |
|---|
| 63 | 60 | static void |
|---|
| 64 | | -lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *); |
|---|
| 61 | +lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); |
|---|
| 65 | 62 | |
|---|
| 66 | 63 | static struct nvme_fc_port_template lpfc_nvme_template; |
|---|
| 67 | 64 | |
|---|
| .. | .. |
|---|
| 196 | 193 | } |
|---|
| 197 | 194 | |
|---|
| 198 | 195 | /** |
|---|
| 196 | + * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry. |
|---|
| 197 | + * @pwqeq: Pointer to command iocb. |
|---|
| 198 | + * @xritag: Tag that uniquely identifies the local exchange resource. |
|---|
| 199 | + * @opt: Option bits - |
|---|
| 200 | + * bit 0 = inhibit sending abts on the link |
|---|
| 201 | + * |
|---|
| 202 | + * This function is called with hbalock held. |
|---|
| 203 | + **/ |
|---|
| 204 | +void |
|---|
| 205 | +lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt) |
|---|
| 206 | +{ |
|---|
| 207 | + union lpfc_wqe128 *wqe = &pwqeq->wqe; |
|---|
| 208 | + |
|---|
| 209 | + /* WQEs are reused. Clear stale data and set key fields to |
|---|
| 210 | + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. |
|---|
| 211 | + */ |
|---|
| 212 | + memset(wqe, 0, sizeof(*wqe)); |
|---|
| 213 | + |
|---|
| 214 | + if (opt & INHIBIT_ABORT) |
|---|
| 215 | + bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); |
|---|
| 216 | + /* Abort specified xri tag, with the mask deliberately zeroed */ |
|---|
| 217 | + bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); |
|---|
| 218 | + |
|---|
| 219 | + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
|---|
| 220 | + |
|---|
| 221 | + /* Abort the IO associated with this outstanding exchange ID. */ |
|---|
| 222 | + wqe->abort_cmd.wqe_com.abort_tag = xritag; |
|---|
| 223 | + |
|---|
| 224 | + /* iotag for the wqe completion. */ |
|---|
| 225 | + bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag); |
|---|
| 226 | + |
|---|
| 227 | + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); |
|---|
| 228 | + bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); |
|---|
| 229 | + |
|---|
| 230 | + bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); |
|---|
| 231 | + bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); |
|---|
| 232 | + bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
|---|
| 233 | +} |
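A hedged usage sketch of this new helper (not taken from the patch itself; `nvmereq_wqe` and `inhibit` are illustrative names): a caller allocates an iocbq, decides whether the ABTS should actually go out on the link, and lets the helper build the ABORT_XRI_CX WQE for the exchange backing the command.

```c
/* Illustrative sketch only: "nvmereq_wqe" and "inhibit" are hypothetical. */
struct lpfc_iocbq *abtsiocb = lpfc_sli_get_iocbq(phba);
u8 opt = inhibit ? INHIBIT_ABORT : 0;

if (abtsiocb)
	/* Fill in the abort WQE for the exchange that backs the command */
	lpfc_nvme_prep_abort_wqe(abtsiocb, nvmereq_wqe->sli4_xritag, opt);
```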
|---|
| 234 | + |
|---|
| 235 | +/** |
|---|
| 199 | 236 | * lpfc_nvme_create_queue - |
|---|
| 237 | + * @pnvme_lport: Transport localport on which the queue is being created |
|---|
| 200 | 238 | * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 201 | 239 | * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. |
|---|
| 240 | + * @qsize: Size of the queue in bytes |
|---|
| 202 | 241 | * @handle: An opaque driver handle used in follow-up calls. |
|---|
| 203 | 242 | * |
|---|
| 204 | 243 | * Driver registers this routine to preallocate and initialize any |
|---|
| .. | .. |
|---|
| 229 | 268 | if (qhandle == NULL) |
|---|
| 230 | 269 | return -ENOMEM; |
|---|
| 231 | 270 | |
|---|
| 232 | | - qhandle->cpu_id = smp_processor_id(); |
|---|
| 271 | + qhandle->cpu_id = raw_smp_processor_id(); |
|---|
| 233 | 272 | qhandle->qidx = qidx; |
|---|
| 234 | 273 | /* |
|---|
| 235 | 274 | * NVME qidx == 0 is the admin queue, so both admin queue |
|---|
| .. | .. |
|---|
| 239 | 278 | if (qidx) { |
|---|
| 240 | 279 | str = "IO "; /* IO queue */ |
|---|
| 241 | 280 | qhandle->index = ((qidx - 1) % |
|---|
| 242 | | - vport->phba->cfg_nvme_io_channel); |
|---|
| 281 | + lpfc_nvme_template.max_hw_queues); |
|---|
| 243 | 282 | } else { |
|---|
| 244 | 283 | str = "ADM"; /* Admin queue */ |
|---|
| 245 | 284 | qhandle->index = qidx; |
|---|
| .. | .. |
|---|
| 247 | 286 | |
|---|
| 248 | 287 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
|---|
| 249 | 288 | "6073 Binding %s HdwQueue %d (cpu %d) to " |
|---|
| 250 | | - "io_channel %d qhandle %p\n", str, |
|---|
| 289 | + "hdw_queue %d qhandle x%px\n", str, |
|---|
| 251 | 290 | qidx, qhandle->cpu_id, qhandle->index, qhandle); |
|---|
| 252 | 291 | *handle = (void *)qhandle; |
|---|
| 253 | 292 | return 0; |
|---|
| .. | .. |
|---|
| 255 | 294 | |
|---|
| 256 | 295 | /** |
|---|
| 257 | 296 | * lpfc_nvme_delete_queue - |
|---|
| 258 | | - * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 297 | + * @pnvme_lport: Transport localport that owns the queue being deleted |
|---|
| 259 | 298 | * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. |
|---|
| 260 | 299 | * @handle: An opaque driver handle from lpfc_nvme_create_queue |
|---|
| 261 | 300 | * |
|---|
| .. | .. |
|---|
| 282 | 321 | vport = lport->vport; |
|---|
| 283 | 322 | |
|---|
| 284 | 323 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
|---|
| 285 | | - "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n", |
|---|
| 324 | + "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", |
|---|
| 286 | 325 | lport, qidx, handle); |
|---|
| 287 | 326 | kfree(handle); |
|---|
| 288 | 327 | } |
|---|
| .. | .. |
|---|
| 293 | 332 | struct lpfc_nvme_lport *lport = localport->private; |
|---|
| 294 | 333 | |
|---|
| 295 | 334 | lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, |
|---|
| 296 | | - "6173 localport %p delete complete\n", |
|---|
| 335 | + "6173 localport x%px delete complete\n", |
|---|
| 297 | 336 | lport); |
|---|
| 298 | 337 | |
|---|
| 299 | 338 | /* release any threads waiting for the unreg to complete */ |
|---|
| .. | .. |
|---|
| 312 | 351 | * Return value : |
|---|
| 313 | 352 | * None |
|---|
| 314 | 353 | */ |
|---|
| 315 | | -void |
|---|
| 354 | +static void |
|---|
| 316 | 355 | lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) |
|---|
| 317 | 356 | { |
|---|
| 318 | 357 | struct lpfc_nvme_rport *rport = remoteport->private; |
|---|
| .. | .. |
|---|
| 332 | 371 | * calling state machine to remove the node. |
|---|
| 333 | 372 | */ |
|---|
| 334 | 373 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 335 | | - "6146 remoteport delete of remoteport %p\n", |
|---|
| 374 | + "6146 remoteport delete of remoteport x%px\n", |
|---|
| 336 | 375 | remoteport); |
|---|
| 337 | 376 | spin_lock_irq(&vport->phba->hbalock); |
|---|
| 338 | 377 | |
|---|
| .. | .. |
|---|
| 356 | 395 | return; |
|---|
| 357 | 396 | } |
|---|
| 358 | 397 | |
|---|
| 398 | +/** |
|---|
| 399 | + * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request |
|---|
| 400 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 401 | + * @axchg: pointer to exchange context for the NVME LS request |
|---|
| 402 | + * |
|---|
| 403 | + * This routine is used for processing an asynchronously received NVME LS |
|---|
| 404 | + * request. Any remaining validation is done and the LS is then forwarded |
|---|
| 405 | + * to the nvme-fc transport via nvme_fc_rcv_ls_req(). |
|---|
| 406 | + * |
|---|
| 407 | + * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing) |
|---|
| 408 | + * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done. |
|---|
| 409 | + * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. |
|---|
| 410 | + * |
|---|
| 411 | + * Returns 0 if LS was handled and delivered to the transport |
|---|
| 412 | + * Returns 1 if LS failed to be handled and should be dropped |
|---|
| 413 | + */ |
|---|
| 414 | +int |
|---|
| 415 | +lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, |
|---|
| 416 | + struct lpfc_async_xchg_ctx *axchg) |
|---|
| 417 | +{ |
|---|
| 418 | +#if (IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 419 | + struct lpfc_vport *vport; |
|---|
| 420 | + struct lpfc_nvme_rport *lpfc_rport; |
|---|
| 421 | + struct nvme_fc_remote_port *remoteport; |
|---|
| 422 | + struct lpfc_nvme_lport *lport; |
|---|
| 423 | + uint32_t *payload = axchg->payload; |
|---|
| 424 | + int rc; |
|---|
| 425 | + |
|---|
| 426 | + vport = axchg->ndlp->vport; |
|---|
| 427 | + lpfc_rport = axchg->ndlp->nrport; |
|---|
| 428 | + if (!lpfc_rport) |
|---|
| 429 | + return -EINVAL; |
|---|
| 430 | + |
|---|
| 431 | + remoteport = lpfc_rport->remoteport; |
|---|
| 432 | + if (!vport->localport) |
|---|
| 433 | + return -EINVAL; |
|---|
| 434 | + |
|---|
| 435 | + lport = vport->localport->private; |
|---|
| 436 | + if (!lport) |
|---|
| 437 | + return -EINVAL; |
|---|
| 438 | + |
|---|
| 439 | + rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload, |
|---|
| 440 | + axchg->size); |
|---|
| 441 | + |
|---|
| 442 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, |
|---|
| 443 | + "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x " |
|---|
| 444 | + "%08x %08x %08x\n", |
|---|
| 445 | + axchg->size, rc, |
|---|
| 446 | + *payload, *(payload+1), *(payload+2), |
|---|
| 447 | + *(payload+3), *(payload+4), *(payload+5)); |
|---|
| 448 | + |
|---|
| 449 | + if (!rc) |
|---|
| 450 | + return 0; |
|---|
| 451 | +#endif |
|---|
| 452 | + return 1; |
|---|
| 453 | +} |
|---|
| 454 | + |
|---|
| 455 | +/** |
|---|
| 456 | + * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME |
|---|
| 457 | + * LS request. |
|---|
| 458 | + * @phba: Pointer to HBA context object |
|---|
| 459 | + * @vport: The local port that issued the LS |
|---|
| 460 | + * @cmdwqe: Pointer to driver command WQE object. |
|---|
| 461 | + * @wcqe: Pointer to driver response CQE object. |
|---|
| 462 | + * |
|---|
| 463 | + * This function is the generic completion handler for NVME LS requests. |
|---|
| 464 | + * The function updates any states and statistics, calls the transport |
|---|
| 465 | + * ls_req done() routine, then tears down the command and buffers used |
|---|
| 466 | + * for the LS request. |
|---|
| 467 | + **/ |
|---|
| 468 | +void |
|---|
| 469 | +__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, |
|---|
| 470 | + struct lpfc_iocbq *cmdwqe, |
|---|
| 471 | + struct lpfc_wcqe_complete *wcqe) |
|---|
| 472 | +{ |
|---|
| 473 | + struct nvmefc_ls_req *pnvme_lsreq; |
|---|
| 474 | + struct lpfc_dmabuf *buf_ptr; |
|---|
| 475 | + struct lpfc_nodelist *ndlp; |
|---|
| 476 | + uint32_t status; |
|---|
| 477 | + |
|---|
| 478 | + pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; |
|---|
| 479 | + ndlp = (struct lpfc_nodelist *)cmdwqe->context1; |
|---|
| 480 | + status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; |
|---|
| 481 | + |
|---|
| 482 | + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 483 | + "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x " |
|---|
| 484 | + "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px " |
|---|
| 485 | + "ndlp:x%px\n", |
|---|
| 486 | + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, |
|---|
| 487 | + cmdwqe->sli4_xritag, status, |
|---|
| 488 | + (wcqe->parameter & 0xffff), |
|---|
| 489 | + cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp); |
|---|
| 490 | + |
|---|
| 491 | + lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n", |
|---|
| 492 | + cmdwqe->sli4_xritag, status, wcqe->parameter); |
|---|
| 493 | + |
|---|
| 494 | + if (cmdwqe->context3) { |
|---|
| 495 | + buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3; |
|---|
| 496 | + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); |
|---|
| 497 | + kfree(buf_ptr); |
|---|
| 498 | + cmdwqe->context3 = NULL; |
|---|
| 499 | + } |
|---|
| 500 | + if (pnvme_lsreq->done) |
|---|
| 501 | + pnvme_lsreq->done(pnvme_lsreq, status); |
|---|
| 502 | + else |
|---|
| 503 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 504 | + "6046 NVMEx cmpl without done call back? " |
|---|
| 505 | + "Data %px DID %x Xri: %x status %x\n", |
|---|
| 506 | + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, |
|---|
| 507 | + cmdwqe->sli4_xritag, status); |
|---|
| 508 | + if (ndlp) { |
|---|
| 509 | + lpfc_nlp_put(ndlp); |
|---|
| 510 | + cmdwqe->context1 = NULL; |
|---|
| 511 | + } |
|---|
| 512 | + lpfc_sli_release_iocbq(phba, cmdwqe); |
|---|
| 513 | +} |
|---|
| 514 | + |
|---|
| 359 | 515 | static void |
|---|
| 360 | | -lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, |
|---|
| 516 | +lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, |
|---|
| 361 | 517 | struct lpfc_wcqe_complete *wcqe) |
|---|
| 362 | 518 | { |
|---|
| 363 | 519 | struct lpfc_vport *vport = cmdwqe->vport; |
|---|
| 364 | 520 | struct lpfc_nvme_lport *lport; |
|---|
| 365 | 521 | uint32_t status; |
|---|
| 366 | | - struct nvmefc_ls_req *pnvme_lsreq; |
|---|
| 367 | | - struct lpfc_dmabuf *buf_ptr; |
|---|
| 368 | | - struct lpfc_nodelist *ndlp; |
|---|
| 369 | 522 | |
|---|
| 370 | | - pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; |
|---|
| 371 | 523 | status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; |
|---|
| 372 | 524 | |
|---|
| 373 | 525 | if (vport->localport) { |
|---|
| .. | .. |
|---|
| 382 | 534 | } |
|---|
| 383 | 535 | } |
|---|
| 384 | 536 | |
|---|
| 385 | | - ndlp = (struct lpfc_nodelist *)cmdwqe->context1; |
|---|
| 386 | | - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 387 | | - "6047 nvme cmpl Enter " |
|---|
| 388 | | - "Data %p DID %x Xri: %x status %x reason x%x cmd:%p " |
|---|
| 389 | | - "lsreg:%p bmp:%p ndlp:%p\n", |
|---|
| 390 | | - pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, |
|---|
| 391 | | - cmdwqe->sli4_xritag, status, |
|---|
| 392 | | - (wcqe->parameter & 0xffff), |
|---|
| 393 | | - cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp); |
|---|
| 394 | | - |
|---|
| 395 | | - lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n", |
|---|
| 396 | | - cmdwqe->sli4_xritag, status, wcqe->parameter); |
|---|
| 397 | | - |
|---|
| 398 | | - if (cmdwqe->context3) { |
|---|
| 399 | | - buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3; |
|---|
| 400 | | - lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); |
|---|
| 401 | | - kfree(buf_ptr); |
|---|
| 402 | | - cmdwqe->context3 = NULL; |
|---|
| 403 | | - } |
|---|
| 404 | | - if (pnvme_lsreq->done) |
|---|
| 405 | | - pnvme_lsreq->done(pnvme_lsreq, status); |
|---|
| 406 | | - else |
|---|
| 407 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
|---|
| 408 | | - "6046 nvme cmpl without done call back? " |
|---|
| 409 | | - "Data %p DID %x Xri: %x status %x\n", |
|---|
| 410 | | - pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, |
|---|
| 411 | | - cmdwqe->sli4_xritag, status); |
|---|
| 412 | | - if (ndlp) { |
|---|
| 413 | | - lpfc_nlp_put(ndlp); |
|---|
| 414 | | - cmdwqe->context1 = NULL; |
|---|
| 415 | | - } |
|---|
| 416 | | - lpfc_sli_release_iocbq(phba, cmdwqe); |
|---|
| 537 | + __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe); |
|---|
| 417 | 538 | } |
|---|
| 418 | 539 | |
|---|
| 419 | 540 | static int |
|---|
| .. | .. |
|---|
| 438 | 559 | return 1; |
|---|
| 439 | 560 | |
|---|
| 440 | 561 | wqe = &genwqe->wqe; |
|---|
| 562 | + /* Initialize only 64 bytes */ |
|---|
| 441 | 563 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
|---|
| 442 | 564 | |
|---|
| 443 | 565 | genwqe->context3 = (uint8_t *)bmp; |
|---|
| .. | .. |
|---|
| 516 | 638 | |
|---|
| 517 | 639 | |
|---|
| 518 | 640 | /* Issue GEN REQ WQE for NPORT <did> */ |
|---|
| 519 | | - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
|---|
| 520 | | - "6050 Issue GEN REQ WQE to NPORT x%x " |
|---|
| 521 | | - "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n", |
|---|
| 522 | | - ndlp->nlp_DID, genwqe->iotag, |
|---|
| 523 | | - vport->port_state, |
|---|
| 524 | | - genwqe, pnvme_lsreq, bmp, xmit_len, first_len); |
|---|
| 525 | 641 | genwqe->wqe_cmpl = cmpl; |
|---|
| 526 | 642 | genwqe->iocb_cmpl = NULL; |
|---|
| 527 | 643 | genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; |
|---|
| .. | .. |
|---|
| 531 | 647 | lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", |
|---|
| 532 | 648 | genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); |
|---|
| 533 | 649 | |
|---|
| 534 | | - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); |
|---|
| 650 | + rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); |
|---|
| 535 | 651 | if (rc) { |
|---|
| 536 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
|---|
| 652 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 537 | 653 | "6045 Issue GEN REQ WQE to NPORT x%x " |
|---|
| 538 | | - "Data: x%x x%x\n", |
|---|
| 654 | + "Data: x%x x%x rc x%x\n", |
|---|
| 539 | 655 | ndlp->nlp_DID, genwqe->iotag, |
|---|
| 540 | | - vport->port_state); |
|---|
| 656 | + vport->port_state, rc); |
|---|
| 541 | 657 | lpfc_sli_release_iocbq(phba, genwqe); |
|---|
| 542 | 658 | return 1; |
|---|
| 543 | 659 | } |
|---|
| 660 | + |
|---|
| 661 | + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS, |
|---|
| 662 | + "6050 Issue GEN REQ WQE to NPORT x%x " |
|---|
| 663 | + "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px " |
|---|
| 664 | + "bmp:x%px xmit:%d 1st:%d\n", |
|---|
| 665 | + ndlp->nlp_DID, genwqe->sli4_xritag, |
|---|
| 666 | + vport->port_state, |
|---|
| 667 | + genwqe, pnvme_lsreq, bmp, xmit_len, first_len); |
|---|
| 544 | 668 | return 0; |
|---|
| 545 | 669 | } |
|---|
| 546 | 670 | |
|---|
| 671 | + |
|---|
| 547 | 672 | /** |
|---|
| 548 | | - * lpfc_nvme_ls_req - Issue an Link Service request |
|---|
| 549 | | - * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 550 | | - * @lpfc_nvme_lport: Pointer to the driver's local port data |
|---|
| 551 | | - * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq |
|---|
| 673 | + * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request |
|---|
| 674 | + * @vport: The local port issuing the LS |
|---|
| 675 | + * @ndlp: The remote port to send the LS to |
|---|
| 676 | + * @pnvme_lsreq: Pointer to LS request structure from the transport |
|---|
| 677 | + * @gen_req_cmp: Completion call-back |
|---|
| 552 | 678 | * |
|---|
| 553 | | - * Driver registers this routine to handle any link service request |
|---|
| 554 | | - * from the nvme_fc transport to a remote nvme-aware port. |
|---|
| 679 | + * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST |
|---|
| 680 | + * WQE to perform the LS operation. |
|---|
| 555 | 681 | * |
|---|
| 556 | 682 | * Return value : |
|---|
| 557 | 683 | * 0 - Success |
|---|
| 558 | | - * TODO: What are the failure codes. |
|---|
| 684 | + * non-zero: various error codes, in form of -Exxx |
|---|
| 559 | 685 | **/ |
|---|
| 560 | | -static int |
|---|
| 561 | | -lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, |
|---|
| 562 | | - struct nvme_fc_remote_port *pnvme_rport, |
|---|
| 563 | | - struct nvmefc_ls_req *pnvme_lsreq) |
|---|
| 686 | +int |
|---|
| 687 | +__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
|---|
| 688 | + struct nvmefc_ls_req *pnvme_lsreq, |
|---|
| 689 | + void (*gen_req_cmp)(struct lpfc_hba *phba, |
|---|
| 690 | + struct lpfc_iocbq *cmdwqe, |
|---|
| 691 | + struct lpfc_wcqe_complete *wcqe)) |
|---|
| 564 | 692 | { |
|---|
| 565 | | - int ret = 0; |
|---|
| 566 | | - struct lpfc_nvme_lport *lport; |
|---|
| 567 | | - struct lpfc_nvme_rport *rport; |
|---|
| 568 | | - struct lpfc_vport *vport; |
|---|
| 569 | | - struct lpfc_nodelist *ndlp; |
|---|
| 570 | | - struct ulp_bde64 *bpl; |
|---|
| 571 | 693 | struct lpfc_dmabuf *bmp; |
|---|
| 694 | + struct ulp_bde64 *bpl; |
|---|
| 695 | + int ret; |
|---|
| 572 | 696 | uint16_t ntype, nstate; |
|---|
| 573 | 697 | |
|---|
| 574 | | - /* there are two dma buf in the request, actually there is one and |
|---|
| 575 | | - * the second one is just the start address + cmd size. |
|---|
| 576 | | - * Before calling lpfc_nvme_gen_req these buffers need to be wrapped |
|---|
| 577 | | - * in a lpfc_dmabuf struct. When freeing we just free the wrapper |
|---|
| 578 | | - * because the nvem layer owns the data bufs. |
|---|
| 579 | | - * We do not have to break these packets open, we don't care what is in |
|---|
| 580 | | - * them. And we do not have to look at the resonse data, we only care |
|---|
| 581 | | - * that we got a response. All of the caring is going to happen in the |
|---|
| 582 | | - * nvme-fc layer. |
|---|
| 583 | | - */ |
|---|
| 584 | | - |
|---|
| 585 | | - lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
|---|
| 586 | | - rport = (struct lpfc_nvme_rport *)pnvme_rport->private; |
|---|
| 587 | | - if (unlikely(!lport) || unlikely(!rport)) |
|---|
| 588 | | - return -EINVAL; |
|---|
| 589 | | - |
|---|
| 590 | | - vport = lport->vport; |
|---|
| 591 | | - |
|---|
| 592 | | - if (vport->load_flag & FC_UNLOADING) |
|---|
| 593 | | - return -ENODEV; |
|---|
| 594 | | - |
|---|
| 595 | | - /* Need the ndlp. It is stored in the driver's rport. */ |
|---|
| 596 | | - ndlp = rport->ndlp; |
|---|
| 597 | 698 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
|---|
| 598 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, |
|---|
| 599 | | - "6051 Remoteport %p, rport has invalid ndlp. " |
|---|
| 600 | | - "Failing LS Req\n", pnvme_rport); |
|---|
| 699 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 700 | + "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " |
|---|
| 701 | + "LS Req\n", |
|---|
| 702 | + ndlp); |
|---|
| 601 | 703 | return -ENODEV; |
|---|
| 602 | 704 | } |
|---|
| 603 | 705 | |
|---|
| 604 | | - /* The remote node has to be a mapped nvme target or an |
|---|
| 605 | | - * unmapped nvme initiator or it's an error. |
|---|
| 606 | | - */ |
|---|
| 607 | 706 | ntype = ndlp->nlp_type; |
|---|
| 608 | 707 | nstate = ndlp->nlp_state; |
|---|
| 609 | 708 | if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || |
|---|
| 610 | 709 | (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { |
|---|
| 611 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, |
|---|
| 612 | | - "6088 DID x%06x not ready for " |
|---|
| 613 | | - "IO. State x%x, Type x%x\n", |
|---|
| 614 | | - pnvme_rport->port_id, |
|---|
| 615 | | - ndlp->nlp_state, ndlp->nlp_type); |
|---|
| 710 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 711 | + "6088 NVMEx LS REQ: Fail DID x%06x not " |
|---|
| 712 | + "ready for IO. Type x%x, State x%x\n", |
|---|
| 713 | + ndlp->nlp_DID, ntype, nstate); |
|---|
| 616 | 714 | return -ENODEV; |
|---|
| 617 | 715 | } |
|---|
| 618 | | - bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
|---|
| 619 | | - if (!bmp) { |
|---|
| 620 | 716 | |
|---|
| 621 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
|---|
| 622 | | - "6044 Could not find node for DID %x\n", |
|---|
| 623 | | - pnvme_rport->port_id); |
|---|
| 624 | | - return 2; |
|---|
| 717 | + if (!vport->phba->sli4_hba.nvmels_wq) |
|---|
| 718 | + return -ENOMEM; |
|---|
| 719 | + |
|---|
| 720 | + /* |
|---|
| 721 | + * there are two dma buf in the request, actually there is one and |
|---|
| 722 | + * the second one is just the start address + cmd size. |
|---|
| 723 | + * Before calling lpfc_nvme_gen_req these buffers need to be wrapped |
|---|
| 724 | + * in a lpfc_dmabuf struct. When freeing we just free the wrapper |
|---|
| 725 | + * because the nvme layer owns the data bufs. |
|---|
| 726 | + * We do not have to break these packets open, we don't care what is |
|---|
| 727 | + * in them. And we do not have to look at the response data, we only |
|---|
| 728 | + * care that we got a response. All of the caring is going to happen |
|---|
| 729 | + * in the nvme-fc layer. |
|---|
| 730 | + */ |
|---|
| 731 | + |
|---|
| 732 | + bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); |
|---|
| 733 | + if (!bmp) { |
|---|
| 734 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 735 | + "6044 NVMEx LS REQ: Could not alloc LS buf " |
|---|
| 736 | + "for DID %x\n", |
|---|
| 737 | + ndlp->nlp_DID); |
|---|
| 738 | + return -ENOMEM; |
|---|
| 625 | 739 | } |
|---|
| 626 | | - INIT_LIST_HEAD(&bmp->list); |
|---|
| 740 | + |
|---|
| 627 | 741 | bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); |
|---|
| 628 | 742 | if (!bmp->virt) { |
|---|
| 629 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
|---|
| 630 | | - "6042 Could not find node for DID %x\n", |
|---|
| 631 | | - pnvme_rport->port_id); |
|---|
| 743 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 744 | + "6042 NVMEx LS REQ: Could not alloc mbuf " |
|---|
| 745 | + "for DID %x\n", |
|---|
| 746 | + ndlp->nlp_DID); |
|---|
| 632 | 747 | kfree(bmp); |
|---|
| 633 | | - return 3; |
|---|
| 748 | + return -ENOMEM; |
|---|
| 634 | 749 | } |
|---|
| 750 | + |
|---|
| 751 | + INIT_LIST_HEAD(&bmp->list); |
|---|
| 752 | + |
|---|
| 635 | 753 | bpl = (struct ulp_bde64 *)bmp->virt; |
|---|
| 636 | 754 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); |
|---|
| 637 | 755 | bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); |
|---|
| .. | .. |
|---|
| 646 | 764 | bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; |
|---|
| 647 | 765 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
|---|
| 648 | 766 | |
|---|
| 649 | | - /* Expand print to include key fields. */ |
|---|
| 650 | 767 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 651 | | - "6149 Issue LS Req to DID 0x%06x lport %p, rport %p " |
|---|
| 652 | | - "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n", |
|---|
| 653 | | - ndlp->nlp_DID, |
|---|
| 654 | | - pnvme_lport, pnvme_rport, |
|---|
| 655 | | - pnvme_lsreq, pnvme_lsreq->rqstlen, |
|---|
| 656 | | - pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, |
|---|
| 657 | | - &pnvme_lsreq->rspdma); |
|---|
| 768 | + "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, " |
|---|
| 769 | + "rqstlen:%d rsplen:%d %pad %pad\n", |
|---|
| 770 | + ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen, |
|---|
| 771 | + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, |
|---|
| 772 | + &pnvme_lsreq->rspdma); |
|---|
| 658 | 773 | |
|---|
| 659 | | - atomic_inc(&lport->fc4NvmeLsRequests); |
|---|
| 660 | | - |
|---|
| 661 | | - /* Hardcode the wait to 30 seconds. Connections are failing otherwise. |
|---|
| 662 | | - * This code allows it all to work. |
|---|
| 663 | | - */ |
|---|
| 664 | 774 | ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, |
|---|
| 665 | | - pnvme_lsreq, lpfc_nvme_cmpl_gen_req, |
|---|
| 666 | | - ndlp, 2, 30, 0); |
|---|
| 775 | + pnvme_lsreq, gen_req_cmp, ndlp, 2, |
|---|
| 776 | + LPFC_NVME_LS_TIMEOUT, 0); |
|---|
| 667 | 777 | if (ret != WQE_SUCCESS) { |
|---|
| 668 | | - atomic_inc(&lport->xmt_ls_err); |
|---|
| 669 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
|---|
| 670 | | - "6052 EXIT. issue ls wqe failed lport %p, " |
|---|
| 671 | | - "rport %p lsreq%p Status %x DID %x\n", |
|---|
| 672 | | - pnvme_lport, pnvme_rport, pnvme_lsreq, |
|---|
| 673 | | - ret, ndlp->nlp_DID); |
|---|
| 778 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 779 | + "6052 NVMEx REQ: EXIT. issue ls wqe failed " |
|---|
| 780 | + "lsreq x%px Status %x DID %x\n", |
|---|
| 781 | + pnvme_lsreq, ret, ndlp->nlp_DID); |
|---|
| 674 | 782 | lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); |
|---|
| 675 | 783 | kfree(bmp); |
|---|
| 676 | | - return ret; |
|---|
| 784 | + return -EIO; |
|---|
| 677 | 785 | } |
|---|
| 678 | 786 | |
|---|
| 679 | | - /* Stub in routine and return 0 for now. */ |
|---|
| 680 | | - return ret; |
|---|
| 787 | + return 0; |
|---|
| 681 | 788 | } |
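The point of factoring out __lpfc_nvme_ls_req() is that any path holding a valid ndlp can issue an LS and supply its own completion routine, as the transport-facing wrapper further below does with lpfc_nvme_ls_req_cmp(). A minimal hedged sketch of such an alternate caller (my_ls_done and my_send_ls are hypothetical names, not from this patch):

```c
/* Hypothetical alternate caller: do any private bookkeeping in the
 * completion, then reuse the generic LS teardown helper.
 */
static void my_ls_done(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

static int my_send_ls(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		      struct nvmefc_ls_req *pnvme_lsreq)
{
	return __lpfc_nvme_ls_req(vport, ndlp, pnvme_lsreq, my_ls_done);
}
```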
|---|
| 682 | 789 | |
|---|
| 683 | 790 | /** |
|---|
| 684 | | - * lpfc_nvme_ls_abort - Issue an Link Service request |
|---|
| 685 | | - * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 686 | | - * @lpfc_nvme_lport: Pointer to the driver's local port data |
|---|
| 687 | | - * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq |
|---|
| 791 | + * lpfc_nvme_ls_req - Issue an NVME Link Service request |
|---|
| 792 | + * @pnvme_lport: Transport localport that LS is to be issued from. |
|---|
| 793 | + * @pnvme_rport: Transport remoteport that LS is to be sent to. |
|---|
| 794 | + * @pnvme_lsreq: the transport nvme_ls_req structure for the LS |
|---|
| 688 | 795 | * |
|---|
| 689 | 796 | * Driver registers this routine to handle any link service request |
|---|
| 690 | 797 | * from the nvme_fc transport to a remote nvme-aware port. |
|---|
| 691 | 798 | * |
|---|
| 692 | 799 | * Return value : |
|---|
| 693 | 800 | * 0 - Success |
|---|
| 694 | | - * TODO: What are the failure codes. |
|---|
| 801 | + * non-zero: various error codes, in form of -Exxx |
|---|
| 802 | + **/ |
|---|
| 803 | +static int |
|---|
| 804 | +lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, |
|---|
| 805 | + struct nvme_fc_remote_port *pnvme_rport, |
|---|
| 806 | + struct nvmefc_ls_req *pnvme_lsreq) |
|---|
| 807 | +{ |
|---|
| 808 | + struct lpfc_nvme_lport *lport; |
|---|
| 809 | + struct lpfc_nvme_rport *rport; |
|---|
| 810 | + struct lpfc_vport *vport; |
|---|
| 811 | + int ret; |
|---|
| 812 | + |
|---|
| 813 | + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
|---|
| 814 | + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; |
|---|
| 815 | + if (unlikely(!lport) || unlikely(!rport)) |
|---|
| 816 | + return -EINVAL; |
|---|
| 817 | + |
|---|
| 818 | + vport = lport->vport; |
|---|
| 819 | + if (vport->load_flag & FC_UNLOADING) |
|---|
| 820 | + return -ENODEV; |
|---|
| 821 | + |
|---|
| 822 | + atomic_inc(&lport->fc4NvmeLsRequests); |
|---|
| 823 | + |
|---|
| 824 | + ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq, |
|---|
| 825 | + lpfc_nvme_ls_req_cmp); |
|---|
| 826 | + if (ret) |
|---|
| 827 | + atomic_inc(&lport->xmt_ls_err); |
|---|
| 828 | + |
|---|
| 829 | + return ret; |
|---|
| 830 | +} |
|---|
| 831 | + |
|---|
| 832 | +/** |
|---|
| 833 | + * __lpfc_nvme_ls_abort - Generic service routine to abort a prior |
|---|
| 834 | + * NVME LS request |
|---|
| 835 | + * @vport: The local port that issued the LS |
|---|
| 836 | + * @ndlp: The remote port the LS was sent to |
|---|
| 837 | + * @pnvme_lsreq: Pointer to LS request structure from the transport |
|---|
| 838 | + * |
|---|
| 839 | + * The driver validates the ndlp, looks for the LS, and aborts the |
|---|
| 840 | + * LS if found. |
|---|
| 841 | + * |
|---|
| 842 | + * Returns: |
|---|
| 843 | + * 0 : if LS found and aborted |
|---|
| 844 | + * non-zero: various error conditions in form -Exxx |
|---|
| 845 | + **/ |
|---|
| 846 | +int |
|---|
| 847 | +__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
|---|
| 848 | + struct nvmefc_ls_req *pnvme_lsreq) |
|---|
| 849 | +{ |
|---|
| 850 | + struct lpfc_hba *phba = vport->phba; |
|---|
| 851 | + struct lpfc_sli_ring *pring; |
|---|
| 852 | + struct lpfc_iocbq *wqe, *next_wqe; |
|---|
| 853 | + bool foundit = false; |
|---|
| 854 | + |
|---|
| 855 | + if (!ndlp) { |
|---|
| 856 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 857 | + "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " |
|---|
| 858 | + "x%06x, Failing LS Req\n", |
|---|
| 859 | + ndlp, ndlp ? ndlp->nlp_DID : 0); |
|---|
| 860 | + return -EINVAL; |
|---|
| 861 | + } |
|---|
| 862 | + |
|---|
| 863 | + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, |
|---|
| 864 | + "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq " |
|---|
| 865 | + "x%p rqstlen:%d rsplen:%d %pad %pad\n", |
|---|
| 866 | + pnvme_lsreq, pnvme_lsreq->rqstlen, |
|---|
| 867 | + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, |
|---|
| 868 | + &pnvme_lsreq->rspdma); |
|---|
| 869 | + |
|---|
| 870 | + /* |
|---|
| 871 | + * Lock the ELS ring txcmplq and look for the wqe that matches |
|---|
| 872 | + * this ELS. If found, issue an abort on the wqe. |
|---|
| 873 | + */ |
|---|
| 874 | + pring = phba->sli4_hba.nvmels_wq->pring; |
|---|
| 875 | + spin_lock_irq(&phba->hbalock); |
|---|
| 876 | + spin_lock(&pring->ring_lock); |
|---|
| 877 | + list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { |
|---|
| 878 | + if (wqe->context2 == pnvme_lsreq) { |
|---|
| 879 | + wqe->iocb_flag |= LPFC_DRIVER_ABORTED; |
|---|
| 880 | + foundit = true; |
|---|
| 881 | + break; |
|---|
| 882 | + } |
|---|
| 883 | + } |
|---|
| 884 | + spin_unlock(&pring->ring_lock); |
|---|
| 885 | + |
|---|
| 886 | + if (foundit) |
|---|
| 887 | + lpfc_sli_issue_abort_iotag(phba, pring, wqe); |
|---|
| 888 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 889 | + |
|---|
| 890 | + if (foundit) |
|---|
| 891 | + return 0; |
|---|
| 892 | + |
|---|
| 893 | + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, |
|---|
| 894 | + "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n", |
|---|
| 895 | + pnvme_lsreq); |
|---|
| 896 | + return -EINVAL; |
|---|
| 897 | +} |
|---|
| 898 | + |
|---|
| 899 | +static int |
|---|
| 900 | +lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport, |
|---|
| 901 | + struct nvme_fc_remote_port *remoteport, |
|---|
| 902 | + struct nvmefc_ls_rsp *ls_rsp) |
|---|
| 903 | +{ |
|---|
| 904 | + struct lpfc_async_xchg_ctx *axchg = |
|---|
| 905 | + container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); |
|---|
| 906 | + struct lpfc_nvme_lport *lport; |
|---|
| 907 | + int rc; |
|---|
| 908 | + |
|---|
| 909 | + if (axchg->phba->pport->load_flag & FC_UNLOADING) |
|---|
| 910 | + return -ENODEV; |
|---|
| 911 | + |
|---|
| 912 | + lport = (struct lpfc_nvme_lport *)localport->private; |
|---|
| 913 | + |
|---|
| 914 | + rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp); |
|---|
| 915 | + |
|---|
| 916 | + if (rc) { |
|---|
| 917 | + /* |
|---|
| 918 | + * unless the failure is due to having already sent |
|---|
| 919 | + * the response, an abort will be generated for the |
|---|
| 920 | + * exchange if the rsp can't be sent. |
|---|
| 921 | + */ |
|---|
| 922 | + if (rc != -EALREADY) |
|---|
| 923 | + atomic_inc(&lport->xmt_ls_abort); |
|---|
| 924 | + return rc; |
|---|
| 925 | + } |
|---|
| 926 | + |
|---|
| 927 | + return 0; |
|---|
| 928 | +} |
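For orientation (assumed context, not part of this hunk): the LS entry points added or reworked here are the ones wired into the driver's nvme_fc_port_template, roughly as sketched below, with the queue, FCP I/O, and sizing fields elided.

```c
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based LS functions */
	.ls_req     = lpfc_nvme_ls_req,
	.ls_abort   = lpfc_nvme_ls_abort,
	.xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,
	/* ...remaining ops and sizing fields elided... */
};
```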
|---|
| 929 | + |
|---|
| 930 | +/** |
|---|
| 931 | + * lpfc_nvme_ls_abort - Abort a prior NVME LS request |
|---|
| 932 | + * @pnvme_lport: Transport localport that LS is to be issued from. |
|---|
| 933 | + * @pnvme_rport: Transport remoteport that LS is to be sent to. |
|---|
| 934 | + * @pnvme_lsreq: the transport nvme_ls_req structure for the LS |
|---|
| 935 | + * |
|---|
| 936 | + * Driver registers this routine to abort a NVME LS request that is |
|---|
| 937 | + * in progress (from the transport's perspective). |
|---|
| 695 | 938 | **/ |
|---|
| 696 | 939 | static void |
|---|
| 697 | 940 | lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, |
|---|
| .. | .. |
|---|
| 702 | 945 | struct lpfc_vport *vport; |
|---|
| 703 | 946 | struct lpfc_hba *phba; |
|---|
| 704 | 947 | struct lpfc_nodelist *ndlp; |
|---|
| 705 | | - LIST_HEAD(abort_list); |
|---|
| 706 | | - struct lpfc_sli_ring *pring; |
|---|
| 707 | | - struct lpfc_iocbq *wqe, *next_wqe; |
|---|
| 948 | + int ret; |
|---|
| 708 | 949 | |
|---|
| 709 | 950 | lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
|---|
| 710 | 951 | if (unlikely(!lport)) |
|---|
| .. | .. |
|---|
| 716 | 957 | return; |
|---|
| 717 | 958 | |
|---|
| 718 | 959 | ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); |
|---|
| 719 | | - if (!ndlp) { |
|---|
| 720 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 721 | | - "6049 Could not find node for DID %x\n", |
|---|
| 722 | | - pnvme_rport->port_id); |
|---|
| 723 | | - return; |
|---|
| 724 | | - } |
|---|
| 725 | 960 | |
|---|
| 726 | | - /* Expand print to include key fields. */ |
|---|
| 727 | | - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, |
|---|
| 728 | | - "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d " |
|---|
| 729 | | - "rsplen:%d %pad %pad\n", |
|---|
| 730 | | - pnvme_lport, pnvme_rport, |
|---|
| 731 | | - pnvme_lsreq, pnvme_lsreq->rqstlen, |
|---|
| 732 | | - pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, |
|---|
| 733 | | - &pnvme_lsreq->rspdma); |
|---|
| 734 | | - |
|---|
| 735 | | - /* |
|---|
| 736 | | - * Lock the ELS ring txcmplq and build a local list of all ELS IOs |
|---|
| 737 | | - * that need an ABTS. The IOs need to stay on the txcmplq so that |
|---|
| 738 | | - * the abort operation completes them successfully. |
|---|
| 739 | | - */ |
|---|
| 740 | | - pring = phba->sli4_hba.nvmels_wq->pring; |
|---|
| 741 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 742 | | - spin_lock(&pring->ring_lock); |
|---|
| 743 | | - list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { |
|---|
| 744 | | - /* Add to abort_list on on NDLP match. */ |
|---|
| 745 | | - if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) { |
|---|
| 746 | | - wqe->iocb_flag |= LPFC_DRIVER_ABORTED; |
|---|
| 747 | | - list_add_tail(&wqe->dlist, &abort_list); |
|---|
| 748 | | - } |
|---|
| 749 | | - } |
|---|
| 750 | | - spin_unlock(&pring->ring_lock); |
|---|
| 751 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 752 | | - |
|---|
| 753 | | - /* Abort the targeted IOs and remove them from the abort list. */ |
|---|
| 754 | | - list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) { |
|---|
| 961 | + ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq); |
|---|
| 962 | + if (!ret) |
|---|
| 755 | 963 | atomic_inc(&lport->xmt_ls_abort); |
|---|
| 756 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 757 | | - list_del_init(&wqe->dlist); |
|---|
| 758 | | - lpfc_sli_issue_abort_iotag(phba, pring, wqe); |
|---|
| 759 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 760 | | - } |
|---|
| 761 | 964 | } |
|---|
| 762 | 965 | |
|---|
| 763 | 966 | /* Fix up the existing sgls for NVME IO. */ |
|---|
| 764 | 967 | static inline void |
|---|
| 765 | 968 | lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, |
|---|
| 766 | | - struct lpfc_nvme_buf *lpfc_ncmd, |
|---|
| 969 | + struct lpfc_io_buf *lpfc_ncmd, |
|---|
| 767 | 970 | struct nvmefc_fcp_req *nCmd) |
|---|
| 768 | 971 | { |
|---|
| 769 | 972 | struct lpfc_hba *phba = vport->phba; |
|---|
| .. | .. |
|---|
| 786 | 989 | * rather than the virtual memory to ease the restore |
|---|
| 787 | 990 | * operation. |
|---|
| 788 | 991 | */ |
|---|
| 789 | | - sgl = lpfc_ncmd->nvme_sgl; |
|---|
| 992 | + sgl = lpfc_ncmd->dma_sgl; |
|---|
| 790 | 993 | sgl->sge_len = cpu_to_le32(nCmd->cmdlen); |
|---|
| 791 | 994 | if (phba->cfg_nvme_embed_cmd) { |
|---|
| 792 | 995 | sgl->addr_hi = 0; |
|---|
| .. | .. |
|---|
| 857 | 1060 | sgl->sge_len = cpu_to_le32(nCmd->rsplen); |
|---|
| 858 | 1061 | } |
|---|
| 859 | 1062 | |
|---|
| 860 | | -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
|---|
| 861 | | -static void |
|---|
| 862 | | -lpfc_nvme_ktime(struct lpfc_hba *phba, |
|---|
| 863 | | - struct lpfc_nvme_buf *lpfc_ncmd) |
|---|
| 864 | | -{ |
|---|
| 865 | | - uint64_t seg1, seg2, seg3, seg4; |
|---|
| 866 | | - uint64_t segsum; |
|---|
| 867 | 1063 | |
|---|
| 868 | | - if (!lpfc_ncmd->ts_last_cmd || |
|---|
| 869 | | - !lpfc_ncmd->ts_cmd_start || |
|---|
| 870 | | - !lpfc_ncmd->ts_cmd_wqput || |
|---|
| 871 | | - !lpfc_ncmd->ts_isr_cmpl || |
|---|
| 872 | | - !lpfc_ncmd->ts_data_nvme) |
|---|
| 873 | | - return; |
|---|
| 874 | | - |
|---|
| 875 | | - if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start) |
|---|
| 876 | | - return; |
|---|
| 877 | | - if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd) |
|---|
| 878 | | - return; |
|---|
| 879 | | - if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start) |
|---|
| 880 | | - return; |
|---|
| 881 | | - if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput) |
|---|
| 882 | | - return; |
|---|
| 883 | | - if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl) |
|---|
| 884 | | - return; |
|---|
| 885 | | - /* |
|---|
| 886 | | - * Segment 1 - Time from Last FCP command cmpl is handed |
|---|
| 887 | | - * off to NVME Layer to start of next command. |
|---|
| 888 | | - * Segment 2 - Time from Driver receives a IO cmd start |
|---|
| 889 | | - * from NVME Layer to WQ put is done on IO cmd. |
|---|
| 890 | | - * Segment 3 - Time from Driver WQ put is done on IO cmd |
|---|
| 891 | | - * to MSI-X ISR for IO cmpl. |
|---|
| 892 | | - * Segment 4 - Time from MSI-X ISR for IO cmpl to when |
|---|
| 893 | | - * cmpl is handled off to the NVME Layer. |
|---|
| 894 | | - */ |
|---|
| 895 | | - seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd; |
|---|
| 896 | | - if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ |
|---|
| 897 | | - seg1 = 0; |
|---|
| 898 | | - |
|---|
| 899 | | - /* Calculate times relative to start of IO */ |
|---|
| 900 | | - seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start); |
|---|
| 901 | | - segsum = seg2; |
|---|
| 902 | | - seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start; |
|---|
| 903 | | - if (segsum > seg3) |
|---|
| 904 | | - return; |
|---|
| 905 | | - seg3 -= segsum; |
|---|
| 906 | | - segsum += seg3; |
|---|
| 907 | | - |
|---|
| 908 | | - seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start; |
|---|
| 909 | | - if (segsum > seg4) |
|---|
| 910 | | - return; |
|---|
| 911 | | - seg4 -= segsum; |
|---|
| 912 | | - |
|---|
| 913 | | - phba->ktime_data_samples++; |
|---|
| 914 | | - phba->ktime_seg1_total += seg1; |
|---|
| 915 | | - if (seg1 < phba->ktime_seg1_min) |
|---|
| 916 | | - phba->ktime_seg1_min = seg1; |
|---|
| 917 | | - else if (seg1 > phba->ktime_seg1_max) |
|---|
| 918 | | - phba->ktime_seg1_max = seg1; |
|---|
| 919 | | - phba->ktime_seg2_total += seg2; |
|---|
| 920 | | - if (seg2 < phba->ktime_seg2_min) |
|---|
| 921 | | - phba->ktime_seg2_min = seg2; |
|---|
| 922 | | - else if (seg2 > phba->ktime_seg2_max) |
|---|
| 923 | | - phba->ktime_seg2_max = seg2; |
|---|
| 924 | | - phba->ktime_seg3_total += seg3; |
|---|
| 925 | | - if (seg3 < phba->ktime_seg3_min) |
|---|
| 926 | | - phba->ktime_seg3_min = seg3; |
|---|
| 927 | | - else if (seg3 > phba->ktime_seg3_max) |
|---|
| 928 | | - phba->ktime_seg3_max = seg3; |
|---|
| 929 | | - phba->ktime_seg4_total += seg4; |
|---|
| 930 | | - if (seg4 < phba->ktime_seg4_min) |
|---|
| 931 | | - phba->ktime_seg4_min = seg4; |
|---|
| 932 | | - else if (seg4 > phba->ktime_seg4_max) |
|---|
| 933 | | - phba->ktime_seg4_max = seg4; |
|---|
| 934 | | - |
|---|
| 935 | | - lpfc_ncmd->ts_last_cmd = 0; |
|---|
| 936 | | - lpfc_ncmd->ts_cmd_start = 0; |
|---|
| 937 | | - lpfc_ncmd->ts_cmd_wqput = 0; |
|---|
| 938 | | - lpfc_ncmd->ts_isr_cmpl = 0; |
|---|
| 939 | | - lpfc_ncmd->ts_data_nvme = 0; |
|---|
| 940 | | -} |
|---|
| 941 | | -#endif |
|---|
| 942 | | - |
|---|
| 943 | | -/** |
|---|
| 1064 | +/* |
|---|
| 944 | 1065 | * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO |
|---|
| 945 | | - * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 946 | | - * @lpfc_nvme_lport: Pointer to the driver's local port data |
|---|
| 947 | | - * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq |
|---|
| 948 | 1066 | * |
|---|
| 949 | 1067 | * Driver registers this routine as it io request handler. This |
|---|
| 950 | 1068 | * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq |
|---|
| .. | .. |
|---|
| 958 | 1076 | lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, |
|---|
| 959 | 1077 | struct lpfc_wcqe_complete *wcqe) |
|---|
| 960 | 1078 | { |
|---|
| 961 | | - struct lpfc_nvme_buf *lpfc_ncmd = |
|---|
| 962 | | - (struct lpfc_nvme_buf *)pwqeIn->context1; |
|---|
| 1079 | + struct lpfc_io_buf *lpfc_ncmd = |
|---|
| 1080 | + (struct lpfc_io_buf *)pwqeIn->context1; |
|---|
| 963 | 1081 | struct lpfc_vport *vport = pwqeIn->vport; |
|---|
| 964 | 1082 | struct nvmefc_fcp_req *nCmd; |
|---|
| 965 | 1083 | struct nvme_fc_ersp_iu *ep; |
|---|
| 966 | 1084 | struct nvme_fc_cmd_iu *cp; |
|---|
| 967 | | - struct lpfc_nvme_rport *rport; |
|---|
| 968 | 1085 | struct lpfc_nodelist *ndlp; |
|---|
| 969 | 1086 | struct lpfc_nvme_fcpreq_priv *freqpriv; |
|---|
| 970 | 1087 | struct lpfc_nvme_lport *lport; |
|---|
| 971 | | - struct lpfc_nvme_ctrl_stat *cstat; |
|---|
| 972 | | - unsigned long flags; |
|---|
| 973 | 1088 | uint32_t code, status, idx; |
|---|
| 974 | 1089 | uint16_t cid, sqhd, data; |
|---|
| 975 | 1090 | uint32_t *ptr; |
|---|
| 1091 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
|---|
| 1092 | + int cpu; |
|---|
| 1093 | +#endif |
|---|
| 976 | 1094 | |
|---|
| 977 | 1095 | /* Sanity check on return of outstanding command */ |
|---|
| 978 | | - if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) { |
|---|
| 979 | | - if (!lpfc_ncmd) { |
|---|
| 980 | | - lpfc_printf_vlog(vport, KERN_ERR, |
|---|
| 981 | | - LOG_NODE | LOG_NVME_IOERR, |
|---|
| 982 | | - "6071 Null lpfc_ncmd pointer. No " |
|---|
| 983 | | - "release, skip completion\n"); |
|---|
| 984 | | - return; |
|---|
| 985 | | - } |
|---|
| 1096 | + if (!lpfc_ncmd) { |
|---|
| 1097 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1098 | + "6071 Null lpfc_ncmd pointer. No " |
|---|
| 1099 | + "release, skip completion\n"); |
|---|
| 1100 | + return; |
|---|
| 1101 | + } |
|---|
| 986 | 1102 | |
|---|
| 987 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, |
|---|
| 988 | | - "6066 Missing cmpl ptrs: lpfc_ncmd %p, " |
|---|
| 989 | | - "nvmeCmd %p nrport %p\n", |
|---|
| 990 | | - lpfc_ncmd, lpfc_ncmd->nvmeCmd, |
|---|
| 991 | | - lpfc_ncmd->nrport); |
|---|
| 1103 | + /* Guard against abort handler being called at same time */ |
|---|
| 1104 | + spin_lock(&lpfc_ncmd->buf_lock); |
|---|
| 1105 | + |
|---|
| 1106 | + if (!lpfc_ncmd->nvmeCmd) { |
|---|
| 1107 | + spin_unlock(&lpfc_ncmd->buf_lock); |
|---|
| 1108 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1109 | + "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " |
|---|
| 1110 | + "nvmeCmd x%px\n", |
|---|
| 1111 | + lpfc_ncmd, lpfc_ncmd->nvmeCmd); |
|---|
| 992 | 1112 | |
|---|
| 993 | 1113 | /* Release the lpfc_ncmd regardless of the missing elements. */ |
|---|
| 994 | 1114 | lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 995 | 1115 | return; |
|---|
| 996 | 1116 | } |
|---|
| 997 | 1117 | nCmd = lpfc_ncmd->nvmeCmd; |
|---|
| 998 | | - rport = lpfc_ncmd->nrport; |
|---|
| 999 | 1118 | status = bf_get(lpfc_wcqe_c_status, wcqe); |
|---|
| 1000 | 1119 | |
|---|
| 1001 | | - if (vport->localport) { |
|---|
| 1120 | + idx = lpfc_ncmd->cur_iocbq.hba_wqidx; |
|---|
| 1121 | + phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; |
|---|
| 1122 | + |
|---|
| 1123 | + if (unlikely(status && vport->localport)) { |
|---|
| 1002 | 1124 | lport = (struct lpfc_nvme_lport *)vport->localport->private; |
|---|
| 1003 | 1125 | if (lport) { |
|---|
| 1004 | | - idx = lpfc_ncmd->cur_iocbq.hba_wqidx; |
|---|
| 1005 | | - cstat = &lport->cstat[idx]; |
|---|
| 1006 | | - atomic_inc(&cstat->fc4NvmeIoCmpls); |
|---|
| 1007 | | - if (status) { |
|---|
| 1008 | | - if (bf_get(lpfc_wcqe_c_xb, wcqe)) |
|---|
| 1009 | | - atomic_inc(&lport->cmpl_fcp_xb); |
|---|
| 1010 | | - atomic_inc(&lport->cmpl_fcp_err); |
|---|
| 1011 | | - } |
|---|
| 1126 | + if (bf_get(lpfc_wcqe_c_xb, wcqe)) |
|---|
| 1127 | + atomic_inc(&lport->cmpl_fcp_xb); |
|---|
| 1128 | + atomic_inc(&lport->cmpl_fcp_err); |
|---|
| 1012 | 1129 | } |
|---|
| 1013 | 1130 | } |
|---|
| 1014 | 1131 | |
|---|
| .. | .. |
|---|
| 1019 | 1136 | * Catch race where our node has transitioned, but the |
|---|
| 1020 | 1137 | * transport is still transitioning. |
|---|
| 1021 | 1138 | */ |
|---|
| 1022 | | - ndlp = rport->ndlp; |
|---|
| 1139 | + ndlp = lpfc_ncmd->ndlp; |
|---|
| 1023 | 1140 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
|---|
| 1024 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, |
|---|
| 1025 | | - "6061 rport %p, DID x%06x node not ready.\n", |
|---|
| 1026 | | - rport, rport->remoteport->port_id); |
|---|
| 1027 | | - |
|---|
| 1028 | | - ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id); |
|---|
| 1029 | | - if (!ndlp) { |
|---|
| 1030 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1031 | | - "6062 Ignoring NVME cmpl. No ndlp\n"); |
|---|
| 1032 | | - goto out_err; |
|---|
| 1033 | | - } |
|---|
| 1141 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1142 | + "6062 Ignoring NVME cmpl. No ndlp\n"); |
|---|
| 1143 | + goto out_err; |
|---|
| 1034 | 1144 | } |
|---|
| 1035 | 1145 | |
|---|
| 1036 | 1146 | code = bf_get(lpfc_wcqe_c_code, wcqe); |
|---|
| .. | .. |
|---|
| 1099 | 1209 | /* Sanity check */ |
|---|
| 1100 | 1210 | if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) |
|---|
| 1101 | 1211 | break; |
|---|
| 1102 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1212 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1103 | 1213 | "6081 NVME Completion Protocol Error: " |
|---|
| 1104 | 1214 | "xri %x status x%x result x%x " |
|---|
| 1105 | 1215 | "placed x%x\n", |
|---|
| .. | .. |
|---|
| 1112 | 1222 | if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) |
|---|
| 1113 | 1223 | lpfc_printf_vlog(vport, KERN_INFO, |
|---|
| 1114 | 1224 | LOG_NVME_IOERR, |
|---|
| 1115 | | - "6032 Delay Aborted cmd %p " |
|---|
| 1116 | | - "nvme cmd %p, xri x%x, " |
|---|
| 1225 | + "6032 Delay Aborted cmd x%px " |
|---|
| 1226 | + "nvme cmd x%px, xri x%x, " |
|---|
| 1117 | 1227 | "xb %d\n", |
|---|
| 1118 | 1228 | lpfc_ncmd, nCmd, |
|---|
| 1119 | 1229 | lpfc_ncmd->cur_iocbq.sli4_xritag, |
|---|
| 1120 | 1230 | bf_get(lpfc_wcqe_c_xb, wcqe)); |
|---|
| 1231 | + fallthrough; |
|---|
| 1121 | 1232 | default: |
|---|
| 1122 | 1233 | out_err: |
|---|
| 1123 | 1234 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
|---|
| 1124 | 1235 | "6072 NVME Completion Error: xri %x " |
|---|
| 1125 | | - "status x%x result x%x placed x%x\n", |
|---|
| 1236 | + "status x%x result x%x [x%x] " |
|---|
| 1237 | + "placed x%x\n", |
|---|
| 1126 | 1238 | lpfc_ncmd->cur_iocbq.sli4_xritag, |
|---|
| 1127 | 1239 | lpfc_ncmd->status, lpfc_ncmd->result, |
|---|
| 1240 | + wcqe->parameter, |
|---|
| 1128 | 1241 | wcqe->total_data_placed); |
|---|
| 1129 | 1242 | nCmd->transferred_length = 0; |
|---|
| 1130 | 1243 | nCmd->rcv_rsplen = 0; |
|---|
| .. | .. |
|---|
| 1145 | 1258 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
|---|
| 1146 | 1259 | if (lpfc_ncmd->ts_cmd_start) { |
|---|
| 1147 | 1260 | lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; |
|---|
| 1148 | | - lpfc_ncmd->ts_data_nvme = ktime_get_ns(); |
|---|
| 1149 | | - phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; |
|---|
| 1150 | | - lpfc_nvme_ktime(phba, lpfc_ncmd); |
|---|
| 1261 | + lpfc_ncmd->ts_data_io = ktime_get_ns(); |
|---|
| 1262 | + phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; |
|---|
| 1263 | + lpfc_io_ktime(phba, lpfc_ncmd); |
|---|
| 1151 | 1264 | } |
|---|
| 1152 | | - if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { |
|---|
| 1153 | | - if (lpfc_ncmd->cpu != smp_processor_id()) |
|---|
| 1154 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1265 | + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { |
|---|
| 1266 | + cpu = raw_smp_processor_id(); |
|---|
| 1267 | + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); |
|---|
| 1268 | + if (lpfc_ncmd->cpu != cpu) |
|---|
| 1269 | + lpfc_printf_vlog(vport, |
|---|
| 1270 | + KERN_INFO, LOG_NVME_IOERR, |
|---|
| 1155 | 1271 | "6701 CPU Check cmpl: " |
|---|
| 1156 | 1272 | "cpu %d expect %d\n", |
|---|
| 1157 | | - smp_processor_id(), lpfc_ncmd->cpu); |
|---|
| 1158 | | - if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) |
|---|
| 1159 | | - phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; |
|---|
| 1273 | + cpu, lpfc_ncmd->cpu); |
|---|
| 1160 | 1274 | } |
|---|
| 1161 | 1275 | #endif |
|---|
| 1162 | 1276 | |
|---|
| .. | .. |
|---|
| 1167 | 1281 | if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { |
|---|
| 1168 | 1282 | freqpriv = nCmd->private; |
|---|
| 1169 | 1283 | freqpriv->nvme_buf = NULL; |
|---|
| 1170 | | - nCmd->done(nCmd); |
|---|
| 1171 | 1284 | lpfc_ncmd->nvmeCmd = NULL; |
|---|
| 1172 | | - } |
|---|
| 1173 | | - |
|---|
| 1174 | | - spin_lock_irqsave(&phba->hbalock, flags); |
|---|
| 1175 | | - lpfc_ncmd->nrport = NULL; |
|---|
| 1176 | | - spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1285 | + spin_unlock(&lpfc_ncmd->buf_lock); |
|---|
| 1286 | + nCmd->done(nCmd); |
|---|
| 1287 | + } else |
|---|
| 1288 | + spin_unlock(&lpfc_ncmd->buf_lock); |
|---|
| 1177 | 1289 | |
|---|
| 1178 | 1290 | /* Call release with XB=1 to queue the IO into the abort list. */ |
|---|
| 1179 | 1291 | lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| .. | .. |
|---|
| 1182 | 1294 | |
|---|
| 1183 | 1295 | /** |
|---|
| 1184 | 1296 | * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO |
|---|
| 1185 | | - * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 1186 | | - * @lpfc_nvme_lport: Pointer to the driver's local port data |
|---|
| 1187 | | - * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq |
|---|
| 1188 | | - * @lpfc_nvme_fcreq: IO request from nvme fc to driver. |
|---|
| 1189 | | - * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue |
|---|
| 1297 | + * @vport: pointer to a host virtual N_Port data structure |
|---|
| 1298 | + * @lpfcn_cmd: Pointer to lpfc scsi command |
|---|
| 1299 | + * @pnode: pointer to a node-list data structure |
|---|
| 1300 | + * @cstat: pointer to the control status structure |
|---|
| 1190 | 1301 | * |
|---|
| 1191 | 1302 | * Driver registers this routine as it io request handler. This |
|---|
| 1192 | 1303 | * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq |
|---|
| .. | .. |
|---|
| 1198 | 1309 | **/ |
|---|
| 1199 | 1310 | static int |
|---|
| 1200 | 1311 | lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, |
|---|
| 1201 | | - struct lpfc_nvme_buf *lpfc_ncmd, |
|---|
| 1312 | + struct lpfc_io_buf *lpfc_ncmd, |
|---|
| 1202 | 1313 | struct lpfc_nodelist *pnode, |
|---|
| 1203 | | - struct lpfc_nvme_ctrl_stat *cstat) |
|---|
| 1314 | + struct lpfc_fc4_ctrl_stat *cstat) |
|---|
| 1204 | 1315 | { |
|---|
| 1205 | 1316 | struct lpfc_hba *phba = vport->phba; |
|---|
| 1206 | 1317 | struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; |
|---|
| 1207 | | - struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); |
|---|
| 1318 | + struct nvme_common_command *sqe; |
|---|
| 1319 | + struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; |
|---|
| 1208 | 1320 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
|---|
| 1209 | 1321 | uint32_t req_len; |
|---|
| 1210 | 1322 | |
|---|
| 1211 | | - if (!pnode || !NLP_CHK_NODE_ACT(pnode)) |
|---|
| 1323 | + if (!NLP_CHK_NODE_ACT(pnode)) |
|---|
| 1212 | 1324 | return -EINVAL; |
|---|
| 1213 | 1325 | |
|---|
| 1214 | 1326 | /* |
|---|
| .. | .. |
|---|
| 1238 | 1350 | } else { |
|---|
| 1239 | 1351 | wqe->fcp_iwrite.initial_xfer_len = 0; |
|---|
| 1240 | 1352 | } |
|---|
| 1241 | | - atomic_inc(&cstat->fc4NvmeOutputRequests); |
|---|
| 1353 | + cstat->output_requests++; |
|---|
| 1242 | 1354 | } else { |
|---|
| 1243 | 1355 | /* From the iread template, initialize words 7 - 11 */ |
|---|
| 1244 | 1356 | memcpy(&wqe->words[7], |
|---|
| .. | .. |
|---|
| 1251 | 1363 | /* Word 5 */ |
|---|
| 1252 | 1364 | wqe->fcp_iread.rsrvd5 = 0; |
|---|
| 1253 | 1365 | |
|---|
| 1254 | | - atomic_inc(&cstat->fc4NvmeInputRequests); |
|---|
| 1366 | + cstat->input_requests++; |
|---|
| 1255 | 1367 | } |
|---|
| 1256 | 1368 | } else { |
|---|
| 1257 | 1369 | /* From the icmnd template, initialize words 4 - 11 */ |
|---|
| 1258 | 1370 | memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], |
|---|
| 1259 | 1371 | sizeof(uint32_t) * 8); |
|---|
| 1260 | | - atomic_inc(&cstat->fc4NvmeControlRequests); |
|---|
| 1372 | + cstat->control_requests++; |
|---|
| 1261 | 1373 | } |
|---|
| 1374 | + |
|---|
| 1375 | + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { |
|---|
| 1376 | + bf_set(wqe_erp, &wqe->generic.wqe_com, 1); |
|---|
| 1377 | + sqe = &((struct nvme_fc_cmd_iu *) |
|---|
| 1378 | + nCmd->cmdaddr)->sqe.common; |
|---|
| 1379 | + if (sqe->opcode == nvme_admin_async_event) |
|---|
| 1380 | + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); |
|---|
| 1381 | + } |
|---|
| 1382 | + |
|---|
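
The hunk above enables NVMe-level retries: for an rport that advertised NSLER the ERP bit is set in the WQE, and for an Async Event Request command the ffrq bit as well, which requires peeking at the SQE embedded in the FC-NVMe command IU. A minimal sketch of that opcode check, using only structures already referenced by this file; the helper name is invented for illustration:

```c
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/nvme-fc-driver.h>

/* Hypothetical helper: does this request carry an Async Event Request SQE? */
static bool lpfc_req_is_async_event(struct nvmefc_fcp_req *req)
{
	struct nvme_common_command *sqe =
		&((struct nvme_fc_cmd_iu *)req->cmdaddr)->sqe.common;

	return sqe->opcode == nvme_admin_async_event;
}
```
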
| 1262 | 1383 | /* |
|---|
| 1263 | 1384 | * Finish initializing those WQE fields that are independent |
|---|
| 1264 | 1385 | * of the nvme_cmnd request_buffer |
|---|
| .. | .. |
|---|
| 1288 | 1409 | |
|---|
| 1289 | 1410 | /** |
|---|
| 1290 | 1411 | * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO |
|---|
| 1291 | | - * @lpfc_pnvme: Pointer to the driver's nvme instance data |
|---|
| 1292 | | - * @lpfc_nvme_lport: Pointer to the driver's local port data |
|---|
| 1293 | | - * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq |
|---|
| 1294 | | - * @lpfc_nvme_fcreq: IO request from nvme fc to driver. |
|---|
| 1295 | | - * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue |
|---|
| 1412 | + * @vport: pointer to a host virtual N_Port data structure |
|---|
| 1413 | + * @lpfc_ncmd: Pointer to the lpfc nvme command (io buffer) |
|---|
| 1296 | 1414 | * |
|---|
| 1297 | 1415 | * Driver registers this routine as its io request handler. This |
|---|
| 1298 | 1416 | * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq |
|---|
| .. | .. |
|---|
| 1304 | 1422 | **/ |
|---|
| 1305 | 1423 | static int |
|---|
| 1306 | 1424 | lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, |
|---|
| 1307 | | - struct lpfc_nvme_buf *lpfc_ncmd) |
|---|
| 1425 | + struct lpfc_io_buf *lpfc_ncmd) |
|---|
| 1308 | 1426 | { |
|---|
| 1309 | 1427 | struct lpfc_hba *phba = vport->phba; |
|---|
| 1310 | 1428 | struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; |
|---|
| 1311 | 1429 | union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; |
|---|
| 1312 | | - struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl; |
|---|
| 1430 | + struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; |
|---|
| 1431 | + struct sli4_hybrid_sgl *sgl_xtra = NULL; |
|---|
| 1313 | 1432 | struct scatterlist *data_sg; |
|---|
| 1314 | 1433 | struct sli4_sge *first_data_sgl; |
|---|
| 1315 | 1434 | struct ulp_bde64 *bde; |
|---|
| 1316 | | - dma_addr_t physaddr; |
|---|
| 1435 | + dma_addr_t physaddr = 0; |
|---|
| 1317 | 1436 | uint32_t num_bde = 0; |
|---|
| 1318 | | - uint32_t dma_len; |
|---|
| 1437 | + uint32_t dma_len = 0; |
|---|
| 1319 | 1438 | uint32_t dma_offset = 0; |
|---|
| 1320 | | - int nseg, i; |
|---|
| 1439 | + int nseg, i, j; |
|---|
| 1440 | + bool lsp_just_set = false; |
|---|
| 1321 | 1441 | |
|---|
| 1322 | 1442 | /* Fix up the command and response DMA stuff. */ |
|---|
| 1323 | 1443 | lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); |
|---|
| .. | .. |
|---|
| 1336 | 1456 | first_data_sgl = sgl; |
|---|
| 1337 | 1457 | lpfc_ncmd->seg_cnt = nCmd->sg_cnt; |
|---|
| 1338 | 1458 | if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { |
|---|
| 1339 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1459 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1340 | 1460 | "6058 Too many sg segments from " |
|---|
| 1341 | 1461 | "NVME Transport. Max %d, " |
|---|
| 1342 | 1462 | "nvmeIO sg_cnt %d\n", |
|---|
| .. | .. |
|---|
| 1354 | 1474 | */ |
|---|
| 1355 | 1475 | nseg = nCmd->sg_cnt; |
|---|
| 1356 | 1476 | data_sg = nCmd->first_sgl; |
|---|
| 1477 | + |
|---|
| 1478 | + /* for tracking the segment boundaries */ |
|---|
| 1479 | + j = 2; |
|---|
| 1357 | 1480 | for (i = 0; i < nseg; i++) { |
|---|
| 1358 | 1481 | if (data_sg == NULL) { |
|---|
| 1359 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1482 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1360 | 1483 | "6059 dptr err %d, nseg %d\n", |
|---|
| 1361 | 1484 | i, nseg); |
|---|
| 1362 | 1485 | lpfc_ncmd->seg_cnt = 0; |
|---|
| 1363 | 1486 | return 1; |
|---|
| 1364 | 1487 | } |
|---|
| 1365 | | - physaddr = data_sg->dma_address; |
|---|
| 1366 | | - dma_len = data_sg->length; |
|---|
| 1367 | | - sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); |
|---|
| 1368 | | - sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); |
|---|
| 1369 | | - sgl->word2 = le32_to_cpu(sgl->word2); |
|---|
| 1370 | | - if ((num_bde + 1) == nseg) |
|---|
| 1371 | | - bf_set(lpfc_sli4_sge_last, sgl, 1); |
|---|
| 1372 | | - else |
|---|
| 1373 | | - bf_set(lpfc_sli4_sge_last, sgl, 0); |
|---|
| 1374 | | - bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
|---|
| 1375 | | - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); |
|---|
| 1376 | | - sgl->word2 = cpu_to_le32(sgl->word2); |
|---|
| 1377 | | - sgl->sge_len = cpu_to_le32(dma_len); |
|---|
| 1378 | 1488 | |
|---|
| 1379 | | - dma_offset += dma_len; |
|---|
| 1380 | | - data_sg = sg_next(data_sg); |
|---|
| 1381 | | - sgl++; |
|---|
| 1489 | + sgl->word2 = 0; |
|---|
| 1490 | + if ((num_bde + 1) == nseg) { |
|---|
| 1491 | + bf_set(lpfc_sli4_sge_last, sgl, 1); |
|---|
| 1492 | + bf_set(lpfc_sli4_sge_type, sgl, |
|---|
| 1493 | + LPFC_SGE_TYPE_DATA); |
|---|
| 1494 | + } else { |
|---|
| 1495 | + bf_set(lpfc_sli4_sge_last, sgl, 0); |
|---|
| 1496 | + |
|---|
| 1497 | + /* expand the segment */ |
|---|
| 1498 | + if (!lsp_just_set && |
|---|
| 1499 | + !((j + 1) % phba->border_sge_num) && |
|---|
| 1500 | + ((nseg - 1) != i)) { |
|---|
| 1501 | + /* set LSP type */ |
|---|
| 1502 | + bf_set(lpfc_sli4_sge_type, sgl, |
|---|
| 1503 | + LPFC_SGE_TYPE_LSP); |
|---|
| 1504 | + |
|---|
| 1505 | + sgl_xtra = lpfc_get_sgl_per_hdwq( |
|---|
| 1506 | + phba, lpfc_ncmd); |
|---|
| 1507 | + |
|---|
| 1508 | + if (unlikely(!sgl_xtra)) { |
|---|
| 1509 | + lpfc_ncmd->seg_cnt = 0; |
|---|
| 1510 | + return 1; |
|---|
| 1511 | + } |
|---|
| 1512 | + sgl->addr_lo = cpu_to_le32(putPaddrLow( |
|---|
| 1513 | + sgl_xtra->dma_phys_sgl)); |
|---|
| 1514 | + sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
|---|
| 1515 | + sgl_xtra->dma_phys_sgl)); |
|---|
| 1516 | + |
|---|
| 1517 | + } else { |
|---|
| 1518 | + bf_set(lpfc_sli4_sge_type, sgl, |
|---|
| 1519 | + LPFC_SGE_TYPE_DATA); |
|---|
| 1520 | + } |
|---|
| 1521 | + } |
|---|
| 1522 | + |
|---|
| 1523 | + if (!(bf_get(lpfc_sli4_sge_type, sgl) & |
|---|
| 1524 | + LPFC_SGE_TYPE_LSP)) { |
|---|
| 1525 | + if ((nseg - 1) == i) |
|---|
| 1526 | + bf_set(lpfc_sli4_sge_last, sgl, 1); |
|---|
| 1527 | + |
|---|
| 1528 | + physaddr = data_sg->dma_address; |
|---|
| 1529 | + dma_len = data_sg->length; |
|---|
| 1530 | + sgl->addr_lo = cpu_to_le32( |
|---|
| 1531 | + putPaddrLow(physaddr)); |
|---|
| 1532 | + sgl->addr_hi = cpu_to_le32( |
|---|
| 1533 | + putPaddrHigh(physaddr)); |
|---|
| 1534 | + |
|---|
| 1535 | + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
|---|
| 1536 | + sgl->word2 = cpu_to_le32(sgl->word2); |
|---|
| 1537 | + sgl->sge_len = cpu_to_le32(dma_len); |
|---|
| 1538 | + |
|---|
| 1539 | + dma_offset += dma_len; |
|---|
| 1540 | + data_sg = sg_next(data_sg); |
|---|
| 1541 | + |
|---|
| 1542 | + sgl++; |
|---|
| 1543 | + |
|---|
| 1544 | + lsp_just_set = false; |
|---|
| 1545 | + } else { |
|---|
| 1546 | + sgl->word2 = cpu_to_le32(sgl->word2); |
|---|
| 1547 | + |
|---|
| 1548 | + sgl->sge_len = cpu_to_le32( |
|---|
| 1549 | + phba->cfg_sg_dma_buf_size); |
|---|
| 1550 | + |
|---|
| 1551 | + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
|---|
| 1552 | + i = i - 1; |
|---|
| 1553 | + |
|---|
| 1554 | + lsp_just_set = true; |
|---|
| 1555 | + } |
|---|
| 1556 | + |
|---|
| 1557 | + j++; |
|---|
| 1382 | 1558 | } |
|---|
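
The reworked loop above chains SGL pages: when the next slot would cross an SGL page boundary (tracked with j against phba->border_sge_num) and segments remain, the current SGE is written as an LSP (link) entry pointing at an extra SGL obtained from lpfc_get_sgl_per_hdwq(), and filling continues in that page. Below is a greatly simplified, self-contained sketch of the same pattern; the structure layout, flag values, and helper are illustrative only and are not the SLI-4 SGE format:

```c
#include <linux/types.h>
#include <linux/scatterlist.h>

struct toy_sge { u64 addr; u32 flags; u32 len; };	/* illustrative, not SLI-4 */
#define TOY_SGE_LSP  0x1	/* "link" entry pointing at the next SGL page */
#define TOY_SGE_LAST 0x2

static void toy_fill_chained_sgl(struct toy_sge *sgl, int sges_per_page,
				 struct scatterlist *sg, int nseg,
				 struct toy_sge *(*get_extra_page)(dma_addr_t *phys))
{
	int slot = 2;	/* slots 0 and 1 already hold the cmd and rsp SGEs */
	int i;

	for (i = 0; i < nseg; i++, sg = sg_next(sg)) {
		if (slot == sges_per_page - 1 && i != nseg - 1) {
			dma_addr_t phys;
			struct toy_sge *next = get_extra_page(&phys);

			sgl[slot].addr  = phys;		/* link out to extra page */
			sgl[slot].flags = TOY_SGE_LSP;
			sgl = next;
			slot = 0;
		}
		sgl[slot].addr  = sg_dma_address(sg);
		sgl[slot].len   = sg_dma_len(sg);
		sgl[slot].flags = (i == nseg - 1) ? TOY_SGE_LAST : 0;
		slot++;
	}
}
```

The real loop additionally sets the LSP entry's length to cfg_sg_dma_buf_size and re-processes the current segment (note the `i = i - 1`) after switching pages.
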
| 1383 | 1559 | if (phba->cfg_enable_pbde) { |
|---|
| 1384 | 1560 | /* Use PBDE support for first SGL only, offset == 0 */ |
|---|
| .. | .. |
|---|
| 1398 | 1574 | } |
|---|
| 1399 | 1575 | |
|---|
| 1400 | 1576 | } else { |
|---|
| 1577 | + lpfc_ncmd->seg_cnt = 0; |
|---|
| 1578 | + |
|---|
| 1401 | 1579 | /* For this clause to be valid, the payload_length |
|---|
| 1402 | 1580 | * and sg_cnt must be zero. |
|---|
| 1403 | 1581 | */ |
|---|
| 1404 | 1582 | if (nCmd->payload_length != 0) { |
|---|
| 1405 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1583 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1406 | 1584 | "6063 NVME DMA Prep Err: sg_cnt %d " |
|---|
| 1407 | 1585 | "payload_length x%x\n", |
|---|
| 1408 | 1586 | nCmd->sg_cnt, nCmd->payload_length); |
|---|
| .. | .. |
|---|
| 1437 | 1615 | { |
|---|
| 1438 | 1616 | int ret = 0; |
|---|
| 1439 | 1617 | int expedite = 0; |
|---|
| 1440 | | - int idx; |
|---|
| 1618 | + int idx, cpu; |
|---|
| 1441 | 1619 | struct lpfc_nvme_lport *lport; |
|---|
| 1442 | | - struct lpfc_nvme_ctrl_stat *cstat; |
|---|
| 1620 | + struct lpfc_fc4_ctrl_stat *cstat; |
|---|
| 1443 | 1621 | struct lpfc_vport *vport; |
|---|
| 1444 | 1622 | struct lpfc_hba *phba; |
|---|
| 1445 | 1623 | struct lpfc_nodelist *ndlp; |
|---|
| 1446 | | - struct lpfc_nvme_buf *lpfc_ncmd; |
|---|
| 1624 | + struct lpfc_io_buf *lpfc_ncmd; |
|---|
| 1447 | 1625 | struct lpfc_nvme_rport *rport; |
|---|
| 1448 | 1626 | struct lpfc_nvme_qhandle *lpfc_queue_info; |
|---|
| 1449 | 1627 | struct lpfc_nvme_fcpreq_priv *freqpriv; |
|---|
| .. | .. |
|---|
| 1473 | 1651 | |
|---|
| 1474 | 1652 | phba = vport->phba; |
|---|
| 1475 | 1653 | |
|---|
| 1476 | | - if (vport->load_flag & FC_UNLOADING) { |
|---|
| 1477 | | - ret = -ENODEV; |
|---|
| 1478 | | - goto out_fail; |
|---|
| 1479 | | - } |
|---|
| 1480 | | - |
|---|
| 1481 | | - if (vport->load_flag & FC_UNLOADING) { |
|---|
| 1654 | + if (unlikely(vport->load_flag & FC_UNLOADING)) { |
|---|
| 1482 | 1655 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
|---|
| 1483 | 1656 | "6124 Fail IO, Driver unload\n"); |
|---|
| 1484 | 1657 | atomic_inc(&lport->xmt_fcp_err); |
|---|
| .. | .. |
|---|
| 1509 | 1682 | ndlp = rport->ndlp; |
|---|
| 1510 | 1683 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
|---|
| 1511 | 1684 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, |
|---|
| 1512 | | - "6053 Fail IO, ndlp not ready: rport %p " |
|---|
| 1513 | | - "ndlp %p, DID x%06x\n", |
|---|
| 1685 | + "6053 Busy IO, ndlp not ready: rport x%px " |
|---|
| 1686 | + "ndlp x%px, DID x%06x\n", |
|---|
| 1514 | 1687 | rport, ndlp, pnvme_rport->port_id); |
|---|
| 1515 | 1688 | atomic_inc(&lport->xmt_fcp_err); |
|---|
| 1516 | 1689 | ret = -EBUSY; |
|---|
| .. | .. |
|---|
| 1561 | 1734 | } |
|---|
| 1562 | 1735 | } |
|---|
| 1563 | 1736 | |
|---|
| 1564 | | - lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite); |
|---|
| 1737 | + /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ |
|---|
| 1738 | + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { |
|---|
| 1739 | + idx = lpfc_queue_info->index; |
|---|
| 1740 | + } else { |
|---|
| 1741 | + cpu = raw_smp_processor_id(); |
|---|
| 1742 | + idx = phba->sli4_hba.cpu_map[cpu].hdwq; |
|---|
| 1743 | + } |
|---|
| 1744 | + |
|---|
| 1745 | + lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); |
|---|
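
This hunk is where the submit path now picks its hardware queue: either trust the transport's queue index (LPFC_FCP_SCHED_BY_HDWQ) or map the submitting CPU through the per-CPU cpu_map. A compact sketch of that decision, with the table and flag names invented for illustration:

```c
#include <linux/types.h>
#include <linux/smp.h>

/* Hypothetical: cpu_to_hdwq[] stands in for phba->sli4_hba.cpu_map[].hdwq */
static int pick_hdwq(bool sched_by_hdwq, int transport_qidx,
		     const u16 *cpu_to_hdwq)
{
	if (sched_by_hdwq)
		return transport_qidx;			/* honor the NVMe queue index */
	return cpu_to_hdwq[raw_smp_processor_id()];	/* follow the submitting CPU */
}
```

Whichever way idx is chosen, it also selects the per-queue nvme_cstat counters and the hardware WQ that the command, and later any abort, is posted to.
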
| 1565 | 1746 | if (lpfc_ncmd == NULL) { |
|---|
| 1566 | 1747 | atomic_inc(&lport->xmt_fcp_noxri); |
|---|
| 1567 | 1748 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
|---|
| .. | .. |
|---|
| 1588 | 1769 | */ |
|---|
| 1589 | 1770 | freqpriv->nvme_buf = lpfc_ncmd; |
|---|
| 1590 | 1771 | lpfc_ncmd->nvmeCmd = pnvme_fcreq; |
|---|
| 1591 | | - lpfc_ncmd->nrport = rport; |
|---|
| 1592 | 1772 | lpfc_ncmd->ndlp = ndlp; |
|---|
| 1593 | | - lpfc_ncmd->start_time = jiffies; |
|---|
| 1773 | + lpfc_ncmd->qidx = lpfc_queue_info->qidx; |
|---|
| 1594 | 1774 | |
|---|
| 1595 | 1775 | /* |
|---|
| 1596 | 1776 | * Issue the IO on the WQ indicated by index in the hw_queue_handle. |
|---|
| .. | .. |
|---|
| 1600 | 1780 | * index to use and that they have affinitized a CPU to this hardware |
|---|
| 1601 | 1781 | * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. |
|---|
| 1602 | 1782 | */ |
|---|
| 1603 | | - idx = lpfc_queue_info->index; |
|---|
| 1604 | 1783 | lpfc_ncmd->cur_iocbq.hba_wqidx = idx; |
|---|
| 1605 | | - cstat = &lport->cstat[idx]; |
|---|
| 1784 | + cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; |
|---|
| 1606 | 1785 | |
|---|
| 1607 | 1786 | lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); |
|---|
| 1608 | 1787 | ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); |
|---|
| .. | .. |
|---|
| 1620 | 1799 | lpfc_ncmd->cur_iocbq.sli4_xritag, |
|---|
| 1621 | 1800 | lpfc_queue_info->index, ndlp->nlp_DID); |
|---|
| 1622 | 1801 | |
|---|
| 1623 | | - ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); |
|---|
| 1802 | + ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); |
|---|
| 1624 | 1803 | if (ret) { |
|---|
| 1625 | 1804 | atomic_inc(&lport->xmt_fcp_wqerr); |
|---|
| 1626 | 1805 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
|---|
| .. | .. |
|---|
| 1631 | 1810 | goto out_free_nvme_buf; |
|---|
| 1632 | 1811 | } |
|---|
| 1633 | 1812 | |
|---|
| 1813 | + if (phba->cfg_xri_rebalancing) |
|---|
| 1814 | + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); |
|---|
| 1815 | + |
|---|
| 1634 | 1816 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
|---|
| 1635 | 1817 | if (lpfc_ncmd->ts_cmd_start) |
|---|
| 1636 | 1818 | lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); |
|---|
| 1637 | 1819 | |
|---|
| 1638 | | - if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { |
|---|
| 1639 | | - lpfc_ncmd->cpu = smp_processor_id(); |
|---|
| 1640 | | - if (lpfc_ncmd->cpu != lpfc_queue_info->index) { |
|---|
| 1641 | | - /* Check for admin queue */ |
|---|
| 1642 | | - if (lpfc_queue_info->qidx) { |
|---|
| 1643 | | - lpfc_printf_vlog(vport, |
|---|
| 1644 | | - KERN_ERR, LOG_NVME_IOERR, |
|---|
| 1645 | | - "6702 CPU Check cmd: " |
|---|
| 1646 | | - "cpu %d wq %d\n", |
|---|
| 1647 | | - lpfc_ncmd->cpu, |
|---|
| 1648 | | - lpfc_queue_info->index); |
|---|
| 1649 | | - } |
|---|
| 1650 | | - lpfc_ncmd->cpu = lpfc_queue_info->index; |
|---|
| 1651 | | - } |
|---|
| 1652 | | - if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) |
|---|
| 1653 | | - phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++; |
|---|
| 1820 | + if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { |
|---|
| 1821 | + cpu = raw_smp_processor_id(); |
|---|
| 1822 | + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); |
|---|
| 1823 | + lpfc_ncmd->cpu = cpu; |
|---|
| 1824 | + if (idx != cpu) |
|---|
| 1825 | + lpfc_printf_vlog(vport, |
|---|
| 1826 | + KERN_INFO, LOG_NVME_IOERR, |
|---|
| 1827 | + "6702 CPU Check cmd: " |
|---|
| 1828 | + "cpu %d wq %d\n", |
|---|
| 1829 | + lpfc_ncmd->cpu, |
|---|
| 1830 | + lpfc_queue_info->index); |
|---|
| 1654 | 1831 | } |
|---|
| 1655 | 1832 | #endif |
|---|
| 1656 | 1833 | return 0; |
|---|
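
Inside CONFIG_SCSI_LPFC_DEBUG_FS, the old per-queue cpucheck_xmt_io[] array is replaced by a per-CPU counter bumped with this_cpu_inc() on the __percpu c_stat pointer, so no lock or atomic is needed on the hot path. A self-contained illustration of that idiom with a made-up stat structure:

```c
#include <linux/percpu.h>
#include <linux/errno.h>

struct toy_hdwq_stat { u32 xmt_io; };		/* illustrative only */

static struct toy_hdwq_stat __percpu *toy_stat;

static int toy_stat_init(void)
{
	toy_stat = alloc_percpu(struct toy_hdwq_stat);
	return toy_stat ? 0 : -ENOMEM;
}

static void toy_count_xmt(void)
{
	this_cpu_inc(toy_stat->xmt_io);	/* no lock: each CPU bumps its own copy */
}
```
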
| .. | .. |
|---|
| 1658 | 1835 | out_free_nvme_buf: |
|---|
| 1659 | 1836 | if (lpfc_ncmd->nvmeCmd->sg_cnt) { |
|---|
| 1660 | 1837 | if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) |
|---|
| 1661 | | - atomic_dec(&cstat->fc4NvmeOutputRequests); |
|---|
| 1838 | + cstat->output_requests--; |
|---|
| 1662 | 1839 | else |
|---|
| 1663 | | - atomic_dec(&cstat->fc4NvmeInputRequests); |
|---|
| 1840 | + cstat->input_requests--; |
|---|
| 1664 | 1841 | } else |
|---|
| 1665 | | - atomic_dec(&cstat->fc4NvmeControlRequests); |
|---|
| 1842 | + cstat->control_requests--; |
|---|
| 1666 | 1843 | lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 1667 | 1844 | out_fail: |
|---|
| 1668 | 1845 | return ret; |
|---|
| .. | .. |
|---|
| 1722 | 1899 | struct lpfc_nvme_lport *lport; |
|---|
| 1723 | 1900 | struct lpfc_vport *vport; |
|---|
| 1724 | 1901 | struct lpfc_hba *phba; |
|---|
| 1725 | | - struct lpfc_nvme_buf *lpfc_nbuf; |
|---|
| 1902 | + struct lpfc_io_buf *lpfc_nbuf; |
|---|
| 1726 | 1903 | struct lpfc_iocbq *abts_buf; |
|---|
| 1727 | 1904 | struct lpfc_iocbq *nvmereq_wqe; |
|---|
| 1728 | 1905 | struct lpfc_nvme_fcpreq_priv *freqpriv; |
|---|
| 1729 | | - union lpfc_wqe128 *abts_wqe; |
|---|
| 1730 | 1906 | unsigned long flags; |
|---|
| 1731 | 1907 | int ret_val; |
|---|
| 1732 | 1908 | |
|---|
| .. | .. |
|---|
| 1756 | 1932 | /* Announce entry to the IO abort path. */ |
|---|
| 1757 | 1933 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, |
|---|
| 1758 | 1934 | "6002 Abort Request to rport DID x%06x " |
|---|
| 1759 | | - "for nvme_fc_req %p\n", |
|---|
| 1935 | + "for nvme_fc_req x%px\n", |
|---|
| 1760 | 1936 | pnvme_rport->port_id, |
|---|
| 1761 | 1937 | pnvme_fcreq); |
|---|
| 1762 | 1938 | |
|---|
| .. | .. |
|---|
| 1765 | 1941 | */ |
|---|
| 1766 | 1942 | spin_lock_irqsave(&phba->hbalock, flags); |
|---|
| 1767 | 1943 | /* driver queued commands are in process of being flushed */ |
|---|
| 1768 | | - if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { |
|---|
| 1944 | + if (phba->hba_flag & HBA_IOQ_FLUSH) { |
|---|
| 1769 | 1945 | spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1770 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 1946 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1771 | 1947 | "6139 Driver in reset cleanup - flushing " |
|---|
| 1772 | 1948 | "NVME Req now. hba_flag x%x\n", |
|---|
| 1773 | 1949 | phba->hba_flag); |
|---|
| .. | .. |
|---|
| 1777 | 1953 | lpfc_nbuf = freqpriv->nvme_buf; |
|---|
| 1778 | 1954 | if (!lpfc_nbuf) { |
|---|
| 1779 | 1955 | spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1780 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 1956 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1781 | 1957 | "6140 NVME IO req has no matching lpfc nvme " |
|---|
| 1782 | 1958 | "io buffer. Skipping abort req.\n"); |
|---|
| 1783 | 1959 | return; |
|---|
| 1784 | 1960 | } else if (!lpfc_nbuf->nvmeCmd) { |
|---|
| 1785 | 1961 | spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1786 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 1962 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1787 | 1963 | "6141 lpfc NVME IO req has no nvme_fcreq " |
|---|
| 1788 | 1964 | "io buffer. Skipping abort req.\n"); |
|---|
| 1789 | 1965 | return; |
|---|
| 1790 | 1966 | } |
|---|
| 1791 | 1967 | nvmereq_wqe = &lpfc_nbuf->cur_iocbq; |
|---|
| 1968 | + |
|---|
| 1969 | + /* Guard against IO completion being called at the same time */ |
|---|
| 1970 | + spin_lock(&lpfc_nbuf->buf_lock); |
|---|
| 1792 | 1971 | |
|---|
| 1793 | 1972 | /* |
|---|
| 1794 | 1973 | * The lpfc_nbuf and the mapped nvme_fcreq in the driver's |
|---|
| .. | .. |
|---|
| 1798 | 1977 | * has not seen it yet. |
|---|
| 1799 | 1978 | */ |
|---|
| 1800 | 1979 | if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { |
|---|
| 1801 | | - spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1802 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 1980 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1803 | 1981 | "6143 NVME req mismatch: " |
|---|
| 1804 | | - "lpfc_nbuf %p nvmeCmd %p, " |
|---|
| 1805 | | - "pnvme_fcreq %p. Skipping Abort xri x%x\n", |
|---|
| 1982 | + "lpfc_nbuf x%px nvmeCmd x%px, " |
|---|
| 1983 | + "pnvme_fcreq x%px. Skipping Abort xri x%x\n", |
|---|
| 1806 | 1984 | lpfc_nbuf, lpfc_nbuf->nvmeCmd, |
|---|
| 1807 | 1985 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
|---|
| 1808 | | - return; |
|---|
| 1986 | + goto out_unlock; |
|---|
| 1809 | 1987 | } |
|---|
| 1810 | 1988 | |
|---|
| 1811 | 1989 | /* Don't abort IOs no longer on the pending queue. */ |
|---|
| 1812 | 1990 | if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { |
|---|
| 1813 | | - spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1814 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 1815 | | - "6142 NVME IO req %p not queued - skipping " |
|---|
| 1991 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1992 | + "6142 NVME IO req x%px not queued - skipping " |
|---|
| 1816 | 1993 | "abort req xri x%x\n", |
|---|
| 1817 | 1994 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
|---|
| 1818 | | - return; |
|---|
| 1995 | + goto out_unlock; |
|---|
| 1819 | 1996 | } |
|---|
| 1820 | 1997 | |
|---|
| 1821 | 1998 | atomic_inc(&lport->xmt_fcp_abort); |
|---|
| .. | .. |
|---|
| 1825 | 2002 | |
|---|
| 1826 | 2003 | /* Outstanding abort is in progress */ |
|---|
| 1827 | 2004 | if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { |
|---|
| 1828 | | - spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1829 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 2005 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1830 | 2006 | "6144 Outstanding NVME I/O Abort Request " |
|---|
| 1831 | | - "still pending on nvme_fcreq %p, " |
|---|
| 1832 | | - "lpfc_ncmd %p xri x%x\n", |
|---|
| 2007 | + "still pending on nvme_fcreq x%px, " |
|---|
| 2008 | + "lpfc_ncmd %px xri x%x\n", |
|---|
| 1833 | 2009 | pnvme_fcreq, lpfc_nbuf, |
|---|
| 1834 | 2010 | nvmereq_wqe->sli4_xritag); |
|---|
| 1835 | | - return; |
|---|
| 2011 | + goto out_unlock; |
|---|
| 1836 | 2012 | } |
|---|
| 1837 | 2013 | |
|---|
| 1838 | 2014 | abts_buf = __lpfc_sli_get_iocbq(phba); |
|---|
| 1839 | 2015 | if (!abts_buf) { |
|---|
| 1840 | | - spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1841 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 2016 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1842 | 2017 | "6136 No available abort wqes. Skipping " |
|---|
| 1843 | | - "Abts req for nvme_fcreq %p xri x%x\n", |
|---|
| 2018 | + "Abts req for nvme_fcreq x%px xri x%x\n", |
|---|
| 1844 | 2019 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
|---|
| 1845 | | - return; |
|---|
| 2020 | + goto out_unlock; |
|---|
| 1846 | 2021 | } |
|---|
| 1847 | 2022 | |
|---|
| 1848 | 2023 | /* Ready - mark outstanding as aborted by driver. */ |
|---|
| 1849 | 2024 | nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; |
|---|
| 1850 | 2025 | |
|---|
| 1851 | | - /* Complete prepping the abort wqe and issue to the FW. */ |
|---|
| 1852 | | - abts_wqe = &abts_buf->wqe; |
|---|
| 1853 | | - |
|---|
| 1854 | | - /* WQEs are reused. Clear stale data and set key fields to |
|---|
| 1855 | | - * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. |
|---|
| 1856 | | - */ |
|---|
| 1857 | | - memset(abts_wqe, 0, sizeof(union lpfc_wqe)); |
|---|
| 1858 | | - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); |
|---|
| 1859 | | - |
|---|
| 1860 | | - /* word 7 */ |
|---|
| 1861 | | - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
|---|
| 1862 | | - bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, |
|---|
| 1863 | | - nvmereq_wqe->iocb.ulpClass); |
|---|
| 1864 | | - |
|---|
| 1865 | | - /* word 8 - tell the FW to abort the IO associated with this |
|---|
| 1866 | | - * outstanding exchange ID. |
|---|
| 1867 | | - */ |
|---|
| 1868 | | - abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; |
|---|
| 1869 | | - |
|---|
| 1870 | | - /* word 9 - this is the iotag for the abts_wqe completion. */ |
|---|
| 1871 | | - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, |
|---|
| 1872 | | - abts_buf->iotag); |
|---|
| 1873 | | - |
|---|
| 1874 | | - /* word 10 */ |
|---|
| 1875 | | - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); |
|---|
| 1876 | | - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); |
|---|
| 1877 | | - |
|---|
| 1878 | | - /* word 11 */ |
|---|
| 1879 | | - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); |
|---|
| 1880 | | - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); |
|---|
| 1881 | | - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
|---|
| 2026 | + lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0); |
|---|
| 1882 | 2027 | |
|---|
| 1883 | 2028 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
|---|
| 1884 | 2029 | abts_buf->iocb_flag |= LPFC_IO_NVME; |
|---|
| 1885 | 2030 | abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; |
|---|
| 1886 | 2031 | abts_buf->vport = vport; |
|---|
| 1887 | 2032 | abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; |
|---|
| 1888 | | - ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); |
|---|
| 2033 | + ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf); |
|---|
| 2034 | + spin_unlock(&lpfc_nbuf->buf_lock); |
|---|
| 1889 | 2035 | spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 1890 | 2036 | if (ret_val) { |
|---|
| 1891 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
|---|
| 2037 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1892 | 2038 | "6137 Failed abts issue_wqe with status x%x " |
|---|
| 1893 | | - "for nvme_fcreq %p.\n", |
|---|
| 2039 | + "for nvme_fcreq x%px.\n", |
|---|
| 1894 | 2040 | ret_val, pnvme_fcreq); |
|---|
| 1895 | 2041 | lpfc_sli_release_iocbq(phba, abts_buf); |
|---|
| 1896 | 2042 | return; |
|---|
| .. | .. |
|---|
| 1901 | 2047 | "ox_id x%x on reqtag x%x\n", |
|---|
| 1902 | 2048 | nvmereq_wqe->sli4_xritag, |
|---|
| 1903 | 2049 | abts_buf->iotag); |
|---|
| 2050 | + return; |
|---|
| 2051 | + |
|---|
| 2052 | +out_unlock: |
|---|
| 2053 | + spin_unlock(&lpfc_nbuf->buf_lock); |
|---|
| 2054 | + spin_unlock_irqrestore(&phba->hbalock, flags); |
|---|
| 2055 | + return; |
|---|
| 1904 | 2056 | } |
|---|
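
Two locks are now involved in the abort path: the adapter-wide hbalock (taken irqsave) and the per-buffer buf_lock nested inside it, with every early exit funneled through out_unlock so the release order stays inner-then-outer. The skeleton below restates just that locking shape; it is a sketch, not driver code, and the placeholder comment stands in for the validation and WQE work:

```c
#include <linux/spinlock.h>

static void toy_abort(spinlock_t *hbalock, spinlock_t *buf_lock)
{
	unsigned long flags;

	spin_lock_irqsave(hbalock, flags);	/* outer: adapter-wide state */
	spin_lock(buf_lock);			/* inner: guards the io buffer */

	/* ... validate the request and build/post the abort WQE ... */

	spin_unlock(buf_lock);			/* always release inner first */
	spin_unlock_irqrestore(hbalock, flags);
}
```
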
| 1905 | 2057 | |
|---|
| 1906 | 2058 | /* Declare and initialization an instance of the FC NVME template. */ |
|---|
| .. | .. |
|---|
| 1914 | 2066 | .fcp_io = lpfc_nvme_fcp_io_submit, |
|---|
| 1915 | 2067 | .ls_abort = lpfc_nvme_ls_abort, |
|---|
| 1916 | 2068 | .fcp_abort = lpfc_nvme_fcp_abort, |
|---|
| 2069 | + .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp, |
|---|
| 1917 | 2070 | |
|---|
| 1918 | 2071 | .max_hw_queues = 1, |
|---|
| 1919 | 2072 | .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, |
|---|
| .. | .. |
|---|
| 1930 | 2083 | }; |
|---|
| 1931 | 2084 | |
|---|
| 1932 | 2085 | /** |
|---|
| 1933 | | - * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware |
|---|
| 1934 | | - * @phba: pointer to lpfc hba data structure. |
|---|
| 1935 | | - * @nblist: pointer to nvme buffer list. |
|---|
| 1936 | | - * @count: number of scsi buffers on the list. |
|---|
| 1937 | | - * |
|---|
| 1938 | | - * This routine is invoked to post a block of @count scsi sgl pages from a |
|---|
| 1939 | | - * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. |
|---|
| 1940 | | - * No Lock is held. |
|---|
| 1941 | | - * |
|---|
| 1942 | | - **/ |
|---|
| 1943 | | -static int |
|---|
| 1944 | | -lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba, |
|---|
| 1945 | | - struct list_head *nblist, |
|---|
| 1946 | | - int count) |
|---|
| 1947 | | -{ |
|---|
| 1948 | | - struct lpfc_nvme_buf *lpfc_ncmd; |
|---|
| 1949 | | - struct lpfc_mbx_post_uembed_sgl_page1 *sgl; |
|---|
| 1950 | | - struct sgl_page_pairs *sgl_pg_pairs; |
|---|
| 1951 | | - void *viraddr; |
|---|
| 1952 | | - LPFC_MBOXQ_t *mbox; |
|---|
| 1953 | | - uint32_t reqlen, alloclen, pg_pairs; |
|---|
| 1954 | | - uint32_t mbox_tmo; |
|---|
| 1955 | | - uint16_t xritag_start = 0; |
|---|
| 1956 | | - int rc = 0; |
|---|
| 1957 | | - uint32_t shdr_status, shdr_add_status; |
|---|
| 1958 | | - dma_addr_t pdma_phys_bpl1; |
|---|
| 1959 | | - union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 1960 | | - |
|---|
| 1961 | | - /* Calculate the requested length of the dma memory */ |
|---|
| 1962 | | - reqlen = count * sizeof(struct sgl_page_pairs) + |
|---|
| 1963 | | - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); |
|---|
| 1964 | | - if (reqlen > SLI4_PAGE_SIZE) { |
|---|
| 1965 | | - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
|---|
| 1966 | | - "6118 Block sgl registration required DMA " |
|---|
| 1967 | | - "size (%d) great than a page\n", reqlen); |
|---|
| 1968 | | - return -ENOMEM; |
|---|
| 1969 | | - } |
|---|
| 1970 | | - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 1971 | | - if (!mbox) { |
|---|
| 1972 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 1973 | | - "6119 Failed to allocate mbox cmd memory\n"); |
|---|
| 1974 | | - return -ENOMEM; |
|---|
| 1975 | | - } |
|---|
| 1976 | | - |
|---|
| 1977 | | - /* Allocate DMA memory and set up the non-embedded mailbox command */ |
|---|
| 1978 | | - alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
|---|
| 1979 | | - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, |
|---|
| 1980 | | - LPFC_SLI4_MBX_NEMBED); |
|---|
| 1981 | | - |
|---|
| 1982 | | - if (alloclen < reqlen) { |
|---|
| 1983 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 1984 | | - "6120 Allocated DMA memory size (%d) is " |
|---|
| 1985 | | - "less than the requested DMA memory " |
|---|
| 1986 | | - "size (%d)\n", alloclen, reqlen); |
|---|
| 1987 | | - lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| 1988 | | - return -ENOMEM; |
|---|
| 1989 | | - } |
|---|
| 1990 | | - |
|---|
| 1991 | | - /* Get the first SGE entry from the non-embedded DMA memory */ |
|---|
| 1992 | | - viraddr = mbox->sge_array->addr[0]; |
|---|
| 1993 | | - |
|---|
| 1994 | | - /* Set up the SGL pages in the non-embedded DMA pages */ |
|---|
| 1995 | | - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; |
|---|
| 1996 | | - sgl_pg_pairs = &sgl->sgl_pg_pairs; |
|---|
| 1997 | | - |
|---|
| 1998 | | - pg_pairs = 0; |
|---|
| 1999 | | - list_for_each_entry(lpfc_ncmd, nblist, list) { |
|---|
| 2000 | | - /* Set up the sge entry */ |
|---|
| 2001 | | - sgl_pg_pairs->sgl_pg0_addr_lo = |
|---|
| 2002 | | - cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); |
|---|
| 2003 | | - sgl_pg_pairs->sgl_pg0_addr_hi = |
|---|
| 2004 | | - cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); |
|---|
| 2005 | | - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) |
|---|
| 2006 | | - pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + |
|---|
| 2007 | | - SGL_PAGE_SIZE; |
|---|
| 2008 | | - else |
|---|
| 2009 | | - pdma_phys_bpl1 = 0; |
|---|
| 2010 | | - sgl_pg_pairs->sgl_pg1_addr_lo = |
|---|
| 2011 | | - cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); |
|---|
| 2012 | | - sgl_pg_pairs->sgl_pg1_addr_hi = |
|---|
| 2013 | | - cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); |
|---|
| 2014 | | - /* Keep the first xritag on the list */ |
|---|
| 2015 | | - if (pg_pairs == 0) |
|---|
| 2016 | | - xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; |
|---|
| 2017 | | - sgl_pg_pairs++; |
|---|
| 2018 | | - pg_pairs++; |
|---|
| 2019 | | - } |
|---|
| 2020 | | - bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); |
|---|
| 2021 | | - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); |
|---|
| 2022 | | - /* Perform endian conversion if necessary */ |
|---|
| 2023 | | - sgl->word0 = cpu_to_le32(sgl->word0); |
|---|
| 2024 | | - |
|---|
| 2025 | | - if (!phba->sli4_hba.intr_enable) |
|---|
| 2026 | | - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
|---|
| 2027 | | - else { |
|---|
| 2028 | | - mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
|---|
| 2029 | | - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
|---|
| 2030 | | - } |
|---|
| 2031 | | - shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; |
|---|
| 2032 | | - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 2033 | | - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 2034 | | - if (rc != MBX_TIMEOUT) |
|---|
| 2035 | | - lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| 2036 | | - if (shdr_status || shdr_add_status || rc) { |
|---|
| 2037 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 2038 | | - "6125 POST_SGL_BLOCK mailbox command failed " |
|---|
| 2039 | | - "status x%x add_status x%x mbx status x%x\n", |
|---|
| 2040 | | - shdr_status, shdr_add_status, rc); |
|---|
| 2041 | | - rc = -ENXIO; |
|---|
| 2042 | | - } |
|---|
| 2043 | | - return rc; |
|---|
| 2044 | | -} |
|---|
| 2045 | | - |
|---|
| 2046 | | -/** |
|---|
| 2047 | | - * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list |
|---|
| 2048 | | - * @phba: pointer to lpfc hba data structure. |
|---|
| 2049 | | - * @post_nblist: pointer to the nvme buffer list. |
|---|
| 2050 | | - * |
|---|
| 2051 | | - * This routine walks a list of nvme buffers that was passed in. It attempts |
|---|
| 2052 | | - * to construct blocks of nvme buffer sgls which contains contiguous xris and |
|---|
| 2053 | | - * uses the non-embedded SGL block post mailbox commands to post to the port. |
|---|
| 2054 | | - * For single NVME buffer sgl with non-contiguous xri, if any, it shall use |
|---|
| 2055 | | - * embedded SGL post mailbox command for posting. The @post_nblist passed in |
|---|
| 2056 | | - * must be local list, thus no lock is needed when manipulate the list. |
|---|
| 2057 | | - * |
|---|
| 2058 | | - * Returns: 0 = failure, non-zero number of successfully posted buffers. |
|---|
| 2059 | | - **/ |
|---|
| 2060 | | -static int |
|---|
| 2061 | | -lpfc_post_nvme_sgl_list(struct lpfc_hba *phba, |
|---|
| 2062 | | - struct list_head *post_nblist, int sb_count) |
|---|
| 2063 | | -{ |
|---|
| 2064 | | - struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; |
|---|
| 2065 | | - int status, sgl_size; |
|---|
| 2066 | | - int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; |
|---|
| 2067 | | - dma_addr_t pdma_phys_sgl1; |
|---|
| 2068 | | - int last_xritag = NO_XRI; |
|---|
| 2069 | | - int cur_xritag; |
|---|
| 2070 | | - LIST_HEAD(prep_nblist); |
|---|
| 2071 | | - LIST_HEAD(blck_nblist); |
|---|
| 2072 | | - LIST_HEAD(nvme_nblist); |
|---|
| 2073 | | - |
|---|
| 2074 | | - /* sanity check */ |
|---|
| 2075 | | - if (sb_count <= 0) |
|---|
| 2076 | | - return -EINVAL; |
|---|
| 2077 | | - |
|---|
| 2078 | | - sgl_size = phba->cfg_sg_dma_buf_size; |
|---|
| 2079 | | - |
|---|
| 2080 | | - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { |
|---|
| 2081 | | - list_del_init(&lpfc_ncmd->list); |
|---|
| 2082 | | - block_cnt++; |
|---|
| 2083 | | - if ((last_xritag != NO_XRI) && |
|---|
| 2084 | | - (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { |
|---|
| 2085 | | - /* a hole in xri block, form a sgl posting block */ |
|---|
| 2086 | | - list_splice_init(&prep_nblist, &blck_nblist); |
|---|
| 2087 | | - post_cnt = block_cnt - 1; |
|---|
| 2088 | | - /* prepare list for next posting block */ |
|---|
| 2089 | | - list_add_tail(&lpfc_ncmd->list, &prep_nblist); |
|---|
| 2090 | | - block_cnt = 1; |
|---|
| 2091 | | - } else { |
|---|
| 2092 | | - /* prepare list for next posting block */ |
|---|
| 2093 | | - list_add_tail(&lpfc_ncmd->list, &prep_nblist); |
|---|
| 2094 | | - /* enough sgls for non-embed sgl mbox command */ |
|---|
| 2095 | | - if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { |
|---|
| 2096 | | - list_splice_init(&prep_nblist, &blck_nblist); |
|---|
| 2097 | | - post_cnt = block_cnt; |
|---|
| 2098 | | - block_cnt = 0; |
|---|
| 2099 | | - } |
|---|
| 2100 | | - } |
|---|
| 2101 | | - num_posting++; |
|---|
| 2102 | | - last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; |
|---|
| 2103 | | - |
|---|
| 2104 | | - /* end of repost sgl list condition for NVME buffers */ |
|---|
| 2105 | | - if (num_posting == sb_count) { |
|---|
| 2106 | | - if (post_cnt == 0) { |
|---|
| 2107 | | - /* last sgl posting block */ |
|---|
| 2108 | | - list_splice_init(&prep_nblist, &blck_nblist); |
|---|
| 2109 | | - post_cnt = block_cnt; |
|---|
| 2110 | | - } else if (block_cnt == 1) { |
|---|
| 2111 | | - /* last single sgl with non-contiguous xri */ |
|---|
| 2112 | | - if (sgl_size > SGL_PAGE_SIZE) |
|---|
| 2113 | | - pdma_phys_sgl1 = |
|---|
| 2114 | | - lpfc_ncmd->dma_phys_sgl + |
|---|
| 2115 | | - SGL_PAGE_SIZE; |
|---|
| 2116 | | - else |
|---|
| 2117 | | - pdma_phys_sgl1 = 0; |
|---|
| 2118 | | - cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; |
|---|
| 2119 | | - status = lpfc_sli4_post_sgl(phba, |
|---|
| 2120 | | - lpfc_ncmd->dma_phys_sgl, |
|---|
| 2121 | | - pdma_phys_sgl1, cur_xritag); |
|---|
| 2122 | | - if (status) { |
|---|
| 2123 | | - /* failure, put on abort nvme list */ |
|---|
| 2124 | | - lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; |
|---|
| 2125 | | - } else { |
|---|
| 2126 | | - /* success, put on NVME buffer list */ |
|---|
| 2127 | | - lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; |
|---|
| 2128 | | - lpfc_ncmd->status = IOSTAT_SUCCESS; |
|---|
| 2129 | | - num_posted++; |
|---|
| 2130 | | - } |
|---|
| 2131 | | - /* success, put on NVME buffer sgl list */ |
|---|
| 2132 | | - list_add_tail(&lpfc_ncmd->list, &nvme_nblist); |
|---|
| 2133 | | - } |
|---|
| 2134 | | - } |
|---|
| 2135 | | - |
|---|
| 2136 | | - /* continue until a nembed page worth of sgls */ |
|---|
| 2137 | | - if (post_cnt == 0) |
|---|
| 2138 | | - continue; |
|---|
| 2139 | | - |
|---|
| 2140 | | - /* post block of NVME buffer list sgls */ |
|---|
| 2141 | | - status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist, |
|---|
| 2142 | | - post_cnt); |
|---|
| 2143 | | - |
|---|
| 2144 | | - /* don't reset xirtag due to hole in xri block */ |
|---|
| 2145 | | - if (block_cnt == 0) |
|---|
| 2146 | | - last_xritag = NO_XRI; |
|---|
| 2147 | | - |
|---|
| 2148 | | - /* reset NVME buffer post count for next round of posting */ |
|---|
| 2149 | | - post_cnt = 0; |
|---|
| 2150 | | - |
|---|
| 2151 | | - /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ |
|---|
| 2152 | | - while (!list_empty(&blck_nblist)) { |
|---|
| 2153 | | - list_remove_head(&blck_nblist, lpfc_ncmd, |
|---|
| 2154 | | - struct lpfc_nvme_buf, list); |
|---|
| 2155 | | - if (status) { |
|---|
| 2156 | | - /* failure, put on abort nvme list */ |
|---|
| 2157 | | - lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; |
|---|
| 2158 | | - } else { |
|---|
| 2159 | | - /* success, put on NVME buffer list */ |
|---|
| 2160 | | - lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; |
|---|
| 2161 | | - lpfc_ncmd->status = IOSTAT_SUCCESS; |
|---|
| 2162 | | - num_posted++; |
|---|
| 2163 | | - } |
|---|
| 2164 | | - list_add_tail(&lpfc_ncmd->list, &nvme_nblist); |
|---|
| 2165 | | - } |
|---|
| 2166 | | - } |
|---|
| 2167 | | - /* Push NVME buffers with sgl posted to the available list */ |
|---|
| 2168 | | - while (!list_empty(&nvme_nblist)) { |
|---|
| 2169 | | - list_remove_head(&nvme_nblist, lpfc_ncmd, |
|---|
| 2170 | | - struct lpfc_nvme_buf, list); |
|---|
| 2171 | | - lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 2172 | | - } |
|---|
| 2173 | | - return num_posted; |
|---|
| 2174 | | -} |
|---|
| 2175 | | - |
|---|
| 2176 | | -/** |
|---|
| 2177 | | - * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls |
|---|
| 2178 | | - * @phba: pointer to lpfc hba data structure. |
|---|
| 2179 | | - * |
|---|
| 2180 | | - * This routine walks the list of nvme buffers that have been allocated and |
|---|
| 2181 | | - * repost them to the port by using SGL block post. This is needed after a |
|---|
| 2182 | | - * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine |
|---|
| 2183 | | - * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list |
|---|
| 2184 | | - * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers. |
|---|
| 2185 | | - * |
|---|
| 2186 | | - * Returns: 0 = success, non-zero failure. |
|---|
| 2187 | | - **/ |
|---|
| 2188 | | -int |
|---|
| 2189 | | -lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba) |
|---|
| 2190 | | -{ |
|---|
| 2191 | | - LIST_HEAD(post_nblist); |
|---|
| 2192 | | - int num_posted, rc = 0; |
|---|
| 2193 | | - |
|---|
| 2194 | | - /* get all NVME buffers need to repost to a local list */ |
|---|
| 2195 | | - spin_lock_irq(&phba->nvme_buf_list_get_lock); |
|---|
| 2196 | | - spin_lock(&phba->nvme_buf_list_put_lock); |
|---|
| 2197 | | - list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist); |
|---|
| 2198 | | - list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist); |
|---|
| 2199 | | - phba->get_nvme_bufs = 0; |
|---|
| 2200 | | - phba->put_nvme_bufs = 0; |
|---|
| 2201 | | - spin_unlock(&phba->nvme_buf_list_put_lock); |
|---|
| 2202 | | - spin_unlock_irq(&phba->nvme_buf_list_get_lock); |
|---|
| 2203 | | - |
|---|
| 2204 | | - /* post the list of nvme buffer sgls to port if available */ |
|---|
| 2205 | | - if (!list_empty(&post_nblist)) { |
|---|
| 2206 | | - num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist, |
|---|
| 2207 | | - phba->sli4_hba.nvme_xri_cnt); |
|---|
| 2208 | | - /* failed to post any nvme buffer, return error */ |
|---|
| 2209 | | - if (num_posted == 0) |
|---|
| 2210 | | - rc = -EIO; |
|---|
| 2211 | | - } |
|---|
| 2212 | | - return rc; |
|---|
| 2213 | | -} |
|---|
| 2214 | | - |
|---|
| 2215 | | -/** |
|---|
| 2216 | | - * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec |
|---|
| 2217 | | - * @vport: The virtual port for which this call being executed. |
|---|
| 2218 | | - * @num_to_allocate: The requested number of buffers to allocate. |
|---|
| 2219 | | - * |
|---|
| 2220 | | - * This routine allocates nvme buffers for device with SLI-4 interface spec, |
|---|
| 2221 | | - * the nvme buffer contains all the necessary information needed to initiate |
|---|
| 2222 | | - * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put |
|---|
| 2223 | | - * them on a list, it post them to the port by using SGL block post. |
|---|
| 2224 | | - * |
|---|
| 2225 | | - * Return codes: |
|---|
| 2226 | | - * int - number of nvme buffers that were allocated and posted. |
|---|
| 2227 | | - * 0 = failure, less than num_to_alloc is a partial failure. |
|---|
| 2228 | | - **/ |
|---|
| 2229 | | -static int |
|---|
| 2230 | | -lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) |
|---|
| 2231 | | -{ |
|---|
| 2232 | | - struct lpfc_hba *phba = vport->phba; |
|---|
| 2233 | | - struct lpfc_nvme_buf *lpfc_ncmd; |
|---|
| 2234 | | - struct lpfc_iocbq *pwqeq; |
|---|
| 2235 | | - union lpfc_wqe128 *wqe; |
|---|
| 2236 | | - struct sli4_sge *sgl; |
|---|
| 2237 | | - dma_addr_t pdma_phys_sgl; |
|---|
| 2238 | | - uint16_t iotag, lxri = 0; |
|---|
| 2239 | | - int bcnt, num_posted, sgl_size; |
|---|
| 2240 | | - LIST_HEAD(prep_nblist); |
|---|
| 2241 | | - LIST_HEAD(post_nblist); |
|---|
| 2242 | | - LIST_HEAD(nvme_nblist); |
|---|
| 2243 | | - |
|---|
| 2244 | | - sgl_size = phba->cfg_sg_dma_buf_size; |
|---|
| 2245 | | - |
|---|
| 2246 | | - for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { |
|---|
| 2247 | | - lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL); |
|---|
| 2248 | | - if (!lpfc_ncmd) |
|---|
| 2249 | | - break; |
|---|
| 2250 | | - /* |
|---|
| 2251 | | - * Get memory from the pci pool to map the virt space to |
|---|
| 2252 | | - * pci bus space for an I/O. The DMA buffer includes the |
|---|
| 2253 | | - * number of SGE's necessary to support the sg_tablesize. |
|---|
| 2254 | | - */ |
|---|
| 2255 | | - lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, |
|---|
| 2256 | | - GFP_KERNEL, |
|---|
| 2257 | | - &lpfc_ncmd->dma_handle); |
|---|
| 2258 | | - if (!lpfc_ncmd->data) { |
|---|
| 2259 | | - kfree(lpfc_ncmd); |
|---|
| 2260 | | - break; |
|---|
| 2261 | | - } |
|---|
| 2262 | | - |
|---|
| 2263 | | - lxri = lpfc_sli4_next_xritag(phba); |
|---|
| 2264 | | - if (lxri == NO_XRI) { |
|---|
| 2265 | | - dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
|---|
| 2266 | | - lpfc_ncmd->data, lpfc_ncmd->dma_handle); |
|---|
| 2267 | | - kfree(lpfc_ncmd); |
|---|
| 2268 | | - break; |
|---|
| 2269 | | - } |
|---|
| 2270 | | - pwqeq = &(lpfc_ncmd->cur_iocbq); |
|---|
| 2271 | | - wqe = &pwqeq->wqe; |
|---|
| 2272 | | - |
|---|
| 2273 | | - /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ |
|---|
| 2274 | | - iotag = lpfc_sli_next_iotag(phba, pwqeq); |
|---|
| 2275 | | - if (iotag == 0) { |
|---|
| 2276 | | - dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
|---|
| 2277 | | - lpfc_ncmd->data, lpfc_ncmd->dma_handle); |
|---|
| 2278 | | - kfree(lpfc_ncmd); |
|---|
| 2279 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 2280 | | - "6121 Failed to allocated IOTAG for" |
|---|
| 2281 | | - " XRI:0x%x\n", lxri); |
|---|
| 2282 | | - lpfc_sli4_free_xri(phba, lxri); |
|---|
| 2283 | | - break; |
|---|
| 2284 | | - } |
|---|
| 2285 | | - pwqeq->sli4_lxritag = lxri; |
|---|
| 2286 | | - pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; |
|---|
| 2287 | | - pwqeq->iocb_flag |= LPFC_IO_NVME; |
|---|
| 2288 | | - pwqeq->context1 = lpfc_ncmd; |
|---|
| 2289 | | - pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; |
|---|
| 2290 | | - |
|---|
| 2291 | | - /* Initialize local short-hand pointers. */ |
|---|
| 2292 | | - lpfc_ncmd->nvme_sgl = lpfc_ncmd->data; |
|---|
| 2293 | | - sgl = lpfc_ncmd->nvme_sgl; |
|---|
| 2294 | | - pdma_phys_sgl = lpfc_ncmd->dma_handle; |
|---|
| 2295 | | - lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl; |
|---|
| 2296 | | - |
|---|
| 2297 | | - /* Rsp SGE will be filled in when we rcv an IO |
|---|
| 2298 | | - * from the NVME Layer to be sent. |
|---|
| 2299 | | - * The cmd is going to be embedded so we need a SKIP SGE. |
|---|
| 2300 | | - */ |
|---|
| 2301 | | - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
|---|
| 2302 | | - bf_set(lpfc_sli4_sge_last, sgl, 0); |
|---|
| 2303 | | - sgl->word2 = cpu_to_le32(sgl->word2); |
|---|
| 2304 | | - /* Fill in word 3 / sgl_len during cmd submission */ |
|---|
| 2305 | | - |
|---|
| 2306 | | - lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; |
|---|
| 2307 | | - |
|---|
| 2308 | | - /* Initialize WQE */ |
|---|
| 2309 | | - memset(wqe, 0, sizeof(union lpfc_wqe)); |
|---|
| 2310 | | - |
|---|
| 2311 | | - /* add the nvme buffer to a post list */ |
|---|
| 2312 | | - list_add_tail(&lpfc_ncmd->list, &post_nblist); |
|---|
| 2313 | | - spin_lock_irq(&phba->nvme_buf_list_get_lock); |
|---|
| 2314 | | - phba->sli4_hba.nvme_xri_cnt++; |
|---|
| 2315 | | - spin_unlock_irq(&phba->nvme_buf_list_get_lock); |
|---|
| 2316 | | - } |
|---|
| 2317 | | - lpfc_printf_log(phba, KERN_INFO, LOG_NVME, |
|---|
| 2318 | | - "6114 Allocate %d out of %d requested new NVME " |
|---|
| 2319 | | - "buffers\n", bcnt, num_to_alloc); |
|---|
| 2320 | | - |
|---|
| 2321 | | - /* post the list of nvme buffer sgls to port if available */ |
|---|
| 2322 | | - if (!list_empty(&post_nblist)) |
|---|
| 2323 | | - num_posted = lpfc_post_nvme_sgl_list(phba, |
|---|
| 2324 | | - &post_nblist, bcnt); |
|---|
| 2325 | | - else |
|---|
| 2326 | | - num_posted = 0; |
|---|
| 2327 | | - |
|---|
| 2328 | | - return num_posted; |
|---|
| 2329 | | -} |
|---|
| 2330 | | - |
|---|
| 2331 | | -static inline struct lpfc_nvme_buf * |
|---|
| 2332 | | -lpfc_nvme_buf(struct lpfc_hba *phba) |
|---|
| 2333 | | -{ |
|---|
| 2334 | | - struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; |
|---|
| 2335 | | - |
|---|
| 2336 | | - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
|---|
| 2337 | | - &phba->lpfc_nvme_buf_list_get, list) { |
|---|
| 2338 | | - list_del_init(&lpfc_ncmd->list); |
|---|
| 2339 | | - phba->get_nvme_bufs--; |
|---|
| 2340 | | - return lpfc_ncmd; |
|---|
| 2341 | | - } |
|---|
| 2342 | | - return NULL; |
|---|
| 2343 | | -} |
|---|
| 2344 | | - |
|---|
| 2345 | | -/** |
|---|
| 2346 | | - * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA |
|---|
| 2086 | + * lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA |
|---|
| 2347 | 2087 | * @phba: The HBA for which this call is being executed. |
|---|
| 2348 | 2088 | * |
|---|
| 2349 | | - * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list |
|---|
| 2089 | + * This routine removes an nvme buffer from the head of the @hdwq io_buf_list |
|---|
| 2350 | 2090 | * and returns it to the caller. |
|---|
| 2351 | 2091 | * |
|---|
| 2352 | 2092 | * Return codes: |
|---|
| 2353 | 2093 | * NULL - Error |
|---|
| 2354 | 2094 | * Pointer to lpfc_io_buf - Success |
|---|
| 2355 | 2095 | **/ |
|---|
| 2356 | | -static struct lpfc_nvme_buf * |
|---|
| 2096 | +static struct lpfc_io_buf * |
|---|
| 2357 | 2097 | lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
|---|
| 2358 | | - int expedite) |
|---|
| 2098 | + int idx, int expedite) |
|---|
| 2359 | 2099 | { |
|---|
| 2360 | | - struct lpfc_nvme_buf *lpfc_ncmd = NULL; |
|---|
| 2361 | | - unsigned long iflag = 0; |
|---|
| 2100 | + struct lpfc_io_buf *lpfc_ncmd; |
|---|
| 2101 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 2102 | + struct sli4_sge *sgl; |
|---|
| 2103 | + struct lpfc_iocbq *pwqeq; |
|---|
| 2104 | + union lpfc_wqe128 *wqe; |
|---|
| 2362 | 2105 | |
|---|
| 2363 | | - spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag); |
|---|
| 2364 | | - if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) |
|---|
| 2365 | | - lpfc_ncmd = lpfc_nvme_buf(phba); |
|---|
| 2366 | | - if (!lpfc_ncmd) { |
|---|
| 2367 | | - spin_lock(&phba->nvme_buf_list_put_lock); |
|---|
| 2368 | | - list_splice(&phba->lpfc_nvme_buf_list_put, |
|---|
| 2369 | | - &phba->lpfc_nvme_buf_list_get); |
|---|
| 2370 | | - phba->get_nvme_bufs += phba->put_nvme_bufs; |
|---|
| 2371 | | - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); |
|---|
| 2372 | | - phba->put_nvme_bufs = 0; |
|---|
| 2373 | | - spin_unlock(&phba->nvme_buf_list_put_lock); |
|---|
| 2374 | | - if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) |
|---|
| 2375 | | - lpfc_ncmd = lpfc_nvme_buf(phba); |
|---|
| 2376 | | - } |
|---|
| 2377 | | - spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); |
|---|
| 2106 | + lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); |
|---|
| 2378 | 2107 | |
|---|
| 2379 | | - if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) { |
|---|
| 2380 | | - atomic_inc(&ndlp->cmd_pending); |
|---|
| 2381 | | - lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH; |
|---|
| 2108 | + if (lpfc_ncmd) { |
|---|
| 2109 | + pwqeq = &(lpfc_ncmd->cur_iocbq); |
|---|
| 2110 | + wqe = &pwqeq->wqe; |
|---|
| 2111 | + |
|---|
| 2112 | + /* Setup key fields in buffer that may have been changed |
|---|
| 2113 | + * if other protocols used this buffer. |
|---|
| 2114 | + */ |
|---|
| 2115 | + pwqeq->iocb_flag = LPFC_IO_NVME; |
|---|
| 2116 | + pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; |
|---|
| 2117 | + lpfc_ncmd->start_time = jiffies; |
|---|
| 2118 | + lpfc_ncmd->flags = 0; |
|---|
| 2119 | + |
|---|
| 2120 | + /* Rsp SGE will be filled in when we rcv an IO |
|---|
| 2121 | + * from the NVME Layer to be sent. |
|---|
| 2122 | + * The cmd is going to be embedded so we need a SKIP SGE. |
|---|
| 2123 | + */ |
|---|
| 2124 | + sgl = lpfc_ncmd->dma_sgl; |
|---|
| 2125 | + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
|---|
| 2126 | + bf_set(lpfc_sli4_sge_last, sgl, 0); |
|---|
| 2127 | + sgl->word2 = cpu_to_le32(sgl->word2); |
|---|
| 2128 | + /* Fill in word 3 / sgl_len during cmd submission */ |
|---|
| 2129 | + |
|---|
| 2130 | + /* Initialize 64 bytes only */ |
|---|
| 2131 | + memset(wqe, 0, sizeof(union lpfc_wqe)); |
|---|
| 2132 | + |
|---|
| 2133 | + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
|---|
| 2134 | + atomic_inc(&ndlp->cmd_pending); |
|---|
| 2135 | + lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; |
|---|
| 2136 | + } |
|---|
| 2137 | + |
|---|
| 2138 | + } else { |
|---|
| 2139 | + qp = &phba->sli4_hba.hdwq[idx]; |
|---|
| 2140 | + qp->empty_io_bufs++; |
|---|
| 2382 | 2141 | } |
|---|
| 2142 | + |
|---|
| 2383 | 2143 | return lpfc_ncmd; |
|---|
| 2384 | 2144 | } |
|---|
| 2385 | 2145 | |
|---|
| .. | .. |
|---|
| 2389 | 2149 | * @lpfc_ncmd: The nvme buffer which is being released. |
|---|
| 2390 | 2150 | * |
|---|
| 2391 | 2151 | * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba |
|---|
| 2392 | | - * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer |
|---|
| 2152 | + * lpfc_io_buf_list list. For SLI4, XRIs are tied to the nvme buffer |
|---|
| 2393 | 2153 | * and cannot be reused for at least RA_TOV amount of time if it was |
|---|
| 2394 | 2154 | * aborted. |
|---|
| 2395 | 2155 | **/ |
|---|
| 2396 | 2156 | static void |
|---|
| 2397 | | -lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) |
|---|
| 2157 | +lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) |
|---|
| 2398 | 2158 | { |
|---|
| 2159 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 2399 | 2160 | unsigned long iflag = 0; |
|---|
| 2400 | 2161 | |
|---|
| 2401 | | - if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp) |
|---|
| 2162 | + if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) |
|---|
| 2402 | 2163 | atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); |
|---|
| 2403 | 2164 | |
|---|
| 2404 | | - lpfc_ncmd->nonsg_phys = 0; |
|---|
| 2405 | 2165 | lpfc_ncmd->ndlp = NULL; |
|---|
| 2406 | | - lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH; |
|---|
| 2166 | + lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; |
|---|
| 2407 | 2167 | |
|---|
| 2408 | | - if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { |
|---|
| 2168 | + qp = lpfc_ncmd->hdwq; |
|---|
| 2169 | + if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { |
|---|
| 2409 | 2170 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
|---|
| 2410 | 2171 | "6310 XB release deferred for " |
|---|
| 2411 | 2172 | "ox_id x%x on reqtag x%x\n", |
|---|
| 2412 | 2173 | lpfc_ncmd->cur_iocbq.sli4_xritag, |
|---|
| 2413 | 2174 | lpfc_ncmd->cur_iocbq.iotag); |
|---|
| 2414 | 2175 | |
|---|
| 2415 | | - spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, |
|---|
| 2416 | | - iflag); |
|---|
| 2176 | + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); |
|---|
| 2417 | 2177 | list_add_tail(&lpfc_ncmd->list, |
|---|
| 2418 | | - &phba->sli4_hba.lpfc_abts_nvme_buf_list); |
|---|
| 2419 | | - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, |
|---|
| 2420 | | - iflag); |
|---|
| 2421 | | - } else { |
|---|
| 2422 | | - lpfc_ncmd->nvmeCmd = NULL; |
|---|
| 2423 | | - lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME; |
|---|
| 2424 | | - spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); |
|---|
| 2425 | | - list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put); |
|---|
| 2426 | | - phba->put_nvme_bufs++; |
|---|
| 2427 | | - spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); |
|---|
| 2428 | | - } |
|---|
| 2178 | + &qp->lpfc_abts_io_buf_list); |
|---|
| 2179 | + qp->abts_nvme_io_bufs++; |
|---|
| 2180 | + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); |
|---|
| 2181 | + } else |
|---|
| 2182 | + lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); |
|---|
| 2429 | 2183 | } |
|---|
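
Release now goes through the per-hardware-queue pool, but a buffer whose exchange is still busy (LPFC_SBUF_XBUSY) is parked on that queue's abort list instead of being freed, so its XRI is not handed out again before the port reports the exchange complete. A generic sketch of that deferred-free pattern, with all names invented:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_buf   { struct list_head list; unsigned int flags; };
struct toy_queue { spinlock_t abts_lock; struct list_head abts_list; };

#define TOY_XBUSY 0x1	/* hardware still owns the exchange */

static void toy_release(struct toy_buf *buf, struct toy_queue *qp,
			void (*free_to_pool)(struct toy_buf *, struct toy_queue *))
{
	unsigned long flags;

	if (buf->flags & TOY_XBUSY) {
		spin_lock_irqsave(&qp->abts_lock, flags);
		list_add_tail(&buf->list, &qp->abts_list);	/* freed later */
		spin_unlock_irqrestore(&qp->abts_lock, flags);
		return;
	}
	free_to_pool(buf, qp);					/* reusable now */
}
```
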
| 2430 | 2184 | |
|---|
| 2431 | 2185 | /** |
|---|
| .. | .. |
|---|
| 2452 | 2206 | struct nvme_fc_port_info nfcp_info; |
|---|
| 2453 | 2207 | struct nvme_fc_local_port *localport; |
|---|
| 2454 | 2208 | struct lpfc_nvme_lport *lport; |
|---|
| 2455 | | - struct lpfc_nvme_ctrl_stat *cstat; |
|---|
| 2456 | | - int len, i; |
|---|
| 2457 | 2209 | |
|---|
| 2458 | 2210 | /* Initialize this localport instance. The vport wwn usage ensures |
|---|
| 2459 | 2211 | * that NPIV is accounted for. |
|---|
| .. | .. |
|---|
| 2463 | 2215 | nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); |
|---|
| 2464 | 2216 | nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); |
|---|
| 2465 | 2217 | |
|---|
| 2466 | | - /* Limit to LPFC_MAX_NVME_SEG_CNT. |
|---|
| 2467 | | - * For now need + 1 to get around NVME transport logic. |
|---|
| 2218 | + /* We need to tell the transport layer + 1 because it takes page |
|---|
| 2219 | + * alignment into account. When space for the SGL is allocated we |
|---|
| 2220 | + * allocate + 3, one for cmd, one for rsp and one for this alignment |
|---|
| 2468 | 2221 | */ |
|---|
| 2469 | | - if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { |
|---|
| 2470 | | - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT, |
|---|
| 2471 | | - "6300 Reducing sg segment cnt to %d\n", |
|---|
| 2472 | | - LPFC_MAX_NVME_SEG_CNT); |
|---|
| 2473 | | - phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; |
|---|
| 2474 | | - } else { |
|---|
| 2475 | | - phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; |
|---|
| 2476 | | - } |
|---|
| 2477 | 2222 | lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; |
|---|
| 2478 | | - lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; |
|---|
| 2223 | + |
|---|
| 2224 | + /* Advertise how many hw queues we support based on cfg_hdw_queue, |
|---|
| 2225 | + * which will not exceed cpu count. |
|---|
| 2226 | + */ |
|---|
| 2227 | + lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; |
|---|
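
As a concrete reading of the comment above: if, say, cfg_nvme_seg_cnt were 64 data segments, the template would advertise max_sgl_segments = 65 to the transport (the extra one absorbs page alignment), while the SGL buffer itself is sized for 64 + 3 entries, the additional three covering the cmd SGE, the rsp SGE, and the alignment slot. The value 64 here is only an example, not a driver default.
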
| 2479 | 2228 | |
|---|
| 2480 | 2229 | if (!IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 2481 | 2230 | return ret; |
|---|
| 2482 | | - |
|---|
| 2483 | | - cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) * |
|---|
| 2484 | | - phba->cfg_nvme_io_channel), GFP_KERNEL); |
|---|
| 2485 | | - if (!cstat) |
|---|
| 2486 | | - return -ENOMEM; |
|---|
| 2487 | 2231 | |
|---|
| 2488 | 2232 | /* localport is allocated from the stack, but the registration |
|---|
| 2489 | 2233 | * call allocates heap memory as well as the private area. |
|---|
| .. | .. |
|---|
| 2494 | 2238 | if (!ret) { |
|---|
| 2495 | 2239 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, |
|---|
| 2496 | 2240 | "6005 Successfully registered local " |
|---|
| 2497 | | - "NVME port num %d, localP %p, private %p, " |
|---|
| 2498 | | - "sg_seg %d\n", |
|---|
| 2241 | + "NVME port num %d, localP x%px, private " |
|---|
| 2242 | + "x%px, sg_seg %d\n", |
|---|
| 2499 | 2243 | localport->port_num, localport, |
|---|
| 2500 | 2244 | localport->private, |
|---|
| 2501 | 2245 | lpfc_nvme_template.max_sgl_segments); |
|---|
| .. | .. |
|---|
| 2504 | 2248 | lport = (struct lpfc_nvme_lport *)localport->private; |
|---|
| 2505 | 2249 | vport->localport = localport; |
|---|
| 2506 | 2250 | lport->vport = vport; |
|---|
| 2507 | | - lport->cstat = cstat; |
|---|
| 2508 | 2251 | vport->nvmei_support = 1; |
|---|
| 2509 | 2252 | |
|---|
| 2510 | 2253 | atomic_set(&lport->xmt_fcp_noxri, 0); |
|---|
| .. | .. |
|---|
| 2519 | 2262 | atomic_set(&lport->cmpl_fcp_err, 0); |
|---|
| 2520 | 2263 | atomic_set(&lport->cmpl_ls_xb, 0); |
|---|
| 2521 | 2264 | atomic_set(&lport->cmpl_ls_err, 0); |
|---|
| 2265 | + |
|---|
| 2522 | 2266 | atomic_set(&lport->fc4NvmeLsRequests, 0); |
|---|
| 2523 | 2267 | atomic_set(&lport->fc4NvmeLsCmpls, 0); |
|---|
| 2524 | | - |
|---|
| 2525 | | - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { |
|---|
| 2526 | | - cstat = &lport->cstat[i]; |
|---|
| 2527 | | - atomic_set(&cstat->fc4NvmeInputRequests, 0); |
|---|
| 2528 | | - atomic_set(&cstat->fc4NvmeOutputRequests, 0); |
|---|
| 2529 | | - atomic_set(&cstat->fc4NvmeControlRequests, 0); |
|---|
| 2530 | | - atomic_set(&cstat->fc4NvmeIoCmpls, 0); |
|---|
| 2531 | | - } |
|---|
| 2532 | | - |
|---|
| 2533 | | - /* Don't post more new bufs if repost already recovered |
|---|
| 2534 | | - * the nvme sgls. |
|---|
| 2535 | | - */ |
|---|
| 2536 | | - if (phba->sli4_hba.nvme_xri_cnt == 0) { |
|---|
| 2537 | | - len = lpfc_new_nvme_buf(vport, |
|---|
| 2538 | | - phba->sli4_hba.nvme_xri_max); |
|---|
| 2539 | | - vport->phba->total_nvme_bufs += len; |
|---|
| 2540 | | - } |
|---|
| 2541 | | - } else { |
|---|
| 2542 | | - kfree(cstat); |
|---|
| 2543 | 2268 | } |
|---|
| 2544 | 2269 | |
|---|
| 2545 | 2270 | return ret; |
|---|
| 2546 | 2271 | } |
|---|
| 2547 | 2272 | |
|---|
| 2273 | +#if (IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 2548 | 2274 | /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. |
|---|
| 2549 | 2275 | * |
|---|
| 2550 | 2276 | * The driver has to wait for the host nvme transport to callback |
|---|
| .. | .. |
|---|
| 2555 | 2281 | * An uninterruptible wait is used because of the risk of transport-to- |
|---|
| 2556 | 2282 | * driver state mismatch. |
|---|
| 2557 | 2283 | */ |
|---|
| 2558 | | -void |
|---|
| 2284 | +static void |
|---|
| 2559 | 2285 | lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, |
|---|
| 2560 | 2286 | struct lpfc_nvme_lport *lport, |
|---|
| 2561 | 2287 | struct completion *lport_unreg_cmp) |
|---|
| 2562 | 2288 | { |
|---|
| 2563 | | -#if (IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 2564 | 2289 | u32 wait_tmo; |
|---|
| 2565 | | - int ret; |
|---|
| 2290 | + int ret, i, pending = 0; |
|---|
| 2291 | + struct lpfc_sli_ring *pring; |
|---|
| 2292 | + struct lpfc_hba *phba = vport->phba; |
|---|
| 2293 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 2294 | + int abts_scsi, abts_nvme; |
|---|
| 2566 | 2295 | |
|---|
| 2567 | 2296 | /* Host transport has to clean up and confirm, requiring an indefinite
|---|
| 2568 | 2297 | * wait. Print a message if a 10 second wait expires and renew the |
|---|
| .. | .. |
|---|
| 2572 | 2301 | while (true) { |
|---|
| 2573 | 2302 | ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); |
|---|
| 2574 | 2303 | if (unlikely(!ret)) { |
|---|
| 2575 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 2576 | | - "6176 Lport %p Localport %p wait " |
|---|
| 2577 | | - "timed out. Renewing.\n", |
|---|
| 2578 | | - lport, vport->localport); |
|---|
| 2304 | + pending = 0; |
|---|
| 2305 | + abts_scsi = 0; |
|---|
| 2306 | + abts_nvme = 0; |
|---|
| 2307 | + for (i = 0; i < phba->cfg_hdw_queue; i++) { |
|---|
| 2308 | + qp = &phba->sli4_hba.hdwq[i]; |
|---|
| 2309 | + pring = qp->io_wq->pring; |
|---|
| 2310 | + if (!pring) |
|---|
| 2311 | + continue; |
|---|
| 2312 | + pending += pring->txcmplq_cnt; |
|---|
| 2313 | + abts_scsi += qp->abts_scsi_io_bufs; |
|---|
| 2314 | + abts_nvme += qp->abts_nvme_io_bufs; |
|---|
| 2315 | + } |
|---|
| 2316 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2317 | + "6176 Lport x%px Localport x%px wait " |
|---|
| 2318 | + "timed out. Pending %d [%d:%d]. " |
|---|
| 2319 | + "Renewing.\n", |
|---|
| 2320 | + lport, vport->localport, pending, |
|---|
| 2321 | + abts_scsi, abts_nvme); |
|---|
| 2579 | 2322 | continue; |
|---|
| 2580 | 2323 | } |
|---|
| 2581 | 2324 | break; |
|---|
| 2582 | 2325 | } |
|---|
| 2583 | 2326 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
|---|
| 2584 | | - "6177 Lport %p Localport %p Complete Success\n", |
|---|
| 2327 | + "6177 Lport x%px Localport x%px Complete Success\n", |
|---|
| 2585 | 2328 | lport, vport->localport); |
|---|
| 2586 | | -#endif |
|---|
| 2587 | 2329 | } |
|---|
| 2330 | +#endif |
|---|
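The unregister wait above renews a bounded 10-second `wait_for_completion_timeout()` indefinitely, reporting the outstanding IO counts on every expiry so the log shows whether progress is being made. Below is a hedged user-space sketch of that renew-and-report pattern using POSIX primitives; the driver uses the kernel completion API, and all names here are illustrative.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* User-space model only: block for a bounded interval, report on each
 * timeout, and exit only when the other side signals completion.
 */
struct unreg_waiter {
	pthread_mutex_t lock;
	pthread_cond_t  done_cv;
	bool done;
};

static void *transport_side(void *arg)
{
	struct unreg_waiter *w = arg;

	sleep(2);			/* pretend cleanup takes a while */
	pthread_mutex_lock(&w->lock);
	w->done = true;			/* the "unreg done" callback */
	pthread_cond_signal(&w->done_cv);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

static void wait_for_unreg(struct unreg_waiter *w)
{
	struct timespec deadline;

	pthread_mutex_lock(&w->lock);
	while (!w->done) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 10;		/* bounded 10s window */
		if (pthread_cond_timedwait(&w->done_cv, &w->lock,
					   &deadline) != 0 && !w->done)
			fprintf(stderr, "wait timed out, renewing\n");
	}
	pthread_mutex_unlock(&w->lock);
	printf("unreg complete\n");
}

int main(void)
{
	struct unreg_waiter w = { .done = false };
	pthread_t tid;

	pthread_mutex_init(&w.lock, NULL);
	pthread_cond_init(&w.done_cv, NULL);
	pthread_create(&tid, NULL, transport_side, &w);
	wait_for_unreg(&w);
	pthread_join(tid, NULL);
	return 0;
}
```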
| 2588 | 2331 | |
|---|
| 2589 | 2332 | /** |
|---|
| 2590 | 2333 | * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. |
|---|
| .. | .. |
|---|
| 2602 | 2345 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 2603 | 2346 | struct nvme_fc_local_port *localport; |
|---|
| 2604 | 2347 | struct lpfc_nvme_lport *lport; |
|---|
| 2605 | | - struct lpfc_nvme_ctrl_stat *cstat; |
|---|
| 2606 | 2348 | int ret; |
|---|
| 2607 | 2349 | DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); |
|---|
| 2608 | 2350 | |
|---|
| .. | .. |
|---|
| 2611 | 2353 | |
|---|
| 2612 | 2354 | localport = vport->localport; |
|---|
| 2613 | 2355 | lport = (struct lpfc_nvme_lport *)localport->private; |
|---|
| 2614 | | - cstat = lport->cstat; |
|---|
| 2615 | 2356 | |
|---|
| 2616 | 2357 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
|---|
| 2617 | | - "6011 Destroying NVME localport %p\n", |
|---|
| 2358 | + "6011 Destroying NVME localport x%px\n", |
|---|
| 2618 | 2359 | localport); |
|---|
| 2619 | 2360 | |
|---|
| 2620 | 2361 | /* lport's rport list is clear. Unregister |
|---|
| .. | .. |
|---|
| 2628 | 2369 | */ |
|---|
| 2629 | 2370 | lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); |
|---|
| 2630 | 2371 | vport->localport = NULL; |
|---|
| 2631 | | - kfree(cstat); |
|---|
| 2632 | 2372 | |
|---|
| 2633 | 2373 | /* Regardless of the unregister upcall response, clear |
|---|
| 2634 | 2374 | * nvmei_support. All rports are unregistered and the |
|---|
| .. | .. |
|---|
| 2665 | 2405 | lport = (struct lpfc_nvme_lport *)localport->private; |
|---|
| 2666 | 2406 | if (!lport) { |
|---|
| 2667 | 2407 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, |
|---|
| 2668 | | - "6171 Update NVME fail. localP %p, No lport\n", |
|---|
| 2408 | + "6171 Update NVME fail. localP x%px, No lport\n", |
|---|
| 2669 | 2409 | localport); |
|---|
| 2670 | 2410 | return; |
|---|
| 2671 | 2411 | } |
|---|
| 2672 | 2412 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
|---|
| 2673 | | - "6012 Update NVME lport %p did x%x\n", |
|---|
| 2413 | + "6012 Update NVME lport x%px did x%x\n", |
|---|
| 2674 | 2414 | localport, vport->fc_myDID); |
|---|
| 2675 | 2415 | |
|---|
| 2676 | 2416 | localport->port_id = vport->fc_myDID; |
|---|
| .. | .. |
|---|
| 2680 | 2420 | localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; |
|---|
| 2681 | 2421 | |
|---|
| 2682 | 2422 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 2683 | | - "6030 bound lport %p to DID x%06x\n", |
|---|
| 2423 | + "6030 bound lport x%px to DID x%06x\n", |
|---|
| 2684 | 2424 | lport, localport->port_id); |
|---|
| 2685 | 2425 | #endif |
|---|
| 2686 | 2426 | } |
|---|
| .. | .. |
|---|
| 2697 | 2437 | struct nvme_fc_remote_port *remote_port; |
|---|
| 2698 | 2438 | struct nvme_fc_port_info rpinfo; |
|---|
| 2699 | 2439 | struct lpfc_nodelist *prev_ndlp = NULL; |
|---|
| 2440 | + struct fc_rport *srport = ndlp->rport; |
|---|
| 2700 | 2441 | |
|---|
| 2701 | 2442 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 2702 | 2443 | "6006 Register NVME PORT. DID x%06x nlptype x%x\n", |
|---|
| .. | .. |
|---|
| 2726 | 2467 | |
|---|
| 2727 | 2468 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); |
|---|
| 2728 | 2469 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); |
|---|
| 2470 | + if (srport) |
|---|
| 2471 | + rpinfo.dev_loss_tmo = srport->dev_loss_tmo; |
|---|
| 2472 | + else |
|---|
| 2473 | + rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; |
|---|
| 2729 | 2474 | |
|---|
| 2730 | 2475 | spin_lock_irq(&vport->phba->hbalock); |
|---|
| 2731 | 2476 | oldrport = lpfc_ndlp_get_nrport(ndlp); |
|---|
| 2732 | | - spin_unlock_irq(&vport->phba->hbalock); |
|---|
| 2733 | | - if (!oldrport) |
|---|
| 2477 | + if (oldrport) { |
|---|
| 2478 | + prev_ndlp = oldrport->ndlp; |
|---|
| 2479 | + spin_unlock_irq(&vport->phba->hbalock); |
|---|
| 2480 | + } else { |
|---|
| 2481 | + spin_unlock_irq(&vport->phba->hbalock); |
|---|
| 2734 | 2482 | lpfc_nlp_get(ndlp); |
|---|
| 2483 | + } |
|---|
| 2735 | 2484 | |
|---|
| 2736 | 2485 | ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); |
|---|
| 2737 | 2486 | if (!ret) { |
|---|
| .. | .. |
|---|
| 2747 | 2496 | spin_unlock_irq(&vport->phba->hbalock); |
|---|
| 2748 | 2497 | rport = remote_port->private; |
|---|
| 2749 | 2498 | if (oldrport) { |
|---|
| 2750 | | - /* New remoteport record does not guarantee valid |
|---|
| 2751 | | - * host private memory area. |
|---|
| 2752 | | - */ |
|---|
| 2753 | | - prev_ndlp = oldrport->ndlp; |
|---|
| 2754 | | - if (oldrport == remote_port->private) { |
|---|
| 2755 | | - /* Same remoteport - ndlp should match. |
|---|
| 2756 | | - * Just reuse. |
|---|
| 2757 | | - */ |
|---|
| 2758 | | - lpfc_printf_vlog(ndlp->vport, KERN_INFO, |
|---|
| 2759 | | - LOG_NVME_DISC, |
|---|
| 2760 | | - "6014 Rebinding lport to " |
|---|
| 2761 | | - "remoteport %p wwpn 0x%llx, " |
|---|
| 2762 | | - "Data: x%x x%x %p %p x%x x%06x\n", |
|---|
| 2763 | | - remote_port, |
|---|
| 2764 | | - remote_port->port_name, |
|---|
| 2765 | | - remote_port->port_id, |
|---|
| 2766 | | - remote_port->port_role, |
|---|
| 2767 | | - prev_ndlp, |
|---|
| 2768 | | - ndlp, |
|---|
| 2769 | | - ndlp->nlp_type, |
|---|
| 2770 | | - ndlp->nlp_DID); |
|---|
| 2771 | | - return 0; |
|---|
| 2772 | | - } |
|---|
| 2773 | 2499 | |
|---|
| 2774 | 2500 | /* Sever the ndlp<->rport association |
|---|
| 2775 | 2501 | * before dropping the ndlp ref from |
|---|
| .. | .. |
|---|
| 2802 | 2528 | spin_unlock_irq(&vport->phba->hbalock); |
|---|
| 2803 | 2529 | lpfc_printf_vlog(vport, KERN_INFO, |
|---|
| 2804 | 2530 | LOG_NVME_DISC | LOG_NODE, |
|---|
| 2805 | | - "6022 Binding new rport to " |
|---|
| 2806 | | - "lport %p Remoteport %p rport %p WWNN 0x%llx, " |
|---|
| 2531 | + "6022 Bind lport x%px to remoteport x%px " |
|---|
| 2532 | + "rport x%px WWNN 0x%llx, " |
|---|
| 2807 | 2533 | "Rport WWPN 0x%llx DID " |
|---|
| 2808 | | - "x%06x Role x%x, ndlp %p prev_ndlp %p\n", |
|---|
| 2534 | + "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", |
|---|
| 2809 | 2535 | lport, remote_port, rport, |
|---|
| 2810 | 2536 | rpinfo.node_name, rpinfo.port_name, |
|---|
| 2811 | 2537 | rpinfo.port_id, rpinfo.port_role, |
|---|
| 2812 | 2538 | ndlp, prev_ndlp); |
|---|
| 2813 | 2539 | } else { |
|---|
| 2814 | 2540 | lpfc_printf_vlog(vport, KERN_ERR, |
|---|
| 2815 | | - LOG_NVME_DISC | LOG_NODE, |
|---|
| 2541 | + LOG_TRACE_EVENT, |
|---|
| 2816 | 2542 | "6031 RemotePort Registration failed " |
|---|
| 2817 | 2543 | "err: %d, DID x%06x\n", |
|---|
| 2818 | 2544 | ret, ndlp->nlp_DID); |
|---|
| .. | .. |
|---|
| 2821 | 2547 | return ret; |
|---|
| 2822 | 2548 | #else |
|---|
| 2823 | 2549 | return 0; |
|---|
| 2550 | +#endif |
|---|
| 2551 | +} |
|---|
| 2552 | + |
|---|
| 2553 | +/** |
|---|
| 2554 | + * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport |
|---|
| 2555 | + * |
|---|
| 2556 | + * If the ndlp represents an NVME Target that we are logged into,
|---|
| 2557 | + * ping the NVME FC Transport layer to initiate a device rescan |
|---|
| 2558 | + * on this remote NPort. |
|---|
| 2559 | + */ |
|---|
| 2560 | +void |
|---|
| 2561 | +lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
|---|
| 2562 | +{ |
|---|
| 2563 | +#if (IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 2564 | + struct lpfc_nvme_rport *nrport; |
|---|
| 2565 | + struct nvme_fc_remote_port *remoteport = NULL; |
|---|
| 2566 | + |
|---|
| 2567 | + spin_lock_irq(&vport->phba->hbalock); |
|---|
| 2568 | + nrport = lpfc_ndlp_get_nrport(ndlp); |
|---|
| 2569 | + if (nrport) |
|---|
| 2570 | + remoteport = nrport->remoteport; |
|---|
| 2571 | + spin_unlock_irq(&vport->phba->hbalock); |
|---|
| 2572 | + |
|---|
| 2573 | + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 2574 | + "6170 Rescan NPort DID x%06x type x%x " |
|---|
| 2575 | + "state x%x nrport x%px remoteport x%px\n", |
|---|
| 2576 | + ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, |
|---|
| 2577 | + nrport, remoteport); |
|---|
| 2578 | + |
|---|
| 2579 | + if (!nrport || !remoteport) |
|---|
| 2580 | + goto rescan_exit; |
|---|
| 2581 | + |
|---|
| 2582 | + /* Only rescan if we are an NVME target in the MAPPED state */ |
|---|
| 2583 | + if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && |
|---|
| 2584 | + ndlp->nlp_state == NLP_STE_MAPPED_NODE) { |
|---|
| 2585 | + nvme_fc_rescan_remoteport(remoteport); |
|---|
| 2586 | + |
|---|
| 2587 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2588 | + "6172 NVME rescanned DID x%06x " |
|---|
| 2589 | + "port_state x%x\n", |
|---|
| 2590 | + ndlp->nlp_DID, remoteport->port_state); |
|---|
| 2591 | + } |
|---|
| 2592 | + return; |
|---|
| 2593 | + rescan_exit: |
|---|
| 2594 | + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 2595 | + "6169 Skip NVME Rport Rescan, NVME remoteport " |
|---|
| 2596 | + "unregistered\n"); |
|---|
| 2824 | 2597 | #endif |
|---|
| 2825 | 2598 | } |
|---|
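lpfc_nvme_rescan_port snapshots the nrport and remoteport pointers under the hba lock and only asks the transport to rescan when the remote port advertises the discovery role and the node is in the MAPPED state. The following is a minimal user-space sketch of that "snapshot under lock, then decide" pattern; the types and constants are hypothetical stand-ins for FC_PORT_ROLE_NVME_DISCOVERY and NLP_STE_MAPPED_NODE, not driver definitions.

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative model only: snapshot shared pointers under a lock,
 * then act on the snapshot outside the lock.
 */
#define ROLE_NVME_DISCOVERY	0x1
#define STATE_MAPPED		0x6

struct remoteport { unsigned int port_role; };

struct node {
	pthread_mutex_t lock;
	struct remoteport *remoteport;	/* may be NULL when unregistered */
	unsigned int state;
};

static void rescan_port(struct node *n)
{
	struct remoteport *rport;
	unsigned int state;

	pthread_mutex_lock(&n->lock);
	rport = n->remoteport;		/* snapshot under the lock */
	state = n->state;
	pthread_mutex_unlock(&n->lock);

	if (!rport) {
		printf("skip rescan: remoteport unregistered\n");
		return;
	}
	if ((rport->port_role & ROLE_NVME_DISCOVERY) && state == STATE_MAPPED)
		printf("rescan requested\n");	/* driver calls nvme_fc_rescan_remoteport() here */
	else
		printf("rescan not needed\n");
}

int main(void)
{
	struct remoteport rp = { .port_role = ROLE_NVME_DISCOVERY };
	struct node n = { .remoteport = &rp, .state = STATE_MAPPED };

	pthread_mutex_init(&n.lock, NULL);
	rescan_port(&n);
	return 0;
}
```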
| 2826 | 2599 | |
|---|
| .. | .. |
|---|
| 2867 | 2640 | goto input_err; |
|---|
| 2868 | 2641 | |
|---|
| 2869 | 2642 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
|---|
| 2870 | | - "6033 Unreg nvme remoteport %p, portname x%llx, " |
|---|
| 2643 | + "6033 Unreg nvme remoteport x%px, portname x%llx, " |
|---|
| 2871 | 2644 | "port_id x%06x, portstate x%x port type x%x\n", |
|---|
| 2872 | 2645 | remoteport, remoteport->port_name, |
|---|
| 2873 | 2646 | remoteport->port_id, remoteport->port_state, |
|---|
| .. | .. |
|---|
| 2894 | 2667 | ret = nvme_fc_unregister_remoteport(remoteport); |
|---|
| 2895 | 2668 | if (ret != 0) { |
|---|
| 2896 | 2669 | lpfc_nlp_put(ndlp); |
|---|
| 2897 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
|---|
| 2670 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2898 | 2671 | "6167 NVME unregister failed %d " |
|---|
| 2899 | 2672 | "port_state x%x\n", |
|---|
| 2900 | 2673 | ret, remoteport->port_state); |
|---|
| .. | .. |
|---|
| 2904 | 2677 | |
|---|
| 2905 | 2678 | input_err: |
|---|
| 2906 | 2679 | #endif |
|---|
| 2907 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
|---|
| 2908 | | - "6168 State error: lport %p, rport%p FCID x%06x\n", |
|---|
| 2680 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2681 | + "6168 State error: lport x%px, rport x%px FCID x%06x\n", |
|---|
| 2909 | 2682 | vport->localport, ndlp->rport, ndlp->nlp_DID); |
|---|
| 2910 | 2683 | } |
|---|
| 2911 | 2684 | |
|---|
| .. | .. |
|---|
| 2913 | 2686 | * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort |
|---|
| 2914 | 2687 | * @phba: pointer to lpfc hba data structure. |
|---|
| 2915 | 2688 | * @axri: pointer to the fcp xri abort wcqe structure. |
|---|
| 2689 | + * @lpfc_ncmd: The nvme job structure for the request being aborted. |
|---|
| 2916 | 2690 | * |
|---|
| 2917 | 2691 | * This routine is invoked by the worker thread to process a SLI4 fast-path |
|---|
| 2918 | 2692 | * NVME aborted xri. Aborted NVME IO commands are completed to the transport |
|---|
| .. | .. |
|---|
| 2920 | 2694 | **/ |
|---|
| 2921 | 2695 | void |
|---|
| 2922 | 2696 | lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, |
|---|
| 2923 | | - struct sli4_wcqe_xri_aborted *axri) |
|---|
| 2697 | + struct sli4_wcqe_xri_aborted *axri, |
|---|
| 2698 | + struct lpfc_io_buf *lpfc_ncmd) |
|---|
| 2924 | 2699 | { |
|---|
| 2925 | 2700 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); |
|---|
| 2926 | | - struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd; |
|---|
| 2927 | 2701 | struct nvmefc_fcp_req *nvme_cmd = NULL; |
|---|
| 2928 | | - struct lpfc_nodelist *ndlp; |
|---|
| 2929 | | - unsigned long iflag = 0; |
|---|
| 2702 | + struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; |
|---|
| 2930 | 2703 | |
|---|
| 2931 | | - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) |
|---|
| 2932 | | - return; |
|---|
| 2933 | | - spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 2934 | | - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
|---|
| 2935 | | - list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd, |
|---|
| 2936 | | - &phba->sli4_hba.lpfc_abts_nvme_buf_list, |
|---|
| 2937 | | - list) { |
|---|
| 2938 | | - if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { |
|---|
| 2939 | | - list_del_init(&lpfc_ncmd->list); |
|---|
| 2940 | | - lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; |
|---|
| 2941 | | - lpfc_ncmd->status = IOSTAT_SUCCESS; |
|---|
| 2942 | | - spin_unlock( |
|---|
| 2943 | | - &phba->sli4_hba.abts_nvme_buf_list_lock); |
|---|
| 2944 | 2704 | |
|---|
| 2945 | | - spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 2946 | | - ndlp = lpfc_ncmd->ndlp; |
|---|
| 2947 | | - if (ndlp) |
|---|
| 2948 | | - lpfc_sli4_abts_err_handler(phba, ndlp, axri); |
|---|
| 2949 | | - |
|---|
| 2950 | | - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
|---|
| 2951 | | - "6311 nvme_cmd %p xri x%x tag x%x " |
|---|
| 2952 | | - "abort complete and xri released\n", |
|---|
| 2953 | | - lpfc_ncmd->nvmeCmd, xri, |
|---|
| 2954 | | - lpfc_ncmd->cur_iocbq.iotag); |
|---|
| 2955 | | - |
|---|
| 2956 | | - /* Aborted NVME commands are required to not complete |
|---|
| 2957 | | - * before the abort exchange command fully completes. |
|---|
| 2958 | | - * Once completed, it is available via the put list. |
|---|
| 2959 | | - */ |
|---|
| 2960 | | - if (lpfc_ncmd->nvmeCmd) { |
|---|
| 2961 | | - nvme_cmd = lpfc_ncmd->nvmeCmd; |
|---|
| 2962 | | - nvme_cmd->done(nvme_cmd); |
|---|
| 2963 | | - lpfc_ncmd->nvmeCmd = NULL; |
|---|
| 2964 | | - } |
|---|
| 2965 | | - lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 2966 | | - return; |
|---|
| 2967 | | - } |
|---|
| 2968 | | - } |
|---|
| 2969 | | - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
|---|
| 2970 | | - spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 2705 | + if (ndlp) |
|---|
| 2706 | + lpfc_sli4_abts_err_handler(phba, ndlp, axri); |
|---|
| 2971 | 2707 | |
|---|
| 2972 | 2708 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
|---|
| 2973 | | - "6312 XRI Aborted xri x%x not found\n", xri); |
|---|
| 2709 | + "6311 nvme_cmd %p xri x%x tag x%x abort complete and " |
|---|
| 2710 | + "xri released\n", |
|---|
| 2711 | + lpfc_ncmd->nvmeCmd, xri, |
|---|
| 2712 | + lpfc_ncmd->cur_iocbq.iotag); |
|---|
| 2974 | 2713 | |
|---|
| 2714 | + /* Aborted NVME commands are required to not complete |
|---|
| 2715 | + * before the abort exchange command fully completes. |
|---|
| 2716 | + * Once completed, the IO buffer is available via the put list.
|---|
| 2717 | + */ |
|---|
| 2718 | + if (lpfc_ncmd->nvmeCmd) { |
|---|
| 2719 | + nvme_cmd = lpfc_ncmd->nvmeCmd; |
|---|
| 2720 | + nvme_cmd->done(nvme_cmd); |
|---|
| 2721 | + lpfc_ncmd->nvmeCmd = NULL; |
|---|
| 2722 | + } |
|---|
| 2723 | + lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 2975 | 2724 | } |
|---|
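With per-hardware-queue buffer lists, the aborted-XRI handler above no longer walks a global abort list; it is handed the lpfc_io_buf directly, completes the still-attached nvmefc request exactly once, and only then releases the buffer. A hedged user-space sketch of that complete-then-release ordering follows; the structures and names are illustrative, not the driver's.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative model of completing an aborted request and releasing
 * its buffer only after the abort exchange has fully completed.
 */
struct fcp_req {
	void (*done)(struct fcp_req *req);
};

struct io_buf {
	struct fcp_req *cmd;	/* NULL once completed back to the ULP */
	unsigned int xri;
};

static void release_io_buf(struct io_buf *buf)
{
	printf("xri x%x released back to the free pool\n", buf->xri);
}

static void xri_aborted(struct io_buf *buf)
{
	/* Complete the upper-layer command first, exactly once. */
	if (buf->cmd) {
		buf->cmd->done(buf->cmd);
		buf->cmd = NULL;
	}
	/* Only now is the exchange resource safe to reuse. */
	release_io_buf(buf);
}

static void demo_done(struct fcp_req *req)
{
	(void)req;
	printf("aborted command completed to the transport\n");
}

int main(void)
{
	struct fcp_req req = { .done = demo_done };
	struct io_buf buf = { .cmd = &req, .xri = 0x1a3 };

	xri_aborted(&buf);
	return 0;
}
```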
| 2976 | 2725 | |
|---|
| 2977 | 2726 | /** |
|---|
| .. | .. |
|---|
| 2990 | 2739 | struct lpfc_sli_ring *pring; |
|---|
| 2991 | 2740 | u32 i, wait_cnt = 0; |
|---|
| 2992 | 2741 | |
|---|
| 2993 | | - if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq) |
|---|
| 2742 | + if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) |
|---|
| 2994 | 2743 | return; |
|---|
| 2995 | 2744 | |
|---|
| 2996 | | - /* Cycle through all NVME rings and make sure all outstanding |
|---|
| 2745 | + /* Cycle through all IO rings and make sure all outstanding |
|---|
| 2997 | 2746 | * WQEs have been removed from the txcmplqs. |
|---|
| 2998 | 2747 | */ |
|---|
| 2999 | | - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { |
|---|
| 3000 | | - pring = phba->sli4_hba.nvme_wq[i]->pring; |
|---|
| 2748 | + for (i = 0; i < phba->cfg_hdw_queue; i++) { |
|---|
| 2749 | + if (!phba->sli4_hba.hdwq[i].io_wq) |
|---|
| 2750 | + continue; |
|---|
| 2751 | + pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
|---|
| 3001 | 2752 | |
|---|
| 3002 | 2753 | if (!pring) |
|---|
| 3003 | 2754 | continue; |
|---|
| .. | .. |
|---|
| 3011 | 2762 | * dump a message. Something is wrong. |
|---|
| 3012 | 2763 | */ |
|---|
| 3013 | 2764 | if ((wait_cnt % 1000) == 0) { |
|---|
| 3014 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
|---|
| 2765 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 3015 | 2766 | "6178 NVME IO not empty, " |
|---|
| 3016 | 2767 | "cnt %d\n", wait_cnt); |
|---|
| 3017 | 2768 | } |
|---|
| 3018 | 2769 | } |
|---|
| 3019 | 2770 | } |
|---|
| 3020 | 2771 | } |
|---|
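The drain routine above polls every hardware queue's txcmplq and warns periodically when outstanding WQEs refuse to drain. Below is a small stand-alone C model of that bounded-progress polling loop; the counters and queue count are invented for the demo and simply stand in for the per-ring txcmplq_cnt values.

```c
#include <stdio.h>
#include <unistd.h>

#define NUM_HDW_QUEUES	4

/* Outstanding-command counts per hardware queue; illustrative data only. */
static unsigned int txcmplq_cnt[NUM_HDW_QUEUES] = { 3, 0, 1, 0 };

static unsigned int pending_io(void)
{
	unsigned int i, pending = 0;

	for (i = 0; i < NUM_HDW_QUEUES; i++)
		pending += txcmplq_cnt[i];
	return pending;
}

int main(void)
{
	unsigned int wait_cnt = 0;

	while (pending_io()) {
		usleep(1000);			/* give IO time to complete */
		/* Periodically report that IO is still outstanding. */
		if ((++wait_cnt % 1000) == 0)
			fprintf(stderr, "IO not empty, cnt %u\n", wait_cnt);
		/* Model completions trickling in. */
		for (unsigned int i = 0; i < NUM_HDW_QUEUES; i++)
			if (txcmplq_cnt[i])
				txcmplq_cnt[i]--;
	}
	printf("all hardware queues drained after %u polls\n", wait_cnt);
	return 0;
}
```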
| 2772 | + |
|---|
| 2773 | +void |
|---|
| 2774 | +lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn) |
|---|
| 2775 | +{ |
|---|
| 2776 | +#if (IS_ENABLED(CONFIG_NVME_FC)) |
|---|
| 2777 | + struct lpfc_io_buf *lpfc_ncmd; |
|---|
| 2778 | + struct nvmefc_fcp_req *nCmd; |
|---|
| 2779 | + struct lpfc_nvme_fcpreq_priv *freqpriv; |
|---|
| 2780 | + |
|---|
| 2781 | + if (!pwqeIn->context1) { |
|---|
| 2782 | + lpfc_sli_release_iocbq(phba, pwqeIn); |
|---|
| 2783 | + return; |
|---|
| 2784 | + } |
|---|
| 2785 | + /* For an abort iocb, just return; the IO iocb will do a done call */
|---|
| 2786 | + if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == |
|---|
| 2787 | + CMD_ABORT_XRI_CX) { |
|---|
| 2788 | + lpfc_sli_release_iocbq(phba, pwqeIn); |
|---|
| 2789 | + return; |
|---|
| 2790 | + } |
|---|
| 2791 | + lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1; |
|---|
| 2792 | + |
|---|
| 2793 | + spin_lock(&lpfc_ncmd->buf_lock); |
|---|
| 2794 | + if (!lpfc_ncmd->nvmeCmd) { |
|---|
| 2795 | + spin_unlock(&lpfc_ncmd->buf_lock); |
|---|
| 2796 | + lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 2797 | + return; |
|---|
| 2798 | + } |
|---|
| 2799 | + |
|---|
| 2800 | + nCmd = lpfc_ncmd->nvmeCmd; |
|---|
| 2801 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, |
|---|
| 2802 | + "6194 NVME Cancel xri %x\n", |
|---|
| 2803 | + lpfc_ncmd->cur_iocbq.sli4_xritag); |
|---|
| 2804 | + |
|---|
| 2805 | + nCmd->transferred_length = 0; |
|---|
| 2806 | + nCmd->rcv_rsplen = 0; |
|---|
| 2807 | + nCmd->status = NVME_SC_INTERNAL; |
|---|
| 2808 | + freqpriv = nCmd->private; |
|---|
| 2809 | + freqpriv->nvme_buf = NULL; |
|---|
| 2810 | + lpfc_ncmd->nvmeCmd = NULL; |
|---|
| 2811 | + |
|---|
| 2812 | + spin_unlock(&lpfc_ncmd->buf_lock); |
|---|
| 2813 | + nCmd->done(nCmd); |
|---|
| 2814 | + |
|---|
| 2815 | + /* Call release with XB=1 to queue the IO into the abort list. */ |
|---|
| 2816 | + lpfc_release_nvme_buf(phba, lpfc_ncmd); |
|---|
| 2817 | +#endif |
|---|
| 2818 | +} |
|---|
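lpfc_nvme_cancel_iocb fails the in-flight request back to the transport with NVME_SC_INTERNAL, detaches it from the buffer under buf_lock, completes it outside the lock, and then releases the buffer so it parks on the abort list until the exchange is cleaned up. A hedged user-space sketch of that cancel sequence is given below; the structures, status constant, and helper names are illustrative stand-ins, not the driver's.

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define STATUS_INTERNAL_ERR	0x6	/* stand-in for NVME_SC_INTERNAL */

struct request {
	unsigned int transferred_length;
	unsigned int rcv_rsplen;
	unsigned int status;
	void (*done)(struct request *req);
};

struct io_buf {
	pthread_mutex_t buf_lock;
	struct request *cmd;		/* NULL once already completed */
};

static void cancel_io(struct io_buf *buf)
{
	struct request *req;

	pthread_mutex_lock(&buf->buf_lock);
	req = buf->cmd;
	if (!req) {			/* already completed elsewhere */
		pthread_mutex_unlock(&buf->buf_lock);
		return;
	}
	/* Fail the request: nothing transferred, internal error status. */
	req->transferred_length = 0;
	req->rcv_rsplen = 0;
	req->status = STATUS_INTERNAL_ERR;
	buf->cmd = NULL;		/* sever the buffer<->request link */
	pthread_mutex_unlock(&buf->buf_lock);

	req->done(req);			/* complete outside the lock */
	printf("buffer parked on abort list until exchange cleanup\n");
}

static void demo_done(struct request *req)
{
	printf("request completed with status 0x%x\n", req->status);
}

int main(void)
{
	struct request req = { .done = demo_done };
	struct io_buf buf = { .cmd = &req };

	pthread_mutex_init(&buf.buf_lock, NULL);
	cancel_io(&buf);
	return 0;
}
```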