...
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
...
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc/fc_fs.h>
 
-#include <linux/nvme.h>
-#include <linux/nvme-fc-driver.h>
-#include <linux/nvme-fc.h>
-
 #include "lpfc_version.h"
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
...
 #include "lpfc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
 #include "lpfc_vport.h"
 #include "lpfc_debugfs.h"
 
 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
-						 struct lpfc_nvmet_rcv_ctx *,
+						 struct lpfc_async_xchg_ctx *,
 						 dma_addr_t rspbuf,
 						 uint16_t rspsize);
 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
-						  struct lpfc_nvmet_rcv_ctx *);
+						  struct lpfc_async_xchg_ctx *);
 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
-					  struct lpfc_nvmet_rcv_ctx *,
+					  struct lpfc_async_xchg_ctx *,
 					  uint32_t, uint16_t);
 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
-					    struct lpfc_nvmet_rcv_ctx *,
+					    struct lpfc_async_xchg_ctx *,
 					    uint32_t, uint16_t);
-static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
-					   struct lpfc_nvmet_rcv_ctx *,
-					   uint32_t, uint16_t);
 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
-				    struct lpfc_nvmet_rcv_ctx *);
+				    struct lpfc_async_xchg_ctx *);
+static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
+
+static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
 
 static union lpfc_wqe128 lpfc_tsend_cmd_template;
 static union lpfc_wqe128 lpfc_treceive_cmd_template;
...
 	/* Word 12, 13, 14, 15 - is zero */
 }
 
-void
-lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+static struct lpfc_async_xchg_ctx *
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
 {
+	struct lpfc_async_xchg_ctx *ctxp;
 	unsigned long iflag;
+	bool found = false;
+
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+			continue;
+
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	if (found)
+		return ctxp;
+
+	return NULL;
+}
+
+static struct lpfc_async_xchg_ctx *
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
+{
+	struct lpfc_async_xchg_ctx *ctxp;
+	unsigned long iflag;
+	bool found = false;
+
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+		if (ctxp->oxid != oxid || ctxp->sid != sid)
+			continue;
+
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	if (found)
+		return ctxp;
+
+	return NULL;
+}
+#endif
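Note: both new helpers are linear walks of phba->sli4_hba.t_active_ctx_list under t_active_list_lock, and they drop the lock before returning, so a caller must tolerate the context going away afterwards. A minimal, hypothetical caller sketch (the helper name and the LPFC_NVME_ABTS_RCV flag appear in this patch; the caller itself is illustrative only, not part of the patch):

	/* Hypothetical sketch: resolve an incoming ABTS to its exchange
	 * context by XRI and note that an abort was received.
	 */
	static void example_mark_abts_rcvd(struct lpfc_hba *phba, u16 xri)
	{
		struct lpfc_async_xchg_ctx *ctxp;
		unsigned long iflag;

		ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
		if (!ctxp)
			return;		/* exchange already completed or freed */

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
	}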
+
+static void
+lpfc_nvmet_defer_release(struct lpfc_hba *phba,
+			 struct lpfc_async_xchg_ctx *ctxp)
+{
+	lockdep_assert_held(&ctxp->ctxlock);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
+			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
 			ctxp->oxid, ctxp->flag);
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
-	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
-				       iflag);
+	if (ctxp->flag & LPFC_NVME_CTX_RLS)
 		return;
-	}
-	ctxp->flag |= LPFC_NVMET_CTX_RLS;
+
+	ctxp->flag |= LPFC_NVME_CTX_RLS;
+	spin_lock(&phba->sli4_hba.t_active_list_lock);
+	list_del(&ctxp->list);
+	spin_unlock(&phba->sli4_hba.t_active_list_lock);
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+}
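With the lockdep assertion in place, the calling convention is explicit: lpfc_nvmet_defer_release() must now be entered with ctxp->ctxlock held. The caller pattern, exactly as used by the later hunks of this patch:

	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);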
+
+/**
+ * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
+ *         transmission of an NVME LS response.
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. The function frees memory resources used for the command
+ * used to send the NVME LS RSP.
+ **/
+void
+__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+			   struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
+	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
+	uint32_t status, result;
+
+	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+	result = wcqe->parameter;
+
+	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"6410 NVMEx LS cmpl state mismatch IO x%x: "
+				"%d %d\n",
+				axchg->oxid, axchg->state, axchg->entry_cnt);
+	}
+
+	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
+			 axchg->oxid, status, result);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
+			status, result, axchg->oxid);
+
+	lpfc_nlp_put(cmdwqe->context1);
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
+	lpfc_sli_release_iocbq(phba, cmdwqe);
+	ls_rsp->done(ls_rsp);
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
+			status, axchg->oxid);
+	kfree(axchg);
 }
 
 /**
...
  *
  * The function is called from SLI ring event handler with no
  * lock held. This function is the completion handler for NVME LS commands
- * The function frees memory resources used for the NVME commands.
+ * The function updates any states and statistics, then calls the
+ * generic completion handler to free resources.
  **/
 static void
 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 			  struct lpfc_wcqe_complete *wcqe)
 {
 	struct lpfc_nvmet_tgtport *tgtp;
-	struct nvmefc_tgt_ls_req *rsp;
-	struct lpfc_nvmet_rcv_ctx *ctxp;
 	uint32_t status, result;
 
-	status = bf_get(lpfc_wcqe_c_status, wcqe);
-	result = wcqe->parameter;
-	ctxp = cmdwqe->context2;
-
-	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6410 NVMET LS cmpl state mismatch IO x%x: "
-				"%d %d\n",
-				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
-	}
-
 	if (!phba->targetport)
-		goto out;
+		goto finish;
+
+	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+	result = wcqe->parameter;
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-
 	if (tgtp) {
 		if (status) {
 			atomic_inc(&tgtp->xmt_ls_rsp_error);
...
 		}
 	}
 
-out:
-	rsp = &ctxp->ctx.ls_req;
-
-	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
-			 ctxp->oxid, status, result);
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
-			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
-			status, result, ctxp->oxid);
-
-	lpfc_nlp_put(cmdwqe->context1);
-	cmdwqe->context2 = NULL;
-	cmdwqe->context3 = NULL;
-	lpfc_sli_release_iocbq(phba, cmdwqe);
-	rsp->done(rsp);
-	kfree(ctxp);
+finish:
+	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
 }
 
 /**
...
 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
-	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_nvmet_ctx_info *infop;
-	uint32_t *payload;
-	uint32_t size, oxid, sid, rc;
+	uint32_t size, oxid, sid;
 	int cpu;
 	unsigned long iflag;
 
-	if (ctxp->txrdy) {
-		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
-			      ctxp->txrdy_phys);
-		ctxp->txrdy = NULL;
-		ctxp->txrdy_phys = 0;
-	}
-
-	if (ctxp->state == LPFC_NVMET_STE_FREE) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+	if (ctxp->state == LPFC_NVME_STE_FREE) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"6411 NVMET free, already free IO x%x: %d %d\n",
 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 	}
-	ctxp->state = LPFC_NVMET_STE_FREE;
+
+	if (ctxp->rqb_buffer) {
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		nvmebuf = ctxp->rqb_buffer;
+		/* check if freed in another path whilst acquiring lock */
+		if (nvmebuf) {
+			ctxp->rqb_buffer = NULL;
+			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
+				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
+				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
+								    nvmebuf);
+			} else {
+				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+				/* repost */
+				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+			}
+		} else {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		}
+	}
+	ctxp->state = LPFC_NVME_STE_FREE;
 
 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
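The new rqb_buffer teardown uses a check/lock/re-check idiom: the unlocked test skips the lock in the common (NULL) case, and the locked re-read closes the race with another path freeing the buffer concurrently. The idiom in isolation (generic sketch; free_buffer() is a placeholder for the two release paths shown above):

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;	/* re-read under the lock */
		ctxp->rqb_buffer = NULL;	/* claim ownership; may already be NULL */
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		if (nvmebuf)
			free_buffer(phba, nvmebuf);	/* placeholder release */
	}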
...
 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		payload = (uint32_t *)(nvmebuf->dbuf.virt);
 		size = nvmebuf->bytes_recv;
 		sid = sli4_sid_from_fc_hdr(fc_hdr);
 
-		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
 		ctxp->wqeq = NULL;
-		ctxp->txrdy = NULL;
 		ctxp->offset = 0;
 		ctxp->phba = phba;
 		ctxp->size = size;
 		ctxp->oxid = oxid;
 		ctxp->sid = sid;
-		ctxp->state = LPFC_NVMET_STE_RCV;
+		ctxp->state = LPFC_NVME_STE_RCV;
 		ctxp->entry_cnt = 1;
 		ctxp->flag = 0;
 		ctxp->ctxbuf = ctx_buf;
...
 		spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-		if (ctxp->ts_cmd_nvme) {
-			ctxp->ts_cmd_nvme = ktime_get_ns();
+		/* NOTE: isr time stamp is stale when context is re-assigned*/
+		if (ctxp->ts_isr_cmd) {
+			ctxp->ts_cmd_nvme = 0;
 			ctxp->ts_nvme_data = 0;
 			ctxp->ts_data_wqput = 0;
 			ctxp->ts_isr_data = 0;
...
 		}
 #endif
 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
-		/*
-		 * The calling sequence should be:
-		 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
-		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
-		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
-		 * the NVME command / FC header is stored.
-		 * A buffer has already been reposted for this IO, so just free
-		 * the nvmebuf.
-		 */
-		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
-					  payload, size);
 
-		/* Process FCP command */
-		if (rc == 0) {
-			ctxp->rqb_buffer = NULL;
-			atomic_inc(&tgtp->rcv_fcp_cmd_out);
-			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-			return;
+		/* Indicate that a replacement buffer has been posted */
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+					"6181 Unable to queue deferred work "
+					"for oxid x%x. "
+					"FCP Drop IO [x%x x%x x%x]\n",
+					ctxp->oxid,
+					atomic_read(&tgtp->rcv_fcp_cmd_in),
+					atomic_read(&tgtp->rcv_fcp_cmd_out),
+					atomic_read(&tgtp->xmt_fcp_release));
+
+			spin_lock_irqsave(&ctxp->ctxlock, iflag);
+			lpfc_nvmet_defer_release(phba, ctxp);
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 		}
-
-		/* Processing of FCP command is deferred */
-		if (rc == -EOVERFLOW) {
-			lpfc_nvmeio_data(phba,
-					 "NVMET RCV BUSY: xri x%x sz %d "
-					 "from %06x\n",
-					 oxid, size, sid);
-			atomic_inc(&tgtp->rcv_fcp_cmd_out);
-			return;
-		}
-		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
-				ctxp->oxid, rc,
-				atomic_read(&tgtp->rcv_fcp_cmd_in),
-				atomic_read(&tgtp->rcv_fcp_cmd_out),
-				atomic_read(&tgtp->xmt_fcp_release));
-
-		lpfc_nvmet_defer_release(phba, ctxp);
-		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
-		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 		return;
 	}
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
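The receive path above no longer calls nvmet_fc_rcv_fcp_req() inline; it marks the context LPFC_NVME_CTX_REUSE_WQ and punts to phba->wq via the ctx_buf->defer_work item declared earlier in this patch. A sketch of what the declared worker pair could look like — the two function names, the container_of() relationship, and the nvmet_fc_rcv_fcp_req() arguments come from this patch, but the bodies below are an assumption and omit the real error handling:

	static void
	lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
	{
		struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
		struct lpfc_hba *phba = ctxp->phba;
		struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
		uint32_t *payload = (uint32_t *)(nvmebuf->dbuf.virt);
		int rc;

		/* Feed the saved command IU to the transport, mirroring the
		 * deleted inline call; a nonzero return would go through the
		 * defer/drop logic shown in the hunk above.
		 */
		rc = nvmet_fc_rcv_fcp_req(phba->targetport,
					  &ctxp->hdlrctx.fcp_req,
					  payload, ctxp->size);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"FCP cmd receive failed, rc %d\n", rc);
	}

	static void
	lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
	{
		struct lpfc_nvmet_ctxbuf *ctx_buf =
			container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
	}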
...
 	 * Use the CPU context list, from the MRQ the IO was received on
 	 * (ctxp->idx), to save context structure.
 	 */
-	cpu = smp_processor_id();
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_del_init(&ctxp->list);
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	cpu = raw_smp_processor_id();
 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
...
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 static void
 lpfc_nvmet_ktime(struct lpfc_hba *phba,
-		 struct lpfc_nvmet_rcv_ctx *ctxp)
+		 struct lpfc_async_xchg_ctx *ctxp)
 {
 	uint64_t seg1, seg2, seg3, seg4, seg5;
 	uint64_t seg6, seg7, seg8, seg9, seg10;
...
 {
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvmefc_tgt_fcp_req *rsp;
-	struct lpfc_nvmet_rcv_ctx *ctxp;
+	struct lpfc_async_xchg_ctx *ctxp;
 	uint32_t status, result, op, start_clean, logerr;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	uint32_t id;
+	int id;
 #endif
 
 	ctxp = cmdwqe->context2;
-	ctxp->flag &= ~LPFC_NVMET_IO_INP;
+	ctxp->flag &= ~LPFC_NVME_IO_INP;
 
-	rsp = &ctxp->ctx.fcp_req;
+	rsp = &ctxp->hdlrctx.fcp_req;
 	op = rsp->op;
 
 	status = bf_get(lpfc_wcqe_c_status, wcqe);
...
 
 		/* pick up SLI4 exchange busy condition */
 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
-			ctxp->flag |= LPFC_NVMET_XBUSY;
+			ctxp->flag |= LPFC_NVME_XBUSY;
 			logerr |= LOG_NVME_ABTS;
 			if (tgtp)
 				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
 
 		} else {
-			ctxp->flag &= ~LPFC_NVMET_XBUSY;
+			ctxp->flag &= ~LPFC_NVME_XBUSY;
 		}
 
 		lpfc_printf_log(phba, KERN_INFO, logerr,
-				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
-				ctxp->oxid, status, result, ctxp->flag);
+				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
+				"XBUSY:x%x\n",
+				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
+				status, result, ctxp->flag);
 
 	} else {
 		rsp->fcp_error = NVME_SC_SUCCESS;
...
 	if ((op == NVMET_FCOP_READDATA_RSP) ||
 	    (op == NVMET_FCOP_RSP)) {
 		/* Sanity check */
-		ctxp->state = LPFC_NVMET_STE_DONE;
+		ctxp->state = LPFC_NVME_STE_DONE;
 		ctxp->entry_cnt++;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
...
 					ktime_get_ns();
 		}
 	}
-	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
-		if (ctxp->cpu != id)
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-					"6703 CPU Check cmpl: "
-					"cpu %d expect %d\n",
-					id, ctxp->cpu);
-		if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_cmpl_io[id]++;
-	}
 #endif
 	rsp->done(rsp);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
...
 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
 			ctxp->ts_data_nvme = ktime_get_ns();
 		}
-	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
-		if (ctxp->cpu != id)
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-					"6704 CPU Check cmdcmpl: "
-					"cpu %d expect %d\n",
-					id, ctxp->cpu);
-		if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_ccmpl_io[id]++;
-	}
 #endif
 		rsp->done(rsp);
 	}
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+		id = raw_smp_processor_id();
+		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+		if (ctxp->cpu != id)
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+					"6704 CPU Check cmdcmpl: "
+					"cpu %d expect %d\n",
+					id, ctxp->cpu);
+	}
+#endif
 }
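The CPU-check accounting changes shape in this hunk: the old code bumped fixed-size driver arrays (phba->cpucheck_cmpl_io[id], bounded by LPFC_CHECK_CPU_CNT) and so had to range-check the CPU id; the new code uses this_cpu_inc() on the per-CPU c_stat counters, which needs no bound check and no lock. The underlying kernel idiom, as a self-contained sketch with a hypothetical stat struct (lpfc's real counterpart is the c_stat/hdwqstat machinery referenced above):

	#include <linux/percpu.h>

	struct example_io_stat {	/* hypothetical, for illustration */
		u32 xmt_io;
		u32 cmpl_io;
	};

	static struct example_io_stat __percpu *io_stat;

	static int example_stat_init(void)
	{
		io_stat = alloc_percpu(struct example_io_stat);
		return io_stat ? 0 : -ENOMEM;
	}

	static void example_note_cmpl(void)
	{
		/* Lock-free: increments this CPU's private copy */
		this_cpu_inc(io_stat->cmpl_io);
	}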
 
-static int
-lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
-		      struct nvmefc_tgt_ls_req *rsp)
+/**
+ * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
+ *         an NVME LS rsp for a prior NVME LS request that was received.
+ * @axchg: pointer to exchange context for the NVME LS request the response
+ *         is for.
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
+ * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
+ *
+ * This routine is used to format and send a WQE to transmit a NVME LS
+ * Response. The response is for a prior NVME LS request that was
+ * received and posted to the transport.
+ *
+ * Returns:
+ *  0 : if response successfully transmitted
+ *  non-zero : if response failed to transmit, of the form -Exxx.
+ **/
+int
+__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
+			struct nvmefc_ls_rsp *ls_rsp,
+			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
+				struct lpfc_iocbq *cmdwqe,
+				struct lpfc_wcqe_complete *wcqe))
 {
-	struct lpfc_nvmet_rcv_ctx *ctxp =
-		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
-	struct lpfc_hba *phba = ctxp->phba;
-	struct hbq_dmabuf *nvmebuf =
-		(struct hbq_dmabuf *)ctxp->rqb_buffer;
+	struct lpfc_hba *phba = axchg->phba;
+	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
 	struct lpfc_iocbq *nvmewqeq;
-	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
 	struct lpfc_dmabuf dmabuf;
 	struct ulp_bde64 bpl;
 	int rc;
...
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return -ENODEV;
 
-	if (phba->pport->load_flag & FC_UNLOADING)
-		return -ENODEV;
-
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
-			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
+			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
 
-	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
-	    (ctxp->entry_cnt != 1)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6412 NVMET LS rsp state mismatch "
+	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"6412 NVMEx LS rsp state mismatch "
 				"oxid x%x: %d %d\n",
-				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+				axchg->oxid, axchg->state, axchg->entry_cnt);
+		return -EALREADY;
 	}
-	ctxp->state = LPFC_NVMET_STE_LS_RSP;
-	ctxp->entry_cnt++;
+	axchg->state = LPFC_NVME_STE_LS_RSP;
+	axchg->entry_cnt++;
 
-	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
-					  rsp->rsplen);
+	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
+					  ls_rsp->rsplen);
 	if (nvmewqeq == NULL) {
-		atomic_inc(&nvmep->xmt_ls_drop);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6150 LS Drop IO x%x: Prep\n",
-				ctxp->oxid);
-		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-		atomic_inc(&nvmep->xmt_ls_abort);
-		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
-						ctxp->sid, ctxp->oxid);
-		return -ENOMEM;
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
+				axchg->oxid);
+		rc = -ENOMEM;
+		goto out_free_buf;
 	}
 
 	/* Save numBdes for bpl2sgl */
...
 	dmabuf.virt = &bpl;
 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
-	bpl.tus.f.bdeSize = rsp->rsplen;
+	bpl.tus.f.bdeSize = ls_rsp->rsplen;
 	bpl.tus.f.bdeFlags = 0;
 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
+	/*
+	 * Note: although we're using stack space for the dmabuf, the
+	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
+	 * be referenced after it returns back to this routine.
+	 */
 
-	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
+	nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
 	nvmewqeq->iocb_cmpl = NULL;
-	nvmewqeq->context2 = ctxp;
+	nvmewqeq->context2 = axchg;
 
-	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
-			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
+	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
+			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
 
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
+	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
+
+	/* clear to be sure there's no reference */
+	nvmewqeq->context3 = NULL;
+
 	if (rc == WQE_SUCCESS) {
 		/*
 		 * Okay to repost buffer here, but wait till cmpl
 		 * before freeing ctxp and iocbq.
 		 */
 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-		ctxp->rqb_buffer = 0;
-		atomic_inc(&nvmep->xmt_ls_rsp);
 		return 0;
 	}
-	/* Give back resources */
-	atomic_inc(&nvmep->xmt_ls_drop);
-	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-			"6151 LS Drop IO x%x: Issue %d\n",
-			ctxp->oxid, rc);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
+			axchg->oxid, rc);
+
+	rc = -ENXIO;
 
 	lpfc_nlp_put(nvmewqeq->context1);
 
+out_free_buf:
+	/* Give back resources */
 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-	atomic_inc(&nvmep->xmt_ls_abort);
-	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
-	return -ENXIO;
+
+	/*
+	 * As transport doesn't track completions of responses, if the rsp
+	 * fails to send, the transport will effectively ignore the rsp
+	 * and consider the LS done. However, the driver has an active
+	 * exchange open for the LS - so be sure to abort the exchange
+	 * if the response isn't sent.
+	 */
+	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
+	return rc;
+}
+
+/**
+ * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
+ * @tgtport: pointer to target port that the NVME LS is to be transmitted from.
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
+ *
+ * Driver registers this routine to transmit responses for received NVME
+ * LS requests.
+ *
+ * This routine is used to format and send a WQE to transmit a NVME LS
+ * Response. The ls_rsp is used to reverse-map the LS to the original
+ * NVME LS request sequence, which provides addressing information for
+ * the remote port the LS is to be sent to, as well as the exchange id
+ * the LS is bound to.
+ *
+ * Returns:
+ *  0 : if response successfully transmitted
+ *  non-zero : if response failed to transmit, of the form -Exxx.
+ **/
+static int
+lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
+		      struct nvmefc_ls_rsp *ls_rsp)
+{
+	struct lpfc_async_xchg_ctx *axchg =
+		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
+	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
+	int rc;
+
+	if (axchg->phba->pport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
+	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
+
+	if (rc) {
+		atomic_inc(&nvmep->xmt_ls_drop);
+		/*
+		 * unless the failure is due to having already sent
+		 * the response, an abort will be generated for the
+		 * exchange if the rsp can't be sent.
+		 */
+		if (rc != -EALREADY)
+			atomic_inc(&nvmep->xmt_ls_abort);
+		return rc;
+	}
+
+	atomic_inc(&nvmep->xmt_ls_rsp);
+	return 0;
 }
 
 static int
...
 		      struct nvmefc_tgt_fcp_req *rsp)
 {
 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
-	struct lpfc_nvmet_rcv_ctx *ctxp =
-		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_async_xchg_ctx *ctxp =
+		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
 	struct lpfc_hba *phba = ctxp->phba;
 	struct lpfc_queue *wq;
 	struct lpfc_iocbq *nvmewqeq;
 	struct lpfc_sli_ring *pring;
 	unsigned long iflags;
 	int rc;
-
-	if (phba->pport->load_flag & FC_UNLOADING) {
-		rc = -ENODEV;
-		goto aerr;
-	}
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	int id;
+#endif
 
 	if (phba->pport->load_flag & FC_UNLOADING) {
 		rc = -ENODEV;
...
 		else
 			ctxp->ts_nvme_data = ktime_get_ns();
 	}
-	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		int id = smp_processor_id();
-		ctxp->cpu = id;
-		if (id < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_xmt_io[id]++;
-		if (rsp->hwqid != id) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+
+	/* Setup the hdw queue if not already set */
+	if (!ctxp->hdwq)
+		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
+
+	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+		id = raw_smp_processor_id();
+		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+		if (rsp->hwqid != id)
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 					"6705 CPU Check OP: "
 					"cpu %d expect %d\n",
 					id, rsp->hwqid);
-			ctxp->cpu = rsp->hwqid;
-		}
+		ctxp->cpu = id; /* Setup cpu for cmpl check */
 	}
 #endif
 
 	/* Sanity check */
-	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
-	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
+	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
+	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6102 IO xri x%x aborted\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"6102 IO oxid x%x aborted\n",
 				ctxp->oxid);
 		rc = -ENXIO;
 		goto aerr;
...
 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
 	if (nvmewqeq == NULL) {
 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"6152 FCP Drop IO x%x: Prep\n",
 				ctxp->oxid);
 		rc = -ENXIO;
...
 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
 			 ctxp->oxid, rsp->op, rsp->rsplen);
 
-	ctxp->flag |= LPFC_NVMET_IO_INP;
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+	ctxp->flag |= LPFC_NVME_IO_INP;
+	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
 	if (rc == WQE_SUCCESS) {
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 		if (!ctxp->ts_cmd_nvme)
...
 		 * WQ was full, so queue nvmewqeq to be sent after
 		 * WQE release CQE
 		 */
-		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
-		wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
+		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
+		wq = ctxp->hdwq->io_wq;
 		pring = wq->pring;
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
...
 
 	/* Give back resources */
 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
-	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 			"6153 FCP Drop IO x%x: Issue: %d\n",
 			ctxp->oxid, rc);
 
...
 			 struct nvmefc_tgt_fcp_req *req)
 {
 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
-	struct lpfc_nvmet_rcv_ctx *ctxp =
-		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_async_xchg_ctx *ctxp =
+		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
 	struct lpfc_hba *phba = ctxp->phba;
 	struct lpfc_queue *wq;
 	unsigned long flags;
...
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return;
 
-	if (phba->pport->load_flag & FC_UNLOADING)
-		return;
+	if (!ctxp->hdwq)
+		ctxp->hdwq = &phba->sli4_hba.hdwq[0];
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
 			ctxp->oxid, ctxp->flag, ctxp->state);
 
 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
...
 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
 
 	spin_lock_irqsave(&ctxp->ctxlock, flags);
-	ctxp->state = LPFC_NVMET_STE_ABORT;
 
 	/* Since iaab/iaar are NOT set, we need to check
 	 * if the firmware is in process of aborting IO
 	 */
-	if (ctxp->flag & LPFC_NVMET_XBUSY) {
+	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return;
 	}
-	ctxp->flag |= LPFC_NVMET_ABORT_OP;
+	ctxp->flag |= LPFC_NVME_ABORT_OP;
 
-	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
+	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
 						 ctxp->oxid);
-		wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
-		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+		wq = ctxp->hdwq->io_wq;
 		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
 		return;
 	}
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 
-	/* An state of LPFC_NVMET_STE_RCV means we have just received
+	/* A state of LPFC_NVME_STE_RCV means we have just received
 	 * the NVME command and have not started processing it.
 	 * (by issuing any IO WQEs on this exchange yet)
 	 */
-	if (ctxp->state == LPFC_NVMET_STE_RCV)
+	if (ctxp->state == LPFC_NVME_STE_RCV)
 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
 						 ctxp->oxid);
 	else
 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
 					       ctxp->oxid);
-	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 }
 
 static void
...
 			   struct nvmefc_tgt_fcp_req *rsp)
 {
 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
-	struct lpfc_nvmet_rcv_ctx *ctxp =
-		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_async_xchg_ctx *ctxp =
+		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
 	struct lpfc_hba *phba = ctxp->phba;
 	unsigned long flags;
 	bool aborting = false;
 
-	if (ctxp->state != LPFC_NVMET_STE_DONE &&
-	    ctxp->state != LPFC_NVMET_STE_ABORT) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
+	if (ctxp->flag & LPFC_NVME_XBUSY)
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+				"6027 NVMET release with XBUSY flag x%x"
+				" oxid x%x\n",
+				ctxp->flag, ctxp->oxid);
+	else if (ctxp->state != LPFC_NVME_STE_DONE &&
+		 ctxp->state != LPFC_NVME_STE_ABORT)
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"6413 NVMET release bad state %d %d oxid x%x\n",
 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
-	}
 
-	spin_lock_irqsave(&ctxp->ctxlock, flags);
-	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
-	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
+	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
+	    (ctxp->flag & LPFC_NVME_XBUSY)) {
 		aborting = true;
 		/* let the abort path do the real release */
 		lpfc_nvmet_defer_release(phba, ctxp);
...
 		  ctxp->state, aborting);
 
 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
 
 	if (aborting)
 		return;
...
 		     struct nvmefc_tgt_fcp_req *rsp)
 {
 	struct lpfc_nvmet_tgtport *tgtp;
-	struct lpfc_nvmet_rcv_ctx *ctxp =
-		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_async_xchg_ctx *ctxp =
+		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
 	struct lpfc_hba *phba = ctxp->phba;
+	unsigned long iflag;
+
 
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
-			 ctxp->oxid, ctxp->size, smp_processor_id());
+			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
 	if (!nvmebuf) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
-				"6425 Defer rcv: no buffer xri x%x: "
+				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
 		return;
...
 
 	/* Free the nvmebuf since a new buffer already replaced it */
 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	ctxp->rqb_buffer = NULL;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+}
+
+/**
+ * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
+ * @phba: Pointer to HBA context object
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * This function is the completion handler for NVME LS requests.
+ * The function updates any states and statistics, then calls the
+ * generic completion handler to finish completion of the request.
+ **/
+static void
+lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+		      struct lpfc_wcqe_complete *wcqe)
+{
+	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
+}
+
+/**
+ * lpfc_nvmet_ls_req - Issue a Link Service request
+ * @targetport - pointer to target instance registered with nvmet transport.
+ * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
+ *               Driver sets this value to the ndlp pointer.
+ * @pnvme_lsreq - the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to handle any link service request
+ * from the nvme_fc transport to a remote nvme-aware port.
+ *
+ * Return value :
+ *  0 - Success
+ *  non-zero: various error codes, in form of -Exxx
+ **/
+static int
+lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
+		  void *hosthandle,
+		  struct nvmefc_ls_req *pnvme_lsreq)
+{
+	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
+	struct lpfc_hba *phba;
+	struct lpfc_nodelist *ndlp;
+	int ret;
+	u32 hstate;
+
+	if (!lpfc_nvmet)
+		return -EINVAL;
+
+	phba = lpfc_nvmet->phba;
+	if (phba->pport->load_flag & FC_UNLOADING)
+		return -EINVAL;
+
+	hstate = atomic_read(&lpfc_nvmet->state);
+	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
+		return -EACCES;
+
+	ndlp = (struct lpfc_nodelist *)hosthandle;
+
+	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
+				 lpfc_nvmet_ls_req_cmp);
+
+	return ret;
+}
+
+/**
+ * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
+ * @targetport: Transport targetport, that LS was issued from.
+ * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
+ *               Driver sets this value to the ndlp pointer.
+ * @pnvme_lsreq - the transport nvme_ls_req structure for LS to be aborted
+ *
+ * Driver registers this routine to abort an NVME LS request that is
+ * in progress (from the transport's perspective).
+ **/
+static void
+lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
+		    void *hosthandle,
+		    struct nvmefc_ls_req *pnvme_lsreq)
+{
+	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
+	struct lpfc_hba *phba;
+	struct lpfc_nodelist *ndlp;
+	int ret;
+
+	phba = lpfc_nvmet->phba;
+	if (phba->pport->load_flag & FC_UNLOADING)
+		return;
+
+	ndlp = (struct lpfc_nodelist *)hosthandle;
+
+	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
+	if (!ret)
+		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
+}
+
+static void
+lpfc_nvmet_host_release(void *hosthandle)
+{
+	struct lpfc_nodelist *ndlp = hosthandle;
+	struct lpfc_hba *phba = NULL;
+	struct lpfc_nvmet_tgtport *tgtp;
+
+	phba = ndlp->phba;
+	if (!phba->targetport || !phba->targetport->private)
+		return;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			"6202 NVMET XPT releasing hosthandle x%px\n",
+			hosthandle);
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	atomic_set(&tgtp->state, 0);
+}
+
+static void
+lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_hba *phba;
+	uint32_t rc;
+
+	tgtp = tgtport->private;
+	phba = tgtp->phba;
+
+	rc = lpfc_issue_els_rscn(phba->pport, 0);
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			"6420 NVMET subsystem change: Notification %s\n",
+			(rc) ? "Failed" : "Sent");
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
...
 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
 	.defer_rcv	= lpfc_nvmet_defer_rcv,
+	.discovery_event = lpfc_nvmet_discovery_event,
+	.ls_req         = lpfc_nvmet_ls_req,
+	.ls_abort       = lpfc_nvmet_ls_abort,
+	.host_release   = lpfc_nvmet_host_release,
 
 	.max_hw_queues  = 1,
 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
...
 	.target_features = 0,
 	/* sizes of additional private data for data structures */
 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
+	.lsrqst_priv_sz = 0,
 };
 
 static void
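For context, a filled-in nvmet_fc_target_template like the one above is bound to a physical port through the nvmet-fc registration API. A minimal sketch of such a registration (lpfc's real call sits in its targetport-create path, outside this hunk; the vport field accesses below are assumptions for illustration):

	struct nvmet_fc_port_info pinfo;
	struct nvmet_fc_target_port *tgt_port;
	int error;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id   = vport->fc_myDID;

	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev, &tgt_port);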
...
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
 				 &infop->nvmet_ctx_list, list) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_del_init(&ctx_buf->list);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 
 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
...
 
 	/* Cycle the entire CPU context list for every MRQ */
 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
-		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+		for_each_present_cpu(j) {
+			infop = lpfc_get_ctx_list(phba, j, i);
 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
-			infop++; /* next */
 		}
 	}
 	kfree(phba->sli4_hba.nvmet_ctx_info);
...
 	union lpfc_wqe128 *wqe;
 	struct lpfc_nvmet_ctx_info *last_infop;
 	struct lpfc_nvmet_ctx_info *infop;
-	int i, j, idx;
+	int i, j, idx, cpu;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6403 Allocate NVMET resources for %d XRIs\n",
 			phba->sli4_hba.nvmet_xri_cnt);
 
 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
-		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
 	if (!phba->sli4_hba.nvmet_ctx_info) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"6419 Failed allocate memory for "
 				"nvmet context lists\n");
 		return -ENOMEM;
...
 	 * of the IO completion. Thus a context that was allocated for MRQ A
 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
 	 */
-	infop = phba->sli4_hba.nvmet_ctx_info;
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_possible_cpu(i) {
 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
 			spin_lock_init(&infop->nvmet_ctx_list_lock);
 			infop->nvmet_ctx_list_cnt = 0;
-			infop++;
 		}
 	}
 
...
 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
 	 */
 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-		last_infop = lpfc_get_ctx_list(phba, 0, j);
-		for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
+		last_infop = lpfc_get_ctx_list(phba,
+					       cpumask_first(cpu_present_mask),
+					       j);
+		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
 			infop = lpfc_get_ctx_list(phba, i, j);
 			infop->nvmet_ctx_next_cpu = last_infop;
 			last_infop = infop;
...
 	 * received command on a per xri basis.
 	 */
 	idx = 0;
+	cpu = cpumask_first(cpu_present_mask);
 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
 		if (!ctx_buf) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"6404 Ran out of memory for NVMET\n");
 			return -ENOMEM;
 		}
...
 					   GFP_KERNEL);
 		if (!ctx_buf->context) {
 			kfree(ctx_buf);
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"6405 Ran out of NVMET "
 					"context memory\n");
 			return -ENOMEM;
 		}
 		ctx_buf->context->ctxbuf = ctx_buf;
-		ctx_buf->context->state = LPFC_NVMET_STE_FREE;
+		ctx_buf->context->state = LPFC_NVME_STE_FREE;
 
 		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
 		if (!ctx_buf->iocbq) {
 			kfree(ctx_buf->context);
 			kfree(ctx_buf);
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"6406 Ran out of NVMET iocb/WQEs\n");
 			return -ENOMEM;
 		}
...
 			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
 			kfree(ctx_buf->context);
 			kfree(ctx_buf);
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"6407 Ran out of NVMET XRIs\n");
 			return -ENOMEM;
 		}
+		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
 
 		/*
 		 * Add ctx to MRQidx context list. Our initial assumption
 		 * is MRQidx will be associated with CPUidx. This association
 		 * can change on the fly.
 		 */
-		infop = lpfc_get_ctx_list(phba, idx, idx);
+		infop = lpfc_get_ctx_list(phba, cpu, idx);
 		spin_lock(&infop->nvmet_ctx_list_lock);
 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
 		infop->nvmet_ctx_list_cnt++;
...
 
 		/* Spread ctx structures evenly across all MRQs */
 		idx++;
-		if (idx >= phba->cfg_nvmet_mrq)
+		if (idx >= phba->cfg_nvmet_mrq) {
 			idx = 0;
+			cpu = cpumask_first(cpu_present_mask);
+			continue;
+		}
+		cpu = cpumask_next(cpu, cpu_present_mask);
+		if (cpu == nr_cpu_ids)
+			cpu = cpumask_first(cpu_present_mask);
+
 	}
1341 | 1620 | } |
---|
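
The idx/cpu bookkeeping above advances both cursors together so XRIs land evenly across MRQs and present CPUs; the CPU cursor must wrap explicitly because cpumask_next() returns nr_cpu_ids once the mask is exhausted. A sketch of that wrap in isolation:

    /* Sketch: round-robin step over the present-CPU mask, wrapping
     * back to the first present CPU, exactly as the XRI-spreading
     * loop above does.
     */
    static int next_present_cpu(int cpu)
    {
            cpu = cpumask_next(cpu, cpu_present_mask);
            if (cpu >= nr_cpu_ids)
                    cpu = cpumask_first(cpu_present_mask);
            return cpu;
    }
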
1342 | 1621 | |
---|
1343 | | - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { |
---|
| 1622 | + for_each_present_cpu(i) { |
---|
1344 | 1623 | for (j = 0; j < phba->cfg_nvmet_mrq; j++) { |
---|
1345 | 1624 | infop = lpfc_get_ctx_list(phba, i, j); |
---|
1346 | 1625 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, |
---|
1347 | 1626 | "6408 TOTAL NVMET ctx for CPU %d " |
---|
1348 | | - "MRQ %d: cnt %d nextcpu %p\n", |
---|
| 1627 | + "MRQ %d: cnt %d nextcpu x%px\n", |
---|
1349 | 1628 | i, j, infop->nvmet_ctx_list_cnt, |
---|
1350 | 1629 | infop->nvmet_ctx_next_cpu); |
---|
1351 | 1630 | } |
---|
.. | .. |
---|
1373 | 1652 | pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); |
---|
1374 | 1653 | pinfo.port_id = vport->fc_myDID; |
---|
1375 | 1654 | |
---|
1376 | | - /* Limit to LPFC_MAX_NVME_SEG_CNT. |
---|
1377 | | - * For now need + 1 to get around NVME transport logic. |
---|
| 1655 | + /* We need to tell the transport layer + 1 because it takes page |
---|
| 1656 | + * alignment into account. When space for the SGL is allocated we |
---|
| 1657 | + * allocate + 3: one for the cmd, one for the rsp, and one for this alignment. |
---|
1378 | 1658 | */ |
---|
1379 | | - if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { |
---|
1380 | | - lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, |
---|
1381 | | - "6400 Reducing sg segment cnt to %d\n", |
---|
1382 | | - LPFC_MAX_NVME_SEG_CNT); |
---|
1383 | | - phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; |
---|
1384 | | - } else { |
---|
1385 | | - phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; |
---|
1386 | | - } |
---|
1387 | 1659 | lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; |
---|
1388 | | - lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; |
---|
| 1660 | + lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; |
---|
1389 | 1661 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; |
---|
1390 | 1662 | |
---|
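
The registration call itself sits inside the CONFIG_NVME_TARGET_FC guard that follows (the call is elided from this hunk); for orientation, a hedged sketch of its shape using nvmet-fc's public entry point, error handling trimmed:

    /* Sketch: hand the filled-in port info and target template to
     * nvmet-fc; on success phba->targetport is populated and its
     * private area holds the lpfc_nvmet_tgtport.
     */
    error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
                                         &phba->pcidev->dev,
                                         &phba->targetport);
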
1391 | 1663 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
.. | .. |
---|
1396 | 1668 | error = -ENOENT; |
---|
1397 | 1669 | #endif |
---|
1398 | 1670 | if (error) { |
---|
1399 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
---|
| 1671 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
1400 | 1672 | "6025 Cannot register NVME targetport x%x: " |
---|
1401 | 1673 | "portnm %llx nodenm %llx segs %d qs %d\n", |
---|
1402 | 1674 | error, |
---|
.. | .. |
---|
1415 | 1687 | |
---|
1416 | 1688 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, |
---|
1417 | 1689 | "6026 Registered NVME " |
---|
1418 | | - "targetport: %p, private %p " |
---|
| 1690 | + "targetport: x%px, private x%px " |
---|
1419 | 1691 | "portnm %llx nodenm %llx segs %d qs %d\n", |
---|
1420 | 1692 | phba->targetport, tgtp, |
---|
1421 | 1693 | pinfo.port_name, pinfo.node_name, |
---|
.. | .. |
---|
1470 | 1742 | return 0; |
---|
1471 | 1743 | |
---|
1472 | 1744 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
---|
1473 | | - "6007 Update NVMET port %p did x%x\n", |
---|
| 1745 | + "6007 Update NVMET port x%px did x%x\n", |
---|
1474 | 1746 | phba->targetport, vport->fc_myDID); |
---|
1475 | 1747 | |
---|
1476 | 1748 | phba->targetport->port_id = vport->fc_myDID; |
---|
.. | .. |
---|
1489 | 1761 | lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, |
---|
1490 | 1762 | struct sli4_wcqe_xri_aborted *axri) |
---|
1491 | 1763 | { |
---|
| 1764 | +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
1492 | 1765 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); |
---|
1493 | 1766 | uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); |
---|
1494 | | - struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; |
---|
| 1767 | + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; |
---|
1495 | 1768 | struct lpfc_nvmet_tgtport *tgtp; |
---|
| 1769 | + struct nvmefc_tgt_fcp_req *req = NULL; |
---|
1496 | 1770 | struct lpfc_nodelist *ndlp; |
---|
1497 | 1771 | unsigned long iflag = 0; |
---|
1498 | 1772 | int rrq_empty = 0; |
---|
.. | .. |
---|
1510 | 1784 | } |
---|
1511 | 1785 | |
---|
1512 | 1786 | spin_lock_irqsave(&phba->hbalock, iflag); |
---|
1513 | | - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
| 1787 | + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
1514 | 1788 | list_for_each_entry_safe(ctxp, next_ctxp, |
---|
1515 | 1789 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, |
---|
1516 | 1790 | list) { |
---|
1517 | 1791 | if (ctxp->ctxbuf->sglq->sli4_xritag != xri) |
---|
1518 | 1792 | continue; |
---|
1519 | 1793 | |
---|
| 1794 | + spin_lock(&ctxp->ctxlock); |
---|
1520 | 1795 | /* Check if we already received a free context call |
---|
1521 | 1796 | * and we have completed processing an abort situation. |
---|
1522 | 1797 | */ |
---|
1523 | | - if (ctxp->flag & LPFC_NVMET_CTX_RLS && |
---|
1524 | | - !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { |
---|
1525 | | - list_del(&ctxp->list); |
---|
| 1798 | + if (ctxp->flag & LPFC_NVME_CTX_RLS && |
---|
| 1799 | + !(ctxp->flag & LPFC_NVME_ABORT_OP)) { |
---|
| 1800 | + list_del_init(&ctxp->list); |
---|
1526 | 1801 | released = true; |
---|
1527 | 1802 | } |
---|
1528 | | - ctxp->flag &= ~LPFC_NVMET_XBUSY; |
---|
1529 | | - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
| 1803 | + ctxp->flag &= ~LPFC_NVME_XBUSY; |
---|
| 1804 | + spin_unlock(&ctxp->ctxlock); |
---|
| 1805 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
1530 | 1806 | |
---|
1531 | 1807 | rrq_empty = list_empty(&phba->active_rrq_list); |
---|
1532 | 1808 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
---|
.. | .. |
---|
1541 | 1817 | } |
---|
1542 | 1818 | |
---|
1543 | 1819 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
1544 | | - "6318 XB aborted oxid %x flg x%x (%x)\n", |
---|
| 1820 | + "6318 XB aborted oxid x%x flg x%x (%x)\n", |
---|
1545 | 1821 | ctxp->oxid, ctxp->flag, released); |
---|
1546 | 1822 | if (released) |
---|
1547 | 1823 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
---|
.. | .. |
---|
1550 | 1826 | lpfc_worker_wake_up(phba); |
---|
1551 | 1827 | return; |
---|
1552 | 1828 | } |
---|
1553 | | - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
| 1829 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
1554 | 1830 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
---|
| 1831 | + |
---|
| 1832 | + ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); |
---|
| 1833 | + if (ctxp) { |
---|
| 1834 | + /* |
---|
| 1835 | + * Abort already done by FW, so BA_ACC sent. |
---|
| 1836 | + * However, the transport may be unaware. |
---|
| 1837 | + */ |
---|
| 1838 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
| 1839 | + "6323 NVMET Rcv ABTS xri x%x ctxp state x%x " |
---|
| 1840 | + "flag x%x oxid x%x rxid x%x\n", |
---|
| 1841 | + xri, ctxp->state, ctxp->flag, ctxp->oxid, |
---|
| 1842 | + rxid); |
---|
| 1843 | + |
---|
| 1844 | + spin_lock_irqsave(&ctxp->ctxlock, iflag); |
---|
| 1845 | + ctxp->flag |= LPFC_NVME_ABTS_RCV; |
---|
| 1846 | + ctxp->state = LPFC_NVME_STE_ABORT; |
---|
| 1847 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); |
---|
| 1848 | + |
---|
| 1849 | + lpfc_nvmeio_data(phba, |
---|
| 1850 | + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", |
---|
| 1851 | + xri, raw_smp_processor_id(), 0); |
---|
| 1852 | + |
---|
| 1853 | + req = &ctxp->hdlrctx.fcp_req; |
---|
| 1854 | + if (req) |
---|
| 1855 | + nvmet_fc_rcv_fcp_abort(phba->targetport, req); |
---|
| 1856 | + } |
---|
| 1857 | +#endif |
---|
1555 | 1858 | } |
---|
1556 | 1859 | |
---|
1557 | 1860 | int |
---|
1558 | 1861 | lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, |
---|
1559 | 1862 | struct fc_frame_header *fc_hdr) |
---|
1560 | | - |
---|
1561 | 1863 | { |
---|
1562 | 1864 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
1563 | 1865 | struct lpfc_hba *phba = vport->phba; |
---|
1564 | | - struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; |
---|
| 1866 | + struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; |
---|
1565 | 1867 | struct nvmefc_tgt_fcp_req *rsp; |
---|
1566 | | - uint16_t xri; |
---|
| 1868 | + uint32_t sid; |
---|
| 1869 | + uint16_t oxid, xri; |
---|
1567 | 1870 | unsigned long iflag = 0; |
---|
1568 | 1871 | |
---|
1569 | | - xri = be16_to_cpu(fc_hdr->fh_ox_id); |
---|
| 1872 | + sid = sli4_sid_from_fc_hdr(fc_hdr); |
---|
| 1873 | + oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
---|
1570 | 1874 | |
---|
1571 | 1875 | spin_lock_irqsave(&phba->hbalock, iflag); |
---|
1572 | | - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
| 1876 | + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
1573 | 1877 | list_for_each_entry_safe(ctxp, next_ctxp, |
---|
1574 | 1878 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, |
---|
1575 | 1879 | list) { |
---|
1576 | | - if (ctxp->ctxbuf->sglq->sli4_xritag != xri) |
---|
| 1880 | + if (ctxp->oxid != oxid || ctxp->sid != sid) |
---|
1577 | 1881 | continue; |
---|
1578 | 1882 | |
---|
1579 | | - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
| 1883 | + xri = ctxp->ctxbuf->sglq->sli4_xritag; |
---|
| 1884 | + |
---|
| 1885 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
1580 | 1886 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
---|
1581 | 1887 | |
---|
1582 | 1888 | spin_lock_irqsave(&ctxp->ctxlock, iflag); |
---|
1583 | | - ctxp->flag |= LPFC_NVMET_ABTS_RCV; |
---|
| 1889 | + ctxp->flag |= LPFC_NVME_ABTS_RCV; |
---|
1584 | 1890 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag); |
---|
1585 | 1891 | |
---|
1586 | 1892 | lpfc_nvmeio_data(phba, |
---|
1587 | 1893 | "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", |
---|
1588 | | - xri, smp_processor_id(), 0); |
---|
| 1894 | + xri, raw_smp_processor_id(), 0); |
---|
1589 | 1895 | |
---|
1590 | 1896 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
1591 | 1897 | "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); |
---|
1592 | 1898 | |
---|
1593 | | - rsp = &ctxp->ctx.fcp_req; |
---|
| 1899 | + rsp = &ctxp->hdlrctx.fcp_req; |
---|
1594 | 1900 | nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); |
---|
1595 | 1901 | |
---|
1596 | 1902 | /* Respond with BA_ACC accordingly */ |
---|
1597 | 1903 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); |
---|
1598 | 1904 | return 0; |
---|
1599 | 1905 | } |
---|
1600 | | - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
| 1906 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
1601 | 1907 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
---|
1602 | 1908 | |
---|
1603 | | - lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", |
---|
1604 | | - xri, smp_processor_id(), 1); |
---|
| 1909 | + /* check the wait list */ |
---|
| 1910 | + if (phba->sli4_hba.nvmet_io_wait_cnt) { |
---|
| 1911 | + struct rqb_dmabuf *nvmebuf; |
---|
| 1912 | + struct fc_frame_header *fc_hdr_tmp; |
---|
| 1913 | + u32 sid_tmp; |
---|
| 1914 | + u16 oxid_tmp; |
---|
| 1915 | + bool found = false; |
---|
| 1916 | + |
---|
| 1917 | + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); |
---|
| 1918 | + |
---|
| 1919 | + /* match by oxid and s_id */ |
---|
| 1920 | + list_for_each_entry(nvmebuf, |
---|
| 1921 | + &phba->sli4_hba.lpfc_nvmet_io_wait_list, |
---|
| 1922 | + hbuf.list) { |
---|
| 1923 | + fc_hdr_tmp = (struct fc_frame_header *) |
---|
| 1924 | + (nvmebuf->hbuf.virt); |
---|
| 1925 | + oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id); |
---|
| 1926 | + sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp); |
---|
| 1927 | + if (oxid_tmp != oxid || sid_tmp != sid) |
---|
| 1928 | + continue; |
---|
| 1929 | + |
---|
| 1930 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
| 1931 | + "6321 NVMET Rcv ABTS oxid x%x from x%x " |
---|
| 1932 | + "is waiting for a ctxp\n", |
---|
| 1933 | + oxid, sid); |
---|
| 1934 | + |
---|
| 1935 | + list_del_init(&nvmebuf->hbuf.list); |
---|
| 1936 | + phba->sli4_hba.nvmet_io_wait_cnt--; |
---|
| 1937 | + found = true; |
---|
| 1938 | + break; |
---|
| 1939 | + } |
---|
| 1940 | + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, |
---|
| 1941 | + iflag); |
---|
| 1942 | + |
---|
| 1943 | + /* free buffer since already posted a new DMA buffer to RQ */ |
---|
| 1944 | + if (found) { |
---|
| 1945 | + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); |
---|
| 1946 | + /* Respond with BA_ACC accordingly */ |
---|
| 1947 | + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); |
---|
| 1948 | + return 0; |
---|
| 1949 | + } |
---|
| 1950 | + } |
---|
| 1951 | + |
---|
| 1952 | + /* check active list */ |
---|
| 1953 | + ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); |
---|
| 1954 | + if (ctxp) { |
---|
| 1955 | + xri = ctxp->ctxbuf->sglq->sli4_xritag; |
---|
| 1956 | + |
---|
| 1957 | + spin_lock_irqsave(&ctxp->ctxlock, iflag); |
---|
| 1958 | + ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP); |
---|
| 1959 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); |
---|
| 1960 | + |
---|
| 1961 | + lpfc_nvmeio_data(phba, |
---|
| 1962 | + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", |
---|
| 1963 | + xri, raw_smp_processor_id(), 0); |
---|
| 1964 | + |
---|
| 1965 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
| 1966 | + "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x " |
---|
| 1967 | + "flag x%x state x%x\n", |
---|
| 1968 | + ctxp->oxid, xri, ctxp->flag, ctxp->state); |
---|
| 1969 | + |
---|
| 1970 | + if (ctxp->flag & LPFC_NVME_TNOTIFY) { |
---|
| 1971 | + /* Notify the transport */ |
---|
| 1972 | + nvmet_fc_rcv_fcp_abort(phba->targetport, |
---|
| 1973 | + &ctxp->hdlrctx.fcp_req); |
---|
| 1974 | + } else { |
---|
| 1975 | + cancel_work_sync(&ctxp->ctxbuf->defer_work); |
---|
| 1976 | + spin_lock_irqsave(&ctxp->ctxlock, iflag); |
---|
| 1977 | + lpfc_nvmet_defer_release(phba, ctxp); |
---|
| 1978 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); |
---|
| 1979 | + } |
---|
| 1980 | + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, |
---|
| 1981 | + ctxp->oxid); |
---|
| 1982 | + |
---|
| 1983 | + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); |
---|
| 1984 | + return 0; |
---|
| 1985 | + } |
---|
| 1986 | + |
---|
| 1987 | + lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n", |
---|
| 1988 | + oxid, raw_smp_processor_id(), 1); |
---|
1605 | 1989 | |
---|
1606 | 1990 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
1607 | | - "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri); |
---|
| 1991 | + "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid); |
---|
1608 | 1992 | |
---|
1609 | 1993 | /* Respond with BA_RJT accordingly */ |
---|
1610 | 1994 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); |
---|
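
All three ABTS lookups above (abort list, RQ wait list, active list) key on the (oxid, sid) pair recovered from the raw frame header. A sketch of that extraction, assuming the standard struct fc_frame_header from <scsi/fc/fc_fs.h> (fh_ox_id is big-endian, fh_s_id is a 3-byte N_Port ID):

    /* Sketch: pull the originator exchange ID and source N_Port ID
     * out of a received frame header; the driver's
     * sli4_sid_from_fc_hdr() macro performs the same byte assembly.
     */
    static void abts_match_keys(struct fc_frame_header *fc_hdr,
                                u16 *oxid, u32 *sid)
    {
            *oxid = be16_to_cpu(fc_hdr->fh_ox_id);
            *sid = fc_hdr->fh_s_id[0] << 16 |
                   fc_hdr->fh_s_id[1] << 8 |
                   fc_hdr->fh_s_id[2];
    }
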
.. | .. |
---|
1614 | 1998 | |
---|
1615 | 1999 | static void |
---|
1616 | 2000 | lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, |
---|
1617 | | - struct lpfc_nvmet_rcv_ctx *ctxp) |
---|
| 2001 | + struct lpfc_async_xchg_ctx *ctxp) |
---|
1618 | 2002 | { |
---|
1619 | 2003 | struct lpfc_sli_ring *pring; |
---|
1620 | 2004 | struct lpfc_iocbq *nvmewqeq; |
---|
.. | .. |
---|
1665 | 2049 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
1666 | 2050 | struct lpfc_sli_ring *pring; |
---|
1667 | 2051 | struct lpfc_iocbq *nvmewqeq; |
---|
| 2052 | + struct lpfc_async_xchg_ctx *ctxp; |
---|
1668 | 2053 | unsigned long iflags; |
---|
1669 | 2054 | int rc; |
---|
1670 | 2055 | |
---|
.. | .. |
---|
1678 | 2063 | list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, |
---|
1679 | 2064 | list); |
---|
1680 | 2065 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
---|
1681 | | - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); |
---|
| 2066 | + ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2; |
---|
| 2067 | + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); |
---|
1682 | 2068 | spin_lock_irqsave(&pring->ring_lock, iflags); |
---|
1683 | 2069 | if (rc == -EBUSY) { |
---|
1684 | 2070 | /* WQ was full again, so put it back on the list */ |
---|
1685 | 2071 | list_add(&nvmewqeq->list, &wq->wqfull_list); |
---|
1686 | 2072 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
---|
1687 | 2073 | return; |
---|
| 2074 | + } |
---|
| 2075 | + if (rc == WQE_SUCCESS) { |
---|
| 2076 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 2077 | + if (ctxp->ts_cmd_nvme) { |
---|
| 2078 | + if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP) |
---|
| 2079 | + ctxp->ts_status_wqput = ktime_get_ns(); |
---|
| 2080 | + else |
---|
| 2081 | + ctxp->ts_data_wqput = ktime_get_ns(); |
---|
| 2082 | + } |
---|
| 2083 | +#endif |
---|
| 2084 | + } else { |
---|
| 2085 | + WARN_ON(rc); |
---|
1688 | 2086 | } |
---|
1689 | 2087 | } |
---|
1690 | 2088 | wq->q_flag &= ~HBA_NVMET_WQFULL; |
---|
.. | .. |
---|
1706 | 2104 | return; |
---|
1707 | 2105 | if (phba->targetport) { |
---|
1708 | 2106 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
1709 | | - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { |
---|
1710 | | - wq = phba->sli4_hba.nvme_wq[qidx]; |
---|
| 2107 | + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { |
---|
| 2108 | + wq = phba->sli4_hba.hdwq[qidx].io_wq; |
---|
1711 | 2109 | lpfc_nvmet_wqfull_flush(phba, wq, NULL); |
---|
1712 | 2110 | } |
---|
1713 | 2111 | tgtp->tport_unreg_cmp = &tport_unreg_cmp; |
---|
1714 | 2112 | nvmet_fc_unregister_targetport(phba->targetport); |
---|
1715 | 2113 | if (!wait_for_completion_timeout(&tport_unreg_cmp, |
---|
1716 | 2114 | msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) |
---|
1717 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, |
---|
1718 | | - "6179 Unreg targetport %p timeout " |
---|
| 2115 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 2116 | + "6179 Unreg targetport x%px timeout " |
---|
1719 | 2117 | "reached.\n", phba->targetport); |
---|
1720 | 2118 | lpfc_nvmet_cleanup_io_context(phba); |
---|
1721 | 2119 | } |
---|
.. | .. |
---|
1724 | 2122 | } |
---|
1725 | 2123 | |
---|
1726 | 2124 | /** |
---|
1727 | | - * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer |
---|
| 2125 | + * lpfc_nvmet_handle_lsreq - Process an NVME LS request |
---|
1728 | 2126 | * @phba: pointer to lpfc hba data structure. |
---|
1729 | | - * @pring: pointer to a SLI ring. |
---|
1730 | | - * @nvmebuf: pointer to lpfc nvme command HBQ data structure. |
---|
| 2127 | + * @axchg: pointer to exchange context for the NVME LS request |
---|
1731 | 2128 | * |
---|
1732 | | - * This routine is used for processing the WQE associated with a unsolicited |
---|
1733 | | - * event. It first determines whether there is an existing ndlp that matches |
---|
1734 | | - * the DID from the unsolicited WQE. If not, it will create a new one with |
---|
1735 | | - * the DID from the unsolicited WQE. The ELS command from the unsolicited |
---|
1736 | | - * WQE is then used to invoke the proper routine and to set up proper state |
---|
1737 | | - * of the discovery state machine. |
---|
1738 | | - **/ |
---|
1739 | | -static void |
---|
1740 | | -lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
---|
1741 | | - struct hbq_dmabuf *nvmebuf) |
---|
| 2129 | + * This routine is used for processing an asynchronously received NVME LS |
---|
| 2130 | + * request. Any remaining validation is done and the LS is then forwarded |
---|
| 2131 | + * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). |
---|
| 2132 | + * |
---|
| 2133 | + * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing) |
---|
| 2134 | + * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done. |
---|
| 2135 | + * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. |
---|
| 2136 | + * |
---|
| 2137 | + * Returns 0 if LS was handled and delivered to the transport |
---|
| 2138 | + * Returns 1 if LS failed to be handled and should be dropped |
---|
| 2139 | + */ |
---|
| 2140 | +int |
---|
| 2141 | +lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, |
---|
| 2142 | + struct lpfc_async_xchg_ctx *axchg) |
---|
1742 | 2143 | { |
---|
1743 | 2144 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
1744 | | - struct lpfc_nvmet_tgtport *tgtp; |
---|
1745 | | - struct fc_frame_header *fc_hdr; |
---|
1746 | | - struct lpfc_nvmet_rcv_ctx *ctxp; |
---|
1747 | | - uint32_t *payload; |
---|
1748 | | - uint32_t size, oxid, sid, rc; |
---|
| 2145 | + struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private; |
---|
| 2146 | + uint32_t *payload = axchg->payload; |
---|
| 2147 | + int rc; |
---|
1749 | 2148 | |
---|
1750 | | - fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); |
---|
1751 | | - oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
---|
1752 | | - |
---|
1753 | | - if (!phba->targetport) { |
---|
1754 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
1755 | | - "6154 LS Drop IO x%x\n", oxid); |
---|
1756 | | - oxid = 0; |
---|
1757 | | - size = 0; |
---|
1758 | | - sid = 0; |
---|
1759 | | - ctxp = NULL; |
---|
1760 | | - goto dropit; |
---|
1761 | | - } |
---|
1762 | | - |
---|
1763 | | - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
1764 | | - payload = (uint32_t *)(nvmebuf->dbuf.virt); |
---|
1765 | | - size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); |
---|
1766 | | - sid = sli4_sid_from_fc_hdr(fc_hdr); |
---|
1767 | | - |
---|
1768 | | - ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); |
---|
1769 | | - if (ctxp == NULL) { |
---|
1770 | | - atomic_inc(&tgtp->rcv_ls_req_drop); |
---|
1771 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
1772 | | - "6155 LS Drop IO x%x: Alloc\n", |
---|
1773 | | - oxid); |
---|
1774 | | -dropit: |
---|
1775 | | - lpfc_nvmeio_data(phba, "NVMET LS DROP: " |
---|
1776 | | - "xri x%x sz %d from %06x\n", |
---|
1777 | | - oxid, size, sid); |
---|
1778 | | - lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
---|
1779 | | - return; |
---|
1780 | | - } |
---|
1781 | | - ctxp->phba = phba; |
---|
1782 | | - ctxp->size = size; |
---|
1783 | | - ctxp->oxid = oxid; |
---|
1784 | | - ctxp->sid = sid; |
---|
1785 | | - ctxp->wqeq = NULL; |
---|
1786 | | - ctxp->state = LPFC_NVMET_STE_LS_RCV; |
---|
1787 | | - ctxp->entry_cnt = 1; |
---|
1788 | | - ctxp->rqb_buffer = (void *)nvmebuf; |
---|
1789 | | - |
---|
1790 | | - lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", |
---|
1791 | | - oxid, size, sid); |
---|
1792 | | - /* |
---|
1793 | | - * The calling sequence should be: |
---|
1794 | | - * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done |
---|
1795 | | - * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. |
---|
1796 | | - */ |
---|
1797 | 2149 | atomic_inc(&tgtp->rcv_ls_req_in); |
---|
1798 | | - rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, |
---|
1799 | | - payload, size); |
---|
| 2150 | + |
---|
| 2151 | + /* |
---|
| 2152 | + * Driver passes the ndlp as the hosthandle argument allowing |
---|
| 2153 | + * the transport to generate LS requests for any associations |
---|
| 2154 | + * that are created. |
---|
| 2155 | + */ |
---|
| 2156 | + rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp, |
---|
| 2157 | + axchg->payload, axchg->size); |
---|
1800 | 2158 | |
---|
1801 | 2159 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, |
---|
1802 | 2160 | "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " |
---|
1803 | | - "%08x %08x %08x\n", size, rc, |
---|
| 2161 | + "%08x %08x %08x\n", axchg->size, rc, |
---|
1804 | 2162 | *payload, *(payload+1), *(payload+2), |
---|
1805 | 2163 | *(payload+3), *(payload+4), *(payload+5)); |
---|
1806 | 2164 | |
---|
1807 | | - if (rc == 0) { |
---|
| 2165 | + if (!rc) { |
---|
1808 | 2166 | atomic_inc(&tgtp->rcv_ls_req_out); |
---|
| 2167 | + return 0; |
---|
| 2168 | + } |
---|
| 2169 | + |
---|
| 2170 | + atomic_inc(&tgtp->rcv_ls_req_drop); |
---|
| 2171 | +#endif |
---|
| 2172 | + return 1; |
---|
| 2173 | +} |
---|
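
The 0/1 return value above is an ownership handoff: on 0, nvmet-fc owns the exchange and the xmt_ls_rsp completion frees the axchg; on 1, the caller still owns it and must drop it. A caller-side sketch (the dispatch code lies outside this hunk, so the cleanup helper named here is hypothetical):

    /* Sketch (illustrative): ls_drop_exchange() is a hypothetical
     * stand-in for whatever cleanup the real dispatcher performs
     * when the LS cannot be delivered to the transport.
     */
    static void dispatch_nvmet_ls(struct lpfc_hba *phba,
                                  struct lpfc_async_xchg_ctx *axchg)
    {
            if (lpfc_nvmet_handle_lsreq(phba, axchg))
                    ls_drop_exchange(phba, axchg);  /* hypothetical */
    }
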
| 2174 | + |
---|
| 2175 | +static void |
---|
| 2176 | +lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) |
---|
| 2177 | +{ |
---|
| 2178 | +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
| 2179 | + struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; |
---|
| 2180 | + struct lpfc_hba *phba = ctxp->phba; |
---|
| 2181 | + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; |
---|
| 2182 | + struct lpfc_nvmet_tgtport *tgtp; |
---|
| 2183 | + uint32_t *payload, qno; |
---|
| 2184 | + uint32_t rc; |
---|
| 2185 | + unsigned long iflags; |
---|
| 2186 | + |
---|
| 2187 | + if (!nvmebuf) { |
---|
| 2188 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 2189 | + "6159 process_rcv_fcp_req, nvmebuf is NULL, " |
---|
| 2190 | + "oxid: x%x flg: x%x state: x%x\n", |
---|
| 2191 | + ctxp->oxid, ctxp->flag, ctxp->state); |
---|
| 2192 | + spin_lock_irqsave(&ctxp->ctxlock, iflags); |
---|
| 2193 | + lpfc_nvmet_defer_release(phba, ctxp); |
---|
| 2194 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); |
---|
| 2195 | + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, |
---|
| 2196 | + ctxp->oxid); |
---|
1809 | 2197 | return; |
---|
1810 | 2198 | } |
---|
1811 | 2199 | |
---|
1812 | | - lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", |
---|
1813 | | - oxid, size, sid); |
---|
| 2200 | + if (ctxp->flag & LPFC_NVME_ABTS_RCV) { |
---|
| 2201 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 2202 | + "6324 IO oxid x%x aborted\n", |
---|
| 2203 | + ctxp->oxid); |
---|
| 2204 | + return; |
---|
| 2205 | + } |
---|
1814 | 2206 | |
---|
1815 | | - atomic_inc(&tgtp->rcv_ls_req_drop); |
---|
1816 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
1817 | | - "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", |
---|
1818 | | - ctxp->oxid, rc); |
---|
| 2207 | + payload = (uint32_t *)(nvmebuf->dbuf.virt); |
---|
| 2208 | + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
| 2209 | + ctxp->flag |= LPFC_NVME_TNOTIFY; |
---|
| 2210 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 2211 | + if (ctxp->ts_isr_cmd) |
---|
| 2212 | + ctxp->ts_cmd_nvme = ktime_get_ns(); |
---|
| 2213 | +#endif |
---|
| 2214 | + /* |
---|
| 2215 | + * The calling sequence should be: |
---|
| 2216 | + * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done |
---|
| 2217 | + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. |
---|
| 2218 | + * When we return from nvmet_fc_rcv_fcp_req, all relevant info |
---|
| 2219 | + * the NVME command / FC header is stored. |
---|
| 2220 | + * A buffer has already been reposted for this IO, so just free |
---|
| 2221 | + * the nvmebuf. |
---|
| 2222 | + */ |
---|
| 2223 | + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req, |
---|
| 2224 | + payload, ctxp->size); |
---|
| 2225 | + /* Process FCP command */ |
---|
| 2226 | + if (rc == 0) { |
---|
| 2227 | + atomic_inc(&tgtp->rcv_fcp_cmd_out); |
---|
| 2228 | + spin_lock_irqsave(&ctxp->ctxlock, iflags); |
---|
| 2229 | + if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) || |
---|
| 2230 | + (nvmebuf != ctxp->rqb_buffer)) { |
---|
| 2231 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); |
---|
| 2232 | + return; |
---|
| 2233 | + } |
---|
| 2234 | + ctxp->rqb_buffer = NULL; |
---|
| 2235 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); |
---|
| 2236 | + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ |
---|
| 2237 | + return; |
---|
| 2238 | + } |
---|
1819 | 2239 | |
---|
1820 | | - /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ |
---|
1821 | | - lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
---|
| 2240 | + /* Processing of FCP command is deferred */ |
---|
| 2241 | + if (rc == -EOVERFLOW) { |
---|
| 2242 | + lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " |
---|
| 2243 | + "from %06x\n", |
---|
| 2244 | + ctxp->oxid, ctxp->size, ctxp->sid); |
---|
| 2245 | + atomic_inc(&tgtp->rcv_fcp_cmd_out); |
---|
| 2246 | + atomic_inc(&tgtp->defer_fod); |
---|
| 2247 | + spin_lock_irqsave(&ctxp->ctxlock, iflags); |
---|
| 2248 | + if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { |
---|
| 2249 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); |
---|
| 2250 | + return; |
---|
| 2251 | + } |
---|
| 2252 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); |
---|
| 2253 | + /* |
---|
| 2254 | + * Post a replacement DMA buffer to RQ and defer |
---|
| 2255 | + * freeing rcv buffer till .defer_rcv callback |
---|
| 2256 | + */ |
---|
| 2257 | + qno = nvmebuf->idx; |
---|
| 2258 | + lpfc_post_rq_buffer( |
---|
| 2259 | + phba, phba->sli4_hba.nvmet_mrq_hdr[qno], |
---|
| 2260 | + phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); |
---|
| 2261 | + return; |
---|
| 2262 | + } |
---|
| 2263 | + ctxp->flag &= ~LPFC_NVME_TNOTIFY; |
---|
| 2264 | + atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
---|
| 2265 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 2266 | + "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", |
---|
| 2267 | + ctxp->oxid, rc, |
---|
| 2268 | + atomic_read(&tgtp->rcv_fcp_cmd_in), |
---|
| 2269 | + atomic_read(&tgtp->rcv_fcp_cmd_out), |
---|
| 2270 | + atomic_read(&tgtp->xmt_fcp_release)); |
---|
| 2271 | + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", |
---|
| 2272 | + ctxp->oxid, ctxp->size, ctxp->sid); |
---|
| 2273 | + spin_lock_irqsave(&ctxp->ctxlock, iflags); |
---|
| 2274 | + lpfc_nvmet_defer_release(phba, ctxp); |
---|
| 2275 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); |
---|
| 2276 | + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); |
---|
| 2277 | +#endif |
---|
| 2278 | +} |
---|
1822 | 2279 | |
---|
1823 | | - atomic_inc(&tgtp->xmt_ls_abort); |
---|
1824 | | - lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); |
---|
| 2280 | +static void |
---|
| 2281 | +lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) |
---|
| 2282 | +{ |
---|
| 2283 | +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
| 2284 | + struct lpfc_nvmet_ctxbuf *ctx_buf = |
---|
| 2285 | + container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); |
---|
| 2286 | + |
---|
| 2287 | + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); |
---|
1825 | 2288 | #endif |
---|
1826 | 2289 | } |
---|
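
This small work function is the other half of the deferral added by this patch: INIT_WORK() binds it once per context buffer in the setup loop earlier, and the receive path later punts to it via queue_work() when the CQ is under load. A sketch of the pairing, assuming those two call sites:

    /* Sketch: bind the handler at ctx-buffer setup, punt at CQ time.
     * queue_work() returns false if this work item is already
     * pending, which the receive path treats as drop-and-abort.
     */
    static void bind_defer_handler(struct lpfc_nvmet_ctxbuf *ctx_buf)
    {
            INIT_WORK(&ctx_buf->defer_work,
                      lpfc_nvmet_fcp_rqst_defer_work);
    }

    static bool punt_rcv_to_wq(struct lpfc_hba *phba,
                               struct lpfc_nvmet_ctxbuf *ctx_buf)
    {
            return queue_work(phba->wq, &ctx_buf->defer_work);
    }
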
1827 | 2290 | |
---|
.. | .. |
---|
1849 | 2312 | else |
---|
1850 | 2313 | get_infop = current_infop->nvmet_ctx_next_cpu; |
---|
1851 | 2314 | |
---|
1852 | | - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { |
---|
| 2315 | + for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { |
---|
1853 | 2316 | if (get_infop == current_infop) { |
---|
1854 | 2317 | get_infop = get_infop->nvmet_ctx_next_cpu; |
---|
1855 | 2318 | continue; |
---|
.. | .. |
---|
1887 | 2350 | * @phba: pointer to lpfc hba data structure. |
---|
1888 | 2351 | * @idx: relative index of MRQ vector |
---|
1889 | 2352 | * @nvmebuf: pointer to lpfc nvme command HBQ data structure. |
---|
| 2353 | + * @isr_timestamp: interrupt service routine timestamp (ktime ns). |
---|
| 2354 | + * @cqflag: cq processing information regarding workload. |
---|
1890 | 2355 | * |
---|
1891 | 2356 | * This routine is used for processing the WQE associated with an unsolicited |
---|
1892 | 2357 | * event. It first determines whether there is an existing ndlp that matches |
---|
.. | .. |
---|
1899 | 2364 | lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, |
---|
1900 | 2365 | uint32_t idx, |
---|
1901 | 2366 | struct rqb_dmabuf *nvmebuf, |
---|
1902 | | - uint64_t isr_timestamp) |
---|
| 2367 | + uint64_t isr_timestamp, |
---|
| 2368 | + uint8_t cqflag) |
---|
1903 | 2369 | { |
---|
1904 | | - struct lpfc_nvmet_rcv_ctx *ctxp; |
---|
| 2370 | + struct lpfc_async_xchg_ctx *ctxp; |
---|
1905 | 2371 | struct lpfc_nvmet_tgtport *tgtp; |
---|
1906 | 2372 | struct fc_frame_header *fc_hdr; |
---|
1907 | 2373 | struct lpfc_nvmet_ctxbuf *ctx_buf; |
---|
1908 | 2374 | struct lpfc_nvmet_ctx_info *current_infop; |
---|
1909 | | - uint32_t *payload; |
---|
1910 | | - uint32_t size, oxid, sid, rc, qno; |
---|
| 2375 | + uint32_t size, oxid, sid, qno; |
---|
1911 | 2376 | unsigned long iflag; |
---|
1912 | 2377 | int current_cpu; |
---|
1913 | | -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
1914 | | - uint32_t id; |
---|
1915 | | -#endif |
---|
1916 | 2378 | |
---|
1917 | 2379 | if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
1918 | 2380 | return; |
---|
1919 | 2381 | |
---|
1920 | 2382 | ctx_buf = NULL; |
---|
1921 | 2383 | if (!nvmebuf || !phba->targetport) { |
---|
1922 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2384 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
1923 | 2385 | "6157 NVMET FCP Drop IO\n"); |
---|
1924 | | - oxid = 0; |
---|
1925 | | - size = 0; |
---|
1926 | | - sid = 0; |
---|
1927 | | - ctxp = NULL; |
---|
1928 | | - goto dropit; |
---|
| 2386 | + if (nvmebuf) |
---|
| 2387 | + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); |
---|
| 2388 | + return; |
---|
1929 | 2389 | } |
---|
1930 | 2390 | |
---|
1931 | 2391 | /* |
---|
.. | .. |
---|
1935 | 2395 | * be empty, thus it would need to be replenished with the |
---|
1936 | 2396 | * context list from another CPU for this MRQ. |
---|
1937 | 2397 | */ |
---|
1938 | | - current_cpu = smp_processor_id(); |
---|
| 2398 | + current_cpu = raw_smp_processor_id(); |
---|
1939 | 2399 | current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); |
---|
1940 | 2400 | spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag); |
---|
1941 | 2401 | if (current_infop->nvmet_ctx_list_cnt) { |
---|
.. | .. |
---|
1952 | 2412 | size = nvmebuf->bytes_recv; |
---|
1953 | 2413 | |
---|
1954 | 2414 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
1955 | | - if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { |
---|
1956 | | - id = smp_processor_id(); |
---|
1957 | | - if (id < LPFC_CHECK_CPU_CNT) |
---|
1958 | | - phba->cpucheck_rcv_io[id]++; |
---|
| 2415 | + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { |
---|
| 2416 | + this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); |
---|
| 2417 | + if (idx != current_cpu) |
---|
| 2418 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, |
---|
| 2419 | + "6703 CPU Check rcv: " |
---|
| 2420 | + "cpu %d expect %d\n", |
---|
| 2421 | + current_cpu, idx); |
---|
1959 | 2422 | } |
---|
1960 | 2423 | #endif |
---|
1961 | 2424 | |
---|
1962 | 2425 | lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", |
---|
1963 | | - oxid, size, smp_processor_id()); |
---|
| 2426 | + oxid, size, raw_smp_processor_id()); |
---|
1964 | 2427 | |
---|
1965 | 2428 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
1966 | 2429 | |
---|
.. | .. |
---|
1984 | 2447 | return; |
---|
1985 | 2448 | } |
---|
1986 | 2449 | |
---|
1987 | | - payload = (uint32_t *)(nvmebuf->dbuf.virt); |
---|
1988 | 2450 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
---|
1989 | 2451 | |
---|
1990 | | - ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; |
---|
1991 | | - if (ctxp->state != LPFC_NVMET_STE_FREE) { |
---|
1992 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2452 | + ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; |
---|
| 2453 | + spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); |
---|
| 2454 | + list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); |
---|
| 2455 | + spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); |
---|
| 2456 | + if (ctxp->state != LPFC_NVME_STE_FREE) { |
---|
| 2457 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
1993 | 2458 | "6414 NVMET Context corrupt %d %d oxid x%x\n", |
---|
1994 | 2459 | ctxp->state, ctxp->entry_cnt, ctxp->oxid); |
---|
1995 | 2460 | } |
---|
1996 | 2461 | ctxp->wqeq = NULL; |
---|
1997 | | - ctxp->txrdy = NULL; |
---|
1998 | 2462 | ctxp->offset = 0; |
---|
1999 | 2463 | ctxp->phba = phba; |
---|
2000 | 2464 | ctxp->size = size; |
---|
2001 | 2465 | ctxp->oxid = oxid; |
---|
2002 | 2466 | ctxp->sid = sid; |
---|
2003 | 2467 | ctxp->idx = idx; |
---|
2004 | | - ctxp->state = LPFC_NVMET_STE_RCV; |
---|
| 2468 | + ctxp->state = LPFC_NVME_STE_RCV; |
---|
2005 | 2469 | ctxp->entry_cnt = 1; |
---|
2006 | 2470 | ctxp->flag = 0; |
---|
2007 | 2471 | ctxp->ctxbuf = ctx_buf; |
---|
2008 | 2472 | ctxp->rqb_buffer = (void *)nvmebuf; |
---|
| 2473 | + ctxp->hdwq = NULL; |
---|
2009 | 2474 | spin_lock_init(&ctxp->ctxlock); |
---|
2010 | 2475 | |
---|
2011 | 2476 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
2012 | | - if (isr_timestamp) { |
---|
| 2477 | + if (isr_timestamp) |
---|
2013 | 2478 | ctxp->ts_isr_cmd = isr_timestamp; |
---|
2014 | | - ctxp->ts_cmd_nvme = ktime_get_ns(); |
---|
2015 | | - ctxp->ts_nvme_data = 0; |
---|
2016 | | - ctxp->ts_data_wqput = 0; |
---|
2017 | | - ctxp->ts_isr_data = 0; |
---|
2018 | | - ctxp->ts_data_nvme = 0; |
---|
2019 | | - ctxp->ts_nvme_status = 0; |
---|
2020 | | - ctxp->ts_status_wqput = 0; |
---|
2021 | | - ctxp->ts_isr_status = 0; |
---|
2022 | | - ctxp->ts_status_nvme = 0; |
---|
2023 | | - } else { |
---|
2024 | | - ctxp->ts_cmd_nvme = 0; |
---|
2025 | | - } |
---|
| 2479 | + ctxp->ts_cmd_nvme = 0; |
---|
| 2480 | + ctxp->ts_nvme_data = 0; |
---|
| 2481 | + ctxp->ts_data_wqput = 0; |
---|
| 2482 | + ctxp->ts_isr_data = 0; |
---|
| 2483 | + ctxp->ts_data_nvme = 0; |
---|
| 2484 | + ctxp->ts_nvme_status = 0; |
---|
| 2485 | + ctxp->ts_status_wqput = 0; |
---|
| 2486 | + ctxp->ts_isr_status = 0; |
---|
| 2487 | + ctxp->ts_status_nvme = 0; |
---|
2026 | 2488 | #endif |
---|
2027 | 2489 | |
---|
2028 | 2490 | atomic_inc(&tgtp->rcv_fcp_cmd_in); |
---|
2029 | | - /* |
---|
2030 | | - * The calling sequence should be: |
---|
2031 | | - * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done |
---|
2032 | | - * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. |
---|
2033 | | - * When we return from nvmet_fc_rcv_fcp_req, all relevant info in |
---|
2034 | | - * the NVME command / FC header is stored, so we are free to repost |
---|
2035 | | - * the buffer. |
---|
2036 | | - */ |
---|
2037 | | - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, |
---|
2038 | | - payload, size); |
---|
2039 | | - |
---|
2040 | | - /* Process FCP command */ |
---|
2041 | | - if (rc == 0) { |
---|
2042 | | - ctxp->rqb_buffer = NULL; |
---|
2043 | | - atomic_inc(&tgtp->rcv_fcp_cmd_out); |
---|
2044 | | - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ |
---|
| 2491 | + /* check for cq processing load */ |
---|
| 2492 | + if (!cqflag) { |
---|
| 2493 | + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); |
---|
2045 | 2494 | return; |
---|
2046 | 2495 | } |
---|
2047 | 2496 | |
---|
2048 | | - /* Processing of FCP command is deferred */ |
---|
2049 | | - if (rc == -EOVERFLOW) { |
---|
2050 | | - /* |
---|
2051 | | - * Post a brand new DMA buffer to RQ and defer |
---|
2052 | | - * freeing rcv buffer till .defer_rcv callback |
---|
2053 | | - */ |
---|
2054 | | - qno = nvmebuf->idx; |
---|
2055 | | - lpfc_post_rq_buffer( |
---|
2056 | | - phba, phba->sli4_hba.nvmet_mrq_hdr[qno], |
---|
2057 | | - phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); |
---|
| 2497 | + if (!queue_work(phba->wq, &ctx_buf->defer_work)) { |
---|
| 2498 | + atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
---|
| 2499 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 2500 | + "6325 Unable to queue work for oxid x%x. " |
---|
| 2501 | + "FCP Drop IO [x%x x%x x%x]\n", |
---|
| 2502 | + ctxp->oxid, |
---|
| 2503 | + atomic_read(&tgtp->rcv_fcp_cmd_in), |
---|
| 2504 | + atomic_read(&tgtp->rcv_fcp_cmd_out), |
---|
| 2505 | + atomic_read(&tgtp->xmt_fcp_release)); |
---|
2058 | 2506 | |
---|
2059 | | - lpfc_nvmeio_data(phba, |
---|
2060 | | - "NVMET RCV BUSY: xri x%x sz %d from %06x\n", |
---|
2061 | | - oxid, size, sid); |
---|
2062 | | - atomic_inc(&tgtp->rcv_fcp_cmd_out); |
---|
2063 | | - atomic_inc(&tgtp->defer_fod); |
---|
2064 | | - return; |
---|
2065 | | - } |
---|
2066 | | - ctxp->rqb_buffer = nvmebuf; |
---|
2067 | | - |
---|
2068 | | - atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
---|
2069 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
2070 | | - "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", |
---|
2071 | | - ctxp->oxid, rc, |
---|
2072 | | - atomic_read(&tgtp->rcv_fcp_cmd_in), |
---|
2073 | | - atomic_read(&tgtp->rcv_fcp_cmd_out), |
---|
2074 | | - atomic_read(&tgtp->xmt_fcp_release)); |
---|
2075 | | -dropit: |
---|
2076 | | - lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", |
---|
2077 | | - oxid, size, sid); |
---|
2078 | | - if (oxid) { |
---|
| 2507 | + spin_lock_irqsave(&ctxp->ctxlock, iflag); |
---|
2079 | 2508 | lpfc_nvmet_defer_release(phba, ctxp); |
---|
| 2509 | + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); |
---|
2080 | 2510 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); |
---|
2081 | | - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ |
---|
2082 | | - return; |
---|
2083 | 2511 | } |
---|
2084 | | - |
---|
2085 | | - if (ctx_buf) |
---|
2086 | | - lpfc_nvmet_ctxbuf_post(phba, ctx_buf); |
---|
2087 | | - |
---|
2088 | | - if (nvmebuf) |
---|
2089 | | - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ |
---|
2090 | | -} |
---|
2091 | | - |
---|
2092 | | -/** |
---|
2093 | | - * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport |
---|
2094 | | - * @phba: pointer to lpfc hba data structure. |
---|
2095 | | - * @pring: pointer to a SLI ring. |
---|
2096 | | - * @nvmebuf: pointer to received nvme data structure. |
---|
2097 | | - * |
---|
2098 | | - * This routine is used to process an unsolicited event received from a SLI |
---|
2099 | | - * (Service Level Interface) ring. The actual processing of the data buffer |
---|
2100 | | - * associated with the unsolicited event is done by invoking the routine |
---|
2101 | | - * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the |
---|
2102 | | - * SLI RQ on which the unsolicited event was received. |
---|
2103 | | - **/ |
---|
2104 | | -void |
---|
2105 | | -lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
---|
2106 | | - struct lpfc_iocbq *piocb) |
---|
2107 | | -{ |
---|
2108 | | - struct lpfc_dmabuf *d_buf; |
---|
2109 | | - struct hbq_dmabuf *nvmebuf; |
---|
2110 | | - |
---|
2111 | | - d_buf = piocb->context2; |
---|
2112 | | - nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
---|
2113 | | - |
---|
2114 | | - if (phba->nvmet_support == 0) { |
---|
2115 | | - lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
---|
2116 | | - return; |
---|
2117 | | - } |
---|
2118 | | - lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf); |
---|
2119 | 2512 | } |
---|
2120 | 2513 | |
---|
2121 | 2514 | /** |
---|
.. | .. |
---|
2123 | 2516 | * @phba: pointer to lpfc hba data structure. |
---|
2124 | 2517 | * @idx: relative index of MRQ vector |
---|
2125 | 2518 | * @nvmebuf: pointer to received nvme data structure. |
---|
| 2519 | + * @isr_timestamp: interrupt service routine timestamp (ktime ns). |
---|
| 2520 | + * @cqflag: cq processing information regarding workload. |
---|
2126 | 2521 | * |
---|
2127 | 2522 | * This routine is used to process an unsolicited event received from a SLI |
---|
2128 | 2523 | * (Service Level Interface) ring. The actual processing of the data buffer |
---|
.. | .. |
---|
2134 | 2529 | lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, |
---|
2135 | 2530 | uint32_t idx, |
---|
2136 | 2531 | struct rqb_dmabuf *nvmebuf, |
---|
2137 | | - uint64_t isr_timestamp) |
---|
| 2532 | + uint64_t isr_timestamp, |
---|
| 2533 | + uint8_t cqflag) |
---|
2138 | 2534 | { |
---|
| 2535 | + if (!nvmebuf) { |
---|
| 2536 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 2537 | + "3167 NVMET FCP Drop IO\n"); |
---|
| 2538 | + return; |
---|
| 2539 | + } |
---|
2139 | 2540 | if (phba->nvmet_support == 0) { |
---|
2140 | 2541 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); |
---|
2141 | 2542 | return; |
---|
2142 | 2543 | } |
---|
2143 | | - lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, |
---|
2144 | | - isr_timestamp); |
---|
| 2544 | + lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag); |
---|
2145 | 2545 | } |
---|
2146 | 2546 | |
---|
2147 | 2547 | /** |
---|
.. | .. |
---|
2171 | 2571 | **/ |
---|
2172 | 2572 | static struct lpfc_iocbq * |
---|
2173 | 2573 | lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, |
---|
2174 | | - struct lpfc_nvmet_rcv_ctx *ctxp, |
---|
| 2574 | + struct lpfc_async_xchg_ctx *ctxp, |
---|
2175 | 2575 | dma_addr_t rspbuf, uint16_t rspsize) |
---|
2176 | 2576 | { |
---|
2177 | 2577 | struct lpfc_nodelist *ndlp; |
---|
.. | .. |
---|
2179 | 2579 | union lpfc_wqe128 *wqe; |
---|
2180 | 2580 | |
---|
2181 | 2581 | if (!lpfc_is_link_up(phba)) { |
---|
2182 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
---|
| 2582 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2183 | 2583 | "6104 NVMET prep LS wqe: link err: " |
---|
2184 | 2584 | "NPORT x%x oxid:x%x ste %d\n", |
---|
2185 | 2585 | ctxp->sid, ctxp->oxid, ctxp->state); |
---|
.. | .. |
---|
2189 | 2589 | /* Allocate buffer for command wqe */ |
---|
2190 | 2590 | nvmewqe = lpfc_sli_get_iocbq(phba); |
---|
2191 | 2591 | if (nvmewqe == NULL) { |
---|
2192 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
---|
| 2592 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2193 | 2593 | "6105 NVMET prep LS wqe: No WQE: " |
---|
2194 | 2594 | "NPORT x%x oxid x%x ste %d\n", |
---|
2195 | 2595 | ctxp->sid, ctxp->oxid, ctxp->state); |
---|
.. | .. |
---|
2200 | 2600 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
---|
2201 | 2601 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
---|
2202 | 2602 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
---|
2203 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
---|
| 2603 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2204 | 2604 | "6106 NVMET prep LS wqe: No ndlp: " |
---|
2205 | 2605 | "NPORT x%x oxid x%x ste %d\n", |
---|
2206 | 2606 | ctxp->sid, ctxp->oxid, ctxp->state); |
---|
.. | .. |
---|
2293 | 2693 | |
---|
2294 | 2694 | static struct lpfc_iocbq * |
---|
2295 | 2695 | lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, |
---|
2296 | | - struct lpfc_nvmet_rcv_ctx *ctxp) |
---|
| 2696 | + struct lpfc_async_xchg_ctx *ctxp) |
---|
2297 | 2697 | { |
---|
2298 | | - struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; |
---|
| 2698 | + struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req; |
---|
2299 | 2699 | struct lpfc_nvmet_tgtport *tgtp; |
---|
2300 | 2700 | struct sli4_sge *sgl; |
---|
2301 | 2701 | struct lpfc_nodelist *ndlp; |
---|
.. | .. |
---|
2303 | 2703 | struct scatterlist *sgel; |
---|
2304 | 2704 | union lpfc_wqe128 *wqe; |
---|
2305 | 2705 | struct ulp_bde64 *bde; |
---|
2306 | | - uint32_t *txrdy; |
---|
2307 | 2706 | dma_addr_t physaddr; |
---|
2308 | | - int i, cnt; |
---|
| 2707 | + int i, cnt, nsegs; |
---|
2309 | 2708 | int do_pbde; |
---|
2310 | 2709 | int xc = 1; |
---|
2311 | 2710 | |
---|
2312 | 2711 | if (!lpfc_is_link_up(phba)) { |
---|
2313 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2712 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2314 | 2713 | "6107 NVMET prep FCP wqe: link err:" |
---|
2315 | 2714 | "NPORT x%x oxid x%x ste %d\n", |
---|
2316 | 2715 | ctxp->sid, ctxp->oxid, ctxp->state); |
---|
.. | .. |
---|
2321 | 2720 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
---|
2322 | 2721 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
---|
2323 | 2722 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
---|
2324 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2723 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2325 | 2724 | "6108 NVMET prep FCP wqe: no ndlp: " |
---|
2326 | 2725 | "NPORT x%x oxid x%x ste %d\n", |
---|
2327 | 2726 | ctxp->sid, ctxp->oxid, ctxp->state); |
---|
.. | .. |
---|
2329 | 2728 | } |
---|
2330 | 2729 | |
---|
2331 | 2730 | if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { |
---|
2332 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2731 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2333 | 2732 | "6109 NVMET prep FCP wqe: seg cnt err: " |
---|
2334 | 2733 | "NPORT x%x oxid x%x ste %d cnt %d\n", |
---|
2335 | 2734 | ctxp->sid, ctxp->oxid, ctxp->state, |
---|
2336 | 2735 | phba->cfg_nvme_seg_cnt); |
---|
2337 | 2736 | return NULL; |
---|
2338 | 2737 | } |
---|
| 2738 | + nsegs = rsp->sg_cnt; |
---|
2339 | 2739 | |
---|
2340 | 2740 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
2341 | 2741 | nvmewqe = ctxp->wqeq; |
---|
.. | .. |
---|
2343 | 2743 | /* Allocate buffer for command wqe */ |
---|
2344 | 2744 | nvmewqe = ctxp->ctxbuf->iocbq; |
---|
2345 | 2745 | if (nvmewqe == NULL) { |
---|
2346 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2746 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2347 | 2747 | "6110 NVMET prep FCP wqe: No " |
---|
2348 | 2748 | "WQE: NPORT x%x oxid x%x ste %d\n", |
---|
2349 | 2749 | ctxp->sid, ctxp->oxid, ctxp->state); |
---|
.. | .. |
---|
2356 | 2756 | } |
---|
2357 | 2757 | |
---|
2358 | 2758 | /* Sanity check */ |
---|
2359 | | - if (((ctxp->state == LPFC_NVMET_STE_RCV) && |
---|
| 2759 | + if (((ctxp->state == LPFC_NVME_STE_RCV) && |
---|
2360 | 2760 | (ctxp->entry_cnt == 1)) || |
---|
2361 | | - (ctxp->state == LPFC_NVMET_STE_DATA)) { |
---|
| 2761 | + (ctxp->state == LPFC_NVME_STE_DATA)) { |
---|
2362 | 2762 | wqe = &nvmewqe->wqe; |
---|
2363 | 2763 | } else { |
---|
2364 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 2764 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2365 | 2765 | "6111 Wrong state NVMET FCP: %d cnt %d\n", |
---|
2366 | 2766 | ctxp->state, ctxp->entry_cnt); |
---|
2367 | 2767 | return NULL; |
---|
.. | .. |
---|
2465 | 2865 | &lpfc_treceive_cmd_template.words[3], |
---|
2466 | 2866 | sizeof(uint32_t) * 9); |
---|
2467 | 2867 | |
---|
2468 | | - /* Words 0 - 2 : The first sg segment */ |
---|
2469 | | - txrdy = dma_pool_alloc(phba->txrdy_payload_pool, |
---|
2470 | | - GFP_KERNEL, &physaddr); |
---|
2471 | | - if (!txrdy) { |
---|
2472 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
2473 | | - "6041 Bad txrdy buffer: oxid x%x\n", |
---|
2474 | | - ctxp->oxid); |
---|
2475 | | - return NULL; |
---|
2476 | | - } |
---|
2477 | | - ctxp->txrdy = txrdy; |
---|
2478 | | - ctxp->txrdy_phys = physaddr; |
---|
2479 | | - wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
---|
2480 | | - wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; |
---|
2481 | | - wqe->fcp_treceive.bde.addrLow = |
---|
2482 | | - cpu_to_le32(putPaddrLow(physaddr)); |
---|
2483 | | - wqe->fcp_treceive.bde.addrHigh = |
---|
2484 | | - cpu_to_le32(putPaddrHigh(physaddr)); |
---|
| 2868 | + /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */ |
---|
| 2869 | + wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP; |
---|
| 2870 | + wqe->fcp_treceive.bde.tus.f.bdeSize = 0; |
---|
| 2871 | + wqe->fcp_treceive.bde.addrLow = 0; |
---|
| 2872 | + wqe->fcp_treceive.bde.addrHigh = 0; |
---|
2485 | 2873 | |
---|
2486 | 2874 | /* Word 4 */ |
---|
2487 | 2875 | wqe->fcp_treceive.relative_offset = ctxp->offset; |
---|
.. | .. |
---|
2516 | 2904 | /* Word 12 */ |
---|
2517 | 2905 | wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; |
---|
2518 | 2906 | |
---|
2519 | | - /* Setup 1 TXRDY and 1 SKIP SGE */ |
---|
2520 | | - txrdy[0] = 0; |
---|
2521 | | - txrdy[1] = cpu_to_be32(rsp->transfer_length); |
---|
2522 | | - txrdy[2] = 0; |
---|
2523 | | - |
---|
2524 | | - sgl->addr_hi = putPaddrHigh(physaddr); |
---|
2525 | | - sgl->addr_lo = putPaddrLow(physaddr); |
---|
| 2907 | + /* Setup 2 SKIP SGEs */ |
---|
| 2908 | + sgl->addr_hi = 0; |
---|
| 2909 | + sgl->addr_lo = 0; |
---|
2526 | 2910 | sgl->word2 = 0; |
---|
2527 | | - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); |
---|
| 2911 | + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
---|
2528 | 2912 | sgl->word2 = cpu_to_le32(sgl->word2); |
---|
2529 | | - sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); |
---|
| 2913 | + sgl->sge_len = 0; |
---|
2530 | 2914 | sgl++; |
---|
2531 | 2915 | sgl->addr_hi = 0; |
---|
2532 | 2916 | sgl->addr_lo = 0; |
---|
.. | .. |
---|
2591 | 2975 | wqe->fcp_trsp.rsvd_12_15[0] = 0; |
---|
2592 | 2976 | |
---|
2593 | 2977 | /* Use rspbuf, NOT sg list */ |
---|
2594 | | - rsp->sg_cnt = 0; |
---|
| 2978 | + nsegs = 0; |
---|
2595 | 2979 | sgl->word2 = 0; |
---|
2596 | 2980 | atomic_inc(&tgtp->xmt_fcp_rsp); |
---|
2597 | 2981 | break; |
---|
.. | .. |
---|
2608 | 2992 | nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; |
---|
2609 | 2993 | nvmewqe->context1 = ndlp; |
---|
2610 | 2994 | |
---|
2611 | | - for (i = 0; i < rsp->sg_cnt; i++) { |
---|
2612 | | - sgel = &rsp->sg[i]; |
---|
| 2995 | + for_each_sg(rsp->sg, sgel, nsegs, i) { |
---|
2613 | 2996 | physaddr = sg_dma_address(sgel); |
---|
2614 | 2997 | cnt = sg_dma_len(sgel); |
---|
2615 | 2998 | sgl->addr_hi = putPaddrHigh(physaddr); |
---|
.. | .. |
---|
2638 | 3021 | sgl++; |
---|
2639 | 3022 | ctxp->offset += cnt; |
---|
2640 | 3023 | } |
---|
2641 | | - ctxp->state = LPFC_NVMET_STE_DATA; |
---|
| 3024 | + ctxp->state = LPFC_NVME_STE_DATA; |
---|
2642 | 3025 | ctxp->entry_cnt++; |
---|
2643 | 3026 | return nvmewqe; |
---|
2644 | 3027 | } |
---|
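
The SGE fill loop above now iterates with for_each_sg() over the locally cached nsegs rather than mutating rsp->sg_cnt. A sketch of the same traversal pattern in isolation:

    /* Sketch: walk a DMA-mapped scatterlist the way the rewritten
     * fill loop does; sg_dma_address() would feed the SGE address
     * words and sg_dma_len() the length field.
     */
    static u32 total_sg_bytes(struct scatterlist *sg, int nsegs)
    {
            struct scatterlist *sgel;
            u32 total = 0;
            int i;

            for_each_sg(sg, sgel, nsegs, i)
                    total += sg_dma_len(sgel);
            return total;
    }
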
.. | .. |
---|
2657 | 3040 | lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, |
---|
2658 | 3041 | struct lpfc_wcqe_complete *wcqe) |
---|
2659 | 3042 | { |
---|
2660 | | - struct lpfc_nvmet_rcv_ctx *ctxp; |
---|
| 3043 | + struct lpfc_async_xchg_ctx *ctxp; |
---|
2661 | 3044 | struct lpfc_nvmet_tgtport *tgtp; |
---|
2662 | | - uint32_t status, result; |
---|
| 3045 | + uint32_t result; |
---|
2663 | 3046 | unsigned long flags; |
---|
2664 | 3047 | bool released = false; |
---|
2665 | 3048 | |
---|
2666 | 3049 | ctxp = cmdwqe->context2; |
---|
2667 | | - status = bf_get(lpfc_wcqe_c_status, wcqe); |
---|
2668 | 3050 | result = wcqe->parameter; |
---|
2669 | 3051 | |
---|
2670 | 3052 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
2671 | | - if (ctxp->flag & LPFC_NVMET_ABORT_OP) |
---|
| 3053 | + if (ctxp->flag & LPFC_NVME_ABORT_OP) |
---|
2672 | 3054 | atomic_inc(&tgtp->xmt_fcp_abort_cmpl); |
---|
2673 | 3055 | |
---|
2674 | | - ctxp->state = LPFC_NVMET_STE_DONE; |
---|
| 3056 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
| 3057 | + ctxp->state = LPFC_NVME_STE_DONE; |
---|
2675 | 3058 | |
---|
2676 | 3059 | /* Check if we already received a free context call |
---|
2677 | 3060 | * and we have completed processing an abort situation. |
---|
2678 | 3061 | */ |
---|
2679 | | - spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
2680 | | - if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && |
---|
2681 | | - !(ctxp->flag & LPFC_NVMET_XBUSY)) { |
---|
2682 | | - list_del(&ctxp->list); |
---|
| 3062 | + if ((ctxp->flag & LPFC_NVME_CTX_RLS) && |
---|
| 3063 | + !(ctxp->flag & LPFC_NVME_XBUSY)) { |
---|
| 3064 | + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
| 3065 | + list_del_init(&ctxp->list); |
---|
| 3066 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
2683 | 3067 | released = true; |
---|
2684 | 3068 | } |
---|
2685 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3069 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
2686 | 3070 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
2687 | 3071 | atomic_inc(&tgtp->xmt_abort_rsp); |
---|
2688 | 3072 | |
---|
2689 | 3073 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
2690 | | - "6165 ABORT cmpl: xri x%x flg x%x (%d) " |
---|
| 3074 | + "6165 ABORT cmpl: oxid x%x flg x%x (%d) " |
---|
2691 | 3075 | "WCQE: %08x %08x %08x %08x\n", |
---|
2692 | 3076 | ctxp->oxid, ctxp->flag, released, |
---|
2693 | 3077 | wcqe->word0, wcqe->total_data_placed, |
---|
.. | .. |
---|
2706 | 3090 | lpfc_sli_release_iocbq(phba, cmdwqe); |
---|
2707 | 3091 | |
---|
2708 | 3092 | /* Since iaab/iaar are NOT set, there is no work left. |
---|
2709 | | - * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted |
---|
| 3093 | + * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted |
---|
2710 | 3094 | * should have been called already. |
---|
2711 | 3095 | */ |
---|
2712 | 3096 | } |
---|
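Two locking changes land in the completion above: the context lock is now taken before the state and flag updates rather than only around the release check, and removal from the context list moves under its own abts_nvmet_buf_list_lock, nested inside the irq-saving outer lock (the inner lock can be a plain spin_lock because interrupts are already disabled). list_del_init() also replaces list_del(), which is what makes the later "did we release it" checks safe. A sketch of why, using only the generic list API (names are illustrative):

    #include <linux/kernel.h>
    #include <linux/list.h>

    static void sketch_release_entry(struct list_head *entry)
    {
            /* list_del() poisons the node's links, so a later
             * list_empty() test or second deletion is undefined.
             * list_del_init() leaves the node as a valid empty
             * list that can be tested again.
             */
            list_del_init(entry);
            WARN_ON(!list_empty(entry));    /* never fires: node is empty */
    }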
.. | .. |
---|
2725 | 3109 | lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, |
---|
2726 | 3110 | struct lpfc_wcqe_complete *wcqe) |
---|
2727 | 3111 | { |
---|
2728 | | - struct lpfc_nvmet_rcv_ctx *ctxp; |
---|
| 3112 | + struct lpfc_async_xchg_ctx *ctxp; |
---|
2729 | 3113 | struct lpfc_nvmet_tgtport *tgtp; |
---|
2730 | 3114 | unsigned long flags; |
---|
2731 | | - uint32_t status, result; |
---|
| 3115 | + uint32_t result; |
---|
2732 | 3116 | bool released = false; |
---|
2733 | 3117 | |
---|
2734 | 3118 | ctxp = cmdwqe->context2; |
---|
2735 | | - status = bf_get(lpfc_wcqe_c_status, wcqe); |
---|
2736 | 3119 | result = wcqe->parameter; |
---|
2737 | 3120 | |
---|
2738 | 3121 | if (!ctxp) { |
---|
.. | .. |
---|
2745 | 3128 | } |
---|
2746 | 3129 | |
---|
2747 | 3130 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
2748 | | - if (ctxp->flag & LPFC_NVMET_ABORT_OP) |
---|
| 3131 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
| 3132 | + if (ctxp->flag & LPFC_NVME_ABORT_OP) |
---|
2749 | 3133 | atomic_inc(&tgtp->xmt_fcp_abort_cmpl); |
---|
2750 | 3134 | |
---|
2751 | 3135 | /* Sanity check */ |
---|
2752 | | - if (ctxp->state != LPFC_NVMET_STE_ABORT) { |
---|
2753 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3136 | + if (ctxp->state != LPFC_NVME_STE_ABORT) { |
---|
| 3137 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2754 | 3138 | "6112 ABTS Wrong state:%d oxid x%x\n", |
---|
2755 | 3139 | ctxp->state, ctxp->oxid); |
---|
2756 | 3140 | } |
---|
.. | .. |
---|
2758 | 3142 | /* Check if we already received a free context call |
---|
2759 | 3143 | * and we have completed processing an abort situation. |
---|
2760 | 3144 | */ |
---|
2761 | | - ctxp->state = LPFC_NVMET_STE_DONE; |
---|
2762 | | - spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
2763 | | - if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && |
---|
2764 | | - !(ctxp->flag & LPFC_NVMET_XBUSY)) { |
---|
2765 | | - list_del(&ctxp->list); |
---|
| 3145 | + ctxp->state = LPFC_NVME_STE_DONE; |
---|
| 3146 | + if ((ctxp->flag & LPFC_NVME_CTX_RLS) && |
---|
| 3147 | + !(ctxp->flag & LPFC_NVME_XBUSY)) { |
---|
| 3148 | + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
| 3149 | + list_del_init(&ctxp->list); |
---|
| 3150 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
2766 | 3151 | released = true; |
---|
2767 | 3152 | } |
---|
2768 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3153 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
2769 | 3154 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
2770 | 3155 | atomic_inc(&tgtp->xmt_abort_rsp); |
---|
2771 | 3156 | |
---|
2772 | 3157 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
2773 | | - "6316 ABTS cmpl xri x%x flg x%x (%x) " |
---|
| 3158 | + "6316 ABTS cmpl oxid x%x flg x%x (%x) " |
---|
2774 | 3159 | "WCQE: %08x %08x %08x %08x\n", |
---|
2775 | 3160 | ctxp->oxid, ctxp->flag, released, |
---|
2776 | 3161 | wcqe->word0, wcqe->total_data_placed, |
---|
.. | .. |
---|
2786 | 3171 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
---|
2787 | 3172 | |
---|
2788 | 3173 | /* Since iaab/iaar are NOT set, there is no work left. |
---|
2789 | | - * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted |
---|
| 3174 | + * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted |
---|
2790 | 3175 | * should have been called already. |
---|
2791 | 3176 | */ |
---|
2792 | 3177 | } |
---|
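A pattern repeated through the rest of the patch: every KERN_ERR message moves from a per-subsystem mask (LOG_NVME_ABTS, LOG_NVME_IOERR, LOG_NVME) to LOG_TRACE_EVENT, which lpfc routes to its internal trace buffer in addition to the console, so error events are captured even when the user has not enabled the NVME log masks. Shape of the change, shown on one message from this hunk:

    /* before: only visible if the user enabled LOG_NVME_ABTS */
    lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                    "6112 ABTS Wrong state:%d oxid x%x\n",
                    ctxp->state, ctxp->oxid);

    /* after: also recorded in the always-on trace event log */
    lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                    "6112 ABTS Wrong state:%d oxid x%x\n",
                    ctxp->state, ctxp->oxid);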
.. | .. |
---|
2805 | 3190 | lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, |
---|
2806 | 3191 | struct lpfc_wcqe_complete *wcqe) |
---|
2807 | 3192 | { |
---|
2808 | | - struct lpfc_nvmet_rcv_ctx *ctxp; |
---|
| 3193 | + struct lpfc_async_xchg_ctx *ctxp; |
---|
2809 | 3194 | struct lpfc_nvmet_tgtport *tgtp; |
---|
2810 | | - uint32_t status, result; |
---|
| 3195 | + uint32_t result; |
---|
2811 | 3196 | |
---|
2812 | 3197 | ctxp = cmdwqe->context2; |
---|
2813 | | - status = bf_get(lpfc_wcqe_c_status, wcqe); |
---|
2814 | 3198 | result = wcqe->parameter; |
---|
2815 | 3199 | |
---|
2816 | | - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
2817 | | - atomic_inc(&tgtp->xmt_ls_abort_cmpl); |
---|
| 3200 | + if (phba->nvmet_support) { |
---|
| 3201 | + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
| 3202 | + atomic_inc(&tgtp->xmt_ls_abort_cmpl); |
---|
| 3203 | + } |
---|
2818 | 3204 | |
---|
2819 | 3205 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
2820 | | - "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n", |
---|
| 3206 | + "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n", |
---|
2821 | 3207 | ctxp, wcqe->word0, wcqe->total_data_placed, |
---|
2822 | 3208 | result, wcqe->word3); |
---|
2823 | 3209 | |
---|
2824 | 3210 | if (!ctxp) { |
---|
2825 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3211 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2826 | 3212 | "6415 NVMET LS Abort No ctx: WCQE: " |
---|
2827 | 3213 | "%08x %08x %08x %08x\n", |
---|
2828 | 3214 | wcqe->word0, wcqe->total_data_placed, |
---|
.. | .. |
---|
2832 | 3218 | return; |
---|
2833 | 3219 | } |
---|
2834 | 3220 | |
---|
2835 | | - if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) { |
---|
2836 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 3221 | + if (ctxp->state != LPFC_NVME_STE_LS_ABORT) { |
---|
| 3222 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2837 | 3223 | "6416 NVMET LS abort cmpl state mismatch: " |
---|
2838 | 3224 | "oxid x%x: %d %d\n", |
---|
2839 | 3225 | ctxp->oxid, ctxp->state, ctxp->entry_cnt); |
---|
.. | .. |
---|
2847 | 3233 | |
---|
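From here on the LS abort machinery is shared between target and initiator roles (note the lpfc_nvme_ prefix on the renamed entry point further below). On the initiator side phba->targetport is NULL and no tgtp statistics block exists, hence the repeated guards added in these hunks. The counter pattern in isolation (a minimal sketch, function name illustrative):

    static void sketch_count_ls_abort_cmpl(struct lpfc_hba *phba)
    {
            struct lpfc_nvmet_tgtport *tgtp = NULL;

            /* Target-mode statistics exist only when an nvmet
             * targetport was registered; these LS paths now also
             * run in host mode, where phba->targetport is NULL.
             */
            if (phba->nvmet_support && phba->targetport)
                    tgtp = (struct lpfc_nvmet_tgtport *)
                            phba->targetport->private;
            if (tgtp)
                    atomic_inc(&tgtp->xmt_ls_abort_cmpl);
    }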
2848 | 3234 | static int |
---|
2849 | 3235 | lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, |
---|
2850 | | - struct lpfc_nvmet_rcv_ctx *ctxp, |
---|
| 3236 | + struct lpfc_async_xchg_ctx *ctxp, |
---|
2851 | 3237 | uint32_t sid, uint16_t xri) |
---|
2852 | 3238 | { |
---|
2853 | | - struct lpfc_nvmet_tgtport *tgtp; |
---|
| 3239 | + struct lpfc_nvmet_tgtport *tgtp = NULL; |
---|
2854 | 3240 | struct lpfc_iocbq *abts_wqeq; |
---|
2855 | 3241 | union lpfc_wqe128 *wqe_abts; |
---|
2856 | 3242 | struct lpfc_nodelist *ndlp; |
---|
.. | .. |
---|
2859 | 3245 | "6067 ABTS: sid %x xri x%x/x%x\n", |
---|
2860 | 3246 | sid, xri, ctxp->wqeq->sli4_xritag); |
---|
2861 | 3247 | |
---|
2862 | | - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
| 3248 | + if (phba->nvmet_support && phba->targetport) |
---|
| 3249 | + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
2863 | 3250 | |
---|
2864 | 3251 | ndlp = lpfc_findnode_did(phba->pport, sid); |
---|
2865 | 3252 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
---|
2866 | 3253 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
---|
2867 | 3254 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
---|
2868 | | - atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
2869 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3255 | + if (tgtp) |
---|
| 3256 | + atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
| 3257 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2870 | 3258 | "6134 Drop ABTS - wrong NDLP state x%x.\n", |
---|
2871 | 3259 | (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); |
---|
2872 | 3260 | |
---|
.. | .. |
---|
2941 | 3329 | |
---|
2942 | 3330 | static int |
---|
2943 | 3331 | lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, |
---|
2944 | | - struct lpfc_nvmet_rcv_ctx *ctxp, |
---|
| 3332 | + struct lpfc_async_xchg_ctx *ctxp, |
---|
2945 | 3333 | uint32_t sid, uint16_t xri) |
---|
2946 | 3334 | { |
---|
2947 | 3335 | struct lpfc_nvmet_tgtport *tgtp; |
---|
2948 | 3336 | struct lpfc_iocbq *abts_wqeq; |
---|
2949 | | - union lpfc_wqe128 *abts_wqe; |
---|
2950 | 3337 | struct lpfc_nodelist *ndlp; |
---|
2951 | 3338 | unsigned long flags; |
---|
| 3339 | + u8 opt; |
---|
2952 | 3340 | int rc; |
---|
2953 | 3341 | |
---|
2954 | 3342 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
.. | .. |
---|
2962 | 3350 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
---|
2963 | 3351 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
---|
2964 | 3352 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
2965 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3353 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2966 | 3354 | "6160 Drop ABORT - wrong NDLP state x%x.\n", |
---|
2967 | 3355 | (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); |
---|
2968 | 3356 | |
---|
2969 | 3357 | /* No failure to an ABTS request. */ |
---|
2970 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3358 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
| 3359 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
| 3360 | + spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
2971 | 3361 | return 0; |
---|
2972 | 3362 | } |
---|
2973 | 3363 | |
---|
2974 | 3364 | /* Issue ABTS for this WQE based on iotag */ |
---|
2975 | 3365 | ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); |
---|
| 3366 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
2976 | 3367 | if (!ctxp->abort_wqeq) { |
---|
2977 | 3368 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
2978 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3369 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
2979 | 3370 | "6161 ABORT failed: No wqeqs: " |
---|
2980 | 3371 | "xri: x%x\n", ctxp->oxid); |
---|
2981 | 3372 | /* No failure to an ABTS request. */ |
---|
2982 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3373 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
| 3374 | + spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
2983 | 3375 | return 0; |
---|
2984 | 3376 | } |
---|
2985 | 3377 | abts_wqeq = ctxp->abort_wqeq; |
---|
2986 | | - abts_wqe = &abts_wqeq->wqe; |
---|
2987 | | - ctxp->state = LPFC_NVMET_STE_ABORT; |
---|
| 3378 | + ctxp->state = LPFC_NVME_STE_ABORT; |
---|
| 3379 | + opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0; |
---|
| 3380 | + spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
2988 | 3381 | |
---|
2989 | 3382 | /* Announce entry to new IO submit field. */ |
---|
2990 | 3383 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
---|
.. | .. |
---|
2997 | 3390 | */ |
---|
2998 | 3391 | spin_lock_irqsave(&phba->hbalock, flags); |
---|
2999 | 3392 | /* driver queued commands are in process of being flushed */ |
---|
3000 | | - if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { |
---|
| 3393 | + if (phba->hba_flag & HBA_IOQ_FLUSH) { |
---|
3001 | 3394 | spin_unlock_irqrestore(&phba->hbalock, flags); |
---|
3002 | 3395 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
3003 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, |
---|
| 3396 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3004 | 3397 | "6163 Driver in reset cleanup - flushing " |
---|
3005 | 3398 | "NVME Req now. hba_flag x%x oxid x%x\n", |
---|
3006 | 3399 | phba->hba_flag, ctxp->oxid); |
---|
3007 | 3400 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
---|
3008 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3401 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
| 3402 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
| 3403 | + spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
3009 | 3404 | return 0; |
---|
3010 | 3405 | } |
---|
3011 | 3406 | |
---|
.. | .. |
---|
3013 | 3408 | if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { |
---|
3014 | 3409 | spin_unlock_irqrestore(&phba->hbalock, flags); |
---|
3015 | 3410 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
3016 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, |
---|
| 3411 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3017 | 3412 | "6164 Outstanding NVME I/O Abort Request " |
---|
3018 | 3413 | "still pending on oxid x%x\n", |
---|
3019 | 3414 | ctxp->oxid); |
---|
3020 | 3415 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
---|
3021 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3416 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
| 3417 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
| 3418 | + spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
3022 | 3419 | return 0; |
---|
3023 | 3420 | } |
---|
3024 | 3421 | |
---|
3025 | 3422 | /* Ready - mark outstanding as aborted by driver. */ |
---|
3026 | 3423 | abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; |
---|
3027 | 3424 | |
---|
3028 | | - /* WQEs are reused. Clear stale data and set key fields to |
---|
3029 | | - * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. |
---|
3030 | | - */ |
---|
3031 | | - memset(abts_wqe, 0, sizeof(union lpfc_wqe)); |
---|
3032 | | - |
---|
3033 | | - /* word 3 */ |
---|
3034 | | - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); |
---|
3035 | | - |
---|
3036 | | - /* word 7 */ |
---|
3037 | | - bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); |
---|
3038 | | - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
---|
3039 | | - |
---|
3040 | | - /* word 8 - tell the FW to abort the IO associated with this |
---|
3041 | | - * outstanding exchange ID. |
---|
3042 | | - */ |
---|
3043 | | - abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; |
---|
3044 | | - |
---|
3045 | | - /* word 9 - this is the iotag for the abts_wqe completion. */ |
---|
3046 | | - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, |
---|
3047 | | - abts_wqeq->iotag); |
---|
3048 | | - |
---|
3049 | | - /* word 10 */ |
---|
3050 | | - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); |
---|
3051 | | - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); |
---|
3052 | | - |
---|
3053 | | - /* word 11 */ |
---|
3054 | | - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); |
---|
3055 | | - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); |
---|
3056 | | - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
---|
| 3425 | + lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt); |
---|
3057 | 3426 | |
---|
3058 | 3427 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
---|
3059 | 3428 | abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; |
---|
3060 | 3429 | abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; |
---|
3061 | | - abts_wqeq->iocb_cmpl = 0; |
---|
| 3430 | + abts_wqeq->iocb_cmpl = NULL; |
---|
3062 | 3431 | abts_wqeq->iocb_flag |= LPFC_IO_NVME; |
---|
3063 | 3432 | abts_wqeq->context2 = ctxp; |
---|
3064 | 3433 | abts_wqeq->vport = phba->pport; |
---|
3065 | | - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); |
---|
| 3434 | + if (!ctxp->hdwq) |
---|
| 3435 | + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; |
---|
| 3436 | + |
---|
| 3437 | + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); |
---|
3066 | 3438 | spin_unlock_irqrestore(&phba->hbalock, flags); |
---|
3067 | 3439 | if (rc == WQE_SUCCESS) { |
---|
3068 | 3440 | atomic_inc(&tgtp->xmt_abort_sol); |
---|
.. | .. |
---|
3070 | 3442 | } |
---|
3071 | 3443 | |
---|
3072 | 3444 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
3073 | | - ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
---|
| 3445 | + spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
| 3446 | + ctxp->flag &= ~LPFC_NVME_ABORT_OP; |
---|
| 3447 | + spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
3074 | 3448 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
---|
3075 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3449 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3076 | 3450 | "6166 Failed ABORT issue_wqe with status x%x " |
---|
3077 | 3451 | "for oxid x%x.\n", |
---|
3078 | 3452 | rc, ctxp->oxid); |
---|
3079 | 3453 | return 1; |
---|
3080 | 3454 | } |
---|
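The word-by-word ABORT_XRI_CX setup deleted above is consolidated into lpfc_nvme_prep_abort_wqe(), a helper the initiator-side abort path can share, and the submission now targets the context's hardware queue instead of a fixed ring. The new `opt` argument carries INHIBIT_ABORT when the initiator already sent its own ABTS (LPFC_NVME_ABTS_RCV), so the firmware suppresses emitting another one on the wire. A sketch of the helper, reconstructed from the deleted field settings; the real function may map `opt` onto the ia/iaab/iaar bits differently:

    static void sketch_prep_abort_wqe(struct lpfc_iocbq *pwqeq,
                                      u16 xritag, u8 opt)
    {
            union lpfc_wqe128 *wqe = &pwqeq->wqe;

            /* WQEs are reused - clear stale data first */
            memset(wqe, 0, sizeof(*wqe));

            if (opt & INHIBIT_ABORT)                /* assumed mapping */
                    bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
            /* word 3: abort by exchange tag */
            bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
            /* word 7 */
            bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
            /* word 8: exchange the firmware should abort */
            wqe->abort_cmd.wqe_com.abort_tag = xritag;
            /* word 9: iotag for the abort completion */
            bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
            /* words 10-11 */
            bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
            bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
            bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
            bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
            bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
    }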
3081 | 3455 | |
---|
3082 | | - |
---|
3083 | 3456 | static int |
---|
3084 | 3457 | lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, |
---|
3085 | | - struct lpfc_nvmet_rcv_ctx *ctxp, |
---|
| 3458 | + struct lpfc_async_xchg_ctx *ctxp, |
---|
3086 | 3459 | uint32_t sid, uint16_t xri) |
---|
3087 | 3460 | { |
---|
3088 | 3461 | struct lpfc_nvmet_tgtport *tgtp; |
---|
3089 | 3462 | struct lpfc_iocbq *abts_wqeq; |
---|
3090 | 3463 | unsigned long flags; |
---|
| 3464 | + bool released = false; |
---|
3091 | 3465 | int rc; |
---|
3092 | 3466 | |
---|
3093 | 3467 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
.. | .. |
---|
3096 | 3470 | ctxp->wqeq->hba_wqidx = 0; |
---|
3097 | 3471 | } |
---|
3098 | 3472 | |
---|
3099 | | - if (ctxp->state == LPFC_NVMET_STE_FREE) { |
---|
3100 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 3473 | + if (ctxp->state == LPFC_NVME_STE_FREE) { |
---|
| 3474 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3101 | 3475 | "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", |
---|
3102 | 3476 | ctxp->state, ctxp->entry_cnt, ctxp->oxid); |
---|
3103 | 3477 | rc = WQE_BUSY; |
---|
3104 | 3478 | goto aerr; |
---|
3105 | 3479 | } |
---|
3106 | | - ctxp->state = LPFC_NVMET_STE_ABORT; |
---|
| 3480 | + ctxp->state = LPFC_NVME_STE_ABORT; |
---|
3107 | 3481 | ctxp->entry_cnt++; |
---|
3108 | 3482 | rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); |
---|
3109 | 3483 | if (rc == 0) |
---|
.. | .. |
---|
3114 | 3488 | abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; |
---|
3115 | 3489 | abts_wqeq->iocb_cmpl = NULL; |
---|
3116 | 3490 | abts_wqeq->iocb_flag |= LPFC_IO_NVMET; |
---|
3117 | | - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); |
---|
| 3491 | + if (!ctxp->hdwq) |
---|
| 3492 | + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; |
---|
| 3493 | + |
---|
| 3494 | + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); |
---|
3118 | 3495 | spin_unlock_irqrestore(&phba->hbalock, flags); |
---|
3119 | 3496 | if (rc == WQE_SUCCESS) { |
---|
3120 | 3497 | return 0; |
---|
.. | .. |
---|
3122 | 3499 | |
---|
3123 | 3500 | aerr: |
---|
3124 | 3501 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
---|
3125 | | - if (ctxp->flag & LPFC_NVMET_CTX_RLS) |
---|
3126 | | - list_del(&ctxp->list); |
---|
3127 | | - ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS); |
---|
| 3502 | + if (ctxp->flag & LPFC_NVME_CTX_RLS) { |
---|
| 3503 | + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
| 3504 | + list_del_init(&ctxp->list); |
---|
| 3505 | + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
| 3506 | + released = true; |
---|
| 3507 | + } |
---|
| 3508 | + ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS); |
---|
3128 | 3509 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
---|
3129 | 3510 | |
---|
3130 | 3511 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
3131 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
3132 | | - "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", |
---|
3133 | | - ctxp->oxid, rc); |
---|
3134 | | - lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
---|
| 3512 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 3513 | + "6135 Failed to Issue ABTS for oxid x%x. Status x%x " |
---|
| 3514 | + "(%x)\n", |
---|
| 3515 | + ctxp->oxid, rc, released); |
---|
| 3516 | + if (released) |
---|
| 3517 | + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
---|
3135 | 3518 | return 1; |
---|
3136 | 3519 | } |
---|
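In the aerr path above, the ctxbuf repost is now conditional: the buffer goes back to the pool only when this path itself pulled the context off the release list (`released`). If LPFC_NVME_CTX_RLS was never set, another owner still holds the context and an unconditional repost could free it twice. The pattern reduced to its essentials (illustrative sketch under the same lock nesting as the hunk):

    static bool sketch_release_once(struct lpfc_hba *phba,
                                    struct lpfc_async_xchg_ctx *ctxp)
    {
            unsigned long flags;
            bool released = false;

            spin_lock_irqsave(&ctxp->ctxlock, flags);
            if (ctxp->flag & LPFC_NVME_CTX_RLS) {
                    spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                    list_del_init(&ctxp->list);  /* take over the release */
                    spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                    released = true;
            }
            ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
            spin_unlock_irqrestore(&ctxp->ctxlock, flags);

            return released;  /* caller reposts ctxbuf only if true */
    }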
3137 | 3520 | |
---|
3138 | | -static int |
---|
3139 | | -lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, |
---|
3140 | | - struct lpfc_nvmet_rcv_ctx *ctxp, |
---|
| 3521 | +/** |
---|
| 3522 | + * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received |
---|
| 3523 | + * via async frame receive where the frame is not handled. |
---|
| 3524 | + * @phba: pointer to adapter structure |
---|
| 3525 | + * @ctxp: pointer to the asynchronously received sequence |
---|
| 3526 | + * @sid: address of the remote port to send the ABTS to |
---|
| 3527 | + * @xri: oxid value for the ABTS (other side's exchange id). |
---|
| 3528 | + **/ |
---|
| 3529 | +int |
---|
| 3530 | +lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, |
---|
| 3531 | + struct lpfc_async_xchg_ctx *ctxp, |
---|
3141 | 3532 | uint32_t sid, uint16_t xri) |
---|
3142 | 3533 | { |
---|
3143 | | - struct lpfc_nvmet_tgtport *tgtp; |
---|
| 3534 | + struct lpfc_nvmet_tgtport *tgtp = NULL; |
---|
3144 | 3535 | struct lpfc_iocbq *abts_wqeq; |
---|
3145 | | - union lpfc_wqe128 *wqe_abts; |
---|
3146 | 3536 | unsigned long flags; |
---|
3147 | 3537 | int rc; |
---|
3148 | 3538 | |
---|
3149 | | - if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) || |
---|
3150 | | - (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) { |
---|
3151 | | - ctxp->state = LPFC_NVMET_STE_LS_ABORT; |
---|
| 3539 | + if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) || |
---|
| 3540 | + (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) { |
---|
| 3541 | + ctxp->state = LPFC_NVME_STE_LS_ABORT; |
---|
3152 | 3542 | ctxp->entry_cnt++; |
---|
3153 | 3543 | } else { |
---|
3154 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
---|
| 3544 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3155 | 3545 | "6418 NVMET LS abort state mismatch " |
---|
3156 | 3546 | "IO x%x: %d %d\n", |
---|
3157 | 3547 | ctxp->oxid, ctxp->state, ctxp->entry_cnt); |
---|
3158 | | - ctxp->state = LPFC_NVMET_STE_LS_ABORT; |
---|
| 3548 | + ctxp->state = LPFC_NVME_STE_LS_ABORT; |
---|
3159 | 3549 | } |
---|
3160 | 3550 | |
---|
3161 | | - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
| 3551 | + if (phba->nvmet_support && phba->targetport) |
---|
| 3552 | + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
| 3553 | + |
---|
3162 | 3554 | if (!ctxp->wqeq) { |
---|
3163 | 3555 | /* Issue ABTS for this WQE based on iotag */ |
---|
3164 | 3556 | ctxp->wqeq = lpfc_sli_get_iocbq(phba); |
---|
3165 | 3557 | if (!ctxp->wqeq) { |
---|
3166 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3558 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3167 | 3559 | "6068 Abort failed: No wqeqs: " |
---|
3168 | 3560 | "xri: x%x\n", xri); |
---|
3169 | 3561 | /* No failure to an ABTS request. */ |
---|
.. | .. |
---|
3172 | 3564 | } |
---|
3173 | 3565 | } |
---|
3174 | 3566 | abts_wqeq = ctxp->wqeq; |
---|
3175 | | - wqe_abts = &abts_wqeq->wqe; |
---|
3176 | 3567 | |
---|
3177 | 3568 | if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { |
---|
3178 | 3569 | rc = WQE_BUSY; |
---|
.. | .. |
---|
3181 | 3572 | |
---|
3182 | 3573 | spin_lock_irqsave(&phba->hbalock, flags); |
---|
3183 | 3574 | abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; |
---|
3184 | | - abts_wqeq->iocb_cmpl = 0; |
---|
| 3575 | + abts_wqeq->iocb_cmpl = NULL; |
---|
3185 | 3576 | abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; |
---|
3186 | | - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); |
---|
| 3577 | + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); |
---|
3187 | 3578 | spin_unlock_irqrestore(&phba->hbalock, flags); |
---|
3188 | 3579 | if (rc == WQE_SUCCESS) { |
---|
3189 | | - atomic_inc(&tgtp->xmt_abort_unsol); |
---|
| 3580 | + if (tgtp) |
---|
| 3581 | + atomic_inc(&tgtp->xmt_abort_unsol); |
---|
3190 | 3582 | return 0; |
---|
3191 | 3583 | } |
---|
3192 | 3584 | out: |
---|
3193 | | - atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
| 3585 | + if (tgtp) |
---|
| 3586 | + atomic_inc(&tgtp->xmt_abort_rsp_error); |
---|
3194 | 3587 | abts_wqeq->context2 = NULL; |
---|
3195 | 3588 | abts_wqeq->context3 = NULL; |
---|
3196 | 3589 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
---|
3197 | | - kfree(ctxp); |
---|
3198 | | - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
---|
| 3590 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3199 | 3591 | "6056 Failed to Issue ABTS. Status x%x\n", rc); |
---|
3200 | | - return 0; |
---|
| 3592 | + return 1; |
---|
| 3593 | +} |
---|
| 3594 | + |
---|
| 3595 | +/** |
---|
| 3596 | + * lpfc_nvmet_invalidate_host - invalidate connections from an NVME host |
---|
| 3597 | + * |
---|
| 3598 | + * @phba: pointer to the driver instance bound to an adapter port. |
---|
| 3599 | + * @ndlp: pointer to an lpfc_nodelist type |
---|
| 3600 | + * |
---|
| 3601 | + * This routine upcalls the nvmet transport to invalidate an NVME |
---|
| 3602 | + * host to which this target instance had active connections. |
---|
| 3603 | + */ |
---|
| 3604 | +void |
---|
| 3605 | +lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
---|
| 3606 | +{ |
---|
| 3607 | + struct lpfc_nvmet_tgtport *tgtp; |
---|
| 3608 | + |
---|
| 3609 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS, |
---|
| 3610 | + "6203 Invalidating hosthandle x%px\n", |
---|
| 3611 | + ndlp); |
---|
| 3612 | + |
---|
| 3613 | + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
---|
| 3614 | + atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE); |
---|
| 3615 | + |
---|
| 3616 | +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
---|
| 3617 | + /* Need to get the nvmet_fc_target_port pointer here.*/ |
---|
| 3618 | + nvmet_fc_invalidate_host(phba->targetport, ndlp); |
---|
| 3619 | +#endif |
---|
3201 | 3620 | } |
---|
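lpfc_nvmet_invalidate_host() is new in this patch: when a remote NVME host's node is torn down, the driver marks the target port LPFC_NVMET_INV_HOST_ACTIVE and hands the ndlp, which serves as the opaque hosthandle, up to the nvmet-fc transport. The upcall is compiled only when that transport is built, the same guard pattern used above:

    #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
            /* ndlp doubles as the hosthandle the transport uses to
             * map back to its association(s) with this host.
             */
            nvmet_fc_invalidate_host(phba->targetport, ndlp);
    #endif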