| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright (c) 2016 Avago Technologies. All rights reserved. |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 5 | | - * it under the terms of version 2 of the GNU General Public License as |
|---|
| 6 | | - * published by the Free Software Foundation. |
|---|
| 7 | | - * |
|---|
| 8 | | - * This program is distributed in the hope that it will be useful. |
|---|
| 9 | | - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, |
|---|
| 10 | | - * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A |
|---|
| 11 | | - * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO |
|---|
| 12 | | - * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. |
|---|
| 13 | | - * See the GNU General Public License for more details, a copy of which |
|---|
| 14 | | - * can be found in the file COPYING included with this package |
|---|
| 15 | | - * |
|---|
| 16 | 4 | */ |
|---|
| 17 | 5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
|---|
| 18 | 6 | #include <linux/module.h> |
|---|
| .. | .. |
|---|
| 26 | 14 | #include "nvmet.h" |
|---|
| 27 | 15 | #include <linux/nvme-fc-driver.h> |
|---|
| 28 | 16 | #include <linux/nvme-fc.h> |
|---|
| 17 | +#include "../host/fc.h" |
|---|
| 29 | 18 | |
|---|
| 30 | 19 | |
|---|
| 31 | 20 | /* *************************** Data Structures/Defines ****************** */ |
|---|
| .. | .. |
|---|
| 33 | 22 | |
|---|
| 34 | 23 | #define NVMET_LS_CTX_COUNT 256 |
|---|
| 35 | 24 | |
|---|
| 36 | | -/* for this implementation, assume small single frame rqst/rsp */ |
|---|
| 37 | | -#define NVME_FC_MAX_LS_BUFFER_SIZE 2048 |
|---|
| 38 | | - |
|---|
| 39 | 25 | struct nvmet_fc_tgtport; |
|---|
| 40 | 26 | struct nvmet_fc_tgt_assoc; |
|---|
| 41 | 27 | |
|---|
| 42 | | -struct nvmet_fc_ls_iod { |
|---|
| 43 | | - struct nvmefc_tgt_ls_req *lsreq; |
|---|
| 28 | +struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ |
|---|
| 29 | + struct nvmefc_ls_rsp *lsrsp; |
|---|
| 44 | 30 | struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ |
|---|
| 45 | 31 | |
|---|
| 46 | | - struct list_head ls_list; /* tgtport->ls_list */ |
|---|
| 32 | + struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ |
|---|
| 47 | 33 | |
|---|
| 48 | 34 | struct nvmet_fc_tgtport *tgtport; |
|---|
| 49 | 35 | struct nvmet_fc_tgt_assoc *assoc; |
|---|
| 36 | + void *hosthandle; |
|---|
| 50 | 37 | |
|---|
| 51 | | - u8 *rqstbuf; |
|---|
| 52 | | - u8 *rspbuf; |
|---|
| 38 | + union nvmefc_ls_requests *rqstbuf; |
|---|
| 39 | + union nvmefc_ls_responses *rspbuf; |
|---|
| 53 | 40 | u16 rqstdatalen; |
|---|
| 54 | 41 | dma_addr_t rspdma; |
|---|
| 55 | 42 | |
|---|
| .. | .. |
|---|
| 57 | 44 | |
|---|
| 58 | 45 | struct work_struct work; |
|---|
| 59 | 46 | } __aligned(sizeof(unsigned long long)); |
|---|
| 47 | + |
|---|
| 48 | +struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ |
|---|
| 49 | + struct nvmefc_ls_req ls_req; |
|---|
| 50 | + |
|---|
| 51 | + struct nvmet_fc_tgtport *tgtport; |
|---|
| 52 | + void *hosthandle; |
|---|
| 53 | + |
|---|
| 54 | + int ls_error; |
|---|
| 55 | + struct list_head lsreq_list; /* tgtport->ls_req_list */ |
|---|
| 56 | + bool req_queued; |
|---|
| 57 | +}; |
|---|
| 58 | + |
|---|
| 60 | 59 | |
|---|
| 61 | 60 | /* desired maximum for a single sequence - if sg list allows it */ |
|---|
| 62 | 61 | #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) |
|---|
| .. | .. |
|---|
| 86 | 85 | spinlock_t flock; |
|---|
| 87 | 86 | |
|---|
| 88 | 87 | struct nvmet_req req; |
|---|
| 89 | | - struct work_struct work; |
|---|
| 90 | | - struct work_struct done_work; |
|---|
| 91 | 88 | struct work_struct defer_work; |
|---|
| 92 | 89 | |
|---|
| 93 | 90 | struct nvmet_fc_tgtport *tgtport; |
|---|
| .. | .. |
|---|
| 97 | 94 | }; |
|---|
| 98 | 95 | |
|---|
| 99 | 96 | struct nvmet_fc_tgtport { |
|---|
| 100 | | - |
|---|
| 101 | 97 | struct nvmet_fc_target_port fc_target_port; |
|---|
| 102 | 98 | |
|---|
| 103 | 99 | struct list_head tgt_list; /* nvmet_fc_target_list */ |
|---|
| .. | .. |
|---|
| 106 | 102 | |
|---|
| 107 | 103 | struct nvmet_fc_ls_iod *iod; |
|---|
| 108 | 104 | spinlock_t lock; |
|---|
| 109 | | - struct list_head ls_list; |
|---|
| 105 | + struct list_head ls_rcv_list; |
|---|
| 106 | + struct list_head ls_req_list; |
|---|
| 110 | 107 | struct list_head ls_busylist; |
|---|
| 111 | 108 | struct list_head assoc_list; |
|---|
| 109 | + struct list_head host_list; |
|---|
| 112 | 110 | struct ida assoc_cnt; |
|---|
| 113 | | - struct nvmet_port *port; |
|---|
| 111 | + struct nvmet_fc_port_entry *pe; |
|---|
| 114 | 112 | struct kref ref; |
|---|
| 115 | 113 | u32 max_sg_cnt; |
|---|
| 114 | +}; |
|---|
| 115 | + |
|---|
| 116 | +struct nvmet_fc_port_entry { |
|---|
| 117 | + struct nvmet_fc_tgtport *tgtport; |
|---|
| 118 | + struct nvmet_port *port; |
|---|
| 119 | + u64 node_name; |
|---|
| 120 | + u64 port_name; |
|---|
| 121 | + struct list_head pe_list; |
|---|
| 116 | 122 | }; |
|---|
| 117 | 123 | |
|---|
| 118 | 124 | struct nvmet_fc_defer_fcp_req { |
|---|
| .. | .. |
|---|
| 126 | 132 | u16 sqsize; |
|---|
| 127 | 133 | u16 ersp_ratio; |
|---|
| 128 | 134 | __le16 sqhd; |
|---|
| 129 | | - int cpu; |
|---|
| 130 | 135 | atomic_t connected; |
|---|
| 131 | 136 | atomic_t sqtail; |
|---|
| 132 | 137 | atomic_t zrspcnt; |
|---|
| 133 | 138 | atomic_t rsn; |
|---|
| 134 | 139 | spinlock_t qlock; |
|---|
| 135 | | - struct nvmet_port *port; |
|---|
| 136 | 140 | struct nvmet_cq nvme_cq; |
|---|
| 137 | 141 | struct nvmet_sq nvme_sq; |
|---|
| 138 | 142 | struct nvmet_fc_tgt_assoc *assoc; |
|---|
| 139 | | - struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ |
|---|
| 140 | 143 | struct list_head fod_list; |
|---|
| 141 | 144 | struct list_head pending_cmd_list; |
|---|
| 142 | 145 | struct list_head avail_defer_list; |
|---|
| 143 | 146 | struct workqueue_struct *work_q; |
|---|
| 144 | 147 | struct kref ref; |
|---|
| 148 | + struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ |
|---|
| 145 | 149 | } __aligned(sizeof(unsigned long long)); |
|---|
| 150 | + |
|---|
| 151 | +struct nvmet_fc_hostport { |
|---|
| 152 | + struct nvmet_fc_tgtport *tgtport; |
|---|
| 153 | + void *hosthandle; |
|---|
| 154 | + struct list_head host_list; |
|---|
| 155 | + struct kref ref; |
|---|
| 156 | + u8 invalid; |
|---|
| 157 | +}; |
|---|
| 146 | 158 | |
|---|
| 147 | 159 | struct nvmet_fc_tgt_assoc { |
|---|
| 148 | 160 | u64 association_id; |
|---|
| 149 | 161 | u32 a_id; |
|---|
| 162 | + atomic_t terminating; |
|---|
| 150 | 163 | struct nvmet_fc_tgtport *tgtport; |
|---|
| 164 | + struct nvmet_fc_hostport *hostport; |
|---|
| 165 | + struct nvmet_fc_ls_iod *rcv_disconn; |
|---|
| 151 | 166 | struct list_head a_list; |
|---|
| 152 | 167 | struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; |
|---|
| 153 | 168 | struct kref ref; |
|---|
| .. | .. |
|---|
| 221 | 236 | |
|---|
| 222 | 237 | static LIST_HEAD(nvmet_fc_target_list); |
|---|
| 223 | 238 | static DEFINE_IDA(nvmet_fc_tgtport_cnt); |
|---|
| 239 | +static LIST_HEAD(nvmet_fc_portentry_list); |
|---|
| 224 | 240 | |
|---|
| 225 | 241 | |
|---|
| 226 | 242 | static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); |
|---|
| 227 | | -static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work); |
|---|
| 228 | | -static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work); |
|---|
| 229 | 243 | static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); |
|---|
| 230 | 244 | static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); |
|---|
| 231 | 245 | static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); |
|---|
| .. | .. |
|---|
| 236 | 250 | static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
|---|
| 237 | 251 | struct nvmet_fc_fcp_iod *fod); |
|---|
| 238 | 252 | static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); |
|---|
| 253 | +static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, |
|---|
| 254 | + struct nvmet_fc_ls_iod *iod); |
|---|
| 239 | 255 | |
|---|
| 240 | 256 | |
|---|
| 241 | 257 | /* *********************** FC-NVME DMA Handling **************************** */ |
|---|
| .. | .. |
|---|
| 327 | 343 | } |
|---|
| 328 | 344 | |
|---|
| 329 | 345 | |
|---|
| 346 | +/* ********************** FC-NVME LS XMT Handling ************************* */ |
|---|
| 347 | + |
|---|
| 348 | + |
|---|
| 349 | +static void |
|---|
| 350 | +__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) |
|---|
| 351 | +{ |
|---|
| 352 | + struct nvmet_fc_tgtport *tgtport = lsop->tgtport; |
|---|
| 353 | + struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
|---|
| 354 | + unsigned long flags; |
|---|
| 355 | + |
|---|
| 356 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 357 | + |
|---|
| 358 | + if (!lsop->req_queued) { |
|---|
| 359 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 360 | + return; |
|---|
| 361 | + } |
|---|
| 362 | + |
|---|
| 363 | + list_del(&lsop->lsreq_list); |
|---|
| 364 | + |
|---|
| 365 | + lsop->req_queued = false; |
|---|
| 366 | + |
|---|
| 367 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 368 | + |
|---|
| 369 | + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, |
|---|
| 370 | + (lsreq->rqstlen + lsreq->rsplen), |
|---|
| 371 | + DMA_BIDIRECTIONAL); |
|---|
| 372 | + |
|---|
| 373 | + nvmet_fc_tgtport_put(tgtport); |
|---|
| 374 | +} |
|---|
| 375 | + |
|---|
| 376 | +static int |
|---|
| 377 | +__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, |
|---|
| 378 | + struct nvmet_fc_ls_req_op *lsop, |
|---|
| 379 | + void (*done)(struct nvmefc_ls_req *req, int status)) |
|---|
| 380 | +{ |
|---|
| 381 | + struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
|---|
| 382 | + unsigned long flags; |
|---|
| 383 | + int ret = 0; |
|---|
| 384 | + |
|---|
| 385 | + if (!tgtport->ops->ls_req) |
|---|
| 386 | + return -EOPNOTSUPP; |
|---|
| 387 | + |
|---|
| 388 | + if (!nvmet_fc_tgtport_get(tgtport)) |
|---|
| 389 | + return -ESHUTDOWN; |
|---|
| 390 | + |
|---|
| 391 | + lsreq->done = done; |
|---|
| 392 | + lsop->req_queued = false; |
|---|
| 393 | + INIT_LIST_HEAD(&lsop->lsreq_list); |
|---|
| 394 | + |
|---|
| 395 | + lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, |
|---|
| 396 | + lsreq->rqstlen + lsreq->rsplen, |
|---|
| 397 | + DMA_BIDIRECTIONAL); |
|---|
| 398 | + if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { |
|---|
| 399 | + ret = -EFAULT; |
|---|
| 400 | + goto out_puttgtport; |
|---|
| 401 | + } |
|---|
| 402 | + lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; |
|---|
| 403 | + |
|---|
| 404 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 405 | + |
|---|
| 406 | + list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); |
|---|
| 407 | + |
|---|
| 408 | + lsop->req_queued = true; |
|---|
| 409 | + |
|---|
| 410 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 411 | + |
|---|
| 412 | + ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, |
|---|
| 413 | + lsreq); |
|---|
| 414 | + if (ret) |
|---|
| 415 | + goto out_unlink; |
|---|
| 416 | + |
|---|
| 417 | + return 0; |
|---|
| 418 | + |
|---|
| 419 | +out_unlink: |
|---|
| 420 | + lsop->ls_error = ret; |
|---|
| 421 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 422 | + lsop->req_queued = false; |
|---|
| 423 | + list_del(&lsop->lsreq_list); |
|---|
| 424 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 425 | + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, |
|---|
| 426 | + (lsreq->rqstlen + lsreq->rsplen), |
|---|
| 427 | + DMA_BIDIRECTIONAL); |
|---|
| 428 | +out_puttgtport: |
|---|
| 429 | + nvmet_fc_tgtport_put(tgtport); |
|---|
| 430 | + |
|---|
| 431 | + return ret; |
|---|
| 432 | +} |
|---|
| 433 | + |
|---|
| 434 | +static int |
|---|
| 435 | +nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, |
|---|
| 436 | + struct nvmet_fc_ls_req_op *lsop, |
|---|
| 437 | + void (*done)(struct nvmefc_ls_req *req, int status)) |
|---|
| 438 | +{ |
|---|
| 439 | + /* don't wait for completion */ |
|---|
| 440 | + |
|---|
| 441 | + return __nvmet_fc_send_ls_req(tgtport, lsop, done); |
|---|
| 442 | +} |
|---|
| 443 | + |
|---|
| 444 | +static void |
|---|
| 445 | +nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) |
|---|
| 446 | +{ |
|---|
| 447 | + struct nvmet_fc_ls_req_op *lsop = |
|---|
| 448 | + container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); |
|---|
| 449 | + |
|---|
| 450 | + __nvmet_fc_finish_ls_req(lsop); |
|---|
| 451 | + |
|---|
| 452 | + /* fc-nvme target doesn't care about success or failure of cmd */ |
|---|
| 453 | + |
|---|
| 454 | + kfree(lsop); |
|---|
| 455 | +} |
|---|
| 456 | + |
|---|
| 457 | +/* |
|---|
| 458 | + * This routine sends a FC-NVME LS to disconnect (aka terminate) |
|---|
| 459 | + * the FC-NVME Association. Terminating the association also |
|---|
| 460 | + * terminates the FC-NVME connections (per queue, both admin and io |
|---|
| 461 | + * queues) that are part of the association. E.g. things are torn |
|---|
| 462 | + * down, and the related FC-NVME Association ID and Connection IDs |
|---|
| 463 | + * become invalid. |
|---|
| 464 | + * |
|---|
| 465 | + * The behavior of the fc-nvme target is such that its |
|---|
| 466 | + * understanding of the association and connections will implicitly |
|---|
| 467 | + * be torn down. The action is implicit as it may be due to a loss of |
|---|
| 468 | + * connectivity with the fc-nvme host, so the target may never get a |
|---|
| 469 | + * response even if it tried. As such, the action of this routine |
|---|
| 470 | + * is to asynchronously send the LS, ignore any results of the LS, and |
|---|
| 471 | + * continue on with terminating the association. If the fc-nvme host |
|---|
| 472 | + * is present and receives the LS, it too can tear down. |
|---|
| 473 | + */ |
|---|
| 474 | +static void |
|---|
| 475 | +nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) |
|---|
| 476 | +{ |
|---|
| 477 | + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
|---|
| 478 | + struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; |
|---|
| 479 | + struct fcnvme_ls_disconnect_assoc_acc *discon_acc; |
|---|
| 480 | + struct nvmet_fc_ls_req_op *lsop; |
|---|
| 481 | + struct nvmefc_ls_req *lsreq; |
|---|
| 482 | + int ret; |
|---|
| 483 | + |
|---|
| 484 | + /* |
|---|
| 485 | + * If ls_req is NULL or no hosthandle, it's an older lldd and no |
|---|
| 486 | + * message is normal. Otherwise, send unless the hostport has |
|---|
| 487 | + * already been invalidated by the lldd. |
|---|
| 488 | + */ |
|---|
| 489 | + if (!tgtport->ops->ls_req || !assoc->hostport || |
|---|
| 490 | + assoc->hostport->invalid) |
|---|
| 491 | + return; |
|---|
| 492 | + |
|---|
| 493 | + lsop = kzalloc((sizeof(*lsop) + |
|---|
| 494 | + sizeof(*discon_rqst) + sizeof(*discon_acc) + |
|---|
| 495 | + tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); |
|---|
| 496 | + if (!lsop) { |
|---|
| 497 | + dev_info(tgtport->dev, |
|---|
| 498 | + "{%d:%d} send Disconnect Association failed: ENOMEM\n", |
|---|
| 499 | + tgtport->fc_target_port.port_num, assoc->a_id); |
|---|
| 500 | + return; |
|---|
| 501 | + } |
|---|
| 502 | + |
|---|
| 503 | + discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; |
|---|
| 504 | + discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; |
|---|
| 505 | + lsreq = &lsop->ls_req; |
|---|
| 506 | + if (tgtport->ops->lsrqst_priv_sz) |
|---|
| 507 | + lsreq->private = (void *)&discon_acc[1]; |
|---|
| 508 | + else |
|---|
| 509 | + lsreq->private = NULL; |
|---|
| 510 | + |
|---|
| 511 | + lsop->tgtport = tgtport; |
|---|
| 512 | + lsop->hosthandle = assoc->hostport->hosthandle; |
|---|
| 513 | + |
|---|
| 514 | + nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, |
|---|
| 515 | + assoc->association_id); |
|---|
| 516 | + |
|---|
| 517 | + ret = nvmet_fc_send_ls_req_async(tgtport, lsop, |
|---|
| 518 | + nvmet_fc_disconnect_assoc_done); |
|---|
| 519 | + if (ret) { |
|---|
| 520 | + dev_info(tgtport->dev, |
|---|
| 521 | + "{%d:%d} XMT Disconnect Association failed: %d\n", |
|---|
| 522 | + tgtport->fc_target_port.port_num, assoc->a_id, ret); |
|---|
| 523 | + kfree(lsop); |
|---|
| 524 | + } |
|---|
| 525 | +} |
|---|
| 526 | + |
|---|
| 527 | + |
|---|
| 330 | 528 | /* *********************** FC-NVME Port Management ************************ */ |
|---|
| 331 | 529 | |
|---|
| 332 | 530 | |
|---|
| .. | .. |
|---|
| 346 | 544 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { |
|---|
| 347 | 545 | INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); |
|---|
| 348 | 546 | iod->tgtport = tgtport; |
|---|
| 349 | | - list_add_tail(&iod->ls_list, &tgtport->ls_list); |
|---|
| 547 | + list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); |
|---|
| 350 | 548 | |
|---|
| 351 | | - iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE, |
|---|
| 352 | | - GFP_KERNEL); |
|---|
| 549 | + iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + |
|---|
| 550 | + sizeof(union nvmefc_ls_responses), |
|---|
| 551 | + GFP_KERNEL); |
|---|
| 353 | 552 | if (!iod->rqstbuf) |
|---|
| 354 | 553 | goto out_fail; |
|---|
| 355 | 554 | |
|---|
| 356 | | - iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE; |
|---|
| 555 | + iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; |
|---|
| 357 | 556 | |
|---|
| 358 | 557 | iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, |
|---|
| 359 | | - NVME_FC_MAX_LS_BUFFER_SIZE, |
|---|
| 558 | + sizeof(*iod->rspbuf), |
|---|
| 360 | 559 | DMA_TO_DEVICE); |
|---|
| 361 | 560 | if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) |
|---|
| 362 | 561 | goto out_fail; |
|---|
| .. | .. |
|---|
| 366 | 565 | |
|---|
| 367 | 566 | out_fail: |
|---|
| 368 | 567 | kfree(iod->rqstbuf); |
|---|
| 369 | | - list_del(&iod->ls_list); |
|---|
| 568 | + list_del(&iod->ls_rcv_list); |
|---|
| 370 | 569 | for (iod--, i--; i >= 0; iod--, i--) { |
|---|
| 371 | 570 | fc_dma_unmap_single(tgtport->dev, iod->rspdma, |
|---|
| 372 | | - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); |
|---|
| 571 | + sizeof(*iod->rspbuf), DMA_TO_DEVICE); |
|---|
| 373 | 572 | kfree(iod->rqstbuf); |
|---|
| 374 | | - list_del(&iod->ls_list); |
|---|
| 573 | + list_del(&iod->ls_rcv_list); |
|---|
| 375 | 574 | } |
|---|
| 376 | 575 | |
|---|
| 377 | 576 | kfree(iod); |
|---|
| .. | .. |
|---|
| 387 | 586 | |
|---|
| 388 | 587 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { |
|---|
| 389 | 588 | fc_dma_unmap_single(tgtport->dev, |
|---|
| 390 | | - iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE, |
|---|
| 589 | + iod->rspdma, sizeof(*iod->rspbuf), |
|---|
| 391 | 590 | DMA_TO_DEVICE); |
|---|
| 392 | 591 | kfree(iod->rqstbuf); |
|---|
| 393 | | - list_del(&iod->ls_list); |
|---|
| 592 | + list_del(&iod->ls_rcv_list); |
|---|
| 394 | 593 | } |
|---|
| 395 | 594 | kfree(tgtport->iod); |
|---|
| 396 | 595 | } |
|---|
| .. | .. |
|---|
| 402 | 601 | unsigned long flags; |
|---|
| 403 | 602 | |
|---|
| 404 | 603 | spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 405 | | - iod = list_first_entry_or_null(&tgtport->ls_list, |
|---|
| 406 | | - struct nvmet_fc_ls_iod, ls_list); |
|---|
| 604 | + iod = list_first_entry_or_null(&tgtport->ls_rcv_list, |
|---|
| 605 | + struct nvmet_fc_ls_iod, ls_rcv_list); |
|---|
| 407 | 606 | if (iod) |
|---|
| 408 | | - list_move_tail(&iod->ls_list, &tgtport->ls_busylist); |
|---|
| 607 | + list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); |
|---|
| 409 | 608 | spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 410 | 609 | return iod; |
|---|
| 411 | 610 | } |
|---|
| .. | .. |
|---|
| 418 | 617 | unsigned long flags; |
|---|
| 419 | 618 | |
|---|
| 420 | 619 | spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 421 | | - list_move(&iod->ls_list, &tgtport->ls_list); |
|---|
| 620 | + list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); |
|---|
| 422 | 621 | spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 423 | 622 | } |
|---|
| 424 | 623 | |
|---|
| .. | .. |
|---|
| 430 | 629 | int i; |
|---|
| 431 | 630 | |
|---|
| 432 | 631 | for (i = 0; i < queue->sqsize; fod++, i++) { |
|---|
| 433 | | - INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work); |
|---|
| 434 | | - INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work); |
|---|
| 435 | 632 | INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); |
|---|
| 436 | 633 | fod->tgtport = tgtport; |
|---|
| 437 | 634 | fod->queue = queue; |
|---|
| .. | .. |
|---|
| 509 | 706 | fcpreq->hwqid = queue->qid ? |
|---|
| 510 | 707 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; |
|---|
| 511 | 708 | |
|---|
| 512 | | - if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) |
|---|
| 513 | | - queue_work_on(queue->cpu, queue->work_q, &fod->work); |
|---|
| 514 | | - else |
|---|
| 515 | | - nvmet_fc_handle_fcp_rqst(tgtport, fod); |
|---|
| 709 | + nvmet_fc_handle_fcp_rqst(tgtport, fod); |
|---|
| 516 | 710 | } |
|---|
| 517 | 711 | |
|---|
| 518 | 712 | static void |
|---|
| .. | .. |
|---|
| 591 | 785 | queue_work(queue->work_q, &fod->defer_work); |
|---|
| 592 | 786 | } |
|---|
| 593 | 787 | |
|---|
| 594 | | -static int |
|---|
| 595 | | -nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid) |
|---|
| 596 | | -{ |
|---|
| 597 | | - int cpu, idx, cnt; |
|---|
| 598 | | - |
|---|
| 599 | | - if (tgtport->ops->max_hw_queues == 1) |
|---|
| 600 | | - return WORK_CPU_UNBOUND; |
|---|
| 601 | | - |
|---|
| 602 | | - /* Simple cpu selection based on qid modulo active cpu count */ |
|---|
| 603 | | - idx = !qid ? 0 : (qid - 1) % num_active_cpus(); |
|---|
| 604 | | - |
|---|
| 605 | | - /* find the n'th active cpu */ |
|---|
| 606 | | - for (cpu = 0, cnt = 0; ; ) { |
|---|
| 607 | | - if (cpu_active(cpu)) { |
|---|
| 608 | | - if (cnt == idx) |
|---|
| 609 | | - break; |
|---|
| 610 | | - cnt++; |
|---|
| 611 | | - } |
|---|
| 612 | | - cpu = (cpu + 1) % num_possible_cpus(); |
|---|
| 613 | | - } |
|---|
| 614 | | - |
|---|
| 615 | | - return cpu; |
|---|
| 616 | | -} |
|---|
| 617 | | - |
|---|
| 618 | 788 | static struct nvmet_fc_tgt_queue * |
|---|
| 619 | 789 | nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, |
|---|
| 620 | 790 | u16 qid, u16 sqsize) |
|---|
| .. | .. |
|---|
| 626 | 796 | if (qid > NVMET_NR_QUEUES) |
|---|
| 627 | 797 | return NULL; |
|---|
| 628 | 798 | |
|---|
| 629 | | - queue = kzalloc((sizeof(*queue) + |
|---|
| 630 | | - (sizeof(struct nvmet_fc_fcp_iod) * sqsize)), |
|---|
| 631 | | - GFP_KERNEL); |
|---|
| 799 | + queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); |
|---|
| 632 | 800 | if (!queue) |
|---|
| 633 | 801 | return NULL; |
|---|
| 634 | 802 | |
|---|
| .. | .. |
|---|
| 641 | 809 | if (!queue->work_q) |
|---|
| 642 | 810 | goto out_a_put; |
|---|
| 643 | 811 | |
|---|
| 644 | | - queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1]; |
|---|
| 645 | 812 | queue->qid = qid; |
|---|
| 646 | 813 | queue->sqsize = sqsize; |
|---|
| 647 | 814 | queue->assoc = assoc; |
|---|
| 648 | | - queue->port = assoc->tgtport->port; |
|---|
| 649 | | - queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); |
|---|
| 650 | 815 | INIT_LIST_HEAD(&queue->fod_list); |
|---|
| 651 | 816 | INIT_LIST_HEAD(&queue->avail_defer_list); |
|---|
| 652 | 817 | INIT_LIST_HEAD(&queue->pending_cmd_list); |
|---|
| .. | .. |
|---|
| 721 | 886 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
|---|
| 722 | 887 | struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; |
|---|
| 723 | 888 | unsigned long flags; |
|---|
| 724 | | - int i, writedataactive; |
|---|
| 889 | + int i; |
|---|
| 725 | 890 | bool disconnect; |
|---|
| 726 | 891 | |
|---|
| 727 | 892 | disconnect = atomic_xchg(&queue->connected, 0); |
|---|
| 728 | 893 | |
|---|
| 894 | + /* if not connected, nothing to do */ |
|---|
| 895 | + if (!disconnect) |
|---|
| 896 | + return; |
|---|
| 897 | + |
|---|
| 729 | 898 | spin_lock_irqsave(&queue->qlock, flags); |
|---|
| 730 | | - /* about outstanding io's */ |
|---|
| 899 | + /* abort outstanding io's */ |
|---|
| 731 | 900 | for (i = 0; i < queue->sqsize; fod++, i++) { |
|---|
| 732 | 901 | if (fod->active) { |
|---|
| 733 | 902 | spin_lock(&fod->flock); |
|---|
| 734 | 903 | fod->abort = true; |
|---|
| 735 | | - writedataactive = fod->writedataactive; |
|---|
| 736 | | - spin_unlock(&fod->flock); |
|---|
| 737 | 904 | /* |
|---|
| 738 | 905 | * only call lldd abort routine if waiting for |
|---|
| 739 | 906 | * writedata. other outstanding ops should finish |
|---|
| 740 | 907 | * on their own. |
|---|
| 741 | 908 | */ |
|---|
| 742 | | - if (writedataactive) { |
|---|
| 743 | | - spin_lock(&fod->flock); |
|---|
| 909 | + if (fod->writedataactive) { |
|---|
| 744 | 910 | fod->aborted = true; |
|---|
| 745 | 911 | spin_unlock(&fod->flock); |
|---|
| 746 | 912 | tgtport->ops->fcp_abort( |
|---|
| 747 | 913 | &tgtport->fc_target_port, fod->fcpreq); |
|---|
| 748 | | - } |
|---|
| 914 | + } else |
|---|
| 915 | + spin_unlock(&fod->flock); |
|---|
| 749 | 916 | } |
|---|
| 750 | 917 | } |
|---|
| 751 | 918 | |
|---|
| .. | .. |
|---|
| 785 | 952 | |
|---|
| 786 | 953 | flush_workqueue(queue->work_q); |
|---|
| 787 | 954 | |
|---|
| 788 | | - if (disconnect) |
|---|
| 789 | | - nvmet_sq_destroy(&queue->nvme_sq); |
|---|
| 955 | + nvmet_sq_destroy(&queue->nvme_sq); |
|---|
| 790 | 956 | |
|---|
| 791 | 957 | nvmet_fc_tgt_q_put(queue); |
|---|
| 792 | 958 | } |
|---|
| .. | .. |
|---|
| 821 | 987 | } |
|---|
| 822 | 988 | |
|---|
| 823 | 989 | static void |
|---|
| 990 | +nvmet_fc_hostport_free(struct kref *ref) |
|---|
| 991 | +{ |
|---|
| 992 | + struct nvmet_fc_hostport *hostport = |
|---|
| 993 | + container_of(ref, struct nvmet_fc_hostport, ref); |
|---|
| 994 | + struct nvmet_fc_tgtport *tgtport = hostport->tgtport; |
|---|
| 995 | + unsigned long flags; |
|---|
| 996 | + |
|---|
| 997 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 998 | + list_del(&hostport->host_list); |
|---|
| 999 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1000 | + if (tgtport->ops->host_release && hostport->invalid) |
|---|
| 1001 | + tgtport->ops->host_release(hostport->hosthandle); |
|---|
| 1002 | + kfree(hostport); |
|---|
| 1003 | + nvmet_fc_tgtport_put(tgtport); |
|---|
| 1004 | +} |
|---|
| 1005 | + |
|---|
| 1006 | +static void |
|---|
| 1007 | +nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) |
|---|
| 1008 | +{ |
|---|
| 1009 | + kref_put(&hostport->ref, nvmet_fc_hostport_free); |
|---|
| 1010 | +} |
|---|
| 1011 | + |
|---|
| 1012 | +static int |
|---|
| 1013 | +nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) |
|---|
| 1014 | +{ |
|---|
| 1015 | + return kref_get_unless_zero(&hostport->ref); |
|---|
| 1016 | +} |
|---|
| 1017 | + |
|---|
| 1018 | +static void |
|---|
| 1019 | +nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) |
|---|
| 1020 | +{ |
|---|
| 1021 | + /* if LLDD not implemented, leave as NULL */ |
|---|
| 1022 | + if (!hostport || !hostport->hosthandle) |
|---|
| 1023 | + return; |
|---|
| 1024 | + |
|---|
| 1025 | + nvmet_fc_hostport_put(hostport); |
|---|
| 1026 | +} |
|---|
| 1027 | + |
|---|
| 1028 | +static struct nvmet_fc_hostport * |
|---|
| 1029 | +nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
|---|
| 1030 | +{ |
|---|
| 1031 | + struct nvmet_fc_hostport *newhost, *host, *match = NULL; |
|---|
| 1032 | + unsigned long flags; |
|---|
| 1033 | + |
|---|
| 1034 | + /* if LLDD not implemented, leave as NULL */ |
|---|
| 1035 | + if (!hosthandle) |
|---|
| 1036 | + return NULL; |
|---|
| 1037 | + |
|---|
| 1038 | + /* take reference for what will be the newly allocated hostport */ |
|---|
| 1039 | + if (!nvmet_fc_tgtport_get(tgtport)) |
|---|
| 1040 | + return ERR_PTR(-EINVAL); |
|---|
| 1041 | + |
|---|
| 1042 | + newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); |
|---|
| 1043 | + if (!newhost) { |
|---|
| 1044 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 1045 | + list_for_each_entry(host, &tgtport->host_list, host_list) { |
|---|
| 1046 | + if (host->hosthandle == hosthandle && !host->invalid) { |
|---|
| 1047 | + if (nvmet_fc_hostport_get(host)) { |
|---|
| 1048 | + match = host; |
|---|
| 1049 | + break; |
|---|
| 1050 | + } |
|---|
| 1051 | + } |
|---|
| 1052 | + } |
|---|
| 1053 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1054 | + /* no allocation - release reference */ |
|---|
| 1055 | + nvmet_fc_tgtport_put(tgtport); |
|---|
| 1056 | + return (match) ? match : ERR_PTR(-ENOMEM); |
|---|
| 1057 | + } |
|---|
| 1058 | + |
|---|
| 1059 | + newhost->tgtport = tgtport; |
|---|
| 1060 | + newhost->hosthandle = hosthandle; |
|---|
| 1061 | + INIT_LIST_HEAD(&newhost->host_list); |
|---|
| 1062 | + kref_init(&newhost->ref); |
|---|
| 1063 | + |
|---|
| 1064 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 1065 | + list_for_each_entry(host, &tgtport->host_list, host_list) { |
|---|
| 1066 | + if (host->hosthandle == hosthandle && !host->invalid) { |
|---|
| 1067 | + if (nvmet_fc_hostport_get(host)) { |
|---|
| 1068 | + match = host; |
|---|
| 1069 | + break; |
|---|
| 1070 | + } |
|---|
| 1071 | + } |
|---|
| 1072 | + } |
|---|
| 1073 | + if (match) { |
|---|
| 1074 | + kfree(newhost); |
|---|
| 1075 | + newhost = NULL; |
|---|
| 1076 | + /* releasing allocation - release reference */ |
|---|
| 1077 | + nvmet_fc_tgtport_put(tgtport); |
|---|
| 1078 | + } else |
|---|
| 1079 | + list_add_tail(&newhost->host_list, &tgtport->host_list); |
|---|
| 1080 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1081 | + |
|---|
| 1082 | + return (match) ? match : newhost; |
|---|
| 1083 | +} |
|---|
| 1084 | + |
|---|
| 1085 | +static void |
|---|
| 824 | 1086 | nvmet_fc_delete_assoc(struct work_struct *work) |
|---|
| 825 | 1087 | { |
|---|
| 826 | 1088 | struct nvmet_fc_tgt_assoc *assoc = |
|---|
| .. | .. |
|---|
| 831 | 1093 | } |
|---|
| 832 | 1094 | |
|---|
| 833 | 1095 | static struct nvmet_fc_tgt_assoc * |
|---|
| 834 | | -nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport) |
|---|
| 1096 | +nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
|---|
| 835 | 1097 | { |
|---|
| 836 | 1098 | struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; |
|---|
| 837 | 1099 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 848 | 1110 | goto out_free_assoc; |
|---|
| 849 | 1111 | |
|---|
| 850 | 1112 | if (!nvmet_fc_tgtport_get(tgtport)) |
|---|
| 851 | | - goto out_ida_put; |
|---|
| 1113 | + goto out_ida; |
|---|
| 1114 | + |
|---|
| 1115 | + assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); |
|---|
| 1116 | + if (IS_ERR(assoc->hostport)) |
|---|
| 1117 | + goto out_put; |
|---|
| 852 | 1118 | |
|---|
| 853 | 1119 | assoc->tgtport = tgtport; |
|---|
| 854 | 1120 | assoc->a_id = idx; |
|---|
| 855 | 1121 | INIT_LIST_HEAD(&assoc->a_list); |
|---|
| 856 | 1122 | kref_init(&assoc->ref); |
|---|
| 857 | 1123 | INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); |
|---|
| 1124 | + atomic_set(&assoc->terminating, 0); |
|---|
| 858 | 1125 | |
|---|
| 859 | 1126 | while (needrandom) { |
|---|
| 860 | 1127 | get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); |
|---|
| .. | .. |
|---|
| 862 | 1129 | |
|---|
| 863 | 1130 | spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 864 | 1131 | needrandom = false; |
|---|
| 865 | | - list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) |
|---|
| 1132 | + list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { |
|---|
| 866 | 1133 | if (ran == tmpassoc->association_id) { |
|---|
| 867 | 1134 | needrandom = true; |
|---|
| 868 | 1135 | break; |
|---|
| 869 | 1136 | } |
|---|
| 1137 | + } |
|---|
| 870 | 1138 | if (!needrandom) { |
|---|
| 871 | 1139 | assoc->association_id = ran; |
|---|
| 872 | 1140 | list_add_tail(&assoc->a_list, &tgtport->assoc_list); |
|---|
| .. | .. |
|---|
| 876 | 1144 | |
|---|
| 877 | 1145 | return assoc; |
|---|
| 878 | 1146 | |
|---|
| 879 | | -out_ida_put: |
|---|
| 1147 | +out_put: |
|---|
| 1148 | + nvmet_fc_tgtport_put(tgtport); |
|---|
| 1149 | +out_ida: |
|---|
| 880 | 1150 | ida_simple_remove(&tgtport->assoc_cnt, idx); |
|---|
| 881 | 1151 | out_free_assoc: |
|---|
| 882 | 1152 | kfree(assoc); |
|---|
| .. | .. |
|---|
| 889 | 1159 | struct nvmet_fc_tgt_assoc *assoc = |
|---|
| 890 | 1160 | container_of(ref, struct nvmet_fc_tgt_assoc, ref); |
|---|
| 891 | 1161 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
|---|
| 1162 | + struct nvmet_fc_ls_iod *oldls; |
|---|
| 892 | 1163 | unsigned long flags; |
|---|
| 893 | 1164 | |
|---|
| 1165 | + /* Send Disconnect now that all i/o has completed */ |
|---|
| 1166 | + nvmet_fc_xmt_disconnect_assoc(assoc); |
|---|
| 1167 | + |
|---|
| 1168 | + nvmet_fc_free_hostport(assoc->hostport); |
|---|
| 894 | 1169 | spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 895 | 1170 | list_del(&assoc->a_list); |
|---|
| 1171 | + oldls = assoc->rcv_disconn; |
|---|
| 896 | 1172 | spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1173 | + /* if pending Rcv Disconnect Association LS, send rsp now */ |
|---|
| 1174 | + if (oldls) |
|---|
| 1175 | + nvmet_fc_xmt_ls_rsp(tgtport, oldls); |
|---|
| 897 | 1176 | ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); |
|---|
| 1177 | + dev_info(tgtport->dev, |
|---|
| 1178 | + "{%d:%d} Association freed\n", |
|---|
| 1179 | + tgtport->fc_target_port.port_num, assoc->a_id); |
|---|
| 898 | 1180 | kfree(assoc); |
|---|
| 899 | 1181 | nvmet_fc_tgtport_put(tgtport); |
|---|
| 900 | 1182 | } |
|---|
| .. | .. |
|---|
| 917 | 1199 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
|---|
| 918 | 1200 | struct nvmet_fc_tgt_queue *queue; |
|---|
| 919 | 1201 | unsigned long flags; |
|---|
| 920 | | - int i; |
|---|
| 1202 | + int i, terminating; |
|---|
| 1203 | + |
|---|
| 1204 | + terminating = atomic_xchg(&assoc->terminating, 1); |
|---|
| 1205 | + |
|---|
| 1206 | + /* if already terminating, do nothing */ |
|---|
| 1207 | + if (terminating) |
|---|
| 1208 | + return; |
|---|
| 921 | 1209 | |
|---|
| 922 | 1210 | spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 923 | 1211 | for (i = NVMET_NR_QUEUES; i >= 0; i--) { |
|---|
| .. | .. |
|---|
| 932 | 1220 | } |
|---|
| 933 | 1221 | } |
|---|
| 934 | 1222 | spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1223 | + |
|---|
| 1224 | + dev_info(tgtport->dev, |
|---|
| 1225 | + "{%d:%d} Association deleted\n", |
|---|
| 1226 | + tgtport->fc_target_port.port_num, assoc->a_id); |
|---|
| 935 | 1227 | |
|---|
| 936 | 1228 | nvmet_fc_tgt_a_put(assoc); |
|---|
| 937 | 1229 | } |
|---|
| .. | .. |
|---|
| 948 | 1240 | list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { |
|---|
| 949 | 1241 | if (association_id == assoc->association_id) { |
|---|
| 950 | 1242 | ret = assoc; |
|---|
| 951 | | - nvmet_fc_tgt_a_get(assoc); |
|---|
| 1243 | + if (!nvmet_fc_tgt_a_get(assoc)) |
|---|
| 1244 | + ret = NULL; |
|---|
| 952 | 1245 | break; |
|---|
| 953 | 1246 | } |
|---|
| 954 | 1247 | } |
|---|
| .. | .. |
|---|
| 957 | 1250 | return ret; |
|---|
| 958 | 1251 | } |
|---|
| 959 | 1252 | |
|---|
| 1253 | +static void |
|---|
| 1254 | +nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, |
|---|
| 1255 | + struct nvmet_fc_port_entry *pe, |
|---|
| 1256 | + struct nvmet_port *port) |
|---|
| 1257 | +{ |
|---|
| 1258 | + lockdep_assert_held(&nvmet_fc_tgtlock); |
|---|
| 1259 | + |
|---|
| 1260 | + pe->tgtport = tgtport; |
|---|
| 1261 | + tgtport->pe = pe; |
|---|
| 1262 | + |
|---|
| 1263 | + pe->port = port; |
|---|
| 1264 | + port->priv = pe; |
|---|
| 1265 | + |
|---|
| 1266 | + pe->node_name = tgtport->fc_target_port.node_name; |
|---|
| 1267 | + pe->port_name = tgtport->fc_target_port.port_name; |
|---|
| 1268 | + INIT_LIST_HEAD(&pe->pe_list); |
|---|
| 1269 | + |
|---|
| 1270 | + list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); |
|---|
| 1271 | +} |
|---|
| 1272 | + |
|---|
| 1273 | +static void |
|---|
| 1274 | +nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) |
|---|
| 1275 | +{ |
|---|
| 1276 | + unsigned long flags; |
|---|
| 1277 | + |
|---|
| 1278 | + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
|---|
| 1279 | + if (pe->tgtport) |
|---|
| 1280 | + pe->tgtport->pe = NULL; |
|---|
| 1281 | + list_del(&pe->pe_list); |
|---|
| 1282 | + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
|---|
| 1283 | +} |
|---|
| 1284 | + |
|---|
| 1285 | +/* |
|---|
| 1286 | + * called when a targetport deregisters. Breaks the relationship |
|---|
| 1287 | + * with the nvmet port, but leaves the port_entry in place so that |
|---|
| 1288 | + * re-registration can resume operation. |
|---|
| 1289 | + */ |
|---|
| 1290 | +static void |
|---|
| 1291 | +nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) |
|---|
| 1292 | +{ |
|---|
| 1293 | + struct nvmet_fc_port_entry *pe; |
|---|
| 1294 | + unsigned long flags; |
|---|
| 1295 | + |
|---|
| 1296 | + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
|---|
| 1297 | + pe = tgtport->pe; |
|---|
| 1298 | + if (pe) |
|---|
| 1299 | + pe->tgtport = NULL; |
|---|
| 1300 | + tgtport->pe = NULL; |
|---|
| 1301 | + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
|---|
| 1302 | +} |
|---|
| 1303 | + |
|---|
| 1304 | +/* |
|---|
| 1305 | + * called when a new targetport is registered. Looks in the |
|---|
| 1306 | + * existing nvmet port_entries to see if the nvmet layer is |
|---|
| 1307 | + * configured for the targetport's wwn's. (the targetport existed, |
|---|
| 1308 | + * nvmet configured, the lldd unregistered the tgtport, and is now |
|---|
| 1309 | + * reregistering the same targetport). If so, set the nvmet port |
|---|
| 1310 | + * port entry on the targetport. |
|---|
| 1311 | + */ |
|---|
| 1312 | +static void |
|---|
| 1313 | +nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) |
|---|
| 1314 | +{ |
|---|
| 1315 | + struct nvmet_fc_port_entry *pe; |
|---|
| 1316 | + unsigned long flags; |
|---|
| 1317 | + |
|---|
| 1318 | + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
|---|
| 1319 | + list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { |
|---|
| 1320 | + if (tgtport->fc_target_port.node_name == pe->node_name && |
|---|
| 1321 | + tgtport->fc_target_port.port_name == pe->port_name) { |
|---|
| 1322 | + WARN_ON(pe->tgtport); |
|---|
| 1323 | + tgtport->pe = pe; |
|---|
| 1324 | + pe->tgtport = tgtport; |
|---|
| 1325 | + break; |
|---|
| 1326 | + } |
|---|
| 1327 | + } |
|---|
| 1328 | + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
|---|
| 1329 | +} |
|---|
| 960 | 1330 | |
|---|
| 961 | 1331 | /** |
|---|
| 962 | 1332 | * nvme_fc_register_targetport - transport entry point called by an |
|---|
| .. | .. |
|---|
| 1014 | 1384 | |
|---|
| 1015 | 1385 | newrec->fc_target_port.node_name = pinfo->node_name; |
|---|
| 1016 | 1386 | newrec->fc_target_port.port_name = pinfo->port_name; |
|---|
| 1017 | | - newrec->fc_target_port.private = &newrec[1]; |
|---|
| 1387 | + if (template->target_priv_sz) |
|---|
| 1388 | + newrec->fc_target_port.private = &newrec[1]; |
|---|
| 1389 | + else |
|---|
| 1390 | + newrec->fc_target_port.private = NULL; |
|---|
| 1018 | 1391 | newrec->fc_target_port.port_id = pinfo->port_id; |
|---|
| 1019 | 1392 | newrec->fc_target_port.port_num = idx; |
|---|
| 1020 | 1393 | INIT_LIST_HEAD(&newrec->tgt_list); |
|---|
| 1021 | 1394 | newrec->dev = dev; |
|---|
| 1022 | 1395 | newrec->ops = template; |
|---|
| 1023 | 1396 | spin_lock_init(&newrec->lock); |
|---|
| 1024 | | - INIT_LIST_HEAD(&newrec->ls_list); |
|---|
| 1397 | + INIT_LIST_HEAD(&newrec->ls_rcv_list); |
|---|
| 1398 | + INIT_LIST_HEAD(&newrec->ls_req_list); |
|---|
| 1025 | 1399 | INIT_LIST_HEAD(&newrec->ls_busylist); |
|---|
| 1026 | 1400 | INIT_LIST_HEAD(&newrec->assoc_list); |
|---|
| 1401 | + INIT_LIST_HEAD(&newrec->host_list); |
|---|
| 1027 | 1402 | kref_init(&newrec->ref); |
|---|
| 1028 | 1403 | ida_init(&newrec->assoc_cnt); |
|---|
| 1029 | 1404 | newrec->max_sg_cnt = template->max_sgl_segments; |
|---|
| .. | .. |
|---|
| 1033 | 1408 | ret = -ENOMEM; |
|---|
| 1034 | 1409 | goto out_free_newrec; |
|---|
| 1035 | 1410 | } |
|---|
| 1411 | + |
|---|
| 1412 | + nvmet_fc_portentry_rebind_tgt(newrec); |
|---|
| 1036 | 1413 | |
|---|
| 1037 | 1414 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
|---|
| 1038 | 1415 | list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); |
|---|
| .. | .. |
|---|
| 1104 | 1481 | &tgtport->assoc_list, a_list) { |
|---|
| 1105 | 1482 | if (!nvmet_fc_tgt_a_get(assoc)) |
|---|
| 1106 | 1483 | continue; |
|---|
| 1107 | | - spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1108 | | - nvmet_fc_delete_target_assoc(assoc); |
|---|
| 1109 | | - nvmet_fc_tgt_a_put(assoc); |
|---|
| 1110 | | - spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 1484 | + if (!schedule_work(&assoc->del_work)) |
|---|
| 1485 | + /* already deleting - release local reference */ |
|---|
| 1486 | + nvmet_fc_tgt_a_put(assoc); |
|---|
| 1111 | 1487 | } |
|---|
| 1112 | 1488 | spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1113 | 1489 | } |
|---|
| 1490 | + |
|---|
| 1491 | +/** |
|---|
| 1492 | + * nvmet_fc_invalidate_host - transport entry point called by an LLDD |
|---|
| 1493 | + * to remove references to a hosthandle for LS's. |
|---|
| 1494 | + * |
|---|
| 1495 | + * The nvmet-fc layer ensures that any references to the hosthandle |
|---|
| 1496 | + * on the targetport are forgotten (set to NULL). The LLDD will |
|---|
| 1497 | + * typically call this when a login with a remote host port has been |
|---|
| 1498 | + * lost, thus LS's for the remote host port are no longer possible. |
|---|
| 1499 | + * |
|---|
| 1500 | + * If an LS request is outstanding to the targetport/hosthandle (or |
|---|
| 1501 | + * issued concurrently with the call to invalidate the host), the |
|---|
| 1502 | + * LLDD is responsible for terminating/aborting the LS and completing |
|---|
| 1503 | + * the LS request. It is recommended that these terminations/aborts |
|---|
| 1504 | + * occur after calling to invalidate the host handle to avoid additional |
|---|
| 1505 | + * retries by the nvmet-fc transport. The nvmet-fc transport may |
|---|
| 1506 | + * continue to reference host handle while it cleans up outstanding |
|---|
| 1507 | + * NVME associations. The nvmet-fc transport will call the |
|---|
| 1508 | + * ops->host_release() callback to notify the LLDD that all references |
|---|
| 1509 | + * are complete and the related host handle can be recovered. |
|---|
| 1510 | + * Note: if there are no references, the callback may be called before |
|---|
| 1511 | + * the invalidate host call returns. |
|---|
| 1512 | + * |
|---|
| 1513 | + * @target_port: pointer to the (registered) target port that a prior |
|---|
| 1514 | + * LS was received on and which supplied the transport the |
|---|
| 1515 | + * hosthandle. |
|---|
| 1516 | + * @hosthandle: the handle (pointer) that represents the host port |
|---|
| 1517 | + * that no longer has connectivity and that LS's should |
|---|
| 1518 | + * no longer be directed to. |
|---|
| 1519 | + */ |
|---|
| 1520 | +void |
|---|
| 1521 | +nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, |
|---|
| 1522 | + void *hosthandle) |
|---|
| 1523 | +{ |
|---|
| 1524 | + struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); |
|---|
| 1525 | + struct nvmet_fc_tgt_assoc *assoc, *next; |
|---|
| 1526 | + unsigned long flags; |
|---|
| 1527 | + bool noassoc = true; |
|---|
| 1528 | + |
|---|
| 1529 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 1530 | + list_for_each_entry_safe(assoc, next, |
|---|
| 1531 | + &tgtport->assoc_list, a_list) { |
|---|
| 1532 | + if (!assoc->hostport || |
|---|
| 1533 | + assoc->hostport->hosthandle != hosthandle) |
|---|
| 1534 | + continue; |
|---|
| 1535 | + if (!nvmet_fc_tgt_a_get(assoc)) |
|---|
| 1536 | + continue; |
|---|
| 1537 | + assoc->hostport->invalid = 1; |
|---|
| 1538 | + noassoc = false; |
|---|
| 1539 | + if (!schedule_work(&assoc->del_work)) |
|---|
| 1540 | + /* already deleting - release local reference */ |
|---|
| 1541 | + nvmet_fc_tgt_a_put(assoc); |
|---|
| 1542 | + } |
|---|
| 1543 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1544 | + |
|---|
| 1545 | + /* if there's nothing to wait for - call the callback */ |
|---|
| 1546 | + if (noassoc && tgtport->ops->host_release) |
|---|
| 1547 | + tgtport->ops->host_release(hosthandle); |
|---|
| 1548 | +} |
|---|
| 1549 | +EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); |
|---|
| 1114 | 1550 | |
|---|
| 1115 | 1551 | /* |
|---|
| 1116 | 1552 | * nvmet layer has called to terminate an association |
|---|
| .. | .. |
|---|
| 1146 | 1582 | nvmet_fc_tgtport_put(tgtport); |
|---|
| 1147 | 1583 | |
|---|
| 1148 | 1584 | if (found_ctrl) { |
|---|
| 1149 | | - schedule_work(&assoc->del_work); |
|---|
| 1585 | + if (!schedule_work(&assoc->del_work)) |
|---|
| 1586 | + /* already deleting - release local reference */ |
|---|
| 1587 | + nvmet_fc_tgt_a_put(assoc); |
|---|
| 1150 | 1588 | return; |
|---|
| 1151 | 1589 | } |
|---|
| 1152 | 1590 | |
|---|
| .. | .. |
|---|
| 1159 | 1597 | * nvme_fc_unregister_targetport - transport entry point called by an |
|---|
| 1160 | 1598 | * LLDD to deregister/remove a previously |
|---|
| 1161 | 1599 | * registered a local NVME subsystem FC port. |
|---|
| 1162 | | - * @tgtport: pointer to the (registered) target port that is to be |
|---|
| 1163 | | - * deregistered. |
|---|
| 1600 | + * @target_port: pointer to the (registered) target port that is to be |
|---|
| 1601 | + * deregistered. |
|---|
| 1164 | 1602 | * |
|---|
| 1165 | 1603 | * Returns: |
|---|
| 1166 | 1604 | * a completion status. Must be 0 upon success; a negative errno |
|---|
| .. | .. |
|---|
| 1171 | 1609 | { |
|---|
| 1172 | 1610 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); |
|---|
| 1173 | 1611 | |
|---|
| 1612 | + nvmet_fc_portentry_unbind_tgt(tgtport); |
|---|
| 1613 | + |
|---|
| 1174 | 1614 | /* terminate any outstanding associations */ |
|---|
| 1175 | 1615 | __nvmet_fc_free_assocs(tgtport); |
|---|
| 1616 | + |
|---|
| 1617 | + /* |
|---|
| 1618 | + * should terminate LS's as well. However, LS's will be generated |
|---|
| 1619 | + * at the tail end of association termination, so they likely don't |
|---|
| 1620 | + * exist yet. And even if they did, it's worthwhile to just let |
|---|
| 1621 | + * them finish and targetport ref counting will clean things up. |
|---|
| 1622 | + */ |
|---|
| 1176 | 1623 | |
|---|
| 1177 | 1624 | nvmet_fc_tgtport_put(tgtport); |
|---|
| 1178 | 1625 | |
|---|
| .. | .. |
|---|
| 1181 | 1628 | EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); |
|---|
| 1182 | 1629 | |
|---|
| 1183 | 1630 | |
|---|
| 1184 | | -/* *********************** FC-NVME LS Handling **************************** */ |
|---|
| 1631 | +/* ********************** FC-NVME LS RCV Handling ************************* */ |
|---|
| 1185 | 1632 | |
|---|
| 1186 | | - |
|---|
| 1187 | | -static void |
|---|
| 1188 | | -nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd) |
|---|
| 1189 | | -{ |
|---|
| 1190 | | - struct fcnvme_ls_acc_hdr *acc = buf; |
|---|
| 1191 | | - |
|---|
| 1192 | | - acc->w0.ls_cmd = ls_cmd; |
|---|
| 1193 | | - acc->desc_list_len = desc_len; |
|---|
| 1194 | | - acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); |
|---|
| 1195 | | - acc->rqst.desc_len = |
|---|
| 1196 | | - fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); |
|---|
| 1197 | | - acc->rqst.w0.ls_cmd = rqst_ls_cmd; |
|---|
| 1198 | | -} |
|---|
| 1199 | | - |
|---|
| 1200 | | -static int |
|---|
| 1201 | | -nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd, |
|---|
| 1202 | | - u8 reason, u8 explanation, u8 vendor) |
|---|
| 1203 | | -{ |
|---|
| 1204 | | - struct fcnvme_ls_rjt *rjt = buf; |
|---|
| 1205 | | - |
|---|
| 1206 | | - nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST, |
|---|
| 1207 | | - fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)), |
|---|
| 1208 | | - ls_cmd); |
|---|
| 1209 | | - rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); |
|---|
| 1210 | | - rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); |
|---|
| 1211 | | - rjt->rjt.reason_code = reason; |
|---|
| 1212 | | - rjt->rjt.reason_explanation = explanation; |
|---|
| 1213 | | - rjt->rjt.vendor = vendor; |
|---|
| 1214 | | - |
|---|
| 1215 | | - return sizeof(struct fcnvme_ls_rjt); |
|---|
| 1216 | | -} |
|---|
| 1217 | | - |
|---|
| 1218 | | -/* Validation Error indexes into the string table below */ |
|---|
| 1219 | | -enum { |
|---|
| 1220 | | - VERR_NO_ERROR = 0, |
|---|
| 1221 | | - VERR_CR_ASSOC_LEN = 1, |
|---|
| 1222 | | - VERR_CR_ASSOC_RQST_LEN = 2, |
|---|
| 1223 | | - VERR_CR_ASSOC_CMD = 3, |
|---|
| 1224 | | - VERR_CR_ASSOC_CMD_LEN = 4, |
|---|
| 1225 | | - VERR_ERSP_RATIO = 5, |
|---|
| 1226 | | - VERR_ASSOC_ALLOC_FAIL = 6, |
|---|
| 1227 | | - VERR_QUEUE_ALLOC_FAIL = 7, |
|---|
| 1228 | | - VERR_CR_CONN_LEN = 8, |
|---|
| 1229 | | - VERR_CR_CONN_RQST_LEN = 9, |
|---|
| 1230 | | - VERR_ASSOC_ID = 10, |
|---|
| 1231 | | - VERR_ASSOC_ID_LEN = 11, |
|---|
| 1232 | | - VERR_NO_ASSOC = 12, |
|---|
| 1233 | | - VERR_CONN_ID = 13, |
|---|
| 1234 | | - VERR_CONN_ID_LEN = 14, |
|---|
| 1235 | | - VERR_NO_CONN = 15, |
|---|
| 1236 | | - VERR_CR_CONN_CMD = 16, |
|---|
| 1237 | | - VERR_CR_CONN_CMD_LEN = 17, |
|---|
| 1238 | | - VERR_DISCONN_LEN = 18, |
|---|
| 1239 | | - VERR_DISCONN_RQST_LEN = 19, |
|---|
| 1240 | | - VERR_DISCONN_CMD = 20, |
|---|
| 1241 | | - VERR_DISCONN_CMD_LEN = 21, |
|---|
| 1242 | | - VERR_DISCONN_SCOPE = 22, |
|---|
| 1243 | | - VERR_RS_LEN = 23, |
|---|
| 1244 | | - VERR_RS_RQST_LEN = 24, |
|---|
| 1245 | | - VERR_RS_CMD = 25, |
|---|
| 1246 | | - VERR_RS_CMD_LEN = 26, |
|---|
| 1247 | | - VERR_RS_RCTL = 27, |
|---|
| 1248 | | - VERR_RS_RO = 28, |
|---|
| 1249 | | -}; |
|---|
| 1250 | | - |
|---|
| 1251 | | -static char *validation_errors[] = { |
|---|
| 1252 | | - "OK", |
|---|
| 1253 | | - "Bad CR_ASSOC Length", |
|---|
| 1254 | | - "Bad CR_ASSOC Rqst Length", |
|---|
| 1255 | | - "Not CR_ASSOC Cmd", |
|---|
| 1256 | | - "Bad CR_ASSOC Cmd Length", |
|---|
| 1257 | | - "Bad Ersp Ratio", |
|---|
| 1258 | | - "Association Allocation Failed", |
|---|
| 1259 | | - "Queue Allocation Failed", |
|---|
| 1260 | | - "Bad CR_CONN Length", |
|---|
| 1261 | | - "Bad CR_CONN Rqst Length", |
|---|
| 1262 | | - "Not Association ID", |
|---|
| 1263 | | - "Bad Association ID Length", |
|---|
| 1264 | | - "No Association", |
|---|
| 1265 | | - "Not Connection ID", |
|---|
| 1266 | | - "Bad Connection ID Length", |
|---|
| 1267 | | - "No Connection", |
|---|
| 1268 | | - "Not CR_CONN Cmd", |
|---|
| 1269 | | - "Bad CR_CONN Cmd Length", |
|---|
| 1270 | | - "Bad DISCONN Length", |
|---|
| 1271 | | - "Bad DISCONN Rqst Length", |
|---|
| 1272 | | - "Not DISCONN Cmd", |
|---|
| 1273 | | - "Bad DISCONN Cmd Length", |
|---|
| 1274 | | - "Bad Disconnect Scope", |
|---|
| 1275 | | - "Bad RS Length", |
|---|
| 1276 | | - "Bad RS Rqst Length", |
|---|
| 1277 | | - "Not RS Cmd", |
|---|
| 1278 | | - "Bad RS Cmd Length", |
|---|
| 1279 | | - "Bad RS R_CTL", |
|---|
| 1280 | | - "Bad RS Relative Offset", |
|---|
| 1281 | | -}; |
|---|
| 1282 | 1633 | |
|---|
| 1283 | 1634 | static void |
|---|
| 1284 | 1635 | nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, |
|---|
| 1285 | 1636 | struct nvmet_fc_ls_iod *iod) |
|---|
| 1286 | 1637 | { |
|---|
| 1287 | | - struct fcnvme_ls_cr_assoc_rqst *rqst = |
|---|
| 1288 | | - (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf; |
|---|
| 1289 | | - struct fcnvme_ls_cr_assoc_acc *acc = |
|---|
| 1290 | | - (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf; |
|---|
| 1638 | + struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; |
|---|
| 1639 | + struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; |
|---|
| 1291 | 1640 | struct nvmet_fc_tgt_queue *queue; |
|---|
| 1292 | 1641 | int ret = 0; |
|---|
| 1293 | 1642 | |
|---|
| .. | .. |
|---|
| 1319 | 1668 | |
|---|
| 1320 | 1669 | else { |
|---|
| 1321 | 1670 | /* new association w/ admin queue */ |
|---|
| 1322 | | - iod->assoc = nvmet_fc_alloc_target_assoc(tgtport); |
|---|
| 1671 | + iod->assoc = nvmet_fc_alloc_target_assoc( |
|---|
| 1672 | + tgtport, iod->hosthandle); |
|---|
| 1323 | 1673 | if (!iod->assoc) |
|---|
| 1324 | 1674 | ret = VERR_ASSOC_ALLOC_FAIL; |
|---|
| 1325 | 1675 | else { |
|---|
| .. | .. |
|---|
| 1334 | 1684 | dev_err(tgtport->dev, |
|---|
| 1335 | 1685 | "Create Association LS failed: %s\n", |
|---|
| 1336 | 1686 | validation_errors[ret]); |
|---|
| 1337 | | - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, |
|---|
| 1338 | | - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, |
|---|
| 1687 | + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, |
|---|
| 1688 | + sizeof(*acc), rqst->w0.ls_cmd, |
|---|
| 1339 | 1689 | FCNVME_RJT_RC_LOGIC, |
|---|
| 1340 | 1690 | FCNVME_RJT_EXP_NONE, 0); |
|---|
| 1341 | 1691 | return; |
|---|
| .. | .. |
|---|
| 1345 | 1695 | atomic_set(&queue->connected, 1); |
|---|
| 1346 | 1696 | queue->sqhd = 0; /* best place to init value */ |
|---|
| 1347 | 1697 | |
|---|
| 1698 | + dev_info(tgtport->dev, |
|---|
| 1699 | + "{%d:%d} Association created\n", |
|---|
| 1700 | + tgtport->fc_target_port.port_num, iod->assoc->a_id); |
|---|
| 1701 | + |
|---|
| 1348 | 1702 | /* format a response */ |
|---|
| 1349 | 1703 | |
|---|
| 1350 | | - iod->lsreq->rsplen = sizeof(*acc); |
|---|
| 1704 | + iod->lsrsp->rsplen = sizeof(*acc); |
|---|
| 1351 | 1705 | |
|---|
| 1352 | | - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, |
|---|
| 1706 | + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, |
|---|
| 1353 | 1707 | fcnvme_lsdesc_len( |
|---|
| 1354 | 1708 | sizeof(struct fcnvme_ls_cr_assoc_acc)), |
|---|
| 1355 | 1709 | FCNVME_LS_CREATE_ASSOCIATION); |
|---|
| .. | .. |
|---|
| 1370 | 1724 | nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, |
|---|
| 1371 | 1725 | struct nvmet_fc_ls_iod *iod) |
|---|
| 1372 | 1726 | { |
|---|
| 1373 | | - struct fcnvme_ls_cr_conn_rqst *rqst = |
|---|
| 1374 | | - (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf; |
|---|
| 1375 | | - struct fcnvme_ls_cr_conn_acc *acc = |
|---|
| 1376 | | - (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf; |
|---|
| 1727 | + struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; |
|---|
| 1728 | + struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; |
|---|
| 1377 | 1729 | struct nvmet_fc_tgt_queue *queue; |
|---|
| 1378 | 1730 | int ret = 0; |
|---|
| 1379 | 1731 | |
|---|
| .. | .. |
|---|
| 1425 | 1777 | dev_err(tgtport->dev, |
|---|
| 1426 | 1778 | "Create Connection LS failed: %s\n", |
|---|
| 1427 | 1779 | validation_errors[ret]); |
|---|
| 1428 | | - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, |
|---|
| 1429 | | - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, |
|---|
| 1780 | + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, |
|---|
| 1781 | + sizeof(*acc), rqst->w0.ls_cmd, |
|---|
| 1430 | 1782 | (ret == VERR_NO_ASSOC) ? |
|---|
| 1431 | 1783 | FCNVME_RJT_RC_INV_ASSOC : |
|---|
| 1432 | 1784 | FCNVME_RJT_RC_LOGIC, |
|---|
| .. | .. |
|---|
| 1440 | 1792 | |
|---|
| 1441 | 1793 | /* format a response */ |
|---|
| 1442 | 1794 | |
|---|
| 1443 | | - iod->lsreq->rsplen = sizeof(*acc); |
|---|
| 1795 | + iod->lsrsp->rsplen = sizeof(*acc); |
|---|
| 1444 | 1796 | |
|---|
| 1445 | | - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, |
|---|
| 1797 | + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, |
|---|
| 1446 | 1798 | fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), |
|---|
| 1447 | 1799 | FCNVME_LS_CREATE_CONNECTION); |
|---|
| 1448 | 1800 | acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); |
|---|
| .. | .. |
|---|
| 1454 | 1806 | be16_to_cpu(rqst->connect_cmd.qid))); |
|---|
| 1455 | 1807 | } |
|---|
| 1456 | 1808 | |
|---|
| 1457 | | -static void |
|---|
| 1809 | +/* |
|---|
| 1810 | + * Returns true if the LS response is to be transmit |
|---|
| 1811 | + * Returns false if the LS response is to be delayed |
|---|
| 1812 | + */ |
|---|
| 1813 | +static int |
|---|
| 1458 | 1814 | nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, |
|---|
| 1459 | 1815 | struct nvmet_fc_ls_iod *iod) |
|---|
| 1460 | 1816 | { |
|---|
| 1461 | | - struct fcnvme_ls_disconnect_rqst *rqst = |
|---|
| 1462 | | - (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; |
|---|
| 1463 | | - struct fcnvme_ls_disconnect_acc *acc = |
|---|
| 1464 | | - (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; |
|---|
| 1465 | | - struct nvmet_fc_tgt_queue *queue = NULL; |
|---|
| 1466 | | - struct nvmet_fc_tgt_assoc *assoc; |
|---|
| 1817 | + struct fcnvme_ls_disconnect_assoc_rqst *rqst = |
|---|
| 1818 | + &iod->rqstbuf->rq_dis_assoc; |
|---|
| 1819 | + struct fcnvme_ls_disconnect_assoc_acc *acc = |
|---|
| 1820 | + &iod->rspbuf->rsp_dis_assoc; |
|---|
| 1821 | + struct nvmet_fc_tgt_assoc *assoc = NULL; |
|---|
| 1822 | + struct nvmet_fc_ls_iod *oldls = NULL; |
|---|
| 1823 | + unsigned long flags; |
|---|
| 1467 | 1824 | int ret = 0; |
|---|
| 1468 | | - bool del_assoc = false; |
|---|
| 1469 | 1825 | |
|---|
| 1470 | 1826 | memset(acc, 0, sizeof(*acc)); |
|---|
| 1471 | 1827 | |
|---|
| 1472 | | - if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst)) |
|---|
| 1473 | | - ret = VERR_DISCONN_LEN; |
|---|
| 1474 | | - else if (rqst->desc_list_len != |
|---|
| 1475 | | - fcnvme_lsdesc_len( |
|---|
| 1476 | | - sizeof(struct fcnvme_ls_disconnect_rqst))) |
|---|
| 1477 | | - ret = VERR_DISCONN_RQST_LEN; |
|---|
| 1478 | | - else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) |
|---|
| 1479 | | - ret = VERR_ASSOC_ID; |
|---|
| 1480 | | - else if (rqst->associd.desc_len != |
|---|
| 1481 | | - fcnvme_lsdesc_len( |
|---|
| 1482 | | - sizeof(struct fcnvme_lsdesc_assoc_id))) |
|---|
| 1483 | | - ret = VERR_ASSOC_ID_LEN; |
|---|
| 1484 | | - else if (rqst->discon_cmd.desc_tag != |
|---|
| 1485 | | - cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD)) |
|---|
| 1486 | | - ret = VERR_DISCONN_CMD; |
|---|
| 1487 | | - else if (rqst->discon_cmd.desc_len != |
|---|
| 1488 | | - fcnvme_lsdesc_len( |
|---|
| 1489 | | - sizeof(struct fcnvme_lsdesc_disconn_cmd))) |
|---|
| 1490 | | - ret = VERR_DISCONN_CMD_LEN; |
|---|
| 1491 | | - else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) && |
|---|
| 1492 | | - (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION)) |
|---|
| 1493 | | - ret = VERR_DISCONN_SCOPE; |
|---|
| 1494 | | - else { |
|---|
| 1495 | | - /* match an active association */ |
|---|
| 1828 | + ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); |
|---|
| 1829 | + if (!ret) { |
|---|
| 1830 | + /* match an active association - takes an assoc ref if !NULL */ |
|---|
| 1496 | 1831 | assoc = nvmet_fc_find_target_assoc(tgtport, |
|---|
| 1497 | 1832 | be64_to_cpu(rqst->associd.association_id)); |
|---|
| 1498 | 1833 | iod->assoc = assoc; |
|---|
| 1499 | | - if (assoc) { |
|---|
| 1500 | | - if (rqst->discon_cmd.scope == |
|---|
| 1501 | | - FCNVME_DISCONN_CONNECTION) { |
|---|
| 1502 | | - queue = nvmet_fc_find_target_queue(tgtport, |
|---|
| 1503 | | - be64_to_cpu( |
|---|
| 1504 | | - rqst->discon_cmd.id)); |
|---|
| 1505 | | - if (!queue) { |
|---|
| 1506 | | - nvmet_fc_tgt_a_put(assoc); |
|---|
| 1507 | | - ret = VERR_NO_CONN; |
|---|
| 1508 | | - } |
|---|
| 1509 | | - } |
|---|
| 1510 | | - } else |
|---|
| 1834 | + if (!assoc) |
|---|
| 1511 | 1835 | ret = VERR_NO_ASSOC; |
|---|
| 1512 | 1836 | } |
|---|
| 1513 | 1837 | |
|---|
| 1514 | | - if (ret) { |
|---|
| 1838 | + if (ret || !assoc) { |
|---|
| 1515 | 1839 | dev_err(tgtport->dev, |
|---|
| 1516 | 1840 | "Disconnect LS failed: %s\n", |
|---|
| 1517 | 1841 | validation_errors[ret]); |
|---|
| 1518 | | - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, |
|---|
| 1519 | | - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, |
|---|
| 1842 | + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, |
|---|
| 1843 | + sizeof(*acc), rqst->w0.ls_cmd, |
|---|
| 1520 | 1844 | (ret == VERR_NO_ASSOC) ? |
|---|
| 1521 | 1845 | FCNVME_RJT_RC_INV_ASSOC : |
|---|
| 1522 | | - (ret == VERR_NO_CONN) ? |
|---|
| 1523 | | - FCNVME_RJT_RC_INV_CONN : |
|---|
| 1524 | | - FCNVME_RJT_RC_LOGIC, |
|---|
| 1846 | + FCNVME_RJT_RC_LOGIC, |
|---|
| 1525 | 1847 | FCNVME_RJT_EXP_NONE, 0); |
|---|
| 1526 | | - return; |
|---|
| 1848 | + return true; |
|---|
| 1527 | 1849 | } |
|---|
| 1528 | 1850 | |
|---|
| 1529 | 1851 | /* format a response */ |
|---|
| 1530 | 1852 | |
|---|
| 1531 | | - iod->lsreq->rsplen = sizeof(*acc); |
|---|
| 1853 | + iod->lsrsp->rsplen = sizeof(*acc); |
|---|
| 1532 | 1854 | |
|---|
| 1533 | | - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, |
|---|
| 1855 | + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, |
|---|
| 1534 | 1856 | fcnvme_lsdesc_len( |
|---|
| 1535 | | - sizeof(struct fcnvme_ls_disconnect_acc)), |
|---|
| 1536 | | - FCNVME_LS_DISCONNECT); |
|---|
| 1537 | | - |
|---|
| 1538 | | - |
|---|
| 1539 | | - /* are we to delete a Connection ID (queue) */ |
|---|
| 1540 | | - if (queue) { |
|---|
| 1541 | | - int qid = queue->qid; |
|---|
| 1542 | | - |
|---|
| 1543 | | - nvmet_fc_delete_target_queue(queue); |
|---|
| 1544 | | - |
|---|
| 1545 | | - /* release the get taken by find_target_queue */ |
|---|
| 1546 | | - nvmet_fc_tgt_q_put(queue); |
|---|
| 1547 | | - |
|---|
| 1548 | | - /* tear association down if io queue terminated */ |
|---|
| 1549 | | - if (!qid) |
|---|
| 1550 | | - del_assoc = true; |
|---|
| 1551 | | - } |
|---|
| 1857 | + sizeof(struct fcnvme_ls_disconnect_assoc_acc)), |
|---|
| 1858 | + FCNVME_LS_DISCONNECT_ASSOC); |
|---|
| 1552 | 1859 | |
|---|
| 1553 | 1860 | /* release get taken in nvmet_fc_find_target_assoc */ |
|---|
| 1554 | | - nvmet_fc_tgt_a_put(iod->assoc); |
|---|
| 1861 | + nvmet_fc_tgt_a_put(assoc); |
|---|
| 1555 | 1862 | |
|---|
| 1556 | | - if (del_assoc) |
|---|
| 1557 | | - nvmet_fc_delete_target_assoc(iod->assoc); |
|---|
| 1863 | + /* |
|---|
| 1864 | + * The rules for LS response says the response cannot |
|---|
| 1865 | + * go back until ABTS's have been sent for all outstanding |
|---|
| 1866 | + * I/O and a Disconnect Association LS has been sent. |
|---|
| 1867 | + * So... save off the Disconnect LS to send the response |
|---|
| 1868 | + * later. If there was a prior LS already saved, replace |
|---|
| 1869 | + * it with the newer one and send a can't perform reject |
|---|
| 1870 | + * on the older one. |
|---|
| 1871 | + */ |
|---|
| 1872 | + spin_lock_irqsave(&tgtport->lock, flags); |
|---|
| 1873 | + oldls = assoc->rcv_disconn; |
|---|
| 1874 | + assoc->rcv_disconn = iod; |
|---|
| 1875 | + spin_unlock_irqrestore(&tgtport->lock, flags); |
|---|
| 1876 | + |
|---|
| 1877 | + nvmet_fc_delete_target_assoc(assoc); |
|---|
| 1878 | + |
|---|
| 1879 | + if (oldls) { |
|---|
| 1880 | + dev_info(tgtport->dev, |
|---|
| 1881 | + "{%d:%d} Multiple Disconnect Association LS's " |
|---|
| 1882 | + "received\n", |
|---|
| 1883 | + tgtport->fc_target_port.port_num, assoc->a_id); |
|---|
| 1884 | + /* overwrite good response with bogus failure */ |
|---|
| 1885 | + oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, |
|---|
| 1886 | + sizeof(*iod->rspbuf), |
|---|
| 1887 | + /* ok to use rqst, LS is same */ |
|---|
| 1888 | + rqst->w0.ls_cmd, |
|---|
| 1889 | + FCNVME_RJT_RC_UNAB, |
|---|
| 1890 | + FCNVME_RJT_EXP_NONE, 0); |
|---|
| 1891 | + nvmet_fc_xmt_ls_rsp(tgtport, oldls); |
|---|
| 1892 | + } |
|---|
| 1893 | + |
|---|
| 1894 | + return false; |
|---|
| 1558 | 1895 | } |
|---|
| 1559 | 1896 | |
|---|
| 1560 | 1897 | |
|---|
| .. | .. |
|---|
| 1566 | 1903 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; |
|---|
| 1567 | 1904 | |
|---|
| 1568 | 1905 | static void |
|---|
| 1569 | | -nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq) |
|---|
| 1906 | +nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) |
|---|
| 1570 | 1907 | { |
|---|
| 1571 | | - struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private; |
|---|
| 1908 | + struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; |
|---|
| 1572 | 1909 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
|---|
| 1573 | 1910 | |
|---|
| 1574 | 1911 | fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, |
|---|
| 1575 | | - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); |
|---|
| 1912 | + sizeof(*iod->rspbuf), DMA_TO_DEVICE); |
|---|
| 1576 | 1913 | nvmet_fc_free_ls_iod(tgtport, iod); |
|---|
| 1577 | 1914 | nvmet_fc_tgtport_put(tgtport); |
|---|
| 1578 | 1915 | } |
|---|
| .. | .. |
|---|
| 1584 | 1921 | int ret; |
|---|
| 1585 | 1922 | |
|---|
| 1586 | 1923 | fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, |
|---|
| 1587 | | - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); |
|---|
| 1924 | + sizeof(*iod->rspbuf), DMA_TO_DEVICE); |
|---|
| 1588 | 1925 | |
|---|
| 1589 | | - ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq); |
|---|
| 1926 | + ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); |
|---|
| 1590 | 1927 | if (ret) |
|---|
| 1591 | | - nvmet_fc_xmt_ls_rsp_done(iod->lsreq); |
|---|
| 1928 | + nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); |
|---|
| 1592 | 1929 | } |
|---|
| 1593 | 1930 | |
|---|
| 1594 | 1931 | /* |
|---|
| .. | .. |
|---|
| 1598 | 1935 | nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, |
|---|
| 1599 | 1936 | struct nvmet_fc_ls_iod *iod) |
|---|
| 1600 | 1937 | { |
|---|
| 1601 | | - struct fcnvme_ls_rqst_w0 *w0 = |
|---|
| 1602 | | - (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf; |
|---|
| 1938 | + struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; |
|---|
| 1939 | + bool sendrsp = true; |
|---|
| 1603 | 1940 | |
|---|
| 1604 | | - iod->lsreq->nvmet_fc_private = iod; |
|---|
| 1605 | | - iod->lsreq->rspbuf = iod->rspbuf; |
|---|
| 1606 | | - iod->lsreq->rspdma = iod->rspdma; |
|---|
| 1607 | | - iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done; |
|---|
| 1941 | + iod->lsrsp->nvme_fc_private = iod; |
|---|
| 1942 | + iod->lsrsp->rspbuf = iod->rspbuf; |
|---|
| 1943 | + iod->lsrsp->rspdma = iod->rspdma; |
|---|
| 1944 | + iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; |
|---|
| 1608 | 1945 | /* Be preventative. handlers will later set to valid length */ |
|---|
| 1609 | | - iod->lsreq->rsplen = 0; |
|---|
| 1946 | + iod->lsrsp->rsplen = 0; |
|---|
| 1610 | 1947 | |
|---|
| 1611 | 1948 | iod->assoc = NULL; |
|---|
| 1612 | 1949 | |
|---|
| .. | .. |
|---|
| 1624 | 1961 | /* Creates an IO Queue/Connection */ |
|---|
| 1625 | 1962 | nvmet_fc_ls_create_connection(tgtport, iod); |
|---|
| 1626 | 1963 | break; |
|---|
| 1627 | | - case FCNVME_LS_DISCONNECT: |
|---|
| 1964 | + case FCNVME_LS_DISCONNECT_ASSOC: |
|---|
| 1628 | 1965 | /* Terminate a Queue/Connection or the Association */ |
|---|
| 1629 | | - nvmet_fc_ls_disconnect(tgtport, iod); |
|---|
| 1966 | + sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); |
|---|
| 1630 | 1967 | break; |
|---|
| 1631 | 1968 | default: |
|---|
| 1632 | | - iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf, |
|---|
| 1633 | | - NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd, |
|---|
| 1969 | + iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, |
|---|
| 1970 | + sizeof(*iod->rspbuf), w0->ls_cmd, |
|---|
| 1634 | 1971 | FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); |
|---|
| 1635 | 1972 | } |
|---|
| 1636 | 1973 | |
|---|
| 1637 | | - nvmet_fc_xmt_ls_rsp(tgtport, iod); |
|---|
| 1974 | + if (sendrsp) |
|---|
| 1975 | + nvmet_fc_xmt_ls_rsp(tgtport, iod); |
|---|
| 1638 | 1976 | } |
|---|
| 1639 | 1977 | |
|---|
| 1640 | 1978 | /* |
|---|
| .. | .. |
|---|
| 1661 | 1999 | * |
|---|
| 1662 | 2000 | * If this routine returns error, the LLDD should abort the exchange. |
|---|
| 1663 | 2001 | * |
|---|
| 1664 | | - * @tgtport: pointer to the (registered) target port the LS was |
|---|
| 2002 | + * @target_port: pointer to the (registered) target port the LS was |
|---|
| 1665 | 2003 | * received on. |
|---|
| 1666 | | - * @lsreq: pointer to a lsreq request structure to be used to reference |
|---|
| 2004 | + * @lsrsp: pointer to a lsrsp structure to be used to reference |
|---|
| 1667 | 2005 | * the exchange corresponding to the LS. |
|---|
| 1668 | 2006 | * @lsreqbuf: pointer to the buffer containing the LS Request |
|---|
| 1669 | 2007 | * @lsreqbuf_len: length, in bytes, of the received LS request |
|---|
| 1670 | 2008 | */ |
|---|
| 1671 | 2009 | int |
|---|
| 1672 | 2010 | nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, |
|---|
| 1673 | | - struct nvmefc_tgt_ls_req *lsreq, |
|---|
| 2011 | + void *hosthandle, |
|---|
| 2012 | + struct nvmefc_ls_rsp *lsrsp, |
|---|
| 1674 | 2013 | void *lsreqbuf, u32 lsreqbuf_len) |
|---|
| 1675 | 2014 | { |
|---|
| 1676 | 2015 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); |
|---|
| 1677 | 2016 | struct nvmet_fc_ls_iod *iod; |
|---|
| 2017 | + struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; |
|---|
| 1678 | 2018 | |
|---|
| 1679 | | - if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE) |
|---|
| 2019 | + if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { |
|---|
| 2020 | + dev_info(tgtport->dev, |
|---|
| 2021 | + "RCV %s LS failed: payload too large (%d)\n", |
|---|
| 2022 | + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
|---|
| 2023 | + nvmefc_ls_names[w0->ls_cmd] : "", |
|---|
| 2024 | + lsreqbuf_len); |
|---|
| 1680 | 2025 | return -E2BIG; |
|---|
| 2026 | + } |
|---|
| 1681 | 2027 | |
|---|
| 1682 | | - if (!nvmet_fc_tgtport_get(tgtport)) |
|---|
| 2028 | + if (!nvmet_fc_tgtport_get(tgtport)) { |
|---|
| 2029 | + dev_info(tgtport->dev, |
|---|
| 2030 | + "RCV %s LS failed: target deleting\n", |
|---|
| 2031 | + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
|---|
| 2032 | + nvmefc_ls_names[w0->ls_cmd] : ""); |
|---|
| 1683 | 2033 | return -ESHUTDOWN; |
|---|
| 2034 | + } |
|---|
| 1684 | 2035 | |
|---|
| 1685 | 2036 | iod = nvmet_fc_alloc_ls_iod(tgtport); |
|---|
| 1686 | 2037 | if (!iod) { |
|---|
| 2038 | + dev_info(tgtport->dev, |
|---|
| 2039 | + "RCV %s LS failed: context allocation failed\n", |
|---|
| 2040 | + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
|---|
| 2041 | + nvmefc_ls_names[w0->ls_cmd] : ""); |
|---|
| 1687 | 2042 | nvmet_fc_tgtport_put(tgtport); |
|---|
| 1688 | 2043 | return -ENOENT; |
|---|
| 1689 | 2044 | } |
|---|
| 1690 | 2045 | |
|---|
| 1691 | | - iod->lsreq = lsreq; |
|---|
| 2046 | + iod->lsrsp = lsrsp; |
|---|
| 1692 | 2047 | iod->fcpreq = NULL; |
|---|
| 1693 | 2048 | memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); |
|---|
| 1694 | 2049 | iod->rqstdatalen = lsreqbuf_len; |
|---|
| 2050 | + iod->hosthandle = hosthandle; |
|---|
| 1695 | 2051 | |
|---|
| 1696 | 2052 | schedule_work(&iod->work); |
|---|
| 1697 | 2053 | |
|---|
| .. | .. |
|---|
| 1798 | 2154 | */ |
|---|
| 1799 | 2155 | rspcnt = atomic_inc_return(&fod->queue->zrspcnt); |
|---|
| 1800 | 2156 | if (!(rspcnt % fod->queue->ersp_ratio) || |
|---|
| 1801 | | - sqe->opcode == nvme_fabrics_command || |
|---|
| 2157 | + nvme_is_fabrics((struct nvme_command *) sqe) || |
|---|
| 1802 | 2158 | xfr_length != fod->req.transfer_len || |
|---|
| 1803 | 2159 | (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || |
|---|
| 1804 | 2160 | (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || |
|---|
| .. | .. |
|---|
| 2007 | 2363 | } |
|---|
| 2008 | 2364 | |
|---|
| 2009 | 2365 | /* data transfer complete, resume with nvmet layer */ |
|---|
| 2010 | | - nvmet_req_execute(&fod->req); |
|---|
| 2366 | + fod->req.execute(&fod->req); |
|---|
| 2011 | 2367 | break; |
|---|
| 2012 | 2368 | |
|---|
| 2013 | 2369 | case NVMET_FCOP_READDATA: |
|---|
| .. | .. |
|---|
| 2058 | 2414 | } |
|---|
| 2059 | 2415 | |
|---|
| 2060 | 2416 | static void |
|---|
| 2061 | | -nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work) |
|---|
| 2062 | | -{ |
|---|
| 2063 | | - struct nvmet_fc_fcp_iod *fod = |
|---|
| 2064 | | - container_of(work, struct nvmet_fc_fcp_iod, done_work); |
|---|
| 2065 | | - |
|---|
| 2066 | | - nvmet_fc_fod_op_done(fod); |
|---|
| 2067 | | -} |
|---|
| 2068 | | - |
|---|
| 2069 | | -static void |
|---|
| 2070 | 2417 | nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) |
|---|
| 2071 | 2418 | { |
|---|
| 2072 | 2419 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
|---|
| 2073 | | - struct nvmet_fc_tgt_queue *queue = fod->queue; |
|---|
| 2074 | 2420 | |
|---|
| 2075 | | - if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR) |
|---|
| 2076 | | - /* context switch so completion is not in ISR context */ |
|---|
| 2077 | | - queue_work_on(queue->cpu, queue->work_q, &fod->done_work); |
|---|
| 2078 | | - else |
|---|
| 2079 | | - nvmet_fc_fod_op_done(fod); |
|---|
| 2421 | + nvmet_fc_fod_op_done(fod); |
|---|
| 2080 | 2422 | } |
|---|
| 2081 | 2423 | |
|---|
| 2082 | 2424 | /* |
|---|
| .. | .. |
|---|
| 2147 | 2489 | |
|---|
| 2148 | 2490 | |
|---|
| 2149 | 2491 | /* |
|---|
| 2150 | | - * Actual processing routine for received FC-NVME LS Requests from the LLD |
|---|
| 2492 | + * Actual processing routine for received FC-NVME I/O Requests from the LLD |
|---|
| 2151 | 2493 | */ |
|---|
| 2152 | 2494 | static void |
|---|
| 2153 | 2495 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
|---|
| .. | .. |
|---|
| 2183 | 2525 | } |
|---|
| 2184 | 2526 | |
|---|
| 2185 | 2527 | fod->req.cmd = &fod->cmdiubuf.sqe; |
|---|
| 2186 | | - fod->req.rsp = &fod->rspiubuf.cqe; |
|---|
| 2187 | | - fod->req.port = fod->queue->port; |
|---|
| 2528 | + fod->req.cqe = &fod->rspiubuf.cqe; |
|---|
| 2529 | + if (tgtport->pe) |
|---|
| 2530 | + fod->req.port = tgtport->pe->port; |
|---|
| 2188 | 2531 | |
|---|
| 2189 | 2532 | /* clear any response payload */ |
|---|
| 2190 | 2533 | memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); |
|---|
| .. | .. |
|---|
| 2230 | 2573 | * can invoke the nvmet_layer now. If read data, cmd completion will |
|---|
| 2231 | 2574 | * push the data |
|---|
| 2232 | 2575 | */ |
|---|
| 2233 | | - nvmet_req_execute(&fod->req); |
|---|
| 2576 | + fod->req.execute(&fod->req); |
|---|
| 2234 | 2577 | return; |
|---|
| 2235 | 2578 | |
|---|
| 2236 | 2579 | transport_error: |
|---|
| 2237 | 2580 | nvmet_fc_abort_op(tgtport, fod); |
|---|
| 2238 | | -} |
|---|
| 2239 | | - |
|---|
| 2240 | | -/* |
|---|
| 2241 | | - * Actual processing routine for received FC-NVME LS Requests from the LLD |
|---|
| 2242 | | - */ |
|---|
| 2243 | | -static void |
|---|
| 2244 | | -nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) |
|---|
| 2245 | | -{ |
|---|
| 2246 | | - struct nvmet_fc_fcp_iod *fod = |
|---|
| 2247 | | - container_of(work, struct nvmet_fc_fcp_iod, work); |
|---|
| 2248 | | - struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
|---|
| 2249 | | - |
|---|
| 2250 | | - nvmet_fc_handle_fcp_rqst(tgtport, fod); |
|---|
| 2251 | 2581 | } |
|---|
| 2252 | 2582 | |
|---|
| 2253 | 2583 | /** |
|---|
| .. | .. |
|---|
| 2311 | 2641 | |
|---|
| 2312 | 2642 | /* validate iu, so the connection id can be used to find the queue */ |
|---|
| 2313 | 2643 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
|---|
| 2314 | | - (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || |
|---|
| 2644 | + (cmdiu->format_id != NVME_CMD_FORMAT_ID) || |
|---|
| 2315 | 2645 | (cmdiu->fc_id != NVME_CMD_FC_ID) || |
|---|
| 2316 | 2646 | (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) |
|---|
| 2317 | 2647 | return -EIO; |
|---|
| .. | .. |
|---|
| 2468 | 2798 | substring_t wwn = { name, &name[sizeof(name)-1] }; |
|---|
| 2469 | 2799 | int nnoffset, pnoffset; |
|---|
| 2470 | 2800 | |
|---|
| 2471 | | - /* validate it string one of the 2 allowed formats */ |
|---|
| 2801 | + /* validate if string is one of the 2 allowed formats */ |
|---|
| 2472 | 2802 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && |
|---|
| 2473 | 2803 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && |
|---|
| 2474 | 2804 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], |
|---|
| .. | .. |
|---|
| 2508 | 2838 | nvmet_fc_add_port(struct nvmet_port *port) |
|---|
| 2509 | 2839 | { |
|---|
| 2510 | 2840 | struct nvmet_fc_tgtport *tgtport; |
|---|
| 2841 | + struct nvmet_fc_port_entry *pe; |
|---|
| 2511 | 2842 | struct nvmet_fc_traddr traddr = { 0L, 0L }; |
|---|
| 2512 | 2843 | unsigned long flags; |
|---|
| 2513 | 2844 | int ret; |
|---|
| .. | .. |
|---|
| 2524 | 2855 | if (ret) |
|---|
| 2525 | 2856 | return ret; |
|---|
| 2526 | 2857 | |
|---|
| 2858 | + pe = kzalloc(sizeof(*pe), GFP_KERNEL); |
|---|
| 2859 | + if (!pe) |
|---|
| 2860 | + return -ENOMEM; |
|---|
| 2861 | + |
|---|
| 2527 | 2862 | ret = -ENXIO; |
|---|
| 2528 | 2863 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
|---|
| 2529 | 2864 | list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { |
|---|
| 2530 | 2865 | if ((tgtport->fc_target_port.node_name == traddr.nn) && |
|---|
| 2531 | 2866 | (tgtport->fc_target_port.port_name == traddr.pn)) { |
|---|
| 2532 | | - tgtport->port = port; |
|---|
| 2533 | | - ret = 0; |
|---|
| 2867 | + /* a FC port can only be 1 nvmet port id */ |
|---|
| 2868 | + if (!tgtport->pe) { |
|---|
| 2869 | + nvmet_fc_portentry_bind(tgtport, pe, port); |
|---|
| 2870 | + ret = 0; |
|---|
| 2871 | + } else |
|---|
| 2872 | + ret = -EALREADY; |
|---|
| 2534 | 2873 | break; |
|---|
| 2535 | 2874 | } |
|---|
| 2536 | 2875 | } |
|---|
| 2537 | 2876 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
|---|
| 2877 | + |
|---|
| 2878 | + if (ret) |
|---|
| 2879 | + kfree(pe); |
|---|
| 2880 | + |
|---|
| 2538 | 2881 | return ret; |
|---|
| 2539 | 2882 | } |
|---|
| 2540 | 2883 | |
|---|
| 2541 | 2884 | static void |
|---|
| 2542 | 2885 | nvmet_fc_remove_port(struct nvmet_port *port) |
|---|
| 2543 | 2886 | { |
|---|
| 2544 | | - /* nothing to do */ |
|---|
| 2887 | + struct nvmet_fc_port_entry *pe = port->priv; |
|---|
| 2888 | + |
|---|
| 2889 | + nvmet_fc_portentry_unbind(pe); |
|---|
| 2890 | + |
|---|
| 2891 | + kfree(pe); |
|---|
| 2892 | +} |
|---|
| 2893 | + |
|---|
| 2894 | +static void |
|---|
| 2895 | +nvmet_fc_discovery_chg(struct nvmet_port *port) |
|---|
| 2896 | +{ |
|---|
| 2897 | + struct nvmet_fc_port_entry *pe = port->priv; |
|---|
| 2898 | + struct nvmet_fc_tgtport *tgtport = pe->tgtport; |
|---|
| 2899 | + |
|---|
| 2900 | + if (tgtport && tgtport->ops->discovery_event) |
|---|
| 2901 | + tgtport->ops->discovery_event(&tgtport->fc_target_port); |
|---|
| 2545 | 2902 | } |
|---|
| 2546 | 2903 | |
|---|
| 2547 | 2904 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
|---|
| .. | .. |
|---|
| 2552 | 2909 | .remove_port = nvmet_fc_remove_port, |
|---|
| 2553 | 2910 | .queue_response = nvmet_fc_fcp_nvme_cmd_done, |
|---|
| 2554 | 2911 | .delete_ctrl = nvmet_fc_delete_ctrl, |
|---|
| 2912 | + .discovery_chg = nvmet_fc_discovery_chg, |
|---|
| 2555 | 2913 | }; |
|---|
| 2556 | 2914 | |
|---|
| 2557 | 2915 | static int __init nvmet_fc_init_module(void) |
|---|