2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -51,8 +51,6 @@

 /* Name of this kernel module. */
 #define DRV_NAME	"ib_srpt"
-#define DRV_VERSION	"2.0.0"
-#define DRV_RELDATE	"2011-02-14"

 #define SRPT_ID_STRING	"Linux SRP target"

@@ -60,8 +58,7 @@
 #define pr_fmt(fmt) DRV_NAME " " fmt

 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
-MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
-		   "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
 MODULE_LICENSE("Dual BSD/GPL");

 /*
@@ -84,13 +81,12 @@

 static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
 {
-	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+	return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
 }
 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
 MODULE_PARM_DESC(srpt_service_guid,
-		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
-		 " instead of using the node_guid of the first HCA.");
+		 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");

 static struct ib_client srpt_client;
 /* Protects both rdma_cm_port and rdma_cm_id. */
@@ -139,16 +135,13 @@
 static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
 {
-	struct srpt_device *sdev;
+	struct srpt_device *sdev =
+		container_of(handler, struct srpt_device, event_handler);
 	struct srpt_port *sport;
 	u8 port_num;

-	sdev = ib_get_client_data(event->device, &srpt_client);
-	if (!sdev || sdev->device != event->device)
-		return;
-
 	pr_debug("ASYNC event= %d on device= %s\n", event->event,
-		 sdev->device->name);
+		 dev_name(&sdev->device->dev));

 	switch (event->event) {
 	case IB_EVENT_PORT_ERR:
@@ -221,8 +214,9 @@
  */
 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 {
-	pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
-		 event->event, ch, ch->sess_name, ch->state);
+	pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+		 event->event, ch, ch->sess_name, ch->qp->qp_num,
+		 get_ch_state_name(ch->state));

 	switch (event->event) {
 	case IB_EVENT_COMM_EST:
@@ -462,7 +456,7 @@
 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
 {
-	rdma_destroy_ah(mad_wc->send_buf->ah);
+	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
 	ib_free_send_mad(mad_wc->send_buf);
 }

@@ -529,7 +523,7 @@
 	ib_free_send_mad(rsp);

 err_rsp:
-	rdma_destroy_ah(ah);
+	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 err:
 	ib_free_recv_mad(mad_wc);
 }
@@ -555,38 +549,43 @@
  */
 static int srpt_refresh_port(struct srpt_port *sport)
 {
+	struct ib_mad_agent *mad_agent;
 	struct ib_mad_reg_req reg_req;
 	struct ib_port_modify port_modify;
 	struct ib_port_attr port_attr;
 	int ret;

-	memset(&port_modify, 0, sizeof(port_modify));
-	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
-	port_modify.clr_port_cap_mask = 0;
-
-	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-	if (ret)
-		goto err_mod_port;
-
 	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
 	if (ret)
-		goto err_query_port;
+		return ret;

 	sport->sm_lid = port_attr.sm_lid;
 	sport->lid = port_attr.lid;

 	ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
 	if (ret)
-		goto err_query_port;
+		return ret;

-	sport->port_guid_wwn.priv = sport;
-	srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+	srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
			 &sport->gid.global.interface_id);
-	sport->port_gid_wwn.priv = sport;
-	snprintf(sport->port_gid, sizeof(sport->port_gid),
+	snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
		 "0x%016llx%016llx",
		 be64_to_cpu(sport->gid.global.subnet_prefix),
		 be64_to_cpu(sport->gid.global.interface_id));
+
+	if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+		return 0;
+
+	memset(&port_modify, 0, sizeof(port_modify));
+	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+	port_modify.clr_port_cap_mask = 0;
+
+	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+	if (ret) {
+		pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
			dev_name(&sport->sdev->device->dev), sport->port, ret);
+		return 0;
+	}

 	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
@@ -595,40 +594,39 @@
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

-		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
-							 sport->port,
-							 IB_QPT_GSI,
-							 &reg_req, 0,
-							 srpt_mad_send_handler,
-							 srpt_mad_recv_handler,
-							 sport, 0);
-		if (IS_ERR(sport->mad_agent)) {
-			ret = PTR_ERR(sport->mad_agent);
+		mad_agent = ib_register_mad_agent(sport->sdev->device,
+						  sport->port,
+						  IB_QPT_GSI,
+						  &reg_req, 0,
+						  srpt_mad_send_handler,
+						  srpt_mad_recv_handler,
+						  sport, 0);
+		if (IS_ERR(mad_agent)) {
+			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+			       dev_name(&sport->sdev->device->dev), sport->port,
+			       PTR_ERR(mad_agent));
			sport->mad_agent = NULL;
-			goto err_query_port;
+			memset(&port_modify, 0, sizeof(port_modify));
+			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+			ib_modify_port(sport->sdev->device, sport->port, 0,
+				       &port_modify);
+			return 0;
		}
+
+		sport->mad_agent = mad_agent;
 	}

 	return 0;
-
-err_query_port:
-
-	port_modify.set_port_cap_mask = 0;
-	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
-	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
-	return ret;
 }

 /**
  * srpt_unregister_mad_agent - unregister MAD callback functions
  * @sdev: SRPT HCA pointer.
+ * @port_cnt: number of ports with registered MAD
  *
  * Note: It is safe to call this function more than once for the same device.
  */
-static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
 {
 	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
@@ -636,12 +634,11 @@
 	struct srpt_port *sport;
 	int i;

-	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+	for (i = 1; i <= port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
-		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
-			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
+			ib_modify_port(sdev->device, i, 0, &port_modify);
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
@@ -652,31 +649,33 @@
  * srpt_alloc_ioctx - allocate a SRPT I/O context structure
  * @sdev: SRPT HCA pointer.
  * @ioctx_size: I/O context size.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
  * @dir: DMA data direction.
  */
 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
-					   int ioctx_size, int dma_size,
+					   int ioctx_size,
+					   struct kmem_cache *buf_cache,
					   enum dma_data_direction dir)
 {
 	struct srpt_ioctx *ioctx;

-	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+	ioctx = kzalloc(ioctx_size, GFP_KERNEL);
 	if (!ioctx)
		goto err;

-	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+	ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
 	if (!ioctx->buf)
		goto err_free_ioctx;

-	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
+				       kmem_cache_size(buf_cache), dir);
 	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

 	return ioctx;

 err_free_buf:
-	kfree(ioctx->buf);
+	kmem_cache_free(buf_cache, ioctx->buf);
 err_free_ioctx:
 	kfree(ioctx);
 err:
@@ -687,17 +686,19 @@
  * srpt_free_ioctx - free a SRPT I/O context structure
  * @sdev: SRPT HCA pointer.
  * @ioctx: I/O context pointer.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
  * @dir: DMA data direction.
  */
 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
-			    int dma_size, enum dma_data_direction dir)
+			    struct kmem_cache *buf_cache,
+			    enum dma_data_direction dir)
 {
 	if (!ioctx)
		return;

-	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
-	kfree(ioctx->buf);
+	ib_dma_unmap_single(sdev->device, ioctx->dma,
+			    kmem_cache_size(buf_cache), dir);
+	kmem_cache_free(buf_cache, ioctx->buf);
 	kfree(ioctx);
 }

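(The two hunks above move the per-ioctx data buffers from plain kmalloc() to a kmem_cache and size the DMA mapping with kmem_cache_size(), so the map and unmap lengths always agree with the cache object size. A minimal sketch of that pairing — not part of the patch, all names hypothetical:)

    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: allocate a cache object and DMA-map its full size. */
    static void *example_alloc_mapped_buf(struct ib_device *dev,
					  struct kmem_cache *cache,
					  enum dma_data_direction dir,
					  dma_addr_t *dma)
    {
	    void *buf = kmem_cache_alloc(cache, GFP_KERNEL);

	    if (!buf)
		    return NULL;
	    /* Map the whole object; the free path can recompute the same length. */
	    *dma = ib_dma_map_single(dev, buf, kmem_cache_size(cache), dir);
	    if (ib_dma_mapping_error(dev, *dma)) {
		    kmem_cache_free(cache, buf);
		    return NULL;
	    }
	    return buf;
    }
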
@@ -706,33 +707,38 @@
  * @sdev: Device to allocate the I/O context ring for.
  * @ring_size: Number of elements in the I/O context ring.
  * @ioctx_size: I/O context size.
- * @dma_size: DMA buffer size.
+ * @buf_cache: I/O buffer cache.
+ * @alignment_offset: Offset in each ring buffer at which the SRP information
+ *		      unit starts.
  * @dir: DMA data direction.
  */
 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
-				int dma_size, enum dma_data_direction dir)
+				struct kmem_cache *buf_cache,
+				int alignment_offset,
+				enum dma_data_direction dir)
 {
 	struct srpt_ioctx **ring;
 	int i;

-	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
-		&& ioctx_size != sizeof(struct srpt_send_ioctx));
+	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
+		ioctx_size != sizeof(struct srpt_send_ioctx));

 	ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
 	if (!ring)
		goto out;
 	for (i = 0; i < ring_size; ++i) {
-		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
+		ring[i]->offset = alignment_offset;
 	}
 	goto out;

 err:
 	while (--i >= 0)
-		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+		srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
 	kvfree(ring);
 	ring = NULL;
 out:
@@ -744,12 +750,13 @@
  * @ioctx_ring: I/O context ring to be freed.
  * @sdev: SRPT HCA pointer.
  * @ring_size: Number of ring elements.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
  * @dir: DMA data direction.
  */
 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
-				 int dma_size, enum dma_data_direction dir)
+				 struct kmem_cache *buf_cache,
+				 enum dma_data_direction dir)
 {
 	int i;

@@ -757,7 +764,7 @@
		return;

 	for (i = 0; i < ring_size; ++i)
-		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+		srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
 	kvfree(ioctx_ring);
 }

@@ -819,7 +826,7 @@
 	struct ib_recv_wr wr;

 	BUG_ON(!sdev);
-	list.addr = ioctx->ioctx.dma;
+	list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
 	list.length = srp_max_req_size;
 	list.lkey = sdev->lkey;

@@ -863,7 +870,7 @@

 static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;

 	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
		 wc->status);
@@ -985,23 +992,28 @@

 /**
  * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
- * @ioctx: Pointer to the I/O context associated with the request.
+ * @recv_ioctx: I/O context associated with the received command @srp_cmd.
+ * @ioctx: I/O context that will be used for responding to the initiator.
  * @srp_cmd: Pointer to the SRP_CMD request data.
  * @dir: Pointer to the variable to which the transfer direction will be
  *	 written.
- * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg: [out] scatterlist for the parsed SRP_CMD.
  * @sg_cnt: [out] length of @sg.
  * @data_len: Pointer to the variable to which the total data length of all
  *	      descriptors in the SRP_CMD request will be written.
+ * @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
+ *		     starts.
  *
  * This function initializes ioctx->nrbuf and ioctx->r_bufs.
  *
  * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
  * -ENOMEM when memory allocation fails and zero upon success.
  */
-static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
+			     struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
-			     struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
+			     struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
+			     u16 imm_data_offset)
 {
 	BUG_ON(!dir);
 	BUG_ON(!data_len);
@@ -1025,7 +1037,7 @@

 	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
-	 	struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
+		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

		*data_len = be32_to_cpu(db->len);
		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
@@ -1037,8 +1049,7 @@

		if (nbufs >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
-			pr_err("received unsupported SRP_CMD request"
-			       " type (%u out + %u in != %u / %zu)\n",
+			pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
@@ -1049,6 +1060,40 @@
		*data_len = be32_to_cpu(idb->len);
		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
					  sg, sg_cnt);
+	} else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
+		struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
+		void *data = (void *)srp_cmd + imm_data_offset;
+		uint32_t len = be32_to_cpu(imm_buf->len);
+		uint32_t req_size = imm_data_offset + len;
+
+		if (req_size > srp_max_req_size) {
+			pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
+			       imm_data_offset, len, srp_max_req_size);
+			return -EINVAL;
+		}
+		if (recv_ioctx->byte_len < req_size) {
+			pr_err("Received too few data - %d < %d\n",
+			       recv_ioctx->byte_len, req_size);
+			return -EIO;
+		}
+		/*
+		 * The immediate data buffer descriptor must occur before the
+		 * immediate data itself.
+		 */
+		if ((void *)(imm_buf + 1) > (void *)data) {
+			pr_err("Received invalid write request\n");
+			return -EINVAL;
+		}
+		*data_len = len;
+		ioctx->recv_ioctx = recv_ioctx;
+		if ((uintptr_t)data & 511) {
+			pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
+			return -EINVAL;
+		}
+		sg_init_one(&ioctx->imm_sg, data, len);
+		*sg = &ioctx->imm_sg;
+		*sg_cnt = 1;
+		return 0;
 	} else {
		*data_len = 0;
		return 0;
@@ -1173,24 +1218,18 @@
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
 	struct srpt_send_ioctx *ioctx;
-	unsigned long flags;
+	int tag, cpu;

 	BUG_ON(!ch);

-	ioctx = NULL;
-	spin_lock_irqsave(&ch->spinlock, flags);
-	if (!list_empty(&ch->free_list)) {
-		ioctx = list_first_entry(&ch->free_list,
-					 struct srpt_send_ioctx, free_list);
-		list_del(&ioctx->free_list);
-	}
-	spin_unlock_irqrestore(&ch->spinlock, flags);
+	tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
+	if (tag < 0)
+		return NULL;

-	if (!ioctx)
-		return ioctx;
-
+	ioctx = ch->ioctx_ring[tag];
 	BUG_ON(ioctx->ch != ch);
 	ioctx->state = SRPT_STATE_NEW;
+	WARN_ON_ONCE(ioctx->recv_ioctx);
 	ioctx->n_rdma = 0;
 	ioctx->n_rw_ctx = 0;
 	ioctx->queue_status_only = false;
@@ -1200,6 +1239,8 @@
 	 */
 	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
 	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+	ioctx->cmd.map_tag = tag;
+	ioctx->cmd.map_cpu = cpu;

 	return ioctx;
 }
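(srpt_get_send_ioctx() now draws its I/O context from the LIO session tag pool instead of a spinlock-protected free list. The underlying sbitmap_queue get/put pattern looks roughly like this — a sketch with hypothetical names, not part of the patch; the matching release side in the driver is the target_free_tag() call added to srpt_release_cmd() further down:)

    #include <linux/sbitmap.h>

    /* Borrow a tag; sbitmap_queue_get() returns -1 when the pool is empty. */
    static int example_get_tag(struct sbitmap_queue *pool, unsigned int *cpu)
    {
	    return sbitmap_queue_get(pool, cpu);
    }

    /* Return the tag; the cpu hint keeps wakeup batching per-CPU friendly. */
    static void example_put_tag(struct sbitmap_queue *pool, unsigned int tag,
				unsigned int cpu)
    {
	    sbitmap_queue_clear(pool, tag, cpu);
    }
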
@@ -1282,7 +1323,7 @@
  */
 static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

@@ -1376,8 +1417,8 @@
 	BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
 	max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
 	if (sense_data_len > max_sense_len) {
-		pr_warn("truncated sense data from %d to %d"
-			" bytes\n", sense_data_len, max_sense_len);
+		pr_warn("truncated sense data from %d to %d bytes\n",
+			sense_data_len, max_sense_len);
		sense_data_len = max_sense_len;
 	}

@@ -1457,7 +1498,7 @@

 	BUG_ON(!send_ioctx);

-	srp_cmd = recv_ioctx->ioctx.buf;
+	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	cmd = &send_ioctx->cmd;
 	cmd->tag = srp_cmd->tag;

@@ -1477,14 +1518,14 @@
		break;
 	}

-	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
-			       &data_len);
+	rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
+			       &sg, &sg_cnt, &data_len, ch->imm_data_offset);
 	if (rc) {
		if (rc != -EAGAIN) {
			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
			       srp_cmd->tag);
		}
-		goto release_ioctx;
+		goto busy;
 	}

 	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
@@ -1495,13 +1536,12 @@
 	if (rc != 0) {
		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
			 srp_cmd->tag);
-		goto release_ioctx;
+		goto busy;
 	}
 	return;

-release_ioctx:
-	send_ioctx->state = SRPT_STATE_DONE;
-	srpt_release_cmd(cmd);
+busy:
+	target_send_busy(cmd);
 }

@@ -1545,7 +1585,7 @@

 	BUG_ON(!send_ioctx);

-	srp_tsk = recv_ioctx->ioctx.buf;
+	srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	cmd = &send_ioctx->cmd;

 	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
@@ -1561,11 +1601,9 @@
			TARGET_SCF_ACK_KREF);
 	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
-		goto fail;
+		cmd->se_tfo->queue_tm_rsp(cmd);
 	}
 	return;
-fail:
-	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
 }

 /**
@@ -1588,10 +1626,11 @@
		goto push;

 	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
-				   recv_ioctx->ioctx.dma, srp_max_req_size,
+				   recv_ioctx->ioctx.dma,
+				   recv_ioctx->ioctx.offset + srp_max_req_size,
				   DMA_FROM_DEVICE);

-	srp_cmd = recv_ioctx->ioctx.buf;
+	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	opcode = srp_cmd->opcode;
 	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
		send_ioctx = srpt_get_send_ioctx(ch);
@@ -1628,7 +1667,8 @@
		break;
 	}

-	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+	if (!send_ioctx || !send_ioctx->recv_ioctx)
+		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
 	res = true;

 out:
@@ -1644,7 +1684,7 @@

 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

@@ -1654,6 +1694,7 @@
		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
+		ioctx->byte_len = wc->byte_len;
		srpt_handle_new_iu(ch, ioctx);
 	} else {
		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
@@ -1704,7 +1745,7 @@
  */
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
 	enum srpt_command_state state;
@@ -1717,14 +1758,14 @@

 	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

 	if (wc->status != IB_WC_SUCCESS)
-		pr_info("sending response for ioctx 0x%p failed"
-			" with status %d\n", ioctx, wc->status);
+		pr_info("sending response for ioctx 0x%p failed with status %d\n",
+			ioctx, wc->status);

 	if (state != SRPT_STATE_DONE) {
		transport_generic_free_cmd(&ioctx->cmd, 0);
 	} else {
-		pr_err("IB completion has been received too late for"
-		       " wr_id = %u.\n", ioctx->ioctx.index);
+		pr_err("IB completion has been received too late for wr_id = %u.\n",
+		       ioctx->ioctx.index);
 	}

 	srpt_process_wait_list(ch);
@@ -1751,14 +1792,15 @@
		goto out;

 retry:
-	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
-			     0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
+	ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
+				IB_POLL_WORKQUEUE);
 	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + sq_size, ret);
		goto out;
 	}
+	ch->cq_size = ch->rq_size + sq_size;

 	qp_init->qp_context = (void *)ch;
 	qp_init->event_handler
@@ -1776,16 +1818,13 @@
 	 */
 	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
 	qp_init->cap.max_rdma_ctxs = sq_size / 2;
-	qp_init->cap.max_send_sge = min(attrs->max_send_sge,
-					SRPT_MAX_SG_PER_WQE);
+	qp_init->cap.max_send_sge = attrs->max_send_sge;
+	qp_init->cap.max_recv_sge = 1;
 	qp_init->port_num = ch->sport->port;
-	if (sdev->use_srq) {
+	if (sdev->use_srq)
		qp_init->srq = sdev->srq;
-	} else {
+	else
		qp_init->cap.max_recv_wr = ch->rq_size;
-		qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
-						SRPT_MAX_SG_PER_WQE);
-	}

 	if (ch->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
@@ -1806,7 +1845,7 @@
		if (retry) {
			pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
				 sq_size, ret);
-			ib_free_cq(ch->cq);
+			ib_cq_pool_put(ch->cq, ch->cq_size);
			sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
			goto retry;
		} else {
@@ -1832,14 +1871,14 @@

 err_destroy_cq:
 	ch->qp = NULL;
-	ib_free_cq(ch->cq);
+	ib_cq_pool_put(ch->cq, ch->cq_size);
 	goto out;
 }

 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
 {
 	ib_destroy_qp(ch->qp);
-	ib_free_cq(ch->cq);
+	ib_cq_pool_put(ch->cq, ch->cq_size);
 }

 /**
@@ -1913,41 +1952,22 @@
 	return ret;
 }

-static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
-{
-	struct srpt_nexus *nexus;
-	struct srpt_rdma_ch *ch2;
-	bool res = true;
-
-	rcu_read_lock();
-	list_for_each_entry(nexus, &sport->nexus_list, entry) {
-		list_for_each_entry(ch2, &nexus->ch_list, list) {
-			if (ch2 == ch) {
-				res = false;
-				goto done;
-			}
-		}
-	}
-done:
-	rcu_read_unlock();
-
-	return res;
-}
-
 /* Send DREQ and wait for DREP. */
 static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
 {
+	DECLARE_COMPLETION_ONSTACK(closed);
 	struct srpt_port *sport = ch->sport;

 	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
		 ch->state);

+	ch->closed = &closed;
+
 	mutex_lock(&sport->mutex);
 	srpt_disconnect_ch(ch);
 	mutex_unlock(&sport->mutex);

-	while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
-				  5 * HZ) == 0)
+	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);

@@ -1963,9 +1983,10 @@
 	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch) >= 0)
-				pr_info("Closing channel %s because target %s_%d has been disabled\n",
-					ch->sess_name,
-					sport->sdev->device->name, sport->port);
+				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+					ch->sess_name, ch->qp->qp_num,
+					dev_name(&sport->sdev->device->dev),
+					sport->port);
			srpt_close_ch(ch);
		}
 	}
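(The DREQ/DREP wait above no longer polls a wait queue with a channel-list scan; the waiter publishes an on-stack completion that the release path fires exactly once. The general shape of that pattern, as a sketch with hypothetical names — real code must ensure the publish is ordered before teardown can observe the slot, which the surrounding locking in this patch takes care of:)

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    /* Waiter side: publish an on-stack completion, then block on it. */
    static void example_wait(struct completion **slot)
    {
	    DECLARE_COMPLETION_ONSTACK(done);

	    *slot = &done;	/* the release path will complete() this */
	    /* ... trigger teardown here ... */
	    while (wait_for_completion_timeout(&done, 5 * HZ) == 0)
		    pr_info("still waiting ...\n");
    }

    /* Release side: fire the completion if a waiter registered one. */
    static void example_release(struct completion *done)
    {
	    if (done)
		    complete(done);
    }
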
@@ -2026,10 +2047,17 @@
 	__srpt_close_all_ch(sport);
 }

+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+	if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+		complete(sport->freed_channels);
+}
+
 static void srpt_free_ch(struct kref *kref)
 {
 	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

+	srpt_drop_sport_ref(ch->sport);
 	kfree_rcu(ch, rcu);
 }

@@ -2073,17 +2101,22 @@
 	list_del_rcu(&ch->list);
 	mutex_unlock(&sport->mutex);

+	if (ch->closed)
+		complete(ch->closed);
+
 	srpt_destroy_ch_ib(ch);

 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
-			     ch->max_rsp_size, DMA_TO_DEVICE);
+			     ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+	kmem_cache_destroy(ch->rsp_buf_cache);

 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
-			     srp_max_req_size, DMA_FROM_DEVICE);
+			     ch->req_buf_cache, DMA_FROM_DEVICE);

-	wake_up(&sport->ch_releaseQ);
+	kmem_cache_destroy(ch->req_buf_cache);

 	kref_put(&ch->kref, srpt_free_ch);
@@ -2120,12 +2153,10 @@
 	struct srpt_rdma_ch *ch = NULL;
 	char i_port_id[36];
 	u32 it_iu_len;
-	int i, ret;
+	int i, tag_num, tag_size, ret;
+	struct srpt_tpg *stpg;

 	WARN_ON_ONCE(irqs_disabled());
-
-	if (WARN_ON(!sdev || !req))
-		return -EINVAL;

 	it_iu_len = be32_to_cpu(req->req_it_iu_len);

@@ -2159,7 +2190,7 @@
 	if (!sport->enabled) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
-			sport->sdev->device->name, port_num);
+			dev_name(&sport->sdev->device->dev), port_num);
		goto reject;
 	}

@@ -2205,32 +2236,57 @@
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;

+	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
+					      512, 0, NULL);
+	if (!ch->rsp_buf_cache)
+		goto free_ch;
+
 	ch->ioctx_ring = (struct srpt_send_ioctx **)
		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
				      sizeof(*ch->ioctx_ring[0]),
-				      ch->max_rsp_size, DMA_TO_DEVICE);
+				      ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
 	if (!ch->ioctx_ring) {
		pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-		goto free_ch;
+		goto free_rsp_cache;
 	}

-	INIT_LIST_HEAD(&ch->free_list);
-	for (i = 0; i < ch->rq_size; i++) {
+	for (i = 0; i < ch->rq_size; i++)
		ch->ioctx_ring[i]->ch = ch;
-		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-	}
 	if (!sdev->use_srq) {
+		u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
+			be16_to_cpu(req->imm_data_offset) : 0;
+		u16 alignment_offset;
+		u32 req_sz;
+
+		if (req->req_flags & SRP_IMMED_REQUESTED)
+			pr_debug("imm_data_offset = %d\n",
+				 be16_to_cpu(req->imm_data_offset));
+		if (imm_data_offset >= sizeof(struct srp_cmd)) {
+			ch->imm_data_offset = imm_data_offset;
+			rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
+		} else {
+			ch->imm_data_offset = 0;
+		}
+		alignment_offset = round_up(imm_data_offset, 512) -
+			imm_data_offset;
+		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
+		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
+						      512, 0, NULL);
+		if (!ch->req_buf_cache)
+			goto free_rsp_ring;
+
		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
					      sizeof(*ch->ioctx_recv_ring[0]),
-					      srp_max_req_size,
+					      ch->req_buf_cache,
+					      alignment_offset,
					      DMA_FROM_DEVICE);
		if (!ch->ioctx_recv_ring) {
			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
			rej->reason =
				cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-			goto free_ring;
+			goto free_recv_cache;
		}
		for (i = 0; i < ch->rq_size; i++)
			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
@@ -2248,21 +2304,42 @@
		be64_to_cpu(*(__be64 *)nexus->i_port_id),
		be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));

-	pr_debug("registering session %s\n", ch->sess_name);
+	pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+		 i_port_id);

-	if (sport->port_guid_tpg.se_tpg_wwn)
-		ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
-						TARGET_PROT_NORMAL,
+	tag_num = ch->rq_size;
+	tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
+
+	if (sport->guid_id) {
+		mutex_lock(&sport->guid_id->mutex);
+		list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+			if (!IS_ERR_OR_NULL(ch->sess))
+				break;
+			ch->sess = target_setup_session(&stpg->tpg, tag_num,
						tag_size, TARGET_PROT_NORMAL,
						ch->sess_name, ch, NULL);
-	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
-		ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
-					TARGET_PROT_NORMAL, i_port_id, ch,
-					NULL);
-	/* Retry without leading "0x" */
-	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
-		ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
-						TARGET_PROT_NORMAL,
+		}
+		mutex_unlock(&sport->guid_id->mutex);
+	}
+
+	if (sport->gid_id) {
+		mutex_lock(&sport->gid_id->mutex);
+		list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+			if (!IS_ERR_OR_NULL(ch->sess))
+				break;
+			ch->sess = target_setup_session(&stpg->tpg, tag_num,
					tag_size, TARGET_PROT_NORMAL, i_port_id,
					ch, NULL);
+			if (!IS_ERR_OR_NULL(ch->sess))
+				break;
+			/* Retry without leading "0x" */
+			ch->sess = target_setup_session(&stpg->tpg, tag_num,
						tag_size, TARGET_PROT_NORMAL,
						i_port_id + 2, ch, NULL);
+		}
+		mutex_unlock(&sport->gid_id->mutex);
+	}
+
 	if (IS_ERR_OR_NULL(ch->sess)) {
		WARN_ON_ONCE(ch->sess == NULL);
		ret = PTR_ERR(ch->sess);
@@ -2275,22 +2352,26 @@
		goto destroy_ib;
 	}

+	/*
+	 * Once a session has been created destruction of srpt_rdma_ch objects
+	 * will decrement sport->refcount. Hence increment sport->refcount now.
+	 */
+	atomic_inc(&sport->refcount);
+
 	mutex_lock(&sport->mutex);

 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
		struct srpt_rdma_ch *ch2;
-
-		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;

		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch2) < 0)
				continue;
			pr_info("Relogin - closed existing channel %s\n",
				ch2->sess_name);
-			rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+			rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
		}
 	} else {
-		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+		rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
 	}

@@ -2299,7 +2380,7 @@
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
-			sdev->device->name, port_num);
+			dev_name(&sdev->device->dev), port_num);
		mutex_unlock(&sport->mutex);
		ret = -EINVAL;
		goto reject;
@@ -2321,7 +2402,7 @@
 	/* create srp_login_response */
 	rsp->opcode = SRP_LOGIN_RSP;
 	rsp->tag = req->tag;
-	rsp->max_it_iu_len = req->req_it_iu_len;
+	rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
 	rsp->max_ti_iu_len = req->req_it_iu_len;
 	ch->max_ti_iu_len = it_iu_len;
 	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
@@ -2385,12 +2466,18 @@
 free_recv_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     ch->sport->sdev, ch->rq_size,
-			     srp_max_req_size, DMA_FROM_DEVICE);
+			     ch->req_buf_cache, DMA_FROM_DEVICE);

-free_ring:
+free_recv_cache:
+	kmem_cache_destroy(ch->req_buf_cache);
+
+free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
-			     ch->max_rsp_size, DMA_TO_DEVICE);
+			     ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+free_rsp_cache:
+	kmem_cache_destroy(ch->rsp_buf_cache);

 free_ch:
@@ -2410,7 +2497,8 @@
				   SRP_BUF_FORMAT_INDIRECT);

 	if (rdma_cm_id)
-		rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+		rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+			    IB_CM_REJ_CONSUMER_DEFINED);
 	else
		ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
			       rej, sizeof(*rej));
@@ -2452,6 +2540,7 @@
 	struct srpt_device *sdev;
 	struct srp_login_req req;
 	const struct srp_login_req_rdma *req_rdma;
+	struct sa_path_rec *path_rec = cm_id->route.path_rec;
 	char src_addr[40];

 	sdev = ib_get_client_data(cm_id->device, &srpt_client);
@@ -2471,12 +2560,13 @@
 	req.req_flags = req_rdma->req_flags;
 	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
 	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
+	req.imm_data_offset = req_rdma->imm_data_offset;

 	snprintf(src_addr, sizeof(src_addr), "%pIS",
		 &cm_id->route.addr.src_addr);

 	return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
-				cm_id->route.path_rec->pkey, &req, src_addr);
+				path_rec ? path_rec->pkey : 0, &req, src_addr);
 }

 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
@@ -2640,14 +2730,6 @@
 	return ret;
 }

-static int srpt_write_pending_status(struct se_cmd *se_cmd)
-{
-	struct srpt_send_ioctx *ioctx;
-
-	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
-	return ioctx->state == SRPT_STATE_NEED_DATA;
-}
-
 /*
  * srpt_write_pending - Start data transfer from initiator to target (write).
  */
@@ -2660,6 +2742,12 @@
 	struct ib_cqe *cqe = &ioctx->rdma_cqe;
 	enum srpt_command_state new_state;
 	int ret, i;
+
+	if (ioctx->recv_ioctx) {
+		srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+		target_execute_cmd(&ioctx->cmd);
+		return 0;
+	}

 	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
 	WARN_ON(new_state == SRPT_STATE_DONE);
@@ -2724,8 +2812,6 @@
 	int resp_len, ret, i;
 	u8 srp_tm_status;

-	BUG_ON(!ch);
-
 	state = ioctx->state;
 	switch (state) {
 	case SRPT_STATE_NEW:
@@ -2741,7 +2827,7 @@
		break;
 	}

-	if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
+	if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
		return;

 	/* For read commands, transfer the data to the initiator. */
@@ -2854,39 +2940,29 @@
 	srpt_refresh_port(sport);
 }

-static bool srpt_ch_list_empty(struct srpt_port *sport)
-{
-	struct srpt_nexus *nexus;
-	bool res = true;
-
-	rcu_read_lock();
-	list_for_each_entry(nexus, &sport->nexus_list, entry)
-		if (!list_empty(&nexus->ch_list))
-			res = false;
-	rcu_read_unlock();
-
-	return res;
-}
-
 /**
  * srpt_release_sport - disable login and wait for associated channels
  * @sport: SRPT HCA port.
  */
 static int srpt_release_sport(struct srpt_port *sport)
 {
+	DECLARE_COMPLETION_ONSTACK(c);
 	struct srpt_nexus *nexus, *next_n;
 	struct srpt_rdma_ch *ch;

 	WARN_ON_ONCE(irqs_disabled());

+	sport->freed_channels = &c;
+
 	mutex_lock(&sport->mutex);
 	srpt_set_enabled(sport, false);
 	mutex_unlock(&sport->mutex);

-	while (wait_event_timeout(sport->ch_releaseQ,
-				  srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
-		pr_info("%s_%d: waiting for session unregistration ...\n",
-			sport->sdev->device->name, sport->port);
+	while (atomic_read(&sport->refcount) > 0 &&
+	       wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+		pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+			dev_name(&sport->sdev->device->dev), sport->port,
+			atomic_read(&sport->refcount));
		rcu_read_lock();
		list_for_each_entry(nexus, &sport->nexus_list, entry) {
			list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2908,7 +2984,12 @@
 	return 0;
 }

-static struct se_wwn *__srpt_lookup_wwn(const char *name)
+struct port_and_port_id {
+	struct srpt_port *sport;
+	struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
 {
 	struct ib_device *dev;
 	struct srpt_device *sdev;
@@ -2923,25 +3004,38 @@
		for (i = 0; i < dev->phys_port_cnt; i++) {
			sport = &sdev->port[i];

-			if (strcmp(sport->port_guid, name) == 0)
-				return &sport->port_guid_wwn;
-			if (strcmp(sport->port_gid, name) == 0)
-				return &sport->port_gid_wwn;
+			if (strcmp(sport->guid_name, name) == 0) {
+				kref_get(&sdev->refcnt);
+				return (struct port_and_port_id){
+					sport, &sport->guid_id};
+			}
+			if (strcmp(sport->gid_name, name) == 0) {
+				kref_get(&sdev->refcnt);
+				return (struct port_and_port_id){
+					sport, &sport->gid_id};
+			}
		}
 	}

-	return NULL;
+	return (struct port_and_port_id){};
 }

-static struct se_wwn *srpt_lookup_wwn(const char *name)
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the RDMA port reference count if an RDMA port pointer is returned.
+ * The caller must drop that reference count by calling srpt_port_put_ref().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
 {
-	struct se_wwn *wwn;
+	struct port_and_port_id papi;

 	spin_lock(&srpt_dev_lock);
-	wwn = __srpt_lookup_wwn(name);
+	papi = __srpt_lookup_port(name);
 	spin_unlock(&srpt_dev_lock);

-	return wwn;
+	return papi;
 }

@@ -2951,7 +3045,9 @@

 	ib_destroy_srq(sdev->srq);
 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
-			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+			     sdev->srq_size, sdev->req_buf_cache,
+			     DMA_FROM_DEVICE);
+	kmem_cache_destroy(sdev->req_buf_cache);
 	sdev->srq = NULL;
 }

@@ -2976,16 +3072,19 @@
 	}

 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
-		 sdev->device->attrs.max_srq_wr, device->name);
+		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
+
+	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
+						srp_max_req_size, 0, 0, NULL);
+	if (!sdev->req_buf_cache)
+		goto free_srq;

 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
-				      srp_max_req_size, DMA_FROM_DEVICE);
-	if (!sdev->ioctx_ring) {
-		ib_destroy_srq(srq);
-		return -ENOMEM;
-	}
+				      sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
+	if (!sdev->ioctx_ring)
+		goto free_cache;

 	sdev->use_srq = true;
 	sdev->srq = srq;
@@ -2996,6 +3095,13 @@
 	}

 	return 0;
+
+free_cache:
+	kmem_cache_destroy(sdev->req_buf_cache);
+
+free_srq:
+	ib_destroy_srq(srq);
+	return -ENOMEM;
 }

@@ -3009,16 +3115,28 @@
 	} else if (use_srq && !sdev->srq) {
		ret = srpt_alloc_srq(sdev);
 	}
-	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
-		 sdev->use_srq, ret);
+	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
+		 dev_name(&device->dev), sdev->use_srq, ret);
 	return ret;
+}
+
+static void srpt_free_sdev(struct kref *refcnt)
+{
+	struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+	kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+	kref_put(&sdev->refcnt, srpt_free_sdev);
 }

 /**
  * srpt_add_one - InfiniBand device addition callback function
  * @device: Describes a HCA.
  */
-static void srpt_add_one(struct ib_device *device)
+static int srpt_add_one(struct ib_device *device)
 {
 	struct srpt_device *sdev;
 	struct srpt_port *sport;
 	int i;
@@ -3029,14 +3147,17 @@
 	sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
		       GFP_KERNEL);
 	if (!sdev)
-		goto err;
+		return -ENOMEM;

+	kref_init(&sdev->refcnt);
 	sdev->device = device;
 	mutex_init(&sdev->sdev_mutex);

 	sdev->pd = ib_alloc_pd(device, 0);
-	if (IS_ERR(sdev->pd))
+	if (IS_ERR(sdev->pd)) {
+		ret = PTR_ERR(sdev->pd);
		goto free_dev;
+	}

 	sdev->lkey = sdev->pd->local_dma_lkey;

@@ -3052,15 +3173,15 @@
 	if (IS_ERR(sdev->cm_id)) {
		pr_info("ib_create_cm_id() failed: %ld\n",
			PTR_ERR(sdev->cm_id));
+		ret = PTR_ERR(sdev->cm_id);
		sdev->cm_id = NULL;
		if (!rdma_cm_id)
			goto err_ring;
 	}

 	/* print out target login information */
-	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
-		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
-		 srpt_service_guid, srpt_service_guid);
+	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
+		 srpt_service_guid, srpt_service_guid, srpt_service_guid);

 	/*
	 * We do not have a consistent service_id (ie. also id_ext of target_id)
@@ -3084,7 +3205,6 @@
 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		INIT_LIST_HEAD(&sport->nexus_list);
-		init_waitqueue_head(&sport->ch_releaseQ);
		mutex_init(&sport->mutex);
		sport->sdev = sdev;
		sport->port = i;
@@ -3094,10 +3214,12 @@
		sport->port_attrib.use_srq = false;
		INIT_WORK(&sport->work, srpt_refresh_port_work);

-		if (srpt_refresh_port(sport)) {
+		ret = srpt_refresh_port(sport);
+		if (ret) {
			pr_err("MAD registration failed for %s-%d.\n",
-			       sdev->device->name, i);
-			goto err_event;
+			       dev_name(&sdev->device->dev), i);
+			i--;
+			goto err_port;
		}
 	}

@@ -3105,12 +3227,12 @@
 	list_add_tail(&sdev->list, &srpt_dev_list);
 	spin_unlock(&srpt_dev_lock);

-out:
 	ib_set_client_data(device, &srpt_client, sdev);
-	pr_debug("added %s.\n", device->name);
-	return;
+	pr_debug("added %s.\n", dev_name(&device->dev));
+	return 0;

-err_event:
+err_port:
+	srpt_unregister_mad_agent(sdev, i);
 	ib_unregister_event_handler(&sdev->event_handler);
 err_cm:
 	if (sdev->cm_id)
@@ -3119,11 +3241,9 @@
 	srpt_free_srq(sdev);
 	ib_dealloc_pd(sdev->pd);
 free_dev:
-	kfree(sdev);
-err:
-	sdev = NULL;
-	pr_info("%s(%s) failed.\n", __func__, device->name);
-	goto out;
+	srpt_sdev_put(sdev);
+	pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
+	return ret;
 }

@@ -3136,12 +3256,7 @@
 	struct srpt_device *sdev = client_data;
 	int i;

-	if (!sdev) {
-		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
-		return;
-	}
-
-	srpt_unregister_mad_agent(sdev);
+	srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);

 	ib_unregister_event_handler(&sdev->event_handler);

@@ -3170,7 +3285,7 @@

 	ib_dealloc_pd(sdev->pd);

-	kfree(sdev);
+	srpt_sdev_put(sdev);
 }

 static struct ib_client srpt_client = {
@@ -3189,24 +3304,28 @@
 	return 0;
 }

-static char *srpt_get_fabric_name(void)
-{
-	return "srpt";
-}
-
 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
 {
 	return tpg->se_tpg_wwn->priv;
 }

+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+	struct srpt_port *sport = wwn->priv;
+
+	if (sport->guid_id && &sport->guid_id->wwn == wwn)
+		return sport->guid_id;
+	if (sport->gid_id && &sport->gid_id->wwn == wwn)
+		return sport->gid_id;
+	WARN_ON_ONCE(true);
+	return NULL;
+}
+
 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
 {
-	struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);

-	WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
-		     tpg != &sport->port_gid_tpg);
-	return tpg == &sport->port_guid_tpg ? sport->port_guid :
-		sport->port_gid;
+	return stpg->sport_id->name;
 }

 static u16 srpt_get_tag(struct se_portal_group *tpg)
@@ -3224,19 +3343,23 @@
 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
 	struct srpt_rdma_ch *ch = ioctx->ch;
-	unsigned long flags;
+	struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;

 	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));
+
+	if (recv_ioctx) {
+		WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
+		ioctx->recv_ioctx = NULL;
+		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+	}

 	if (ioctx->n_rw_ctx) {
		srpt_free_rw_ctxs(ch, ioctx);
		ioctx->n_rw_ctx = 0;
 	}

-	spin_lock_irqsave(&ch->spinlock, flags);
-	list_add(&ioctx->free_list, &ch->free_list);
-	spin_unlock_irqrestore(&ch->spinlock, flags);
+	target_free_tag(se_cmd->se_sess, se_cmd);
 }

 /**
@@ -3614,7 +3737,7 @@
 	struct se_portal_group *se_tpg = to_tpg(item);
 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

-	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
+	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
 }

@@ -3623,7 +3746,7 @@
 	struct se_portal_group *se_tpg = to_tpg(item);
 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
 	unsigned long tmp;
- 	int ret;
+	int ret;

 	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
@@ -3658,19 +3781,25 @@
 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     const char *name)
 {
-	struct srpt_port *sport = wwn->priv;
-	static struct se_portal_group *tpg;
-	int res;
+	struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+	struct srpt_tpg *stpg;
+	int res = -ENOMEM;

-	WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
-		     wwn != &sport->port_gid_wwn);
-	tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
-		&sport->port_gid_tpg;
-	res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
-	if (res)
+	stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+	if (!stpg)
		return ERR_PTR(res);
+	stpg->sport_id = sport_id;
+	res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+	if (res) {
+		kfree(stpg);
+		return ERR_PTR(res);
+	}

-	return tpg;
+	mutex_lock(&sport_id->mutex);
+	list_add_tail(&stpg->entry, &sport_id->tpg_list);
+	mutex_unlock(&sport_id->mutex);
+
+	return &stpg->tpg;
 }

@@ -3679,10 +3808,17 @@
  */
 static void srpt_drop_tpg(struct se_portal_group *tpg)
 {
+	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+	struct srpt_port_id *sport_id = stpg->sport_id;
 	struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+
+	mutex_lock(&sport_id->mutex);
+	list_del(&stpg->entry);
+	mutex_unlock(&sport_id->mutex);

 	sport->enabled = false;
 	core_tpg_deregister(tpg);
+	kfree(stpg);
 }

 /**
@@ -3695,7 +3831,31 @@
					    struct config_group *group,
					    const char *name)
 {
-	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
+	struct port_and_port_id papi = srpt_lookup_port(name);
+	struct srpt_port *sport = papi.sport;
+	struct srpt_port_id *port_id;
+
+	if (!papi.port_id)
+		return ERR_PTR(-EINVAL);
+	if (*papi.port_id) {
+		/* Attempt to create a directory that already exists. */
+		WARN_ON_ONCE(true);
+		return &(*papi.port_id)->wwn;
+	}
+	port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+	if (!port_id) {
+		srpt_sdev_put(sport->sdev);
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&port_id->mutex);
+	INIT_LIST_HEAD(&port_id->tpg_list);
+	port_id->wwn.priv = sport;
+	memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
+	       sport->gid_name, ARRAY_SIZE(port_id->name));
+
+	*papi.port_id = port_id;
+
+	return &port_id->wwn;
 }

@@ -3704,11 +3864,23 @@
  */
 static void srpt_drop_tport(struct se_wwn *wwn)
 {
+	struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+	struct srpt_port *sport = wwn->priv;
+
+	if (sport->guid_id == port_id)
+		sport->guid_id = NULL;
+	else if (sport->gid_id == port_id)
+		sport->gid_id = NULL;
+	else
+		WARN_ON_ONCE(true);
+
+	srpt_sdev_put(sport->sdev);
+	kfree(port_id);
 }

 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+	return scnprintf(buf, PAGE_SIZE, "\n");
 }

 CONFIGFS_ATTR_RO(srpt_wwn_, version);
@@ -3720,8 +3892,7 @@

 static const struct target_core_fabric_ops srpt_template = {
 	.module				= THIS_MODULE,
-	.name				= "srpt",
-	.get_fabric_name		= srpt_get_fabric_name,
+	.fabric_name			= "srpt",
 	.tpg_get_wwn			= srpt_get_fabric_wwn,
 	.tpg_get_tag			= srpt_get_tag,
 	.tpg_check_demo_mode		= srpt_check_false,
@@ -3735,7 +3906,6 @@
 	.sess_get_index			= srpt_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= srpt_write_pending,
-	.write_pending_status		= srpt_write_pending_status,
 	.set_default_node_attributes	= srpt_set_default_node_attrs,
 	.get_cmd_state			= srpt_get_tcm_cmd_state,
 	.queue_data_in			= srpt_queue_data_in,
@@ -3772,16 +3942,14 @@

 	ret = -EINVAL;
 	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
-		pr_err("invalid value %d for kernel module parameter"
-		       " srp_max_req_size -- must be at least %d.\n",
+		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
		goto out;
 	}

 	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
-		pr_err("invalid value %d for kernel module parameter"
-		       " srpt_srq_size -- must be in the range [%d..%d].\n",
+		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
		goto out;
 	}