forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -51,8 +51,6 @@
 
 /* Name of this kernel module. */
 #define DRV_NAME "ib_srpt"
-#define DRV_VERSION "2.0.0"
-#define DRV_RELDATE "2011-02-14"
 
 #define SRPT_ID_STRING "Linux SRP target"
 
@@ -60,8 +58,7 @@
 #define pr_fmt(fmt) DRV_NAME " " fmt
 
 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
-MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
-		   "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
 /*
@@ -84,13 +81,12 @@
 
 static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
 {
-	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+	return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
 }
 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
 		  0444);
 MODULE_PARM_DESC(srpt_service_guid,
-		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
-		 " instead of using the node_guid of the first HCA.");
+		 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
 
 static struct ib_client srpt_client;
 /* Protects both rdma_cm_port and rdma_cm_id. */
@@ -139,16 +135,13 @@
 static void srpt_event_handler(struct ib_event_handler *handler,
 			       struct ib_event *event)
 {
-	struct srpt_device *sdev;
+	struct srpt_device *sdev =
+		container_of(handler, struct srpt_device, event_handler);
 	struct srpt_port *sport;
 	u8 port_num;
 
-	sdev = ib_get_client_data(event->device, &srpt_client);
-	if (!sdev || sdev->device != event->device)
-		return;
-
 	pr_debug("ASYNC event= %d on device= %s\n", event->event,
-		 sdev->device->name);
+		 dev_name(&sdev->device->dev));
 
 	switch (event->event) {
 	case IB_EVENT_PORT_ERR:
@@ -221,8 +214,9 @@
  */
 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 {
-	pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
-		 event->event, ch, ch->sess_name, ch->state);
+	pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+		 event->event, ch, ch->sess_name, ch->qp->qp_num,
+		 get_ch_state_name(ch->state));
 
 	switch (event->event) {
 	case IB_EVENT_COMM_EST:
@@ -462,7 +456,7 @@
 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
 				  struct ib_mad_send_wc *mad_wc)
 {
-	rdma_destroy_ah(mad_wc->send_buf->ah);
+	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
 	ib_free_send_mad(mad_wc->send_buf);
 }
 
@@ -529,7 +523,7 @@
 	ib_free_send_mad(rsp);
 
 err_rsp:
-	rdma_destroy_ah(ah);
+	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 err:
 	ib_free_recv_mad(mad_wc);
 }
@@ -560,33 +554,37 @@
 	struct ib_port_attr port_attr;
 	int ret;
 
-	memset(&port_modify, 0, sizeof(port_modify));
-	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
-	port_modify.clr_port_cap_mask = 0;
-
-	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-	if (ret)
-		goto err_mod_port;
-
 	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
 	if (ret)
-		goto err_query_port;
+		return ret;
 
 	sport->sm_lid = port_attr.sm_lid;
 	sport->lid = port_attr.lid;
 
 	ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
 	if (ret)
-		goto err_query_port;
+		return ret;
 
-	sport->port_guid_wwn.priv = sport;
-	srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+	srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
 			 &sport->gid.global.interface_id);
-	sport->port_gid_wwn.priv = sport;
-	snprintf(sport->port_gid, sizeof(sport->port_gid),
+	snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
 		 "0x%016llx%016llx",
 		 be64_to_cpu(sport->gid.global.subnet_prefix),
 		 be64_to_cpu(sport->gid.global.interface_id));
+
+	if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+		return 0;
+
+	memset(&port_modify, 0, sizeof(port_modify));
+	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+	port_modify.clr_port_cap_mask = 0;
+
+	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+	if (ret) {
+		pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
+			dev_name(&sport->sdev->device->dev), sport->port, ret);
+		return 0;
+	}
 
 	if (!sport->mad_agent) {
 		memset(&reg_req, 0, sizeof(reg_req));
@@ -603,32 +601,29 @@
 					      srpt_mad_recv_handler,
 					      sport, 0);
 		if (IS_ERR(sport->mad_agent)) {
-			ret = PTR_ERR(sport->mad_agent);
+			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+			       dev_name(&sport->sdev->device->dev), sport->port,
+			       PTR_ERR(sport->mad_agent));
 			sport->mad_agent = NULL;
-			goto err_query_port;
+			memset(&port_modify, 0, sizeof(port_modify));
+			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+			ib_modify_port(sport->sdev->device, sport->port, 0,
+				       &port_modify);
+
 		}
 	}
 
 	return 0;
-
-err_query_port:
-
-	port_modify.set_port_cap_mask = 0;
-	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
-	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
-	return ret;
 }
 
 /**
  * srpt_unregister_mad_agent - unregister MAD callback functions
  * @sdev: SRPT HCA pointer.
+ * @port_cnt: number of ports with registered MAD
  *
  * Note: It is safe to call this function more than once for the same device.
  */
-static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
 {
 	struct ib_port_modify port_modify = {
 		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
@@ -636,12 +631,11 @@
 	struct srpt_port *sport;
 	int i;
 
-	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+	for (i = 1; i <= port_cnt; i++) {
 		sport = &sdev->port[i - 1];
 		WARN_ON(sport->port != i);
-		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
-			pr_err("disabling MAD processing failed.\n");
 		if (sport->mad_agent) {
+			ib_modify_port(sdev->device, i, 0, &port_modify);
 			ib_unregister_mad_agent(sport->mad_agent);
 			sport->mad_agent = NULL;
 		}
@@ -652,31 +646,33 @@
  * srpt_alloc_ioctx - allocate a SRPT I/O context structure
  * @sdev: SRPT HCA pointer.
  * @ioctx_size: I/O context size.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
  * @dir: DMA data direction.
  */
 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
-					   int ioctx_size, int dma_size,
+					   int ioctx_size,
+					   struct kmem_cache *buf_cache,
 					   enum dma_data_direction dir)
 {
 	struct srpt_ioctx *ioctx;
 
-	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+	ioctx = kzalloc(ioctx_size, GFP_KERNEL);
 	if (!ioctx)
 		goto err;
 
-	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+	ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
 	if (!ioctx->buf)
 		goto err_free_ioctx;
 
-	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
+				       kmem_cache_size(buf_cache), dir);
 	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
 		goto err_free_buf;
 
 	return ioctx;
 
 err_free_buf:
-	kfree(ioctx->buf);
+	kmem_cache_free(buf_cache, ioctx->buf);
 err_free_ioctx:
 	kfree(ioctx);
 err:
@@ -687,17 +683,19 @@
  * srpt_free_ioctx - free a SRPT I/O context structure
  * @sdev: SRPT HCA pointer.
  * @ioctx: I/O context pointer.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
-			    int dma_size, enum dma_data_direction dir)
+			    struct kmem_cache *buf_cache,
+			    enum dma_data_direction dir)
 {
 	if (!ioctx)
 		return;
 
-	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
-	kfree(ioctx->buf);
+	ib_dma_unmap_single(sdev->device, ioctx->dma,
+			    kmem_cache_size(buf_cache), dir);
+	kmem_cache_free(buf_cache, ioctx->buf);
 	kfree(ioctx);
 }
 
@@ -706,33 +704,38 @@
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
- * @dma_size: DMA buffer size.
+ * @buf_cache: I/O buffer cache.
+ * @alignment_offset: Offset in each ring buffer at which the SRP information
+ *	unit starts.
 * @dir: DMA data direction.
 */
 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
 				int ring_size, int ioctx_size,
-				int dma_size, enum dma_data_direction dir)
+				struct kmem_cache *buf_cache,
+				int alignment_offset,
+				enum dma_data_direction dir)
 {
 	struct srpt_ioctx **ring;
 	int i;
 
-	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
-		&& ioctx_size != sizeof(struct srpt_send_ioctx));
+	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
+		ioctx_size != sizeof(struct srpt_send_ioctx));
 
 	ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
 	if (!ring)
 		goto out;
 	for (i = 0; i < ring_size; ++i) {
-		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
 		if (!ring[i])
 			goto err;
 		ring[i]->index = i;
+		ring[i]->offset = alignment_offset;
 	}
 	goto out;
 
 err:
 	while (--i >= 0)
-		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+		srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
 	kvfree(ring);
 	ring = NULL;
 out:
@@ -744,12 +747,13 @@
 * @ioctx_ring: I/O context ring to be freed.
 * @sdev: SRPT HCA pointer.
 * @ring_size: Number of ring elements.
- * @dma_size: Size of I/O context DMA buffer.
+ * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
 				 struct srpt_device *sdev, int ring_size,
-				 int dma_size, enum dma_data_direction dir)
+				 struct kmem_cache *buf_cache,
+				 enum dma_data_direction dir)
 {
 	int i;
 
@@ -757,7 +761,7 @@
 		return;
 
 	for (i = 0; i < ring_size; ++i)
-		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+		srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
 	kvfree(ioctx_ring);
 }
 
@@ -819,7 +823,7 @@
 	struct ib_recv_wr wr;
 
 	BUG_ON(!sdev);
-	list.addr = ioctx->ioctx.dma;
+	list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
 	list.length = srp_max_req_size;
 	list.lkey = sdev->lkey;
 
@@ -863,7 +867,7 @@
 
 static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 
 	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
 		 wc->status);
@@ -985,23 +989,28 @@
 
 /**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
- * @ioctx: Pointer to the I/O context associated with the request.
+ * @recv_ioctx: I/O context associated with the received command @srp_cmd.
+ * @ioctx: I/O context that will be used for responding to the initiator.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *	written.
- * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg: [out] scatterlist for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *	descriptors in the SRP_CMD request will be written.
+ * @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
+ *	starts.
 *
 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
-static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
+			     struct srpt_send_ioctx *ioctx,
 			     struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
-			     struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
+			     struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
+			     u16 imm_data_offset)
 {
 	BUG_ON(!dir);
 	BUG_ON(!data_len);
@@ -1025,7 +1034,7 @@
 
 	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
 	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
-		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
+		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
 
 		*data_len = be32_to_cpu(db->len);
 		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
@@ -1037,8 +1046,7 @@
 
 		if (nbufs >
 		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
-			pr_err("received unsupported SRP_CMD request"
-			       " type (%u out + %u in != %u / %zu)\n",
+			pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
 			       srp_cmd->data_out_desc_cnt,
 			       srp_cmd->data_in_desc_cnt,
 			       be32_to_cpu(idb->table_desc.len),
@@ -1049,6 +1057,40 @@
 		*data_len = be32_to_cpu(idb->len);
 		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
 				sg, sg_cnt);
+	} else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
+		struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
+		void *data = (void *)srp_cmd + imm_data_offset;
+		uint32_t len = be32_to_cpu(imm_buf->len);
+		uint32_t req_size = imm_data_offset + len;
+
+		if (req_size > srp_max_req_size) {
+			pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
+			       imm_data_offset, len, srp_max_req_size);
+			return -EINVAL;
+		}
+		if (recv_ioctx->byte_len < req_size) {
+			pr_err("Received too few data - %d < %d\n",
+			       recv_ioctx->byte_len, req_size);
+			return -EIO;
+		}
+		/*
+		 * The immediate data buffer descriptor must occur before the
+		 * immediate data itself.
+		 */
+		if ((void *)(imm_buf + 1) > (void *)data) {
+			pr_err("Received invalid write request\n");
+			return -EINVAL;
+		}
+		*data_len = len;
+		ioctx->recv_ioctx = recv_ioctx;
+		if ((uintptr_t)data & 511) {
+			pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
+			return -EINVAL;
+		}
+		sg_init_one(&ioctx->imm_sg, data, len);
+		*sg = &ioctx->imm_sg;
+		*sg_cnt = 1;
+		return 0;
 	} else {
 		*data_len = 0;
 		return 0;
@@ -1173,24 +1215,18 @@
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
 	struct srpt_send_ioctx *ioctx;
-	unsigned long flags;
+	int tag, cpu;
 
 	BUG_ON(!ch);
 
-	ioctx = NULL;
-	spin_lock_irqsave(&ch->spinlock, flags);
-	if (!list_empty(&ch->free_list)) {
-		ioctx = list_first_entry(&ch->free_list,
-					 struct srpt_send_ioctx, free_list);
-		list_del(&ioctx->free_list);
-	}
-	spin_unlock_irqrestore(&ch->spinlock, flags);
+	tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
+	if (tag < 0)
+		return NULL;
 
-	if (!ioctx)
-		return ioctx;
-
+	ioctx = ch->ioctx_ring[tag];
 	BUG_ON(ioctx->ch != ch);
 	ioctx->state = SRPT_STATE_NEW;
+	WARN_ON_ONCE(ioctx->recv_ioctx);
 	ioctx->n_rdma = 0;
 	ioctx->n_rw_ctx = 0;
 	ioctx->queue_status_only = false;
@@ -1200,6 +1236,8 @@
 	 */
 	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
 	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+	ioctx->cmd.map_tag = tag;
+	ioctx->cmd.map_cpu = cpu;
 
 	return ioctx;
 }
@@ -1282,7 +1320,7 @@
 */
 static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 	struct srpt_send_ioctx *ioctx =
 		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
 
@@ -1376,8 +1414,8 @@
 	BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
 	max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
 	if (sense_data_len > max_sense_len) {
-		pr_warn("truncated sense data from %d to %d"
-			" bytes\n", sense_data_len, max_sense_len);
+		pr_warn("truncated sense data from %d to %d bytes\n",
+			sense_data_len, max_sense_len);
 		sense_data_len = max_sense_len;
 	}
 
@@ -1457,7 +1495,7 @@
 
 	BUG_ON(!send_ioctx);
 
-	srp_cmd = recv_ioctx->ioctx.buf;
+	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	cmd = &send_ioctx->cmd;
 	cmd->tag = srp_cmd->tag;
 
@@ -1477,14 +1515,14 @@
 		break;
 	}
 
-	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
-			       &data_len);
+	rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
+			       &sg, &sg_cnt, &data_len, ch->imm_data_offset);
 	if (rc) {
 		if (rc != -EAGAIN) {
 			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
 			       srp_cmd->tag);
 		}
-		goto release_ioctx;
+		goto busy;
 	}
 
 	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
@@ -1495,13 +1533,12 @@
 	if (rc != 0) {
 		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
 			 srp_cmd->tag);
-		goto release_ioctx;
+		goto busy;
 	}
 	return;
 
-release_ioctx:
-	send_ioctx->state = SRPT_STATE_DONE;
-	srpt_release_cmd(cmd);
+busy:
+	target_send_busy(cmd);
 }
 
 static int srp_tmr_to_tcm(int fn)
@@ -1545,7 +1582,7 @@
 
 	BUG_ON(!send_ioctx);
 
-	srp_tsk = recv_ioctx->ioctx.buf;
+	srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	cmd = &send_ioctx->cmd;
 
 	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
@@ -1561,11 +1598,9 @@
 			       TARGET_SCF_ACK_KREF);
 	if (rc != 0) {
 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
-		goto fail;
+		cmd->se_tfo->queue_tm_rsp(cmd);
 	}
 	return;
-fail:
-	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
 }
 
 /**
@@ -1588,10 +1623,11 @@
 		goto push;
 
 	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
-				   recv_ioctx->ioctx.dma, srp_max_req_size,
+				   recv_ioctx->ioctx.dma,
+				   recv_ioctx->ioctx.offset + srp_max_req_size,
 				   DMA_FROM_DEVICE);
 
-	srp_cmd = recv_ioctx->ioctx.buf;
+	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
 	opcode = srp_cmd->opcode;
 	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
 		send_ioctx = srpt_get_send_ioctx(ch);
@@ -1628,7 +1664,8 @@
 		break;
 	}
 
-	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+	if (!send_ioctx || !send_ioctx->recv_ioctx)
+		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
 	res = true;
 
 out:
@@ -1644,7 +1681,7 @@
 
 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 	struct srpt_recv_ioctx *ioctx =
 		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
 
@@ -1654,6 +1691,7 @@
 		req_lim = atomic_dec_return(&ch->req_lim);
 		if (unlikely(req_lim < 0))
 			pr_err("req_lim = %d < 0\n", req_lim);
+		ioctx->byte_len = wc->byte_len;
 		srpt_handle_new_iu(ch, ioctx);
 	} else {
 		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
@@ -1704,7 +1742,7 @@
 */
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct srpt_rdma_ch *ch = cq->cq_context;
+	struct srpt_rdma_ch *ch = wc->qp->qp_context;
 	struct srpt_send_ioctx *ioctx =
 		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
 	enum srpt_command_state state;
@@ -1717,14 +1755,14 @@
 	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
 
 	if (wc->status != IB_WC_SUCCESS)
-		pr_info("sending response for ioctx 0x%p failed"
-			" with status %d\n", ioctx, wc->status);
+		pr_info("sending response for ioctx 0x%p failed with status %d\n",
+			ioctx, wc->status);
 
 	if (state != SRPT_STATE_DONE) {
 		transport_generic_free_cmd(&ioctx->cmd, 0);
 	} else {
-		pr_err("IB completion has been received too late for"
-		       " wr_id = %u.\n", ioctx->ioctx.index);
+		pr_err("IB completion has been received too late for wr_id = %u.\n",
+		       ioctx->ioctx.index);
 	}
 
 	srpt_process_wait_list(ch);
@@ -1751,14 +1789,15 @@
 		goto out;
 
 retry:
-	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
-			     0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
+	ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
+				IB_POLL_WORKQUEUE);
 	if (IS_ERR(ch->cq)) {
 		ret = PTR_ERR(ch->cq);
 		pr_err("failed to create CQ cqe= %d ret= %d\n",
 		       ch->rq_size + sq_size, ret);
 		goto out;
 	}
+	ch->cq_size = ch->rq_size + sq_size;
 
 	qp_init->qp_context = (void *)ch;
 	qp_init->event_handler
@@ -1776,16 +1815,13 @@
 	 */
 	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
 	qp_init->cap.max_rdma_ctxs = sq_size / 2;
-	qp_init->cap.max_send_sge = min(attrs->max_send_sge,
-					SRPT_MAX_SG_PER_WQE);
+	qp_init->cap.max_send_sge = attrs->max_send_sge;
+	qp_init->cap.max_recv_sge = 1;
 	qp_init->port_num = ch->sport->port;
-	if (sdev->use_srq) {
+	if (sdev->use_srq)
 		qp_init->srq = sdev->srq;
-	} else {
+	else
 		qp_init->cap.max_recv_wr = ch->rq_size;
-		qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
-						SRPT_MAX_SG_PER_WQE);
-	}
 
 	if (ch->using_rdma_cm) {
 		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
@@ -1806,7 +1842,7 @@
 	if (retry) {
 		pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
 			 sq_size, ret);
-		ib_free_cq(ch->cq);
+		ib_cq_pool_put(ch->cq, ch->cq_size);
 		sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
 		goto retry;
 	} else {
@@ -1832,14 +1868,14 @@
 
 err_destroy_cq:
 	ch->qp = NULL;
-	ib_free_cq(ch->cq);
+	ib_cq_pool_put(ch->cq, ch->cq_size);
 	goto out;
 }
 
 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
 {
 	ib_destroy_qp(ch->qp);
-	ib_free_cq(ch->cq);
+	ib_cq_pool_put(ch->cq, ch->cq_size);
 }
 
 /**
@@ -1913,41 +1949,22 @@
 	return ret;
 }
 
-static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
-{
-	struct srpt_nexus *nexus;
-	struct srpt_rdma_ch *ch2;
-	bool res = true;
-
-	rcu_read_lock();
-	list_for_each_entry(nexus, &sport->nexus_list, entry) {
-		list_for_each_entry(ch2, &nexus->ch_list, list) {
-			if (ch2 == ch) {
-				res = false;
-				goto done;
-			}
-		}
-	}
-done:
-	rcu_read_unlock();
-
-	return res;
-}
-
 /* Send DREQ and wait for DREP. */
 static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
 {
+	DECLARE_COMPLETION_ONSTACK(closed);
 	struct srpt_port *sport = ch->sport;
 
 	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
 		 ch->state);
 
+	ch->closed = &closed;
+
 	mutex_lock(&sport->mutex);
 	srpt_disconnect_ch(ch);
 	mutex_unlock(&sport->mutex);
 
-	while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
-				  5 * HZ) == 0)
+	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
 		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
 			ch->sess_name, ch->qp->qp_num, ch->state);
 
@@ -1963,9 +1980,10 @@
 	list_for_each_entry(nexus, &sport->nexus_list, entry) {
 		list_for_each_entry(ch, &nexus->ch_list, list) {
 			if (srpt_disconnect_ch(ch) >= 0)
-				pr_info("Closing channel %s because target %s_%d has been disabled\n",
-					ch->sess_name,
-					sport->sdev->device->name, sport->port);
+				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
					ch->sess_name, ch->qp->qp_num,
+					dev_name(&sport->sdev->device->dev),
+					sport->port);
 			srpt_close_ch(ch);
 		}
 	}
@@ -2026,10 +2044,17 @@
 	__srpt_close_all_ch(sport);
 }
 
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+	if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+		complete(sport->freed_channels);
+}
+
 static void srpt_free_ch(struct kref *kref)
 {
 	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
 
+	srpt_drop_sport_ref(ch->sport);
 	kfree_rcu(ch, rcu);
 }
 
@@ -2073,17 +2098,22 @@
 	list_del_rcu(&ch->list);
 	mutex_unlock(&sport->mutex);
 
+	if (ch->closed)
+		complete(ch->closed);
+
 	srpt_destroy_ch_ib(ch);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
 			     ch->sport->sdev, ch->rq_size,
-			     ch->max_rsp_size, DMA_TO_DEVICE);
+			     ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+	kmem_cache_destroy(ch->rsp_buf_cache);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
 			     sdev, ch->rq_size,
-			     srp_max_req_size, DMA_FROM_DEVICE);
+			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
-	wake_up(&sport->ch_releaseQ);
+	kmem_cache_destroy(ch->req_buf_cache);
 
 	kref_put(&ch->kref, srpt_free_ch);
 }
@@ -2120,12 +2150,10 @@
 	struct srpt_rdma_ch *ch = NULL;
 	char i_port_id[36];
 	u32 it_iu_len;
-	int i, ret;
+	int i, tag_num, tag_size, ret;
+	struct srpt_tpg *stpg;
 
 	WARN_ON_ONCE(irqs_disabled());
-
-	if (WARN_ON(!sdev || !req))
-		return -EINVAL;
 
 	it_iu_len = be32_to_cpu(req->req_it_iu_len);
 
@@ -2159,7 +2187,7 @@
 	if (!sport->enabled) {
 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
-			sport->sdev->device->name, port_num);
+			dev_name(&sport->sdev->device->dev), port_num);
 		goto reject;
 	}
 
@@ -2205,32 +2233,57 @@
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
+	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
+					      512, 0, NULL);
+	if (!ch->rsp_buf_cache)
+		goto free_ch;
+
 	ch->ioctx_ring = (struct srpt_send_ioctx **)
 		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
 				      sizeof(*ch->ioctx_ring[0]),
-				      ch->max_rsp_size, DMA_TO_DEVICE);
+				      ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
 	if (!ch->ioctx_ring) {
 		pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-		goto free_ch;
+		goto free_rsp_cache;
 	}
 
-	INIT_LIST_HEAD(&ch->free_list);
-	for (i = 0; i < ch->rq_size; i++) {
+	for (i = 0; i < ch->rq_size; i++)
 		ch->ioctx_ring[i]->ch = ch;
-		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-	}
 	if (!sdev->use_srq) {
+		u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
+			be16_to_cpu(req->imm_data_offset) : 0;
+		u16 alignment_offset;
+		u32 req_sz;
+
+		if (req->req_flags & SRP_IMMED_REQUESTED)
+			pr_debug("imm_data_offset = %d\n",
+				 be16_to_cpu(req->imm_data_offset));
+		if (imm_data_offset >= sizeof(struct srp_cmd)) {
+			ch->imm_data_offset = imm_data_offset;
+			rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
+		} else {
+			ch->imm_data_offset = 0;
+		}
+		alignment_offset = round_up(imm_data_offset, 512) -
+			imm_data_offset;
+		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
+		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
+						      512, 0, NULL);
+		if (!ch->req_buf_cache)
+			goto free_rsp_ring;
+
 		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
 			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
 					      sizeof(*ch->ioctx_recv_ring[0]),
-					      srp_max_req_size,
+					      ch->req_buf_cache,
+					      alignment_offset,
 					      DMA_FROM_DEVICE);
 		if (!ch->ioctx_recv_ring) {
 			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
 			rej->reason =
 				cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-			goto free_ring;
+			goto free_recv_cache;
 		}
 		for (i = 0; i < ch->rq_size; i++)
 			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
@@ -2248,21 +2301,42 @@
 		 be64_to_cpu(*(__be64 *)nexus->i_port_id),
 		 be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
 
-	pr_debug("registering session %s\n", ch->sess_name);
+	pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+		 i_port_id);
 
-	if (sport->port_guid_tpg.se_tpg_wwn)
-		ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
-						TARGET_PROT_NORMAL,
+	tag_num = ch->rq_size;
+	tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
+
+	if (sport->guid_id) {
+		mutex_lock(&sport->guid_id->mutex);
+		list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+			if (!IS_ERR_OR_NULL(ch->sess))
+				break;
+			ch->sess = target_setup_session(&stpg->tpg, tag_num,
+						tag_size, TARGET_PROT_NORMAL,
 						ch->sess_name, ch, NULL);
-	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
-		ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
-					TARGET_PROT_NORMAL, i_port_id, ch,
-					NULL);
-	/* Retry without leading "0x" */
-	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
-		ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
-						TARGET_PROT_NORMAL,
+		}
+		mutex_unlock(&sport->guid_id->mutex);
+	}
+
+	if (sport->gid_id) {
+		mutex_lock(&sport->gid_id->mutex);
+		list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+			if (!IS_ERR_OR_NULL(ch->sess))
+				break;
+			ch->sess = target_setup_session(&stpg->tpg, tag_num,
+					tag_size, TARGET_PROT_NORMAL, i_port_id,
+					ch, NULL);
+			if (!IS_ERR_OR_NULL(ch->sess))
+				break;
+			/* Retry without leading "0x" */
+			ch->sess = target_setup_session(&stpg->tpg, tag_num,
+						tag_size, TARGET_PROT_NORMAL,
 						i_port_id + 2, ch, NULL);
+		}
+		mutex_unlock(&sport->gid_id->mutex);
+	}
+
 	if (IS_ERR_OR_NULL(ch->sess)) {
 		WARN_ON_ONCE(ch->sess == NULL);
 		ret = PTR_ERR(ch->sess);
@@ -2275,22 +2349,26 @@
 		goto destroy_ib;
 	}
 
+	/*
+	 * Once a session has been created destruction of srpt_rdma_ch objects
+	 * will decrement sport->refcount. Hence increment sport->refcount now.
+	 */
+	atomic_inc(&sport->refcount);
+
 	mutex_lock(&sport->mutex);
 
 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
 		struct srpt_rdma_ch *ch2;
-
-		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
 
 		list_for_each_entry(ch2, &nexus->ch_list, list) {
 			if (srpt_disconnect_ch(ch2) < 0)
 				continue;
 			pr_info("Relogin - closed existing channel %s\n",
 				ch2->sess_name);
-			rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+			rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
 		}
 	} else {
-		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+		rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
 	}
 
 	list_add_tail_rcu(&ch->list, &nexus->ch_list);
@@ -2299,7 +2377,7 @@
 		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
-			sdev->device->name, port_num);
+			dev_name(&sdev->device->dev), port_num);
 		mutex_unlock(&sport->mutex);
 		ret = -EINVAL;
 		goto reject;
@@ -2321,7 +2399,7 @@
 	/* create srp_login_response */
 	rsp->opcode = SRP_LOGIN_RSP;
 	rsp->tag = req->tag;
-	rsp->max_it_iu_len = req->req_it_iu_len;
+	rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
 	rsp->max_ti_iu_len = req->req_it_iu_len;
 	ch->max_ti_iu_len = it_iu_len;
 	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
@@ -2385,12 +2463,18 @@
 free_recv_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
 			     ch->sport->sdev, ch->rq_size,
-			     srp_max_req_size, DMA_FROM_DEVICE);
+			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
-free_ring:
+free_recv_cache:
+	kmem_cache_destroy(ch->req_buf_cache);
+
+free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
 			     ch->sport->sdev, ch->rq_size,
-			     ch->max_rsp_size, DMA_TO_DEVICE);
+			     ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+free_rsp_cache:
+	kmem_cache_destroy(ch->rsp_buf_cache);
 
 free_ch:
 	if (rdma_cm_id)
@@ -2410,7 +2494,8 @@
 			   SRP_BUF_FORMAT_INDIRECT);
 
 	if (rdma_cm_id)
-		rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+		rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+			    IB_CM_REJ_CONSUMER_DEFINED);
 	else
 		ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
 			       rej, sizeof(*rej));
@@ -2452,6 +2537,7 @@
 	struct srpt_device *sdev;
 	struct srp_login_req req;
 	const struct srp_login_req_rdma *req_rdma;
+	struct sa_path_rec *path_rec = cm_id->route.path_rec;
 	char src_addr[40];
 
 	sdev = ib_get_client_data(cm_id->device, &srpt_client);
@@ -2471,12 +2557,13 @@
 	req.req_flags = req_rdma->req_flags;
 	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
 	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
+	req.imm_data_offset = req_rdma->imm_data_offset;
 
 	snprintf(src_addr, sizeof(src_addr), "%pIS",
 		 &cm_id->route.addr.src_addr);
 
 	return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
-				cm_id->route.path_rec->pkey, &req, src_addr);
+				path_rec ? path_rec->pkey : 0, &req, src_addr);
 }
 
 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
@@ -2640,14 +2727,6 @@
 	return ret;
 }
 
-static int srpt_write_pending_status(struct se_cmd *se_cmd)
-{
-	struct srpt_send_ioctx *ioctx;
-
-	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
-	return ioctx->state == SRPT_STATE_NEED_DATA;
-}
-
 /*
 * srpt_write_pending - Start data transfer from initiator to target (write).
 */
26532732 */
....@@ -2660,6 +2739,12 @@
26602739 struct ib_cqe *cqe = &ioctx->rdma_cqe;
26612740 enum srpt_command_state new_state;
26622741 int ret, i;
2742
+
2743
+ if (ioctx->recv_ioctx) {
2744
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2745
+ target_execute_cmd(&ioctx->cmd);
2746
+ return 0;
2747
+ }
26632748
26642749 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
26652750 WARN_ON(new_state == SRPT_STATE_DONE);
....@@ -2724,8 +2809,6 @@
27242809 int resp_len, ret, i;
27252810 u8 srp_tm_status;
27262811
2727
- BUG_ON(!ch);
2728
-
27292812 state = ioctx->state;
27302813 switch (state) {
27312814 case SRPT_STATE_NEW:
....@@ -2741,7 +2824,7 @@
27412824 break;
27422825 }
27432826
2744
- if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
2827
+ if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
27452828 return;
27462829
27472830 /* For read commands, transfer the data to the initiator. */
....@@ -2854,39 +2937,29 @@
28542937 srpt_refresh_port(sport);
28552938 }
28562939
2857
-static bool srpt_ch_list_empty(struct srpt_port *sport)
2858
-{
2859
- struct srpt_nexus *nexus;
2860
- bool res = true;
2861
-
2862
- rcu_read_lock();
2863
- list_for_each_entry(nexus, &sport->nexus_list, entry)
2864
- if (!list_empty(&nexus->ch_list))
2865
- res = false;
2866
- rcu_read_unlock();
2867
-
2868
- return res;
2869
-}
2870
-
28712940 /**
28722941 * srpt_release_sport - disable login and wait for associated channels
28732942 * @sport: SRPT HCA port.
28742943 */
28752944 static int srpt_release_sport(struct srpt_port *sport)
28762945 {
2946
+ DECLARE_COMPLETION_ONSTACK(c);
28772947 struct srpt_nexus *nexus, *next_n;
28782948 struct srpt_rdma_ch *ch;
28792949
28802950 WARN_ON_ONCE(irqs_disabled());
28812951
2952
+ sport->freed_channels = &c;
2953
+
28822954 mutex_lock(&sport->mutex);
28832955 srpt_set_enabled(sport, false);
28842956 mutex_unlock(&sport->mutex);
28852957
2886
- while (wait_event_timeout(sport->ch_releaseQ,
2887
- srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
2888
- pr_info("%s_%d: waiting for session unregistration ...\n",
2889
- sport->sdev->device->name, sport->port);
2958
+ while (atomic_read(&sport->refcount) > 0 &&
2959
+ wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
2960
+ pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
2961
+ dev_name(&sport->sdev->device->dev), sport->port,
2962
+ atomic_read(&sport->refcount));
28902963 rcu_read_lock();
28912964 list_for_each_entry(nexus, &sport->nexus_list, entry) {
28922965 list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2908,7 +2981,12 @@
 	return 0;
 }
 
-static struct se_wwn *__srpt_lookup_wwn(const char *name)
+struct port_and_port_id {
+	struct srpt_port *sport;
+	struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
 {
 	struct ib_device *dev;
 	struct srpt_device *sdev;
@@ -2923,25 +3001,38 @@
 		for (i = 0; i < dev->phys_port_cnt; i++) {
 			sport = &sdev->port[i];
 
-			if (strcmp(sport->port_guid, name) == 0)
-				return &sport->port_guid_wwn;
-			if (strcmp(sport->port_gid, name) == 0)
-				return &sport->port_gid_wwn;
+			if (strcmp(sport->guid_name, name) == 0) {
+				kref_get(&sdev->refcnt);
+				return (struct port_and_port_id){
+					sport, &sport->guid_id};
+			}
+			if (strcmp(sport->gid_name, name) == 0) {
+				kref_get(&sdev->refcnt);
+				return (struct port_and_port_id){
+					sport, &sport->gid_id};
+			}
 		}
 	}
 
-	return NULL;
+	return (struct port_and_port_id){};
 }
 
-static struct se_wwn *srpt_lookup_wwn(const char *name)
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the RDMA port reference count if an RDMA port pointer is returned.
+ * The caller must drop that reference count by calling srpt_port_put_ref().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
 {
-	struct se_wwn *wwn;
+	struct port_and_port_id papi;
 
 	spin_lock(&srpt_dev_lock);
-	wwn = __srpt_lookup_wwn(name);
+	papi = __srpt_lookup_port(name);
 	spin_unlock(&srpt_dev_lock);
 
-	return wwn;
+	return papi;
 }
 
 static void srpt_free_srq(struct srpt_device *sdev)
@@ -2951,7 +3042,9 @@
 
 	ib_destroy_srq(sdev->srq);
 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
-			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+			     sdev->srq_size, sdev->req_buf_cache,
+			     DMA_FROM_DEVICE);
+	kmem_cache_destroy(sdev->req_buf_cache);
 	sdev->srq = NULL;
 }
 
@@ -2976,16 +3069,19 @@
 	}
 
 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
-		 sdev->device->attrs.max_srq_wr, device->name);
+		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
+
+	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
+						srp_max_req_size, 0, 0, NULL);
+	if (!sdev->req_buf_cache)
+		goto free_srq;
 
 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
 		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
 				      sizeof(*sdev->ioctx_ring[0]),
-				      srp_max_req_size, DMA_FROM_DEVICE);
-	if (!sdev->ioctx_ring) {
-		ib_destroy_srq(srq);
-		return -ENOMEM;
-	}
+				      sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
+	if (!sdev->ioctx_ring)
+		goto free_cache;
 
 	sdev->use_srq = true;
 	sdev->srq = srq;
@@ -2996,6 +3092,13 @@
 	}
 
 	return 0;
+
+free_cache:
+	kmem_cache_destroy(sdev->req_buf_cache);
+
+free_srq:
+	ib_destroy_srq(srq);
+	return -ENOMEM;
 }
 
 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
@@ -3009,16 +3112,28 @@
 	} else if (use_srq && !sdev->srq) {
 		ret = srpt_alloc_srq(sdev);
 	}
-	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
-		 sdev->use_srq, ret);
+	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
+		 dev_name(&device->dev), sdev->use_srq, ret);
 	return ret;
+}
+
+static void srpt_free_sdev(struct kref *refcnt)
+{
+	struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+	kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+	kref_put(&sdev->refcnt, srpt_free_sdev);
 }
 
 /**
 * srpt_add_one - InfiniBand device addition callback function
 * @device: Describes a HCA.
 */
-static void srpt_add_one(struct ib_device *device)
+static int srpt_add_one(struct ib_device *device)
 {
 	struct srpt_device *sdev;
 	struct srpt_port *sport;
@@ -3029,14 +3144,17 @@
 	sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
 		       GFP_KERNEL);
 	if (!sdev)
-		goto err;
+		return -ENOMEM;
 
+	kref_init(&sdev->refcnt);
 	sdev->device = device;
 	mutex_init(&sdev->sdev_mutex);
 
 	sdev->pd = ib_alloc_pd(device, 0);
-	if (IS_ERR(sdev->pd))
+	if (IS_ERR(sdev->pd)) {
+		ret = PTR_ERR(sdev->pd);
 		goto free_dev;
+	}
 
 	sdev->lkey = sdev->pd->local_dma_lkey;
 
@@ -3052,15 +3170,15 @@
 	if (IS_ERR(sdev->cm_id)) {
 		pr_info("ib_create_cm_id() failed: %ld\n",
 			PTR_ERR(sdev->cm_id));
+		ret = PTR_ERR(sdev->cm_id);
 		sdev->cm_id = NULL;
 		if (!rdma_cm_id)
 			goto err_ring;
 	}
 
 	/* print out target login information */
-	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
-		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
-		 srpt_service_guid, srpt_service_guid);
+	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
+		 srpt_service_guid, srpt_service_guid, srpt_service_guid);
 
 	/*
 	 * We do not have a consistent service_id (ie. also id_ext of target_id)
@@ -3084,7 +3202,6 @@
 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
 		sport = &sdev->port[i - 1];
 		INIT_LIST_HEAD(&sport->nexus_list);
-		init_waitqueue_head(&sport->ch_releaseQ);
 		mutex_init(&sport->mutex);
 		sport->sdev = sdev;
 		sport->port = i;
@@ -3094,10 +3211,12 @@
 		sport->port_attrib.use_srq = false;
 		INIT_WORK(&sport->work, srpt_refresh_port_work);
 
-		if (srpt_refresh_port(sport)) {
+		ret = srpt_refresh_port(sport);
+		if (ret) {
 			pr_err("MAD registration failed for %s-%d.\n",
-			       sdev->device->name, i);
-			goto err_event;
+			       dev_name(&sdev->device->dev), i);
+			i--;
+			goto err_port;
 		}
 	}
 
@@ -3105,12 +3224,12 @@
 	list_add_tail(&sdev->list, &srpt_dev_list);
 	spin_unlock(&srpt_dev_lock);
 
-out:
 	ib_set_client_data(device, &srpt_client, sdev);
-	pr_debug("added %s.\n", device->name);
-	return;
+	pr_debug("added %s.\n", dev_name(&device->dev));
+	return 0;
 
-err_event:
+err_port:
+	srpt_unregister_mad_agent(sdev, i);
 	ib_unregister_event_handler(&sdev->event_handler);
 err_cm:
 	if (sdev->cm_id)
@@ -3119,11 +3238,9 @@
 	srpt_free_srq(sdev);
 	ib_dealloc_pd(sdev->pd);
 free_dev:
-	kfree(sdev);
-err:
-	sdev = NULL;
-	pr_info("%s(%s) failed.\n", __func__, device->name);
-	goto out;
+	srpt_sdev_put(sdev);
+	pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
+	return ret;
 }
 
 /**
@@ -3136,12 +3253,7 @@
 	struct srpt_device *sdev = client_data;
 	int i;
 
-	if (!sdev) {
-		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
-		return;
-	}
-
-	srpt_unregister_mad_agent(sdev);
+	srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
 
 	ib_unregister_event_handler(&sdev->event_handler);
 
@@ -3170,7 +3282,7 @@
 
 	ib_dealloc_pd(sdev->pd);
 
-	kfree(sdev);
+	srpt_sdev_put(sdev);
 }
 
 static struct ib_client srpt_client = {
@@ -3189,24 +3301,28 @@
 	return 0;
 }
 
-static char *srpt_get_fabric_name(void)
-{
-	return "srpt";
-}
-
 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
 {
 	return tpg->se_tpg_wwn->priv;
 }
 
+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+	struct srpt_port *sport = wwn->priv;
+
+	if (sport->guid_id && &sport->guid_id->wwn == wwn)
+		return sport->guid_id;
+	if (sport->gid_id && &sport->gid_id->wwn == wwn)
+		return sport->gid_id;
+	WARN_ON_ONCE(true);
+	return NULL;
+}
+
 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
 {
-	struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
 
-	WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
-		     tpg != &sport->port_gid_tpg);
-	return tpg == &sport->port_guid_tpg ? sport->port_guid :
-		sport->port_gid;
+	return stpg->sport_id->name;
 }
 
 static u16 srpt_get_tag(struct se_portal_group *tpg)
@@ -3224,19 +3340,23 @@
 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
 				struct srpt_send_ioctx, cmd);
 	struct srpt_rdma_ch *ch = ioctx->ch;
-	unsigned long flags;
+	struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
 
 	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
 		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));
+
+	if (recv_ioctx) {
+		WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
+		ioctx->recv_ioctx = NULL;
+		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+	}
 
 	if (ioctx->n_rw_ctx) {
 		srpt_free_rw_ctxs(ch, ioctx);
 		ioctx->n_rw_ctx = 0;
 	}
 
-	spin_lock_irqsave(&ch->spinlock, flags);
-	list_add(&ioctx->free_list, &ch->free_list);
-	spin_unlock_irqrestore(&ch->spinlock, flags);
+	target_free_tag(se_cmd->se_sess, se_cmd);
 }
 
 /**
@@ -3614,7 +3734,7 @@
 	struct se_portal_group *se_tpg = to_tpg(item);
 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
 
-	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
+	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
 }
@@ -3623,7 +3743,7 @@
 	struct se_portal_group *se_tpg = to_tpg(item);
 	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
 	unsigned long tmp;
-	int ret;
+	int ret;
 
 	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
@@ -3658,19 +3778,25 @@
 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
 					     const char *name)
 {
-	struct srpt_port *sport = wwn->priv;
-	static struct se_portal_group *tpg;
-	int res;
+	struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+	struct srpt_tpg *stpg;
+	int res = -ENOMEM;
 
-	WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
-		     wwn != &sport->port_gid_wwn);
-	tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
-		&sport->port_gid_tpg;
-	res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
-	if (res)
+	stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+	if (!stpg)
 		return ERR_PTR(res);
+	stpg->sport_id = sport_id;
+	res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+	if (res) {
+		kfree(stpg);
+		return ERR_PTR(res);
+	}
 
-	return tpg;
+	mutex_lock(&sport_id->mutex);
+	list_add_tail(&stpg->entry, &sport_id->tpg_list);
+	mutex_unlock(&sport_id->mutex);
+
+	return &stpg->tpg;
 }
 
@@ -3679,10 +3805,17 @@
 */
 static void srpt_drop_tpg(struct se_portal_group *tpg)
 {
+	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+	struct srpt_port_id *sport_id = stpg->sport_id;
 	struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+
+	mutex_lock(&sport_id->mutex);
+	list_del(&stpg->entry);
+	mutex_unlock(&sport_id->mutex);
 
 	sport->enabled = false;
 	core_tpg_deregister(tpg);
+	kfree(stpg);
 }
 
 /**
@@ -3695,7 +3828,31 @@
 					struct config_group *group,
 					const char *name)
 {
-	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
+	struct port_and_port_id papi = srpt_lookup_port(name);
+	struct srpt_port *sport = papi.sport;
+	struct srpt_port_id *port_id;
+
+	if (!papi.port_id)
+		return ERR_PTR(-EINVAL);
+	if (*papi.port_id) {
+		/* Attempt to create a directory that already exists. */
+		WARN_ON_ONCE(true);
+		return &(*papi.port_id)->wwn;
+	}
+	port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+	if (!port_id) {
+		srpt_sdev_put(sport->sdev);
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&port_id->mutex);
+	INIT_LIST_HEAD(&port_id->tpg_list);
+	port_id->wwn.priv = sport;
+	memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
+	       sport->gid_name, ARRAY_SIZE(port_id->name));
+
+	*papi.port_id = port_id;
+
+	return &port_id->wwn;
 }
 
 /**
@@ -3704,11 +3861,23 @@
 */
 static void srpt_drop_tport(struct se_wwn *wwn)
 {
+	struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+	struct srpt_port *sport = wwn->priv;
+
+	if (sport->guid_id == port_id)
+		sport->guid_id = NULL;
+	else if (sport->gid_id == port_id)
+		sport->gid_id = NULL;
+	else
+		WARN_ON_ONCE(true);
+
+	srpt_sdev_put(sport->sdev);
+	kfree(port_id);
 }
 
 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+	return scnprintf(buf, PAGE_SIZE, "\n");
 }
 
 CONFIGFS_ATTR_RO(srpt_wwn_, version);
@@ -3720,8 +3889,7 @@
 
 static const struct target_core_fabric_ops srpt_template = {
 	.module = THIS_MODULE,
-	.name = "srpt",
-	.get_fabric_name = srpt_get_fabric_name,
+	.fabric_name = "srpt",
 	.tpg_get_wwn = srpt_get_fabric_wwn,
 	.tpg_get_tag = srpt_get_tag,
 	.tpg_check_demo_mode = srpt_check_false,
@@ -3735,7 +3903,6 @@
 	.sess_get_index = srpt_sess_get_index,
 	.sess_get_initiator_sid = NULL,
 	.write_pending = srpt_write_pending,
-	.write_pending_status = srpt_write_pending_status,
 	.set_default_node_attributes = srpt_set_default_node_attrs,
 	.get_cmd_state = srpt_get_tcm_cmd_state,
 	.queue_data_in = srpt_queue_data_in,
@@ -3772,16 +3939,14 @@
 
 	ret = -EINVAL;
 	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
-		pr_err("invalid value %d for kernel module parameter"
-		       " srp_max_req_size -- must be at least %d.\n",
+		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
 		       srp_max_req_size, MIN_MAX_REQ_SIZE);
 		goto out;
 	}
 
 	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
 	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
-		pr_err("invalid value %d for kernel module parameter"
-		       " srpt_srq_size -- must be in the range [%d..%d].\n",
+		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
 		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
37873952 }