2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
--- a/kernel/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/kernel/drivers/infiniband/ulp/srp/ib_srp.c
@@ -71,7 +71,6 @@
 static unsigned int cmd_sg_entries;
 static unsigned int indirect_sg_entries;
 static bool allow_ext_sg;
-static bool prefer_fr = true;
 static bool register_always = true;
 static bool never_register;
 static int topspin_workarounds = 1;
@@ -94,10 +93,6 @@
 module_param(topspin_workarounds, int, 0444);
 MODULE_PARM_DESC(topspin_workarounds,
 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-		 "Whether to use fast registration if both FMR and fast registration are supported");
 
 module_param(register_always, bool, 0444);
 MODULE_PARM_DESC(register_always,
@@ -132,13 +127,23 @@
 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
 		 " this functionality is disabled.");
 
+static bool srp_use_imm_data = true;
+module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
+MODULE_PARM_DESC(use_imm_data,
+		 "Whether or not to request permission to use immediate data during SRP login.");
+
+static unsigned int srp_max_imm_data = 8 * 1024;
+module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
+MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
+
 static unsigned ch_count;
 module_param(ch_count, uint, 0444);
 MODULE_PARM_DESC(ch_count,
 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
 
-static void srp_add_one(struct ib_device *device);
+static int srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device, void *client_data);
+static void srp_rename_dev(struct ib_device *device, void *client_data);
 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
 			      const char *opname);
@@ -153,7 +158,8 @@
 static struct ib_client srp_client = {
 	.name   = "srp",
 	.add    = srp_add_one,
-	.remove = srp_remove_one
+	.remove = srp_remove_one,
+	.rename = srp_rename_dev
 };
 
 static struct ib_sa_client srp_sa_client;
@@ -163,9 +169,9 @@
 	int tmo = *(int *)kp->arg;
 
 	if (tmo >= 0)
-		return sprintf(buffer, "%d", tmo);
+		return sprintf(buffer, "%d\n", tmo);
 	else
-		return sprintf(buffer, "off");
+		return sprintf(buffer, "off\n");
 }
 
 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -341,11 +347,11 @@
 
 	init_completion(&ch->done);
 	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
-				(struct sockaddr *)&target->rdma_cm.src : NULL,
-				(struct sockaddr *)&target->rdma_cm.dst,
+				&target->rdma_cm.src.sa : NULL,
+				&target->rdma_cm.dst.sa,
 				SRP_PATH_REC_TIMEOUT_MS);
 	if (ret) {
-		pr_err("No route available from %pIS to %pIS (%d)\n",
+		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
 		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
 		goto out;
 	}
@@ -355,7 +361,7 @@
 
 	ret = ch->status;
 	if (ret) {
-		pr_err("Resolving address %pIS failed (%d)\n",
+		pr_err("Resolving address %pISpsc failed (%d)\n",
 		       &target->rdma_cm.dst, ret);
 		goto out;
 	}
@@ -375,24 +381,6 @@
 
 	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
 		srp_new_ib_cm_id(ch);
-}
-
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_fmr_pool_param fmr_param;
-
-	memset(&fmr_param, 0, sizeof(fmr_param));
-	fmr_param.pool_size	    = target->mr_pool_size;
-	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
-	fmr_param.cache		    = 1;
-	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
-	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
-	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
-				       IB_ACCESS_REMOTE_WRITE |
-				       IB_ACCESS_REMOTE_READ);
-
-	return ib_create_fmr_pool(dev->pd, &fmr_param);
 }
 
 /**
@@ -434,8 +422,7 @@
 	if (pool_size <= 0)
 		goto err;
 	ret = -ENOMEM;
-	pool = kzalloc(sizeof(struct srp_fr_pool) +
-		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
+	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
 	if (!pool)
 		goto err;
 	pool->size = pool_size;
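
The kzalloc() above now uses struct_size(), which sizes a struct with a trailing flexible array and saturates on arithmetic overflow instead of wrapping. A minimal userspace sketch of the same sizing; the struct names here are stand-ins for srp_fr_pool/srp_fr_desc, not the driver's definitions:

#include <stdio.h>
#include <stdlib.h>

struct fr_desc { void *entry; };	/* stand-in for struct srp_fr_desc */
struct fr_pool {			/* stand-in for struct srp_fr_pool */
	int size;
	struct fr_desc desc[];		/* flexible array member */
};

int main(void)
{
	size_t n = 16;
	/* what struct_size(pool, desc, n) evaluates to, minus the
	 * kernel macro's overflow check: */
	size_t bytes = sizeof(struct fr_pool) + n * sizeof(struct fr_desc);
	struct fr_pool *pool = calloc(1, bytes);

	printf("%zu bytes for %zu descriptors\n", bytes, n);
	free(pool);
	return 0;
}
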
@@ -542,10 +529,10 @@
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
+	const struct ib_device_attr *attr = &dev->dev->attrs;
 	struct ib_qp_init_attr *init_attr;
 	struct ib_cq *recv_cq, *send_cq;
 	struct ib_qp *qp;
-	struct ib_fmr_pool *fmr_pool = NULL;
 	struct srp_fr_pool *fr_pool = NULL;
 	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
 	int ret;
@@ -573,11 +560,13 @@
 	init_attr->cap.max_send_wr     = m * target->queue_size;
 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
 	init_attr->cap.max_recv_sge    = 1;
-	init_attr->cap.max_send_sge    = 1;
+	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
 	init_attr->qp_type             = IB_QPT_RC;
 	init_attr->send_cq             = send_cq;
 	init_attr->recv_cq             = recv_cq;
+
+	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
 
 	if (target->using_rdma_cm) {
 		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
@@ -606,14 +595,6 @@
 				     "FR pool allocation failed (%d)\n", ret);
 			goto err_qp;
 		}
-	} else if (dev->use_fmr) {
-		fmr_pool = srp_alloc_fmr_pool(target);
-		if (IS_ERR(fmr_pool)) {
-			ret = PTR_ERR(fmr_pool);
-			shost_printk(KERN_WARNING, target->scsi_host, PFX
-				     "FMR pool allocation failed (%d)\n", ret);
-			goto err_qp;
-		}
 	}
 
 	if (ch->qp)
@@ -631,10 +612,6 @@
 		if (ch->fr_pool)
 			srp_destroy_fr_pool(ch->fr_pool);
 		ch->fr_pool = fr_pool;
-	} else if (dev->use_fmr) {
-		if (ch->fmr_pool)
-			ib_destroy_fmr_pool(ch->fmr_pool);
-		ch->fmr_pool = fmr_pool;
 	}
 
 	kfree(init_attr);
@@ -689,9 +666,6 @@
 	if (dev->use_fast_reg) {
 		if (ch->fr_pool)
 			srp_destroy_fr_pool(ch->fr_pool);
-	} else if (dev->use_fmr) {
-		if (ch->fmr_pool)
-			ib_destroy_fmr_pool(ch->fmr_pool);
 	}
 
 	srp_destroy_qp(ch);
@@ -823,7 +797,8 @@
 	return subnet_timeout;
 }
 
-static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
+static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			bool multich)
 {
 	struct srp_target_port *target = ch->target;
 	struct {
@@ -852,11 +827,15 @@
 
 	req->ib_req.opcode = SRP_LOGIN_REQ;
 	req->ib_req.tag = 0;
-	req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
 	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					      SRP_BUF_FORMAT_INDIRECT);
 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
 				 SRP_MULTICHAN_SINGLE);
+	if (srp_use_imm_data) {
+		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
+		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
+	}
 
 	if (target->using_rdma_cm) {
 		req->rdma_param.flow_control = req->ib_param.flow_control;
@@ -873,6 +852,7 @@
 	req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
 	req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
 	req->rdma_req.req_flags	= req->ib_req.req_flags;
+	req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
 
 	ipi = req->rdma_req.initiator_port_id;
 	tpi = req->rdma_req.target_port_id;
@@ -998,12 +978,8 @@
 
 	for (i = 0; i < target->req_ring_size; ++i) {
 		req = &ch->req_ring[i];
-		if (dev->use_fast_reg) {
+		if (dev->use_fast_reg)
 			kfree(req->fr_list);
-		} else {
-			kfree(req->fmr_list);
-			kfree(req->map_page);
-		}
 		if (req->indirect_dma_addr) {
 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
 					    target->indirect_size,
@@ -1022,7 +998,6 @@
 	struct srp_device *srp_dev = target->srp_host->srp_dev;
 	struct ib_device *ibdev = srp_dev->dev;
 	struct srp_request *req;
-	void *mr_list;
 	dma_addr_t dma_addr;
 	int i, ret = -ENOMEM;
 
@@ -1033,18 +1008,10 @@
 
 	for (i = 0; i < target->req_ring_size; ++i) {
 		req = &ch->req_ring[i];
-		mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
-					GFP_KERNEL);
-		if (!mr_list)
-			goto out;
 		if (srp_dev->use_fast_reg) {
-			req->fr_list = mr_list;
-		} else {
-			req->fmr_list = mr_list;
-			req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
-						      sizeof(void *),
-						      GFP_KERNEL);
-			if (!req->map_page)
+			req->fr_list = kmalloc_array(target->mr_per_cmd,
+						     sizeof(void *), GFP_KERNEL);
+			if (!req->fr_list)
 				goto out;
 		}
 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
@@ -1145,7 +1112,8 @@
 	return c;
 }
 
-static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
+static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			  bool multich)
 {
 	struct srp_target_port *target = ch->target;
 	int ret;
@@ -1158,7 +1126,7 @@
 
 	while (1) {
 		init_completion(&ch->done);
-		ret = srp_send_req(ch, multich);
+		ret = srp_send_req(ch, max_iu_len, multich);
 		if (ret)
 			goto out;
 		ret = wait_for_completion_interruptible(&ch->done);
@@ -1252,11 +1220,6 @@
 		if (req->nmdesc)
 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
 					req->nmdesc);
-	} else if (dev->use_fmr) {
-		struct ib_pool_fmr **pfmr;
-
-		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
-			ib_fmr_pool_unmap(*pfmr);
 	}
 
 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1330,16 +1293,7 @@
 {
 	struct srp_target_port *target = rport->lld_data;
 	struct srp_rdma_ch *ch;
-	struct Scsi_Host *shost = target->scsi_host;
-	struct scsi_device *sdev;
 	int i, j;
-
-	/*
-	 * Invoking srp_terminate_io() while srp_queuecommand() is running
-	 * is not safe. Hence the warning statement below.
-	 */
-	shost_for_each_device(sdev, shost)
-		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
 
 	for (i = 0; i < target->ch_count; i++) {
 		ch = &target->ch[i];
@@ -1351,6 +1305,26 @@
 					       DID_TRANSPORT_FAILFAST << 16);
 		}
 	}
+}
+
+/* Calculate maximum initiator to target information unit length. */
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+				  uint32_t max_it_iu_size)
+{
+	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
+		sizeof(struct srp_indirect_buf) +
+		cmd_sg_cnt * sizeof(struct srp_direct_buf);
+
+	if (use_imm_data)
+		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
+				 srp_max_imm_data);
+
+	if (max_it_iu_size)
+		max_iu_len = min(max_iu_len, max_it_iu_size);
+
+	pr_debug("max_iu_len = %d\n", max_iu_len);
+
+	return max_iu_len;
 }
 
 /*
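
For concreteness, here is the srp_max_it_iu_len() arithmetic with the driver defaults (cmd_sg_entries = 12, max_imm_data = 8 KiB, no max_it_iu_size cap) as a minimal userspace sketch. The sizes are the wire-format sizes asserted by the BUILD_BUG_ON()s at the end of this patch; SRP_MAX_ADD_CDB_LEN = 16 and the SRP_IMM_DATA_OFFSET definition are assumptions taken from <scsi/srp.h>, consistent with the WARN_ON_ONCE() in the immediate-data path below:

#include <stdint.h>
#include <stdio.h>

#define SRP_CMD_SZ		48u	/* sizeof(struct srp_cmd) */
#define SRP_MAX_ADD_CDB_LEN	16u
#define SRP_INDIRECT_BUF_SZ	20u	/* sizeof(struct srp_indirect_buf) */
#define SRP_DIRECT_BUF_SZ	16u	/* sizeof(struct srp_direct_buf) */
#define SRP_IMM_BUF_SZ		4u	/* sizeof(struct srp_imm_buf) */
#define SRP_IMM_DATA_OFFSET	(SRP_CMD_SZ + SRP_MAX_ADD_CDB_LEN + SRP_IMM_BUF_SZ)

static uint32_t max_it_iu_len(int cmd_sg_cnt, int use_imm_data,
			      uint32_t max_imm_data, uint32_t max_it_iu_size)
{
	uint32_t len = SRP_CMD_SZ + SRP_MAX_ADD_CDB_LEN +
		SRP_INDIRECT_BUF_SZ + cmd_sg_cnt * SRP_DIRECT_BUF_SZ;

	if (use_imm_data && SRP_IMM_DATA_OFFSET + max_imm_data > len)
		len = SRP_IMM_DATA_OFFSET + max_imm_data;	/* max() */
	if (max_it_iu_size && len > max_it_iu_size)
		len = max_it_iu_size;				/* min() */
	return len;
}

int main(void)
{
	printf("%u\n", max_it_iu_len(12, 0, 8192, 0));	/* 276 bytes */
	printf("%u\n", max_it_iu_len(12, 1, 8192, 0));	/* 8260 bytes */
	return 0;
}
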
@@ -1366,6 +1340,9 @@
 {
 	struct srp_target_port *target = rport->lld_data;
 	struct srp_rdma_ch *ch;
+	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+						srp_use_imm_data,
+						target->max_it_iu_size);
 	int i, j, ret = 0;
 	bool multich = false;
 
@@ -1411,7 +1388,7 @@
 		ch = &target->ch[i];
 		if (ret)
 			break;
-		ret = srp_connect_ch(ch, multich);
+		ret = srp_connect_ch(ch, max_iu_len, multich);
 		multich = true;
 	}
 
@@ -1436,50 +1413,6 @@
 	state->total_len += dma_len;
 	state->desc++;
 	state->ndesc++;
-}
-
-static int srp_map_finish_fmr(struct srp_map_state *state,
-			      struct srp_rdma_ch *ch)
-{
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_pool_fmr *fmr;
-	u64 io_addr = 0;
-
-	if (state->fmr.next >= state->fmr.end) {
-		shost_printk(KERN_ERR, ch->target->scsi_host,
-			     PFX "Out of MRs (mr_per_cmd = %d)\n",
-			     ch->target->mr_per_cmd);
-		return -ENOMEM;
-	}
-
-	WARN_ON_ONCE(!dev->use_fmr);
-
-	if (state->npages == 0)
-		return 0;
-
-	if (state->npages == 1 && target->global_rkey) {
-		srp_map_desc(state, state->base_dma_addr, state->dma_len,
-			     target->global_rkey);
-		goto reset_state;
-	}
-
-	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
-				   state->npages, io_addr);
-	if (IS_ERR(fmr))
-		return PTR_ERR(fmr);
-
-	*state->fmr.next++ = fmr;
-	state->nmdesc++;
-
-	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
-		     state->dma_len, fmr->fmr->rkey);
-
-reset_state:
-	state->npages = 0;
-	state->dma_len = 0;
-
-	return 0;
 }
 
 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1572,75 +1505,6 @@
 	return n;
 }
 
-static int srp_map_sg_entry(struct srp_map_state *state,
-			    struct srp_rdma_ch *ch,
-			    struct scatterlist *sg)
-{
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_device *ibdev = dev->dev;
-	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
-	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-	unsigned int len = 0;
-	int ret;
-
-	WARN_ON_ONCE(!dma_len);
-
-	while (dma_len) {
-		unsigned offset = dma_addr & ~dev->mr_page_mask;
-
-		if (state->npages == dev->max_pages_per_mr ||
-		    (state->npages > 0 && offset != 0)) {
-			ret = srp_map_finish_fmr(state, ch);
-			if (ret)
-				return ret;
-		}
-
-		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
-		if (!state->npages)
-			state->base_dma_addr = dma_addr;
-		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
-		state->dma_len += len;
-		dma_addr += len;
-		dma_len -= len;
-	}
-
-	/*
-	 * If the end of the MR is not on a page boundary then we need to
-	 * close it out and start a new one -- we can only merge at page
-	 * boundaries.
-	 */
-	ret = 0;
-	if ((dma_addr & ~dev->mr_page_mask) != 0)
-		ret = srp_map_finish_fmr(state, ch);
-	return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
-			  struct srp_request *req, struct scatterlist *scat,
-			  int count)
-{
-	struct scatterlist *sg;
-	int i, ret;
-
-	state->pages = req->map_page;
-	state->fmr.next = req->fmr_list;
-	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
-	for_each_sg(scat, sg, count, i) {
-		ret = srp_map_sg_entry(state, ch, sg);
-		if (ret)
-			return ret;
-	}
-
-	ret = srp_map_finish_fmr(state, ch);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 			 struct srp_request *req, struct scatterlist *scat,
 			 int count)
@@ -1674,13 +1538,11 @@
 			  int count)
 {
 	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(scat, sg, count, i) {
-		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-			     ib_sg_dma_len(dev->dev, sg),
+		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
 			     target->global_rkey);
 	}
 
@@ -1702,7 +1564,6 @@
 	struct srp_device *dev = target->srp_host->srp_dev;
 	struct srp_map_state state;
 	struct srp_direct_buf idb_desc;
-	u64 idb_pages[1];
 	struct scatterlist idb_sg[1];
 	int ret;
 
@@ -1725,14 +1586,6 @@
 		if (ret < 0)
 			return ret;
 		WARN_ON_ONCE(ret < 1);
-	} else if (dev->use_fmr) {
-		state.pages = idb_pages;
-		state.pages[0] = (req->indirect_dma_addr &
-				  dev->mr_page_mask);
-		state.npages = 1;
-		ret = srp_map_finish_fmr(&state, ch);
-		if (ret < 0)
-			return ret;
 	} else {
 		return -EINVAL;
 	}
@@ -1756,9 +1609,6 @@
 	if (dev->use_fast_reg)
 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
 			mr_len += (*pfr)->mr->length;
-	else if (dev->use_fmr)
-		for (i = 0; i < state->nmdesc; i++)
-			mr_len += be32_to_cpu(req->indirect_desc[i].len);
 	if (desc_len != scsi_bufflen(req->scmnd) ||
 	    mr_len > scsi_bufflen(req->scmnd))
 		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1773,25 +1623,29 @@
  * @req: SRP request
  *
  * Returns the length in bytes of the SRP_CMD IU or a negative value if
- * mapping failed.
+ * mapping failed. The size of any immediate data is not included in the
+ * return value.
  */
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 			struct srp_request *req)
 {
 	struct srp_target_port *target = ch->target;
-	struct scatterlist *scat;
+	struct scatterlist *scat, *sg;
 	struct srp_cmd *cmd = req->cmd->buf;
-	int len, nents, count, ret;
+	int i, len, nents, count, ret;
 	struct srp_device *dev;
 	struct ib_device *ibdev;
 	struct srp_map_state state;
 	struct srp_indirect_buf *indirect_hdr;
+	u64 data_len;
 	u32 idb_len, table_len;
 	__be32 idb_rkey;
 	u8 fmt;
 
+	req->cmd->num_sge = 1;
+
 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
-		return sizeof (struct srp_cmd);
+		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
 
 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
@@ -1803,6 +1657,7 @@
 
 	nents = scsi_sg_count(scmnd);
 	scat  = scsi_sglist(scmnd);
+	data_len = scsi_bufflen(scmnd);
 
 	dev = target->srp_host->srp_dev;
 	ibdev = dev->dev;
@@ -1811,8 +1666,31 @@
 	if (unlikely(count == 0))
 		return -EIO;
 
+	if (ch->use_imm_data &&
+	    count <= ch->max_imm_sge &&
+	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
+	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
+		struct srp_imm_buf *buf;
+		struct ib_sge *sge = &req->cmd->sge[1];
+
+		fmt = SRP_DATA_DESC_IMM;
+		len = SRP_IMM_DATA_OFFSET;
+		req->nmdesc = 0;
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
+		buf->len = cpu_to_be32(data_len);
+		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
+		for_each_sg(scat, sg, count, i) {
+			sge[i].addr   = sg_dma_address(sg);
+			sge[i].length = sg_dma_len(sg);
+			sge[i].lkey   = target->lkey;
+		}
+		req->cmd->num_sge += count;
+		goto map_complete;
+	}
+
 	fmt = SRP_DATA_DESC_DIRECT;
-	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
+	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+		sizeof(struct srp_direct_buf);
 
 	if (count == 1 && target->global_rkey) {
 		/*
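
With SRP_DATA_DESC_IMM the write payload travels inside the IU itself: sge[0] covers the header up to SRP_IMM_DATA_OFFSET and sge[1..count] point straight at the DMA-mapped scatterlist, so no memory registration is needed for small writes. A sketch of the resulting IU layout, assuming the 68-byte SRP_IMM_DATA_OFFSET derived above (the additional-CDB area is empty for CDBs that fit in srp_cmd):

	offset  0                48               64            68
	        +----------------+----------------+-------------+----------------+
	        | struct srp_cmd | additional CDB | srp_imm_buf | immediate data |
	        +----------------+----------------+-------------+----------------+
	        |<----------- sge[0]: IU header ---------------->|<-- sge[1..] -->|
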
@@ -1821,11 +1699,12 @@
 		 * single entry. So a direct descriptor along with
 		 * the DMA MR suffices.
 		 */
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
+		struct srp_direct_buf *buf;
 
-		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
+		buf->va  = cpu_to_be64(sg_dma_address(scat));
 		buf->key = cpu_to_be32(target->global_rkey);
-		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
+		buf->len = cpu_to_be32(sg_dma_len(scat));
 
 		req->nmdesc = 0;
 		goto map_complete;
@@ -1835,7 +1714,7 @@
 	 * We have more than one scatter/gather entry, so build our indirect
 	 * descriptor table, trying to merge as many entries as we can.
 	 */
-	indirect_hdr = (void *) cmd->add_data;
+	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
 
 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
 				   target->indirect_size, DMA_TO_DEVICE);
@@ -1844,8 +1723,6 @@
 	state.desc = req->indirect_desc;
 	if (dev->use_fast_reg)
 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
-	else if (dev->use_fmr)
-		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
 	else
 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
 	req->nmdesc = state.nmdesc;
@@ -1870,8 +1747,9 @@
 		 * Memory registration collapsed the sg-list into one entry,
 		 * so use a direct descriptor.
 		 */
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
+		struct srp_direct_buf *buf;
 
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
 		*buf = req->indirect_desc[0];
 		goto map_complete;
 	}
@@ -1889,7 +1767,8 @@
 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
 
 	fmt = SRP_DATA_DESC_INDIRECT;
-	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
+	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+		sizeof(struct srp_indirect_buf);
 	len += count * sizeof (struct srp_direct_buf);
 
 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
@@ -2010,22 +1889,30 @@
 	list_add(&iu->list, &ch->free_tx);
 }
 
+/**
+ * srp_post_send() - send an SRP information unit
+ * @ch: RDMA channel over which to send the information unit.
+ * @iu: Information unit to send.
+ * @len: Length of the information unit excluding immediate data.
+ */
 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
 {
 	struct srp_target_port *target = ch->target;
-	struct ib_sge list;
 	struct ib_send_wr wr;
 
-	list.addr   = iu->dma;
-	list.length = len;
-	list.lkey   = target->lkey;
+	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
+		return -EINVAL;
+
+	iu->sge[0].addr   = iu->dma;
+	iu->sge[0].length = len;
+	iu->sge[0].lkey   = target->lkey;
 
 	iu->cqe.done = srp_send_done;
 
 	wr.next       = NULL;
 	wr.wr_cqe     = &iu->cqe;
-	wr.sg_list    = &list;
-	wr.num_sge    = 1;
+	wr.sg_list    = &iu->sge[0];
+	wr.num_sge    = iu->num_sge;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
@@ -2138,6 +2025,7 @@
 		return 1;
 	}
 
+	iu->num_sge = 1;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
@@ -2279,7 +2167,6 @@
 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(shost);
-	struct srp_rport *rport = target->rport;
 	struct srp_rdma_ch *ch;
 	struct srp_request *req;
 	struct srp_iu *iu;
@@ -2289,16 +2176,6 @@
 	u32 tag;
 	u16 idx;
 	int len, ret;
-	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
-
-	/*
-	 * The SCSI EH thread is the only context from which srp_queuecommand()
-	 * can get invoked for blocked devices (SDEV_BLOCK /
-	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
-	 * locking the rport mutex if invoked from inside the SCSI EH.
-	 */
-	if (in_scsi_eh)
-		mutex_lock(&rport->mutex);
 
 	scmnd->result = srp_chkready(target->rport);
 	if (unlikely(scmnd->result))
@@ -2321,7 +2198,7 @@
 
 	req = &ch->req_ring[idx];
 	dev = target->srp_host->srp_dev->dev;
-	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
 				   DMA_TO_DEVICE);
 
 	scmnd->host_scribble = (void *) req;
@@ -2333,6 +2210,12 @@
 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
 	cmd->tag = tag;
 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
+	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
+		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
+					    4);
+		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
+			goto err_iu;
+	}
 
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
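
cmd->cdb holds 16 bytes, so only the overflow of a long CDB lands in the additional-CDB area, rounded up to the 4-byte multiple the SRP format requires. A quick userspace check of that arithmetic; round_up() is written here in the open-coded power-of-two form the kernel macro reduces to:

#include <stdio.h>

int main(void)
{
	unsigned int cdb_field = 16;	/* sizeof(cmd->cdb) */
	unsigned int cmd_len = 20;	/* e.g. a 20-byte variable-length CDB */
	unsigned int add_cdb_len = ((cmd_len - cdb_field) + 3) & ~3u;

	printf("add_cdb_len = %u\n", add_cdb_len);	/* prints 4 */
	return 0;
}
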
@@ -2352,7 +2235,7 @@
 			goto err_iu;
 	}
 
-	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
 				      DMA_TO_DEVICE);
 
 	if (srp_post_send(ch, iu, len)) {
@@ -2361,13 +2244,7 @@
 		goto err_unmap;
 	}
 
-	ret = 0;
-
-unlock_rport:
-	if (in_scsi_eh)
-		mutex_unlock(&rport->mutex);
-
-	return ret;
+	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, ch, req);
@@ -2389,7 +2266,7 @@
 		ret = SCSI_MLQUEUE_HOST_BUSY;
 	}
 
-	goto unlock_rport;
+	return ret;
 }
 
 /*
@@ -2420,7 +2297,7 @@
 
 	for (i = 0; i < target->queue_size; ++i) {
 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
-					      target->max_iu_len,
+					      ch->max_it_iu_len,
 					      GFP_KERNEL, DMA_TO_DEVICE);
 		if (!ch->tx_ring[i])
 			goto err;
@@ -2486,6 +2363,17 @@
 	if (lrsp->opcode == SRP_LOGIN_RSP) {
 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
+		ch->use_imm_data  = srp_use_imm_data &&
+			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
+		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+						      ch->use_imm_data,
+						      target->max_it_iu_size);
+		WARN_ON_ONCE(ch->max_it_iu_len >
+			     be32_to_cpu(lrsp->max_it_iu_len));
+
+		if (ch->use_imm_data)
+			shost_printk(KERN_DEBUG, target->scsi_host,
+				     PFX "using immediate data\n");
 
 		/*
 		 * Reserve credits for task management so we don't
@@ -2874,6 +2762,8 @@
 		return -1;
 	}
 
+	iu->num_sge = 1;
+
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
 	tsk_mgmt = iu->buf;
@@ -2982,20 +2872,6 @@
 
 	if (target->target_can_queue)
 		starget->can_queue = target->target_can_queue;
-	return 0;
-}
-
-static int srp_slave_alloc(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-	struct srp_target_port *target = host_to_target(shost);
-	struct srp_device *srp_dev = target->srp_host->srp_dev;
-	struct ib_device *ibdev = srp_dev->dev;
-
-	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
-		blk_queue_virt_boundary(sdev->request_queue,
-					~srp_dev->mr_page_mask);
-
 	return 0;
 }
 
@@ -3115,7 +2991,8 @@
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
+	return sprintf(buf, "%s\n",
+		       dev_name(&target->srp_host->srp_dev->dev->dev));
 }
 
 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
@@ -3200,7 +3077,6 @@
 	.name				= "InfiniBand SRP initiator",
 	.proc_name			= DRV_NAME,
 	.target_alloc			= srp_target_alloc,
-	.slave_alloc			= srp_slave_alloc,
 	.slave_configure		= srp_slave_configure,
 	.info				= srp_target_info,
 	.queuecommand			= srp_queuecommand,
@@ -3214,7 +3090,6 @@
 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= srp_host_attrs,
 	.track_queue_depth		= 1,
 };
@@ -3365,6 +3240,8 @@
 	SRP_OPT_IP_SRC		= 1 << 15,
 	SRP_OPT_IP_DEST		= 1 << 16,
 	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+	SRP_OPT_MAX_IT_IU_SIZE	= 1 << 18,
+	SRP_OPT_CH_COUNT	= 1 << 19,
 };
 
 static unsigned int srp_opt_mandatory[] = {
@@ -3397,6 +3274,8 @@
 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
 	{ SRP_OPT_IP_SRC,		"src=%s"		},
 	{ SRP_OPT_IP_DEST,		"dest=%s"		},
+	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
+	{ SRP_OPT_CH_COUNT,		"ch_count=%u",		},
 	{ SRP_OPT_ERR,			NULL			}
 };
 
@@ -3690,6 +3569,22 @@
 			target->tl_retry_count = token;
 			break;
 
+		case SRP_OPT_MAX_IT_IU_SIZE:
+			if (match_int(args, &token) || token < 0) {
+				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+				goto out;
+			}
+			target->max_it_iu_size = token;
+			break;
+
+		case SRP_OPT_CH_COUNT:
+			if (match_int(args, &token) || token < 1) {
+				pr_warn("bad channel count %s\n", p);
+				goto out;
+			}
+			target->ch_count = token;
+			break;
+
 		default:
 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
 				p);
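
Both new options are per-target: they ride in the same comma-separated string that is written to the SRP host's add_target sysfs attribute. A hypothetical usage example (host name and values illustrative, mandatory parameters elided):

	echo "id_ext=...,ioc_guid=...,max_it_iu_size=8260,ch_count=2" > \
		/sys/class/infiniband_srp/srp-mlx5_0-1/add_target

where 8260 matches the immediate-data IU length computed earlier.
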
@@ -3728,9 +3623,10 @@
 	struct srp_rdma_ch *ch;
 	struct srp_device *srp_dev = host->srp_dev;
 	struct ib_device *ibdev = srp_dev->dev;
-	int ret, node_idx, node, cpu, i;
+	int ret, i, ch_idx;
 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
 	bool multich = false;
+	uint32_t max_iu_len;
 
 	target_host = scsi_host_alloc(&srp_template,
 				      sizeof (struct srp_target_port));
@@ -3742,6 +3638,10 @@
 	target_host->max_id      = 1;
 	target_host->max_lun     = -1LL;
 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
+	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
+
+	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
 
 	target = host_to_target(target_host);
 
@@ -3791,13 +3691,13 @@
 		goto out;
 	}
 
-	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+	if (!srp_dev->has_fr && !target->allow_ext_sg &&
 	    target->cmd_sg_cnt < target->sg_tablesize) {
 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
 		target->sg_tablesize = target->cmd_sg_cnt;
 	}
 
-	if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+	if (srp_dev->use_fast_reg) {
 		bool gaps_reg = (ibdev->attrs.device_cap_flags &
 				 IB_DEVICE_SG_GAPS_REG);
 
@@ -3805,12 +3705,12 @@
 			(ilog2(srp_dev->mr_page_size) - 9);
 		if (!gaps_reg) {
 			/*
-			 * FR and FMR can only map one HCA page per entry. If
-			 * the start address is not aligned on a HCA page
-			 * boundary two entries will be used for the head and
-			 * the tail although these two entries combined
-			 * contain at most one HCA page of data. Hence the "+
-			 * 1" in the calculation below.
+			 * FR can only map one HCA page per entry. If the start
+			 * address is not aligned on a HCA page boundary two
+			 * entries will be used for the head and the tail
+			 * although these two entries combined contain at most
+			 * one HCA page of data. Hence the "+ 1" in the
+			 * calculation below.
 			 *
 			 * The indirect data buffer descriptor is contiguous
 			 * so the memory for that buffer will only be
@@ -3836,9 +3736,9 @@
 	target->mr_per_cmd = mr_per_cmd;
 	target->indirect_size = target->sg_tablesize *
 				sizeof (struct srp_direct_buf);
-	target->max_iu_len = sizeof (struct srp_cmd) +
-			     sizeof (struct srp_indirect_buf) +
-			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+				       srp_use_imm_data,
+				       target->max_it_iu_size);
 
 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
 	INIT_WORK(&target->remove_work, srp_remove_work);
@@ -3848,79 +3748,61 @@
 		goto out;
 
 	ret = -ENOMEM;
-	target->ch_count = max_t(unsigned, num_online_nodes(),
-				 min(ch_count ? :
-				     min(4 * num_online_nodes(),
-					 ibdev->num_comp_vectors),
-				     num_online_cpus()));
+	if (target->ch_count == 0) {
+		target->ch_count =
+			min(ch_count ?:
+				max(4 * num_online_nodes(),
+				    ibdev->num_comp_vectors),
+				num_online_cpus());
+	}
+
 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
 			     GFP_KERNEL);
 	if (!target->ch)
 		goto out;
 
-	node_idx = 0;
-	for_each_online_node(node) {
-		const int ch_start = (node_idx * target->ch_count /
-				      num_online_nodes());
-		const int ch_end = ((node_idx + 1) * target->ch_count /
-				    num_online_nodes());
-		const int cv_start = node_idx * ibdev->num_comp_vectors /
-				     num_online_nodes();
-		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
-				   num_online_nodes();
-		int cpu_idx = 0;
+	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
+		ch = &target->ch[ch_idx];
+		ch->target = target;
+		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
+		spin_lock_init(&ch->lock);
+		INIT_LIST_HEAD(&ch->free_tx);
+		ret = srp_new_cm_id(ch);
+		if (ret)
+			goto err_disconnect;
 
-		for_each_online_cpu(cpu) {
-			if (cpu_to_node(cpu) != node)
-				continue;
-			if (ch_start + cpu_idx >= ch_end)
-				continue;
-			ch = &target->ch[ch_start + cpu_idx];
-			ch->target = target;
-			ch->comp_vector = cv_start == cv_end ? cv_start :
-				cv_start + cpu_idx % (cv_end - cv_start);
-			spin_lock_init(&ch->lock);
-			INIT_LIST_HEAD(&ch->free_tx);
-			ret = srp_new_cm_id(ch);
-			if (ret)
-				goto err_disconnect;
+		ret = srp_create_ch_ib(ch);
+		if (ret)
+			goto err_disconnect;
 
-			ret = srp_create_ch_ib(ch);
-			if (ret)
-				goto err_disconnect;
+		ret = srp_alloc_req_data(ch);
+		if (ret)
+			goto err_disconnect;
 
-			ret = srp_alloc_req_data(ch);
-			if (ret)
-				goto err_disconnect;
+		ret = srp_connect_ch(ch, max_iu_len, multich);
+		if (ret) {
+			char dst[64];
 
-			ret = srp_connect_ch(ch, multich);
-			if (ret) {
-				char dst[64];
-
-				if (target->using_rdma_cm)
-					snprintf(dst, sizeof(dst), "%pIS",
-						 &target->rdma_cm.dst);
-				else
-					snprintf(dst, sizeof(dst), "%pI6",
-						 target->ib_cm.orig_dgid.raw);
-				shost_printk(KERN_ERR, target->scsi_host,
-					     PFX "Connection %d/%d to %s failed\n",
-					     ch_start + cpu_idx,
-					     target->ch_count, dst);
-				if (node_idx == 0 && cpu_idx == 0) {
-					goto free_ch;
-				} else {
-					srp_free_ch_ib(target, ch);
-					srp_free_req_data(target, ch);
-					target->ch_count = ch - target->ch;
-					goto connected;
-				}
+			if (target->using_rdma_cm)
+				snprintf(dst, sizeof(dst), "%pIS",
+					 &target->rdma_cm.dst);
+			else
+				snprintf(dst, sizeof(dst), "%pI6",
+					 target->ib_cm.orig_dgid.raw);
+			shost_printk(KERN_ERR, target->scsi_host,
+				     PFX "Connection %d/%d to %s failed\n",
+				     ch_idx,
+				     target->ch_count, dst);
+			if (ch_idx == 0) {
+				goto free_ch;
+			} else {
+				srp_free_ch_ib(target, ch);
+				srp_free_req_data(target, ch);
+				target->ch_count = ch - target->ch;
+				goto connected;
 			}
 		}
-
-		multich = true;
-		cpu_idx++;
 	}
-	node_idx++;
+	multich = true;
 }
 
 connected:
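
The old per-NUMA-node channel spreading is gone; unless ch_count was set (module parameter or the new per-target option), the default becomes min(max(4 * online nodes, completion vectors), online CPUs). A worked userspace example of that default:

#include <stdio.h>

static int default_ch_count(int nodes, int comp_vectors, int cpus)
{
	int ch = 4 * nodes > comp_vectors ? 4 * nodes : comp_vectors; /* max */
	return ch < cpus ? ch : cpus;				      /* min */
}

int main(void)
{
	/* e.g. 2 NUMA nodes, 16 completion vectors, 24 online CPUs -> 16 */
	printf("%d channels\n", default_ch_count(2, 16, 24));
	return 0;
}
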
@@ -3990,7 +3872,7 @@
 {
 	struct srp_host *host = container_of(dev, struct srp_host, dev);
 
-	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
+	return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
 }
 
 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -4022,7 +3904,8 @@
 
 	host->dev.class = &srp_class;
 	host->dev.parent = device->dev->dev.parent;
-	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
+	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
+		     port);
 
 	if (device_register(&host->dev))
 		goto free_host;
@@ -4044,18 +3927,33 @@
 	return NULL;
 }
 
-static void srp_add_one(struct ib_device *device)
+static void srp_rename_dev(struct ib_device *device, void *client_data)
+{
+	struct srp_device *srp_dev = client_data;
+	struct srp_host *host, *tmp_host;
+
+	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+		char name[IB_DEVICE_NAME_MAX + 8];
+
+		snprintf(name, sizeof(name), "srp-%s-%d",
+			 dev_name(&device->dev), host->port);
+		device_rename(&host->dev, name);
+	}
+}
+
+static int srp_add_one(struct ib_device *device)
 {
 	struct srp_device *srp_dev;
 	struct ib_device_attr *attr = &device->attrs;
 	struct srp_host *host;
-	int mr_page_shift, p;
+	int mr_page_shift;
+	unsigned int p;
 	u64 max_pages_per_mr;
 	unsigned int flags = 0;
 
 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
 	if (!srp_dev)
-		return;
+		return -ENOMEM;
 
 	/*
 	 * Use the smallest page size supported by the HCA, down to a
@@ -4073,21 +3971,15 @@
 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
 					  max_pages_per_mr);
 
-	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
-			    device->map_phys_fmr && device->unmap_fmr);
 	srp_dev->has_fr = (attr->device_cap_flags &
 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
-	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
-		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
-	} else if (!never_register &&
-		   attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
-		srp_dev->use_fast_reg = (srp_dev->has_fr &&
-					 (!srp_dev->has_fmr || prefer_fr));
-		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
-	}
+	if (!never_register && !srp_dev->has_fr)
+		dev_warn(&device->dev, "FR is not supported\n");
+	else if (!never_register &&
+		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+		srp_dev->use_fast_reg = srp_dev->has_fr;
 
-	if (never_register || !register_always ||
-	    (!srp_dev->has_fmr && !srp_dev->has_fr))
+	if (never_register || !register_always || !srp_dev->has_fr)
 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
 
 	if (srp_dev->use_fast_reg) {
@@ -4098,7 +3990,7 @@
 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
 				   srp_dev->max_pages_per_mr;
 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
-		 device->name, mr_page_shift, attr->max_mr_size,
+		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
 
@@ -4106,25 +3998,26 @@
 
 	srp_dev->dev = device;
 	srp_dev->pd  = ib_alloc_pd(device, flags);
-	if (IS_ERR(srp_dev->pd))
-		goto free_dev;
+	if (IS_ERR(srp_dev->pd)) {
+		int ret = PTR_ERR(srp_dev->pd);
+
+		kfree(srp_dev);
+		return ret;
+	}
 
 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
 	}
 
-	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+	rdma_for_each_port (device, p) {
 		host = srp_add_port(srp_dev, p);
 		if (host)
 			list_add_tail(&host->list, &srp_dev->dev_list);
 	}
 
 	ib_set_client_data(device, &srp_client, srp_dev);
-	return;
-
-free_dev:
-	kfree(srp_dev);
+	return 0;
 }
 
 static void srp_remove_one(struct ib_device *device, void *client_data)
@@ -4134,8 +4027,6 @@
 	struct srp_target_port *target;
 
 	srp_dev = client_data;
-	if (!srp_dev)
-		return;
 
 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
 		device_unregister(&host->dev);
@@ -4184,6 +4075,11 @@
 {
 	int ret;
 
+	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
+	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
+	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
+	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
+
 	if (srp_sg_tablesize) {
 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
 		if (!cmd_sg_entries)
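
These BUILD_BUG_ON()s pin the on-the-wire structure sizes that the immediate-data arithmetic depends on. The same idea in portable C, shown only for the 4-byte srp_imm_buf (a single big-endian length field); the other structs are not reproduced here:

#include <stdint.h>

struct srp_imm_buf {
	uint32_t len;		/* __be32 on the wire */
};

_Static_assert(sizeof(struct srp_imm_buf) == 4,
	       "srp_imm_buf must be 4 bytes on the wire");

int main(void) { return 0; }
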