@@ -71 +71 @@
 static unsigned int cmd_sg_entries;
 static unsigned int indirect_sg_entries;
 static bool allow_ext_sg;
-static bool prefer_fr = true;
 static bool register_always = true;
 static bool never_register;
 static int topspin_workarounds = 1;
@@ -94 +93 @@
 module_param(topspin_workarounds, int, 0444);
 MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-		 "Whether to use fast registration if both FMR and fast registration are supported");
 
 module_param(register_always, bool, 0444);
 MODULE_PARM_DESC(register_always,
@@ -132 +127 @@
	 " if fast_io_fail_tmo has not been set. \"off\" means that"
	 " this functionality is disabled.");
 
+static bool srp_use_imm_data = true;
+module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
+MODULE_PARM_DESC(use_imm_data,
+		 "Whether or not to request permission to use immediate data during SRP login.");
+
+static unsigned int srp_max_imm_data = 8 * 1024;
+module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
+MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
+
 static unsigned ch_count;
 module_param(ch_count, uint, 0444);
 MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
 
-static void srp_add_one(struct ib_device *device);
+static int srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device, void *client_data);
+static void srp_rename_dev(struct ib_device *device, void *client_data);
 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
@@ -153 +158 @@
 static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
-	.remove = srp_remove_one
+	.remove = srp_remove_one,
+	.rename = srp_rename_dev
 };
 
 static struct ib_sa_client srp_sa_client;
@@ -163 +169 @@
	int tmo = *(int *)kp->arg;
 
	if (tmo >= 0)
-		return sprintf(buffer, "%d", tmo);
+		return sprintf(buffer, "%d\n", tmo);
	else
-		return sprintf(buffer, "off");
+		return sprintf(buffer, "off\n");
 }
 
 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -341 +347 @@
 
	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
-				(struct sockaddr *)&target->rdma_cm.src : NULL,
-				(struct sockaddr *)&target->rdma_cm.dst,
+				&target->rdma_cm.src.sa : NULL,
+				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
-		pr_err("No route available from %pIS to %pIS (%d)\n",
+		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
@@ -355 +361 @@
 
	ret = ch->status;
	if (ret) {
-		pr_err("Resolving address %pIS failed (%d)\n",
+		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}
@@ -375 +381 @@
 
	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
-}
-
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_fmr_pool_param fmr_param;
-
-	memset(&fmr_param, 0, sizeof(fmr_param));
-	fmr_param.pool_size = target->mr_pool_size;
-	fmr_param.dirty_watermark = fmr_param.pool_size / 4;
-	fmr_param.cache = 1;
-	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
-	fmr_param.page_shift = ilog2(dev->mr_page_size);
-	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
-			    IB_ACCESS_REMOTE_WRITE |
-			    IB_ACCESS_REMOTE_READ);
-
-	return ib_create_fmr_pool(dev->pd, &fmr_param);
 }
 
 /**
@@ -434 +422 @@
	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
-	pool = kzalloc(sizeof(struct srp_fr_pool) +
-		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
+	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
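Note on the struct_size() conversion above: the helper (from <linux/overflow.h>) computes the size of a structure that ends in a flexible array member and saturates on arithmetic overflow, so an oversized pool_size makes the allocation fail instead of wrapping into a short buffer. A minimal userspace model of the semantics, with illustrative names rather than the kernel's:

/* Model of struct_size(pool, desc, n): header plus n trailing array
 * elements, saturating to SIZE_MAX if the multiply or add overflows. */
#include <stddef.h>
#include <stdint.h>

struct fr_pool_model {
	int size;
	struct { void *mr; } desc[];	/* flexible array member */
};

static size_t model_struct_size(size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(((struct fr_pool_model *)0)->desc[0]), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct fr_pool_model), &bytes))
		return SIZE_MAX;	/* saturated: kzalloc() will reject it */
	return bytes;
}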
@@ -542 +529 @@
 {
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
+	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
-	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;
@@ -573 +560 @@
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
-	init_attr->cap.max_send_sge = 1;
+	init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;
+
+	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
 
	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
@@ -606 +595 @@
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
-	} else if (dev->use_fmr) {
-		fmr_pool = srp_alloc_fmr_pool(target);
-		if (IS_ERR(fmr_pool)) {
-			ret = PTR_ERR(fmr_pool);
-			shost_printk(KERN_WARNING, target->scsi_host, PFX
-				     "FMR pool allocation failed (%d)\n", ret);
-			goto err_qp;
-		}
	}
 
	if (ch->qp)
@@ -631 +612 @@
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
-	} else if (dev->use_fmr) {
-		if (ch->fmr_pool)
-			ib_destroy_fmr_pool(ch->fmr_pool);
-		ch->fmr_pool = fmr_pool;
	}
 
	kfree(init_attr);
@@ -689 +666 @@
	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
-	} else if (dev->use_fmr) {
-		if (ch->fmr_pool)
-			ib_destroy_fmr_pool(ch->fmr_pool);
	}
 
	srp_destroy_qp(ch);
@@ -823 +797 @@
	return subnet_timeout;
 }
 
-static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
+static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			bool multich)
 {
	struct srp_target_port *target = ch->target;
	struct {
@@ -852 +827 @@
 
	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
-	req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
+	if (srp_use_imm_data) {
+		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
+		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
+	}
 
	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
@@ -873 +852 @@
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags = req->ib_req.req_flags;
+		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
 
		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
@@ -998 +978 @@
 
	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
-		if (dev->use_fast_reg) {
+		if (dev->use_fast_reg)
			kfree(req->fr_list);
-		} else {
-			kfree(req->fmr_list);
-			kfree(req->map_page);
-		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
@@ -1022 +998 @@
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
-	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;
@@ -1033 +1008 @@
 
	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
-		mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
-					GFP_KERNEL);
-		if (!mr_list)
-			goto out;
		if (srp_dev->use_fast_reg) {
-			req->fr_list = mr_list;
-		} else {
-			req->fmr_list = mr_list;
-			req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
-						      sizeof(void *),
-						      GFP_KERNEL);
-			if (!req->map_page)
+			req->fr_list = kmalloc_array(target->mr_per_cmd,
+						     sizeof(void *), GFP_KERNEL);
+			if (!req->fr_list)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
@@ -1145 +1112 @@
	return c;
 }
 
-static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
+static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			  bool multich)
 {
	struct srp_target_port *target = ch->target;
	int ret;
@@ -1158 +1126 @@
 
	while (1) {
		init_completion(&ch->done);
-		ret = srp_send_req(ch, multich);
+		ret = srp_send_req(ch, max_iu_len, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
@@ -1252 +1220 @@
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
-	} else if (dev->use_fmr) {
-		struct ib_pool_fmr **pfmr;
-
-		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
-			ib_fmr_pool_unmap(*pfmr);
	}
 
	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1330 +1293 @@
 {
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
-	struct Scsi_Host *shost = target->scsi_host;
-	struct scsi_device *sdev;
	int i, j;
-
-	/*
-	 * Invoking srp_terminate_io() while srp_queuecommand() is running
-	 * is not safe. Hence the warning statement below.
-	 */
-	shost_for_each_device(sdev, shost)
-		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
 
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
@@ -1351 +1305 @@
					       DID_TRANSPORT_FAILFAST << 16);
		}
	}
+}
+
+/* Calculate maximum initiator to target information unit length. */
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+				  uint32_t max_it_iu_size)
+{
+	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
+		sizeof(struct srp_indirect_buf) +
+		cmd_sg_cnt * sizeof(struct srp_direct_buf);
+
+	if (use_imm_data)
+		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
+				 srp_max_imm_data);
+
+	if (max_it_iu_size)
+		max_iu_len = min(max_iu_len, max_it_iu_size);
+
+	pr_debug("max_iu_len = %d\n", max_iu_len);
+
+	return max_iu_len;
 }
 
 /*
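Worked example of the srp_max_it_iu_len() arithmetic above, as a standalone userspace sketch. The sizes are assumptions taken from the SRP wire format, cross-checked where possible against the BUILD_BUG_ON checks added in srp_init_module at the end of this patch: srp_cmd is 48 bytes and srp_imm_buf is 4 bytes (both asserted there); SRP_MAX_ADD_CDB_LEN = 16, srp_indirect_buf = 20, and srp_direct_buf = 16 are assumed, giving an immediate-data offset of 48 + 16 + 4 = 68.

#include <stdint.h>
#include <stdio.h>

enum {
	CMD = 48,	/* sizeof(struct srp_cmd), per BUILD_BUG_ON */
	ADD_CDB = 16,	/* SRP_MAX_ADD_CDB_LEN (assumed) */
	IMM_BUF = 4,	/* sizeof(struct srp_imm_buf), per BUILD_BUG_ON */
	INDIRECT = 20,	/* sizeof(struct srp_indirect_buf) (assumed) */
	DIRECT = 16,	/* sizeof(struct srp_direct_buf) (assumed) */
	IMM_OFFSET = CMD + ADD_CDB + IMM_BUF,	/* 68 */
};

static uint32_t max_it_iu_len(int cmd_sg_cnt, int use_imm_data,
			      uint32_t max_imm_data, uint32_t max_it_iu_size)
{
	uint32_t len = CMD + ADD_CDB + INDIRECT + cmd_sg_cnt * DIRECT;

	if (use_imm_data && IMM_OFFSET + max_imm_data > len)
		len = IMM_OFFSET + max_imm_data;	/* max() branch */
	if (max_it_iu_size && len > max_it_iu_size)
		len = max_it_iu_size;			/* min() clamp */
	return len;
}

int main(void)
{
	/* Module defaults: cmd_sg_entries = 12, srp_max_imm_data = 8192. */
	printf("%u\n", max_it_iu_len(12, 0, 8192, 0));	/* 276 */
	printf("%u\n", max_it_iu_len(12, 1, 8192, 0));	/* 8260 */
	return 0;
}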
@@ -1366 +1340 @@
 {
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
+	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+						srp_use_imm_data,
+						target->max_it_iu_size);
	int i, j, ret = 0;
	bool multich = false;
@@ -1411 +1388 @@
		ch = &target->ch[i];
		if (ret)
			break;
-		ret = srp_connect_ch(ch, multich);
+		ret = srp_connect_ch(ch, max_iu_len, multich);
		multich = true;
	}
@@ -1436 +1413 @@
	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
-}
-
-static int srp_map_finish_fmr(struct srp_map_state *state,
-			      struct srp_rdma_ch *ch)
-{
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_pool_fmr *fmr;
-	u64 io_addr = 0;
-
-	if (state->fmr.next >= state->fmr.end) {
-		shost_printk(KERN_ERR, ch->target->scsi_host,
-			     PFX "Out of MRs (mr_per_cmd = %d)\n",
-			     ch->target->mr_per_cmd);
-		return -ENOMEM;
-	}
-
-	WARN_ON_ONCE(!dev->use_fmr);
-
-	if (state->npages == 0)
-		return 0;
-
-	if (state->npages == 1 && target->global_rkey) {
-		srp_map_desc(state, state->base_dma_addr, state->dma_len,
-			     target->global_rkey);
-		goto reset_state;
-	}
-
-	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
-				   state->npages, io_addr);
-	if (IS_ERR(fmr))
-		return PTR_ERR(fmr);
-
-	*state->fmr.next++ = fmr;
-	state->nmdesc++;
-
-	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
-		     state->dma_len, fmr->fmr->rkey);
-
-reset_state:
-	state->npages = 0;
-	state->dma_len = 0;
-
-	return 0;
 }
 
 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1572 +1505 @@
	return n;
 }
 
-static int srp_map_sg_entry(struct srp_map_state *state,
-			    struct srp_rdma_ch *ch,
-			    struct scatterlist *sg)
-{
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
-	struct ib_device *ibdev = dev->dev;
-	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
-	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-	unsigned int len = 0;
-	int ret;
-
-	WARN_ON_ONCE(!dma_len);
-
-	while (dma_len) {
-		unsigned offset = dma_addr & ~dev->mr_page_mask;
-
-		if (state->npages == dev->max_pages_per_mr ||
-		    (state->npages > 0 && offset != 0)) {
-			ret = srp_map_finish_fmr(state, ch);
-			if (ret)
-				return ret;
-		}
-
-		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
-		if (!state->npages)
-			state->base_dma_addr = dma_addr;
-		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
-		state->dma_len += len;
-		dma_addr += len;
-		dma_len -= len;
-	}
-
-	/*
-	 * If the end of the MR is not on a page boundary then we need to
-	 * close it out and start a new one -- we can only merge at page
-	 * boundaries.
-	 */
-	ret = 0;
-	if ((dma_addr & ~dev->mr_page_mask) != 0)
-		ret = srp_map_finish_fmr(state, ch);
-	return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
-			  struct srp_request *req, struct scatterlist *scat,
-			  int count)
-{
-	struct scatterlist *sg;
-	int i, ret;
-
-	state->pages = req->map_page;
-	state->fmr.next = req->fmr_list;
-	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
-	for_each_sg(scat, sg, count, i) {
-		ret = srp_map_sg_entry(state, ch, sg);
-		if (ret)
-			return ret;
-	}
-
-	ret = srp_map_finish_fmr(state, ch);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
@@ -1674 +1538 @@
			  int count)
 {
	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;
 
	for_each_sg(scat, sg, count, i) {
-		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-			     ib_sg_dma_len(dev->dev, sg),
+		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
			     target->global_rkey);
	}
@@ -1702 +1564 @@
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
-	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;
@@ -1725 +1586 @@
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
-	} else if (dev->use_fmr) {
-		state.pages = idb_pages;
-		state.pages[0] = (req->indirect_dma_addr &
-				  dev->mr_page_mask);
-		state.npages = 1;
-		ret = srp_map_finish_fmr(&state, ch);
-		if (ret < 0)
-			return ret;
	} else {
		return -EINVAL;
	}
@@ -1756 +1609 @@
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
-	else if (dev->use_fmr)
-		for (i = 0; i < state->nmdesc; i++)
-			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1773 +1623 @@
  * @req: SRP request
  *
  * Returns the length in bytes of the SRP_CMD IU or a negative value if
- * mapping failed.
+ * mapping failed. The size of any immediate data is not included in the
+ * return value.
  */
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
 {
	struct srp_target_port *target = ch->target;
-	struct scatterlist *scat;
+	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
-	int len, nents, count, ret;
+	int i, len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
+	u64 data_len;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;
 
+	req->cmd->num_sge = 1;
+
	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
-		return sizeof (struct srp_cmd);
+		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
 
	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
@@ -1803 +1657 @@
 
	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);
+	data_len = scsi_bufflen(scmnd);
 
	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;
@@ -1811 +1666 @@
	if (unlikely(count == 0))
		return -EIO;
 
+	if (ch->use_imm_data &&
+	    count <= ch->max_imm_sge &&
+	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
+	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
+		struct srp_imm_buf *buf;
+		struct ib_sge *sge = &req->cmd->sge[1];
+
+		fmt = SRP_DATA_DESC_IMM;
+		len = SRP_IMM_DATA_OFFSET;
+		req->nmdesc = 0;
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
+		buf->len = cpu_to_be32(data_len);
+		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
+		for_each_sg(scat, sg, count, i) {
+			sge[i].addr   = sg_dma_address(sg);
+			sge[i].length = sg_dma_len(sg);
+			sge[i].lkey   = target->lkey;
+		}
+		req->cmd->num_sge += count;
+		goto map_complete;
+	}
+
	fmt = SRP_DATA_DESC_DIRECT;
-	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
+	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+		sizeof(struct srp_direct_buf);
 
	if (count == 1 && target->global_rkey) {
		/*
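For orientation, the immediate-data branch above produces the IU layout sketched below. This is an illustration of the branch's logic, not authoritative wire documentation; the offset value assumes the sizes listed with the srp_max_it_iu_len() example earlier.

/*
 * SRP_CMD IU with immediate data (sketch):
 *
 *   sge[0]: [ srp_cmd | additional CDB (add_cdb_len) | srp_imm_buf | pad ]
 *           \_________________ SRP_IMM_DATA_OFFSET __________________/
 *   sge[1..count]: the DMA-mapped data scatterlist, sent inline as extra
 *                  gather entries instead of via a registered MR.
 *
 * The target learns the offset from imm_data_offset in the login request,
 * and srp_post_send() posts all num_sge entries in a single IB_WR_SEND.
 */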
@@ -1821 +1699 @@
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
+		struct srp_direct_buf *buf;
 
-		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
+		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->global_rkey);
-		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
+		buf->len = cpu_to_be32(sg_dma_len(scat));
 
		req->nmdesc = 0;
		goto map_complete;
@@ -1835 +1714 @@
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
-	indirect_hdr = (void *) cmd->add_data;
+	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
 
	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);
@@ -1844 +1723 @@
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
-	else if (dev->use_fmr)
-		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
@@ -1870 +1747 @@
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
-		struct srp_direct_buf *buf = (void *) cmd->add_data;
+		struct srp_direct_buf *buf;
 
+		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		*buf = req->indirect_desc[0];
		goto map_complete;
	}
@@ -1889 +1767 @@
	idb_len = sizeof(struct srp_indirect_buf) + table_len;
 
	fmt = SRP_DATA_DESC_INDIRECT;
-	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
+	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+		sizeof(struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);
 
	memcpy(indirect_hdr->desc_list, req->indirect_desc,
@@ -2010 +1889 @@
	list_add(&iu->list, &ch->free_tx);
 }
 
+/**
+ * srp_post_send() - send an SRP information unit
+ * @ch: RDMA channel over which to send the information unit.
+ * @iu: Information unit to send.
+ * @len: Length of the information unit excluding immediate data.
+ */
 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
 {
	struct srp_target_port *target = ch->target;
-	struct ib_sge list;
	struct ib_send_wr wr;
 
-	list.addr   = iu->dma;
-	list.length = len;
-	list.lkey   = target->lkey;
+	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
+		return -EINVAL;
+
+	iu->sge[0].addr   = iu->dma;
+	iu->sge[0].length = len;
+	iu->sge[0].lkey   = target->lkey;
 
	iu->cqe.done = srp_send_done;
 
	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
-	wr.sg_list    = &list;
-	wr.num_sge    = 1;
+	wr.sg_list    = &iu->sge[0];
+	wr.num_sge    = iu->num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
@@ -2138 +2025 @@
		return 1;
	}
 
+	iu->num_sge = 1;
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
@@ -2279 +2167 @@
 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
	struct srp_target_port *target = host_to_target(shost);
-	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
@@ -2289 +2176 @@
	u32 tag;
	u16 idx;
	int len, ret;
-	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
-
-	/*
-	 * The SCSI EH thread is the only context from which srp_queuecommand()
-	 * can get invoked for blocked devices (SDEV_BLOCK /
-	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
-	 * locking the rport mutex if invoked from inside the SCSI EH.
-	 */
-	if (in_scsi_eh)
-		mutex_lock(&rport->mutex);
 
	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
@@ -2321 +2198 @@
 
	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
-	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
				   DMA_TO_DEVICE);
 
	scmnd->host_scribble = (void *) req;
@@ -2333 +2210 @@
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
+	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
+		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
+					    4);
+		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
+			goto err_iu;
+	}
 
	req->scmnd = scmnd;
	req->cmd = iu;
@@ -2352 +2235 @@
		goto err_iu;
	}
 
-	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
				      DMA_TO_DEVICE);
 
	if (srp_post_send(ch, iu, len)) {
@@ -2361 +2244 @@
		goto err_unmap;
	}
 
-	ret = 0;
-
-unlock_rport:
-	if (in_scsi_eh)
-		mutex_unlock(&rport->mutex);
-
-	return ret;
+	return 0;
 
 err_unmap:
	srp_unmap_data(scmnd, ch, req);
@@ -2389 +2266 @@
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}
 
-	goto unlock_rport;
+	return ret;
 }
 
 /*
@@ -2420 +2297 @@
 
	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
-					      target->max_iu_len,
+					      ch->max_it_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;
@@ -2486 +2363 @@
	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
+		ch->use_imm_data  = srp_use_imm_data &&
+			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
+		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+						      ch->use_imm_data,
+						      target->max_it_iu_size);
+		WARN_ON_ONCE(ch->max_it_iu_len >
+			     be32_to_cpu(lrsp->max_it_iu_len));
+
+		if (ch->use_imm_data)
+			shost_printk(KERN_DEBUG, target->scsi_host,
+				     PFX "using immediate data\n");
 
		/*
		 * Reserve credits for task management so we don't
@@ -2874 +2762 @@
		return -1;
	}
 
+	iu->num_sge = 1;
+
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
@@ -2982 +2872 @@
 
	if (target->target_can_queue)
		starget->can_queue = target->target_can_queue;
-	return 0;
-}
-
-static int srp_slave_alloc(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-	struct srp_target_port *target = host_to_target(shost);
-	struct srp_device *srp_dev = target->srp_host->srp_dev;
-	struct ib_device *ibdev = srp_dev->dev;
-
-	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
-		blk_queue_virt_boundary(sdev->request_queue,
-					~srp_dev->mr_page_mask);
-
	return 0;
 }
@@ -3115 +2991 @@
 {
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
+	return sprintf(buf, "%s\n",
+		       dev_name(&target->srp_host->srp_dev->dev->dev));
 }
 
 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
@@ -3200 +3077 @@
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.target_alloc			= srp_target_alloc,
-	.slave_alloc			= srp_slave_alloc,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
@@ -3214 +3090 @@
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
-	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
 };
@@ -3365 +3240 @@
	SRP_OPT_IP_SRC		= 1 << 15,
	SRP_OPT_IP_DEST		= 1 << 16,
	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+	SRP_OPT_MAX_IT_IU_SIZE	= 1 << 18,
+	SRP_OPT_CH_COUNT	= 1 << 19,
 };
 
 static unsigned int srp_opt_mandatory[] = {
@@ -3397 +3274 @@
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_IP_SRC,		"src=%s"		},
	{ SRP_OPT_IP_DEST,		"dest=%s"		},
+	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
+	{ SRP_OPT_CH_COUNT,		"ch_count=%u",		},
	{ SRP_OPT_ERR,			NULL			}
 };
@@ -3690 +3569 @@
		target->tl_retry_count = token;
		break;
 
+	case SRP_OPT_MAX_IT_IU_SIZE:
+		if (match_int(args, &token) || token < 0) {
+			pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+			goto out;
+		}
+		target->max_it_iu_size = token;
+		break;
+
+	case SRP_OPT_CH_COUNT:
+		if (match_int(args, &token) || token < 1) {
+			pr_warn("bad channel count %s\n", p);
+			goto out;
+		}
+		target->ch_count = token;
+		break;
+
	default:
		pr_warn("unknown parameter or missing value '%s' in target creation request\n",
			p);
@@ -3728 +3623 @@
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
-	int ret, node_idx, node, cpu, i;
+	int ret, i, ch_idx;
	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
	bool multich = false;
+	uint32_t max_iu_len;
 
	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
@@ -3742 +3638 @@
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
+	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
+
+	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
 
	target = host_to_target(target_host);
@@ -3791 +3691 @@
		goto out;
	}
 
-	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+	if (!srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}
 
-	if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+	if (srp_dev->use_fast_reg) {
		bool gaps_reg = (ibdev->attrs.device_cap_flags &
				 IB_DEVICE_SG_GAPS_REG);
@@ -3805 +3705 @@
			(ilog2(srp_dev->mr_page_size) - 9);
		if (!gaps_reg) {
			/*
-			 * FR and FMR can only map one HCA page per entry. If
-			 * the start address is not aligned on a HCA page
-			 * boundary two entries will be used for the head and
-			 * the tail although these two entries combined
-			 * contain at most one HCA page of data. Hence the "+
-			 * 1" in the calculation below.
+			 * FR can only map one HCA page per entry. If the start
+			 * address is not aligned on a HCA page boundary two
+			 * entries will be used for the head and the tail
+			 * although these two entries combined contain at most
+			 * one HCA page of data. Hence the "+ 1" in the
+			 * calculation below.
			 *
			 * The indirect data buffer descriptor is contiguous
			 * so the memory for that buffer will only be
@@ -3836 +3736 @@
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
-	target->max_iu_len = sizeof (struct srp_cmd) +
-			     sizeof (struct srp_indirect_buf) +
-			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+				       srp_use_imm_data,
+				       target->max_it_iu_size);
 
	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
@@ -3848 +3748 @@
		goto out;
 
	ret = -ENOMEM;
-	target->ch_count = max_t(unsigned, num_online_nodes(),
-				 min(ch_count ?:
-				     min(4 * num_online_nodes(),
-					 ibdev->num_comp_vectors),
-				     num_online_cpus()));
+	if (target->ch_count == 0) {
+		target->ch_count =
+			min(ch_count ?:
+			    max(4 * num_online_nodes(),
+				ibdev->num_comp_vectors),
+			    num_online_cpus());
+	}
+
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;
 
-	node_idx = 0;
-	for_each_online_node(node) {
-		const int ch_start = (node_idx * target->ch_count /
-				      num_online_nodes());
-		const int ch_end = ((node_idx + 1) * target->ch_count /
-				    num_online_nodes());
-		const int cv_start = node_idx * ibdev->num_comp_vectors /
-				     num_online_nodes();
-		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
-				   num_online_nodes();
-		int cpu_idx = 0;
+	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
+		ch = &target->ch[ch_idx];
+		ch->target = target;
+		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
+		spin_lock_init(&ch->lock);
+		INIT_LIST_HEAD(&ch->free_tx);
+		ret = srp_new_cm_id(ch);
+		if (ret)
+			goto err_disconnect;
 
-		for_each_online_cpu(cpu) {
-			if (cpu_to_node(cpu) != node)
-				continue;
-			if (ch_start + cpu_idx >= ch_end)
-				continue;
-			ch = &target->ch[ch_start + cpu_idx];
-			ch->target = target;
-			ch->comp_vector = cv_start == cv_end ? cv_start :
-				cv_start + cpu_idx % (cv_end - cv_start);
-			spin_lock_init(&ch->lock);
-			INIT_LIST_HEAD(&ch->free_tx);
-			ret = srp_new_cm_id(ch);
-			if (ret)
-				goto err_disconnect;
+		ret = srp_create_ch_ib(ch);
+		if (ret)
+			goto err_disconnect;
 
-			ret = srp_create_ch_ib(ch);
-			if (ret)
-				goto err_disconnect;
+		ret = srp_alloc_req_data(ch);
+		if (ret)
+			goto err_disconnect;
 
-			ret = srp_alloc_req_data(ch);
-			if (ret)
-				goto err_disconnect;
+		ret = srp_connect_ch(ch, max_iu_len, multich);
+		if (ret) {
+			char dst[64];
 
-			ret = srp_connect_ch(ch, multich);
-			if (ret) {
-				char dst[64];
-
-				if (target->using_rdma_cm)
-					snprintf(dst, sizeof(dst), "%pIS",
-						 &target->rdma_cm.dst);
-				else
-					snprintf(dst, sizeof(dst), "%pI6",
-						 target->ib_cm.orig_dgid.raw);
-				shost_printk(KERN_ERR, target->scsi_host,
-					     PFX "Connection %d/%d to %s failed\n",
-					     ch_start + cpu_idx,
-					     target->ch_count, dst);
-				if (node_idx == 0 && cpu_idx == 0) {
-					goto free_ch;
-				} else {
-					srp_free_ch_ib(target, ch);
-					srp_free_req_data(target, ch);
-					target->ch_count = ch - target->ch;
-					goto connected;
-				}
+			if (target->using_rdma_cm)
+				snprintf(dst, sizeof(dst), "%pIS",
+					 &target->rdma_cm.dst);
+			else
+				snprintf(dst, sizeof(dst), "%pI6",
+					 target->ib_cm.orig_dgid.raw);
+			shost_printk(KERN_ERR, target->scsi_host,
+				     PFX "Connection %d/%d to %s failed\n",
+				     ch_idx,
+				     target->ch_count, dst);
+			if (ch_idx == 0) {
+				goto free_ch;
+			} else {
+				srp_free_ch_ib(target, ch);
+				srp_free_req_data(target, ch);
+				target->ch_count = ch - target->ch;
+				goto connected;
			}
-
-			multich = true;
-			cpu_idx++;
		}
-		node_idx++;
+		multich = true;
	}
 
 connected:
@@ -3990 +3872 @@
 {
	struct srp_host *host = container_of(dev, struct srp_host, dev);
 
-	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
+	return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
 }
 
 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -4022 +3904 @@
 
	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dev.parent;
-	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
+	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
+		     port);
 
	if (device_register(&host->dev))
		goto free_host;
@@ -4044 +3927 @@
	return NULL;
 }
 
-static void srp_add_one(struct ib_device *device)
+static void srp_rename_dev(struct ib_device *device, void *client_data)
+{
+	struct srp_device *srp_dev = client_data;
+	struct srp_host *host, *tmp_host;
+
+	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+		char name[IB_DEVICE_NAME_MAX + 8];
+
+		snprintf(name, sizeof(name), "srp-%s-%d",
+			 dev_name(&device->dev), host->port);
+		device_rename(&host->dev, name);
+	}
+}
+
+static int srp_add_one(struct ib_device *device)
 {
	struct srp_device *srp_dev;
	struct ib_device_attr *attr = &device->attrs;
	struct srp_host *host;
-	int mr_page_shift, p;
+	int mr_page_shift;
+	unsigned int p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;
 
	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
-		return;
+		return -ENOMEM;
 
	/*
	 * Use the smallest page size supported by the HCA, down to a
@@ -4073 +3971 @@
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
 
-	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
-			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
-	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
-		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
-	} else if (!never_register &&
-		   attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
-		srp_dev->use_fast_reg = (srp_dev->has_fr &&
-					 (!srp_dev->has_fmr || prefer_fr));
-		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
-	}
+	if (!never_register && !srp_dev->has_fr)
+		dev_warn(&device->dev, "FR is not supported\n");
+	else if (!never_register &&
+		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+		srp_dev->use_fast_reg = srp_dev->has_fr;
 
-	if (never_register || !register_always ||
-	    (!srp_dev->has_fmr && !srp_dev->has_fr))
+	if (never_register || !register_always || !srp_dev->has_fr)
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
 
	if (srp_dev->use_fast_reg) {
@@ -4098 +3990 @@
	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
-		 device->name, mr_page_shift, attr->max_mr_size,
+		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
@@ -4106 +3998 @@
 
	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device, flags);
-	if (IS_ERR(srp_dev->pd))
-		goto free_dev;
+	if (IS_ERR(srp_dev->pd)) {
+		int ret = PTR_ERR(srp_dev->pd);
+
+		kfree(srp_dev);
+		return ret;
+	}
 
	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
		WARN_ON_ONCE(srp_dev->global_rkey == 0);
	}
 
-	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+	rdma_for_each_port (device, p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}
 
	ib_set_client_data(device, &srp_client, srp_dev);
-	return;
-
-free_dev:
-	kfree(srp_dev);
+	return 0;
 }
 
 static void srp_remove_one(struct ib_device *device, void *client_data)
@@ -4134 +4027 @@
	struct srp_target_port *target;
 
	srp_dev = client_data;
-	if (!srp_dev)
-		return;
 
	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
@@ -4184 +4075 @@
 {
	int ret;
 
+	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
+	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
+	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
+	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
+
	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
---|