commit bedbef8ad3e75a304af6361af235302bcc61d06b
Date: 2024-05-14

--- a/kernel/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -55,261 +55,50 @@
 {
 	iser_err("async event %s (%d) on device %s port %d\n",
 		 ib_event_msg(event->event), event->event,
-		 event->device->name, event->element.port_num);
+		 dev_name(&event->device->dev), event->element.port_num);
 }
 
-/**
+/*
  * iser_create_device_ib_res - creates Protection Domain (PD), Completion
  * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
  *
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
 	struct ib_device *ib_dev = device->ib_device;
-	int ret, i, max_cqe;
 
-	ret = iser_assign_reg_ops(device);
-	if (ret)
-		return ret;
-
-	device->comps_used = min_t(int, num_online_cpus(),
-				   ib_dev->num_comp_vectors);
-
-	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
-				GFP_KERNEL);
-	if (!device->comps)
-		goto comps_err;
-
-	max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
-
-	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
-		  device->comps_used, ib_dev->name,
-		  ib_dev->num_comp_vectors, max_cqe);
+	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+		iser_err("IB device does not support memory registrations\n");
+		return -1;
+	}
 
 	device->pd = ib_alloc_pd(ib_dev,
 				 iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	for (i = 0; i < device->comps_used; i++) {
-		struct iser_comp *comp = &device->comps[i];
-
-		comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
-				       IB_POLL_SOFTIRQ);
-		if (IS_ERR(comp->cq)) {
-			comp->cq = NULL;
-			goto cq_err;
-		}
-	}
-
 	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
 			      iser_event_handler);
 	ib_register_event_handler(&device->event_handler);
 	return 0;
 
-cq_err:
-	for (i = 0; i < device->comps_used; i++) {
-		struct iser_comp *comp = &device->comps[i];
-
-		if (comp->cq)
-			ib_free_cq(comp->cq);
-	}
-	ib_dealloc_pd(device->pd);
 pd_err:
-	kfree(device->comps);
-comps_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }
 
-/**
+/*
  * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
-	int i;
-
-	for (i = 0; i < device->comps_used; i++) {
-		struct iser_comp *comp = &device->comps[i];
-
-		ib_free_cq(comp->cq);
-		comp->cq = NULL;
-	}
-
 	ib_unregister_event_handler(&device->event_handler);
 	ib_dealloc_pd(device->pd);
 
-	kfree(device->comps);
-	device->comps = NULL;
 	device->pd = NULL;
-}
-
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- *
- * returns 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
-			unsigned cmds_max,
-			unsigned int size)
-{
-	struct iser_device *device = ib_conn->device;
-	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-	struct iser_page_vec *page_vec;
-	struct iser_fr_desc *desc;
-	struct ib_fmr_pool *fmr_pool;
-	struct ib_fmr_pool_param params;
-	int ret;
-
-	INIT_LIST_HEAD(&fr_pool->list);
-	spin_lock_init(&fr_pool->lock);
-
-	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
-
-	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
-			   GFP_KERNEL);
-	if (!page_vec) {
-		ret = -ENOMEM;
-		goto err_frpl;
-	}
-
-	page_vec->pages = (u64 *)(page_vec + 1);
-
-	params.page_shift = SHIFT_4K;
-	params.max_pages_per_fmr = size;
-	/* make the pool size twice the max number of SCSI commands *
-	 * the ML is expected to queue, watermark for unmap at 50%  */
-	params.pool_size = cmds_max * 2;
-	params.dirty_watermark = cmds_max;
-	params.cache = 0;
-	params.flush_function = NULL;
-	params.access = (IB_ACCESS_LOCAL_WRITE |
-			 IB_ACCESS_REMOTE_WRITE |
-			 IB_ACCESS_REMOTE_READ);
-
-	fmr_pool = ib_create_fmr_pool(device->pd, &params);
-	if (IS_ERR(fmr_pool)) {
-		ret = PTR_ERR(fmr_pool);
-		iser_err("FMR allocation failed, err %d\n", ret);
-		goto err_fmr;
-	}
-
-	desc->rsc.page_vec = page_vec;
-	desc->rsc.fmr_pool = fmr_pool;
-	list_add(&desc->list, &fr_pool->list);
-
-	return 0;
-
-err_fmr:
-	kfree(page_vec);
-err_frpl:
-	kfree(desc);
-
-	return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
-	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-	struct iser_fr_desc *desc;
-
-	desc = list_first_entry(&fr_pool->list,
-				struct iser_fr_desc, list);
-	list_del(&desc->list);
-
-	iser_info("freeing conn %p fmr pool %p\n",
-		  ib_conn, desc->rsc.fmr_pool);
-
-	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
-	kfree(desc->rsc.page_vec);
-	kfree(desc);
-}
-
-static int
-iser_alloc_reg_res(struct iser_device *device,
-		   struct ib_pd *pd,
-		   struct iser_reg_resources *res,
-		   unsigned int size)
-{
-	struct ib_device *ib_dev = device->ib_device;
-	enum ib_mr_type mr_type;
-	int ret;
-
-	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
-		mr_type = IB_MR_TYPE_SG_GAPS;
-	else
-		mr_type = IB_MR_TYPE_MEM_REG;
-
-	res->mr = ib_alloc_mr(pd, mr_type, size);
-	if (IS_ERR(res->mr)) {
-		ret = PTR_ERR(res->mr);
-		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
-		return ret;
-	}
-	res->mr_valid = 0;
-
-	return 0;
-}
-
-static void
-iser_free_reg_res(struct iser_reg_resources *rsc)
-{
-	ib_dereg_mr(rsc->mr);
-}
-
-static int
-iser_alloc_pi_ctx(struct iser_device *device,
-		  struct ib_pd *pd,
-		  struct iser_fr_desc *desc,
-		  unsigned int size)
-{
-	struct iser_pi_context *pi_ctx = NULL;
-	int ret;
-
-	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
-	if (!desc->pi_ctx)
-		return -ENOMEM;
-
-	pi_ctx = desc->pi_ctx;
-
-	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
-	if (ret) {
-		iser_err("failed to allocate reg_resources\n");
-		goto alloc_reg_res_err;
-	}
-
-	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
-	if (IS_ERR(pi_ctx->sig_mr)) {
-		ret = PTR_ERR(pi_ctx->sig_mr);
-		goto sig_mr_failure;
-	}
-	pi_ctx->sig_mr_valid = 0;
-	desc->pi_ctx->sig_protected = 0;
-
-	return 0;
-
-sig_mr_failure:
-	iser_free_reg_res(&pi_ctx->rsc);
-alloc_reg_res_err:
-	kfree(desc->pi_ctx);
-
-	return ret;
-}
-
-static void
-iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
-{
-	iser_free_reg_res(&pi_ctx->rsc);
-	ib_dereg_mr(pi_ctx->sig_mr);
-	kfree(pi_ctx);
 }
 
 static struct iser_fr_desc *
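
A note on the surviving ib_alloc_pd() call in the hunk above: with iser_always_reg off, the PD is created with IB_PD_UNSAFE_GLOBAL_RKEY, so the core publishes a device-wide rkey in pd->unsafe_global_rkey that peers can use without per-I/O registration. The per-vector CQ array removed here is compensated for later in the patch, where each connection sizes its own CQ out of the core's shared pool. A minimal sketch of what the PD flag implies (hypothetical helper, not part of the patch):

/*
 * Hypothetical helper: allocate a PD and, when per-I/O registration is
 * not forced, report the unsafe global rkey the flag exposes. Such an
 * rkey covers all of host memory and is only acceptable on trusted
 * fabrics.
 */
static struct ib_pd *example_alloc_pd(struct ib_device *ib_dev, bool always_reg)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(ib_dev, always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (!IS_ERR(pd) && !always_reg)
		pr_info("unsafe global rkey: 0x%x\n", pd->unsafe_global_rkey);
	return pd;
}
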
@@ -319,36 +108,66 @@
 			unsigned int size)
 {
 	struct iser_fr_desc *desc;
+	struct ib_device *ib_dev = device->ib_device;
+	enum ib_mr_type mr_type;
 	int ret;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return ERR_PTR(-ENOMEM);
 
-	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
-	if (ret)
-		goto reg_res_alloc_failure;
+	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+		mr_type = IB_MR_TYPE_SG_GAPS;
+	else
+		mr_type = IB_MR_TYPE_MEM_REG;
+
+	desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
+	if (IS_ERR(desc->rsc.mr)) {
+		ret = PTR_ERR(desc->rsc.mr);
+		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+		goto err_alloc_mr;
+	}
 
 	if (pi_enable) {
-		ret = iser_alloc_pi_ctx(device, pd, desc, size);
-		if (ret)
-			goto pi_ctx_alloc_failure;
+		desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
+		if (IS_ERR(desc->rsc.sig_mr)) {
+			ret = PTR_ERR(desc->rsc.sig_mr);
+			iser_err("Failed to allocate sig_mr err=%d\n", ret);
+			goto err_alloc_mr_integrity;
+		}
 	}
+	desc->rsc.mr_valid = 0;
 
 	return desc;
 
-pi_ctx_alloc_failure:
-	iser_free_reg_res(&desc->rsc);
-reg_res_alloc_failure:
+err_alloc_mr_integrity:
+	ib_dereg_mr(desc->rsc.mr);
+err_alloc_mr:
 	kfree(desc);
 
 	return ERR_PTR(ret);
 }
 
+static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
+{
+	struct iser_reg_resources *res = &desc->rsc;
+
+	ib_dereg_mr(res->mr);
+	if (res->sig_mr) {
+		ib_dereg_mr(res->sig_mr);
+		res->sig_mr = NULL;
+	}
+	kfree(desc);
+}
+
 /**
  * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
  *	for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
  */
 int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
 			    unsigned cmds_max,
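
The descriptor rework above folds the old two-step pi_ctx allocation into direct MR allocation: one fastreg MR for data and, when T10-PI is on, one integrity MR from ib_alloc_mr_integrity(), whose two size arguments bound the data and metadata scatterlists respectively. A standalone sketch of that pairing (hypothetical helper name; assumes the RDMA core headers):

/*
 * Hypothetical helper: allocate the MR pair a PI-enabled fastreg
 * descriptor needs. Both come back as plain ib_mr objects and are
 * released with ib_dereg_mr().
 */
static int example_alloc_pi_mrs(struct ib_pd *pd, u32 size,
				struct ib_mr **data_mr, struct ib_mr **sig_mr)
{
	int ret;

	*data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
	if (IS_ERR(*data_mr))
		return PTR_ERR(*data_mr);

	*sig_mr = ib_alloc_mr_integrity(pd, size, size);
	if (IS_ERR(*sig_mr)) {
		ret = PTR_ERR(*sig_mr);
		ib_dereg_mr(*data_mr);
		return ret;
	}
	return 0;
}
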
@@ -385,6 +204,7 @@
 
 /**
  * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
  */
 void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 {
@@ -399,10 +219,7 @@
 
 	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
 		list_del(&desc->all_list);
-		iser_free_reg_res(&desc->rsc);
-		if (desc->pi_ctx)
-			iser_free_pi_ctx(desc->pi_ctx);
-		kfree(desc);
+		iser_destroy_fastreg_desc(desc);
 		++i;
 	}
 
@@ -411,10 +228,10 @@
 		  fr_pool->size - i);
 }
 
-/**
+/*
  * iser_create_ib_conn_res - Queue-Pair (QP)
  *
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
  */
 static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 {
@@ -423,75 +240,63 @@
 	struct ib_device *ib_dev;
 	struct ib_qp_init_attr init_attr;
 	int ret = -ENOMEM;
-	int index, min_index = 0;
+	unsigned int max_send_wr, cq_size;
 
 	BUG_ON(ib_conn->device == NULL);
 
 	device = ib_conn->device;
 	ib_dev = device->ib_device;
 
-	memset(&init_attr, 0, sizeof init_attr);
+	if (ib_conn->pi_support)
+		max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
+	else
+		max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+	max_send_wr = min_t(unsigned int, max_send_wr,
+			    (unsigned int)ib_dev->attrs.max_qp_wr);
 
-	mutex_lock(&ig.connlist_mutex);
-	/* select the CQ with the minimal number of usages */
-	for (index = 0; index < device->comps_used; index++) {
-		if (device->comps[index].active_qps <
-		    device->comps[min_index].active_qps)
-			min_index = index;
+	cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
+	ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
+	if (IS_ERR(ib_conn->cq)) {
+		ret = PTR_ERR(ib_conn->cq);
+		goto cq_err;
 	}
-	ib_conn->comp = &device->comps[min_index];
-	ib_conn->comp->active_qps++;
-	mutex_unlock(&ig.connlist_mutex);
-	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	ib_conn->cq_size = cq_size;
+
+	memset(&init_attr, 0, sizeof(init_attr));
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context = (void *)ib_conn;
-	init_attr.send_cq = ib_conn->comp->cq;
-	init_attr.recv_cq = ib_conn->comp->cq;
+	init_attr.send_cq = ib_conn->cq;
+	init_attr.recv_cq = ib_conn->cq;
 	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;
 	init_attr.cap.max_recv_sge = 1;
 	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	init_attr.qp_type = IB_QPT_RC;
-	if (ib_conn->pi_support) {
-		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
-		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
-		iser_conn->max_cmds =
-			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
-	} else {
-		if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
-			init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
-			iser_conn->max_cmds =
-				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
-		} else {
-			init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
-			iser_conn->max_cmds =
-				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
-			iser_dbg("device %s supports max_send_wr %d\n",
-				 device->ib_device->name, ib_dev->attrs.max_qp_wr);
-		}
-	}
+	init_attr.cap.max_send_wr = max_send_wr;
+	if (ib_conn->pi_support)
+		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+	iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);
 
 	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
 	if (ret)
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_info("setting conn %p cma_id %p qp %p\n",
+	iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n",
 		  ib_conn, ib_conn->cma_id,
-		  ib_conn->cma_id->qp);
+		  ib_conn->cma_id->qp, max_send_wr);
 	return ret;
 
 out_err:
-	mutex_lock(&ig.connlist_mutex);
-	ib_conn->comp->active_qps--;
-	mutex_unlock(&ig.connlist_mutex);
+	ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
+cq_err:
 	iser_err("unable to alloc mem or create resource, err %d\n", ret);
 
 	return ret;
 }
 
-/**
+/*
  * based on the resolved device node GUID see if there already allocated
  * device for this device. If there's no such, create one.
  */
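
The QP setup above swaps the driver-private "least used completion vector" bookkeeping for the core's shared CQ pool. The contract is that the nr_cqe value passed to ib_cq_pool_get() must be passed back to ib_cq_pool_put(), which is why the patch stores it in ib_conn->cq_size; a comp_vector hint of -1 lets the core spread CQs across vectors itself. A minimal lifecycle sketch (illustrative function, not from the patch):

static int example_conn_cq_lifecycle(struct ib_device *ib_dev,
				     u32 max_send_wr, u32 max_recv_wr)
{
	u32 nr_cqe = max_send_wr + max_recv_wr;
	struct ib_cq *cq;

	cq = ib_cq_pool_get(ib_dev, nr_cqe, -1, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... use cq as both send_cq and recv_cq of a QP ... */

	ib_cq_pool_put(cq, nr_cqe);	/* must pass the same nr_cqe */
	return 0;
}
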
@@ -542,9 +347,9 @@
 	mutex_unlock(&ig.device_list_mutex);
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
 				     enum iser_conn_state comp,
 				     enum iser_conn_state exch)
@@ -597,10 +402,8 @@
 		 iser_conn, ib_conn->cma_id, ib_conn->qp);
 
 	if (ib_conn->qp != NULL) {
-		mutex_lock(&ig.connlist_mutex);
-		ib_conn->comp->active_qps--;
-		mutex_unlock(&ig.connlist_mutex);
 		rdma_destroy_qp(ib_conn->cma_id);
+		ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
 		ib_conn->qp = NULL;
 	}
 
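
Note the ordering kept above: rdma_destroy_qp() runs before ib_cq_pool_put(), since a CQ must outlive any QP still attached to it; returning the CQ to the pool first could release it while the QP can still generate completions.
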
@@ -616,7 +419,8 @@
 }
 
 /**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
  */
 void iser_conn_release(struct iser_conn *iser_conn)
 {
@@ -650,7 +454,10 @@
 }
 
 /**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
  * Called with state mutex held
  */
 int iser_conn_terminate(struct iser_conn *iser_conn)
@@ -687,9 +494,9 @@
 	return 1;
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static void iser_connect_error(struct rdma_cm_id *cma_id)
 {
 	struct iser_conn *iser_conn;
@@ -706,36 +513,35 @@
 	struct ib_device_attr *attr = &device->ib_device->attrs;
 	unsigned short sg_tablesize, sup_sg_tablesize;
 	unsigned short reserved_mr_pages;
+	u32 max_num_sg;
 
 	/*
-	 * FRs without SG_GAPS or FMRs can only map up to a (device) page per
-	 * entry, but if the first entry is misaligned we'll end up using two
-	 * entries (head and tail) for a single page worth data, so one
-	 * additional entry is required.
+	 * FRs without SG_GAPS can only map up to a (device) page per entry,
+	 * but if the first entry is misaligned we'll end up using two entries
+	 * (head and tail) for a single page worth data, so one additional
+	 * entry is required.
 	 */
-	if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
-	    (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+	if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
 		reserved_mr_pages = 0;
 	else
 		reserved_mr_pages = 1;
 
-	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
-	if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
-		sup_sg_tablesize =
-			min_t(
-			 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
-			 attr->max_fast_reg_page_list_len - reserved_mr_pages);
+	if (iser_conn->ib_conn.pi_support)
+		max_num_sg = attr->max_pi_fast_reg_page_list_len;
 	else
-		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
+		max_num_sg = attr->max_fast_reg_page_list_len;
 
+	sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
+	sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+				 max_num_sg - reserved_mr_pages);
 	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 	iser_conn->pages_per_mr =
 		iser_conn->scsi_sg_tablesize + reserved_mr_pages;
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static void iser_addr_handler(struct rdma_cm_id *cma_id)
 {
 	struct iser_device *device;
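
A worked example of the sizing above, with illustrative numbers: for max_sectors = 1024 (512-byte sectors, so 512 KiB per command), sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128 pages. On a device without SG_GAPS (reserved_mr_pages = 1) that reports max_fast_reg_page_list_len = 256, and assuming ISCSI_ISER_MAX_SG_TABLESIZE is not the binding limit, sup_sg_tablesize = 255, so the connection ends up with scsi_sg_tablesize = 128 and pages_per_mr = 129.
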
@@ -761,10 +567,10 @@
 	/* connection T10-PI support */
 	if (iser_pi_enable) {
 		if (!(device->ib_device->attrs.device_cap_flags &
-		      IB_DEVICE_SIGNATURE_HANDOVER)) {
+		      IB_DEVICE_INTEGRITY_HANDOVER)) {
 			iser_warn("T10-PI requested but not supported on %s, "
 				  "continue without T10-PI\n",
-				  ib_conn->device->ib_device->name);
+				  dev_name(&ib_conn->device->ib_device->dev));
 			ib_conn->pi_support = false;
 		} else {
 			ib_conn->pi_support = true;
@@ -781,9 +587,9 @@
 	}
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static void iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
@@ -791,7 +597,7 @@
 	struct iser_cm_hdr req_hdr;
 	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
+	struct ib_device *ib_dev = ib_conn->device->ib_device;
 
 	if (iser_conn->state != ISER_CONN_PENDING)
 		/* bailout */
@@ -802,19 +608,19 @@
 		goto failure;
 
 	memset(&conn_param, 0, sizeof conn_param);
-	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+	conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
 	conn_param.initiator_depth = 1;
 	conn_param.retry_count = 7;
 	conn_param.rnr_retry_count = 6;
 
 	memset(&req_hdr, 0, sizeof(req_hdr));
 	req_hdr.flags = ISER_ZBVA_NOT_SUP;
-	if (!device->remote_inv_sup)
+	if (!iser_always_reg)
 		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
 	conn_param.private_data = (void *)&req_hdr;
 	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
 
-	ret = rdma_connect(cma_id, &conn_param);
+	ret = rdma_connect_locked(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
 		goto failure;
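
The switch to rdma_connect_locked() matters because this code runs inside the rdma_cm event handler, where the core already holds the cm_id's handler lock; plain rdma_connect() would self-deadlock there. Sketched in isolation (hypothetical handler, parameter values mirroring the code above):

/*
 * Hypothetical handler: initiating the connection from the
 * ROUTE_RESOLVED callback, i.e. from handler context, requires the
 * _locked variant of rdma_connect().
 */
static int example_route_resolved(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param = {
		.initiator_depth = 1,
		.retry_count = 7,
		.rnr_retry_count = 6,
	};

	return rdma_connect_locked(cma_id, &conn_param);
}
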
@@ -905,7 +711,7 @@
 	case RDMA_CM_EVENT_REJECTED:
 		iser_info("Connection rejected: %s\n",
 			  rdma_reject_msg(cma_id, event->status));
-		/* FALLTHROUGH */
+		fallthrough;
 	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
@@ -1068,7 +874,7 @@
 
 	ib_conn->post_recv_buf_count += count;
 	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
-	if (ib_ret) {
+	if (unlikely(ib_ret)) {
 		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
 		ib_conn->post_recv_buf_count -= count;
 	} else
@@ -1079,14 +885,18 @@
 
 
 /**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ * @signal: true to send work request as SIGNALED
  *
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
  */
 int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 		   bool signal)
 {
-	struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
+	struct ib_send_wr *wr = &tx_desc->send_wr;
+	struct ib_send_wr *first_wr;
 	int ib_ret;
 
 	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
@@ -1100,8 +910,15 @@
 	wr->opcode = IB_WR_SEND;
 	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
 
-	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
-	if (ib_ret)
+	if (tx_desc->inv_wr.next)
+		first_wr = &tx_desc->inv_wr;
+	else if (tx_desc->reg_wr.wr.next)
+		first_wr = &tx_desc->reg_wr.wr;
+	else
+		first_wr = wr;
+
+	ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+	if (unlikely(ib_ret))
 		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
 			 ib_ret, wr->opcode);
 
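
The rewritten send path above leans on standard work-request chaining: each ib_send_wr's next pointer links to the following WR, and posting the head submits the whole chain in order (local invalidate, then fast registration, then the SEND). A sketch of the pattern (hypothetical helper; passing NULL for the bad-WR pointer is allowed, as in the patch):

static int example_post_chain(struct ib_qp *qp, struct ib_send_wr *inv_wr,
			      struct ib_reg_wr *reg_wr,
			      struct ib_send_wr *send_wr)
{
	/* inv -> reg -> send: one doorbell submits the whole chain */
	inv_wr->next = &reg_wr->wr;
	reg_wr->wr.next = send_wr;
	send_wr->next = NULL;

	return ib_post_send(qp, inv_wr, NULL);
}
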
@@ -1117,12 +934,12 @@
 	struct ib_mr_status mr_status;
 	int ret;
 
-	if (desc && desc->pi_ctx->sig_protected) {
-		desc->pi_ctx->sig_protected = 0;
-		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+	if (desc && desc->sig_protected) {
+		desc->sig_protected = false;
+		ret = ib_check_mr_status(desc->rsc.sig_mr,
 					 IB_MR_CHECK_SIG_STATUS, &mr_status);
 		if (ret) {
-			pr_err("ib_check_mr_status failed, ret %d\n", ret);
+			iser_err("ib_check_mr_status failed, ret %d\n", ret);
 			/* Not a lot we can do, return ambiguous guard error */
 			*sector = 0;
 			return 0x1;
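
For context on the check above: after an integrity-protected transfer completes, ib_check_mr_status() with IB_MR_CHECK_SIG_STATUS reports whether the HCA detected a guard/ref-tag/app-tag mismatch. A hedged sketch of decoding the result (illustrative helper; fields as defined by struct ib_mr_status):

static void example_report_sig_err(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;

	if (ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status))
		return;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("sig error type %d expected 0x%x actual 0x%x at offset %llu\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual,
		       mr_status.sig_err.sig_err_offset);
}
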
@@ -1134,7 +951,7 @@
 		sector_div(sector_off, sector_size + 8);
 		*sector = scsi_get_lba(iser_task->sc) + sector_off;
 
-		pr_err("PI error found type %d at sector %llx "
+		iser_err("PI error found type %d at sector %llx "
 		       "expected %x vs actual %x\n",
 		       mr_status.sig_err.err_type,
 		       (unsigned long long)*sector,