2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
--- a/kernel/drivers/infiniband/core/rw.c
+++ b/kernel/drivers/infiniband/core/rw.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
 #include <rdma/mr_pool.h>
 #include <rdma/rw.h>
 
@@ -27,13 +20,16 @@
 MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
 
 /*
- * Check if the device might use memory registration. This is currently only
- * true for iWarp devices. In the future we can hopefully fine tune this based
- * on HCA driver input.
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWarp devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries, see rdma_rw_io_needs_mr()
  */
 static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
 {
 	if (rdma_protocol_iwarp(dev, port_num))
+		return true;
+	if (dev->attrs.max_sgl_rd)
 		return true;
 	if (unlikely(rdma_rw_force_mr))
 		return true;
@@ -42,40 +38,41 @@
 
 /*
  * Check if the device will use memory registration for this RW operation.
- * We currently always use memory registrations for iWarp RDMA READs, and
- * have a debug option to force usage of MRs.
- *
- * XXX: In the future we can hopefully fine tune this based on HCA driver
- * input.
+ * For RDMA READs we must use MRs on iWarp and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help testing this code path.
  */
 static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
 		enum dma_data_direction dir, int dma_nents)
 {
-	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
-		return true;
+	if (dir == DMA_FROM_DEVICE) {
+		if (rdma_protocol_iwarp(dev, port_num))
+			return true;
+		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+			return true;
+	}
 	if (unlikely(rdma_rw_force_mr))
 		return true;
 	return false;
 }
 
-static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
+static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
+					   bool pi_support)
 {
+	u32 max_pages;
+
+	if (pi_support)
+		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
+	else
+		max_pages = dev->attrs.max_fast_reg_page_list_len;
+
 	/* arbitrary limit to avoid allocating gigantic resources */
-	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
+	return min_t(u32, max_pages, 256);
 }
 
-/* Caller must have zero-initialized *reg. */
-static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
-		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
-		u32 sg_cnt, u32 offset)
+static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
 {
-	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
-	u32 nents = min(sg_cnt, pages_per_mr);
-	int count = 0, ret;
-
-	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
-	if (!reg->mr)
-		return -EAGAIN;
+	int count = 0;
 
 	if (reg->mr->need_inval) {
 		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
@@ -85,6 +82,25 @@
 	} else {
 		reg->inv_wr.next = NULL;
 	}
+
+	return count;
+}
+
+/* Caller must have zero-initialized *reg. */
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
+		u32 sg_cnt, u32 offset)
+{
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+						    qp->integrity_en);
+	u32 nents = min(sg_cnt, pages_per_mr);
+	int count = 0, ret;
+
+	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
+	if (!reg->mr)
+		return -EAGAIN;
+
+	count += rdma_rw_inv_key(reg);
 
 	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
 	if (ret < 0 || ret < nents) {
@@ -109,10 +125,11 @@
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct rdma_rw_reg_ctx *prev = NULL;
-	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+						    qp->integrity_en);
 	int i, j, ret = 0, count = 0;
 
-	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
 	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
 	if (!ctx->reg) {
 		ret = -ENOMEM;
@@ -178,7 +195,6 @@
 		struct scatterlist *sg, u32 sg_cnt, u32 offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
-	struct ib_device *dev = qp->pd->device;
 	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
 		      qp->max_read_sge;
 	struct ib_sge *sge;
@@ -208,8 +224,8 @@
 		rdma_wr->wr.sg_list = sge;
 
 		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
-			sge->addr = ib_sg_dma_address(dev, sg) + offset;
-			sge->length = ib_sg_dma_len(dev, sg) - offset;
+			sge->addr = sg_dma_address(sg) + offset;
+			sge->length = sg_dma_len(sg) - offset;
 			sge->lkey = qp->pd->local_dma_lkey;
 
 			total_len += sge->length;
@@ -235,14 +251,13 @@
 		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
 		enum dma_data_direction dir)
 {
-	struct ib_device *dev = qp->pd->device;
 	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
 
 	ctx->nr_ops = 1;
 
 	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
-	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
-	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;
+	ctx->single.sge.addr = sg_dma_address(sg) + offset;
+	ctx->single.sge.length = sg_dma_len(sg) - offset;
 
 	memset(rdma_wr, 0, sizeof(*rdma_wr));
 	if (dir == DMA_TO_DEVICE)
@@ -256,6 +271,26 @@
 
 	ctx->type = RDMA_RW_SINGLE_WR;
 	return 1;
+}
+
+static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+			     u32 sg_cnt, enum dma_data_direction dir)
+{
+	if (is_pci_p2pdma_page(sg_page(sg)))
+		pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
+	else
+		ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+}
+
+static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
+			  u32 sg_cnt, enum dma_data_direction dir)
+{
+	if (is_pci_p2pdma_page(sg_page(sg))) {
+		if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
+			return 0;
+		return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+	}
+	return ib_dma_map_sg(dev, sg, sg_cnt, dir);
 }
 
 /**
@@ -280,7 +315,7 @@
 	struct ib_device *dev = qp->pd->device;
 	int ret;
 
-	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
 	if (!ret)
 		return -ENOMEM;
 	sg_cnt = ret;
@@ -289,7 +324,7 @@
 	 * Skip to the S/G entry that sg_offset falls into:
 	 */
 	for (;;) {
-		u32 len = ib_sg_dma_len(dev, sg);
+		u32 len = sg_dma_len(sg);
 
 		if (sg_offset < len)
 			break;
@@ -319,7 +354,7 @@
 	return ret;
 
 out_unmap_sg:
-	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -348,90 +383,74 @@
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct ib_device *dev = qp->pd->device;
-	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+						    qp->integrity_en);
 	struct ib_rdma_wr *rdma_wr;
-	struct ib_send_wr *prev_wr = NULL;
 	int count = 0, ret;
 
 	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
-		pr_err("SG count too large\n");
+		pr_err("SG count too large: sg_cnt=%d, prot_sg_cnt=%d, pages_per_mr=%d\n",
+		       sg_cnt, prot_sg_cnt, pages_per_mr);
 		return -EINVAL;
 	}
 
-	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
 	if (!ret)
 		return -ENOMEM;
 	sg_cnt = ret;
 
-	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
-	if (!ret) {
-		ret = -ENOMEM;
-		goto out_unmap_sg;
+	if (prot_sg_cnt) {
+		ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+		if (!ret) {
+			ret = -ENOMEM;
+			goto out_unmap_sg;
+		}
+		prot_sg_cnt = ret;
 	}
-	prot_sg_cnt = ret;
 
 	ctx->type = RDMA_RW_SIG_MR;
 	ctx->nr_ops = 1;
-	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
-	if (!ctx->sig) {
+	ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
+	if (!ctx->reg) {
 		ret = -ENOMEM;
 		goto out_unmap_prot_sg;
 	}
 
-	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
-	if (ret < 0)
-		goto out_free_ctx;
-	count += ret;
-	prev_wr = &ctx->sig->data.reg_wr.wr;
-
-	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
-			prot_sg, prot_sg_cnt, 0);
-	if (ret < 0)
-		goto out_destroy_data_mr;
-	count += ret;
-
-	if (ctx->sig->prot.inv_wr.next)
-		prev_wr->next = &ctx->sig->prot.inv_wr;
-	else
-		prev_wr->next = &ctx->sig->prot.reg_wr.wr;
-	prev_wr = &ctx->sig->prot.reg_wr.wr;
-
-	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
-	if (!ctx->sig->sig_mr) {
+	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
+	if (!ctx->reg->mr) {
 		ret = -EAGAIN;
-		goto out_destroy_prot_mr;
+		goto out_free_ctx;
 	}
 
-	if (ctx->sig->sig_mr->need_inval) {
-		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));
+	count += rdma_rw_inv_key(ctx->reg);
 
-		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
-		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;
+	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));
 
-		prev_wr->next = &ctx->sig->sig_inv_wr;
-		prev_wr = &ctx->sig->sig_inv_wr;
+	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
+			      prot_sg_cnt, NULL, SZ_4K);
+	if (unlikely(ret)) {
+		pr_err("failed to map PI sg (%d)\n", sg_cnt + prot_sg_cnt);
+		goto out_destroy_sig_mr;
 	}
 
-	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
-	ctx->sig->sig_wr.wr.wr_cqe = NULL;
-	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
-	ctx->sig->sig_wr.wr.num_sge = 1;
-	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
-	ctx->sig->sig_wr.sig_attrs = sig_attrs;
-	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
-	if (prot_sg_cnt)
-		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
-	prev_wr->next = &ctx->sig->sig_wr.wr;
-	prev_wr = &ctx->sig->sig_wr.wr;
+	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
+	ctx->reg->reg_wr.wr.wr_cqe = NULL;
+	ctx->reg->reg_wr.wr.num_sge = 0;
+	ctx->reg->reg_wr.wr.send_flags = 0;
+	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
+	if (rdma_protocol_iwarp(qp->device, port_num))
+		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
+	ctx->reg->reg_wr.mr = ctx->reg->mr;
+	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
 	count++;
 
-	ctx->sig->sig_sge.addr = 0;
-	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
-	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
-		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;
+	ctx->reg->sge.addr = ctx->reg->mr->iova;
+	ctx->reg->sge.length = ctx->reg->mr->length;
+	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
+		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;
 
-	rdma_wr = &ctx->sig->data.wr;
-	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
+	rdma_wr = &ctx->reg->wr;
+	rdma_wr->wr.sg_list = &ctx->reg->sge;
 	rdma_wr->wr.num_sge = 1;
 	rdma_wr->remote_addr = remote_addr;
 	rdma_wr->rkey = rkey;
@@ -439,23 +458,20 @@
 		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
 	else
 		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
-	prev_wr->next = &rdma_wr->wr;
-	prev_wr = &rdma_wr->wr;
+	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
 	count++;
 
 	return count;
 
-out_destroy_prot_mr:
-	if (prot_sg_cnt)
-		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
-out_destroy_data_mr:
-	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
+out_destroy_sig_mr:
+	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
 out_free_ctx:
-	kfree(ctx->sig);
+	kfree(ctx->reg);
 out_unmap_prot_sg:
-	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+	if (prot_sg_cnt)
+		rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
 out_unmap_sg:
-	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
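Note (not part of the patch): the hunks above replace the old IB_WR_REG_SIG_MR scheme, with its separate data, protection and signature MRs and their individual registration/invalidation WRs, by a single integrity MR that is mapped with ib_map_mr_sg_pi() and registered through one IB_WR_REG_MR_INTEGRITY WR chained in front of the RDMA WR. As a hedged sketch of the caller side, the snippet below shows how a ULP might fill the ib_sig_attrs that rdma_rw_ctx_signature_init() copies into the integrity MR; the example_write_with_pi() function, the T10-DIF parameter values and the choice to pass no prot_sg are illustrative assumptions, not taken from this file.

/* Illustrative sketch only; not part of this patch. */
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

static int example_write_with_pi(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
				 u8 port_num, struct scatterlist *sg,
				 u32 sg_cnt, u64 remote_addr, u32 rkey)
{
	struct ib_sig_attrs sig_attrs = {};

	/* No protection information in local memory... */
	sig_attrs.mem.sig_type = IB_SIG_TYPE_NONE;
	/* ...but generate T10-DIF with a CRC guard on the wire. */
	sig_attrs.wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs.wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig_attrs.wire.sig.dif.pi_interval = 512;	/* illustrative interval */
	sig_attrs.wire.sig.dif.ref_tag = 0;		/* e.g. starting LBA */
	sig_attrs.wire.sig.dif.ref_remap = true;

	/* The HCA generates the PI, so no prot_sg is needed. */
	return rdma_rw_ctx_signature_init(ctx, qp, port_num, sg, sg_cnt,
					  NULL, 0, &sig_attrs,
					  remote_addr, rkey, DMA_TO_DEVICE);
}

Because the updated function only maps prot_sg when prot_sg_cnt is non-zero, a caller whose protection information is generated by the HCA (memory domain IB_SIG_TYPE_NONE) can pass NULL/0 as sketched above.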
@@ -496,21 +512,6 @@
 
 	switch (ctx->type) {
 	case RDMA_RW_SIG_MR:
-		rdma_rw_update_lkey(&ctx->sig->data, true);
-		if (ctx->sig->prot.mr)
-			rdma_rw_update_lkey(&ctx->sig->prot, true);
-
-		ctx->sig->sig_mr->need_inval = true;
-		ib_update_fast_reg_key(ctx->sig->sig_mr,
-				       ib_inc_rkey(ctx->sig->sig_mr->lkey));
-		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;
-
-		if (ctx->sig->data.inv_wr.next)
-			first_wr = &ctx->sig->data.inv_wr;
-		else
-			first_wr = &ctx->sig->data.reg_wr.wr;
-		last_wr = &ctx->sig->data.wr.wr;
-		break;
 	case RDMA_RW_MR:
 		for (i = 0; i < ctx->nr_ops; i++) {
 			rdma_rw_update_lkey(&ctx->reg[i],
@@ -602,13 +603,13 @@
 		break;
 	}
 
-	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy);
 
 /**
  * rdma_rw_ctx_destroy_signature - release all resources allocated by
- *	rdma_rw_ctx_init_signature
+ *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:	queue pair to operate on
 * @port_num:	port num to which the connection is bound
@@ -626,16 +627,12 @@
 	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
 		return;
 
-	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
-	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
+	kfree(ctx->reg);
 
-	if (ctx->sig->prot.mr) {
-		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
-		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
-	}
-
-	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
-	kfree(ctx->sig);
+	if (prot_sg_cnt)
+		rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
 
@@ -656,7 +653,7 @@
 	unsigned int mr_pages;
 
 	if (rdma_rw_can_use_mr(device, port_num))
-		mr_pages = rdma_rw_fr_page_list_len(device);
+		mr_pages = rdma_rw_fr_page_list_len(device, false);
 	else
 		mr_pages = device->attrs.max_sge_rd;
 	return DIV_ROUND_UP(maxpages, mr_pages);
@@ -682,9 +679,8 @@
 	 * we'll need two additional MRs for the registrations and the
 	 * invalidation.
 	 */
-	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
-		factor += 6;	/* (inv + reg) * (data + prot + sig) */
-	else if (rdma_rw_can_use_mr(dev, attr->port_num))
+	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
+	    rdma_rw_can_use_mr(dev, attr->port_num))
 		factor += 2;	/* inv + reg */
 
 	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
@@ -700,20 +696,22 @@
 int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
 {
 	struct ib_device *dev = qp->pd->device;
-	u32 nr_mrs = 0, nr_sig_mrs = 0;
+	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
 	int ret = 0;
 
-	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
+	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
 		nr_sig_mrs = attr->cap.max_rdma_ctxs;
-		nr_mrs = attr->cap.max_rdma_ctxs * 2;
+		nr_mrs = attr->cap.max_rdma_ctxs;
+		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
 	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
 		nr_mrs = attr->cap.max_rdma_ctxs;
+		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
 	}
 
 	if (nr_mrs) {
 		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
 				IB_MR_TYPE_MEM_REG,
-				rdma_rw_fr_page_list_len(dev));
+				max_num_sg, 0);
 		if (ret) {
 			pr_err("%s: failed to allocated %d MRs\n",
 				__func__, nr_mrs);
@@ -723,10 +721,10 @@
 
 	if (nr_sig_mrs) {
 		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
-				IB_MR_TYPE_SIGNATURE, 2);
+				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
 		if (ret) {
 			pr_err("%s: failed to allocated %d SIG MRs\n",
-				__func__, nr_mrs);
+			       __func__, nr_sig_mrs);
 			goto out_free_rdma_mrs;
 		}
 	}
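
For orientation, here is a hedged usage sketch of the rdma_rw_ctx API whose implementation the patch touches. rdma_rw_ctx_init(), rdma_rw_ctx_post() and rdma_rw_ctx_destroy() are the exported entry points of this file; the example_io structure, its fields and the example_read_from_peer() function are hypothetical placeholders. Because the posted work requests reference the context, it is embedded in a caller-owned request structure that lives until completion rather than kept on the stack.

/* Illustrative sketch only; not part of this patch. */
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

struct example_io {
	struct rdma_rw_ctx	rw;	/* R/W context owned by this I/O */
	struct ib_cqe		cqe;	/* completion entry, .done set by the caller */
	struct scatterlist	*sg;
	u32			sg_cnt;
};

static int example_read_from_peer(struct example_io *io, struct ib_qp *qp,
				  u8 port_num, u64 remote_addr, u32 rkey)
{
	int ret;

	/* Map the S/G list and build the RDMA READ work request chain. */
	ret = rdma_rw_ctx_init(&io->rw, qp, port_num, io->sg, io->sg_cnt, 0,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* Post the chain; completion is signalled through io->cqe.done. */
	ret = rdma_rw_ctx_post(&io->rw, qp, port_num, &io->cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(&io->rw, qp, port_num, io->sg, io->sg_cnt,
				    DMA_FROM_DEVICE);
	return ret;
}

On successful completion the ULP's io->cqe.done handler would call rdma_rw_ctx_destroy() (or rdma_rw_ctx_destroy_signature() for RDMA_RW_SIG_MR contexts) to unmap the scatterlists and return any MRs to the QP pools.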