@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
 #include <rdma/mr_pool.h>
 #include <rdma/rw.h>
 
@@ -27,13 +20,16 @@
 MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
 
 /*
- * Check if the device might use memory registration. This is currently only
- * true for iWarp devices. In the future we can hopefully fine tune this based
- * on HCA driver input.
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWarp devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries, see rdma_rw_io_needs_mr()
  */
 static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
 {
 	if (rdma_protocol_iwarp(dev, port_num))
+		return true;
+	if (dev->attrs.max_sgl_rd)
 		return true;
 	if (unlikely(rdma_rw_force_mr))
 		return true;
@@ -42,40 +38,41 @@
 
 /*
  * Check if the device will use memory registration for this RW operation.
- * We currently always use memory registrations for iWarp RDMA READs, and
- * have a debug option to force usage of MRs.
- *
- * XXX: In the future we can hopefully fine tune this based on HCA driver
- * input.
+ * For RDMA READs we must use MRs on iWarp and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help testing this code path.
  */
 static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
 		enum dma_data_direction dir, int dma_nents)
 {
-	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
-		return true;
+	if (dir == DMA_FROM_DEVICE) {
+		if (rdma_protocol_iwarp(dev, port_num))
+			return true;
+		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+			return true;
+	}
 	if (unlikely(rdma_rw_force_mr))
 		return true;
 	return false;
 }
 
-static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
+static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
+					   bool pi_support)
 {
+	u32 max_pages;
+
+	if (pi_support)
+		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
+	else
+		max_pages = dev->attrs.max_fast_reg_page_list_len;
+
 	/* arbitrary limit to avoid allocating gigantic resources */
-	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
+	return min_t(u32, max_pages, 256);
 }
 
-/* Caller must have zero-initialized *reg. */
-static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
-		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
-		u32 sg_cnt, u32 offset)
+static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
 {
-	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
-	u32 nents = min(sg_cnt, pages_per_mr);
-	int count = 0, ret;
-
-	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
-	if (!reg->mr)
-		return -EAGAIN;
+	int count = 0;
 
 	if (reg->mr->need_inval) {
 		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
@@ -85,6 +82,25 @@
 	} else {
 		reg->inv_wr.next = NULL;
 	}
+
+	return count;
+}
+
+/* Caller must have zero-initialized *reg. */
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
+		u32 sg_cnt, u32 offset)
+{
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+						    qp->integrity_en);
+	u32 nents = min(sg_cnt, pages_per_mr);
+	int count = 0, ret;
+
+	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
+	if (!reg->mr)
+		return -EAGAIN;
+
+	count += rdma_rw_inv_key(reg);
 
 	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
 	if (ret < 0 || ret < nents) {
@@ -109,10 +125,11 @@
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct rdma_rw_reg_ctx *prev = NULL;
-	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+						    qp->integrity_en);
 	int i, j, ret = 0, count = 0;
 
-	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
 	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
 	if (!ctx->reg) {
 		ret = -ENOMEM;
@@ -178,7 +195,6 @@
 		struct scatterlist *sg, u32 sg_cnt, u32 offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
-	struct ib_device *dev = qp->pd->device;
 	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
 		      qp->max_read_sge;
 	struct ib_sge *sge;
@@ -208,8 +224,8 @@
 		rdma_wr->wr.sg_list = sge;
 
 		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
-			sge->addr = ib_sg_dma_address(dev, sg) + offset;
-			sge->length = ib_sg_dma_len(dev, sg) - offset;
+			sge->addr = sg_dma_address(sg) + offset;
+			sge->length = sg_dma_len(sg) - offset;
 			sge->lkey = qp->pd->local_dma_lkey;
 
 			total_len += sge->length;
@@ -235,14 +251,13 @@
 		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
 		enum dma_data_direction dir)
 {
-	struct ib_device *dev = qp->pd->device;
 	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
 
 	ctx->nr_ops = 1;
 
 	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
-	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
-	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;
+	ctx->single.sge.addr = sg_dma_address(sg) + offset;
+	ctx->single.sge.length = sg_dma_len(sg) - offset;
 
 	memset(rdma_wr, 0, sizeof(*rdma_wr));
 	if (dir == DMA_TO_DEVICE)
@@ -256,6 +271,26 @@
 
 	ctx->type = RDMA_RW_SINGLE_WR;
 	return 1;
+}
+
+static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+			     u32 sg_cnt, enum dma_data_direction dir)
+{
+	if (is_pci_p2pdma_page(sg_page(sg)))
+		pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
+	else
+		ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+}
+
+static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
+			  u32 sg_cnt, enum dma_data_direction dir)
+{
+	if (is_pci_p2pdma_page(sg_page(sg))) {
+		if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
+			return 0;
+		return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+	}
+	return ib_dma_map_sg(dev, sg, sg_cnt, dir);
 }
 
 /**
@@ -280,7 +315,7 @@
 	struct ib_device *dev = qp->pd->device;
 	int ret;
 
-	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
 	if (!ret)
 		return -ENOMEM;
 	sg_cnt = ret;
@@ -289,7 +324,7 @@
 	 * Skip to the S/G entry that sg_offset falls into:
 	 */
 	for (;;) {
-		u32 len = ib_sg_dma_len(dev, sg);
+		u32 len = sg_dma_len(sg);
 
 		if (sg_offset < len)
 			break;
@@ -319,7 +354,7 @@
 	return ret;
 
 out_unmap_sg:
-	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -348,90 +383,74 @@
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct ib_device *dev = qp->pd->device;
-	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
+						    qp->integrity_en);
 	struct ib_rdma_wr *rdma_wr;
-	struct ib_send_wr *prev_wr = NULL;
 	int count = 0, ret;
 
 	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
-		pr_err("SG count too large\n");
+		pr_err("SG count too large: sg_cnt=%d, prot_sg_cnt=%d, pages_per_mr=%d\n",
+		       sg_cnt, prot_sg_cnt, pages_per_mr);
 		return -EINVAL;
 	}
 
-	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
 	if (!ret)
 		return -ENOMEM;
 	sg_cnt = ret;
 
-	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
-	if (!ret) {
-		ret = -ENOMEM;
-		goto out_unmap_sg;
+	if (prot_sg_cnt) {
+		ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+		if (!ret) {
+			ret = -ENOMEM;
+			goto out_unmap_sg;
+		}
+		prot_sg_cnt = ret;
 	}
-	prot_sg_cnt = ret;
 
 	ctx->type = RDMA_RW_SIG_MR;
 	ctx->nr_ops = 1;
-	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
-	if (!ctx->sig) {
+	ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
+	if (!ctx->reg) {
 		ret = -ENOMEM;
 		goto out_unmap_prot_sg;
 	}
 
-	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
-	if (ret < 0)
-		goto out_free_ctx;
-	count += ret;
-	prev_wr = &ctx->sig->data.reg_wr.wr;
-
-	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
-				  prot_sg, prot_sg_cnt, 0);
-	if (ret < 0)
-		goto out_destroy_data_mr;
-	count += ret;
-
-	if (ctx->sig->prot.inv_wr.next)
-		prev_wr->next = &ctx->sig->prot.inv_wr;
-	else
-		prev_wr->next = &ctx->sig->prot.reg_wr.wr;
-	prev_wr = &ctx->sig->prot.reg_wr.wr;
-
-	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
-	if (!ctx->sig->sig_mr) {
+	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
+	if (!ctx->reg->mr) {
 		ret = -EAGAIN;
-		goto out_destroy_prot_mr;
+		goto out_free_ctx;
 	}
 
-	if (ctx->sig->sig_mr->need_inval) {
-		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));
+	count += rdma_rw_inv_key(ctx->reg);
 
-		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
-		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;
+	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));
 
-		prev_wr->next = &ctx->sig->sig_inv_wr;
-		prev_wr = &ctx->sig->sig_inv_wr;
+	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
+			      prot_sg_cnt, NULL, SZ_4K);
+	if (unlikely(ret)) {
+		pr_err("failed to map PI sg (%d)\n", sg_cnt + prot_sg_cnt);
+		goto out_destroy_sig_mr;
 	}
 
-	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
-	ctx->sig->sig_wr.wr.wr_cqe = NULL;
-	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
-	ctx->sig->sig_wr.wr.num_sge = 1;
-	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
-	ctx->sig->sig_wr.sig_attrs = sig_attrs;
-	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
-	if (prot_sg_cnt)
-		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
-	prev_wr->next = &ctx->sig->sig_wr.wr;
-	prev_wr = &ctx->sig->sig_wr.wr;
+	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
+	ctx->reg->reg_wr.wr.wr_cqe = NULL;
+	ctx->reg->reg_wr.wr.num_sge = 0;
+	ctx->reg->reg_wr.wr.send_flags = 0;
+	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
+	if (rdma_protocol_iwarp(qp->device, port_num))
+		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
+	ctx->reg->reg_wr.mr = ctx->reg->mr;
+	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
 	count++;
 
-	ctx->sig->sig_sge.addr = 0;
-	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
-	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
-		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;
+	ctx->reg->sge.addr = ctx->reg->mr->iova;
+	ctx->reg->sge.length = ctx->reg->mr->length;
+	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
+		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;
 
-	rdma_wr = &ctx->sig->data.wr;
-	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
+	rdma_wr = &ctx->reg->wr;
+	rdma_wr->wr.sg_list = &ctx->reg->sge;
 	rdma_wr->wr.num_sge = 1;
 	rdma_wr->remote_addr = remote_addr;
 	rdma_wr->rkey = rkey;
@@ -439,23 +458,20 @@
 		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
 	else
 		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
-	prev_wr->next = &rdma_wr->wr;
-	prev_wr = &rdma_wr->wr;
+	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
 	count++;
 
 	return count;
 
-out_destroy_prot_mr:
-	if (prot_sg_cnt)
-		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
-out_destroy_data_mr:
-	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
+out_destroy_sig_mr:
+	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
 out_free_ctx:
-	kfree(ctx->sig);
+	kfree(ctx->reg);
 out_unmap_prot_sg:
-	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+	if (prot_sg_cnt)
+		rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
 out_unmap_sg:
-	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -496,21 +512,6 @@
 
 	switch (ctx->type) {
 	case RDMA_RW_SIG_MR:
-		rdma_rw_update_lkey(&ctx->sig->data, true);
-		if (ctx->sig->prot.mr)
-			rdma_rw_update_lkey(&ctx->sig->prot, true);
-
-		ctx->sig->sig_mr->need_inval = true;
-		ib_update_fast_reg_key(ctx->sig->sig_mr,
-			ib_inc_rkey(ctx->sig->sig_mr->lkey));
-		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;
-
-		if (ctx->sig->data.inv_wr.next)
-			first_wr = &ctx->sig->data.inv_wr;
-		else
-			first_wr = &ctx->sig->data.reg_wr.wr;
-		last_wr = &ctx->sig->data.wr.wr;
-		break;
 	case RDMA_RW_MR:
 		for (i = 0; i < ctx->nr_ops; i++) {
 			rdma_rw_update_lkey(&ctx->reg[i],
@@ -602,13 +603,13 @@
 		break;
 	}
 
-	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy);
 
 /**
  * rdma_rw_ctx_destroy_signature - release all resources allocated by
- *	rdma_rw_ctx_init_signature
+ *	rdma_rw_ctx_signature_init
  * @ctx: context to release
  * @qp: queue pair to operate on
  * @port_num: port num to which the connection is bound
@@ -626,16 +627,12 @@
 	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
 		return;
 
-	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
-	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
+	kfree(ctx->reg);
 
-	if (ctx->sig->prot.mr) {
-		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
-		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
-	}
-
-	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
-	kfree(ctx->sig);
+	if (prot_sg_cnt)
+		rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
 
@@ -656,7 +653,7 @@
 	unsigned int mr_pages;
 
 	if (rdma_rw_can_use_mr(device, port_num))
-		mr_pages = rdma_rw_fr_page_list_len(device);
+		mr_pages = rdma_rw_fr_page_list_len(device, false);
 	else
 		mr_pages = device->attrs.max_sge_rd;
 	return DIV_ROUND_UP(maxpages, mr_pages);
@@ -682,9 +679,8 @@
 	 * we'll need two additional MRs for the registrations and the
 	 * invalidation.
 	 */
-	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
-		factor += 6;	/* (inv + reg) * (data + prot + sig) */
-	else if (rdma_rw_can_use_mr(dev, attr->port_num))
+	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
+	    rdma_rw_can_use_mr(dev, attr->port_num))
 		factor += 2;	/* inv + reg */
 
 	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
@@ -700,20 +696,22 @@
 int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
 {
 	struct ib_device *dev = qp->pd->device;
-	u32 nr_mrs = 0, nr_sig_mrs = 0;
+	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
 	int ret = 0;
 
-	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
+	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
 		nr_sig_mrs = attr->cap.max_rdma_ctxs;
-		nr_mrs = attr->cap.max_rdma_ctxs * 2;
+		nr_mrs = attr->cap.max_rdma_ctxs;
+		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
 	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
 		nr_mrs = attr->cap.max_rdma_ctxs;
+		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
 	}
 
 	if (nr_mrs) {
 		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
 				IB_MR_TYPE_MEM_REG,
-				rdma_rw_fr_page_list_len(dev));
+				max_num_sg, 0);
 		if (ret) {
 			pr_err("%s: failed to allocated %d MRs\n",
 				__func__, nr_mrs);
@@ -723,10 +721,10 @@
 
 	if (nr_sig_mrs) {
 		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
-				IB_MR_TYPE_SIGNATURE, 2);
+				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
 		if (ret) {
 			pr_err("%s: failed to allocated %d SIG MRs\n",
-				__func__, nr_mrs);
+				__func__, nr_sig_mrs);
 			goto out_free_rdma_mrs;
 		}
 	}
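
For orientation, and not part of the patch above: a minimal sketch of how an upper-layer driver typically drives the rdma_rw_ctx API whose implementation changes in this diff. It assumes an already-connected QP created with enough max_rdma_ctxs, an unmapped scatterlist describing the local buffer, and a completion handler installed in the caller's ib_cqe; the struct and function names (example_req, example_rdma_read) are illustrative only.

```c
/* Illustrative sketch -- not part of the diff above. */
#include <rdma/rw.h>

/* Per-I/O state; must stay alive until the completion handler runs. */
struct example_req {
	struct rdma_rw_ctx	rw;
	struct ib_cqe		cqe;	/* cqe.done set up by the caller */
};

static int example_rdma_read(struct example_req *req, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		u64 remote_addr, u32 rkey)
{
	int ret;

	/*
	 * Map the local scatterlist and build the work request chain.
	 * Depending on rdma_rw_io_needs_mr() this is either plain RDMA READ
	 * WRs or an MR registration followed by a single READ.
	 */
	ret = rdma_rw_ctx_init(&req->rw, qp, port_num, sg, sg_cnt, 0,
			remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* Post the chain; req->cqe.done() runs when the last WR completes. */
	ret = rdma_rw_ctx_post(&req->rw, qp, port_num, &req->cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(&req->rw, qp, port_num, sg, sg_cnt,
				DMA_FROM_DEVICE);
	return ret;
}
```

On success the completion path is expected to call rdma_rw_ctx_destroy() (or rdma_rw_ctx_destroy_signature() for PI requests), which with this patch also routes the unmap through rdma_rw_unmap_sg() so PCI P2PDMA pages are handled correctly.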