...
 
 #include <rdma/rw.h>
 
+#include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/rpc_rdma.h>
 #include <linux/sunrpc/svc_rdma.h>
-#include <linux/sunrpc/debug.h>
 
 #include "xprt_rdma.h"
 #include <trace/events/rpcrdma.h>
-
-#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
...
 struct svc_rdma_rw_ctxt {
 	struct list_head	rw_list;
 	struct rdma_rw_ctx	rw_ctx;
-	int			rw_nents;
+	unsigned int		rw_nents;
 	struct sg_table		rw_sg_table;
-	struct scatterlist	rw_first_sgl[0];
+	struct scatterlist	rw_first_sgl[];
 };
 
 static inline struct svc_rdma_rw_ctxt *
...
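
The struct change above swaps a GCC zero-length array (`rw_first_sgl[0]`) for a C99 flexible array member (`rw_first_sgl[]`), which lets the compiler and bounds checkers see the trailing array for what it is, and widens `rw_nents` to match the unsigned counts used by the scatterlist API. A minimal userspace sketch of the pattern; the struct and names here are illustrative, not the kernel definitions:

```c
#include <stdlib.h>
#include <string.h>

struct sg_entry { void *addr; unsigned int len; };

struct ctxt {
	unsigned int nents;
	struct sg_entry first_sgl[];	/* C99 flexible array member */
};

static struct ctxt *ctxt_alloc(unsigned int nents)
{
	/* One allocation covers the header and the trailing array. */
	struct ctxt *c = malloc(sizeof(*c) + nents * sizeof(c->first_sgl[0]));

	if (c) {
		memset(c, 0, sizeof(*c));	/* zero the header only */
		c->nents = nents;
	}
	return c;
}
```
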
 		spin_unlock(&rdma->sc_rw_ctxt_lock);
 	} else {
 		spin_unlock(&rdma->sc_rw_ctxt_lock);
-		ctxt = kmalloc(sizeof(*ctxt) +
-			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
+		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
 			       GFP_KERNEL);
 		if (!ctxt)
-			goto out;
+			goto out_noctx;
 		INIT_LIST_HEAD(&ctxt->rw_list);
 	}
 
 	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
 	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
-				   ctxt->rw_sg_table.sgl)) {
-		kfree(ctxt);
-		ctxt = NULL;
-	}
-out:
+				   ctxt->rw_sg_table.sgl,
+				   SG_CHUNK_SIZE))
+		goto out_free;
 	return ctxt;
+
+out_free:
+	kfree(ctxt);
+out_noctx:
+	trace_svcrdma_no_rwctx_err(rdma, sges);
+	return NULL;
 }
 
 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
 				 struct svc_rdma_rw_ctxt *ctxt)
 {
-	sg_free_table_chained(&ctxt->rw_sg_table, true);
+	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
 
 	spin_lock(&rdma->sc_rw_ctxt_lock);
 	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
...
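
`struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE)` computes `sizeof(*ctxt)` plus the size of `SG_CHUNK_SIZE` trailing array elements, saturating rather than wrapping on overflow so that `kmalloc()` fails instead of under-allocating. A simplified userspace model of what the `<linux/overflow.h>` macro computes; this is an illustration, not the real macro:

```c
#include <stdint.h>
#include <stddef.h>

/* Equivalent in spirit to:
 *	struct_size(ctxt, rw_first_sgl, count)
 *	== sizeof(*ctxt) + count * sizeof(*ctxt->rw_first_sgl), checked
 */
static size_t struct_size_model(size_t base, size_t elem, size_t count)
{
	/* If elem * count would push past SIZE_MAX - base, saturate so
	 * the allocator rejects the request outright. */
	if (count && elem > (SIZE_MAX - base) / count)
		return SIZE_MAX;
	return base + elem * count;
}
```
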
 	}
 }
 
+/**
+ * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
+ * @rdma: controlling transport instance
+ * @ctxt: R/W context to prepare
+ * @offset: RDMA offset
+ * @handle: RDMA tag/handle
+ * @direction: I/O direction
+ *
+ * Returns on success, the number of WQEs that will be needed
+ * on the workqueue, or a negative errno.
+ */
+static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
+				struct svc_rdma_rw_ctxt *ctxt,
+				u64 offset, u32 handle,
+				enum dma_data_direction direction)
+{
+	int ret;
+
+	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
+			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
+			       0, offset, handle, direction);
+	if (unlikely(ret < 0)) {
+		svc_rdma_put_rw_ctxt(rdma, ctxt);
+		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
+	}
+	return ret;
+}
+
 /* A chunk context tracks all I/O for moving one Read or Write
- * chunk. This is a a set of rdma_rw's that handle data movement
+ * chunk. This is a set of rdma_rw's that handle data movement
  * for all segments of one chunk.
  *
  * These are small, acquired with a single allocator call, and
...
  * demand, and not cached.
  */
 struct svc_rdma_chunk_ctxt {
+	struct rpc_rdma_cid	cc_cid;
 	struct ib_cqe		cc_cqe;
 	struct svcxprt_rdma	*cc_rdma;
 	struct list_head	cc_rwctxts;
 	int			cc_sqecount;
 };
 
+static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
+				 struct rpc_rdma_cid *cid)
+{
+	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
 			     struct svc_rdma_chunk_ctxt *cc)
 {
+	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
 	cc->cc_rdma = rdma;
-	svc_xprt_get(&rdma->sc_xprt);
 
 	INIT_LIST_HEAD(&cc->cc_rwctxts);
 	cc->cc_sqecount = 0;
...
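
Each chunk context now carries an `rpc_rdma_cid`: the send CQ's resource id plus a per-transport sequence number, so completion tracepoints can be correlated back to the work that was posted. A minimal model of the scheme using C11 atomics in place of the kernel's `atomic_t`; types and names are illustrative:

```c
#include <stdatomic.h>
#include <stdint.h>

struct rpc_rdma_cid_model {
	uint32_t ci_queue_id;		/* which CQ the completion lands on */
	uint32_t ci_completion_id;	/* which posting on that CQ */
};

struct transport_model {
	uint32_t sq_cq_id;		/* stable resource id of the send CQ */
	atomic_uint completion_ids;	/* monotonically increasing counter */
};

static void cc_cid_init(struct transport_model *t,
			struct rpc_rdma_cid_model *cid)
{
	cid->ci_queue_id = t->sq_cq_id;
	/* atomic_inc_return() semantics: increment first, then read */
	cid->ci_completion_id = atomic_fetch_add(&t->completion_ids, 1) + 1;
}
```
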
 					    ctxt->rw_nents, dir);
 		svc_rdma_put_rw_ctxt(rdma, ctxt);
 	}
-	svc_xprt_put(&rdma->sc_xprt);
 }
 
 /* State for sending a Write or Reply chunk.
...
 	struct svc_rdma_write_info *info =
 			container_of(cc, struct svc_rdma_write_info, wi_cc);
 
-	trace_svcrdma_wc_write(wc);
+	trace_svcrdma_wc_write(wc, &cc->cc_cid);
 
 	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 	wake_up(&rdma->sc_send_wait);
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (unlikely(wc->status != IB_WC_SUCCESS))
 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
-			       ib_wc_status_msg(wc->status),
-			       wc->status, wc->vendor_err);
-	}
 
 	svc_rdma_write_info_free(info);
 }
...
 	struct svc_rdma_read_info *info =
 			container_of(cc, struct svc_rdma_read_info, ri_cc);
 
-	trace_svcrdma_wc_read(wc);
+	trace_svcrdma_wc_read(wc, &cc->cc_cid);
 
 	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 	wake_up(&rdma->sc_send_wait);
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
-			       ib_wc_status_msg(wc->status),
-			       wc->status, wc->vendor_err);
 		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
 	} else {
 		spin_lock(&rdma->sc_rq_dto_lock);
 		list_add_tail(&info->ri_readctxt->rc_list,
 			      &rdma->sc_read_complete_q);
+		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
+		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
 		spin_unlock(&rdma->sc_rq_dto_lock);
 
-		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
 		svc_xprt_enqueue(&rdma->sc_xprt);
 	}
 
...
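
Moving `set_bit(XPT_DATA, ...)` inside the critical section orders the flag with the queued read context: the unlock's release semantics publish both stores together, pairing with the `smp_rmb()` in `svc_xprt_ready()` as the new comment notes. A sketch of the same publish-under-lock pattern with pthreads; names are illustrative and the mutex is assumed to be initialized elsewhere:

```c
#include <pthread.h>
#include <stdbool.h>

struct item { struct item *next; };

struct queue {
	pthread_mutex_t lock;	/* assume PTHREAD_MUTEX_INITIALIZER */
	struct item *head;
	bool data_ready;	/* stands in for XPT_DATA */
};

static void publish(struct queue *q, struct item *it)
{
	pthread_mutex_lock(&q->lock);
	it->next = q->head;
	q->head = it;		/* queue the completed read */
	q->data_ready = true;	/* set the flag *before* unlocking */
	pthread_mutex_unlock(&q->lock);	/* release: both stores visible */
}
```
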
 	do {
 		if (atomic_sub_return(cc->cc_sqecount,
 				      &rdma->sc_sq_avail) > 0) {
+			trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
 			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
 			if (ret)
 				break;
...
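
Posting reserves `cc_sqecount` send-queue entries by decrementing `sc_sq_avail`; if the counter goes non-positive, the credits are returned and the caller waits until a completion replenishes them (the `atomic_add()`/`wake_up()` in the handlers above). A model of that credit check, assuming the kernel's subtract-then-test semantics; this is the idea, not the implementation:

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sq_avail;	/* free send-queue entries */

static bool try_reserve_sqes(int needed)
{
	/* atomic_sub_return() returns the post-subtraction value;
	 * atomic_fetch_sub() returns the old value, so adjust. */
	if (atomic_fetch_sub(&sq_avail, needed) - needed > 0)
		return true;	/* enough credits: safe to ib_post_send() */

	atomic_fetch_add(&sq_avail, needed);	/* back out and wait */
	return false;
}
```
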
 	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
 	do {
 		unsigned int write_len;
-		u32 seg_length, seg_handle;
-		u64 seg_offset;
+		u32 handle, length;
+		u64 offset;
 
 		if (info->wi_seg_no >= info->wi_nsegs)
 			goto out_overflow;
 
-		seg_handle = be32_to_cpup(seg);
-		seg_length = be32_to_cpup(seg + 1);
-		xdr_decode_hyper(seg + 2, &seg_offset);
-		seg_offset += info->wi_seg_off;
+		xdr_decode_rdma_segment(seg, &handle, &length, &offset);
+		offset += info->wi_seg_off;
 
-		write_len = min(remaining, seg_length - info->wi_seg_off);
+		write_len = min(remaining, length - info->wi_seg_off);
 		ctxt = svc_rdma_get_rw_ctxt(rdma,
 					    (write_len >> PAGE_SHIFT) + 2);
 		if (!ctxt)
-			goto out_noctx;
+			return -ENOMEM;
 
 		constructor(info, write_len, ctxt);
-		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
-				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
-				       ctxt->rw_nents, 0, seg_offset,
-				       seg_handle, DMA_TO_DEVICE);
+		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
+					   DMA_TO_DEVICE);
 		if (ret < 0)
-			goto out_initerr;
+			return -EIO;
 
-		trace_svcrdma_encode_wseg(seg_handle, write_len, seg_offset);
+		trace_svcrdma_send_wseg(handle, write_len, offset);
+
 		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 		cc->cc_sqecount += ret;
-		if (write_len == seg_length - info->wi_seg_off) {
+		if (write_len == length - info->wi_seg_off) {
 			seg += 4;
 			info->wi_seg_no++;
 			info->wi_seg_off = 0;
...
 	return 0;
 
 out_overflow:
-	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
-		info->wi_nsegs);
+	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
+				     info->wi_nsegs);
 	return -E2BIG;
-
-out_noctx:
-	dprintk("svcrdma: no R/W ctxs available\n");
-	return -ENOMEM;
-
-out_initerr:
-	svc_rdma_put_rw_ctxt(rdma, ctxt);
-	trace_svcrdma_dma_map_rwctx(rdma, ret);
-	return -EIO;
 }
 
 /* Send one of an xdr_buf's kvecs by itself. To send a Reply
...
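
`xdr_decode_rdma_segment()` packages the open-coded decode that this hunk deletes: an RDMA segment on the wire is four XDR words, a 32-bit handle, a 32-bit length, and a 64-bit offset, all big-endian, which is why the loop advances with `seg += 4`. A plausible userspace reconstruction of the helper, assuming it mirrors the removed `be32_to_cpup()`/`xdr_decode_hyper()` sequence; the real definition lives in `<linux/sunrpc/rpc_rdma.h>`:

```c
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpup() */

static const uint32_t *decode_rdma_segment(const uint32_t *p, uint32_t *handle,
					   uint32_t *length, uint64_t *offset)
{
	*handle = ntohl(*p++);
	*length = ntohl(*p++);
	/* xdr_decode_hyper(): two big-endian words make one u64 */
	*offset = ((uint64_t)ntohl(*p++)) << 32;
	*offset |= ntohl(*p++);
	return p;	/* points past the segment, like the read path uses */
}
```
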
 			 vec->iov_len);
 }
 
-/* Send an xdr_buf's page list by itself. A Write chunk is
- * just the page list. a Reply chunk is the head, page list,
- * and tail. This function is shared between the two types
- * of chunk.
+/* Send an xdr_buf's page list by itself. A Write chunk is just
+ * the page list. A Reply chunk is @xdr's head, page list, and
+ * tail. This function is shared between the two types of chunk.
  */
 static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
-				      struct xdr_buf *xdr)
+				      struct xdr_buf *xdr,
+				      unsigned int offset,
+				      unsigned long length)
 {
 	info->wi_xdr = xdr;
-	info->wi_next_off = 0;
+	info->wi_next_off = offset - xdr->head[0].iov_len;
 	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
-				     xdr->page_len);
+				     length);
 }
 
 /**
...
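
The new `offset`/`length` parameters describe the payload relative to the whole `xdr_buf`, but `svc_rdma_build_writes()` walks only the page list, which begins after `head[0]`; hence `wi_next_off = offset - xdr->head[0].iov_len`. A worked example of that arithmetic with model types, not the kernel's:

```c
#include <assert.h>

struct kvec_model { unsigned int iov_len; };
struct xdr_buf_model {
	struct kvec_model head[1];	/* inline header kvec */
	unsigned int page_len;		/* bytes in the page list */
};

int main(void)
{
	struct xdr_buf_model xdr = { .head = { { .iov_len = 128 } },
				     .page_len = 8192 };
	unsigned int payload_offset = 128;	/* payload starts at the pages */

	/* Same arithmetic as info->wi_next_off = offset - head[0].iov_len */
	unsigned int next_off = payload_offset - xdr.head[0].iov_len;
	assert(next_off == 0);	/* page list is written from its start */
	return 0;
}
```
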
  * @rdma: controlling RDMA transport
  * @wr_ch: Write chunk provided by client
  * @xdr: xdr_buf containing the data payload
+ * @offset: payload's byte offset in @xdr
+ * @length: size of payload, in bytes
  *
  * Returns a non-negative number of bytes the chunk consumed, or
  * %-E2BIG if the payload was larger than the Write chunk,
...
  * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
  */
 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
-			      struct xdr_buf *xdr)
+			      struct xdr_buf *xdr,
+			      unsigned int offset, unsigned long length)
 {
 	struct svc_rdma_write_info *info;
 	int ret;
 
-	if (!xdr->page_len)
+	if (!length)
 		return 0;
 
 	info = svc_rdma_write_info_alloc(rdma, wr_ch);
 	if (!info)
 		return -ENOMEM;
 
-	ret = svc_rdma_send_xdr_pagelist(info, xdr);
+	ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
 	if (ret < 0)
 		goto out_err;
 
...
 	if (ret < 0)
 		goto out_err;
 
-	trace_svcrdma_encode_write(xdr->page_len);
-	return xdr->page_len;
+	trace_svcrdma_send_write_chunk(xdr->page_len);
+	return length;
 
 out_err:
 	svc_rdma_write_info_free(info);
...
 /**
  * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
  * @rdma: controlling RDMA transport
- * @rp_ch: Reply chunk provided by client
- * @writelist: true if client provided a Write list
+ * @rctxt: Write and Reply chunks from client
  * @xdr: xdr_buf containing an RPC Reply
  *
  * Returns a non-negative number of bytes the chunk consumed, or
...
  * %-ENOTCONN if posting failed (connection is lost),
  * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
  */
-int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
-			      bool writelist, struct xdr_buf *xdr)
+int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
+			      const struct svc_rdma_recv_ctxt *rctxt,
+			      struct xdr_buf *xdr)
 {
 	struct svc_rdma_write_info *info;
 	int consumed, ret;
 
-	info = svc_rdma_write_info_alloc(rdma, rp_ch);
+	info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
 	if (!info)
 		return -ENOMEM;
 
...
 	/* Send the page list in the Reply chunk only if the
 	 * client did not provide Write chunks.
 	 */
-	if (!writelist && xdr->page_len) {
-		ret = svc_rdma_send_xdr_pagelist(info, xdr);
+	if (!rctxt->rc_write_list && xdr->page_len) {
+		ret = svc_rdma_send_xdr_pagelist(info, xdr,
+						 xdr->head[0].iov_len,
+						 xdr->page_len);
 		if (ret < 0)
 			goto out_err;
 		consumed += xdr->page_len;
...
 	if (ret < 0)
 		goto out_err;
 
-	trace_svcrdma_encode_reply(consumed);
+	trace_svcrdma_send_reply_chunk(consumed);
 	return consumed;
 
 out_err:
...
 	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
 	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
 	if (!ctxt)
-		goto out_noctx;
+		return -ENOMEM;
 	ctxt->rw_nents = sge_no;
 
 	sg = ctxt->rw_sg_table.sgl;
...
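
The `sge_no` computation above sizes the scatterlist: a segment of `len` bytes that begins `ri_pageoff` bytes into a page touches `PAGE_ALIGN(ri_pageoff + len) >> PAGE_SHIFT` pages. A small self-checking example of the arithmetic, assuming 4 KiB pages:

```c
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* 100 bytes into a page, 8000-byte segment: ends at byte 8100,
	 * so it touches two pages (0..4095 and 4096..8191). */
	unsigned long pageoff = 100, len = 8000;
	assert((PAGE_ALIGN(pageoff + len) >> PAGE_SHIFT) == 2);

	/* Exactly one page, page-aligned: a single SGE suffices. */
	assert((PAGE_ALIGN(0 + PAGE_SIZE) >> PAGE_SHIFT) == 1);
	return 0;
}
```
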
 			goto out_overrun;
 	}
 
-	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
-			       cc->cc_rdma->sc_port_num,
-			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
-			       0, offset, rkey, DMA_FROM_DEVICE);
+	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
+				   DMA_FROM_DEVICE);
 	if (ret < 0)
-		goto out_initerr;
+		return -EIO;
 
 	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 	cc->cc_sqecount += ret;
 	return 0;
 
-out_noctx:
-	dprintk("svcrdma: no R/W ctxs available\n");
-	return -ENOMEM;
-
 out_overrun:
-	dprintk("svcrdma: request overruns rq_pages\n");
+	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
 	return -EINVAL;
-
-out_initerr:
-	trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
-	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
-	return -EIO;
 }
 
 /* Walk the segments in the Read chunk starting at @p and construct
...
 	ret = -EINVAL;
 	info->ri_chunklen = 0;
 	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
-		u32 rs_handle, rs_length;
-		u64 rs_offset;
+		u32 handle, length;
+		u64 offset;
 
-		rs_handle = be32_to_cpup(p++);
-		rs_length = be32_to_cpup(p++);
-		p = xdr_decode_hyper(p, &rs_offset);
-
-		ret = svc_rdma_build_read_segment(info, rqstp,
-						  rs_handle, rs_length,
-						  rs_offset);
+		p = xdr_decode_rdma_segment(p, &handle, &length, &offset);
+		ret = svc_rdma_build_read_segment(info, rqstp, handle, length,
+						  offset);
 		if (ret < 0)
 			break;
 
-		trace_svcrdma_encode_rseg(rs_handle, rs_length, rs_offset);
-		info->ri_chunklen += rs_length;
+		trace_svcrdma_send_rseg(handle, length, offset);
+		info->ri_chunklen += length;
 	}
 
 	return ret;
...
 	if (ret < 0)
 		goto out;
 
-	trace_svcrdma_encode_read(info->ri_chunklen, info->ri_position);
+	trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);
 
 	head->rc_hdr_count = 0;
 
...
 	if (ret < 0)
 		goto out;
 
-	trace_svcrdma_encode_pzr(info->ri_chunklen);
+	trace_svcrdma_send_pzr(info->ri_chunklen);
 
 	head->rc_arg.len += info->ri_chunklen;
 	head->rc_arg.buflen += info->ri_chunklen;