2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
--- a/kernel/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/kernel/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -44,8 +44,10 @@
 
 #include <linux/wait.h>		/* wait_queue_head_t, etc */
 #include <linux/spinlock.h>	/* spinlock_t, etc */
-#include <linux/atomic.h>	/* atomic_t, etc */
+#include <linux/atomic.h>	/* atomic_t, etc */
+#include <linux/kref.h>		/* struct kref */
 #include <linux/workqueue.h>	/* struct work_struct */
+#include <linux/llist.h>
 
 #include <rdma/rdma_cm.h>	/* RDMA connection api */
 #include <rdma/ib_verbs.h>	/* RDMA verbs api */
@@ -63,117 +65,103 @@
 #define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)
 
 /*
- * Interface Adapter -- one per transport instance
+ * RDMA Endpoint -- connection endpoint details
  */
-struct rpcrdma_ia {
-	const struct rpcrdma_memreg_ops	*ri_ops;
-	struct ib_device	*ri_device;
-	struct rdma_cm_id	*ri_id;
-	struct ib_pd		*ri_pd;
-	struct completion	ri_done;
-	struct completion	ri_remove_done;
-	int			ri_async_rc;
-	unsigned int		ri_max_segs;
-	unsigned int		ri_max_frwr_depth;
-	unsigned int		ri_max_inline_write;
-	unsigned int		ri_max_inline_read;
-	unsigned int		ri_max_send_sges;
-	bool			ri_implicit_roundup;
-	enum ib_mr_type		ri_mrtype;
-	unsigned long		ri_flags;
-	struct ib_qp_attr	ri_qp_attr;
-	struct ib_qp_init_attr	ri_qp_init_attr;
-};
-
-enum {
-	RPCRDMA_IAF_REMOVING = 0,
-};
-
-/*
- * RDMA Endpoint -- one per transport instance
- */
-
 struct rpcrdma_ep {
-	unsigned int		rep_send_count;
-	unsigned int		rep_send_batch;
-	int			rep_connected;
-	struct ib_qp_init_attr	rep_attr;
-	wait_queue_head_t	rep_connect_wait;
-	struct rpcrdma_connect_private	rep_cm_private;
-	struct rdma_conn_param	rep_remote_cma;
-	struct delayed_work	rep_connect_worker;
+	struct kref		re_kref;
+	struct rdma_cm_id	*re_id;
+	struct ib_pd		*re_pd;
+	unsigned int		re_max_rdma_segs;
+	unsigned int		re_max_fr_depth;
+	bool			re_implicit_roundup;
+	enum ib_mr_type		re_mrtype;
+	struct completion	re_done;
+	unsigned int		re_send_count;
+	unsigned int		re_send_batch;
+	unsigned int		re_max_inline_send;
+	unsigned int		re_max_inline_recv;
+	int			re_async_rc;
+	int			re_connect_status;
+	atomic_t		re_force_disconnect;
+	struct ib_qp_init_attr	re_attr;
+	wait_queue_head_t	re_connect_wait;
+	struct rpc_xprt		*re_xprt;
+	struct rpcrdma_connect_private
+				re_cm_private;
+	struct rdma_conn_param	re_remote_cma;
+	int			re_receive_count;
+	unsigned int		re_max_requests; /* depends on device */
+	unsigned int		re_inline_send;	/* negotiated */
+	unsigned int		re_inline_recv;	/* negotiated */
 };
 
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
- * allocated when the forward channel is set up.
+ * allocated when the forward channel is set up, long before the
+ * backchannel is provisioned. This value is two times
+ * NFS4_DEF_CB_SLOT_TABLE_SIZE.
  */
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
-#define RPCRDMA_BACKWARD_WRS (8)
+#define RPCRDMA_BACKWARD_WRS (32)
 #else
-#define RPCRDMA_BACKWARD_WRS (0)
+#define RPCRDMA_BACKWARD_WRS (0)
 #endif
 
 /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
- *
- * The below structure appears at the front of a large region of kmalloc'd
- * memory, which always starts on a good alignment boundary.
  */
 
 struct rpcrdma_regbuf {
 	struct ib_sge		rg_iov;
 	struct ib_device	*rg_device;
 	enum dma_data_direction	rg_direction;
-	__be32			rg_base[0] __attribute__ ((aligned(256)));
+	void			*rg_data;
 };
 
-static inline u64
-rdmab_addr(struct rpcrdma_regbuf *rb)
+static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
 {
 	return rb->rg_iov.addr;
 }
 
-static inline u32
-rdmab_length(struct rpcrdma_regbuf *rb)
+static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
 {
 	return rb->rg_iov.length;
 }
 
-static inline u32
-rdmab_lkey(struct rpcrdma_regbuf *rb)
+static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
 {
 	return rb->rg_iov.lkey;
 }
 
-static inline struct ib_device *
-rdmab_device(struct rpcrdma_regbuf *rb)
+static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
 {
 	return rb->rg_device;
+}
+
+static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
+{
+	return rb->rg_data;
 }
 
 #define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
 
 /* To ensure a transport can always make forward progress,
  * the number of RDMA segments allowed in header chunk lists
- * is capped at 8. This prevents less-capable devices and
- * memory registrations from overrunning the Send buffer
- * while building chunk lists.
+ * is capped at 16. This prevents less-capable devices from
+ * overrunning the Send buffer while building chunk lists.
  *
  * Elements of the Read list take up more room than the
- * Write list or Reply chunk. 8 read segments means the Read
- * list (or Write list or Reply chunk) cannot consume more
- * than
+ * Write list or Reply chunk. 16 read segments means the
+ * chunk lists cannot consume more than
  *
- * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
+ * ((16 + 2) * read segment size) + 1 XDR words,
  *
- * And the fixed part of the header is another 24 bytes.
- *
- * The smallest inline threshold is 1024 bytes, ensuring that
- * at least 750 bytes are available for RPC messages.
+ * or about 400 bytes. The fixed part of the header is
+ * another 24 bytes. Thus when the inline threshold is
+ * 1024 bytes, at least 600 bytes are available for RPC
+ * message bodies.
  */
 enum {
-	RPCRDMA_MAX_HDR_SEGS = 8,
-	RPCRDMA_HDRBUF_SIZE = 256,
+	RPCRDMA_MAX_HDR_SEGS = 16,
 };
 
 /*
@@ -200,35 +188,32 @@
 	bool			rr_temp;
 	struct rpcrdma_regbuf	*rr_rdmabuf;
 	struct rpcrdma_xprt	*rr_rxprt;
-	struct work_struct	rr_work;
+	struct rpc_rqst		*rr_rqst;
 	struct xdr_buf		rr_hdrbuf;
 	struct xdr_stream	rr_stream;
-	struct rpc_rqst		*rr_rqst;
-	struct list_head	rr_list;
+	struct llist_node	rr_node;
 	struct ib_recv_wr	rr_recv_wr;
+	struct list_head	rr_all;
+};
+
+/* To reduce the rate at which a transport invokes ib_post_recv
+ * (and thus the hardware doorbell rate), xprtrdma posts Receive
+ * WRs in batches.
+ *
+ * Setting this to zero disables Receive post batching.
+ */
+enum {
+	RPCRDMA_MAX_RECV_BATCH = 7,
 };
 
 /* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
  */
 struct rpcrdma_req;
-struct rpcrdma_xprt;
 struct rpcrdma_sendctx {
-	struct ib_send_wr	sc_wr;
 	struct ib_cqe		sc_cqe;
-	struct rpcrdma_xprt	*sc_xprt;
 	struct rpcrdma_req	*sc_req;
 	unsigned int		sc_unmap_count;
 	struct ib_sge		sc_sges[];
-};
-
-/* Limit the number of SGEs that can be unmapped during one
- * Send completion. This caps the amount of work a single
- * completion can do before returning to the provider.
- *
- * Setting this to zero disables Send completion batching.
- */
-enum {
-	RPCRDMA_MAX_SEND_BATCH = 7,
 };
 
 /*
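RPCRDMA_MAX_RECV_BATCH bounds how often the Receive doorbell rings: rather than one ib_post_recv() per Receive WR, WRs are chained through their next pointers and posted with a single verb call. A minimal sketch of the pattern, using the rr_recv_wr field declared above; the helper name is hypothetical (the real logic is in rpcrdma_post_recvs() in verbs.c):

/* Chain "needed" Receive WRs and post them with one doorbell. */
static void demo_post_recv_batch(struct ib_qp *qp,
				 struct rpcrdma_rep **reps, int needed)
{
	struct ib_recv_wr *first = NULL;
	const struct ib_recv_wr *bad_wr;
	int i;

	for (i = 0; i < needed; i++) {
		struct ib_recv_wr *wr = &reps[i]->rr_recv_wr;

		wr->next = first;	/* link into the chain */
		first = wr;
	}

	/* One verb call posts the entire chain */
	if (first && ib_post_recv(qp, first, &bad_wr))
		pr_warn("demo: Receive chain failed to post\n");
}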
@@ -236,25 +221,10 @@
  *
  * An external memory region is any buffer or page that is registered
  * on the fly (ie, not pre-registered).
- *
- * Each rpcrdma_buffer has a list of free MWs anchored in rb_mrs. During
- * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
- * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
- * track of registration metadata while each RPC is pending.
- * rpcrdma_deregister_external() uses this metadata to unmap and
- * release these resources when an RPC is complete.
  */
-enum rpcrdma_frwr_state {
-	FRWR_IS_INVALID,	/* ready to be used */
-	FRWR_IS_VALID,		/* in use */
-	FRWR_FLUSHED_FR,	/* flushed FASTREG WR */
-	FRWR_FLUSHED_LI,	/* flushed LOCALINV WR */
-};
-
 struct rpcrdma_frwr {
 	struct ib_mr		*fr_mr;
 	struct ib_cqe		fr_cqe;
-	enum rpcrdma_frwr_state	fr_state;
 	struct completion	fr_linv_done;
 	union {
 		struct ib_reg_wr	fr_regwr;
@@ -262,20 +232,14 @@
 	};
 };
 
-struct rpcrdma_fmr {
-	struct ib_fmr		*fm_mr;
-	u64			*fm_physaddrs;
-};
-
+struct rpcrdma_req;
 struct rpcrdma_mr {
 	struct list_head	mr_list;
+	struct rpcrdma_req	*mr_req;
 	struct scatterlist	*mr_sg;
 	int			mr_nents;
 	enum dma_data_direction	mr_dir;
-	union {
-		struct rpcrdma_fmr	fmr;
-		struct rpcrdma_frwr	frwr;
-	};
+	struct rpcrdma_frwr	frwr;
 	struct rpcrdma_xprt	*mr_xprt;
 	u32			mr_handle;
 	u32			mr_length;
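With FMR gone, struct rpcrdma_mr embeds struct rpcrdma_frwr directly, and the new mr_req field points from an MR back to its owning rpcrdma_req, so a completion handler can recover everything from the CQE via container_of() and the back pointer. An illustrative handler built on the declarations above (the function name is hypothetical; compare the frwr_wc_* handlers in frwr_ops.c):

static void demo_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_req *req = mr->mr_req;	/* new back pointer */

	pr_debug("req %p: LOCAL_INV status %d\n", req, wc->status);
	complete(&frwr->fr_linv_done);
}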
@@ -337,26 +301,21 @@
 struct rpcrdma_req {
 	struct list_head	rl_list;
 	struct rpc_rqst		rl_slot;
-	struct rpcrdma_buffer	*rl_buffer;
 	struct rpcrdma_rep	*rl_reply;
 	struct xdr_stream	rl_stream;
 	struct xdr_buf		rl_hdrbuf;
+	struct ib_send_wr	rl_wr;
 	struct rpcrdma_sendctx	*rl_sendctx;
 	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
 	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
 	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */
 
 	struct list_head	rl_all;
-	unsigned long		rl_flags;
+	struct kref		rl_kref;
 
-	struct list_head	rl_registered;	/* registered segments */
+	struct list_head	rl_free_mrs;
+	struct list_head	rl_registered;
 	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
-};
-
-/* rl_flags */
-enum {
-	RPCRDMA_REQ_F_PENDING = 0,
-	RPCRDMA_REQ_F_TX_RESOURCES,
 };
 
 static inline struct rpcrdma_req *
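The deleted rl_flags bits tracked a race: an rpcrdma_req may not be retired until both its Send has completed and its Reply has been processed, in either order. The new rl_kref turns that into a plain refcount. A generic sketch of the pattern against the structure above, with hypothetical helper names (the real release logic lives in rpc_rdma.c):

/* Invoked by whichever of the two completions happens last */
static void demo_req_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	/* Send and Reply are both finished; safe to retire "req" */
	pr_debug("retiring req %p\n", req);
}

static void demo_marshal(struct rpcrdma_req *req)
{
	kref_init(&req->rl_kref);	/* reference for the Reply path */
	kref_get(&req->rl_kref);	/* reference for the Send completion */
}

/* Called from both the Send completion and the Reply handler */
static void demo_complete_one_side(struct rpcrdma_req *req)
{
	kref_put(&req->rl_kref, demo_req_done);
}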
@@ -368,7 +327,7 @@
 static inline void
 rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
 {
-	list_add_tail(&mr->mr_list, list);
+	list_add(&mr->mr_list, list);
 }
 
 static inline struct rpcrdma_mr *
@@ -376,8 +335,9 @@
 {
 	struct rpcrdma_mr *mr;
 
-	mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
-	list_del_init(&mr->mr_list);
+	mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
+	if (mr)
+		list_del_init(&mr->mr_list);
 	return mr;
 }
 
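Returning NULL from an empty list, instead of assuming the list is populated, lets callers drain MR lists with a bare loop and no emptiness pre-check. A sketch of the expected caller shape under the new semantics (real callers are in frwr_ops.c and rpc_rdma.c):

static void demo_drain_registered(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	/* Terminates cleanly when rl_registered is exhausted */
	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}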
@@ -388,53 +348,28 @@
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-	spinlock_t		rb_mrlock;	/* protect rb_mrs list */
+	spinlock_t		rb_lock;
+	struct list_head	rb_send_bufs;
 	struct list_head	rb_mrs;
-	struct list_head	rb_all;
 
 	unsigned long		rb_sc_head;
 	unsigned long		rb_sc_tail;
 	unsigned long		rb_sc_last;
 	struct rpcrdma_sendctx	**rb_sc_ctxs;
 
-	spinlock_t		rb_lock;	/* protect buf lists */
-	struct list_head	rb_send_bufs;
-	struct list_head	rb_recv_bufs;
-	unsigned long		rb_flags;
-	u32			rb_max_requests;
+	struct list_head	rb_allreqs;
+	struct list_head	rb_all_mrs;
+	struct list_head	rb_all_reps;
+
+	struct llist_head	rb_free_reps;
+
+	__be32			rb_max_requests;
 	u32			rb_credits;	/* most recent credit grant */
-	int			rb_posted_receives;
 
 	u32			rb_bc_srv_max_requests;
-	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
-	struct list_head	rb_allreqs;
-
 	u32			rb_bc_max_requests;
 
-	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
-	struct list_head	rb_stale_mrs;
-	struct delayed_work	rb_recovery_worker;
-	struct delayed_work	rb_refresh_worker;
-};
-#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
-
-/* rb_flags */
-enum {
-	RPCRDMA_BUF_F_EMPTY_SCQ = 0,
-};
-
-/*
- * Internal structure for transport instance creation. This
- * exists primarily for modularity.
- *
- * This data should be set with mount options
- */
-struct rpcrdma_create_data_internal {
-	unsigned int	max_requests;	/* max requests (slots) in flight */
-	unsigned int	rsize;		/* mount rsize - max read hdr+data */
-	unsigned int	wsize;		/* mount wsize - max write hdr+data */
-	unsigned int	inline_rsize;	/* max non-rdma read data payload */
-	unsigned int	inline_wsize;	/* max non-rdma write data payload */
+	struct work_struct	rb_refresh_worker;
 };
 
 /*
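rb_recv_bufs, a list_head guarded by a spinlock, becomes rb_free_reps, an llist: completion handlers can return a rep without taking any lock, while the Receive-posting path remains the sole consumer (llist_del_first() permits only one consumer at a time). A minimal sketch of the get/put pair over the fields above, with hypothetical names (compare the rep-pool helpers in verbs.c):

/* Lock-free put: safe from any completion context */
static void demo_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep)
{
	llist_add(&rep->rr_node, &buf->rb_free_reps);
}

/* Single-consumer get: only the Receive-posting path calls this */
static struct rpcrdma_rep *demo_rep_get(struct rpcrdma_buffer *buf)
{
	struct llist_node *node = llist_del_first(&buf->rb_free_reps);

	return node ? llist_entry(node, struct rpcrdma_rep, rr_node) : NULL;
}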
@@ -452,7 +387,7 @@
 	unsigned long		hardway_register_count;
 	unsigned long		failed_marshal_count;
 	unsigned long		bad_reply_count;
-	unsigned long		mrs_recovered;
+	unsigned long		mrs_recycled;
 	unsigned long		mrs_orphaned;
 	unsigned long		mrs_allocated;
 	unsigned long		empty_sendctx_q;
@@ -467,36 +402,6 @@
 };
 
 /*
- * Per-registration mode operations
- */
-struct rpcrdma_xprt;
-struct rpcrdma_memreg_ops {
-	struct rpcrdma_mr_seg *
-			(*ro_map)(struct rpcrdma_xprt *,
-				  struct rpcrdma_mr_seg *, int, bool,
-				  struct rpcrdma_mr **);
-	int		(*ro_send)(struct rpcrdma_ia *ia,
-				   struct rpcrdma_req *req);
-	void		(*ro_reminv)(struct rpcrdma_rep *rep,
-				     struct list_head *mrs);
-	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
-					 struct list_head *);
-	void		(*ro_recover_mr)(struct rpcrdma_mr *mr);
-	int		(*ro_open)(struct rpcrdma_ia *,
-				   struct rpcrdma_ep *,
-				   struct rpcrdma_create_data_internal *);
-	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
-	int		(*ro_init_mr)(struct rpcrdma_ia *,
-				      struct rpcrdma_mr *);
-	void		(*ro_release_mr)(struct rpcrdma_mr *mr);
-	const char	*ro_displayname;
-	const int	ro_send_w_inv_ok;
-};
-
-extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
-extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
-
-/*
  * RPCRDMA transport -- encapsulates the structures above for
  * integration with RPC.
  *
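Since FRWR is now the only registration mode, the ro_* dispatch table is pure overhead; call sites become direct calls into frwr_ops.c (prototypes appear near the end of this file). An abridged, illustrative before/after of a chunk-registration call (the wrapper function is hypothetical):

static struct rpcrdma_mr_seg *
demo_register_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		    int nsegs, bool writing, __be32 xid, struct rpcrdma_mr *mr)
{
	/* Before: indirect dispatch through the per-mode ops table:
	 *   seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
	 *					 writing, &mr);
	 * After: one direct call, one less pointer chase per chunk:
	 */
	return frwr_map(r_xprt, seg, nsegs, writing, xid, mr);
}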
@@ -508,16 +413,14 @@
  */
 struct rpcrdma_xprt {
 	struct rpc_xprt		rx_xprt;
-	struct rpcrdma_ia	rx_ia;
-	struct rpcrdma_ep	rx_ep;
+	struct rpcrdma_ep	*rx_ep;
 	struct rpcrdma_buffer	rx_buf;
-	struct rpcrdma_create_data_internal rx_data;
 	struct delayed_work	rx_connect_worker;
+	struct rpc_timeout	rx_timeout;
 	struct rpcrdma_stats	rx_stats;
 };
 
 #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
-#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
 
 static inline const char *
 rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
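rx_ep becomes a pointer because endpoint state is now allocated per connection and freed through re_kref when its last user drops it; the transport structure can outlive any one endpoint. The surviving rpcx_to_rdmax() accessor is the usual container_of() idiom. A hypothetical caller, for illustration:

static void demo_mark_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;	/* a pointer now; may be NULL */

	if (ep)
		atomic_set(&ep->re_force_disconnect, 1);
}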
@@ -542,69 +445,65 @@
 extern unsigned int xprt_rdma_memreg_strategy;
 
 /*
- * Interface Adapter calls - xprtrdma/verbs.c
- */
-int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
-void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
-void rpcrdma_ia_close(struct rpcrdma_ia *);
-bool frwr_is_supported(struct rpcrdma_ia *);
-bool fmr_is_supported(struct rpcrdma_ia *);
-
-extern struct workqueue_struct *rpcrdma_receive_wq;
-
-/*
  * Endpoint calls - xprtrdma/verbs.c
  */
-int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
-		      struct rpcrdma_create_data_internal *);
-void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
-int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
-void rpcrdma_conn_func(struct rpcrdma_ep *ep);
-void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
+int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
+void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
 
-int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
-		    struct rpcrdma_req *);
-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
+int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
 
 /*
  * Buffer calls - xprtrdma/verbs.c
  */
-struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
-void rpcrdma_destroy_req(struct rpcrdma_req *);
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
+				       gfp_t flags);
+int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+void rpcrdma_req_destroy(struct rpcrdma_req *req);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
-struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
+struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);
 
 struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_mr_put(struct rpcrdma_mr *mr);
-void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
-void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);
+void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt);
 
 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
-void rpcrdma_buffer_put(struct rpcrdma_req *);
+void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
+			struct rpcrdma_req *req);
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
 void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
 
-struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
-					    gfp_t);
-bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
-void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);
+bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
+			    gfp_t flags);
+bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
+			      struct rpcrdma_regbuf *rb);
 
-static inline bool
-rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
+/**
+ * rpcrdma_regbuf_is_mapped - check if buffer is DMA mapped
+ *
+ * Returns true if the buffer is now mapped to rb->rg_device.
+ */
+static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
 {
 	return rb->rg_device != NULL;
 }
 
-static inline bool
-rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+/**
+ * rpcrdma_regbuf_dma_map - DMA-map a regbuf
+ * @r_xprt: controlling transport instance
+ * @rb: regbuf to be mapped
+ *
+ * Returns true if the buffer is currently DMA mapped.
+ */
+static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
+					  struct rpcrdma_regbuf *rb)
 {
 	if (likely(rpcrdma_regbuf_is_mapped(rb)))
 		return true;
-	return __rpcrdma_dma_map_regbuf(ia, rb);
+	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
 }
-
-int rpcrdma_alloc_wq(void);
-void rpcrdma_destroy_wq(void);
 
 /*
  * Wrappers for chunk registration, shared by read/write chunk code.
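rpcrdma_regbuf_dma_map() keeps the hot path to a single pointer test: an already-mapped buffer costs nothing, and mapping happens lazily on first use. A sketch of a caller filling in a Send SGE from a regbuf, using only the accessors above (the helper is hypothetical; compare the rpcrdma_prepare_* helpers in rpc_rdma.c):

static bool demo_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				 struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_sendctx->sc_sges[0];

	/* Maps on first use; fails only if DMA mapping fails */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		return false;

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);
	return true;
}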
@@ -616,12 +515,29 @@
 	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 }
 
+/* Memory registration calls xprtrdma/frwr_ops.c
+ */
+void frwr_reset(struct rpcrdma_req *req);
+int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);
+int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr);
+void frwr_release_mr(struct rpcrdma_mr *mr);
+struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
+				struct rpcrdma_mr_seg *seg,
+				int nsegs, bool writing, __be32 xid,
+				struct rpcrdma_mr *mr);
+int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
+void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 
 enum rpcrdma_chunktype {
 	rpcrdma_noch = 0,
+	rpcrdma_noch_pullup,
+	rpcrdma_noch_mapped,
 	rpcrdma_readch,
 	rpcrdma_areadch,
 	rpcrdma_writech,
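The two new no-chunk variants record how an inline-sized request reaches the Send buffer: rpcrdma_noch_pullup copies (pulls up) the whole RPC message into the header buffer so a single SGE carries everything, while rpcrdma_noch_mapped DMA-maps the xdr_buf components in place and sends them as separate SGEs. A simplified, illustrative selection (the threshold constant here is invented; the real decision in rpc_rdma.c also weighs device SGE limits):

enum { DEMO_PULLUP_MAX = 512 };		/* illustrative cutoff only */

static enum rpcrdma_chunktype
demo_pick_noch(struct xdr_buf *xdr)
{
	/* Tiny message: one memcpy is cheaper than DMA mapping */
	if (xdr->len <= DEMO_PULLUP_MAX)
		return rpcrdma_noch_pullup;

	/* Larger inline message: map head/pages/tail directly */
	return rpcrdma_noch_mapped;
}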
@@ -632,14 +548,12 @@
 				struct rpcrdma_req *req, u32 hdrlen,
 				struct xdr_buf *xdr,
 				enum rpcrdma_chunktype rtype);
-void rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc);
+void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
-void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep);
+void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
 void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
-void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt,
-			  struct rpcrdma_req *req);
-void rpcrdma_deferred_completion(struct work_struct *work);
 
 static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
 {
@@ -650,9 +564,10 @@
 /* RPC/RDMA module init - xprtrdma/transport.c
  */
 extern unsigned int xprt_rdma_max_inline_read;
+extern unsigned int xprt_rdma_max_inline_write;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
-void rpcrdma_connect_worker(struct work_struct *work);
+void xprt_rdma_close(struct rpc_xprt *xprt);
 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
 int xprt_rdma_init(void);
 void xprt_rdma_cleanup(void);
@@ -661,8 +576,8 @@
  */
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
-int xprt_rdma_bc_up(struct svc_serv *, struct net *);
 size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
+unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
 int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
 void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
 int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);