.. | .. |
---|
5 | 5 | |
---|
6 | 6 | NetApp provides this source code under the GPL v2 License. |
---|
7 | 7 | The GPL v2 license is available at |
---|
8 | | -http://opensource.org/licenses/gpl-license.php. |
---|
| 8 | +https://opensource.org/licenses/gpl-license.php. |
---|
9 | 9 | |
---|
10 | 10 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
---|
11 | 11 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
---|
.. | .. |
---|
31 | 31 | #define RPCDBG_FACILITY RPCDBG_TRANS |
---|
32 | 32 | #endif |
---|
33 | 33 | |
---|
#define BC_MAX_SLOTS	64U

struct rpc_xprt;

/*
 * Report the upper bound on the number of preallocated backchannel
 * slots.  The limit is currently a compile-time constant; the @xprt
 * argument is accepted for interface symmetry but not consulted.
 */
unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
{
	return BC_MAX_SLOTS;
}
---|
| 40 | + |
---|
34 | 41 | /* |
---|
35 | 42 | * Helper routines that track the number of preallocation elements |
---|
36 | 43 | * on the transport. |
---|
37 | 44 | */ |
---|
38 | 45 | static inline int xprt_need_to_requeue(struct rpc_xprt *xprt) |
---|
39 | 46 | { |
---|
40 | | - return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots); |
---|
41 | | -} |
---|
42 | | - |
---|
43 | | -static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n) |
---|
44 | | -{ |
---|
45 | | - atomic_add(n, &xprt->bc_free_slots); |
---|
46 | | - xprt->bc_alloc_count += n; |
---|
47 | | -} |
---|
48 | | - |
---|
49 | | -static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n) |
---|
50 | | -{ |
---|
51 | | - atomic_sub(n, &xprt->bc_free_slots); |
---|
52 | | - return xprt->bc_alloc_count -= n; |
---|
| 47 | + return xprt->bc_alloc_count < xprt->bc_alloc_max; |
---|
53 | 48 | } |
---|
54 | 49 | |
---|
55 | 50 | /* |
---|
.. | .. |
---|
67 | 62 | xbufp = &req->rq_snd_buf; |
---|
68 | 63 | free_page((unsigned long)xbufp->head[0].iov_base); |
---|
69 | 64 | kfree(req); |
---|
| 65 | +} |
---|
| 66 | + |
---|
| 67 | +static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf) |
---|
| 68 | +{ |
---|
| 69 | + buf->head[0].iov_len = PAGE_SIZE; |
---|
| 70 | + buf->tail[0].iov_len = 0; |
---|
| 71 | + buf->pages = NULL; |
---|
| 72 | + buf->page_len = 0; |
---|
| 73 | + buf->flags = 0; |
---|
| 74 | + buf->len = 0; |
---|
| 75 | + buf->buflen = PAGE_SIZE; |
---|
70 | 76 | } |
---|
71 | 77 | |
---|
72 | 78 | static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags) |
---|
.. | .. |
---|
91 | 97 | return NULL; |
---|
92 | 98 | |
---|
93 | 99 | req->rq_xprt = xprt; |
---|
94 | | - INIT_LIST_HEAD(&req->rq_list); |
---|
95 | 100 | INIT_LIST_HEAD(&req->rq_bc_list); |
---|
96 | 101 | |
---|
97 | 102 | /* Preallocate one XDR receive buffer */ |
---|
.. | .. |
---|
117 | 122 | * by the backchannel. This function can be called multiple times |
---|
118 | 123 | * when creating new sessions that use the same rpc_xprt. The |
---|
119 | 124 | * preallocated buffers are added to the pool of resources used by |
---|
120 | | - * the rpc_xprt. Anyone of these resources may be used used by an |
---|
| 125 | + * the rpc_xprt. Any one of these resources may be used by an |
---|
121 | 126 | * incoming callback request. It's up to the higher levels in the |
---|
122 | 127 | * stack to enforce that the maximum number of session slots is not |
---|
123 | 128 | * being exceeded. |
---|
.. | .. |
---|
146 | 151 | |
---|
147 | 152 | dprintk("RPC: setup backchannel transport\n"); |
---|
148 | 153 | |
---|
| 154 | + if (min_reqs > BC_MAX_SLOTS) |
---|
| 155 | + min_reqs = BC_MAX_SLOTS; |
---|
| 156 | + |
---|
149 | 157 | /* |
---|
150 | 158 | * We use a temporary list to keep track of the preallocated |
---|
151 | 159 | * buffers. Once we're done building the list we splice it |
---|
.. | .. |
---|
173 | 181 | */ |
---|
174 | 182 | spin_lock(&xprt->bc_pa_lock); |
---|
175 | 183 | list_splice(&tmp_list, &xprt->bc_pa_list); |
---|
176 | | - xprt_inc_alloc_count(xprt, min_reqs); |
---|
| 184 | + xprt->bc_alloc_count += min_reqs; |
---|
| 185 | + xprt->bc_alloc_max += min_reqs; |
---|
| 186 | + atomic_add(min_reqs, &xprt->bc_slot_count); |
---|
177 | 187 | spin_unlock(&xprt->bc_pa_lock); |
---|
178 | 188 | |
---|
179 | 189 | dprintk("RPC: setup backchannel transport done\n"); |
---|
.. | .. |
---|
198 | 208 | /** |
---|
199 | 209 | * xprt_destroy_backchannel - Destroys the backchannel preallocated structures. |
---|
200 | 210 | * @xprt: the transport holding the preallocated strucures |
---|
201 | | - * @max_reqs the maximum number of preallocated structures to destroy |
---|
| 211 | + * @max_reqs: the maximum number of preallocated structures to destroy |
---|
202 | 212 | * |
---|
203 | 213 | * Since these structures may have been allocated by multiple calls |
---|
204 | 214 | * to xprt_setup_backchannel, we only destroy up to the maximum number |
---|
.. | .. |
---|
221 | 231 | goto out; |
---|
222 | 232 | |
---|
223 | 233 | spin_lock_bh(&xprt->bc_pa_lock); |
---|
224 | | - xprt_dec_alloc_count(xprt, max_reqs); |
---|
| 234 | + xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max); |
---|
225 | 235 | list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { |
---|
226 | 236 | dprintk("RPC: req=%p\n", req); |
---|
227 | 237 | list_del(&req->rq_bc_pa_list); |
---|
228 | 238 | xprt_free_allocation(req); |
---|
| 239 | + xprt->bc_alloc_count--; |
---|
| 240 | + atomic_dec(&xprt->bc_slot_count); |
---|
229 | 241 | if (--max_reqs == 0) |
---|
230 | 242 | break; |
---|
231 | 243 | } |
---|
.. | .. |
---|
236 | 248 | list_empty(&xprt->bc_pa_list) ? "true" : "false"); |
---|
237 | 249 | } |
---|
238 | 250 | |
---|
/*
 * Take the first preallocated request off xprt->bc_pa_list and prime it
 * for the callback identified by @xid.  If the pool is empty, the
 * caller-supplied spare @new (may be NULL) is added to the pool first,
 * provided the BC_MAX_SLOTS limit has not been reached.  Returns NULL
 * when no request could be obtained.
 *
 * Called with xprt->bc_pa_lock held (see xprt_lookup_bc_request).
 */
static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
		struct rpc_rqst *new)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (list_empty(&xprt->bc_pa_list)) {
		if (!new)
			goto not_found;
		/* Refuse to grow the pool past the hard slot limit. */
		if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
			goto not_found;
		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	/* Snapshot the receive buffer so the transport can fill it in. */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
	dprintk("RPC: backchannel req=%p\n", req);
not_found:
	return req;
}
---|
265 | 277 | |
---|
.. | .. |
---|
291 | 303 | */ |
---|
292 | 304 | spin_lock_bh(&xprt->bc_pa_lock); |
---|
293 | 305 | if (xprt_need_to_requeue(xprt)) { |
---|
| 306 | + xprt_bc_reinit_xdr_buf(&req->rq_snd_buf); |
---|
| 307 | + xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf); |
---|
| 308 | + req->rq_rcv_buf.len = PAGE_SIZE; |
---|
294 | 309 | list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); |
---|
295 | 310 | xprt->bc_alloc_count++; |
---|
| 311 | + atomic_inc(&xprt->bc_slot_count); |
---|
296 | 312 | req = NULL; |
---|
297 | 313 | } |
---|
298 | 314 | spin_unlock_bh(&xprt->bc_pa_lock); |
---|
.. | .. |
---|
305 | 321 | */ |
---|
306 | 322 | dprintk("RPC: Last session removed req=%p\n", req); |
---|
307 | 323 | xprt_free_allocation(req); |
---|
308 | | - return; |
---|
309 | 324 | } |
---|
| 325 | + xprt_put(xprt); |
---|
310 | 326 | } |
---|
311 | 327 | |
---|
312 | 328 | /* |
---|
.. | .. |
---|
322 | 338 | */ |
---|
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req, *new = NULL;

	/*
	 * First pass runs with only the preallocated pool available.  If
	 * nothing matches and the pool is empty, drop the lock, allocate
	 * a spare request with GFP_KERNEL (sleeping is allowed here, but
	 * not under bc_pa_lock), and retry exactly once with that spare.
	 */
	do {
		spin_lock(&xprt->bc_pa_lock);
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			/* Skip entries left over from a prior connection. */
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;
			if (req->rq_xid == xid)
				goto found;
		}
		req = xprt_get_bc_request(xprt, xid, new);
found:
		spin_unlock(&xprt->bc_pa_lock);
		if (new) {
			/* Second pass: free the spare if it went unused. */
			if (req != new)
				xprt_free_allocation(new);
			break;
		} else if (req)
			break;
		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
	} while (new);
	return req;
}
---|
339 | 364 | |
---|
.. | .. |
---|
350 | 375 | |
---|
351 | 376 | spin_lock(&xprt->bc_pa_lock); |
---|
352 | 377 | list_del(&req->rq_bc_pa_list); |
---|
353 | | - xprt_dec_alloc_count(xprt, 1); |
---|
| 378 | + xprt->bc_alloc_count--; |
---|
354 | 379 | spin_unlock(&xprt->bc_pa_lock); |
---|
355 | 380 | |
---|
356 | 381 | req->rq_private_buf.len = copied; |
---|
357 | 382 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); |
---|
358 | 383 | |
---|
359 | 384 | dprintk("RPC: add callback request to list\n"); |
---|
| 385 | + xprt_get(xprt); |
---|
360 | 386 | spin_lock(&bc_serv->sv_cb_lock); |
---|
361 | 387 | list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); |
---|
362 | 388 | wake_up(&bc_serv->sv_cb_waitq); |
---|