.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later
1 | 2 | /* Maintain an RxRPC server socket to do AFS communications through
2 | 3 | *
3 | 4 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 | 5 | * Written by David Howells (dhowells@redhat.com)
5 | | - *
6 | | - * This program is free software; you can redistribute it and/or
7 | | - * modify it under the terms of the GNU General Public License
8 | | - * as published by the Free Software Foundation; either version
9 | | - * 2 of the License, or (at your option) any later version.
10 | 6 | */
11 | 7 |
12 | 8 | #include <linux/slab.h>
.. | ..
16 | 12 | #include <net/af_rxrpc.h>
17 | 13 | #include "internal.h"
18 | 14 | #include "afs_cm.h"
| 15 | +#include "protocol_yfs.h"
19 | 16 |
20 | 17 | struct workqueue_struct *afs_async_calls;
21 | 18 |
22 | 19 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
23 | | -static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
24 | 20 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
25 | 21 | static void afs_process_async_call(struct work_struct *);
26 | 22 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
.. | ..
41 | 37 | {
42 | 38 | struct sockaddr_rxrpc srx;
43 | 39 | struct socket *socket;
44 | | - unsigned int min_level;
45 | 40 | int ret;
46 | 41 |
47 | 42 | _enter("");
.. | ..
61 | 56 | srx.transport.sin6.sin6_family = AF_INET6;
62 | 57 | srx.transport.sin6.sin6_port = htons(AFS_CM_PORT);
63 | 58 |
64 | | - min_level = RXRPC_SECURITY_ENCRYPT;
65 | | - ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
66 | | - (void *)&min_level, sizeof(min_level));
| 59 | + ret = rxrpc_sock_set_min_security_level(socket->sk,
| 60 | + RXRPC_SECURITY_ENCRYPT);
67 | 61 | if (ret < 0)
68 | 62 | goto error_2;
69 | 63 |
.. | ..
74 | 68 | }
75 | 69 | if (ret < 0)
76 | 70 | goto error_2;
| 71 | +
| 72 | + srx.srx_service = YFS_CM_SERVICE;
| 73 | + ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
| 74 | + if (ret < 0)
| 75 | + goto error_2;
| 76 | +
| 77 | + /* Ideally, we'd turn on service upgrade here, but we can't because
| 78 | + * OpenAFS is buggy and leaks the userStatus field from packet to
| 79 | + * packet and between FS packets and CB packets - so if we try to do an
| 80 | + * upgrade on an FS packet, OpenAFS will leak that into the CB packet
| 81 | + * it sends back to us.
| 82 | + */
77 | 83 |
78 | 84 | rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
79 | 85 | afs_rx_discard_new_call);
143 | 149 | INIT_WORK(&call->async_work, afs_process_async_call);
144 | 150 | init_waitqueue_head(&call->waitq);
145 | 151 | spin_lock_init(&call->state_lock);
| 152 | + call->iter = &call->def_iter;
146 | 153 |
147 | 154 | o = atomic_inc_return(&net->nr_outstanding_calls);
148 | 155 | trace_afs_call(call, afs_call_trace_alloc, 1, o,
.. | ..
174 | 181 | if (call->type->destructor)
175 | 182 | call->type->destructor(call);
176 | 183 |
177 | | - afs_put_server(call->net, call->cm_server);
178 | | - afs_put_cb_interest(call->net, call->cbi);
| 184 | + afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
| 185 | + afs_put_addrlist(call->alist);
179 | 186 | kfree(call->request);
180 | 187 |
181 | 188 | trace_afs_call(call, afs_call_trace_free, 0, o,
.. | ..
188 | 195 | }
189 | 196 | }
190 | 197 |
191 | | -/*
192 | | - * Queue the call for actual work. Returns 0 unconditionally for convenience.
193 | | - */
194 | | -int afs_queue_call_work(struct afs_call *call)
| 198 | +static struct afs_call *afs_get_call(struct afs_call *call,
| 199 | + enum afs_call_trace why)
195 | 200 | {
196 | 201 | int u = atomic_inc_return(&call->usage);
197 | 202 |
198 | | - trace_afs_call(call, afs_call_trace_work, u,
| 203 | + trace_afs_call(call, why, u,
199 | 204 | atomic_read(&call->net->nr_outstanding_calls),
200 | 205 | __builtin_return_address(0));
| 206 | + return call;
| 207 | +}
201 | 208 |
202 | | - INIT_WORK(&call->work, call->type->work);
| 209 | +/*
| 210 | + * Queue the call for actual work.
| 211 | + */
| 212 | +static void afs_queue_call_work(struct afs_call *call)
| 213 | +{
| 214 | + if (call->type->work) {
| 215 | + INIT_WORK(&call->work, call->type->work);
203 | 216 |
204 | | - if (!queue_work(afs_wq, &call->work))
205 | | - afs_put_call(call);
206 | | - return 0;
| 217 | + afs_get_call(call, afs_call_trace_work);
| 218 | + if (!queue_work(afs_wq, &call->work))
| 219 | + afs_put_call(call);
| 220 | + }
207 | 221 | }
208 | 222 |
209 | 223 | /*
.. | ..
233 | 247 | goto nomem_free;
234 | 248 | }
235 | 249 |
| 250 | + afs_extract_to_buf(call, call->reply_max);
236 | 251 | call->operation_ID = type->op;
237 | 252 | init_waitqueue_head(&call->waitq);
238 | 253 | return call;
.. | ..
265 | 280 | struct bio_vec *bv, pgoff_t first, pgoff_t last,
266 | 281 | unsigned offset)
267 | 282 | {
| 283 | + struct afs_operation *op = call->op;
268 | 284 | struct page *pages[AFS_BVEC_MAX];
269 | 285 | unsigned int nr, n, i, to, bytes = 0;
270 | 286 |
271 | 287 | nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
272 | | - n = find_get_pages_contig(call->mapping, first, nr, pages);
| 288 | + n = find_get_pages_contig(op->store.mapping, first, nr, pages);
273 | 289 | ASSERTCMP(n, ==, nr);
274 | 290 |
275 | 291 | msg->msg_flags |= MSG_MORE;
276 | 292 | for (i = 0; i < nr; i++) {
277 | 293 | to = PAGE_SIZE;
278 | 294 | if (first + i >= last) {
279 | | - to = call->last_to;
| 295 | + to = op->store.last_to;
280 | 296 | msg->msg_flags &= ~MSG_MORE;
281 | 297 | }
282 | 298 | bv[i].bv_page = pages[i];
.. | ..
286 | 302 | offset = 0;
287 | 303 | }
288 | 304 |
289 | | - iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
| 305 | + iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes);
290 | 306 | }
291 | 307 |
292 | 308 | /*
.. | ..
306 | 322 | */
307 | 323 | static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
308 | 324 | {
| 325 | + struct afs_operation *op = call->op;
309 | 326 | struct bio_vec bv[AFS_BVEC_MAX];
310 | 327 | unsigned int bytes, nr, loop, offset;
311 | | - pgoff_t first = call->first, last = call->last;
| 328 | + pgoff_t first = op->store.first, last = op->store.last;
312 | 329 | int ret;
313 | 330 |
314 | | - offset = call->first_offset;
315 | | - call->first_offset = 0;
| 331 | + offset = op->store.first_offset;
| 332 | + op->store.first_offset = 0;
316 | 333 |
317 | 334 | do {
318 | 335 | afs_load_bvec(call, msg, bv, first, last, offset);
.. | ..
322 | 339 | bytes = msg->msg_iter.count;
323 | 340 | nr = msg->msg_iter.nr_segs;
324 | 341 |
325 | | - ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg,
| 342 | + ret = rxrpc_kernel_send_data(op->net->socket, call->rxcall, msg,
326 | 343 | bytes, afs_notify_end_request_tx);
327 | 344 | for (loop = 0; loop < nr; loop++)
328 | 345 | put_page(bv[loop].bv_page);
.. | ..
332 | 349 | first += nr;
333 | 350 | } while (first <= last);
334 | 351 |
335 | | - trace_afs_sent_pages(call, call->first, last, first, ret);
| 352 | + trace_afs_sent_pages(call, op->store.first, last, first, ret);
336 | 353 | return ret;
337 | 354 | }
338 | 355 |
339 | 356 | /*
340 | | - * initiate a call
| 357 | + * Initiate a call and synchronously queue up the parameters for dispatch. Any
| 358 | + * error is stored into the call struct, which the caller must check for.
341 | 359 | */
342 | | -long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
343 | | - gfp_t gfp, bool async)
| 360 | +void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
344 | 361 | {
345 | | - struct sockaddr_rxrpc *srx = ac->addr;
| 362 | + struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index];
346 | 363 | struct rxrpc_call *rxcall;
347 | 364 | struct msghdr msg;
348 | 365 | struct kvec iov[1];
.. | ..
358 | 375 | call, call->type->name, key_serial(call->key),
359 | 376 | atomic_read(&call->net->nr_outstanding_calls));
360 | 377 |
361 | | - call->async = async;
| 378 | + call->addr_ix = ac->index;
| 379 | + call->alist = afs_get_addrlist(ac->alist);
362 | 380 |
363 | 381 | /* Work out the length we're going to transmit. This is awkward for
364 | 382 | * calls such as FS.StoreData where there's an extra injection of data
.. | ..
366 | 384 | */
367 | 385 | tx_total_len = call->request_size;
368 | 386 | if (call->send_pages) {
369 | | - if (call->last == call->first) {
370 | | - tx_total_len += call->last_to - call->first_offset;
| 387 | + struct afs_operation *op = call->op;
| 388 | +
| 389 | + if (op->store.last == op->store.first) {
| 390 | + tx_total_len += op->store.last_to - op->store.first_offset;
371 | 391 | } else {
372 | 392 | /* It looks mathematically like you should be able to
373 | 393 | * combine the following lines with the ones above, but
374 | 394 | * unsigned arithmetic is fun when it wraps...
375 | 395 | */
376 | | - tx_total_len += PAGE_SIZE - call->first_offset;
377 | | - tx_total_len += call->last_to;
378 | | - tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
| 396 | + tx_total_len += PAGE_SIZE - op->store.first_offset;
| 397 | + tx_total_len += op->store.last_to;
| 398 | + tx_total_len += (op->store.last - op->store.first - 1) * PAGE_SIZE;
379 | 399 | }
| 400 | + }
| 401 | +
| 402 | + /* If the call is going to be asynchronous, we need an extra ref for
| 403 | + * the call to hold itself so the caller need not hang on to its ref.
| 404 | + */
| 405 | + if (call->async) {
| 406 | + afs_get_call(call, afs_call_trace_get);
| 407 | + call->drop_ref = true;
380 | 408 | }
381 | 409 |
382 | 410 | /* create a call */
383 | 411 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
384 | 412 | (unsigned long)call,
385 | 413 | tx_total_len, gfp,
386 | | - (async ?
| 414 | + (call->async ?
387 | 415 | afs_wake_up_async_call :
388 | 416 | afs_wake_up_call_waiter),
389 | 417 | call->upgrade,
| 418 | + (call->intr ? RXRPC_PREINTERRUPTIBLE :
| 419 | + RXRPC_UNINTERRUPTIBLE),
390 | 420 | call->debug_id);
391 | 421 | if (IS_ERR(rxcall)) {
392 | 422 | ret = PTR_ERR(rxcall);
| 423 | + call->error = ret;
393 | 424 | goto error_kill_call;
394 | 425 | }
395 | 426 |
396 | 427 | call->rxcall = rxcall;
| 428 | +
| 429 | + if (call->max_lifespan)
| 430 | + rxrpc_kernel_set_max_life(call->net->socket, rxcall,
| 431 | + call->max_lifespan);
| 432 | + call->issue_time = ktime_get_real();
397 | 433 |
398 | 434 | /* send the request */
399 | 435 | iov[0].iov_base = call->request;
.. | ..
401 | 437 |
402 | 438 | msg.msg_name = NULL;
403 | 439 | msg.msg_namelen = 0;
404 | | - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
405 | | - call->request_size);
| 440 | + iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
406 | 441 | msg.msg_control = NULL;
407 | 442 | msg.msg_controllen = 0;
408 | 443 | msg.msg_flags = MSG_WAITALL | (call->send_pages ? MSG_MORE : 0);
.. | ..
419 | 454 | goto error_do_abort;
420 | 455 | }
421 | 456 |
422 | | - /* at this point, an async call may no longer exist as it may have
423 | | - * already completed */
424 | | - if (call->async)
425 | | - return -EINPROGRESS;
426 | | -
427 | | - return afs_wait_for_call_to_complete(call, ac);
| 457 | + /* Note that at this point, we may have received the reply or an abort
| 458 | + * - and an asynchronous call may already have completed.
| 459 | + *
| 460 | + * afs_wait_for_call_to_complete(call, ac)
| 461 | + * must be called to synchronously clean up.
| 462 | + */
| 463 | + return;
428 | 464 |
429 | 465 | error_do_abort:
430 | | - call->state = AFS_CALL_COMPLETE;
431 | 466 | if (ret != -ECONNABORTED) {
432 | 467 | rxrpc_kernel_abort_call(call->net->socket, rxcall,
433 | 468 | RX_USER_ABORT, ret, "KSD");
434 | 469 | } else {
435 | | - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0);
| 470 | + iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0);
436 | 471 | rxrpc_kernel_recv_data(call->net->socket, rxcall,
437 | 472 | &msg.msg_iter, false,
438 | 473 | &call->abort_code, &call->service_id);
.. | ..
442 | 477 | call->error = ret;
443 | 478 | trace_afs_call_done(call);
444 | 479 | error_kill_call:
445 | | - afs_put_call(call);
| 480 | + if (call->type->done)
| 481 | + call->type->done(call);
| 482 | +
| 483 | + /* We need to dispose of the extra ref we grabbed for an async call.
| 484 | + * The call, however, might be queued on afs_async_calls and we need to
| 485 | + * make sure we don't get any more notifications that might requeue it.
| 486 | + */
| 487 | + if (call->rxcall) {
| 488 | + rxrpc_kernel_end_call(call->net->socket, call->rxcall);
| 489 | + call->rxcall = NULL;
| 490 | + }
| 491 | + if (call->async) {
| 492 | + if (cancel_work_sync(&call->async_work))
| 493 | + afs_put_call(call);
| 494 | + afs_put_call(call);
| 495 | + }
| 496 | +
446 | 497 | ac->error = ret;
| 498 | + call->state = AFS_CALL_COMPLETE;
447 | 499 | _leave(" = %d", ret);
448 | | - return ret;
449 | 500 | }
450 | 501 |
451 | 502 | /*
.. | ..
466 | 517 | state == AFS_CALL_SV_AWAIT_ACK
467 | 518 | ) {
468 | 519 | if (state == AFS_CALL_SV_AWAIT_ACK) {
469 | | - struct iov_iter iter;
470 | | -
471 | | - iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0);
| 520 | + iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0);
472 | 521 | ret = rxrpc_kernel_recv_data(call->net->socket,
473 | | - call->rxcall, &iter, false,
474 | | - &remote_abort,
| 522 | + call->rxcall, &call->def_iter,
| 523 | + false, &remote_abort,
475 | 524 | &call->service_id);
476 | | - trace_afs_recv_data(call, 0, 0, false, ret);
| 525 | + trace_afs_receive_data(call, &call->def_iter, false, ret);
477 | 526 |
478 | 527 | if (ret == -EINPROGRESS || ret == -EAGAIN)
479 | 528 | return;
.. | ..
487 | 536 |
488 | 537 | ret = call->type->deliver(call);
489 | 538 | state = READ_ONCE(call->state);
| 539 | + if (ret == 0 && call->unmarshalling_error)
| 540 | + ret = -EBADMSG;
490 | 541 | switch (ret) {
491 | 542 | case 0:
| 543 | + afs_queue_call_work(call);
492 | 544 | if (state == AFS_CALL_CL_PROC_REPLY) {
493 | | - if (call->cbi)
| 545 | + if (call->op)
494 | 546 | set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
495 | | - &call->cbi->server->flags);
| 547 | + &call->op->server->flags);
496 | 548 | goto call_complete;
497 | 549 | }
498 | 550 | ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY);
.. | ..
511 | 563 | case -EIO:
512 | 564 | pr_err("kAFS: Call %u in bad state %u\n",
513 | 565 | call->debug_id, state);
514 | | - /* Fall through */
| 566 | + fallthrough;
515 | 567 | case -ENODATA:
516 | 568 | case -EBADMSG:
517 | 569 | case -EMSGSIZE:
518 | | - default:
| 570 | + case -ENOMEM:
| 571 | + case -EFAULT:
519 | 572 | abort_code = RXGEN_CC_UNMARSHAL;
520 | 573 | if (state != AFS_CALL_CL_AWAIT_REPLY)
521 | 574 | abort_code = RXGEN_SS_UNMARSHAL;
522 | 575 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
523 | | - abort_code, -EBADMSG, "KUM");
| 576 | + abort_code, ret, "KUM");
| 577 | + goto local_abort;
| 578 | + default:
| 579 | + abort_code = RX_CALL_DEAD;
| 580 | + rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
| 581 | + abort_code, ret, "KER");
524 | 582 | goto local_abort;
525 | 583 | }
526 | 584 | }
527 | 585 |
528 | 586 | done:
529 | | - if (state == AFS_CALL_COMPLETE && call->incoming)
530 | | - afs_put_call(call);
| 587 | + if (call->type->done)
| 588 | + call->type->done(call);
531 | 589 | out:
532 | 590 | _leave("");
533 | 591 | return;
.. | ..
541 | 599 | }
542 | 600 |
543 | 601 | /*
544 | | - * wait synchronously for a call to complete
| 602 | + * Wait synchronously for a call to complete and clean up the call struct.
545 | 603 | */
546 | | -static long afs_wait_for_call_to_complete(struct afs_call *call,
547 | | - struct afs_addr_cursor *ac)
| 604 | +long afs_wait_for_call_to_complete(struct afs_call *call,
| 605 | + struct afs_addr_cursor *ac)
548 | 606 | {
549 | | - signed long rtt2, timeout;
550 | 607 | long ret;
551 | | - u64 rtt;
552 | | - u32 life, last_life;
| 608 | + bool rxrpc_complete = false;
553 | 609 |
554 | 610 | DECLARE_WAITQUEUE(myself, current);
555 | 611 |
556 | 612 | _enter("");
557 | 613 |
558 | | - rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
559 | | - rtt2 = nsecs_to_jiffies64(rtt) * 2;
560 | | - if (rtt2 < 2)
561 | | - rtt2 = 2;
562 | | -
563 | | - timeout = rtt2;
564 | | - last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
| 614 | + ret = call->error;
| 615 | + if (ret < 0)
| 616 | + goto out;
565 | 617 |
566 | 618 | add_wait_queue(&call->waitq, &myself);
567 | 619 | for (;;) {
.. | ..
573 | 625 | call->need_attention = false;
574 | 626 | __set_current_state(TASK_RUNNING);
575 | 627 | afs_deliver_to_call(call);
576 | | - timeout = rtt2;
577 | 628 | continue;
578 | 629 | }
579 | 630 |
580 | 631 | if (afs_check_call_state(call, AFS_CALL_COMPLETE))
581 | 632 | break;
582 | 633 |
583 | | - life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
584 | | - if (timeout == 0 &&
585 | | - life == last_life && signal_pending(current))
586 | | - break;
587 | | -
588 | | - if (life != last_life) {
589 | | - timeout = rtt2;
590 | | - last_life = life;
| 634 | + if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
| 635 | + /* rxrpc terminated the call. */
| 636 | + rxrpc_complete = true;
| 637 | + break;
591 | 638 | }
592 | 639 |
593 | | - timeout = schedule_timeout(timeout);
| 640 | + schedule();
594 | 641 | }
595 | 642 |
596 | 643 | remove_wait_queue(&call->waitq, &myself);
597 | 644 | __set_current_state(TASK_RUNNING);
598 | 645 |
599 | | - /* Kill off the call if it's still live. */
600 | 646 | if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
601 | | - _debug("call interrupted");
602 | | - if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
603 | | - RX_USER_ABORT, -EINTR, "KWI"))
604 | | - afs_set_call_complete(call, -EINTR, 0);
| 647 | + if (rxrpc_complete) {
| 648 | + afs_set_call_complete(call, call->error, call->abort_code);
| 649 | + } else {
| 650 | + /* Kill off the call if it's still live. */
| 651 | + _debug("call interrupted");
| 652 | + if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
| 653 | + RX_USER_ABORT, -EINTR, "KWI"))
| 654 | + afs_set_call_complete(call, -EINTR, 0);
| 655 | + }
605 | 656 | }
606 | 657 |
607 | 658 | spin_lock_bh(&call->state_lock);
.. | ..
612 | 663 | ret = ac->error;
613 | 664 | switch (ret) {
614 | 665 | case 0:
615 | | - if (call->ret_reply0) {
616 | | - ret = (long)call->reply[0];
617 | | - call->reply[0] = NULL;
618 | | - }
619 | | - /* Fall through */
| 666 | + ret = call->ret0;
| 667 | + call->ret0 = 0;
| 668 | +
| 669 | + fallthrough;
620 | 670 | case -ECONNABORTED:
621 | 671 | ac->responded = true;
622 | 672 | break;
623 | 673 | }
624 | 674 |
| 675 | +out:
625 | 676 | _debug("call complete");
626 | 677 | afs_put_call(call);
627 | 678 | _leave(" = %p", (void *)ret);
.. | ..
664 | 715 | }
665 | 716 |
666 | 717 | /*
667 | | - * Delete an asynchronous call. The work item carries a ref to the call struct
668 | | - * that we need to release.
669 | | - */
670 | | -static void afs_delete_async_call(struct work_struct *work)
671 | | -{
672 | | - struct afs_call *call = container_of(work, struct afs_call, async_work);
673 | | -
674 | | - _enter("");
675 | | -
676 | | - afs_put_call(call);
677 | | -
678 | | - _leave("");
679 | | -}
680 | | -
681 | | -/*
682 | 718 | * Perform I/O processing on an asynchronous call. The work item carries a ref
683 | 719 | * to the call struct that we either need to release or to pass on.
684 | 720 | */
.. | ..
691 | 727 | if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
692 | 728 | call->need_attention = false;
693 | 729 | afs_deliver_to_call(call);
694 | | - }
695 | | -
696 | | - if (call->state == AFS_CALL_COMPLETE) {
697 | | - /* We have two refs to release - one from the alloc and one
698 | | - * queued with the work item - and we can't just deallocate the
699 | | - * call because the work item may be queued again.
700 | | - */
701 | | - call->async_work.func = afs_delete_async_call;
702 | | - if (!queue_work(afs_async_calls, &call->async_work))
703 | | - afs_put_call(call);
704 | 730 | }
705 | 731 |
706 | 732 | afs_put_call(call);
.. | ..
729 | 755 | if (!call)
730 | 756 | break;
731 | 757 |
| 758 | + call->drop_ref = true;
732 | 759 | call->async = true;
733 | 760 | call->state = AFS_CALL_SV_AWAIT_OP_ID;
734 | 761 | init_waitqueue_head(&call->waitq);
| 762 | + afs_extract_to_tmp(call);
735 | 763 | }
736 | 764 |
737 | 765 | if (rxrpc_kernel_charge_accept(net->socket,
.. | ..
777 | 805 | {
778 | 806 | int ret;
779 | 807 |
780 | | - _enter("{%zu}", call->offset);
781 | | -
782 | | - ASSERTCMP(call->offset, <, 4);
| 808 | + _enter("{%zu}", iov_iter_count(call->iter));
783 | 809 |
784 | 810 | /* the operation ID forms the first four bytes of the request data */
785 | | - ret = afs_extract_data(call, &call->tmp, 4, true);
| 811 | + ret = afs_extract_data(call, true);
786 | 812 | if (ret < 0)
787 | 813 | return ret;
788 | 814 |
789 | 815 | call->operation_ID = ntohl(call->tmp);
790 | 816 | afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST);
791 | | - call->offset = 0;
792 | 817 |
793 | 818 | /* ask the cache manager to route the call (it'll change the call type
794 | 819 | * if successful) */
.. | ..
829 | 854 |
830 | 855 | msg.msg_name = NULL;
831 | 856 | msg.msg_namelen = 0;
832 | | - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
| 857 | + iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0);
833 | 858 | msg.msg_control = NULL;
834 | 859 | msg.msg_controllen = 0;
835 | 860 | msg.msg_flags = 0;
.. | ..
843 | 868 | case -ENOMEM:
844 | 869 | _debug("oom");
845 | 870 | rxrpc_kernel_abort_call(net->socket, call->rxcall,
846 | | - RX_USER_ABORT, -ENOMEM, "KOO");
| 871 | + RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
| 872 | + fallthrough;
847 | 873 | default:
848 | 874 | _leave(" [error]");
849 | 875 | return;
.. | ..
868 | 894 | iov[0].iov_len = len;
869 | 895 | msg.msg_name = NULL;
870 | 896 | msg.msg_namelen = 0;
871 | | - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
| 897 | + iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
872 | 898 | msg.msg_control = NULL;
873 | 899 | msg.msg_controllen = 0;
874 | 900 | msg.msg_flags = 0;
.. | ..
884 | 910 | if (n == -ENOMEM) {
885 | 911 | _debug("oom");
886 | 912 | rxrpc_kernel_abort_call(net->socket, call->rxcall,
887 | | - RX_USER_ABORT, -ENOMEM, "KOO");
| 913 | + RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
888 | 914 | }
889 | 915 | _leave(" [error]");
890 | 916 | }
.. | ..
892 | 918 | /*
893 | 919 | * Extract a piece of data from the received data socket buffers.
894 | 920 | */
895 | | -int afs_extract_data(struct afs_call *call, void *buf, size_t count,
896 | | - bool want_more)
| 921 | +int afs_extract_data(struct afs_call *call, bool want_more)
897 | 922 | {
898 | 923 | struct afs_net *net = call->net;
899 | | - struct iov_iter iter;
900 | | - struct kvec iov;
| 924 | + struct iov_iter *iter = call->iter;
901 | 925 | enum afs_call_state state;
902 | 926 | u32 remote_abort = 0;
903 | 927 | int ret;
904 | 928 |
905 | | - _enter("{%s,%zu},,%zu,%d",
906 | | - call->type->name, call->offset, count, want_more);
| 929 | + _enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more);
907 | 930 |
908 | | - ASSERTCMP(call->offset, <=, count);
909 | | -
910 | | - iov.iov_base = buf + call->offset;
911 | | - iov.iov_len = count - call->offset;
912 | | - iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset);
913 | | -
914 | | - ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter,
| 931 | + ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
915 | 932 | want_more, &remote_abort,
916 | 933 | &call->service_id);
917 | | - call->offset += (count - call->offset) - iov_iter_count(&iter);
918 | | - trace_afs_recv_data(call, count, call->offset, want_more, ret);
919 | 934 | if (ret == 0 || ret == -EAGAIN)
920 | 935 | return ret;
921 | 936 |
.. | ..
930 | 945 | break;
931 | 946 | case AFS_CALL_COMPLETE:
932 | 947 | kdebug("prem complete %d", call->error);
933 | | - return -EIO;
| 948 | + return afs_io_error(call, afs_io_error_extract);
934 | 949 | default:
935 | 950 | break;
936 | 951 | }
.. | ..
944 | 959 | /*
945 | 960 | * Log protocol error production.
946 | 961 | */
947 | | -noinline int afs_protocol_error(struct afs_call *call, int error)
| 962 | +noinline int afs_protocol_error(struct afs_call *call,
| 963 | + enum afs_eproto_cause cause)
948 | 964 | {
949 | | - trace_afs_protocol_error(call, error, __builtin_return_address(0));
950 | | - return error;
| 965 | + trace_afs_protocol_error(call, cause);
| 966 | + if (call)
| 967 | + call->unmarshalling_error = true;
| 968 | + return -EBADMSG;
951 | 969 | }
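
The reference discipline the patch leans on in afs_queue_call_work() and the async-call paths is: take a reference on behalf of the asynchronous context before handing the object over, and drop that reference again immediately if the handoff fails (queue_work() returning false because the item is already pending). Below is a minimal, self-contained user-space sketch of that idiom only, not code from the patch; struct call, get_call(), put_call() and queue_call() are hypothetical stand-ins for afs_call, afs_get_call(), afs_put_call() and queue_work().

/* Sketch of the "get a ref before queueing, put it back if queueing fails"
 * idiom. Assumed, simplified names; the real kernel objects differ. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct call {
	atomic_int usage;	/* object stays alive while usage > 0 */
	bool queued;		/* stand-in for "work item already pending" */
};

static struct call *get_call(struct call *c)
{
	atomic_fetch_add(&c->usage, 1);
	return c;
}

static void put_call(struct call *c)
{
	if (atomic_fetch_sub(&c->usage, 1) == 1) {
		printf("last ref dropped, freeing call\n");
		free(c);
	}
}

/* Mimics queue_work(): returns false if the item was already queued, in
 * which case the queue does not end up owning a new reference. */
static bool queue_call(struct call *c)
{
	if (c->queued)
		return false;
	c->queued = true;
	return true;
}

static void queue_call_work(struct call *c)
{
	get_call(c);			/* ref owned by the future worker */
	if (!queue_call(c))
		put_call(c);		/* already queued: give the ref back */
}

static void worker(struct call *c)
{
	c->queued = false;		/* work taken off the queue */
	/* ... do the actual work ... */
	put_call(c);			/* drop the ref taken at queue time */
}

int main(void)
{
	struct call *c = calloc(1, sizeof(*c));

	atomic_init(&c->usage, 1);	/* caller's initial reference */
	queue_call_work(c);		/* queued: usage becomes 2 */
	queue_call_work(c);		/* already queued: usage stays at 2 */
	worker(c);			/* worker drops its ref: usage 1 */
	put_call(c);			/* caller drops its ref: freed */
	return 0;
}

Read this way, the error path added to afs_make_call() follows the same shape: cancel_work_sync() reports whether a queued worker still held such a reference, and only in that case is the extra afs_put_call() issued before the async self-reference is dropped.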
---|