@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* incoming call handling
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -43,8 +39,9 @@
 				      unsigned int debug_id)
 {
 	const void *here = __builtin_return_address(0);
-	struct rxrpc_call *call;
+	struct rxrpc_call *call, *xcall;
 	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+	struct rb_node *parent, **pp;
 	int max, tmp;
 	unsigned int size = RXRPC_BACKLOG_MAX;
 	unsigned int head, tail, call_head, call_tail;
@@ -94,11 +91,11 @@
 				  (head + 1) & (size - 1));
 
 		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
-				 atomic_read(&conn->usage), here);
+				 refcount_read(&conn->ref), here);
 	}
 
 	/* Now it gets complicated, because calls get registered with the
-	 * socket here, particularly if a user ID is preassigned by the user.
+	 * socket here, with a user ID preassigned by the user.
 	 */
 	call = rxrpc_alloc_call(rx, gfp, debug_id);
 	if (!call)
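The usage counters are converted from bare atomic_t to refcount_t here, so atomic_read(&conn->usage) becomes refcount_read(&conn->ref). refcount_t saturates on overflow and WARNs on underflow instead of silently wrapping, turning refcounting bugs into diagnosable warnings rather than use-after-frees. A minimal sketch of the pattern, using a hypothetical `thing` object rather than anything from this file:

#include <linux/refcount.h>
#include <linux/slab.h>

struct thing {
	refcount_t ref;
};

static struct thing *thing_alloc(gfp_t gfp)
{
	struct thing *t = kzalloc(sizeof(*t), gfp);

	if (t)
		refcount_set(&t->ref, 1);	/* the allocator's reference */
	return t;
}

static void thing_get(struct thing *t)
{
	refcount_inc(&t->ref);			/* saturates; WARNs if 0 */
}

static void thing_put(struct thing *t)
{
	if (refcount_dec_and_test(&t->ref))	/* true when last ref drops */
		kfree(t);
}

refcount_read() is for tracing and diagnostics, as in the trace_rxrpc_conn() call above; it should not be used to make lifetime decisions.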
@@ -106,38 +103,37 @@
 	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
 	call->state = RXRPC_CALL_SERVER_PREALLOC;
 
-	trace_rxrpc_call(call, rxrpc_call_new_service,
-			 atomic_read(&call->usage),
+	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
+			 refcount_read(&call->ref),
 			 here, (const void *)user_call_ID);
 
 	write_lock(&rx->call_lock);
+
+	/* Check the user ID isn't already in use */
+	pp = &rx->calls.rb_node;
+	parent = NULL;
+	while (*pp) {
+		parent = *pp;
+		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+		if (user_call_ID < xcall->user_call_ID)
+			pp = &(*pp)->rb_left;
+		else if (user_call_ID > xcall->user_call_ID)
+			pp = &(*pp)->rb_right;
+		else
+			goto id_in_use;
+	}
+
+	call->user_call_ID = user_call_ID;
+	call->notify_rx = notify_rx;
 	if (user_attach_call) {
-		struct rxrpc_call *xcall;
-		struct rb_node *parent, **pp;
-
-		/* Check the user ID isn't already in use */
-		pp = &rx->calls.rb_node;
-		parent = NULL;
-		while (*pp) {
-			parent = *pp;
-			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-			if (user_call_ID < xcall->user_call_ID)
-				pp = &(*pp)->rb_left;
-			else if (user_call_ID > xcall->user_call_ID)
-				pp = &(*pp)->rb_right;
-			else
-				goto id_in_use;
-		}
-
-		call->user_call_ID = user_call_ID;
-		call->notify_rx = notify_rx;
 		rxrpc_get_call(call, rxrpc_call_got_kernel);
 		user_attach_call(call, user_call_ID);
-		rxrpc_get_call(call, rxrpc_call_got_userid);
-		rb_link_node(&call->sock_node, parent, pp);
-		rb_insert_color(&call->sock_node, &rx->calls);
-		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
 	}
+
+	rxrpc_get_call(call, rxrpc_call_got_userid);
+	rb_link_node(&call->sock_node, parent, pp);
+	rb_insert_color(&call->sock_node, &rx->calls);
+	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
 
 	list_add(&call->sock_link, &rx->sock_calls);
 
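The user-ID collision walk moves out of the if (user_attach_call) branch so that every preallocated service call, kernel-attached or not, gets its user call ID linked into the socket's calls rb-tree. The walk itself is the standard <linux/rbtree.h> descend, link, recolour idiom. A self-contained sketch with a hypothetical id_node type:

#include <linux/rbtree.h>

struct id_node {
	struct rb_node	node;
	unsigned long	id;
};

/* Insert @n into @root keyed by n->id; false if the ID is already taken. */
static bool id_tree_insert(struct rb_root *root, struct id_node *n)
{
	struct rb_node **pp = &root->rb_node, *parent = NULL;

	while (*pp) {
		struct id_node *x = rb_entry(*pp, struct id_node, node);

		parent = *pp;
		if (n->id < x->id)
			pp = &(*pp)->rb_left;
		else if (n->id > x->id)
			pp = &(*pp)->rb_right;
		else
			return false;		/* duplicate ID */
	}

	rb_link_node(&n->node, parent, pp);	/* hang off @parent at @pp */
	rb_insert_color(&n->node, root);	/* rebalance and recolour */
	return true;
}

Note that the patched code takes the rxrpc_call_got_userid reference before linking, so a node's presence in the tree always implies a held reference.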
@@ -161,11 +157,8 @@
 }
 
 /*
- * Preallocate sufficient service connections, calls and peers to cover the
- * entire backlog of a socket. When a new call comes in, if we don't have
- * sufficient of each available, the call gets rejected as busy or ignored.
- *
- * The backlog is replenished when a connection is accepted or rejected.
+ * Allocate the preallocation buffers for incoming service calls. These must
+ * be charged manually.
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 {
@@ -177,13 +170,6 @@
 			return -ENOMEM;
 		rx->backlog = b;
 	}
-
-	if (rx->discard_new_call)
-		return 0;
-
-	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
-					  atomic_inc_return(&rxrpc_debug_id)) == 0)
-		;
 
 	return 0;
 }
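rxrpc_service_prealloc() now does nothing but allocate the backlog buffers; the loop that silently filled the pool with ID-0 calls is gone because every slot must now carry a unique user call ID. Kernel-side services charge slots through rxrpc_kernel_charge_accept(). The sketch below assumes the six-argument form of that API from kernels of this vintage; struct my_call, my_notify_rx() and my_rx_attach() are placeholders for the service's own state and callbacks:

#include <net/af_rxrpc.h>

struct my_call {
	struct rxrpc_call *rxcall;	/* filled in at attach time */
};

static void my_notify_rx(struct sock *sk, struct rxrpc_call *call,
			 unsigned long user_call_ID)
{
	/* Queue the my_call that user_call_ID points at for servicing. */
}

static void my_rx_attach(struct rxrpc_call *call, unsigned long user_call_ID)
{
	struct my_call *mc = (struct my_call *)user_call_ID;

	mc->rxcall = call;		/* stash the rxrpc call */
}

static int my_charge_one(struct socket *srv_sock, struct my_call *mc)
{
	/* The my_call pointer doubles as the unique user call ID. */
	return rxrpc_kernel_charge_accept(srv_sock, my_notify_rx, my_rx_attach,
					  (unsigned long)mc, GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

This is roughly the shape of what AFS does when it charges its service socket's accept pool.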
@@ -252,6 +238,22 @@
 }
 
 /*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	ktime_t now = skb->tstamp;
+
+	if (call->peer->rtt_count < 3 ||
+	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+				  true, true,
+				  rxrpc_propose_ack_ping_for_params);
+}
+
+/*
  * Allocate a new incoming call from the prealloc pool, along with a connection
  * and a peer as necessary.
  */
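The new rxrpc_send_ping() helper probes the peer for its receive window and MTU, but only while the RTT cache is cold (fewer than three samples) or the previous probe has gone stale. The staleness test is the usual ktime idiom, "last + 1000ms is before now"; in isolation, as a hedged sketch:

#include <linux/ktime.h>

/* True if at least @ms milliseconds separate @last from @now (sketch). */
static bool probe_is_stale(ktime_t last, ktime_t now, int ms)
{
	return ktime_before(ktime_add_ms(last, ms), now);
}

So once the RTT cache is warm, parameter pings are rate-limited to one per second per peer.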
@@ -259,6 +261,8 @@
 						    struct rxrpc_local *local,
 						    struct rxrpc_peer *peer,
 						    struct rxrpc_connection *conn,
+						    const struct rxrpc_security *sec,
+						    struct key *key,
 						    struct sk_buff *skb)
 {
 	struct rxrpc_backlog *b = rx->backlog;
@@ -288,7 +292,7 @@
 		peer = NULL;
 	if (!peer) {
 		peer = b->peer_backlog[peer_tail];
-		if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+		if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
 			return NULL;
 		b->peer_backlog[peer_tail] = NULL;
 		smp_store_release(&b->peer_backlog_tail,
@@ -306,7 +310,7 @@
 		conn->params.local = rxrpc_get_local(local);
 		conn->params.peer = peer;
 		rxrpc_see_connection(conn);
-		rxrpc_new_incoming_connection(rx, conn, skb);
+		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
 	} else {
 		rxrpc_get_connection(conn);
 	}
@@ -319,6 +323,8 @@
 
 	rxrpc_see_call(call);
 	call->conn = conn;
+	call->security = conn->security;
+	call->security_ix = conn->security_ix;
 	call->peer = rxrpc_get_peer(conn->params.peer);
 	call->cong_cwnd = call->peer->cong_cwnd;
 	return call;
@@ -344,9 +350,11 @@
 					   struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	const struct rxrpc_security *sec = NULL;
 	struct rxrpc_connection *conn;
 	struct rxrpc_peer *peer = NULL;
-	struct rxrpc_call *call;
+	struct rxrpc_call *call = NULL;
+	struct key *key = NULL;
 
 	_enter("");
 
@@ -357,9 +365,7 @@
 				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 		skb->priority = RX_INVALID_OPERATION;
-		_leave(" = NULL [close]");
-		call = NULL;
-		goto out;
+		goto no_call;
 	}
 
 	/* The peer, connection and call may all have sprung into existence due
@@ -369,28 +375,18 @@
 	 */
 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
 
-	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
+	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
+		goto no_call;
+
+	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
+	key_put(key);
 	if (!call) {
 		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
-		_leave(" = NULL [busy]");
-		call = NULL;
-		goto out;
+		goto no_call;
 	}
 
 	trace_rxrpc_receive(call, rxrpc_receive_incoming,
 			    sp->hdr.serial, sp->hdr.seq);
-
-	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
-	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
-	 * notification is generated.
-	 *
-	 * The BUG should never happen because the kernel should be well
-	 * behaved enough not to access the call before the first notification
-	 * event and userspace is prevented from doing so until the state is
-	 * appropriate.
-	 */
-	if (!mutex_trylock(&call->user_mutex))
-		BUG();
 
 	/* Make the call live. */
 	rxrpc_incoming_call(rx, call, skb);
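Two things happen in this hunk. First, when no connection matches, the security class and a decoding key are looked up before the call is allocated, and the local key reference is dropped with key_put() as soon as rxrpc_alloc_incoming_call() returns; key_put() is NULL-safe, so the shared failure path needs no extra test. Second, the mutex_trylock()/BUG() dance disappears, since the call is now published under rx->incoming_lock rather than handed over locked. The key-reference discipline in sketch form, where my_look_up_key() and my_consume_key() are hypothetical stand-ins:

#include <linux/key.h>

static int my_look_up_key(struct key **_key)	/* hypothetical lookup */
{
	*_key = NULL;		/* stand-in: no key required */
	return 0;
}

static void my_consume_key(struct key *key)	/* hypothetical consumer */
{
	/* would take its own reference if it kept @key */
}

static void key_usage_example(void)
{
	struct key *key = NULL;

	if (my_look_up_key(&key) < 0)	/* may legitimately leave key NULL */
		return;
	my_consume_key(key);	/* callee takes its own ref if it keeps it */
	key_put(key);		/* drop ours; key_put(NULL) is a no-op */
}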
@@ -398,8 +394,6 @@
 
 	if (rx->notify_new_call)
 		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
-	else
-		sk_acceptq_added(&rx->sk);
 
 	spin_lock(&conn->state_lock);
 	switch (conn->state) {
@@ -411,12 +405,8 @@
 
 	case RXRPC_CONN_SERVICE:
 		write_lock(&call->state_lock);
-		if (call->state < RXRPC_CALL_COMPLETE) {
-			if (rx->discard_new_call)
-				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-			else
-				call->state = RXRPC_CALL_SERVER_ACCEPTING;
-		}
+		if (call->state < RXRPC_CALL_COMPLETE)
+			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
 		write_unlock(&call->state_lock);
 		break;
 
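With the explicit acceptance phase gone, a call arriving on a live service connection goes straight to RXRPC_CALL_SERVER_RECV_REQUEST; the RXRPC_CALL_SERVER_ACCEPTING state and the rx->discard_new_call special case are no longer needed here. The guard relies on the call-state enum being declared in lifecycle order, so a single comparison means "not yet terminated". Illustrative only, with a made-up enum:

enum my_call_state {
	MY_CALL_SERVER_PREALLOC,	/* lifecycle order matters */
	MY_CALL_SERVER_RECV_REQUEST,
	MY_CALL_SERVER_SEND_REPLY,
	MY_CALL_COMPLETE,		/* must remain the terminal state */
};

static void set_state_if_live(enum my_call_state *state,
			      enum my_call_state new_state)
{
	if (*state < MY_CALL_COMPLETE)	/* ordering makes this test valid */
		*state = new_state;
}

In the patched code the same test-and-set runs under call->state_lock, as above.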
@@ -432,9 +422,9 @@
 		BUG();
 	}
 	spin_unlock(&conn->state_lock);
+	spin_unlock(&rx->incoming_lock);
 
-	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
-		rxrpc_notify_socket(call);
+	rxrpc_send_ping(call, skb);
 
 	/* We have to discard the prealloc queue's ref here and rely on a
 	 * combination of the RCU read lock and refs held either by the socket
@@ -444,193 +434,27 @@
 	rxrpc_put_call(call, rxrpc_call_put);
 
 	_leave(" = %p{%d}", call, call->debug_id);
-out:
+	return call;
+
+no_call:
 	spin_unlock(&rx->incoming_lock);
-	return call;
+	_leave(" = NULL [%u]", skb->mark);
+	return NULL;
 }
 
 /*
- * handle acceptance of a call by userspace
- * - assign the user call ID to the call at the front of the queue
- * - called with the socket locked.
+ * Charge up socket with preallocated calls, attaching user call IDs.
  */
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
-				     unsigned long user_call_ID,
-				     rxrpc_notify_rx_t notify_rx)
-	__releases(&rx->sk.sk_lock.slock)
-	__acquires(call->user_mutex)
+int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
 {
-	struct rxrpc_call *call;
-	struct rb_node *parent, **pp;
-	int ret;
+	struct rxrpc_backlog *b = rx->backlog;
 
-	_enter(",%lx", user_call_ID);
+	if (rx->sk.sk_state == RXRPC_CLOSE)
+		return -ESHUTDOWN;
 
-	ASSERT(!irqs_disabled());
-
-	write_lock(&rx->call_lock);
-
-	if (list_empty(&rx->to_be_accepted)) {
-		write_unlock(&rx->call_lock);
-		release_sock(&rx->sk);
-		kleave(" = -ENODATA [empty]");
-		return ERR_PTR(-ENODATA);
-	}
-
-	/* check the user ID isn't already in use */
-	pp = &rx->calls.rb_node;
-	parent = NULL;
-	while (*pp) {
-		parent = *pp;
-		call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-		if (user_call_ID < call->user_call_ID)
-			pp = &(*pp)->rb_left;
-		else if (user_call_ID > call->user_call_ID)
-			pp = &(*pp)->rb_right;
-		else
-			goto id_in_use;
-	}
-
-	/* Dequeue the first call and check it's still valid. We gain
-	 * responsibility for the queue's reference.
-	 */
-	call = list_entry(rx->to_be_accepted.next,
-			  struct rxrpc_call, accept_link);
-	write_unlock(&rx->call_lock);
-
-	/* We need to gain the mutex from the interrupt handler without
-	 * upsetting lockdep, so we have to release it there and take it here.
-	 * We are, however, still holding the socket lock, so other accepts
-	 * must wait for us and no one can add the user ID behind our backs.
-	 */
-	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
-		release_sock(&rx->sk);
-		kleave(" = -ERESTARTSYS");
-		return ERR_PTR(-ERESTARTSYS);
-	}
-
-	write_lock(&rx->call_lock);
-	list_del_init(&call->accept_link);
-	sk_acceptq_removed(&rx->sk);
-	rxrpc_see_call(call);
-
-	/* Find the user ID insertion point. */
-	pp = &rx->calls.rb_node;
-	parent = NULL;
-	while (*pp) {
-		parent = *pp;
-		call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-		if (user_call_ID < call->user_call_ID)
-			pp = &(*pp)->rb_left;
-		else if (user_call_ID > call->user_call_ID)
-			pp = &(*pp)->rb_right;
-		else
-			BUG();
-	}
-
-	write_lock_bh(&call->state_lock);
-	switch (call->state) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-		break;
-	case RXRPC_CALL_COMPLETE:
-		ret = call->error;
-		goto out_release;
-	default:
-		BUG();
-	}
-
-	/* formalise the acceptance */
-	call->notify_rx = notify_rx;
-	call->user_call_ID = user_call_ID;
-	rxrpc_get_call(call, rxrpc_call_got_userid);
-	rb_link_node(&call->sock_node, parent, pp);
-	rb_insert_color(&call->sock_node, &rx->calls);
-	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
-		BUG();
-
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	rxrpc_notify_socket(call);
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	release_sock(&rx->sk);
-	_leave(" = %p{%d}", call, call->debug_id);
-	return call;
-
-out_release:
-	_debug("release %p", call);
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	rxrpc_release_call(rx, call);
-	rxrpc_put_call(call, rxrpc_call_put);
-	goto out;
-
-id_in_use:
-	ret = -EBADSLT;
-	write_unlock(&rx->call_lock);
-out:
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	release_sock(&rx->sk);
-	_leave(" = %d", ret);
-	return ERR_PTR(ret);
-}
-
-/*
- * Handle rejection of a call by userspace
- * - reject the call at the front of the queue
- */
-int rxrpc_reject_call(struct rxrpc_sock *rx)
-{
-	struct rxrpc_call *call;
-	bool abort = false;
-	int ret;
-
-	_enter("");
-
-	ASSERT(!irqs_disabled());
-
-	write_lock(&rx->call_lock);
-
-	if (list_empty(&rx->to_be_accepted)) {
-		write_unlock(&rx->call_lock);
-		return -ENODATA;
-	}
-
-	/* Dequeue the first call and check it's still valid. We gain
-	 * responsibility for the queue's reference.
-	 */
-	call = list_entry(rx->to_be_accepted.next,
-			  struct rxrpc_call, accept_link);
-	list_del_init(&call->accept_link);
-	sk_acceptq_removed(&rx->sk);
-	rxrpc_see_call(call);
-
-	write_lock_bh(&call->state_lock);
-	switch (call->state) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
-		abort = true;
-		/* fall through */
-	case RXRPC_CALL_COMPLETE:
-		ret = call->error;
-		goto out_discard;
-	default:
-		BUG();
-	}
-
-out_discard:
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	if (abort) {
-		rxrpc_send_abort_packet(call);
-		rxrpc_release_call(rx, call);
-		rxrpc_put_call(call, rxrpc_call_put);
-	}
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	_leave(" = %d", ret);
-	return ret;
+	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
+					  GFP_KERNEL,
+					  atomic_inc_return(&rxrpc_debug_id));
}
 
 /*
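rxrpc_user_charge_accept() is the userspace counterpart of the kernel charging path: instead of fielding calls after they arrive via the removed rxrpc_accept_call()/rxrpc_reject_call() pair, the application pre-charges the accept pool, one unique user call ID per slot, and the ID goes straight into the socket's rb-tree through rxrpc_service_prealloc_one(). In mainline this function is reached from sendmsg() via an RXRPC_CHARGE_ACCEPT control message; a sketch under that assumption:

#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

/* Charge one accept slot on an AF_RXRPC service socket (sketch). */
static int charge_accept(int fd, unsigned long user_call_id)
{
	struct msghdr msg;
	unsigned char control[CMSG_SPACE(sizeof(user_call_id))];
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(control, 0, sizeof(control));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_CHARGE_ACCEPT;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));
	msg.msg_controllen = cmsg->cmsg_len;

	/* No payload; fails with ESHUTDOWN once the socket is closed. */
	return sendmsg(fd, &msg, 0);
}

A duplicate ID is caught by the rb-tree walk in rxrpc_service_prealloc_one() (the id_in_use path above), so IDs must be unique per socket.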
|---|