2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/net/rxrpc/call_accept.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* incoming call handling
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -43,8 +39,9 @@
 				      unsigned int debug_id)
 {
 	const void *here = __builtin_return_address(0);
-	struct rxrpc_call *call;
+	struct rxrpc_call *call, *xcall;
 	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+	struct rb_node *parent, **pp;
 	int max, tmp;
 	unsigned int size = RXRPC_BACKLOG_MAX;
 	unsigned int head, tail, call_head, call_tail;
@@ -94,11 +91,11 @@
 					   (head + 1) & (size - 1));
 
 		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
-				 atomic_read(&conn->usage), here);
+				 refcount_read(&conn->ref), here);
 	}
 
 	/* Now it gets complicated, because calls get registered with the
-	 * socket here, particularly if a user ID is preassigned by the user.
+	 * socket here, with a user ID preassigned by the user.
 	 */
 	call = rxrpc_alloc_call(rx, gfp, debug_id);
 	if (!call)
@@ -106,38 +103,37 @@
 	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
 	call->state = RXRPC_CALL_SERVER_PREALLOC;
 
-	trace_rxrpc_call(call, rxrpc_call_new_service,
-			 atomic_read(&call->usage),
+	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
+			 refcount_read(&call->ref),
 			 here, (const void *)user_call_ID);
 
 	write_lock(&rx->call_lock);
+
+	/* Check the user ID isn't already in use */
+	pp = &rx->calls.rb_node;
+	parent = NULL;
+	while (*pp) {
+		parent = *pp;
+		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+		if (user_call_ID < xcall->user_call_ID)
+			pp = &(*pp)->rb_left;
+		else if (user_call_ID > xcall->user_call_ID)
+			pp = &(*pp)->rb_right;
+		else
+			goto id_in_use;
+	}
+
+	call->user_call_ID = user_call_ID;
+	call->notify_rx = notify_rx;
 	if (user_attach_call) {
-		struct rxrpc_call *xcall;
-		struct rb_node *parent, **pp;
-
-		/* Check the user ID isn't already in use */
-		pp = &rx->calls.rb_node;
-		parent = NULL;
-		while (*pp) {
-			parent = *pp;
-			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-			if (user_call_ID < xcall->user_call_ID)
-				pp = &(*pp)->rb_left;
-			else if (user_call_ID > xcall->user_call_ID)
-				pp = &(*pp)->rb_right;
-			else
-				goto id_in_use;
-		}
-
-		call->user_call_ID = user_call_ID;
-		call->notify_rx = notify_rx;
 		rxrpc_get_call(call, rxrpc_call_got_kernel);
 		user_attach_call(call, user_call_ID);
-		rxrpc_get_call(call, rxrpc_call_got_userid);
-		rb_link_node(&call->sock_node, parent, pp);
-		rb_insert_color(&call->sock_node, &rx->calls);
-		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
 	}
+
+	rxrpc_get_call(call, rxrpc_call_got_userid);
+	rb_link_node(&call->sock_node, parent, pp);
+	rb_insert_color(&call->sock_node, &rx->calls);
+	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
 
 	list_add(&call->sock_link, &rx->sock_calls);
 
@@ -161,11 +157,8 @@
 }
 
 /*
- * Preallocate sufficient service connections, calls and peers to cover the
- * entire backlog of a socket. When a new call comes in, if we don't have
- * sufficient of each available, the call gets rejected as busy or ignored.
- *
- * The backlog is replenished when a connection is accepted or rejected.
+ * Allocate the preallocation buffers for incoming service calls. These must
+ * be charged manually.
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 {
@@ -177,13 +170,6 @@
 			return -ENOMEM;
 		rx->backlog = b;
 	}
-
-	if (rx->discard_new_call)
-		return 0;
-
-	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
-					  atomic_inc_return(&rxrpc_debug_id)) == 0)
-		;
 
 	return 0;
 }
@@ -252,6 +238,22 @@
 }
 
 /*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	ktime_t now = skb->tstamp;
+
+	if (call->peer->rtt_count < 3 ||
+	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+				  true, true,
+				  rxrpc_propose_ack_ping_for_params);
+}
+
+/*
  * Allocate a new incoming call from the prealloc pool, along with a connection
  * and a peer as necessary.
  */
@@ -259,6 +261,8 @@
 						    struct rxrpc_local *local,
 						    struct rxrpc_peer *peer,
 						    struct rxrpc_connection *conn,
+						    const struct rxrpc_security *sec,
+						    struct key *key,
 						    struct sk_buff *skb)
 {
 	struct rxrpc_backlog *b = rx->backlog;
@@ -288,7 +292,7 @@
 		peer = NULL;
 	if (!peer) {
 		peer = b->peer_backlog[peer_tail];
-		if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+		if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
 			return NULL;
 		b->peer_backlog[peer_tail] = NULL;
 		smp_store_release(&b->peer_backlog_tail,
@@ -306,7 +310,7 @@
 		conn->params.local = rxrpc_get_local(local);
 		conn->params.peer = peer;
 		rxrpc_see_connection(conn);
-		rxrpc_new_incoming_connection(rx, conn, skb);
+		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
 	} else {
 		rxrpc_get_connection(conn);
 	}
@@ -319,6 +323,8 @@
 
 	rxrpc_see_call(call);
 	call->conn = conn;
+	call->security = conn->security;
+	call->security_ix = conn->security_ix;
 	call->peer = rxrpc_get_peer(conn->params.peer);
 	call->cong_cwnd = call->peer->cong_cwnd;
 	return call;
@@ -344,9 +350,11 @@
 					   struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	const struct rxrpc_security *sec = NULL;
 	struct rxrpc_connection *conn;
 	struct rxrpc_peer *peer = NULL;
-	struct rxrpc_call *call;
+	struct rxrpc_call *call = NULL;
+	struct key *key = NULL;
 
 	_enter("");
 
@@ -357,9 +365,7 @@
 				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 		skb->priority = RX_INVALID_OPERATION;
-		_leave(" = NULL [close]");
-		call = NULL;
-		goto out;
+		goto no_call;
 	}
 
 	/* The peer, connection and call may all have sprung into existence due
@@ -369,28 +375,18 @@
 	 */
 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
 
-	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
+	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
+		goto no_call;
+
+	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
+	key_put(key);
 	if (!call) {
 		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
-		_leave(" = NULL [busy]");
-		call = NULL;
-		goto out;
+		goto no_call;
 	}
 
 	trace_rxrpc_receive(call, rxrpc_receive_incoming,
 			    sp->hdr.serial, sp->hdr.seq);
-
-	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
-	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
-	 * notification is generated.
-	 *
-	 * The BUG should never happen because the kernel should be well
-	 * behaved enough not to access the call before the first notification
-	 * event and userspace is prevented from doing so until the state is
-	 * appropriate.
-	 */
-	if (!mutex_trylock(&call->user_mutex))
-		BUG();
 
 	/* Make the call live. */
 	rxrpc_incoming_call(rx, call, skb);
@@ -398,8 +394,6 @@
 
 	if (rx->notify_new_call)
 		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
-	else
-		sk_acceptq_added(&rx->sk);
 
 	spin_lock(&conn->state_lock);
 	switch (conn->state) {
@@ -411,12 +405,8 @@
 
 	case RXRPC_CONN_SERVICE:
 		write_lock(&call->state_lock);
-		if (call->state < RXRPC_CALL_COMPLETE) {
-			if (rx->discard_new_call)
-				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-			else
-				call->state = RXRPC_CALL_SERVER_ACCEPTING;
-		}
+		if (call->state < RXRPC_CALL_COMPLETE)
+			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
 		write_unlock(&call->state_lock);
 		break;
 
@@ -432,9 +422,9 @@
 		BUG();
 	}
 	spin_unlock(&conn->state_lock);
+	spin_unlock(&rx->incoming_lock);
 
-	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
-		rxrpc_notify_socket(call);
+	rxrpc_send_ping(call, skb);
 
 	/* We have to discard the prealloc queue's ref here and rely on a
 	 * combination of the RCU read lock and refs held either by the socket
@@ -444,193 +434,27 @@
 	rxrpc_put_call(call, rxrpc_call_put);
 
 	_leave(" = %p{%d}", call, call->debug_id);
-out:
+	return call;
+
+no_call:
 	spin_unlock(&rx->incoming_lock);
-	return call;
+	_leave(" = NULL [%u]", skb->mark);
+	return NULL;
 }
 
 /*
- * handle acceptance of a call by userspace
- * - assign the user call ID to the call at the front of the queue
- * - called with the socket locked.
+ * Charge up socket with preallocated calls, attaching user call IDs.
  */
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
-				     unsigned long user_call_ID,
-				     rxrpc_notify_rx_t notify_rx)
-	__releases(&rx->sk.sk_lock.slock)
-	__acquires(call->user_mutex)
+int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
 {
-	struct rxrpc_call *call;
-	struct rb_node *parent, **pp;
-	int ret;
+	struct rxrpc_backlog *b = rx->backlog;
 
-	_enter(",%lx", user_call_ID);
+	if (rx->sk.sk_state == RXRPC_CLOSE)
+		return -ESHUTDOWN;
 
-	ASSERT(!irqs_disabled());
-
-	write_lock(&rx->call_lock);
-
-	if (list_empty(&rx->to_be_accepted)) {
-		write_unlock(&rx->call_lock);
-		release_sock(&rx->sk);
-		kleave(" = -ENODATA [empty]");
-		return ERR_PTR(-ENODATA);
-	}
-
-	/* check the user ID isn't already in use */
-	pp = &rx->calls.rb_node;
-	parent = NULL;
-	while (*pp) {
-		parent = *pp;
-		call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-		if (user_call_ID < call->user_call_ID)
-			pp = &(*pp)->rb_left;
-		else if (user_call_ID > call->user_call_ID)
-			pp = &(*pp)->rb_right;
-		else
-			goto id_in_use;
-	}
-
-	/* Dequeue the first call and check it's still valid. We gain
-	 * responsibility for the queue's reference.
-	 */
-	call = list_entry(rx->to_be_accepted.next,
-			  struct rxrpc_call, accept_link);
-	write_unlock(&rx->call_lock);
-
-	/* We need to gain the mutex from the interrupt handler without
-	 * upsetting lockdep, so we have to release it there and take it here.
-	 * We are, however, still holding the socket lock, so other accepts
-	 * must wait for us and no one can add the user ID behind our backs.
-	 */
-	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
-		release_sock(&rx->sk);
-		kleave(" = -ERESTARTSYS");
-		return ERR_PTR(-ERESTARTSYS);
-	}
-
-	write_lock(&rx->call_lock);
-	list_del_init(&call->accept_link);
-	sk_acceptq_removed(&rx->sk);
-	rxrpc_see_call(call);
-
-	/* Find the user ID insertion point. */
-	pp = &rx->calls.rb_node;
-	parent = NULL;
-	while (*pp) {
-		parent = *pp;
-		call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-		if (user_call_ID < call->user_call_ID)
-			pp = &(*pp)->rb_left;
-		else if (user_call_ID > call->user_call_ID)
-			pp = &(*pp)->rb_right;
-		else
-			BUG();
-	}
-
-	write_lock_bh(&call->state_lock);
-	switch (call->state) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-		break;
-	case RXRPC_CALL_COMPLETE:
-		ret = call->error;
-		goto out_release;
-	default:
-		BUG();
-	}
-
-	/* formalise the acceptance */
-	call->notify_rx = notify_rx;
-	call->user_call_ID = user_call_ID;
-	rxrpc_get_call(call, rxrpc_call_got_userid);
-	rb_link_node(&call->sock_node, parent, pp);
-	rb_insert_color(&call->sock_node, &rx->calls);
-	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
-		BUG();
-
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	rxrpc_notify_socket(call);
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	release_sock(&rx->sk);
-	_leave(" = %p{%d}", call, call->debug_id);
-	return call;
-
-out_release:
-	_debug("release %p", call);
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	rxrpc_release_call(rx, call);
-	rxrpc_put_call(call, rxrpc_call_put);
-	goto out;
-
-id_in_use:
-	ret = -EBADSLT;
-	write_unlock(&rx->call_lock);
-out:
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	release_sock(&rx->sk);
-	_leave(" = %d", ret);
-	return ERR_PTR(ret);
-}
-
-/*
- * Handle rejection of a call by userspace
- * - reject the call at the front of the queue
- */
-int rxrpc_reject_call(struct rxrpc_sock *rx)
-{
-	struct rxrpc_call *call;
-	bool abort = false;
-	int ret;
-
-	_enter("");
-
-	ASSERT(!irqs_disabled());
-
-	write_lock(&rx->call_lock);
-
-	if (list_empty(&rx->to_be_accepted)) {
-		write_unlock(&rx->call_lock);
-		return -ENODATA;
-	}
-
-	/* Dequeue the first call and check it's still valid. We gain
-	 * responsibility for the queue's reference.
-	 */
-	call = list_entry(rx->to_be_accepted.next,
-			  struct rxrpc_call, accept_link);
-	list_del_init(&call->accept_link);
-	sk_acceptq_removed(&rx->sk);
-	rxrpc_see_call(call);
-
-	write_lock_bh(&call->state_lock);
-	switch (call->state) {
-	case RXRPC_CALL_SERVER_ACCEPTING:
-		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
-		abort = true;
-		/* fall through */
-	case RXRPC_CALL_COMPLETE:
-		ret = call->error;
-		goto out_discard;
-	default:
-		BUG();
-	}
-
-out_discard:
-	write_unlock_bh(&call->state_lock);
-	write_unlock(&rx->call_lock);
-	if (abort) {
-		rxrpc_send_abort_packet(call);
-		rxrpc_release_call(rx, call);
-		rxrpc_put_call(call, rxrpc_call_put);
-	}
-	rxrpc_service_prealloc(rx, GFP_KERNEL);
-	_leave(" = %d", ret);
-	return ret;
+	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
+					  GFP_KERNEL,
+					  atomic_inc_return(&rxrpc_debug_id));
 }
 
 /*