2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/net/rxrpc/call_object.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* RxRPC individual remote procedure call handling
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -27,7 +23,6 @@
 	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
 	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
 	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
-	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
 	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
 	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
 	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
@@ -45,6 +40,11 @@
 
 struct kmem_cache *rxrpc_call_jar;
 
+static struct semaphore rxrpc_call_limiter =
+	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
+static struct semaphore rxrpc_kernel_call_limiter =
+	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);
+
 static void rxrpc_call_timer_expired(struct timer_list *t)
 {
 	struct rxrpc_call *call = from_timer(call, t, timer);
@@ -53,8 +53,28 @@
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
 		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
-		rxrpc_queue_call(call);
+		__rxrpc_queue_call(call);
+	} else {
+		rxrpc_put_call(call, rxrpc_call_put);
 	}
+}
+
+void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+			     unsigned long expire_at,
+			     unsigned long now,
+			     enum rxrpc_timer_trace why)
+{
+	if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
+		trace_rxrpc_timer(call, why, now);
+		if (timer_reduce(&call->timer, expire_at))
+			rxrpc_put_call(call, rxrpc_call_put_notimer);
+	}
+}
+
+void rxrpc_delete_call_timer(struct rxrpc_call *call)
+{
+	if (del_timer_sync(&call->timer))
+		rxrpc_put_call(call, rxrpc_call_put_timer);
 }
 
 static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
@@ -92,7 +112,7 @@
 found_extant_call:
 	rxrpc_get_call(call, rxrpc_call_got);
 	read_unlock(&rx->call_lock);
-	_leave(" = %p [%d]", call, atomic_read(&call->usage));
+	_leave(" = %p [%d]", call, refcount_read(&call->ref));
 	return call;
 }
 
@@ -140,7 +160,7 @@
 	spin_lock_init(&call->notify_lock);
 	spin_lock_init(&call->input_lock);
 	rwlock_init(&call->state_lock);
-	atomic_set(&call->usage, 1);
+	refcount_set(&call->ref, 1);
 	call->debug_id = debug_id;
 	call->tx_total_len = -1;
 	call->next_rx_timo = 20 * HZ;
@@ -157,6 +177,7 @@
 	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
 
 	call->rxnet = rxnet;
+	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
 	atomic_inc(&rxnet->nr_calls);
 	return call;
 
@@ -213,6 +234,34 @@
 }
 
 /*
+ * Wait for a call slot to become available.
+ */
+static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
+{
+	struct semaphore *limiter = &rxrpc_call_limiter;
+
+	if (p->kernel)
+		limiter = &rxrpc_kernel_call_limiter;
+	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
+		down(limiter);
+		return limiter;
+	}
+	return down_interruptible(limiter) < 0 ? NULL : limiter;
+}
+
+/*
+ * Release a call slot.
+ */
+static void rxrpc_put_call_slot(struct rxrpc_call *call)
+{
+	struct semaphore *limiter = &rxrpc_call_limiter;
+
+	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
+		limiter = &rxrpc_kernel_call_limiter;
+	up(limiter);
+}
+
+/*
  * Set up a call for the given parameters.
  * - Called with the socket lock held, which it must release.
  * - If it returns a call, the call's lock will need releasing by the caller.
@@ -228,22 +277,34 @@
 {
 	struct rxrpc_call *call, *xcall;
 	struct rxrpc_net *rxnet;
+	struct semaphore *limiter;
 	struct rb_node *parent, **pp;
 	const void *here = __builtin_return_address(0);
 	int ret;
 
 	_enter("%p,%lx", rx, p->user_call_ID);
 
+	limiter = rxrpc_get_call_slot(p, gfp);
+	if (!limiter) {
+		release_sock(&rx->sk);
+		return ERR_PTR(-ERESTARTSYS);
+	}
+
 	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
 	if (IS_ERR(call)) {
 		release_sock(&rx->sk);
+		up(limiter);
 		_leave(" = %ld", PTR_ERR(call));
 		return call;
 	}
 
+	call->interruptibility = p->interruptibility;
 	call->tx_total_len = p->tx_total_len;
-	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
+			 refcount_read(&call->ref),
 			 here, (const void *)p->user_call_ID);
+	if (p->kernel)
+		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
 
 	/* We need to protect a partially set up call against the user as we
 	 * will be acting outside the socket lock.
@@ -292,8 +353,8 @@
 	if (ret < 0)
 		goto error_attached_to_socket;
 
-	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
-			 here, NULL);
+	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+			 refcount_read(&call->ref), here, NULL);
 
 	rxrpc_start_call_timer(call);
 
@@ -312,8 +373,8 @@
 	release_sock(&rx->sk);
 	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
 				    RX_CALL_DEAD, -EEXIST);
-	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
-			 here, ERR_PTR(-EEXIST));
+	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+			 refcount_read(&call->ref), here, ERR_PTR(-EEXIST));
 	rxrpc_release_call(rx, call);
 	mutex_unlock(&call->user_mutex);
 	rxrpc_put_call(call, rxrpc_call_put);
@@ -326,55 +387,13 @@
 	 * leave the error to recvmsg() to deal with.
 	 */
 error_attached_to_socket:
-	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
-			 here, ERR_PTR(ret));
+	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+			 refcount_read(&call->ref), here, ERR_PTR(ret));
 	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
 				    RX_CALL_DEAD, ret);
 	_leave(" = c=%08x [err]", call->debug_id);
 	return call;
-}
-
-/*
- * Retry a call to a new address. It is expected that the Tx queue of the call
- * will contain data previously packaged for an old call.
- */
-int rxrpc_retry_client_call(struct rxrpc_sock *rx,
-			    struct rxrpc_call *call,
-			    struct rxrpc_conn_parameters *cp,
-			    struct sockaddr_rxrpc *srx,
-			    gfp_t gfp)
-{
-	const void *here = __builtin_return_address(0);
-	int ret;
-
-	/* Set up or get a connection record and set the protocol parameters,
-	 * including channel number and call ID.
-	 */
-	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
-	if (ret < 0)
-		goto error;
-
-	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
-			 here, NULL);
-
-	rxrpc_start_call_timer(call);
-
-	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
-
-	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
-		rxrpc_queue_call(call);
-
-	_leave(" = 0");
-	return 0;
-
-error:
-	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
-				  RX_CALL_DEAD, ret);
-	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
-			 here, ERR_PTR(ret));
-	_leave(" = %d", ret);
-	return ret;
 }
 
 /*
@@ -395,9 +414,7 @@
 	call->call_id = sp->hdr.callNumber;
 	call->service_id = sp->hdr.serviceId;
 	call->cid = sp->hdr.cid;
-	call->state = RXRPC_CALL_SERVER_ACCEPTING;
-	if (sp->hdr.securityIndex > 0)
-		call->state = RXRPC_CALL_SERVER_SECURING;
+	call->state = RXRPC_CALL_SERVER_SECURING;
 	call->cong_tstamp = skb->tstamp;
 
 	/* Set the channel for this call. We don't get channel_lock as we're
@@ -427,11 +444,13 @@
 bool rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
-	if (n == 0)
+	int n;
+
+	if (!__refcount_inc_not_zero(&call->ref, &n))
 		return false;
 	if (rxrpc_queue_work(&call->processor))
-		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
+				 here, NULL);
 	else
 		rxrpc_put_call(call, rxrpc_call_put_noqueue);
 	return true;
@@ -443,10 +462,11 @@
 bool __rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_read(&call->usage);
+	int n = refcount_read(&call->ref);
 	ASSERTCMP(n, >=, 1);
 	if (rxrpc_queue_work(&call->processor))
-		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
+				 here, NULL);
 	else
 		rxrpc_put_call(call, rxrpc_call_put_noqueue);
 	return true;
@@ -459,10 +479,22 @@
 {
 	const void *here = __builtin_return_address(0);
 	if (call) {
-		int n = atomic_read(&call->usage);
+		int n = refcount_read(&call->ref);
 
-		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
+				 here, NULL);
 	}
+}
+
+bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	if (!__refcount_inc_not_zero(&call->ref, &n))
+		return false;
+	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
+	return true;
 }
 
 /*
@@ -471,9 +503,23 @@
 void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_inc_return(&call->usage);
+	int n;
 
-	trace_rxrpc_call(call, op, n, here, NULL);
+	__refcount_inc(&call->ref, &n);
+	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
+}
+
+/*
+ * Clean up the RxTx skb ring.
+ */
+static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+{
+	int i;
+
+	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
+		call->rxtx_buffer[i] = NULL;
+	}
 }
 
 /*
@@ -484,11 +530,11 @@
 	const void *here = __builtin_return_address(0);
 	struct rxrpc_connection *conn = call->conn;
 	bool put = false;
-	int i;
 
-	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
+	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
 
-	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
+			 refcount_read(&call->ref),
 			 here, (const void *)call->flags);
 
 	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -498,7 +544,8 @@
 		BUG();
 	spin_unlock_bh(&call->lock);
 
-	del_timer_sync(&call->timer);
+	rxrpc_put_call_slot(call);
+	rxrpc_delete_call_timer(call);
 
 	/* Make sure we don't get any more notifications */
 	write_lock_bh(&rx->recvmsg_lock);
@@ -533,70 +580,9 @@
 
 	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
 		rxrpc_disconnect_call(call);
-
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
-		rxrpc_free_skb(call->rxtx_buffer[i],
-			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
-				rxrpc_skb_rx_cleaned));
-		call->rxtx_buffer[i] = NULL;
-	}
-
+	if (call->security)
+		call->security->free_call_crypto(call);
 	_leave("");
-}
-
-/*
- * Prepare a kernel service call for retry.
- */
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
-{
-	const void *here = __builtin_return_address(0);
-	int i;
-	u8 last = 0;
-
-	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
-
-	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
-			 here, (const void *)call->flags);
-
-	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-	ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
-	ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
-	ASSERT(list_empty(&call->recvmsg_link));
-
-	del_timer_sync(&call->timer);
-
-	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
-
-	if (call->conn)
-		rxrpc_disconnect_call(call);
-
-	if (rxrpc_is_service_call(call) ||
-	    !call->tx_phase ||
-	    call->tx_hard_ack != 0 ||
-	    call->rx_hard_ack != 0 ||
-	    call->rx_top != 0)
-		return -EINVAL;
-
-	call->state = RXRPC_CALL_UNINITIALISED;
-	call->completion = RXRPC_CALL_SUCCEEDED;
-	call->call_id = 0;
-	call->cid = 0;
-	call->cong_cwnd = 0;
-	call->cong_extra = 0;
-	call->cong_ssthresh = 0;
-	call->cong_mode = 0;
-	call->cong_dup_acks = 0;
-	call->cong_cumul_acks = 0;
-	call->acks_lowest_nak = 0;
-
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
-		last |= call->rxtx_annotations[i];
-		call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
-		call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
-	}
-
-	_leave(" = 0");
-	return 0;
 }
 
 /*
@@ -636,14 +622,15 @@
 {
 	struct rxrpc_net *rxnet = call->rxnet;
 	const void *here = __builtin_return_address(0);
+	unsigned int debug_id = call->debug_id;
+	bool dead;
 	int n;
 
 	ASSERT(call != NULL);
 
-	n = atomic_dec_return(&call->usage);
-	trace_rxrpc_call(call, op, n, here, NULL);
-	ASSERTCMP(n, >=, 0);
-	if (n == 0) {
+	dead = __refcount_dec_and_test(&call->ref, &n);
+	trace_rxrpc_call(debug_id, op, n, here, NULL);
+	if (dead) {
 		_debug("call %d dead", call->debug_id);
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
@@ -664,6 +651,8 @@
 {
 	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
 	struct rxrpc_net *rxnet = call->rxnet;
+
+	rxrpc_delete_call_timer(call);
 
 	rxrpc_put_connection(call->conn);
 	rxrpc_put_peer(call->peer);
@@ -695,24 +684,15 @@
  */
 void rxrpc_cleanup_call(struct rxrpc_call *call)
 {
-	int i;
-
 	_net("DESTROY CALL %d", call->debug_id);
 
 	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
 
-	del_timer_sync(&call->timer);
-
 	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
 
-	/* Clean up the Rx/Tx buffer */
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
-		rxrpc_free_skb(call->rxtx_buffer[i],
-			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
-				rxrpc_skb_rx_cleaned));
-
-	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
+	rxrpc_cleanup_ring(call);
+	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
 
 	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
 }
@@ -740,7 +720,7 @@
 		list_del_init(&call->link);
 
 		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
+		       call, refcount_read(&call->ref),
 		       rxrpc_call_states[call->state],
 		       call->flags, call->events);
 