2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/include/net/tcp.h
@@ -348,13 +348,14 @@
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
 
-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
-					 const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (icsk->icsk_ack.quick) {
+		/* How many ACKs S/ACKing new data have we sent? */
+		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
 		if (pkts >= icsk->icsk_ack.quick) {
 			icsk->icsk_ack.quick = 0;
 			/* Leaving quickack mode we deflate ATO. */
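
For context, a minimal standalone model of the counting change (hypothetical code, not taken from the kernel tree): the removed signature trusted a caller-supplied pkts count, while the new helper only counts an ACK when one is actually scheduled, i.e. when it S/ACKs new data. A sketch, assuming toy stand-ins for icsk_ack.quick and inet_csk_ack_scheduled():

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: "quick" mirrors icsk->icsk_ack.quick, and
 * "ack_scheduled" plays the role of inet_csk_ack_scheduled().
 */
struct toy_sock {
	unsigned int quick;	/* remaining quick ACKs */
	bool ack_scheduled;	/* is an ACK of new data pending? */
};

static void toy_dec_quickack_mode(struct toy_sock *sk)
{
	if (sk->quick) {
		/* Count the ACK only if it S/ACKs new data. */
		const unsigned int pkts = sk->ack_scheduled ? 1 : 0;

		if (pkts >= sk->quick)
			sk->quick = 0;	/* leaving quickack mode */
		else
			sk->quick -= pkts;
	}
}

int main(void)
{
	struct toy_sock sk = { .quick = 2, .ack_scheduled = false };

	toy_dec_quickack_mode(&sk);	/* pure window update: not counted */
	printf("after window update: quick=%u\n", sk.quick);	/* 2 */

	sk.ack_scheduled = true;
	toy_dec_quickack_mode(&sk);	/* ACK of new data: counted */
	printf("after data ACK: quick=%u\n", sk.quick);	/* 1 */
	return 0;
}

Under this model, pure window updates no longer consume the quick-ack budget.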
@@ -392,6 +393,7 @@
 void tcp_init_metrics(struct sock *sk);
 void tcp_metrics_init(void);
 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
+void __tcp_close(struct sock *sk, long timeout);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
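
The newly declared __tcp_close() is the variant that expects the caller to already hold the socket lock, with tcp_close() remaining a thin wrapper that takes the lock around it. A paraphrased sketch of that relationship (the real body lives in net/ipv4/tcp.c):

/* Paraphrased sketch, not copied verbatim from net/ipv4/tcp.c. */
void tcp_close(struct sock *sk, long timeout)
{
	lock_sock(sk);
	__tcp_close(sk, timeout);	/* caller must hold the socket lock */
	release_sock(sk);
	sock_put(sk);
}

Exposing the lock-free variant lets callers that already hold the socket lock close it without re-taking the lock.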
@@ -1454,25 +1456,38 @@
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
+	int val;
 
-	return tp->keepalive_intvl ? :
-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
+	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
+	 * and do_tcp_setsockopt().
+	 */
+	val = READ_ONCE(tp->keepalive_intvl);
+
+	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
 }
 
 static inline int keepalive_time_when(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
+	int val;
 
-	return tp->keepalive_time ? :
-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
+	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
+	val = READ_ONCE(tp->keepalive_time);
+
+	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
 }
 
 static inline int keepalive_probes(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
+	int val;
 
-	return tp->keepalive_probes ? :
-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
+	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
+	 * and do_tcp_setsockopt().
+	 */
+	val = READ_ONCE(tp->keepalive_probes);
+
+	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
 }
 
 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
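
On the writer side, the setters named in the new comments are expected to pair these READ_ONCE() loads with WRITE_ONCE() stores on the same fields, so the lockless readers above are not subject to load/store tearing. An illustrative sketch of the pairing only (the real tcp_sock_set_keepintvl() and do_tcp_setsockopt() paths also validate the range and keep related timer state in sync):

/* Illustrative pairing only; not the exact kernel setter body. */
static void example_set_keepintvl(struct tcp_sock *tp, int secs)
{
	/* Paired with READ_ONCE() in keepalive_intvl_when() above. */
	WRITE_ONCE(tp->keepalive_intvl, secs * HZ);
}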
@@ -1981,7 +1996,11 @@
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
-	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+	u32 val;
+
+	val = READ_ONCE(tp->notsent_lowat);
+
+	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
 }
 
 /* @wake is one when sk_stream_write_space() calls us.
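
The per-socket value read here is set from user space via the TCP_NOTSENT_LOWAT socket option; when it is zero, the net.ipv4.tcp_notsent_lowat sysctl default applies. A minimal user-space example of setting the per-socket override (illustrative; the 128 KiB value is an arbitrary choice):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	unsigned int lowat = 128 * 1024;	/* limit not-yet-sent data to 128 KiB */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Per-socket override; a value of 0 falls back to the
	 * net.ipv4.tcp_notsent_lowat sysctl, as in tcp_notsent_lowat() above.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
		       &lowat, sizeof(lowat)) < 0)
		perror("setsockopt(TCP_NOTSENT_LOWAT)");
	return 0;
}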