```diff
@@ -243,6 +243,19 @@
 		if (unlikely(len > icsk->icsk_ack.rcv_mss +
 			     MAX_TCP_OPTION_SPACE))
 			tcp_gro_dev_warn(sk, skb, len);
+		/* If the skb has a len of exactly 1*MSS and has the PSH bit
+		 * set then it is likely the end of an application write. So
+		 * more data may not be arriving soon, and yet the data sender
+		 * may be waiting for an ACK if cwnd-bound or using TX zero
+		 * copy. So we set ICSK_ACK_PUSHED here so that
+		 * tcp_cleanup_rbuf() will send an ACK immediately if the app
+		 * reads all of the data and is not ping-pong. If len > MSS
+		 * then this logic does not matter (and does not hurt) because
+		 * tcp_cleanup_rbuf() will always ACK immediately if the app
+		 * reads data and there is more than an MSS of unACKed data.
+		 */
+		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
+			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 	} else {
 		/* Otherwise, we make more careful check taking into account,
 		 * that SACKs block is variable.
```
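The hunk above addresses a delayed-ACK corner case: when a sender's application write ends on an exact MSS boundary, the final segment carries PSH but does not exceed one MSS, so the receiver would otherwise defer the ACK even though a cwnd-bound or zero-copy sender may be stalled waiting for it. Below is a minimal, self-contained userspace model of that receive-side decision; `struct conn`, `rx_segment()`, and `should_ack_now()` are hypothetical stand-ins for the socket state and for the checks in tcp_measure_rcv_mss() and tcp_cleanup_rbuf(), not kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

struct conn {
	unsigned int rcv_mss;	/* estimated peer MSS (icsk_ack.rcv_mss) */
	bool ack_pushed;	/* models the ICSK_ACK_PUSHED bit */
	bool pingpong;		/* interactive-session heuristic */
};

/* Per-segment bookkeeping, mirroring the hunk above: a full-MSS
 * segment carrying PSH likely ends an application write, so flag
 * the pending ACK as "pushed".
 */
static void rx_segment(struct conn *c, unsigned int len, bool psh)
{
	if (len >= c->rcv_mss && psh)
		c->ack_pushed = true;
}

/* Runs when the application drains the receive queue, loosely
 * modeling the tcp_cleanup_rbuf() decision: a pushed ACK on a
 * non-interactive session goes out immediately rather than
 * waiting for the delayed-ACK timer.
 */
static bool should_ack_now(const struct conn *c)
{
	return c->ack_pushed && !c->pingpong;
}

int main(void)
{
	struct conn c = { .rcv_mss = 1448 };

	rx_segment(&c, 1448, true);	/* exactly 1*MSS, PSH set */
	printf("ack immediately: %s\n",
	       should_ack_now(&c) ? "yes" : "no, delayed");
	return 0;
}
```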
```diff
@@ -287,7 +300,7 @@
 	icsk->icsk_ack.quick = quickacks;
 }
 
-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -295,7 +308,6 @@
 	inet_csk_exit_pingpong_mode(sk);
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
-EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
```
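The two hunks above reduce tcp_enter_quickack_mode() to internal linkage, presumably because no callers remain outside this file: marking it `static` lets the compiler inline it, and dropping the EXPORT_SYMBOL() entry removes the symbol from the module export table.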
```diff
@@ -3561,8 +3573,11 @@
 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
 				   u32 *last_oow_ack_time)
 {
-	if (*last_oow_ack_time) {
-		s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
+	/* Paired with the WRITE_ONCE() in this function. */
+	u32 val = READ_ONCE(*last_oow_ack_time);
+
+	if (val) {
+		s32 elapsed = (s32)(tcp_jiffies32 - val);
 
 		if (0 <= elapsed &&
 		    elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
@@ -3571,7 +3586,10 @@
 		}
 	}
 
-	*last_oow_ack_time = tcp_jiffies32;
+	/* Paired with the prior READ_ONCE() and with itself,
+	 * as we might be lockless.
+	 */
+	WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
 
 	return false;	/* not rate-limited: go ahead, send dupack now! */
 }
```
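The hunk above annotates a lockless read-check-update of `last_oow_ack_time`: concurrent paths may call __tcp_oow_rate_limited() without a common lock, so the READ_ONCE()/WRITE_ONCE() pair makes the racy accesses tear-free and documents them for tools like KCSAN; a lost update at worst lets one extra out-of-window ACK through, which a rate limiter tolerates. Here is a sketch of the same pattern as a self-contained userspace program, using C11 relaxed atomics in place of the kernel macros (all names are hypothetical):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RATELIMIT_TICKS 500	/* models sysctl_tcp_invalid_ratelimit */

static uint32_t now_ticks;	/* models tcp_jiffies32 */

static bool rate_limited(_Atomic uint32_t *last_stamp)
{
	/* Pairs with the store below (READ_ONCE() in the kernel). */
	uint32_t val = atomic_load_explicit(last_stamp, memory_order_relaxed);

	if (val) {
		/* Signed subtraction handles timestamp wraparound. */
		int32_t elapsed = (int32_t)(now_ticks - val);

		if (elapsed >= 0 && elapsed < RATELIMIT_TICKS)
			return true;	/* too soon: suppress this event */
	}

	/* Pairs with the load above (WRITE_ONCE() in the kernel). */
	atomic_store_explicit(last_stamp, now_ticks, memory_order_relaxed);
	return false;
}

int main(void)
{
	static _Atomic uint32_t last;

	now_ticks = 1000;
	printf("first event limited?  %d\n", rate_limited(&last));	/* 0 */
	now_ticks = 1100;
	printf("second event limited? %d\n", rate_limited(&last));	/* 1 */
	now_ticks = 1600;
	printf("third event limited?  %d\n", rate_limited(&last));	/* 0 */
	return 0;
}
```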
```diff
@@ -4324,7 +4342,7 @@
 
 	inet_csk_schedule_ack(sk);
 
-	sk->sk_shutdown |= RCV_SHUTDOWN;
+	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
 	sock_set_flag(sk, SOCK_DONE);
 
 	switch (sk->sk_state) {
```
```diff
@@ -6506,7 +6524,7 @@
 			break;
 
 		tcp_set_state(sk, TCP_FIN_WAIT2);
-		sk->sk_shutdown |= SEND_SHUTDOWN;
+		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
 
 		sk_dst_confirm(sk);
 
```
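The last two hunks annotate the writers of `sk->sk_shutdown` for the same reason: lockless readers (poll paths, for instance) can load the field without holding the socket lock, so the store is wrapped in WRITE_ONCE() to rule out store tearing, while the read-modify-write itself stays plain because writers are serialized by the socket lock. A hedged userspace sketch of that split, with hypothetical names and C11 relaxed atomics standing in for WRITE_ONCE()/READ_ONCE():

```c
#include <stdatomic.h>
#include <stdio.h>

#define RCV_SHUTDOWN_F	1	/* models RCV_SHUTDOWN */
#define SEND_SHUTDOWN_F	2	/* models SEND_SHUTDOWN */

struct sock_model {
	_Atomic unsigned char shutdown;	/* models sk->sk_shutdown */
};

/* Writer side; in the kernel the socket lock serializes writers,
 * so reading the current value needs no special ordering. Only
 * the store is annotated, modeling
 * WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | flag).
 */
static void set_shutdown(struct sock_model *sk, unsigned char flag)
{
	unsigned char cur = atomic_load_explicit(&sk->shutdown,
						 memory_order_relaxed);

	atomic_store_explicit(&sk->shutdown, cur | flag,
			      memory_order_relaxed);
}

/* Lockless reader, as a poll() implementation would be;
 * pairs with the annotated store above.
 */
static unsigned char read_shutdown(struct sock_model *sk)
{
	return atomic_load_explicit(&sk->shutdown, memory_order_relaxed);
}

int main(void)
{
	struct sock_model sk = { 0 };

	set_shutdown(&sk, RCV_SHUTDOWN_F);	/* peer sent FIN */
	set_shutdown(&sk, SEND_SHUTDOWN_F);	/* we sent FIN */
	printf("shutdown mask: %u\n", read_shutdown(&sk));	/* 3 */
	return 0;
}
```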