@@ -1 +1 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system.  INET is implemented using the BSD Socket
@@ -7 +8 @@
  *
  *		IPv4 specific functions
  *
- *
  *		code split from:
  *		  linux/ipv4/tcp.c
  *		  linux/ipv4/tcp_input.c
  *		  linux/ipv4/tcp_output.c
  *
  *	See tcp.c for author information
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
  */

 /*
@@ -81 +76 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/inetdevice.h>
+#include <linux/btf_ids.h>

 #include <crypto/hash.h>
 #include <linux/scatterlist.h>
@@ -110 +106 @@

 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 {
+	int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
 	const struct inet_timewait_sock *tw = inet_twsk(sktw);
 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

 	if (reuse == 2) {
 		/* Still does not detect *everything* that goes through
@@ -126 +122 @@
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == AF_INET6) {
 			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
-			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
-			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
+			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
 			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
-			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
-			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
+			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
 #endif
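The first hunk in tcp_twsk_unique() takes the sysctl_tcp_tw_reuse value through a single READ_ONCE() before any other work, so the function acts on one consistent snapshot even though the tunable can be rewritten concurrently. A minimal standalone model of that pattern, with C11 atomics standing in for the kernel's READ_ONCE() (the tunable and its values here are illustrative):

    /* Standalone sketch; C11 atomics model the kernel's marked load. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int sysctl_tcp_tw_reuse = 2;   /* hypothetical tunable */

    static int twsk_unique_model(void)
    {
            /* One marked load: every later test sees the same snapshot,
             * even if another thread stores a new value meanwhile. */
            int reuse = atomic_load_explicit(&sysctl_tcp_tw_reuse,
                                             memory_order_relaxed);

            if (reuse == 2)
                    return 1;      /* loopback-only fast-reuse path */
            return reuse;          /* 0 or 1: plain policy */
    }

    int main(void)
    {
            printf("reuse decision: %d\n", twsk_unique_model());
            return 0;
    }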
@@ -328 +322 @@
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
+	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+		inet_reset_saddr(sk);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
@@ -410 +406 @@
 }
 EXPORT_SYMBOL(tcp_req_err);

+/* TCP-LD (RFC 6069) logic */
+void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	s32 remaining;
+	u32 delta_us;
+
+	if (sock_owned_by_user(sk))
+		return;
+
+	if (seq != tp->snd_una || !icsk->icsk_retransmits ||
+	    !icsk->icsk_backoff)
+		return;
+
+	skb = tcp_rtx_queue_head(sk);
+	if (WARN_ON_ONCE(!skb))
+		return;
+
+	icsk->icsk_backoff--;
+	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
+	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
+	tcp_mstamp_refresh(tp);
+	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
+	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
+
+	if (remaining > 0) {
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+					  remaining, TCP_RTO_MAX);
+	} else {
+		/* RTO revert clocked out retransmission.
+		 * Will retransmit now.
+		 */
+		tcp_retransmit_timer(sk);
+	}
+}
+EXPORT_SYMBOL(tcp_ld_RTO_revert);
+
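tcp_ld_RTO_revert() factors the RFC 6069 (TCP-LD) backoff revert out of the ICMP error handler: undo one exponential-backoff doubling, recompute the RTO, then either re-arm the retransmit timer with whatever time is left or retransmit immediately. A standalone sketch of just that arithmetic, with plain milliseconds standing in for jiffies and purely illustrative constants:

    #include <stdio.h>

    /* Illustrative model of one RTO-revert step (RFC 6069). */
    static unsigned int rto_after_revert(unsigned int base_rto_ms,
                                         unsigned int backoff,  /* doublings so far */
                                         unsigned int elapsed_ms,
                                         int *retransmit_now)
    {
            unsigned int rto_ms;

            backoff--;                        /* undo one doubling */
            rto_ms = base_rto_ms << backoff;  /* inet_csk_rto_backoff() analogue */

            if (elapsed_ms < rto_ms) {
                    *retransmit_now = 0;
                    return rto_ms - elapsed_ms;  /* re-arm with the remainder */
            }
            *retransmit_now = 1;                 /* revert clocked out: send now */
            return 0;
    }

    int main(void)
    {
            int now;
            unsigned int left = rto_after_revert(200, 3, 500, &now);

            /* base 200ms backed off 3 times reverts to 800ms; 500ms have
             * elapsed, so 300ms remain and no immediate retransmit. */
            printf("remaining=%ums retransmit_now=%d\n", left, now);
            return 0;
    }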
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -426 +462 @@
  *
  */

-void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+int tcp_v4_err(struct sk_buff *skb, u32 info)
 {
-	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
-	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
-	struct inet_connection_sock *icsk;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
+	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
-	const int type = icmp_hdr(icmp_skb)->type;
-	const int code = icmp_hdr(icmp_skb)->code;
+	const int type = icmp_hdr(skb)->type;
+	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
-	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
-	s32 remaining;
-	u32 delta_us;
	int err;
-	struct net *net = dev_net(icmp_skb->dev);
+	struct net *net = dev_net(skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
-				       inet_iif(icmp_skb), 0);
+				       inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;
+		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
-		return;
+		return 0;
	}
	seq = ntohl(th->seq);
-	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq,
-				   type == ICMP_PARAMETERPROB ||
-				   type == ICMP_TIME_EXCEEDED ||
-				   (type == ICMP_DEST_UNREACH &&
-				    (code == ICMP_NET_UNREACH ||
-				     code == ICMP_HOST_UNREACH)));
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
+				     type == ICMP_TIME_EXCEEDED ||
+				     (type == ICMP_DEST_UNREACH &&
+				      (code == ICMP_NET_UNREACH ||
+				       code == ICMP_HOST_UNREACH)));
+		return 0;
+	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
@@ -482 +515 @@
		goto out;
	}

-	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
-	fastopen = tp->fastopen_rsk;
+	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
@@ -496 +528 @@
	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
-			do_redirect(icmp_skb, sk);
+			do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
@@ -527 +559 @@
		}

		err = icmp_err_convert[code].errno;
-		/* check if icmp_skb allows revert of backoff
-		 * (see draft-zimmermann-tcp-lcd) */
-		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
-			break;
-		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
-		    !icsk->icsk_backoff || fastopen)
-			break;
-
-		if (sock_owned_by_user(sk))
-			break;
-
-		skb = tcp_rtx_queue_head(sk);
-		if (WARN_ON_ONCE(!skb))
-			break;
-
-		icsk->icsk_backoff--;
-		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
-					       TCP_TIMEOUT_INIT;
-		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
-
-		tcp_mstamp_refresh(tp);
-		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
-		remaining = icsk->icsk_rto -
-			    usecs_to_jiffies(delta_us);
-
-		if (remaining > 0) {
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-						  remaining, TCP_RTO_MAX);
-		} else {
-			/* RTO revert clocked out retransmission.
-			 * Will retransmit now */
-			tcp_retransmit_timer(sk);
-		}
-
+		/* check if this ICMP message allows revert of backoff.
+		 * (see RFC 6069)
+		 */
+		if (!fastopen &&
+		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
+			tcp_ld_RTO_revert(sk, seq);
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
@@ -573 +577 @@
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
-		 * is already accepted it is treated as a connected one below.
+		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;
+
+		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
@@ -617 +623 @@
 out:
	bh_unlock_sock(sk);
	sock_put(sk);
+	return 0;
 }

 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
@@ -667 +674 @@
	int genhash;
	struct sock *sk1 = NULL;
 #endif
-	struct net *net;
+	u64 transmit_time = 0;
	struct sock *ctl_sk;
+	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
@@ -704 +712 @@
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
-		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
-					&ip_hdr(skb)->saddr, AF_INET);
+		const union tcp_md5_addr *addr;
+		int l3index;
+
+		/* sdif set, means packet ingressed via a device
+		 * in an L3 domain and inet_iif is set to it.
+		 */
+		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
+		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
+		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	} else if (hash_location) {
+		const union tcp_md5_addr *addr;
+		int sdif = tcp_v4_sdif(skb);
+		int dif = inet_iif(skb);
+		int l3index;
+
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
@@ -717 +737 @@
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
-					     ntohs(th->source), inet_iif(skb),
-					     tcp_v4_sdif(skb));
+					     ntohs(th->source), dif, sdif);
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

-		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
-					&ip_hdr(skb)->saddr, AF_INET);
+		/* sdif set, means packet ingressed via a device
+		 * in an L3 domain and dif is set to it.
+		 */
+		l3index = sdif ? dif : 0;
+		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
+		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
		if (!key)
			goto out;

@@ -771 +794 @@
	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
-	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
-	if (sk)
+	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
+	if (sk) {
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
+		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
+				   inet_twsk(sk)->tw_priority : sk->sk_priority;
+		transmit_time = tcp_transmit_time(sk);
+	}
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-			      &arg, arg.iov[0].iov_len);
+			      &arg, arg.iov[0].iov_len,
+			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
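Both MD5 lookup paths above derive the key scope the same way: when the packet ingressed through an L3 slave (sdif != 0), the incoming interface index already names the L3 master and becomes the lookup's l3index; otherwise the key is unscoped. A tiny standalone model of that rule (ifindex values are made up):

    #include <stdio.h>

    /* Scope rule used throughout these hunks: a VRF ingress packet
     * (sdif != 0) looks up keys scoped to the L3 master in dif. */
    static int md5_lookup_l3index(int dif, int sdif)
    {
            return sdif ? dif : 0;   /* 0 == "no L3 domain" */
    }

    int main(void)
    {
            printf("plain device: l3index=%d\n", md5_lookup_l3index(4, 0));
            printf("VRF ingress:  l3index=%d\n", md5_lookup_l3index(7, 4));
            return 0;
    }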
@@ -813 +841 @@
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;
+	u64 transmit_time;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));
@@ -863 +892 @@
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
-	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
-	if (sk)
-		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
-				   inet_twsk(sk)->tw_mark : sk->sk_mark;
+	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
+	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+			   inet_twsk(sk)->tw_mark : sk->sk_mark;
+	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
+			   inet_twsk(sk)->tw_priority : sk->sk_priority;
+	transmit_time = tcp_transmit_time(sk);
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-			      &arg, arg.iov[0].iov_len);
+			      &arg, arg.iov[0].iov_len,
+			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
@@ -899 +931 @@
 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
 {
+	const union tcp_md5_addr *addr;
+	int l3index;
+
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
@@ -910 +945 @@
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
+	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
+	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
-			req->ts_recent,
+			READ_ONCE(req->ts_recent),
			0,
-			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
-					  AF_INET),
+			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
 }
@@ -931 +967 @@
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
-			      enum tcp_synack_type synack_type)
+			      enum tcp_synack_type synack_type,
+			      struct sk_buff *syn_skb)
 {
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;
+	u8 tos;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

-	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
+	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

+		tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
+				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
+				(inet_sk(sk)->tos & INET_ECN_MASK) :
+				inet_sk(sk)->tos;
+
+		if (!INET_ECN_is_capable(tos) &&
+		    tcp_bpf_ca_needs_ecn((struct sock *)req))
+			tos |= INET_ECN_ECT_0;
+
		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
-					    rcu_dereference(ireq->ireq_opt));
+					    rcu_dereference(ireq->ireq_opt),
+					    tos);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}
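The tos computation above merges the DSCP bits reflected from the peer's SYN with the ECN bits the local socket already negotiated. A standalone sketch of the bit arithmetic (INET_ECN_MASK is the low two bits of the TOS byte; the sample values are illustrative):

    #include <stdio.h>

    #define INET_ECN_MASK 3   /* low two bits of the TOS byte carry ECN */

    /* Reflect-tos merge: DSCP half from the peer's SYN,
     * ECN half from the local socket. */
    static unsigned char synack_tos(unsigned char syn_tos,
                                    unsigned char sk_tos,
                                    int reflect_tos)
    {
            if (!reflect_tos)
                    return sk_tos;
            return (syn_tos & ~INET_ECN_MASK) | (sk_tos & INET_ECN_MASK);
    }

    int main(void)
    {
            /* SYN arrived with DSCP 0x28 | ECT(0) = 0x2a; local TOS 0x00 */
            printf("reflected: 0x%02x\n", synack_tos(0x2a, 0x00, 1)); /* 0x28 */
            printf("plain:     0x%02x\n", synack_tos(0x2a, 0x00, 0)); /* 0x00 */
            return 0;
    }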
@@ -973 +1021 @@
  * We need to maintain these in the sk structure.
  */

+DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
+EXPORT_SYMBOL(tcp_md5_needed);
+
+static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
+{
+	if (!old)
+		return true;
+
+	/* l3index always overrides non-l3index */
+	if (old->l3index && new->l3index == 0)
+		return false;
+	if (old->l3index == 0 && new->l3index)
+		return true;
+
+	return old->prefixlen < new->prefixlen;
+}
+
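better_md5_match() makes VRF scoping dominate prefix length when picking among matching keys. A self-contained model with a tiny driver showing that precedence (field names mirror the hunk; the sample keys are made up):

    #include <stdbool.h>
    #include <stdio.h>

    struct key { int l3index; unsigned char prefixlen; };

    /* Same precedence as the hunk above: an L3-scoped key always beats
     * an unscoped one; among equals, the longer prefix wins. */
    static bool better_match(const struct key *old, const struct key *cand)
    {
            if (!old)
                    return true;
            if (old->l3index && cand->l3index == 0)
                    return false;
            if (old->l3index == 0 && cand->l3index)
                    return true;
            return old->prefixlen < cand->prefixlen;
    }

    int main(void)
    {
            struct key vrf_short = { .l3index = 7, .prefixlen = 16 };
            struct key plain_long = { .l3index = 0, .prefixlen = 32 };

            /* A VRF-scoped /16 still outranks an unscoped /32: */
            printf("%d\n", better_match(&plain_long, &vrf_short)); /* 1 */
            printf("%d\n", better_match(&vrf_short, &plain_long)); /* 0 */
            return 0;
    }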
 /* Find the Key structure for an address. */
-struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
-					 const union tcp_md5_addr *addr,
-					 int family)
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
+					   const union tcp_md5_addr *addr,
+					   int family)
 {
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
@@ -991 +1056 @@
	if (!md5sig)
		return NULL;

-	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
+	hlist_for_each_entry_rcu(key, &md5sig->head, node,
+				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
-
+		if (key->l3index && key->l3index != l3index)
+			continue;
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
@@ -1008 +1075 @@
			match = false;
		}

-		if (match && (!best_match ||
-			      key->prefixlen > best_match->prefixlen))
+		if (match && better_md5_match(best_match, key))
			best_match = key;
	}
	return best_match;
 }
-EXPORT_SYMBOL(tcp_md5_do_lookup);
+EXPORT_SYMBOL(__tcp_md5_do_lookup);

 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
-						      int family, u8 prefixlen)
+						      int family, u8 prefixlen,
+						      int l3index)
 {
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
@@ -1034 +1101 @@
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
 #endif
-	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
+	hlist_for_each_entry_rcu(key, &md5sig->head, node,
+				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
+			continue;
+		if (key->l3index != l3index)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
@@ -1048 +1118 @@
					 const struct sock *addr_sk)
 {
	const union tcp_md5_addr *addr;
+	int l3index;

+	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
+						 addr_sk->sk_bound_dev_if);
	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
-	return tcp_md5_do_lookup(sk, addr, AF_INET);
+	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
 }
 EXPORT_SYMBOL(tcp_v4_md5_lookup);

 /* This can be called on a newly created socket, from other files */
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
-		   gfp_t gfp)
+		   int family, u8 prefixlen, int l3index,
+		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
 {
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

-	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
+	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
	if (key) {
		/* Pre-existing entry - just update that one.
		 * Note that the key might be used concurrently.
+		 * data_race() is telling kcsan that we do not care of
+		 * key mismatches, since changing MD5 key on live flows
+		 * can lead to packet drops.
		 */
-		memcpy(key->key, newkey, newkeylen);
+		data_race(memcpy(key->key, newkey, newkeylen));

		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
		 * Also note that a reader could catch new key->keylen value
@@ -1105 +1181 @@
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
+	key->l3index = l3index;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
@@ -1114 +1191 @@
 EXPORT_SYMBOL(tcp_md5_do_add);

 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
-		   u8 prefixlen)
+		   u8 prefixlen, int l3index)
 {
	struct tcp_md5sig_key *key;

-	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
+	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
@@ -1145 +1222 @@
 }

 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
-				 char __user *optval, int optlen)
+				 sockptr_t optval, int optlen)
 {
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+	const union tcp_md5_addr *addr;
	u8 prefixlen = 32;
+	int l3index = 0;

	if (optlen < sizeof(cmd))
		return -EINVAL;

-	if (copy_from_user(&cmd, optval, sizeof(cmd)))
+	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
@@ -1167 +1246 @@
		return -EINVAL;
	}

+	if (optname == TCP_MD5SIG_EXT &&
+	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
+		struct net_device *dev;
+
+		rcu_read_lock();
+		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
+		if (dev && netif_is_l3_master(dev))
+			l3index = dev->ifindex;
+
+		rcu_read_unlock();
+
+		/* ok to reference set/not set outside of rcu;
+		 * right now device MUST be an L3 master
+		 */
+		if (!dev || !l3index)
+			return -EINVAL;
+	}
+
+	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
+
	if (!cmd.tcpm_keylen)
-		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
-				      AF_INET, prefixlen);
+		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

-	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
-			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
-			      GFP_KERNEL);
+	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
+			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
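With the TCP_MD5SIG_FLAG_IFINDEX branch above, userspace can bind an MD5 key to an L3 master device. A hedged userspace sketch of that setsockopt() call; the VRF name and peer address are placeholders and error handling is trimmed to the minimum:

    #include <string.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/tcp.h>

    /* Install a VRF-scoped TCP-MD5 key on an existing TCP socket. */
    int install_vrf_md5_key(int sock, const char *peer, const char *vrf,
                            const void *key, int keylen)
    {
            struct tcp_md5sig md5 = { .tcpm_flags = TCP_MD5SIG_FLAG_IFINDEX };
            struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

            sin->sin_family = AF_INET;
            inet_pton(AF_INET, peer, &sin->sin_addr);

            md5.tcpm_ifindex = if_nametoindex(vrf);  /* must be an L3 master */
            md5.tcpm_keylen = keylen;
            memcpy(md5.tcpm_key, key, keylen);

            return setsockopt(sock, IPPROTO_TCP, TCP_MD5SIG_EXT,
                              &md5, sizeof(md5));
    }

As the kernel-side check shows, passing an ifindex that is not an L3 master (or does not exist) fails with -EINVAL.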

 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
@@ -1286 +1383 @@

 /* Called with rcu_read_lock() */
 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
-				    const struct sk_buff *skb)
+				    const struct sk_buff *skb,
+				    int dif, int sdif)
 {
 #ifdef CONFIG_TCP_MD5SIG
	/*
@@ -1301 +1399 @@
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
-	int genhash;
+	const union tcp_md5_addr *addr;
	unsigned char newhash[16];
+	int genhash, l3index;

-	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
-					  AF_INET);
+	/* sdif set, means packet ingressed via a device
+	 * in an L3 domain and dif is set to the l3mdev
+	 */
+	l3index = sdif ? dif : 0;
+
+	addr = (union tcp_md5_addr *)&iph->saddr;
+	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
@@ -1331 +1435 @@

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
-		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
-				     : "");
+				     : "", l3index);
		return true;
	}
	return false;
@@ -1372 +1476 @@
	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };

-static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
 #ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
@@ -1415 +1519 @@
				  bool *own_req)
 {
	struct inet_request_sock *ireq;
+	bool found_dup_sk = false;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
 #ifdef CONFIG_TCP_MD5SIG
+	const union tcp_md5_addr *addr;
	struct tcp_md5sig_key *key;
+	int l3index;
 #endif
	struct ip_options_rcu *inet_opt;

@@ -1450 +1557 @@
	inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = prandom_u32();

+	/* Set ToS of the new socket based upon the value of incoming SYN.
+	 * ECT bits are set later in tcp_init_transfer().
+	 */
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
+		newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
+
	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
@@ -1467 +1580 @@
	tcp_initialize_rcv_mss(newsk);

 #ifdef CONFIG_TCP_MD5SIG
+	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
	/* Copy over the MD5 key from the original socket */
-	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
-				AF_INET);
+	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
+	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
@@ -1477 +1591 @@
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
-		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
-			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
+		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
+			       key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
 #endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
-	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
+				       &found_dup_sk);
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
+
+		if (!req_unhash && found_dup_sk) {
+			/* This code path should only be executed in the
+			 * syncookie case only
+			 */
+			bh_unlock_sock(newsk);
+			sock_put(newsk);
+			newsk = NULL;
+		}
	}
	return newsk;

@@ -1520 +1644 @@
	return sk;
 }

+u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
+			 struct tcphdr *th, u32 *cookie)
+{
+	u16 mss = 0;
+#ifdef CONFIG_SYN_COOKIES
+	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
+				    &tcp_request_sock_ipv4_ops, sk, th);
+	if (mss) {
+		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
+		tcp_synq_overflow(sk);
+	}
+#endif
+	return mss;
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1533 +1672 @@
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		struct dst_entry *dst = sk->sk_rx_dst;
+		struct dst_entry *dst;
+
+		dst = rcu_dereference_protected(sk->sk_rx_dst,
+						lockdep_sock_is_held(sk));

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
+				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
				dst_release(dst);
-				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
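These sk_rx_dst hunks move the cached input route under RCU: the socket owner clears it with RCU_INIT_POINTER() before dropping the reference, a later hunk publishes it with rcu_assign_pointer(), and lockless readers use rcu_dereference(). A minimal userspace model of the publish/read ordering involved, with C11 atomics standing in for the kernel RCU API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct dst { int ifindex; };

    static _Atomic(struct dst *) rx_dst;

    static void publish_dst(struct dst *d)
    {
            /* rcu_assign_pointer() analogue: release ordering, so a reader
             * that sees the pointer also sees the initialized object. */
            atomic_store_explicit(&rx_dst, d, memory_order_release);
    }

    static struct dst *read_dst(void)
    {
            /* rcu_dereference() analogue: acquire load on the pointer. */
            return atomic_load_explicit(&rx_dst, memory_order_acquire);
    }

    int main(void)
    {
            static struct dst d = { .ifindex = 4 };

            publish_dst(&d);
            printf("ifindex=%d\n", read_dst()->ifindex);
            return 0;
    }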
@@ -1616 +1758 @@
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
-			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
@@ -1630 +1772 @@

 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
-
-	/* Only socket owner can try to collapse/prune rx queues
-	 * to reduce memory overhead, so add a little headroom here.
-	 * Few sockets backlog are possibly concurrently non empty.
-	 */
-	limit += 64*1024;
+	u32 limit, tail_gso_size, tail_gso_segs;
+	struct skb_shared_info *shinfo;
+	const struct tcphdr *th;
+	struct tcphdr *thtail;
+	struct sk_buff *tail;
+	unsigned int hdrlen;
+	bool fragstolen;
+	u32 gso_segs;
+	u32 gso_size;
+	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
@@ -1645 +1790 @@
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);
+
+	skb_dst_drop(skb);
+
+	if (unlikely(tcp_checksum_complete(skb))) {
+		bh_unlock_sock(sk);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+		return true;
+	}
+
+	/* Attempt coalescing to last skb in backlog, even if we are
+	 * above the limits.
+	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
+	 */
+	th = (const struct tcphdr *)skb->data;
+	hdrlen = th->doff * 4;
+
+	tail = sk->sk_backlog.tail;
+	if (!tail)
+		goto no_coalesce;
+	thtail = (struct tcphdr *)tail->data;
+
+	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
+	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
+	    ((TCP_SKB_CB(tail)->tcp_flags |
+	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
+	    !((TCP_SKB_CB(tail)->tcp_flags &
+	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
+	    ((TCP_SKB_CB(tail)->tcp_flags ^
+	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
+#ifdef CONFIG_TLS_DEVICE
+	    tail->decrypted != skb->decrypted ||
+#endif
+	    thtail->doff != th->doff ||
+	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
+		goto no_coalesce;
+
+	__skb_pull(skb, hdrlen);
+
+	shinfo = skb_shinfo(skb);
+	gso_size = shinfo->gso_size ?: skb->len;
+	gso_segs = shinfo->gso_segs ?: 1;
+
+	shinfo = skb_shinfo(tail);
+	tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+	tail_gso_segs = shinfo->gso_segs ?: 1;
+
+	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
+		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
+
+		if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
+			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+			thtail->window = th->window;
+		}
+
+		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
+		 * thtail->fin, so that the fast path in tcp_rcv_established()
+		 * is not entered if we append a packet with a FIN.
+		 * SYN, RST, URG are not present.
+		 * ACK is set on both packets.
+		 * PSH : we do not really care in TCP stack,
+		 *	 at least for 'GRO' packets.
+		 */
+		thtail->fin |= th->fin;
+		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+
+		if (TCP_SKB_CB(skb)->has_rxtstamp) {
+			TCP_SKB_CB(tail)->has_rxtstamp = true;
+			tail->tstamp = skb->tstamp;
+			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+		}
+
+		/* Not as strict as GRO. We only need to carry mss max value */
+		shinfo->gso_size = max(gso_size, tail_gso_size);
+		shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
+
+		sk->sk_backlog.len += delta;
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPBACKLOGCOALESCE);
+		kfree_skb_partial(skb, fragstolen);
+		return false;
+	}
+	__skb_push(skb, hdrlen);
+
+no_coalesce:
+	limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
+
+	/* Only socket owner can try to collapse/prune rx queues
+	 * to reduce memory overhead, so add a little headroom here.
+	 * Few sockets backlog are possibly concurrently non empty.
+	 */
+	limit += 64 * 1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
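The no_coalesce path now budgets the backlog at sk_rcvbuf plus half of sk_sndbuf (it used to be the full sum), with 64 KiB of headroom since only the socket owner can collapse or prune the receive queues. The computation, isolated with illustrative buffer sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Backlog budget as computed at the no_coalesce label above. */
    static uint32_t backlog_limit(uint32_t rcvbuf, uint32_t sndbuf)
    {
            return rcvbuf + (sndbuf >> 1) + 64 * 1024;
    }

    int main(void)
    {
            /* Illustrative buffer sizes, not the kernel's tunables. */
            printf("limit = %u bytes\n", backlog_limit(131072, 87040));
            return 0;
    }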
@@ -1698 +1935 @@
 int tcp_v4_rcv(struct sk_buff *skb)
 {
	struct net *net = dev_net(skb->dev);
+	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
+	int dif = inet_iif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
@@ -1747 +1986 @@
		struct sock *nsk;

		sk = req->rsk_listener;
-		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+		if (unlikely(!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb) ||
+			     tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
@@ -1786 +2026 @@
		}
		goto discard_and_relse;
	}
+	nf_reset_ct(skb);
	if (nsk == sk) {
		reqsk_put(req);
		tcp_v4_restore_cb(skb);
@@ -1805 +2046 @@
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

-	if (tcp_v4_inbound_md5_hash(sk, skb))
+	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

-	nf_reset(skb);
+	nf_reset_ct(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
@@ -1829 +2070 @@
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
+		skb_to_free = sk->sk_rx_skb_cache;
+		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
-	} else if (tcp_add_backlog(sk, skb)) {
-		goto discard_and_relse;
+	} else {
+		if (tcp_add_backlog(sk, skb))
+			goto discard_and_relse;
+		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
+	if (skb_to_free)
+		__kfree_skb(skb_to_free);

 put_and_return:
	if (refcounted)
@@ -1897 +2144 @@
		}
	}
		/* to ACK */
-		/* fall through */
+		fallthrough;
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
@@ -1921 +2168 @@
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
-		sk->sk_rx_dst = dst;
+		rcu_assign_pointer(sk->sk_rx_dst, dst);
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
 }
@@ -1939 +2186 @@
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
-#ifdef CONFIG_COMPAT
-	.compat_setsockopt = compat_ip_setsockopt,
-	.compat_getsockopt = compat_ip_getsockopt,
-#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
 };
 EXPORT_SYMBOL(ipv4_specific);
@@ -2007 +2250 @@
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

-	BUG_ON(tp->fastopen_rsk);
+	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
@@ -2028 +2271 @@
  */
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+	struct tcp_seq_afinfo *afinfo;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
+
+	if (st->bpf_seq_afinfo)
+		afinfo = st->bpf_seq_afinfo;
+	else
+		afinfo = PDE_DATA(file_inode(seq->file));

	if (!sk) {
 get_head:
@@ -2052 +2300 @@
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
-		if (sk->sk_family == afinfo->family)
+		if (afinfo->family == AF_UNSPEC ||
+		    sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
@@ -2089 +2338 @@
  */
 static void *established_get_first(struct seq_file *seq)
 {
-	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+	struct tcp_seq_afinfo *afinfo;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;
+
+	if (st->bpf_seq_afinfo)
+		afinfo = st->bpf_seq_afinfo;
+	else
+		afinfo = PDE_DATA(file_inode(seq->file));

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
@@ -2106 +2360 @@

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
-			if (sk->sk_family != afinfo->family ||
+			if ((afinfo->family != AF_UNSPEC &&
+			     sk->sk_family != afinfo->family) ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
@@ -2121 +2376 @@

 static void *established_get_next(struct seq_file *seq, void *cur)
 {
-	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+	struct tcp_seq_afinfo *afinfo;
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
+
+	if (st->bpf_seq_afinfo)
+		afinfo = st->bpf_seq_afinfo;
+	else
+		afinfo = PDE_DATA(file_inode(seq->file));

	++st->num;
	++st->offset;
@@ -2133 +2393 @@
	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
-		if (sk->sk_family == afinfo->family &&
+		if ((afinfo->family == AF_UNSPEC ||
+		     sk->sk_family == afinfo->family) &&
		    net_eq(sock_net(sk), net))
			return sk;
	}
@@ -2194 +2455 @@
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
-		/* Fallthrough */
+		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
@@ -2338 +2599 @@

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
-		rx_queue = sk->sk_ack_backlog;
+		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
@@ -2360 +2621 @@
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
-		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
			fastopenq->max_qlen :
@@ -2412 +2673 @@
	seq_pad(seq, '\n');
	return 0;
 }

+#ifdef CONFIG_BPF_SYSCALL
+struct bpf_iter__tcp {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct sock_common *, sk_common);
+	uid_t uid __aligned(8);
+};
+
+static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
+			     struct sock_common *sk_common, uid_t uid)
+{
+	struct bpf_iter__tcp ctx;
+
+	meta->seq_num--;  /* skip SEQ_START_TOKEN */
+	ctx.meta = meta;
+	ctx.sk_common = sk_common;
+	ctx.uid = uid;
+	return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+	struct sock *sk = v;
+	uid_t uid;
+
+	if (v == SEQ_START_TOKEN)
+		return 0;
+
+	if (sk->sk_state == TCP_TIME_WAIT) {
+		uid = 0;
+	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		const struct request_sock *req = v;
+
+		uid = from_kuid_munged(seq_user_ns(seq),
+				       sock_i_uid(req->rsk_listener));
+	} else {
+		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+	}
+
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, false);
+	return tcp_prog_seq_show(prog, &meta, v, uid);
+}
+
+static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+
+	if (!v) {
+		meta.seq = seq;
+		prog = bpf_iter_get_info(&meta, true);
+		if (prog)
+			(void)tcp_prog_seq_show(prog, &meta, v, 0);
+	}
+
+	tcp_seq_stop(seq, v);
+}
+
+static const struct seq_operations bpf_iter_tcp_seq_ops = {
+	.show		= bpf_iter_tcp_seq_show,
+	.start		= tcp_seq_start,
+	.next		= tcp_seq_next,
+	.stop		= bpf_iter_tcp_seq_stop,
+};
+#endif

 static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
@@ -2493 +2822 @@
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
-#ifdef CONFIG_COMPAT
-	.compat_setsockopt	= compat_tcp_setsockopt,
-	.compat_getsockopt	= compat_tcp_getsockopt,
-#endif
	.diag_destroy		= tcp_abort,
 };
 EXPORT_SYMBOL(tcp_prot);
@@ -2506 +2831 @@
	int cpu;

	if (net->ipv4.tcp_congestion_control)
-		module_put(net->ipv4.tcp_congestion_control->owner);
+		bpf_module_put(net->ipv4.tcp_congestion_control,
+			       net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
@@ -2545 +2871 @@
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
+	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
@@ -2560 +2887 @@
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;
+	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;

	cnt = tcp_hashinfo.ehash_mask + 1;
-	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
+	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

-	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
@@ -2584 +2912 @@
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
-	/* Default TSQ limit of four TSO segments */
-	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
+	/* Default TSQ limit of 16 TSO segments */
+	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
@@ -2603 +2931 @@
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
+	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
-	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
+	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
-	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
+	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
+			       init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;
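Two of the per-netns defaults above are simple functions of the established-hash size: max_tw_buckets drops the off-by-one rounding, and the SYN backlog divisor halves from 256 to 128 (doubling the default backlog). Worked with an illustrative table size:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned int cnt = 524288;   /* illustrative ehash_mask + 1 */

            printf("max_tw_buckets  = %u\n", cnt / 2);          /* 262144 */
            printf("max_syn_backlog = %u\n", max(128u, cnt / 128)); /* 4096 */
            return 0;
    }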
@@ -2639 +2969 @@
	.exit_batch = tcp_sk_exit_batch,
 };

+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
+		     struct sock_common *sk_common, uid_t uid)
+
+static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
+{
+	struct tcp_iter_state *st = priv_data;
+	struct tcp_seq_afinfo *afinfo;
+	int ret;
+
+	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
+	if (!afinfo)
+		return -ENOMEM;
+
+	afinfo->family = AF_UNSPEC;
+	st->bpf_seq_afinfo = afinfo;
+	ret = bpf_iter_init_seq_net(priv_data, aux);
+	if (ret)
+		kfree(afinfo);
+	return ret;
+}
+
+static void bpf_iter_fini_tcp(void *priv_data)
+{
+	struct tcp_iter_state *st = priv_data;
+
+	kfree(st->bpf_seq_afinfo);
+	bpf_iter_fini_seq_net(priv_data);
+}
+
+static const struct bpf_iter_seq_info tcp_seq_info = {
+	.seq_ops		= &bpf_iter_tcp_seq_ops,
+	.init_seq_private	= bpf_iter_init_tcp,
+	.fini_seq_private	= bpf_iter_fini_tcp,
+	.seq_priv_size		= sizeof(struct tcp_iter_state),
+};
+
+static struct bpf_iter_reg tcp_reg_info = {
+	.target			= "tcp",
+	.ctx_arg_info_size	= 1,
+	.ctx_arg_info		= {
+		{ offsetof(struct bpf_iter__tcp, sk_common),
+		  PTR_TO_BTF_ID_OR_NULL },
+	},
+	.seq_info		= &tcp_seq_info,
+};
+
+static void __init bpf_iter_register(void)
+{
+	tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
+	if (bpf_iter_reg_target(&tcp_reg_info))
+		pr_warn("Warning: could not register bpf iterator tcp\n");
+}
+
+#endif
+
 void __init tcp_v4_init(void)
 {
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
+
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+	bpf_iter_register();
+#endif
 }
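The plumbing above registers a "tcp" iterator target whose per-socket context is the bpf_iter__tcp struct defined earlier in this diff (with AF_UNSPEC afinfo, so the iterator walks both v4 and v6 sockets). A sketch of a libbpf-style program that could attach to it; it assumes a vmlinux.h generated from kernel BTF, and the printed fields are illustrative:

    /* tcp_iter.bpf.c - sketch for the "tcp" bpf_iter target above. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    SEC("iter/tcp")
    int dump_tcp(struct bpf_iter__tcp *ctx)
    {
            struct sock_common *skc = ctx->sk_common;
            struct seq_file *seq = ctx->meta->seq;

            if (!skc)   /* PTR_TO_BTF_ID_OR_NULL: must check for NULL */
                    return 0;

            /* One line per socket: family, TCP state, owning uid. */
            BPF_SEQ_PRINTF(seq, "family=%d state=%d uid=%u\n",
                           skc->skc_family, skc->skc_state, ctx->uid);
            return 0;
    }

Reading the pinned iterator link (e.g. via bpf_iter_create() or a bpffs pin) then streams these lines the same way /proc/net/tcp would, but with the program in control of the format.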