.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
1 | 2 | /* |
2 | 3 | * TCP over IPv6 |
3 | 4 | * Linux INET6 implementation |
.. | .. |
16 | 17 | * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind |
17 | 18 | * a single port at the same time. |
18 | 19 | * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file. |
19 | | - * |
20 | | - * This program is free software; you can redistribute it and/or |
21 | | - * modify it under the terms of the GNU General Public License |
22 | | - * as published by the Free Software Foundation; either version |
23 | | - * 2 of the License, or (at your option) any later version. |
24 | 20 | */ |
25 | 21 | |
26 | 22 | #include <linux/bottom_half.h> |
.. | .. |
43 | 39 | #include <linux/ipv6.h> |
44 | 40 | #include <linux/icmpv6.h> |
45 | 41 | #include <linux/random.h> |
| 42 | +#include <linux/indirect_call_wrapper.h> |
46 | 43 | |
47 | 44 | #include <net/tcp.h> |
48 | 45 | #include <net/ndisc.h> |
.. | .. |
78 | 75 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
79 | 76 | |
80 | 77 | static const struct inet_connection_sock_af_ops ipv6_mapped; |
81 | | -static const struct inet_connection_sock_af_ops ipv6_specific; |
| 78 | +const struct inet_connection_sock_af_ops ipv6_specific; |
82 | 79 | #ifdef CONFIG_TCP_MD5SIG |
83 | 80 | static const struct tcp_sock_af_ops tcp_sock_ipv6_specific; |
84 | 81 | static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; |
85 | 82 | #else |
86 | 83 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, |
87 | | - const struct in6_addr *addr) |
| 84 | + const struct in6_addr *addr, |
| 85 | + int l3index) |
88 | 86 | { |
89 | 87 | return NULL; |
90 | 88 | } |
91 | 89 | #endif |
| 90 | + |
| 91 | +/* Helper returning the inet6 address from a given tcp socket. |
| 92 | + * It can be used in TCP stack instead of inet6_sk(sk). |
| 93 | + * This avoids a dereference and allow compiler optimizations. |
| 94 | + * It is a specialized version of inet6_sk_generic(). |
| 95 | + */ |
| 96 | +static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk) |
| 97 | +{ |
| 98 | + unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo); |
| 99 | + |
| 100 | + return (struct ipv6_pinfo *)(((u8 *)sk) + offset); |
| 101 | +} |
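The new tcp_inet6_sk() helper above relies on struct ipv6_pinfo being the last member of struct tcp6_sock, so its address is a compile-time constant offset from the socket pointer; the rest of the diff swaps inet6_sk(sk) for it on TCP's hot paths. A minimal userspace sketch of the same trick, with invented structure names standing in for the kernel types:

```c
/* Illustrative userspace analogue of the constant-offset trick above.
 * The struct names here are stand-ins, not the kernel's real layouts.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct proto_a { int base_state; };            /* plays the role of struct sock */
struct proto_b { long opts; int hop_limit; };  /* plays the role of ipv6_pinfo  */

struct full_sock {                             /* plays the role of tcp6_sock   */
	struct proto_a a;                      /* must come first, like struct sock */
	char private_area[64];
	struct proto_b b;                      /* last member, like the inet6 part  */
};

/* Same pattern as tcp_inet6_sk(): because the container type is known and the
 * inner object is its last member, it sits at a compile-time constant offset
 * from the base pointer; no extra pointer load is needed. */
static struct proto_b *fast_inner(const struct proto_a *a)
{
	size_t off = sizeof(struct full_sock) - sizeof(struct proto_b);

	return (struct proto_b *)((char *)a + off);
}

int main(void)
{
	struct full_sock s = { .b.hop_limit = 64 };

	assert(fast_inner(&s.a) == &s.b);      /* same object, no pointer chase */
	printf("hop_limit=%d\n", fast_inner(&s.a)->hop_limit);
	return 0;
}
```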
92 | 102 | |
93 | 103 | static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) |
94 | 104 | { |
.. | .. |
97 | 107 | if (dst && dst_hold_safe(dst)) { |
98 | 108 | const struct rt6_info *rt = (const struct rt6_info *)dst; |
99 | 109 | |
100 | | - sk->sk_rx_dst = dst; |
| 110 | + rcu_assign_pointer(sk->sk_rx_dst, dst); |
101 | 111 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; |
102 | | - inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); |
| 112 | + tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); |
103 | 113 | } |
104 | 114 | } |
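sk->sk_rx_dst now goes through rcu_assign_pointer()/rcu_dereference() so the cached input route can be read without the socket lock (see the early-demux and receive hunks further down). A rough userspace analogue of the publish/read pattern, assuming the userspace RCU library (liburcu) is installed; it only mirrors the shape of the kernel code:

```c
#include <urcu.h>     /* userspace RCU (liburcu); link with -lurcu */
#include <stdio.h>
#include <stdlib.h>

struct route { int ifindex; };
static struct route *cached_route;     /* plays the role of sk->sk_rx_dst */

static void publish_route(int ifindex)
{
	struct route *r = malloc(sizeof(*r));

	r->ifindex = ifindex;
	rcu_assign_pointer(cached_route, r);   /* paired with rcu_dereference() */
}

static int read_route(void)
{
	int ifindex = -1;

	rcu_read_lock();
	struct route *r = rcu_dereference(cached_route);
	if (r)
		ifindex = r->ifindex;
	rcu_read_unlock();
	return ifindex;
}

int main(void)
{
	rcu_register_thread();
	publish_route(2);
	printf("ifindex=%d\n", read_route());
	rcu_unregister_thread();
	return 0;
}
```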
105 | 115 | |
.. | .. |
138 | 148 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
139 | 149 | struct inet_sock *inet = inet_sk(sk); |
140 | 150 | struct inet_connection_sock *icsk = inet_csk(sk); |
141 | | - struct ipv6_pinfo *np = inet6_sk(sk); |
| 151 | + struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
142 | 152 | struct tcp_sock *tp = tcp_sk(sk); |
143 | 153 | struct in6_addr *saddr = NULL, *final_p, final; |
144 | 154 | struct ipv6_txoptions *opt; |
.. | .. |
162 | 172 | if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { |
163 | 173 | struct ip6_flowlabel *flowlabel; |
164 | 174 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
165 | | - if (!flowlabel) |
| 175 | + if (IS_ERR(flowlabel)) |
166 | 176 | return -EINVAL; |
167 | 177 | fl6_sock_release(flowlabel); |
168 | 178 | } |
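The flow-label check changes because fl6_sock_lookup() now reports failure with an error pointer rather than NULL, hence IS_ERR(). The sketch below re-implements the ERR_PTR/IS_ERR idiom in miniature for illustration; it is not the kernel's <linux/err.h>:

```c
/* Minimal illustration of encoding an errno value inside a pointer. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int table[] = { 10, 20, 30 };

/* A lookup that distinguishes "bad argument" from "not found" without
 * needing a separate output parameter. */
static int *lookup(int idx)
{
	if (idx < 0)
		return ERR_PTR(-EINVAL);
	if (idx >= 3)
		return ERR_PTR(-ENOENT);
	return &table[idx];
}

int main(void)
{
	int *p = lookup(7);

	if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));  /* -2 == -ENOENT */
	else
		printf("value: %d\n", *p);
	return 0;
}
```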
.. | .. |
220 | 230 | u32 exthdrlen = icsk->icsk_ext_hdr_len; |
221 | 231 | struct sockaddr_in sin; |
222 | 232 | |
223 | | - SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); |
224 | | - |
225 | 233 | if (__ipv6_only_sock(sk)) |
226 | 234 | return -ENETUNREACH; |
227 | 235 | |
.. | .. |
230 | 238 | sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; |
231 | 239 | |
232 | 240 | icsk->icsk_af_ops = &ipv6_mapped; |
| 241 | + if (sk_is_mptcp(sk)) |
| 242 | + mptcpv6_handle_mapped(sk, true); |
233 | 243 | sk->sk_backlog_rcv = tcp_v4_do_rcv; |
234 | 244 | #ifdef CONFIG_TCP_MD5SIG |
235 | 245 | tp->af_specific = &tcp_sock_ipv6_mapped_specific; |
.. | .. |
240 | 250 | if (err) { |
241 | 251 | icsk->icsk_ext_hdr_len = exthdrlen; |
242 | 252 | icsk->icsk_af_ops = &ipv6_specific; |
| 253 | + if (sk_is_mptcp(sk)) |
| 254 | + mptcpv6_handle_mapped(sk, false); |
243 | 255 | sk->sk_backlog_rcv = tcp_v6_do_rcv; |
244 | 256 | #ifdef CONFIG_TCP_MD5SIG |
245 | 257 | tp->af_specific = &tcp_sock_ipv6_specific; |
.. | .. |
266 | 278 | opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); |
267 | 279 | final_p = fl6_update_dst(&fl6, opt, &final); |
268 | 280 | |
269 | | - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
| 281 | + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); |
270 | 282 | |
271 | 283 | dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); |
272 | 284 | if (IS_ERR(dst)) { |
.. | .. |
327 | 339 | |
328 | 340 | late_failure: |
329 | 341 | tcp_set_state(sk, TCP_CLOSE); |
| 342 | + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
| 343 | + inet_reset_saddr(sk); |
330 | 344 | failure: |
331 | 345 | inet->inet_dport = 0; |
332 | 346 | sk->sk_route_caps = 0; |
.. | .. |
359 | 373 | } |
360 | 374 | } |
361 | 375 | |
362 | | -static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
| 376 | +static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
363 | 377 | u8 type, u8 code, int offset, __be32 info) |
364 | 378 | { |
365 | 379 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; |
.. | .. |
381 | 395 | if (!sk) { |
382 | 396 | __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), |
383 | 397 | ICMP6_MIB_INERRORS); |
384 | | - return; |
| 398 | + return -ENOENT; |
385 | 399 | } |
386 | 400 | |
387 | 401 | if (sk->sk_state == TCP_TIME_WAIT) { |
388 | 402 | inet_twsk_put(inet_twsk(sk)); |
389 | | - return; |
| 403 | + return 0; |
390 | 404 | } |
391 | 405 | seq = ntohl(th->seq); |
392 | 406 | fatal = icmpv6_err_convert(type, code, &err); |
393 | | - if (sk->sk_state == TCP_NEW_SYN_RECV) |
394 | | - return tcp_req_err(sk, seq, fatal); |
| 407 | + if (sk->sk_state == TCP_NEW_SYN_RECV) { |
| 408 | + tcp_req_err(sk, seq, fatal); |
| 409 | + return 0; |
| 410 | + } |
395 | 411 | |
396 | 412 | bh_lock_sock(sk); |
397 | 413 | if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) |
.. | .. |
400 | 416 | if (sk->sk_state == TCP_CLOSE) |
401 | 417 | goto out; |
402 | 418 | |
403 | | - if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { |
| 419 | + if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) { |
404 | 420 | __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); |
405 | 421 | goto out; |
406 | 422 | } |
407 | 423 | |
408 | 424 | tp = tcp_sk(sk); |
409 | 425 | /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ |
410 | | - fastopen = tp->fastopen_rsk; |
| 426 | + fastopen = rcu_dereference(tp->fastopen_rsk); |
411 | 427 | snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; |
412 | 428 | if (sk->sk_state != TCP_LISTEN && |
413 | 429 | !between(seq, snd_una, tp->snd_nxt)) { |
.. | .. |
415 | 431 | goto out; |
416 | 432 | } |
417 | 433 | |
418 | | - np = inet6_sk(sk); |
| 434 | + np = tcp_inet6_sk(sk); |
419 | 435 | |
420 | 436 | if (type == NDISC_REDIRECT) { |
421 | 437 | if (!sock_owned_by_user(sk)) { |
.. | .. |
459 | 475 | case TCP_SYN_SENT: |
460 | 476 | case TCP_SYN_RECV: |
461 | 477 | /* Only in fast or simultaneous open. If a fast open socket is |
462 | | - * is already accepted it is treated as a connected one below. |
| 478 | + * already accepted it is treated as a connected one below. |
463 | 479 | */ |
464 | 480 | if (fastopen && !fastopen->sk) |
465 | 481 | break; |
| 482 | + |
| 483 | + ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th); |
466 | 484 | |
467 | 485 | if (!sock_owned_by_user(sk)) { |
468 | 486 | sk->sk_err = err; |
.. | .. |
472 | 490 | } else |
473 | 491 | sk->sk_err_soft = err; |
474 | 492 | goto out; |
| 493 | + case TCP_LISTEN: |
| 494 | + break; |
| 495 | + default: |
| 496 | + /* check if this ICMP message allows revert of backoff. |
| 497 | + * (see RFC 6069) |
| 498 | + */ |
| 499 | + if (!fastopen && type == ICMPV6_DEST_UNREACH && |
| 500 | + code == ICMPV6_NOROUTE) |
| 501 | + tcp_ld_RTO_revert(sk, seq); |
475 | 502 | } |
476 | 503 | |
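The new default branch implements the RFC 6069 idea: an ICMPv6 "destination unreachable, no route" for not-yet-acknowledged data may undo the exponential backoff that accumulated while the route was missing, via tcp_ld_RTO_revert(). The toy sketch below only illustrates the backoff-revert concept; its structure and fields are invented and it is not the kernel helper:

```c
/* Conceptual sketch only: undoing exponential RTO backoff when a soft error
 * indicates the path was merely temporarily unavailable (RFC 6069). */
#include <stdio.h>

struct conn {
	unsigned int rto_ms;      /* current retransmission timeout */
	unsigned int backoff;     /* how many times RTO was doubled */
};

static void rto_backoff(struct conn *c)
{
	c->rto_ms *= 2;
	c->backoff++;
}

static void rto_revert(struct conn *c)
{
	/* undo the doublings applied while the route was gone */
	while (c->backoff) {
		c->rto_ms /= 2;
		c->backoff--;
	}
}

int main(void)
{
	struct conn c = { .rto_ms = 200, .backoff = 0 };

	rto_backoff(&c);
	rto_backoff(&c);          /* 800 ms after two timeouts          */
	rto_revert(&c);           /* ICMPv6 no-route seen: back to 200  */
	printf("rto=%u ms backoff=%u\n", c.rto_ms, c.backoff);
	return 0;
}
```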
477 | 504 | if (!sock_owned_by_user(sk) && np->recverr) { |
.. | .. |
483 | 510 | out: |
484 | 511 | bh_unlock_sock(sk); |
485 | 512 | sock_put(sk); |
| 513 | + return 0; |
486 | 514 | } |
487 | 515 | |
488 | 516 | |
.. | .. |
490 | 518 | struct flowi *fl, |
491 | 519 | struct request_sock *req, |
492 | 520 | struct tcp_fastopen_cookie *foc, |
493 | | - enum tcp_synack_type synack_type) |
| 521 | + enum tcp_synack_type synack_type, |
| 522 | + struct sk_buff *syn_skb) |
494 | 523 | { |
495 | 524 | struct inet_request_sock *ireq = inet_rsk(req); |
496 | | - struct ipv6_pinfo *np = inet6_sk(sk); |
| 525 | + struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
497 | 526 | struct ipv6_txoptions *opt; |
498 | 527 | struct flowi6 *fl6 = &fl->u.ip6; |
499 | 528 | struct sk_buff *skb; |
500 | 529 | int err = -ENOMEM; |
| 530 | + u8 tclass; |
501 | 531 | |
502 | 532 | /* First, grab a route. */ |
503 | 533 | if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, |
504 | 534 | IPPROTO_TCP)) == NULL) |
505 | 535 | goto done; |
506 | 536 | |
507 | | - skb = tcp_make_synack(sk, dst, req, foc, synack_type); |
| 537 | + skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb); |
508 | 538 | |
509 | 539 | if (skb) { |
510 | 540 | __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, |
.. | .. |
514 | 544 | if (np->repflow && ireq->pktopts) |
515 | 545 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); |
516 | 546 | |
| 547 | + tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ? |
| 548 | + (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | |
| 549 | + (np->tclass & INET_ECN_MASK) : |
| 550 | + np->tclass; |
| 551 | + |
| 552 | + if (!INET_ECN_is_capable(tclass) && |
| 553 | + tcp_bpf_ca_needs_ecn((struct sock *)req)) |
| 554 | + tclass |= INET_ECN_ECT_0; |
| 555 | + |
517 | 556 | rcu_read_lock(); |
518 | 557 | opt = ireq->ipv6_opt; |
519 | 558 | if (!opt) |
520 | 559 | opt = rcu_dereference(np->opt); |
521 | 560 | err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt, |
522 | | - np->tclass); |
| 561 | + tclass, sk->sk_priority); |
523 | 562 | rcu_read_unlock(); |
524 | 563 | err = net_xmit_eval(err); |
525 | 564 | } |
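With net.ipv4.tcp_reflect_tos enabled, the SYN-ACK now reuses the DSCP bits of the received SYN while keeping the listener's own ECN bits (the BPF congestion-control hook may additionally force ECT(0), which the sketch omits). A small stand-alone restatement of that tclass selection; the constants mirror the kernel's INET_ECN_* values:

```c
#include <stdbool.h>
#include <stdio.h>

#define INET_ECN_MASK  3u   /* low two bits of the traffic class */
#define INET_ECN_ECT_0 2u

static unsigned char synack_tclass(bool reflect_tos, unsigned char syn_tos,
				   unsigned char listener_tclass)
{
	unsigned char tclass;

	if (reflect_tos)
		/* DSCP from the SYN, ECN bits from the listener socket */
		tclass = (syn_tos & ~INET_ECN_MASK) |
			 (listener_tclass & INET_ECN_MASK);
	else
		tclass = listener_tclass;

	return tclass;
}

int main(void)
{
	/* SYN carried DSCP CS5 (0xa0) with ECT(0); listener uses tclass 0 */
	printf("0x%02x\n", synack_tclass(true, 0xa0 | INET_ECN_ECT_0, 0)); /* 0xa0 */
	printf("0x%02x\n", synack_tclass(false, 0xa0, 0x10));              /* 0x10 */
	return 0;
}
```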
.. | .. |
537 | 576 | |
538 | 577 | #ifdef CONFIG_TCP_MD5SIG |
539 | 578 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, |
540 | | - const struct in6_addr *addr) |
| 579 | + const struct in6_addr *addr, |
| 580 | + int l3index) |
541 | 581 | { |
542 | | - return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6); |
| 582 | + return tcp_md5_do_lookup(sk, l3index, |
| 583 | + (union tcp_md5_addr *)addr, AF_INET6); |
543 | 584 | } |
544 | 585 | |
545 | 586 | static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, |
546 | 587 | const struct sock *addr_sk) |
547 | 588 | { |
548 | | - return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr); |
| 589 | + int l3index; |
| 590 | + |
| 591 | + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), |
| 592 | + addr_sk->sk_bound_dev_if); |
| 593 | + return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr, |
| 594 | + l3index); |
549 | 595 | } |
550 | 596 | |
551 | 597 | static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, |
552 | | - char __user *optval, int optlen) |
| 598 | + sockptr_t optval, int optlen) |
553 | 599 | { |
554 | 600 | struct tcp_md5sig cmd; |
555 | 601 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; |
| 602 | + int l3index = 0; |
556 | 603 | u8 prefixlen; |
557 | 604 | |
558 | 605 | if (optlen < sizeof(cmd)) |
559 | 606 | return -EINVAL; |
560 | 607 | |
561 | | - if (copy_from_user(&cmd, optval, sizeof(cmd))) |
| 608 | + if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) |
562 | 609 | return -EFAULT; |
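optval becomes a sockptr_t so the same parser works for userspace and in-kernel callers, with copy_from_sockptr() hiding which address space the option lives in. Below is a simplified userspace analogue of that "pointer plus origin flag" idea; the real definitions live in <linux/sockptr.h> and differ in detail:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct {
	void *ptr;
	bool  is_kernel;   /* true: plain memcpy; false: would be copy_from_user() */
} sockptr_like;

static int copy_from_sockptr_like(void *dst, sockptr_like src, size_t size)
{
	/* In the kernel the !is_kernel branch uses copy_from_user(); here both
	 * branches are memcpy because everything is one address space. */
	memcpy(dst, src.ptr, size);
	return 0;
}

int main(void)
{
	int option = 42, value;
	sockptr_like p = { .ptr = &option, .is_kernel = true };

	copy_from_sockptr_like(&value, p, sizeof(value));
	printf("value=%d\n", value);
	return 0;
}
```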
563 | 610 | |
564 | 611 | if (sin6->sin6_family != AF_INET6) |
.. | .. |
574 | 621 | prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128; |
575 | 622 | } |
576 | 623 | |
| 624 | + if (optname == TCP_MD5SIG_EXT && |
| 625 | + cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) { |
| 626 | + struct net_device *dev; |
| 627 | + |
| 628 | + rcu_read_lock(); |
| 629 | + dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex); |
| 630 | + if (dev && netif_is_l3_master(dev)) |
| 631 | + l3index = dev->ifindex; |
| 632 | + rcu_read_unlock(); |
| 633 | + |
| 634 | + /* ok to reference set/not set outside of rcu; |
| 635 | + * right now device MUST be an L3 master |
| 636 | + */ |
| 637 | + if (!dev || !l3index) |
| 638 | + return -EINVAL; |
| 639 | + } |
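The TCP_MD5SIG_EXT block above lets userspace scope an MD5 key to an L3 master (VRF) device via TCP_MD5SIG_FLAG_IFINDEX. The sketch below shows how such a key might be installed from userspace; it assumes headers and a kernel new enough to define the flag, and "vrf-blue" is a hypothetical VRF name:

```c
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct tcp_md5sig md5 = { 0 };
	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	peer->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);

	md5.tcpm_flags   = TCP_MD5SIG_FLAG_IFINDEX;
	md5.tcpm_ifindex = if_nametoindex("vrf-blue"); /* must be an L3 master */
	md5.tcpm_keylen  = 6;
	memcpy(md5.tcpm_key, "secret", 6);

	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5)) < 0)
		perror("setsockopt(TCP_MD5SIG_EXT)"); /* EINVAL if not an l3mdev */
	return 0;
}
```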
| 640 | + |
577 | 641 | if (!cmd.tcpm_keylen) { |
578 | 642 | if (ipv6_addr_v4mapped(&sin6->sin6_addr)) |
579 | 643 | return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], |
580 | | - AF_INET, prefixlen); |
| 644 | + AF_INET, prefixlen, |
| 645 | + l3index); |
581 | 646 | return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, |
582 | | - AF_INET6, prefixlen); |
| 647 | + AF_INET6, prefixlen, l3index); |
583 | 648 | } |
584 | 649 | |
585 | 650 | if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) |
.. | .. |
587 | 652 | |
588 | 653 | if (ipv6_addr_v4mapped(&sin6->sin6_addr)) |
589 | 654 | return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], |
590 | | - AF_INET, prefixlen, cmd.tcpm_key, |
591 | | - cmd.tcpm_keylen, GFP_KERNEL); |
| 655 | + AF_INET, prefixlen, l3index, |
| 656 | + cmd.tcpm_key, cmd.tcpm_keylen, |
| 657 | + GFP_KERNEL); |
592 | 658 | |
593 | 659 | return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr, |
594 | | - AF_INET6, prefixlen, cmd.tcpm_key, |
595 | | - cmd.tcpm_keylen, GFP_KERNEL); |
| 660 | + AF_INET6, prefixlen, l3index, |
| 661 | + cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); |
596 | 662 | } |
597 | 663 | |
598 | 664 | static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp, |
.. | .. |
703 | 769 | #endif |
704 | 770 | |
705 | 771 | static bool tcp_v6_inbound_md5_hash(const struct sock *sk, |
706 | | - const struct sk_buff *skb) |
| 772 | + const struct sk_buff *skb, |
| 773 | + int dif, int sdif) |
707 | 774 | { |
708 | 775 | #ifdef CONFIG_TCP_MD5SIG |
709 | 776 | const __u8 *hash_location = NULL; |
710 | 777 | struct tcp_md5sig_key *hash_expected; |
711 | 778 | const struct ipv6hdr *ip6h = ipv6_hdr(skb); |
712 | 779 | const struct tcphdr *th = tcp_hdr(skb); |
713 | | - int genhash; |
| 780 | + int genhash, l3index; |
714 | 781 | u8 newhash[16]; |
715 | 782 | |
716 | | - hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); |
| 783 | + /* sdif set, means packet ingressed via a device |
| 784 | + * in an L3 domain and dif is set to the l3mdev |
| 785 | + */ |
| 786 | + l3index = sdif ? dif : 0; |
| 787 | + |
| 788 | + hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index); |
717 | 789 | hash_location = tcp_parse_md5sig_option(th); |
718 | 790 | |
719 | 791 | /* We've parsed the options - do we have a hash? */ |
.. | .. |
737 | 809 | |
738 | 810 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { |
739 | 811 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); |
740 | | - net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", |
| 812 | + net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n", |
741 | 813 | genhash ? "failed" : "mismatch", |
742 | 814 | &ip6h->saddr, ntohs(th->source), |
743 | | - &ip6h->daddr, ntohs(th->dest)); |
| 815 | + &ip6h->daddr, ntohs(th->dest), l3index); |
744 | 816 | return true; |
745 | 817 | } |
746 | 818 | #endif |
.. | .. |
751 | 823 | const struct sock *sk_listener, |
752 | 824 | struct sk_buff *skb) |
753 | 825 | { |
| 826 | + bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); |
754 | 827 | struct inet_request_sock *ireq = inet_rsk(req); |
755 | | - const struct ipv6_pinfo *np = inet6_sk(sk_listener); |
| 828 | + const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener); |
756 | 829 | |
757 | 830 | ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; |
758 | 831 | ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; |
759 | 832 | |
760 | 833 | /* So that link locals have meaning */ |
761 | | - if (!sk_listener->sk_bound_dev_if && |
| 834 | + if ((!sk_listener->sk_bound_dev_if || l3_slave) && |
762 | 835 | ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) |
763 | 836 | ireq->ir_iif = tcp_v6_iif(skb); |
764 | 837 | |
.. | .. |
789 | 862 | .syn_ack_timeout = tcp_syn_ack_timeout, |
790 | 863 | }; |
791 | 864 | |
792 | | -static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { |
| 865 | +const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { |
793 | 866 | .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - |
794 | 867 | sizeof(struct ipv6hdr), |
795 | 868 | #ifdef CONFIG_TCP_MD5SIG |
.. | .. |
809 | 882 | static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, |
810 | 883 | u32 ack, u32 win, u32 tsval, u32 tsecr, |
811 | 884 | int oif, struct tcp_md5sig_key *key, int rst, |
812 | | - u8 tclass, __be32 label) |
| 885 | + u8 tclass, __be32 label, u32 priority) |
813 | 886 | { |
814 | 887 | const struct tcphdr *th = tcp_hdr(skb); |
815 | 888 | struct tcphdr *t1; |
.. | .. |
889 | 962 | fl6.flowi6_oif = oif; |
890 | 963 | } |
891 | 964 | |
892 | | - if (sk) |
893 | | - mark = (sk->sk_state == TCP_TIME_WAIT) ? |
894 | | - inet_twsk(sk)->tw_mark : sk->sk_mark; |
| 965 | + if (sk) { |
| 966 | + if (sk->sk_state == TCP_TIME_WAIT) { |
| 967 | + mark = inet_twsk(sk)->tw_mark; |
| 968 | + /* autoflowlabel relies on buff->hash */ |
| 969 | + skb_set_hash(buff, inet_twsk(sk)->tw_txhash, |
| 970 | + PKT_HASH_TYPE_L4); |
| 971 | + } else { |
| 972 | + mark = sk->sk_mark; |
| 973 | + } |
| 974 | + buff->tstamp = tcp_transmit_time(sk); |
| 975 | + } |
895 | 976 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark; |
896 | 977 | fl6.fl6_dport = t1->dest; |
897 | 978 | fl6.fl6_sport = t1->source; |
898 | 979 | fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); |
899 | | - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); |
| 980 | + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); |
900 | 981 | |
901 | 982 | /* Pass a socket to ip6_dst_lookup either it is for RST |
902 | 983 | * Underlying function will use this to retrieve the network |
.. | .. |
905 | 986 | dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); |
906 | 987 | if (!IS_ERR(dst)) { |
907 | 988 | skb_dst_set(buff, dst); |
908 | | - ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); |
| 989 | + ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, |
| 990 | + tclass & ~INET_ECN_MASK, priority); |
909 | 991 | TCP_INC_STATS(net, TCP_MIB_OUTSEGS); |
910 | 992 | if (rst) |
911 | 993 | TCP_INC_STATS(net, TCP_MIB_OUTRSTS); |
.. | .. |
918 | 1000 | static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) |
919 | 1001 | { |
920 | 1002 | const struct tcphdr *th = tcp_hdr(skb); |
| 1003 | + struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
921 | 1004 | u32 seq = 0, ack_seq = 0; |
922 | 1005 | struct tcp_md5sig_key *key = NULL; |
923 | 1006 | #ifdef CONFIG_TCP_MD5SIG |
924 | 1007 | const __u8 *hash_location = NULL; |
925 | | - struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
926 | 1008 | unsigned char newhash[16]; |
927 | 1009 | int genhash; |
928 | 1010 | struct sock *sk1 = NULL; |
929 | 1011 | #endif |
| 1012 | + __be32 label = 0; |
| 1013 | + u32 priority = 0; |
| 1014 | + struct net *net; |
930 | 1015 | int oif = 0; |
931 | 1016 | |
932 | 1017 | if (th->rst) |
.. | .. |
938 | 1023 | if (!sk && !ipv6_unicast_destination(skb)) |
939 | 1024 | return; |
940 | 1025 | |
| 1026 | + net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); |
941 | 1027 | #ifdef CONFIG_TCP_MD5SIG |
942 | 1028 | rcu_read_lock(); |
943 | 1029 | hash_location = tcp_parse_md5sig_option(th); |
944 | 1030 | if (sk && sk_fullsock(sk)) { |
945 | | - key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); |
| 1031 | + int l3index; |
| 1032 | + |
| 1033 | + /* sdif set, means packet ingressed via a device |
| 1034 | + * in an L3 domain and inet_iif is set to it. |
| 1035 | + */ |
| 1036 | + l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; |
| 1037 | + key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index); |
946 | 1038 | } else if (hash_location) { |
| 1039 | + int dif = tcp_v6_iif_l3_slave(skb); |
| 1040 | + int sdif = tcp_v6_sdif(skb); |
| 1041 | + int l3index; |
| 1042 | + |
947 | 1043 | /* |
948 | 1044 | * active side is lost. Try to find listening socket through |
949 | 1045 | * source port, and then find md5 key through listening socket. |
.. | .. |
951 | 1047 | * Incoming packet is checked with md5 hash with finding key, |
952 | 1048 | * no RST generated if md5 hash doesn't match. |
953 | 1049 | */ |
954 | | - sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), |
| 1050 | + sk1 = inet6_lookup_listener(net, |
955 | 1051 | &tcp_hashinfo, NULL, 0, |
956 | 1052 | &ipv6h->saddr, |
957 | 1053 | th->source, &ipv6h->daddr, |
958 | | - ntohs(th->source), |
959 | | - tcp_v6_iif_l3_slave(skb), |
960 | | - tcp_v6_sdif(skb)); |
| 1054 | + ntohs(th->source), dif, sdif); |
961 | 1055 | if (!sk1) |
962 | 1056 | goto out; |
963 | 1057 | |
964 | | - key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); |
| 1058 | + /* sdif set, means packet ingressed via a device |
| 1059 | + * in an L3 domain and dif is set to it. |
| 1060 | + */ |
| 1061 | + l3index = tcp_v6_sdif(skb) ? dif : 0; |
| 1062 | + |
| 1063 | + key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index); |
965 | 1064 | if (!key) |
966 | 1065 | goto out; |
967 | 1066 | |
.. | .. |
979 | 1078 | |
980 | 1079 | if (sk) { |
981 | 1080 | oif = sk->sk_bound_dev_if; |
982 | | - if (sk_fullsock(sk)) |
| 1081 | + if (sk_fullsock(sk)) { |
| 1082 | + const struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
| 1083 | + |
983 | 1084 | trace_tcp_send_reset(sk, skb); |
| 1085 | + if (np->repflow) |
| 1086 | + label = ip6_flowlabel(ipv6h); |
| 1087 | + priority = sk->sk_priority; |
| 1088 | + } |
| 1089 | + if (sk->sk_state == TCP_TIME_WAIT) { |
| 1090 | + label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel); |
| 1091 | + priority = inet_twsk(sk)->tw_priority; |
| 1092 | + } |
| 1093 | + } else { |
| 1094 | + if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET) |
| 1095 | + label = ip6_flowlabel(ipv6h); |
984 | 1096 | } |
985 | 1097 | |
986 | | - tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); |
| 1098 | + tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, |
| 1099 | + ipv6_get_dsfield(ipv6h), label, priority); |
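The reset path now chooses the outgoing flow label and skb priority from whatever context exists: a full socket's flowlabel-reflection setting and sk_priority, a timewait socket's saved values, or, when the flowlabel_reflect sysctl asks for it, the offending packet itself; the packet's DSCP is echoed as well. The function below merely restates that selection with invented types, purely as a reading aid:

```c
/* Illustrative-only restatement of the label/priority selection above;
 * the struct and enum here are invented stand-ins, not kernel types. */
#include <stdint.h>
#include <stdio.h>

enum ctx { CTX_NONE, CTX_FULL_SOCK, CTX_TIMEWAIT };

struct reply_meta { uint32_t label; uint32_t priority; };

static struct reply_meta rst_meta(enum ctx ctx, int repflow, int reflect_sysctl,
				  uint32_t pkt_label, uint32_t sk_priority,
				  uint32_t tw_label, uint32_t tw_priority)
{
	struct reply_meta m = { 0, 0 };

	switch (ctx) {
	case CTX_FULL_SOCK:          /* established/listening socket found */
		if (repflow)
			m.label = pkt_label;
		m.priority = sk_priority;
		break;
	case CTX_TIMEWAIT:           /* values remembered at close time */
		m.label = tw_label;
		m.priority = tw_priority;
		break;
	case CTX_NONE:               /* no socket: optionally reflect label */
		if (reflect_sysctl)
			m.label = pkt_label;
		break;
	}
	return m;
}

int main(void)
{
	struct reply_meta m = rst_meta(CTX_NONE, 0, 1, 0xabcde, 0, 0, 0);

	printf("label=%#x priority=%u\n", (unsigned)m.label, (unsigned)m.priority);
	return 0;
}
```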
987 | 1100 | |
988 | 1101 | #ifdef CONFIG_TCP_MD5SIG |
989 | 1102 | out: |
.. | .. |
994 | 1107 | static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, |
995 | 1108 | u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, |
996 | 1109 | struct tcp_md5sig_key *key, u8 tclass, |
997 | | - __be32 label) |
| 1110 | + __be32 label, u32 priority) |
998 | 1111 | { |
999 | 1112 | tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, |
1000 | | - tclass, label); |
| 1113 | + tclass, label, priority); |
1001 | 1114 | } |
1002 | 1115 | |
1003 | 1116 | static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) |
.. | .. |
1009 | 1122 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
1010 | 1123 | tcp_time_stamp_raw() + tcptw->tw_ts_offset, |
1011 | 1124 | tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), |
1012 | | - tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel)); |
| 1125 | + tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority); |
1013 | 1126 | |
1014 | 1127 | inet_twsk_put(tw); |
1015 | 1128 | } |
.. | .. |
1017 | 1130 | static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, |
1018 | 1131 | struct request_sock *req) |
1019 | 1132 | { |
| 1133 | + int l3index; |
| 1134 | + |
| 1135 | + l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; |
| 1136 | + |
1020 | 1137 | /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV |
1021 | 1138 | * sk->sk_state == TCP_SYN_RECV -> for Fast Open. |
1022 | 1139 | */ |
.. | .. |
1031 | 1148 | req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, |
1032 | 1149 | tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, |
1033 | 1150 | req->ts_recent, sk->sk_bound_dev_if, |
1034 | | - tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr), |
1035 | | - 0, 0); |
| 1151 | + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index), |
| 1152 | + ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority); |
1036 | 1153 | } |
1037 | 1154 | |
1038 | 1155 | |
.. | .. |
1045 | 1162 | sk = cookie_v6_check(sk, skb); |
1046 | 1163 | #endif |
1047 | 1164 | return sk; |
| 1165 | +} |
| 1166 | + |
| 1167 | +u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph, |
| 1168 | + struct tcphdr *th, u32 *cookie) |
| 1169 | +{ |
| 1170 | + u16 mss = 0; |
| 1171 | +#ifdef CONFIG_SYN_COOKIES |
| 1172 | + mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops, |
| 1173 | + &tcp_request_sock_ipv6_ops, sk, th); |
| 1174 | + if (mss) { |
| 1175 | + *cookie = __cookie_v6_init_sequence(iph, th, &mss); |
| 1176 | + tcp_synq_overflow(sk); |
| 1177 | + } |
| 1178 | +#endif |
| 1179 | + return mss; |
1048 | 1180 | } |
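tcp_v6_get_syncookie() lets a caller obtain a SYN-cookie MSS and initial sequence number for a listener without allocating a request sock; a non-zero return means the value written to *cookie can be sent back as the SYN-ACK sequence number. The toy program below only illustrates what a cookie encodes, an MSS-table index hidden in the ISN; it is not the kernel's algorithm, which also hashes addresses, ports, a timestamp counter and secret keys:

```c
#include <stdint.h>
#include <stdio.h>

static const uint16_t mss_table[] = { 536, 1220, 1440, 1460 };

static uint32_t make_cookie(uint32_t hash, uint16_t mss)
{
	unsigned int idx = 0;

	for (unsigned int i = 0; i < 4; i++)
		if (mss >= mss_table[i])
			idx = i;
	return (hash & ~3u) | idx;       /* low bits carry the MSS index */
}

static uint16_t cookie_mss(uint32_t cookie)
{
	return mss_table[cookie & 3u];
}

int main(void)
{
	uint32_t isn = make_cookie(0x9e3779b9, 1460);

	/* the ACK completing the handshake echoes isn + 1; the listener
	 * recovers the negotiated MSS without having kept any state */
	printf("isn=%#x mss=%u\n", (unsigned)isn, (unsigned)cookie_mss(isn));
	return 0;
}
```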
1049 | 1181 | |
1050 | 1182 | static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) |
.. | .. |
1086 | 1218 | { |
1087 | 1219 | struct inet_request_sock *ireq; |
1088 | 1220 | struct ipv6_pinfo *newnp; |
1089 | | - const struct ipv6_pinfo *np = inet6_sk(sk); |
| 1221 | + const struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
1090 | 1222 | struct ipv6_txoptions *opt; |
1091 | | - struct tcp6_sock *newtcp6sk; |
1092 | 1223 | struct inet_sock *newinet; |
| 1224 | + bool found_dup_sk = false; |
1093 | 1225 | struct tcp_sock *newtp; |
1094 | 1226 | struct sock *newsk; |
1095 | 1227 | #ifdef CONFIG_TCP_MD5SIG |
1096 | 1228 | struct tcp_md5sig_key *key; |
| 1229 | + int l3index; |
1097 | 1230 | #endif |
1098 | 1231 | struct flowi6 fl6; |
1099 | 1232 | |
.. | .. |
1108 | 1241 | if (!newsk) |
1109 | 1242 | return NULL; |
1110 | 1243 | |
1111 | | - newtcp6sk = (struct tcp6_sock *)newsk; |
1112 | | - inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; |
| 1244 | + inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); |
1113 | 1245 | |
1114 | 1246 | newinet = inet_sk(newsk); |
1115 | | - newnp = inet6_sk(newsk); |
| 1247 | + newnp = tcp_inet6_sk(newsk); |
1116 | 1248 | newtp = tcp_sk(newsk); |
1117 | 1249 | |
1118 | 1250 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
.. | .. |
1120 | 1252 | newnp->saddr = newsk->sk_v6_rcv_saddr; |
1121 | 1253 | |
1122 | 1254 | inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; |
| 1255 | + if (sk_is_mptcp(newsk)) |
| 1256 | + mptcpv6_handle_mapped(newsk, true); |
1123 | 1257 | newsk->sk_backlog_rcv = tcp_v4_do_rcv; |
1124 | 1258 | #ifdef CONFIG_TCP_MD5SIG |
1125 | 1259 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; |
.. | .. |
1176 | 1310 | ip6_dst_store(newsk, dst, NULL, NULL); |
1177 | 1311 | inet6_sk_rx_dst_set(newsk, skb); |
1178 | 1312 | |
1179 | | - newtcp6sk = (struct tcp6_sock *)newsk; |
1180 | | - inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; |
| 1313 | + inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); |
1181 | 1314 | |
1182 | 1315 | newtp = tcp_sk(newsk); |
1183 | 1316 | newinet = inet_sk(newsk); |
1184 | | - newnp = inet6_sk(newsk); |
| 1317 | + newnp = tcp_inet6_sk(newsk); |
1185 | 1318 | |
1186 | 1319 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
1187 | 1320 | |
.. | .. |
1209 | 1342 | newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); |
1210 | 1343 | if (np->repflow) |
1211 | 1344 | newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); |
| 1345 | + |
| 1346 | + /* Set ToS of the new socket based upon the value of incoming SYN. |
| 1347 | + * ECT bits are set later in tcp_init_transfer(). |
| 1348 | + */ |
| 1349 | + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) |
| 1350 | + newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; |
1212 | 1351 | |
1213 | 1352 | /* Clone native IPv6 options from listening socket (if any) |
1214 | 1353 | |
.. | .. |
1239 | 1378 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; |
1240 | 1379 | |
1241 | 1380 | #ifdef CONFIG_TCP_MD5SIG |
| 1381 | + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); |
| 1382 | + |
1242 | 1383 | /* Copy over the MD5 key from the original socket */ |
1243 | | - key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr); |
| 1384 | + key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index); |
1244 | 1385 | if (key) { |
1245 | 1386 | /* We're using one, so create a matching key |
1246 | 1387 | * on the newsk structure. If we fail to get |
.. | .. |
1248 | 1389 | * across. Shucks. |
1249 | 1390 | */ |
1250 | 1391 | tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr, |
1251 | | - AF_INET6, 128, key->key, key->keylen, |
| 1392 | + AF_INET6, 128, l3index, key->key, key->keylen, |
1252 | 1393 | sk_gfp_mask(sk, GFP_ATOMIC)); |
1253 | 1394 | } |
1254 | 1395 | #endif |
.. | .. |
1258 | 1399 | tcp_done(newsk); |
1259 | 1400 | goto out; |
1260 | 1401 | } |
1261 | | - *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); |
| 1402 | + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), |
| 1403 | + &found_dup_sk); |
1262 | 1404 | if (*own_req) { |
1263 | 1405 | tcp_move_syn(newtp, req); |
1264 | 1406 | |
.. | .. |
1272 | 1414 | tcp_v6_restore_cb(newnp->pktoptions); |
1273 | 1415 | skb_set_owner_r(newnp->pktoptions, newsk); |
1274 | 1416 | } |
| 1417 | + } |
| 1418 | + } else { |
| 1419 | + if (!req_unhash && found_dup_sk) { |
| 1420 | + /* This code path should only be executed in the |
| 1421 | + * syncookie case only |
| 1422 | + */ |
| 1423 | + bh_unlock_sock(newsk); |
| 1424 | + sock_put(newsk); |
| 1425 | + newsk = NULL; |
1275 | 1426 | } |
1276 | 1427 | } |
1277 | 1428 | |
.. | .. |
1296 | 1447 | */ |
1297 | 1448 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) |
1298 | 1449 | { |
1299 | | - struct ipv6_pinfo *np = inet6_sk(sk); |
1300 | | - struct tcp_sock *tp; |
| 1450 | + struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
1301 | 1451 | struct sk_buff *opt_skb = NULL; |
| 1452 | + struct tcp_sock *tp; |
1302 | 1453 | |
1303 | 1454 | /* Imagine: socket is IPv6. IPv4 packet arrives, |
1304 | 1455 | goes to IPv4 receive handler and backlogged. |
.. | .. |
1333 | 1484 | opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); |
1334 | 1485 | |
1335 | 1486 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1336 | | - struct dst_entry *dst = sk->sk_rx_dst; |
| 1487 | + struct dst_entry *dst; |
| 1488 | + |
| 1489 | + dst = rcu_dereference_protected(sk->sk_rx_dst, |
| 1490 | + lockdep_sock_is_held(sk)); |
1337 | 1491 | |
1338 | 1492 | sock_rps_save_rxhash(sk, skb); |
1339 | 1493 | sk_mark_napi_id(sk, skb); |
1340 | 1494 | if (dst) { |
1341 | 1495 | if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || |
1342 | 1496 | dst->ops->check(dst, np->rx_dst_cookie) == NULL) { |
| 1497 | + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); |
1343 | 1498 | dst_release(dst); |
1344 | | - sk->sk_rx_dst = NULL; |
1345 | 1499 | } |
1346 | 1500 | } |
1347 | 1501 | |
.. | .. |
1446 | 1600 | skb->tstamp || skb_hwtstamps(skb)->hwtstamp; |
1447 | 1601 | } |
1448 | 1602 | |
1449 | | -static int tcp_v6_rcv(struct sk_buff *skb) |
| 1603 | +INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) |
1450 | 1604 | { |
| 1605 | + struct sk_buff *skb_to_free; |
1451 | 1606 | int sdif = inet6_sdif(skb); |
| 1607 | + int dif = inet6_iif(skb); |
1452 | 1608 | const struct tcphdr *th; |
1453 | 1609 | const struct ipv6hdr *hdr; |
1454 | 1610 | bool refcounted; |
.. | .. |
1497 | 1653 | struct sock *nsk; |
1498 | 1654 | |
1499 | 1655 | sk = req->rsk_listener; |
1500 | | - if (tcp_v6_inbound_md5_hash(sk, skb)) { |
| 1656 | + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) { |
1501 | 1657 | sk_drops_add(sk, skb); |
1502 | 1658 | reqsk_put(req); |
1503 | 1659 | goto discard_it; |
.. | .. |
1544 | 1700 | return 0; |
1545 | 1701 | } |
1546 | 1702 | } |
1547 | | - if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { |
| 1703 | + if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) { |
1548 | 1704 | __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); |
1549 | 1705 | goto discard_and_relse; |
1550 | 1706 | } |
.. | .. |
1552 | 1708 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1553 | 1709 | goto discard_and_relse; |
1554 | 1710 | |
1555 | | - if (tcp_v6_inbound_md5_hash(sk, skb)) |
| 1711 | + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) |
1556 | 1712 | goto discard_and_relse; |
1557 | 1713 | |
1558 | 1714 | if (tcp_filter(sk, skb)) |
.. | .. |
1574 | 1730 | tcp_segs_in(tcp_sk(sk), skb); |
1575 | 1731 | ret = 0; |
1576 | 1732 | if (!sock_owned_by_user(sk)) { |
| 1733 | + skb_to_free = sk->sk_rx_skb_cache; |
| 1734 | + sk->sk_rx_skb_cache = NULL; |
1577 | 1735 | ret = tcp_v6_do_rcv(sk, skb); |
1578 | | - } else if (tcp_add_backlog(sk, skb)) { |
1579 | | - goto discard_and_relse; |
| 1736 | + } else { |
| 1737 | + if (tcp_add_backlog(sk, skb)) |
| 1738 | + goto discard_and_relse; |
| 1739 | + skb_to_free = NULL; |
1580 | 1740 | } |
1581 | 1741 | bh_unlock_sock(sk); |
1582 | | - |
| 1742 | + if (skb_to_free) |
| 1743 | + __kfree_skb(skb_to_free); |
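While the socket is locked, the per-socket sk_rx_skb_cache skb is only detached; the actual free happens after bh_unlock_sock(), keeping that work out of the critical section. The same shape in generic, illustrative form:

```c
/* Detach the object under the lock, free it only after the lock is
 * released. Plain pthreads, for illustration only. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *cached;                 /* plays the role of sk_rx_skb_cache */

static void consume(void)
{
	char *to_free;

	pthread_mutex_lock(&lock);
	to_free = cached;            /* detach while protected ... */
	cached = NULL;
	pthread_mutex_unlock(&lock);

	free(to_free);               /* ... but do the costly free unlocked */
}

int main(void)
{
	cached = malloc(2048);
	consume();
	puts("freed outside the critical section");
	return 0;
}
```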
1583 | 1744 | put_and_return: |
1584 | 1745 | if (refcounted) |
1585 | 1746 | sock_put(sk); |
.. | .. |
1645 | 1806 | } |
1646 | 1807 | } |
1647 | 1808 | /* to ACK */ |
1648 | | - /* fall through */ |
| 1809 | + fallthrough; |
1649 | 1810 | case TCP_TW_ACK: |
1650 | 1811 | tcp_v6_timewait_ack(sk, skb); |
1651 | 1812 | break; |
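The comment marker becomes the kernel's fallthrough pseudo-keyword, which the kernel maps to the compiler's fallthrough statement attribute where available, so -Wimplicit-fallthrough keeps catching unintended cases. Outside the kernel the equivalent looks like this:

```c
#include <stdio.h>

#define fallthrough __attribute__((__fallthrough__))

static const char *classify(int state)
{
	switch (state) {
	case 1:
		printf("finishing ACK work\n");
		fallthrough;          /* deliberate: case 2 handles the ACK too */
	case 2:
		return "ack";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", classify(1));
	return 0;
}
```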
.. | .. |
1659 | 1820 | goto discard_it; |
1660 | 1821 | } |
1661 | 1822 | |
1662 | | -static void tcp_v6_early_demux(struct sk_buff *skb) |
| 1823 | +void tcp_v6_early_demux(struct sk_buff *skb) |
1663 | 1824 | { |
1664 | 1825 | const struct ipv6hdr *hdr; |
1665 | 1826 | const struct tcphdr *th; |
.. | .. |
1686 | 1847 | skb->sk = sk; |
1687 | 1848 | skb->destructor = sock_edemux; |
1688 | 1849 | if (sk_fullsock(sk)) { |
1689 | | - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
| 1850 | + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); |
1690 | 1851 | |
1691 | 1852 | if (dst) |
1692 | | - dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); |
| 1853 | + dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie); |
1693 | 1854 | if (dst && |
1694 | 1855 | inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) |
1695 | 1856 | skb_dst_set_noref(skb, dst); |
.. | .. |
1703 | 1864 | .twsk_destructor = tcp_twsk_destructor, |
1704 | 1865 | }; |
1705 | 1866 | |
1706 | | -static const struct inet_connection_sock_af_ops ipv6_specific = { |
| 1867 | +INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) |
| 1868 | +{ |
| 1869 | + struct ipv6_pinfo *np = inet6_sk(sk); |
| 1870 | + |
| 1871 | + __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); |
| 1872 | +} |
| 1873 | + |
| 1874 | +const struct inet_connection_sock_af_ops ipv6_specific = { |
1707 | 1875 | .queue_xmit = inet6_csk_xmit, |
1708 | 1876 | .send_check = tcp_v6_send_check, |
1709 | 1877 | .rebuild_header = inet6_sk_rebuild_header, |
.. | .. |
1716 | 1884 | .getsockopt = ipv6_getsockopt, |
1717 | 1885 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1718 | 1886 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1719 | | -#ifdef CONFIG_COMPAT |
1720 | | - .compat_setsockopt = compat_ipv6_setsockopt, |
1721 | | - .compat_getsockopt = compat_ipv6_getsockopt, |
1722 | | -#endif |
1723 | 1887 | .mtu_reduced = tcp_v6_mtu_reduced, |
1724 | 1888 | }; |
1725 | 1889 | |
.. | .. |
1746 | 1910 | .getsockopt = ipv6_getsockopt, |
1747 | 1911 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1748 | 1912 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1749 | | -#ifdef CONFIG_COMPAT |
1750 | | - .compat_setsockopt = compat_ipv6_setsockopt, |
1751 | | - .compat_getsockopt = compat_ipv6_getsockopt, |
1752 | | -#endif |
1753 | 1913 | .mtu_reduced = tcp_v4_mtu_reduced, |
1754 | 1914 | }; |
1755 | 1915 | |
.. | .. |
1855 | 2015 | |
1856 | 2016 | state = inet_sk_state_load(sp); |
1857 | 2017 | if (state == TCP_LISTEN) |
1858 | | - rx_queue = sp->sk_ack_backlog; |
| 2018 | + rx_queue = READ_ONCE(sp->sk_ack_backlog); |
1859 | 2019 | else |
1860 | 2020 | /* Because we don't lock the socket, |
1861 | 2021 | * we might find a transient negative value. |
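sk_ack_backlog is updated elsewhere without this /proc reader holding the socket lock, so the read is wrapped in READ_ONCE() to make the lockless access explicit and keep the compiler from tearing or re-reading it. A userspace approximation of the READ_ONCE()/WRITE_ONCE() pair (the kernel macros also add size checks):

```c
#include <pthread.h>
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static int ack_backlog;              /* written by another thread, no lock */

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++)
		WRITE_ONCE(ack_backlog, i);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	printf("snapshot: %d\n", READ_ONCE(ack_backlog));  /* racy, by design */
	pthread_join(&t, NULL);
	return 0;
}
```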
.. | .. |
1883 | 2043 | refcount_read(&sp->sk_refcnt), sp, |
1884 | 2044 | jiffies_to_clock_t(icsk->icsk_rto), |
1885 | 2045 | jiffies_to_clock_t(icsk->icsk_ack.ato), |
1886 | | - (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, |
| 2046 | + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp), |
1887 | 2047 | tp->snd_cwnd, |
1888 | 2048 | state == TCP_LISTEN ? |
1889 | 2049 | fastopenq->max_qlen : |
.. | .. |
2007 | 2167 | .rsk_prot = &tcp6_request_sock_ops, |
2008 | 2168 | .h.hashinfo = &tcp_hashinfo, |
2009 | 2169 | .no_autobind = true, |
2010 | | -#ifdef CONFIG_COMPAT |
2011 | | - .compat_setsockopt = compat_tcp_setsockopt, |
2012 | | - .compat_getsockopt = compat_tcp_getsockopt, |
2013 | | -#endif |
2014 | 2170 | .diag_destroy = tcp_abort, |
2015 | 2171 | }; |
| 2172 | +EXPORT_SYMBOL_GPL(tcpv6_prot); |
2016 | 2173 | |
2017 | | -/* thinking of making this const? Don't. |
2018 | | - * early_demux can change based on sysctl. |
2019 | | - */ |
2020 | | -static struct inet6_protocol tcpv6_protocol = { |
2021 | | - .early_demux = tcp_v6_early_demux, |
2022 | | - .early_demux_handler = tcp_v6_early_demux, |
| 2174 | +static const struct inet6_protocol tcpv6_protocol = { |
2023 | 2175 | .handler = tcp_v6_rcv, |
2024 | 2176 | .err_handler = tcp_v6_err, |
2025 | 2177 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
.. | .. |
2072 | 2224 | ret = register_pernet_subsys(&tcpv6_net_ops); |
2073 | 2225 | if (ret) |
2074 | 2226 | goto out_tcpv6_protosw; |
| 2227 | + |
| 2228 | + ret = mptcpv6_init(); |
| 2229 | + if (ret) |
| 2230 | + goto out_tcpv6_pernet_subsys; |
| 2231 | + |
2075 | 2232 | out: |
2076 | 2233 | return ret; |
2077 | 2234 | |
| 2235 | +out_tcpv6_pernet_subsys: |
| 2236 | + unregister_pernet_subsys(&tcpv6_net_ops); |
2078 | 2237 | out_tcpv6_protosw: |
2079 | 2238 | inet6_unregister_protosw(&tcpv6_protosw); |
2080 | 2239 | out_tcpv6_protocol: |
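tcpv6_init() gains the mptcpv6_init() step, and the exit ladder gains a matching label so everything registered before a failure is unwound in reverse order. The same goto-unwinding shape in miniature, with placeholder register_a/b/c functions that are not real kernel APIs:

```c
#include <stdio.h>

static int register_a(void) { puts("a up");   return 0; }
static int register_b(void) { puts("b up");   return 0; }
static int register_c(void) { puts("c fail"); return -1; }
static void unregister_a(void) { puts("a down"); }
static void unregister_b(void) { puts("b down"); }

/* Each successful step gets a label that undoes it; a failure jumps to the
 * label of the last step that completed, so teardown runs in reverse order. */
static int subsystem_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto out;
	ret = register_b();
	if (ret)
		goto out_a;
	ret = register_c();
	if (ret)
		goto out_b;
	return 0;

out_b:
	unregister_b();
out_a:
	unregister_a();
out:
	return ret;
}

int main(void)
{
	return subsystem_init() ? 1 : 0;
}
```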