.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
1 | 2 | /* |
2 | 3 | * TCP over IPv6 |
3 | 4 | * Linux INET6 implementation |
.. | .. |
16 | 17 | * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind |
17 | 18 | * a single port at the same time. |
18 | 19 | * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file. |
19 | | - * |
20 | | - * This program is free software; you can redistribute it and/or |
21 | | - * modify it under the terms of the GNU General Public License |
22 | | - * as published by the Free Software Foundation; either version |
23 | | - * 2 of the License, or (at your option) any later version. |
24 | 20 | */ |
25 | 21 | |
26 | 22 | #include <linux/bottom_half.h> |
.. | .. |
43 | 39 | #include <linux/ipv6.h> |
44 | 40 | #include <linux/icmpv6.h> |
45 | 41 | #include <linux/random.h> |
| 42 | +#include <linux/indirect_call_wrapper.h> |
46 | 43 | |
47 | 44 | #include <net/tcp.h> |
48 | 45 | #include <net/ndisc.h> |
.. | .. |
78 | 75 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
79 | 76 | |
80 | 77 | static const struct inet_connection_sock_af_ops ipv6_mapped; |
81 | | -static const struct inet_connection_sock_af_ops ipv6_specific; |
| 78 | +const struct inet_connection_sock_af_ops ipv6_specific; |
82 | 79 | #ifdef CONFIG_TCP_MD5SIG |
83 | 80 | static const struct tcp_sock_af_ops tcp_sock_ipv6_specific; |
84 | 81 | static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; |
85 | 82 | #else |
86 | 83 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, |
87 | | - const struct in6_addr *addr) |
| 84 | + const struct in6_addr *addr, |
| 85 | + int l3index) |
88 | 86 | { |
89 | 87 | return NULL; |
90 | 88 | } |
91 | 89 | #endif |
| 90 | + |
| 91 | +/* Helper returning the inet6 address from a given tcp socket. |
| 92 | + * It can be used in TCP stack instead of inet6_sk(sk). |
| 93 | + * This avoids a dereference and allow compiler optimizations. |
| 94 | + * It is a specialized version of inet6_sk_generic(). |
| 95 | + */ |
| 96 | +static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk) |
| 97 | +{ |
| 98 | + unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo); |
| 99 | + |
| 100 | + return (struct ipv6_pinfo *)(((u8 *)sk) + offset); |
| 101 | +} |
92 | 102 | |
93 | 103 | static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) |
94 | 104 | { |
.. | .. |
97 | 107 | if (dst && dst_hold_safe(dst)) { |
98 | 108 | const struct rt6_info *rt = (const struct rt6_info *)dst; |
99 | 109 | |
100 | | - sk->sk_rx_dst = dst; |
| 110 | + rcu_assign_pointer(sk->sk_rx_dst, dst); |
101 | 111 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; |
102 | | - inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); |
| 112 | + tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); |
103 | 113 | } |
104 | 114 | } |
105 | 115 | |
.. | .. |
138 | 148 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
139 | 149 | struct inet_sock *inet = inet_sk(sk); |
140 | 150 | struct inet_connection_sock *icsk = inet_csk(sk); |
141 | | - struct ipv6_pinfo *np = inet6_sk(sk); |
| 151 | + struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
142 | 152 | struct tcp_sock *tp = tcp_sk(sk); |
143 | 153 | struct in6_addr *saddr = NULL, *final_p, final; |
144 | 154 | struct ipv6_txoptions *opt; |
.. | .. |
162 | 172 | if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { |
163 | 173 | struct ip6_flowlabel *flowlabel; |
164 | 174 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
165 | | - if (!flowlabel) |
| 175 | + if (IS_ERR(flowlabel)) |
166 | 176 | return -EINVAL; |
167 | 177 | fl6_sock_release(flowlabel); |
168 | 178 | } |
.. | .. |
220 | 230 | u32 exthdrlen = icsk->icsk_ext_hdr_len; |
221 | 231 | struct sockaddr_in sin; |
222 | 232 | |
223 | | - SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); |
224 | | - |
225 | 233 | if (__ipv6_only_sock(sk)) |
226 | 234 | return -ENETUNREACH; |
227 | 235 | |
.. | .. |
230 | 238 | sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; |
231 | 239 | |
232 | 240 | icsk->icsk_af_ops = &ipv6_mapped; |
| 241 | + if (sk_is_mptcp(sk)) |
| 242 | + mptcpv6_handle_mapped(sk, true); |
233 | 243 | sk->sk_backlog_rcv = tcp_v4_do_rcv; |
234 | 244 | #ifdef CONFIG_TCP_MD5SIG |
235 | 245 | tp->af_specific = &tcp_sock_ipv6_mapped_specific; |
.. | .. |
240 | 250 | if (err) { |
241 | 251 | icsk->icsk_ext_hdr_len = exthdrlen; |
242 | 252 | icsk->icsk_af_ops = &ipv6_specific; |
| 253 | + if (sk_is_mptcp(sk)) |
| 254 | + mptcpv6_handle_mapped(sk, false); |
243 | 255 | sk->sk_backlog_rcv = tcp_v6_do_rcv; |
244 | 256 | #ifdef CONFIG_TCP_MD5SIG |
245 | 257 | tp->af_specific = &tcp_sock_ipv6_specific; |
.. | .. |
257 | 269 | fl6.flowi6_proto = IPPROTO_TCP; |
258 | 270 | fl6.daddr = sk->sk_v6_daddr; |
259 | 271 | fl6.saddr = saddr ? *saddr : np->saddr; |
| 272 | + fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); |
260 | 273 | fl6.flowi6_oif = sk->sk_bound_dev_if; |
261 | 274 | fl6.flowi6_mark = sk->sk_mark; |
262 | 275 | fl6.fl6_dport = usin->sin6_port; |
.. | .. |
266 | 279 | opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); |
267 | 280 | final_p = fl6_update_dst(&fl6, opt, &final); |
268 | 281 | |
269 | | - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
| 282 | + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); |
270 | 283 | |
271 | 284 | dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); |
272 | 285 | if (IS_ERR(dst)) { |
.. | .. |
327 | 340 | |
328 | 341 | late_failure: |
329 | 342 | tcp_set_state(sk, TCP_CLOSE); |
| 343 | + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
| 344 | + inet_reset_saddr(sk); |
330 | 345 | failure: |
331 | 346 | inet->inet_dport = 0; |
332 | 347 | sk->sk_route_caps = 0; |
.. | .. |
359 | 374 | } |
360 | 375 | } |
361 | 376 | |
362 | | -static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
| 377 | +static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
363 | 378 | u8 type, u8 code, int offset, __be32 info) |
364 | 379 | { |
365 | 380 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; |
.. | .. |
381 | 396 | if (!sk) { |
382 | 397 | __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), |
383 | 398 | ICMP6_MIB_INERRORS); |
384 | | - return; |
| 399 | + return -ENOENT; |
385 | 400 | } |
386 | 401 | |
387 | 402 | if (sk->sk_state == TCP_TIME_WAIT) { |
388 | 403 | inet_twsk_put(inet_twsk(sk)); |
389 | | - return; |
| 404 | + return 0; |
390 | 405 | } |
391 | 406 | seq = ntohl(th->seq); |
392 | 407 | fatal = icmpv6_err_convert(type, code, &err); |
393 | | - if (sk->sk_state == TCP_NEW_SYN_RECV) |
394 | | - return tcp_req_err(sk, seq, fatal); |
| 408 | + if (sk->sk_state == TCP_NEW_SYN_RECV) { |
| 409 | + tcp_req_err(sk, seq, fatal); |
| 410 | + return 0; |
| 411 | + } |
395 | 412 | |
396 | 413 | bh_lock_sock(sk); |
397 | 414 | if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) |
.. | .. |
400 | 417 | if (sk->sk_state == TCP_CLOSE) |
401 | 418 | goto out; |
402 | 419 | |
403 | | - if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { |
| 420 | + if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) { |
404 | 421 | __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); |
405 | 422 | goto out; |
406 | 423 | } |
407 | 424 | |
408 | 425 | tp = tcp_sk(sk); |
409 | 426 | /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ |
410 | | - fastopen = tp->fastopen_rsk; |
| 427 | + fastopen = rcu_dereference(tp->fastopen_rsk); |
411 | 428 | snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; |
412 | 429 | if (sk->sk_state != TCP_LISTEN && |
413 | 430 | !between(seq, snd_una, tp->snd_nxt)) { |
.. | .. |
415 | 432 | goto out; |
416 | 433 | } |
417 | 434 | |
418 | | - np = inet6_sk(sk); |
| 435 | + np = tcp_inet6_sk(sk); |
419 | 436 | |
420 | 437 | if (type == NDISC_REDIRECT) { |
421 | 438 | if (!sock_owned_by_user(sk)) { |
.. | .. |
459 | 476 | case TCP_SYN_SENT: |
460 | 477 | case TCP_SYN_RECV: |
461 | 478 | /* Only in fast or simultaneous open. If a fast open socket is |
462 | | - * is already accepted it is treated as a connected one below. |
| 479 | + * already accepted it is treated as a connected one below. |
463 | 480 | */ |
464 | 481 | if (fastopen && !fastopen->sk) |
465 | 482 | break; |
| 483 | + |
| 484 | + ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th); |
466 | 485 | |
467 | 486 | if (!sock_owned_by_user(sk)) { |
468 | 487 | sk->sk_err = err; |
.. | .. |
472 | 491 | } else |
473 | 492 | sk->sk_err_soft = err; |
474 | 493 | goto out; |
| 494 | + case TCP_LISTEN: |
| 495 | + break; |
| 496 | + default: |
| 497 | + /* check if this ICMP message allows revert of backoff. |
| 498 | + * (see RFC 6069) |
| 499 | + */ |
| 500 | + if (!fastopen && type == ICMPV6_DEST_UNREACH && |
| 501 | + code == ICMPV6_NOROUTE) |
| 502 | + tcp_ld_RTO_revert(sk, seq); |
475 | 503 | } |
476 | 504 | |
477 | 505 | if (!sock_owned_by_user(sk) && np->recverr) { |
.. | .. |
483 | 511 | out: |
484 | 512 | bh_unlock_sock(sk); |
485 | 513 | sock_put(sk); |
| 514 | + return 0; |
486 | 515 | } |
487 | 516 | |
488 | 517 | |
.. | .. |
490 | 519 | struct flowi *fl, |
491 | 520 | struct request_sock *req, |
492 | 521 | struct tcp_fastopen_cookie *foc, |
493 | | - enum tcp_synack_type synack_type) |
| 522 | + enum tcp_synack_type synack_type, |
| 523 | + struct sk_buff *syn_skb) |
494 | 524 | { |
495 | 525 | struct inet_request_sock *ireq = inet_rsk(req); |
496 | | - struct ipv6_pinfo *np = inet6_sk(sk); |
| 526 | + struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
497 | 527 | struct ipv6_txoptions *opt; |
498 | 528 | struct flowi6 *fl6 = &fl->u.ip6; |
499 | 529 | struct sk_buff *skb; |
500 | 530 | int err = -ENOMEM; |
| 531 | + u8 tclass; |
501 | 532 | |
502 | 533 | /* First, grab a route. */ |
503 | 534 | if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, |
504 | 535 | IPPROTO_TCP)) == NULL) |
505 | 536 | goto done; |
506 | 537 | |
507 | | - skb = tcp_make_synack(sk, dst, req, foc, synack_type); |
| 538 | + skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb); |
508 | 539 | |
509 | 540 | if (skb) { |
510 | 541 | __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, |
.. | .. |
514 | 545 | if (np->repflow && ireq->pktopts) |
515 | 546 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); |
516 | 547 | |
| 548 | + tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ? |
| 549 | + (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | |
| 550 | + (np->tclass & INET_ECN_MASK) : |
| 551 | + np->tclass; |
| 552 | + |
| 553 | + if (!INET_ECN_is_capable(tclass) && |
| 554 | + tcp_bpf_ca_needs_ecn((struct sock *)req)) |
| 555 | + tclass |= INET_ECN_ECT_0; |
| 556 | + |
517 | 557 | rcu_read_lock(); |
518 | 558 | opt = ireq->ipv6_opt; |
519 | 559 | if (!opt) |
520 | 560 | opt = rcu_dereference(np->opt); |
521 | 561 | err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt, |
522 | | - np->tclass); |
| 562 | + tclass, sk->sk_priority); |
523 | 563 | rcu_read_unlock(); |
524 | 564 | err = net_xmit_eval(err); |
525 | 565 | } |
.. | .. |
537 | 577 | |
538 | 578 | #ifdef CONFIG_TCP_MD5SIG |
539 | 579 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, |
540 | | - const struct in6_addr *addr) |
| 580 | + const struct in6_addr *addr, |
| 581 | + int l3index) |
541 | 582 | { |
542 | | - return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6); |
| 583 | + return tcp_md5_do_lookup(sk, l3index, |
| 584 | + (union tcp_md5_addr *)addr, AF_INET6); |
543 | 585 | } |
544 | 586 | |
545 | 587 | static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, |
546 | 588 | const struct sock *addr_sk) |
547 | 589 | { |
548 | | - return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr); |
| 590 | + int l3index; |
| 591 | + |
| 592 | + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), |
| 593 | + addr_sk->sk_bound_dev_if); |
| 594 | + return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr, |
| 595 | + l3index); |
549 | 596 | } |
550 | 597 | |
551 | 598 | static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, |
552 | | - char __user *optval, int optlen) |
| 599 | + sockptr_t optval, int optlen) |
553 | 600 | { |
554 | 601 | struct tcp_md5sig cmd; |
555 | 602 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; |
| 603 | + int l3index = 0; |
556 | 604 | u8 prefixlen; |
557 | 605 | |
558 | 606 | if (optlen < sizeof(cmd)) |
559 | 607 | return -EINVAL; |
560 | 608 | |
561 | | - if (copy_from_user(&cmd, optval, sizeof(cmd))) |
| 609 | + if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) |
562 | 610 | return -EFAULT; |
563 | 611 | |
564 | 612 | if (sin6->sin6_family != AF_INET6) |
.. | .. |
574 | 622 | prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128; |
575 | 623 | } |
576 | 624 | |
| 625 | + if (optname == TCP_MD5SIG_EXT && |
| 626 | + cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) { |
| 627 | + struct net_device *dev; |
| 628 | + |
| 629 | + rcu_read_lock(); |
| 630 | + dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex); |
| 631 | + if (dev && netif_is_l3_master(dev)) |
| 632 | + l3index = dev->ifindex; |
| 633 | + rcu_read_unlock(); |
| 634 | + |
| 635 | + /* ok to reference set/not set outside of rcu; |
| 636 | + * right now device MUST be an L3 master |
| 637 | + */ |
| 638 | + if (!dev || !l3index) |
| 639 | + return -EINVAL; |
| 640 | + } |
| 641 | + |
577 | 642 | if (!cmd.tcpm_keylen) { |
578 | 643 | if (ipv6_addr_v4mapped(&sin6->sin6_addr)) |
579 | 644 | return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], |
580 | | - AF_INET, prefixlen); |
| 645 | + AF_INET, prefixlen, |
| 646 | + l3index); |
581 | 647 | return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, |
582 | | - AF_INET6, prefixlen); |
| 648 | + AF_INET6, prefixlen, l3index); |
583 | 649 | } |
584 | 650 | |
585 | 651 | if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) |
.. | .. |
587 | 653 | |
588 | 654 | if (ipv6_addr_v4mapped(&sin6->sin6_addr)) |
589 | 655 | return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], |
590 | | - AF_INET, prefixlen, cmd.tcpm_key, |
591 | | - cmd.tcpm_keylen, GFP_KERNEL); |
| 656 | + AF_INET, prefixlen, l3index, |
| 657 | + cmd.tcpm_key, cmd.tcpm_keylen, |
| 658 | + GFP_KERNEL); |
592 | 659 | |
593 | 660 | return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr, |
594 | | - AF_INET6, prefixlen, cmd.tcpm_key, |
595 | | - cmd.tcpm_keylen, GFP_KERNEL); |
| 661 | + AF_INET6, prefixlen, l3index, |
| 662 | + cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); |
596 | 663 | } |
597 | 664 | |
598 | 665 | static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp, |
.. | .. |
703 | 770 | #endif |
704 | 771 | |
705 | 772 | static bool tcp_v6_inbound_md5_hash(const struct sock *sk, |
706 | | - const struct sk_buff *skb) |
| 773 | + const struct sk_buff *skb, |
| 774 | + int dif, int sdif) |
707 | 775 | { |
708 | 776 | #ifdef CONFIG_TCP_MD5SIG |
709 | 777 | const __u8 *hash_location = NULL; |
710 | 778 | struct tcp_md5sig_key *hash_expected; |
711 | 779 | const struct ipv6hdr *ip6h = ipv6_hdr(skb); |
712 | 780 | const struct tcphdr *th = tcp_hdr(skb); |
713 | | - int genhash; |
| 781 | + int genhash, l3index; |
714 | 782 | u8 newhash[16]; |
715 | 783 | |
716 | | - hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); |
| 784 | + /* sdif set, means packet ingressed via a device |
| 785 | + * in an L3 domain and dif is set to the l3mdev |
| 786 | + */ |
| 787 | + l3index = sdif ? dif : 0; |
| 788 | + |
| 789 | + hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index); |
717 | 790 | hash_location = tcp_parse_md5sig_option(th); |
718 | 791 | |
719 | 792 | /* We've parsed the options - do we have a hash? */ |
.. | .. |
737 | 810 | |
738 | 811 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { |
739 | 812 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); |
740 | | - net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", |
| 813 | + net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n", |
741 | 814 | genhash ? "failed" : "mismatch", |
742 | 815 | &ip6h->saddr, ntohs(th->source), |
743 | | - &ip6h->daddr, ntohs(th->dest)); |
| 816 | + &ip6h->daddr, ntohs(th->dest), l3index); |
744 | 817 | return true; |
745 | 818 | } |
746 | 819 | #endif |
.. | .. |
751 | 824 | const struct sock *sk_listener, |
752 | 825 | struct sk_buff *skb) |
753 | 826 | { |
| 827 | + bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); |
754 | 828 | struct inet_request_sock *ireq = inet_rsk(req); |
755 | | - const struct ipv6_pinfo *np = inet6_sk(sk_listener); |
| 829 | + const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener); |
756 | 830 | |
757 | 831 | ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; |
758 | 832 | ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; |
759 | 833 | |
760 | 834 | /* So that link locals have meaning */ |
761 | | - if (!sk_listener->sk_bound_dev_if && |
| 835 | + if ((!sk_listener->sk_bound_dev_if || l3_slave) && |
762 | 836 | ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) |
763 | 837 | ireq->ir_iif = tcp_v6_iif(skb); |
764 | 838 | |
.. | .. |
789 | 863 | .syn_ack_timeout = tcp_syn_ack_timeout, |
790 | 864 | }; |
791 | 865 | |
792 | | -static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { |
| 866 | +const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { |
793 | 867 | .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - |
794 | 868 | sizeof(struct ipv6hdr), |
795 | 869 | #ifdef CONFIG_TCP_MD5SIG |
.. | .. |
809 | 883 | static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, |
810 | 884 | u32 ack, u32 win, u32 tsval, u32 tsecr, |
811 | 885 | int oif, struct tcp_md5sig_key *key, int rst, |
812 | | - u8 tclass, __be32 label) |
| 886 | + u8 tclass, __be32 label, u32 priority) |
813 | 887 | { |
814 | 888 | const struct tcphdr *th = tcp_hdr(skb); |
815 | 889 | struct tcphdr *t1; |
.. | .. |
889 | 963 | fl6.flowi6_oif = oif; |
890 | 964 | } |
891 | 965 | |
892 | | - if (sk) |
893 | | - mark = (sk->sk_state == TCP_TIME_WAIT) ? |
894 | | - inet_twsk(sk)->tw_mark : sk->sk_mark; |
| 966 | + if (sk) { |
| 967 | + if (sk->sk_state == TCP_TIME_WAIT) { |
| 968 | + mark = inet_twsk(sk)->tw_mark; |
| 969 | + /* autoflowlabel relies on buff->hash */ |
| 970 | + skb_set_hash(buff, inet_twsk(sk)->tw_txhash, |
| 971 | + PKT_HASH_TYPE_L4); |
| 972 | + } else { |
| 973 | + mark = sk->sk_mark; |
| 974 | + } |
| 975 | + buff->tstamp = tcp_transmit_time(sk); |
| 976 | + } |
895 | 977 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark; |
896 | 978 | fl6.fl6_dport = t1->dest; |
897 | 979 | fl6.fl6_sport = t1->source; |
898 | 980 | fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); |
899 | | - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); |
| 981 | + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); |
900 | 982 | |
901 | 983 | /* Pass a socket to ip6_dst_lookup either it is for RST |
902 | 984 | * Underlying function will use this to retrieve the network |
.. | .. |
905 | 987 | dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); |
906 | 988 | if (!IS_ERR(dst)) { |
907 | 989 | skb_dst_set(buff, dst); |
908 | | - ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); |
| 990 | + ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, |
| 991 | + tclass & ~INET_ECN_MASK, priority); |
909 | 992 | TCP_INC_STATS(net, TCP_MIB_OUTSEGS); |
910 | 993 | if (rst) |
911 | 994 | TCP_INC_STATS(net, TCP_MIB_OUTRSTS); |
.. | .. |
918 | 1001 | static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) |
919 | 1002 | { |
920 | 1003 | const struct tcphdr *th = tcp_hdr(skb); |
| 1004 | + struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
921 | 1005 | u32 seq = 0, ack_seq = 0; |
922 | 1006 | struct tcp_md5sig_key *key = NULL; |
923 | 1007 | #ifdef CONFIG_TCP_MD5SIG |
924 | 1008 | const __u8 *hash_location = NULL; |
925 | | - struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
926 | 1009 | unsigned char newhash[16]; |
927 | 1010 | int genhash; |
928 | 1011 | struct sock *sk1 = NULL; |
929 | 1012 | #endif |
| 1013 | + __be32 label = 0; |
| 1014 | + u32 priority = 0; |
| 1015 | + struct net *net; |
930 | 1016 | int oif = 0; |
931 | 1017 | |
932 | 1018 | if (th->rst) |
.. | .. |
938 | 1024 | if (!sk && !ipv6_unicast_destination(skb)) |
939 | 1025 | return; |
940 | 1026 | |
| 1027 | + net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); |
941 | 1028 | #ifdef CONFIG_TCP_MD5SIG |
942 | 1029 | rcu_read_lock(); |
943 | 1030 | hash_location = tcp_parse_md5sig_option(th); |
944 | 1031 | if (sk && sk_fullsock(sk)) { |
945 | | - key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); |
| 1032 | + int l3index; |
| 1033 | + |
| 1034 | + /* sdif set, means packet ingressed via a device |
| 1035 | + * in an L3 domain and inet_iif is set to it. |
| 1036 | + */ |
| 1037 | + l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; |
| 1038 | + key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index); |
946 | 1039 | } else if (hash_location) { |
| 1040 | + int dif = tcp_v6_iif_l3_slave(skb); |
| 1041 | + int sdif = tcp_v6_sdif(skb); |
| 1042 | + int l3index; |
| 1043 | + |
947 | 1044 | /* |
948 | 1045 | * active side is lost. Try to find listening socket through |
949 | 1046 | * source port, and then find md5 key through listening socket. |
.. | .. |
951 | 1048 | * Incoming packet is checked with md5 hash with finding key, |
952 | 1049 | * no RST generated if md5 hash doesn't match. |
953 | 1050 | */ |
954 | | - sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), |
| 1051 | + sk1 = inet6_lookup_listener(net, |
955 | 1052 | &tcp_hashinfo, NULL, 0, |
956 | 1053 | &ipv6h->saddr, |
957 | 1054 | th->source, &ipv6h->daddr, |
958 | | - ntohs(th->source), |
959 | | - tcp_v6_iif_l3_slave(skb), |
960 | | - tcp_v6_sdif(skb)); |
| 1055 | + ntohs(th->source), dif, sdif); |
961 | 1056 | if (!sk1) |
962 | 1057 | goto out; |
963 | 1058 | |
964 | | - key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); |
| 1059 | + /* sdif set, means packet ingressed via a device |
| 1060 | + * in an L3 domain and dif is set to it. |
| 1061 | + */ |
| 1062 | + l3index = tcp_v6_sdif(skb) ? dif : 0; |
| 1063 | + |
| 1064 | + key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index); |
965 | 1065 | if (!key) |
966 | 1066 | goto out; |
967 | 1067 | |
.. | .. |
979 | 1079 | |
980 | 1080 | if (sk) { |
981 | 1081 | oif = sk->sk_bound_dev_if; |
982 | | - if (sk_fullsock(sk)) |
| 1082 | + if (sk_fullsock(sk)) { |
| 1083 | + const struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
| 1084 | + |
983 | 1085 | trace_tcp_send_reset(sk, skb); |
| 1086 | + if (np->repflow) |
| 1087 | + label = ip6_flowlabel(ipv6h); |
| 1088 | + priority = sk->sk_priority; |
| 1089 | + } |
| 1090 | + if (sk->sk_state == TCP_TIME_WAIT) { |
| 1091 | + label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel); |
| 1092 | + priority = inet_twsk(sk)->tw_priority; |
| 1093 | + } |
| 1094 | + } else { |
| 1095 | + if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET) |
| 1096 | + label = ip6_flowlabel(ipv6h); |
984 | 1097 | } |
985 | 1098 | |
986 | | - tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); |
| 1099 | + tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, |
| 1100 | + ipv6_get_dsfield(ipv6h), label, priority); |
987 | 1101 | |
988 | 1102 | #ifdef CONFIG_TCP_MD5SIG |
989 | 1103 | out: |
.. | .. |
994 | 1108 | static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, |
995 | 1109 | u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, |
996 | 1110 | struct tcp_md5sig_key *key, u8 tclass, |
997 | | - __be32 label) |
| 1111 | + __be32 label, u32 priority) |
998 | 1112 | { |
999 | 1113 | tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, |
1000 | | - tclass, label); |
| 1114 | + tclass, label, priority); |
1001 | 1115 | } |
1002 | 1116 | |
1003 | 1117 | static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) |
.. | .. |
1009 | 1123 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
1010 | 1124 | tcp_time_stamp_raw() + tcptw->tw_ts_offset, |
1011 | 1125 | tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), |
1012 | | - tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel)); |
| 1126 | + tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority); |
1013 | 1127 | |
1014 | 1128 | inet_twsk_put(tw); |
1015 | 1129 | } |
.. | .. |
1017 | 1131 | static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, |
1018 | 1132 | struct request_sock *req) |
1019 | 1133 | { |
| 1134 | + int l3index; |
| 1135 | + |
| 1136 | + l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; |
| 1137 | + |
1020 | 1138 | /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV |
1021 | 1139 | * sk->sk_state == TCP_SYN_RECV -> for Fast Open. |
1022 | 1140 | */ |
.. | .. |
1030 | 1148 | tcp_rsk(req)->rcv_nxt, |
1031 | 1149 | req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, |
1032 | 1150 | tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, |
1033 | | - req->ts_recent, sk->sk_bound_dev_if, |
1034 | | - tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr), |
1035 | | - 0, 0); |
| 1151 | + READ_ONCE(req->ts_recent), sk->sk_bound_dev_if, |
| 1152 | + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index), |
| 1153 | + ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority); |
1036 | 1154 | } |
1037 | 1155 | |
1038 | 1156 | |
.. | .. |
1045 | 1163 | sk = cookie_v6_check(sk, skb); |
1046 | 1164 | #endif |
1047 | 1165 | return sk; |
| 1166 | +} |
| 1167 | + |
| 1168 | +u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph, |
| 1169 | + struct tcphdr *th, u32 *cookie) |
| 1170 | +{ |
| 1171 | + u16 mss = 0; |
| 1172 | +#ifdef CONFIG_SYN_COOKIES |
| 1173 | + mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops, |
| 1174 | + &tcp_request_sock_ipv6_ops, sk, th); |
| 1175 | + if (mss) { |
| 1176 | + *cookie = __cookie_v6_init_sequence(iph, th, &mss); |
| 1177 | + tcp_synq_overflow(sk); |
| 1178 | + } |
| 1179 | +#endif |
| 1180 | + return mss; |
1048 | 1181 | } |
1049 | 1182 | |
1050 | 1183 | static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) |
.. | .. |
1086 | 1219 | { |
1087 | 1220 | struct inet_request_sock *ireq; |
1088 | 1221 | struct ipv6_pinfo *newnp; |
1089 | | - const struct ipv6_pinfo *np = inet6_sk(sk); |
| 1222 | + const struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
1090 | 1223 | struct ipv6_txoptions *opt; |
1091 | | - struct tcp6_sock *newtcp6sk; |
1092 | 1224 | struct inet_sock *newinet; |
| 1225 | + bool found_dup_sk = false; |
1093 | 1226 | struct tcp_sock *newtp; |
1094 | 1227 | struct sock *newsk; |
1095 | 1228 | #ifdef CONFIG_TCP_MD5SIG |
1096 | 1229 | struct tcp_md5sig_key *key; |
| 1230 | + int l3index; |
1097 | 1231 | #endif |
1098 | 1232 | struct flowi6 fl6; |
1099 | 1233 | |
.. | .. |
1108 | 1242 | if (!newsk) |
1109 | 1243 | return NULL; |
1110 | 1244 | |
1111 | | - newtcp6sk = (struct tcp6_sock *)newsk; |
1112 | | - inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; |
| 1245 | + inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); |
1113 | 1246 | |
1114 | 1247 | newinet = inet_sk(newsk); |
1115 | | - newnp = inet6_sk(newsk); |
| 1248 | + newnp = tcp_inet6_sk(newsk); |
1116 | 1249 | newtp = tcp_sk(newsk); |
1117 | 1250 | |
1118 | 1251 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
.. | .. |
1120 | 1253 | newnp->saddr = newsk->sk_v6_rcv_saddr; |
1121 | 1254 | |
1122 | 1255 | inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; |
| 1256 | + if (sk_is_mptcp(newsk)) |
| 1257 | + mptcpv6_handle_mapped(newsk, true); |
1123 | 1258 | newsk->sk_backlog_rcv = tcp_v4_do_rcv; |
1124 | 1259 | #ifdef CONFIG_TCP_MD5SIG |
1125 | 1260 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; |
.. | .. |
1176 | 1311 | ip6_dst_store(newsk, dst, NULL, NULL); |
1177 | 1312 | inet6_sk_rx_dst_set(newsk, skb); |
1178 | 1313 | |
1179 | | - newtcp6sk = (struct tcp6_sock *)newsk; |
1180 | | - inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; |
| 1314 | + inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); |
1181 | 1315 | |
1182 | 1316 | newtp = tcp_sk(newsk); |
1183 | 1317 | newinet = inet_sk(newsk); |
1184 | | - newnp = inet6_sk(newsk); |
| 1318 | + newnp = tcp_inet6_sk(newsk); |
1185 | 1319 | |
1186 | 1320 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
1187 | 1321 | |
.. | .. |
1209 | 1343 | newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); |
1210 | 1344 | if (np->repflow) |
1211 | 1345 | newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); |
| 1346 | + |
| 1347 | + /* Set ToS of the new socket based upon the value of incoming SYN. |
| 1348 | + * ECT bits are set later in tcp_init_transfer(). |
| 1349 | + */ |
| 1350 | + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) |
| 1351 | + newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; |
1212 | 1352 | |
1213 | 1353 | /* Clone native IPv6 options from listening socket (if any) |
1214 | 1354 | |
.. | .. |
1239 | 1379 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; |
1240 | 1380 | |
1241 | 1381 | #ifdef CONFIG_TCP_MD5SIG |
| 1382 | + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); |
| 1383 | + |
1242 | 1384 | /* Copy over the MD5 key from the original socket */ |
1243 | | - key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr); |
| 1385 | + key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index); |
1244 | 1386 | if (key) { |
1245 | 1387 | /* We're using one, so create a matching key |
1246 | 1388 | * on the newsk structure. If we fail to get |
.. | .. |
1248 | 1390 | * across. Shucks. |
1249 | 1391 | */ |
1250 | 1392 | tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr, |
1251 | | - AF_INET6, 128, key->key, key->keylen, |
| 1393 | + AF_INET6, 128, l3index, key->key, key->keylen, |
1252 | 1394 | sk_gfp_mask(sk, GFP_ATOMIC)); |
1253 | 1395 | } |
1254 | 1396 | #endif |
.. | .. |
1258 | 1400 | tcp_done(newsk); |
1259 | 1401 | goto out; |
1260 | 1402 | } |
1261 | | - *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); |
| 1403 | + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), |
| 1404 | + &found_dup_sk); |
1262 | 1405 | if (*own_req) { |
1263 | 1406 | tcp_move_syn(newtp, req); |
1264 | 1407 | |
1265 | 1408 | /* Clone pktoptions received with SYN, if we own the req */ |
1266 | 1409 | if (ireq->pktopts) { |
1267 | | - newnp->pktoptions = skb_clone(ireq->pktopts, |
1268 | | - sk_gfp_mask(sk, GFP_ATOMIC)); |
| 1410 | + newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); |
1269 | 1411 | consume_skb(ireq->pktopts); |
1270 | 1412 | ireq->pktopts = NULL; |
1271 | | - if (newnp->pktoptions) { |
| 1413 | + if (newnp->pktoptions) |
1272 | 1414 | tcp_v6_restore_cb(newnp->pktoptions); |
1273 | | - skb_set_owner_r(newnp->pktoptions, newsk); |
1274 | | - } |
| 1415 | + } |
| 1416 | + } else { |
| 1417 | + if (!req_unhash && found_dup_sk) { |
| 1418 | + /* This code path should only be executed in the |
| 1419 | + * syncookie case only |
| 1420 | + */ |
| 1421 | + bh_unlock_sock(newsk); |
| 1422 | + sock_put(newsk); |
| 1423 | + newsk = NULL; |
1275 | 1424 | } |
1276 | 1425 | } |
1277 | 1426 | |
.. | .. |
1296 | 1445 | */ |
1297 | 1446 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) |
1298 | 1447 | { |
1299 | | - struct ipv6_pinfo *np = inet6_sk(sk); |
1300 | | - struct tcp_sock *tp; |
| 1448 | + struct ipv6_pinfo *np = tcp_inet6_sk(sk); |
1301 | 1449 | struct sk_buff *opt_skb = NULL; |
| 1450 | + struct tcp_sock *tp; |
1302 | 1451 | |
1303 | 1452 | /* Imagine: socket is IPv6. IPv4 packet arrives, |
1304 | 1453 | goes to IPv4 receive handler and backlogged. |
.. | .. |
1330 | 1479 | --ANK (980728) |
1331 | 1480 | */ |
1332 | 1481 | if (np->rxopt.all) |
1333 | | - opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); |
| 1482 | + opt_skb = skb_clone_and_charge_r(skb, sk); |
1334 | 1483 | |
1335 | 1484 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1336 | | - struct dst_entry *dst = sk->sk_rx_dst; |
| 1485 | + struct dst_entry *dst; |
| 1486 | + |
| 1487 | + dst = rcu_dereference_protected(sk->sk_rx_dst, |
| 1488 | + lockdep_sock_is_held(sk)); |
1337 | 1489 | |
1338 | 1490 | sock_rps_save_rxhash(sk, skb); |
1339 | 1491 | sk_mark_napi_id(sk, skb); |
1340 | 1492 | if (dst) { |
1341 | 1493 | if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || |
1342 | 1494 | dst->ops->check(dst, np->rx_dst_cookie) == NULL) { |
| 1495 | + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); |
1343 | 1496 | dst_release(dst); |
1344 | | - sk->sk_rx_dst = NULL; |
1345 | 1497 | } |
1346 | 1498 | } |
1347 | 1499 | |
.. | .. |
1409 | 1561 | if (np->repflow) |
1410 | 1562 | np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); |
1411 | 1563 | if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { |
1412 | | - skb_set_owner_r(opt_skb, sk); |
1413 | 1564 | tcp_v6_restore_cb(opt_skb); |
1414 | 1565 | opt_skb = xchg(&np->pktoptions, opt_skb); |
1415 | 1566 | } else { |
.. | .. |
1446 | 1597 | skb->tstamp || skb_hwtstamps(skb)->hwtstamp; |
1447 | 1598 | } |
1448 | 1599 | |
1449 | | -static int tcp_v6_rcv(struct sk_buff *skb) |
| 1600 | +INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) |
1450 | 1601 | { |
| 1602 | + struct sk_buff *skb_to_free; |
1451 | 1603 | int sdif = inet6_sdif(skb); |
| 1604 | + int dif = inet6_iif(skb); |
1452 | 1605 | const struct tcphdr *th; |
1453 | 1606 | const struct ipv6hdr *hdr; |
1454 | 1607 | bool refcounted; |
.. | .. |
1497 | 1650 | struct sock *nsk; |
1498 | 1651 | |
1499 | 1652 | sk = req->rsk_listener; |
1500 | | - if (tcp_v6_inbound_md5_hash(sk, skb)) { |
| 1653 | + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) { |
1501 | 1654 | sk_drops_add(sk, skb); |
1502 | 1655 | reqsk_put(req); |
1503 | 1656 | goto discard_it; |
.. | .. |
1544 | 1697 | return 0; |
1545 | 1698 | } |
1546 | 1699 | } |
1547 | | - if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { |
| 1700 | + if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) { |
1548 | 1701 | __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); |
1549 | 1702 | goto discard_and_relse; |
1550 | 1703 | } |
.. | .. |
1552 | 1705 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1553 | 1706 | goto discard_and_relse; |
1554 | 1707 | |
1555 | | - if (tcp_v6_inbound_md5_hash(sk, skb)) |
| 1708 | + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) |
1556 | 1709 | goto discard_and_relse; |
1557 | 1710 | |
1558 | 1711 | if (tcp_filter(sk, skb)) |
.. | .. |
1574 | 1727 | tcp_segs_in(tcp_sk(sk), skb); |
1575 | 1728 | ret = 0; |
1576 | 1729 | if (!sock_owned_by_user(sk)) { |
| 1730 | + skb_to_free = sk->sk_rx_skb_cache; |
| 1731 | + sk->sk_rx_skb_cache = NULL; |
1577 | 1732 | ret = tcp_v6_do_rcv(sk, skb); |
1578 | | - } else if (tcp_add_backlog(sk, skb)) { |
1579 | | - goto discard_and_relse; |
| 1733 | + } else { |
| 1734 | + if (tcp_add_backlog(sk, skb)) |
| 1735 | + goto discard_and_relse; |
| 1736 | + skb_to_free = NULL; |
1580 | 1737 | } |
1581 | 1738 | bh_unlock_sock(sk); |
1582 | | - |
| 1739 | + if (skb_to_free) |
| 1740 | + __kfree_skb(skb_to_free); |
1583 | 1741 | put_and_return: |
1584 | 1742 | if (refcounted) |
1585 | 1743 | sock_put(sk); |
.. | .. |
1645 | 1803 | } |
1646 | 1804 | } |
1647 | 1805 | /* to ACK */ |
1648 | | - /* fall through */ |
| 1806 | + fallthrough; |
1649 | 1807 | case TCP_TW_ACK: |
1650 | 1808 | tcp_v6_timewait_ack(sk, skb); |
1651 | 1809 | break; |
.. | .. |
1659 | 1817 | goto discard_it; |
1660 | 1818 | } |
1661 | 1819 | |
1662 | | -static void tcp_v6_early_demux(struct sk_buff *skb) |
| 1820 | +void tcp_v6_early_demux(struct sk_buff *skb) |
1663 | 1821 | { |
1664 | 1822 | const struct ipv6hdr *hdr; |
1665 | 1823 | const struct tcphdr *th; |
.. | .. |
1686 | 1844 | skb->sk = sk; |
1687 | 1845 | skb->destructor = sock_edemux; |
1688 | 1846 | if (sk_fullsock(sk)) { |
1689 | | - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
| 1847 | + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); |
1690 | 1848 | |
1691 | 1849 | if (dst) |
1692 | | - dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); |
| 1850 | + dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie); |
1693 | 1851 | if (dst && |
1694 | 1852 | inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) |
1695 | 1853 | skb_dst_set_noref(skb, dst); |
.. | .. |
1703 | 1861 | .twsk_destructor = tcp_twsk_destructor, |
1704 | 1862 | }; |
1705 | 1863 | |
1706 | | -static const struct inet_connection_sock_af_ops ipv6_specific = { |
| 1864 | +INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) |
| 1865 | +{ |
| 1866 | + struct ipv6_pinfo *np = inet6_sk(sk); |
| 1867 | + |
| 1868 | + __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); |
| 1869 | +} |
| 1870 | + |
| 1871 | +const struct inet_connection_sock_af_ops ipv6_specific = { |
1707 | 1872 | .queue_xmit = inet6_csk_xmit, |
1708 | 1873 | .send_check = tcp_v6_send_check, |
1709 | 1874 | .rebuild_header = inet6_sk_rebuild_header, |
.. | .. |
1716 | 1881 | .getsockopt = ipv6_getsockopt, |
1717 | 1882 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1718 | 1883 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1719 | | -#ifdef CONFIG_COMPAT |
1720 | | - .compat_setsockopt = compat_ipv6_setsockopt, |
1721 | | - .compat_getsockopt = compat_ipv6_getsockopt, |
1722 | | -#endif |
1723 | 1884 | .mtu_reduced = tcp_v6_mtu_reduced, |
1724 | 1885 | }; |
1725 | 1886 | |
.. | .. |
1746 | 1907 | .getsockopt = ipv6_getsockopt, |
1747 | 1908 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
1748 | 1909 | .sockaddr_len = sizeof(struct sockaddr_in6), |
1749 | | -#ifdef CONFIG_COMPAT |
1750 | | - .compat_setsockopt = compat_ipv6_setsockopt, |
1751 | | - .compat_getsockopt = compat_ipv6_getsockopt, |
1752 | | -#endif |
1753 | 1910 | .mtu_reduced = tcp_v4_mtu_reduced, |
1754 | 1911 | }; |
1755 | 1912 | |
.. | .. |
1777 | 1934 | #endif |
1778 | 1935 | |
1779 | 1936 | return 0; |
1780 | | -} |
1781 | | - |
1782 | | -static void tcp_v6_destroy_sock(struct sock *sk) |
1783 | | -{ |
1784 | | - tcp_v4_destroy_sock(sk); |
1785 | | - inet6_destroy_sock(sk); |
1786 | 1937 | } |
1787 | 1938 | |
1788 | 1939 | #ifdef CONFIG_PROC_FS |
.. | .. |
1855 | 2006 | |
1856 | 2007 | state = inet_sk_state_load(sp); |
1857 | 2008 | if (state == TCP_LISTEN) |
1858 | | - rx_queue = sp->sk_ack_backlog; |
| 2009 | + rx_queue = READ_ONCE(sp->sk_ack_backlog); |
1859 | 2010 | else |
1860 | 2011 | /* Because we don't lock the socket, |
1861 | 2012 | * we might find a transient negative value. |
.. | .. |
1883 | 2034 | refcount_read(&sp->sk_refcnt), sp, |
1884 | 2035 | jiffies_to_clock_t(icsk->icsk_rto), |
1885 | 2036 | jiffies_to_clock_t(icsk->icsk_ack.ato), |
1886 | | - (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, |
| 2037 | + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp), |
1887 | 2038 | tp->snd_cwnd, |
1888 | 2039 | state == TCP_LISTEN ? |
1889 | 2040 | fastopenq->max_qlen : |
.. | .. |
1977 | 2128 | .accept = inet_csk_accept, |
1978 | 2129 | .ioctl = tcp_ioctl, |
1979 | 2130 | .init = tcp_v6_init_sock, |
1980 | | - .destroy = tcp_v6_destroy_sock, |
| 2131 | + .destroy = tcp_v4_destroy_sock, |
1981 | 2132 | .shutdown = tcp_shutdown, |
1982 | 2133 | .setsockopt = tcp_setsockopt, |
1983 | 2134 | .getsockopt = tcp_getsockopt, |
.. | .. |
2007 | 2158 | .rsk_prot = &tcp6_request_sock_ops, |
2008 | 2159 | .h.hashinfo = &tcp_hashinfo, |
2009 | 2160 | .no_autobind = true, |
2010 | | -#ifdef CONFIG_COMPAT |
2011 | | - .compat_setsockopt = compat_tcp_setsockopt, |
2012 | | - .compat_getsockopt = compat_tcp_getsockopt, |
2013 | | -#endif |
2014 | 2161 | .diag_destroy = tcp_abort, |
2015 | 2162 | }; |
| 2163 | +EXPORT_SYMBOL_GPL(tcpv6_prot); |
2016 | 2164 | |
2017 | | -/* thinking of making this const? Don't. |
2018 | | - * early_demux can change based on sysctl. |
2019 | | - */ |
2020 | | -static struct inet6_protocol tcpv6_protocol = { |
2021 | | - .early_demux = tcp_v6_early_demux, |
2022 | | - .early_demux_handler = tcp_v6_early_demux, |
| 2165 | +static const struct inet6_protocol tcpv6_protocol = { |
2023 | 2166 | .handler = tcp_v6_rcv, |
2024 | 2167 | .err_handler = tcp_v6_err, |
2025 | 2168 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
.. | .. |
2072 | 2215 | ret = register_pernet_subsys(&tcpv6_net_ops); |
2073 | 2216 | if (ret) |
2074 | 2217 | goto out_tcpv6_protosw; |
| 2218 | + |
| 2219 | + ret = mptcpv6_init(); |
| 2220 | + if (ret) |
| 2221 | + goto out_tcpv6_pernet_subsys; |
| 2222 | + |
2075 | 2223 | out: |
2076 | 2224 | return ret; |
2077 | 2225 | |
| 2226 | +out_tcpv6_pernet_subsys: |
| 2227 | + unregister_pernet_subsys(&tcpv6_net_ops); |
2078 | 2228 | out_tcpv6_protosw: |
2079 | 2229 | inet6_unregister_protosw(&tcpv6_protosw); |
2080 | 2230 | out_tcpv6_protocol: |
---|