.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
---|
1 | 2 | /* |
---|
2 | 3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
---|
3 | 4 | * operating system. INET is implemented using the BSD Socket |
---|
.. | .. |
---|
14 | 15 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
---|
15 | 16 | * Bjorn Ekwall. <bj0rn@blox.se> |
---|
16 | 17 | * Pekka Riikonen <priikone@poseidon.pspt.fi> |
---|
17 | | - * |
---|
18 | | - * This program is free software; you can redistribute it and/or |
---|
19 | | - * modify it under the terms of the GNU General Public License |
---|
20 | | - * as published by the Free Software Foundation; either version |
---|
21 | | - * 2 of the License, or (at your option) any later version. |
---|
22 | 18 | * |
---|
23 | 19 | * Moved to /usr/include/linux for NET3 |
---|
24 | 20 | */ |
---|
.. | .. |
---|
58 | 54 | struct device; |
---|
59 | 55 | struct phy_device; |
---|
60 | 56 | struct dsa_port; |
---|
| 57 | +struct ip_tunnel_parm; |
---|
| 58 | +struct macsec_context; |
---|
| 59 | +struct macsec_ops; |
---|
61 | 60 | |
---|
62 | 61 | struct sfp_bus; |
---|
63 | 62 | /* 802.11 specific */ |
---|
.. | .. |
---|
67 | 66 | struct mpls_dev; |
---|
68 | 67 | /* UDP Tunnel offloads */ |
---|
69 | 68 | struct udp_tunnel_info; |
---|
| 69 | +struct udp_tunnel_nic_info; |
---|
| 70 | +struct udp_tunnel_nic; |
---|
70 | 71 | struct bpf_prog; |
---|
71 | 72 | struct xdp_buff; |
---|
72 | 73 | |
---|
| 74 | +void synchronize_net(void); |
---|
73 | 75 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
---|
74 | 76 | const struct ethtool_ops *ops); |
---|
75 | 77 | |
---|
76 | 78 | /* Backlog congestion levels */ |
---|
77 | 79 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ |
---|
78 | 80 | #define NET_RX_DROP 1 /* packet dropped */ |
---|
| 81 | + |
---|
| 82 | +#define MAX_NEST_DEV 8 |
---|
79 | 83 | |
---|
80 | 84 | /* |
---|
81 | 85 | * Transmit return codes: transmit return codes originate from three different |
---|
.. | .. |
---|
195 | 199 | |
---|
196 | 200 | #ifdef CONFIG_RPS |
---|
197 | 201 | #include <linux/static_key.h> |
---|
198 | | -extern struct static_key rps_needed; |
---|
199 | | -extern struct static_key rfs_needed; |
---|
| 202 | +extern struct static_key_false rps_needed; |
---|
| 203 | +extern struct static_key_false rfs_needed; |
---|
200 | 204 | #endif |
---|
201 | 205 | |
---|
202 | 206 | struct neighbour; |
---|
.. | .. |
---|
209 | 213 | unsigned char type; |
---|
210 | 214 | #define NETDEV_HW_ADDR_T_LAN 1 |
---|
211 | 215 | #define NETDEV_HW_ADDR_T_SAN 2 |
---|
212 | | -#define NETDEV_HW_ADDR_T_SLAVE 3 |
---|
213 | | -#define NETDEV_HW_ADDR_T_UNICAST 4 |
---|
214 | | -#define NETDEV_HW_ADDR_T_MULTICAST 5 |
---|
| 216 | +#define NETDEV_HW_ADDR_T_UNICAST 3 |
---|
| 217 | +#define NETDEV_HW_ADDR_T_MULTICAST 4 |
---|
215 | 218 | bool global_use; |
---|
216 | 219 | int sync_cnt; |
---|
217 | 220 | int refcount; |
---|
.. | .. |
---|
275 | 278 | const struct net_device *dev, |
---|
276 | 279 | const unsigned char *haddr); |
---|
277 | 280 | bool (*validate)(const char *ll_header, unsigned int len); |
---|
| 281 | + __be16 (*parse_protocol)(const struct sk_buff *skb); |
---|
278 | 282 | |
---|
279 | 283 | ANDROID_KABI_RESERVE(1); |
---|
280 | 284 | ANDROID_KABI_RESERVE(2); |
---|
.. | .. |
---|
291 | 295 | __LINK_STATE_NOCARRIER, |
---|
292 | 296 | __LINK_STATE_LINKWATCH_PENDING, |
---|
293 | 297 | __LINK_STATE_DORMANT, |
---|
| 298 | + __LINK_STATE_TESTING, |
---|
294 | 299 | }; |
---|
295 | 300 | |
---|
296 | 301 | |
---|
.. | .. |
---|
331 | 336 | |
---|
332 | 337 | unsigned long state; |
---|
333 | 338 | int weight; |
---|
| 339 | + int defer_hard_irqs_count; |
---|
334 | 340 | unsigned long gro_bitmask; |
---|
335 | 341 | int (*poll)(struct napi_struct *, int); |
---|
336 | 342 | #ifdef CONFIG_NETPOLL |
---|
.. | .. |
---|
339 | 345 | struct net_device *dev; |
---|
340 | 346 | struct gro_list gro_hash[GRO_HASH_BUCKETS]; |
---|
341 | 347 | struct sk_buff *skb; |
---|
| 348 | + struct list_head rx_list; /* Pending GRO_NORMAL skbs */ |
---|
| 349 | + int rx_count; /* length of rx_list */ |
---|
342 | 350 | struct hrtimer timer; |
---|
343 | 351 | struct list_head dev_list; |
---|
344 | 352 | struct hlist_node napi_hash_node; |
---|
.. | .. |
---|
355 | 363 | NAPI_STATE_MISSED, /* reschedule a napi */ |
---|
356 | 364 | NAPI_STATE_DISABLE, /* Disable pending */ |
---|
357 | 365 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
---|
358 | | - NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ |
---|
| 366 | + NAPI_STATE_LISTED, /* NAPI added to system lists */ |
---|
359 | 367 | NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ |
---|
360 | 368 | NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ |
---|
361 | 369 | }; |
---|
.. | .. |
---|
365 | 373 | NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), |
---|
366 | 374 | NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), |
---|
367 | 375 | NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), |
---|
368 | | - NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), |
---|
| 376 | + NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), |
---|
369 | 377 | NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), |
---|
370 | 378 | NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), |
---|
371 | 379 | }; |
---|
.. | .. |
---|
431 | 439 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); |
---|
432 | 440 | |
---|
433 | 441 | void __napi_schedule(struct napi_struct *n); |
---|
434 | | - |
---|
435 | | -/* |
---|
436 | | - * When PREEMPT_RT_FULL is defined, all device interrupt handlers |
---|
437 | | - * run as threads, and they can also be preempted (without PREEMPT_RT |
---|
438 | | - * interrupt threads can not be preempted). Which means that calling |
---|
439 | | - * __napi_schedule_irqoff() from an interrupt handler can be preempted |
---|
440 | | - * and can corrupt the napi->poll_list. |
---|
441 | | - */ |
---|
442 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
443 | | -#define __napi_schedule_irqoff(n) __napi_schedule(n) |
---|
444 | | -#else |
---|
445 | 442 | void __napi_schedule_irqoff(struct napi_struct *n); |
---|
446 | | -#endif |
---|
447 | 443 | |
---|
448 | 444 | static inline bool napi_disable_pending(struct napi_struct *n) |
---|
449 | 445 | { |
---|
.. | .. |
---|
502 | 498 | } |
---|
503 | 499 | |
---|
504 | 500 | /** |
---|
505 | | - * napi_hash_del - remove a NAPI from global table |
---|
506 | | - * @napi: NAPI context |
---|
507 | | - * |
---|
508 | | - * Warning: caller must observe RCU grace period |
---|
509 | | - * before freeing memory containing @napi, if |
---|
510 | | - * this function returns true. |
---|
511 | | - * Note: core networking stack automatically calls it |
---|
512 | | - * from netif_napi_del(). |
---|
513 | | - * Drivers might want to call this helper to combine all |
---|
514 | | - * the needed RCU grace periods into a single one. |
---|
515 | | - */ |
---|
516 | | -bool napi_hash_del(struct napi_struct *napi); |
---|
517 | | - |
---|
518 | | -/** |
---|
519 | 501 | * napi_disable - prevent NAPI from scheduling |
---|
520 | 502 | * @n: NAPI context |
---|
521 | 503 | * |
---|
.. | .. |
---|
554 | 536 | msleep(1); |
---|
555 | 537 | else |
---|
556 | 538 | barrier(); |
---|
| 539 | +} |
---|
| 540 | + |
---|
| 541 | +/** |
---|
| 542 | + * napi_if_scheduled_mark_missed - if napi is running, set the |
---|
| 543 | + * NAPIF_STATE_MISSED |
---|
| 544 | + * @n: NAPI context |
---|
| 545 | + * |
---|
| 546 | + * If napi is running, set the NAPIF_STATE_MISSED, and return true if |
---|
| 547 | + * NAPI is scheduled. |
---|
| 548 | + **/ |
---|
| 549 | +static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) |
---|
| 550 | +{ |
---|
| 551 | + unsigned long val, new; |
---|
| 552 | + |
---|
| 553 | + do { |
---|
| 554 | + val = READ_ONCE(n->state); |
---|
| 555 | + if (val & NAPIF_STATE_DISABLE) |
---|
| 556 | + return true; |
---|
| 557 | + |
---|
| 558 | + if (!(val & NAPIF_STATE_SCHED)) |
---|
| 559 | + return false; |
---|
| 560 | + |
---|
| 561 | + new = val | NAPIF_STATE_MISSED; |
---|
| 562 | + } while (cmpxchg(&n->state, val, new) != val); |
---|
| 563 | + |
---|
| 564 | + return true; |
---|
557 | 565 | } |
---|
558 | 566 | |
---|
559 | 567 | enum netdev_queue_state_t { |
---|
.. | .. |
---|
604 | 612 | |
---|
605 | 613 | /* Subordinate device that the queue has been assigned to */ |
---|
606 | 614 | struct net_device *sb_dev; |
---|
| 615 | +#ifdef CONFIG_XDP_SOCKETS |
---|
| 616 | + struct xsk_buff_pool *pool; |
---|
| 617 | +#endif |
---|
607 | 618 | /* |
---|
608 | 619 | * write-mostly part |
---|
609 | 620 | */ |
---|
610 | 621 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; |
---|
611 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
612 | | - struct task_struct *xmit_lock_owner; |
---|
613 | | -#else |
---|
614 | 622 | int xmit_lock_owner; |
---|
615 | | -#endif |
---|
616 | 623 | /* |
---|
617 | 624 | * Time (in jiffies) of last Tx |
---|
618 | 625 | */ |
---|
.. | .. |
---|
631 | 638 | } ____cacheline_aligned_in_smp; |
---|
632 | 639 | |
---|
633 | 640 | extern int sysctl_fb_tunnels_only_for_init_net; |
---|
| 641 | +extern int sysctl_devconf_inherit_init_net; |
---|
634 | 642 | |
---|
| 643 | +/* |
---|
| 644 | + * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns |
---|
| 645 | + * == 1 : For initns only |
---|
| 646 | + * == 2 : For none. |
---|
| 647 | + */ |
---|
635 | 648 | static inline bool net_has_fallback_tunnels(const struct net *net) |
---|
636 | 649 | { |
---|
637 | | - return net == &init_net || |
---|
638 | | - !IS_ENABLED(CONFIG_SYSCTL) || |
---|
639 | | - !sysctl_fb_tunnels_only_for_init_net; |
---|
| 650 | +#if IS_ENABLED(CONFIG_SYSCTL) |
---|
| 651 | + int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); |
---|
| 652 | + |
---|
| 653 | + return !fb_tunnels_only_for_init_net || |
---|
| 654 | + (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1); |
---|
| 655 | +#else |
---|
| 656 | + return true; |
---|
| 657 | +#endif |
---|
| 658 | +} |
---|
| 659 | + |
---|
| 660 | +static inline int net_inherit_devconf(void) |
---|
| 661 | +{ |
---|
| 662 | +#if IS_ENABLED(CONFIG_SYSCTL) |
---|
| 663 | + return READ_ONCE(sysctl_devconf_inherit_init_net); |
---|
| 664 | +#else |
---|
| 665 | + return 0; |
---|
| 666 | +#endif |
---|
640 | 667 | } |
---|
641 | 668 | |
---|
642 | 669 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
---|
.. | .. |
---|
663 | 690 | struct rps_map { |
---|
664 | 691 | unsigned int len; |
---|
665 | 692 | struct rcu_head rcu; |
---|
666 | | - u16 cpus[0]; |
---|
| 693 | + u16 cpus[]; |
---|
667 | 694 | }; |
---|
668 | 695 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
---|
669 | 696 | |
---|
.. | .. |
---|
685 | 712 | struct rps_dev_flow_table { |
---|
686 | 713 | unsigned int mask; |
---|
687 | 714 | struct rcu_head rcu; |
---|
688 | | - struct rps_dev_flow flows[0]; |
---|
| 715 | + struct rps_dev_flow flows[]; |
---|
689 | 716 | }; |
---|
690 | 717 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ |
---|
691 | 718 | ((_num) * sizeof(struct rps_dev_flow))) |
---|
.. | .. |
---|
703 | 730 | struct rps_sock_flow_table { |
---|
704 | 731 | u32 mask; |
---|
705 | 732 | |
---|
706 | | - u32 ents[0] ____cacheline_aligned_in_smp; |
---|
| 733 | + u32 ents[] ____cacheline_aligned_in_smp; |
---|
707 | 734 | }; |
---|
708 | 735 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) |
---|
709 | 736 | |
---|
.. | .. |
---|
742 | 769 | struct kobject kobj; |
---|
743 | 770 | struct net_device *dev; |
---|
744 | 771 | struct xdp_rxq_info xdp_rxq; |
---|
| 772 | +#ifdef CONFIG_XDP_SOCKETS |
---|
| 773 | + struct xsk_buff_pool *pool; |
---|
| 774 | +#endif |
---|
745 | 775 | |
---|
746 | 776 | ANDROID_KABI_RESERVE(1); |
---|
747 | 777 | ANDROID_KABI_RESERVE(2); |
---|
.. | .. |
---|
768 | 798 | unsigned int len; |
---|
769 | 799 | unsigned int alloc_len; |
---|
770 | 800 | struct rcu_head rcu; |
---|
771 | | - u16 queues[0]; |
---|
| 801 | + u16 queues[]; |
---|
772 | 802 | }; |
---|
773 | 803 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
---|
774 | 804 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
---|
.. | .. |
---|
779 | 809 | */ |
---|
780 | 810 | struct xps_dev_maps { |
---|
781 | 811 | struct rcu_head rcu; |
---|
782 | | - struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ |
---|
| 812 | + struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ |
---|
783 | 813 | }; |
---|
784 | 814 | |
---|
785 | 815 | #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ |
---|
.. | .. |
---|
848 | 878 | TC_SETUP_QDISC_PRIO, |
---|
849 | 879 | TC_SETUP_QDISC_MQ, |
---|
850 | 880 | TC_SETUP_QDISC_ETF, |
---|
| 881 | + TC_SETUP_ROOT_QDISC, |
---|
| 882 | + TC_SETUP_QDISC_GRED, |
---|
| 883 | + TC_SETUP_QDISC_TAPRIO, |
---|
| 884 | + TC_SETUP_FT, |
---|
| 885 | + TC_SETUP_QDISC_ETS, |
---|
| 886 | + TC_SETUP_QDISC_TBF, |
---|
| 887 | + TC_SETUP_QDISC_FIFO, |
---|
851 | 888 | }; |
---|
852 | 889 | |
---|
853 | 890 | /* These structures hold the attributes of bpf state that are being passed |
---|
.. | .. |
---|
863 | 900 | */ |
---|
864 | 901 | XDP_SETUP_PROG, |
---|
865 | 902 | XDP_SETUP_PROG_HW, |
---|
866 | | - XDP_QUERY_PROG, |
---|
867 | | - XDP_QUERY_PROG_HW, |
---|
868 | 903 | /* BPF program for offload callbacks, invoked at program load time. */ |
---|
869 | | - BPF_OFFLOAD_VERIFIER_PREP, |
---|
870 | | - BPF_OFFLOAD_TRANSLATE, |
---|
871 | | - BPF_OFFLOAD_DESTROY, |
---|
872 | 904 | BPF_OFFLOAD_MAP_ALLOC, |
---|
873 | 905 | BPF_OFFLOAD_MAP_FREE, |
---|
874 | | - XDP_QUERY_XSK_UMEM, |
---|
875 | | - XDP_SETUP_XSK_UMEM, |
---|
| 906 | + XDP_SETUP_XSK_POOL, |
---|
876 | 907 | }; |
---|
877 | 908 | |
---|
878 | 909 | struct bpf_prog_offload_ops; |
---|
879 | 910 | struct netlink_ext_ack; |
---|
880 | 911 | struct xdp_umem; |
---|
| 912 | +struct xdp_dev_bulk_queue; |
---|
| 913 | +struct bpf_xdp_link; |
---|
| 914 | + |
---|
| 915 | +enum bpf_xdp_mode { |
---|
| 916 | + XDP_MODE_SKB = 0, |
---|
| 917 | + XDP_MODE_DRV = 1, |
---|
| 918 | + XDP_MODE_HW = 2, |
---|
| 919 | + __MAX_XDP_MODE |
---|
| 920 | +}; |
---|
| 921 | + |
---|
| 922 | +struct bpf_xdp_entity { |
---|
| 923 | + struct bpf_prog *prog; |
---|
| 924 | + struct bpf_xdp_link *link; |
---|
| 925 | +}; |
---|
881 | 926 | |
---|
882 | 927 | struct netdev_bpf { |
---|
883 | 928 | enum bpf_netdev_command command; |
---|
.. | .. |
---|
888 | 933 | struct bpf_prog *prog; |
---|
889 | 934 | struct netlink_ext_ack *extack; |
---|
890 | 935 | }; |
---|
891 | | - /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ |
---|
892 | | - struct { |
---|
893 | | - u32 prog_id; |
---|
894 | | - /* flags with which program was installed */ |
---|
895 | | - u32 prog_flags; |
---|
896 | | - }; |
---|
897 | | - /* BPF_OFFLOAD_VERIFIER_PREP */ |
---|
898 | | - struct { |
---|
899 | | - struct bpf_prog *prog; |
---|
900 | | - const struct bpf_prog_offload_ops *ops; /* callee set */ |
---|
901 | | - } verifier; |
---|
902 | | - /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ |
---|
903 | | - struct { |
---|
904 | | - struct bpf_prog *prog; |
---|
905 | | - } offload; |
---|
906 | 936 | /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ |
---|
907 | 937 | struct { |
---|
908 | 938 | struct bpf_offloaded_map *offmap; |
---|
909 | 939 | }; |
---|
910 | | - /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */ |
---|
| 940 | + /* XDP_SETUP_XSK_POOL */ |
---|
911 | 941 | struct { |
---|
912 | | - struct xdp_umem *umem; /* out for query*/ |
---|
913 | | - u16 queue_id; /* in for query */ |
---|
| 942 | + struct xsk_buff_pool *pool; |
---|
| 943 | + u16 queue_id; |
---|
914 | 944 | } xsk; |
---|
915 | 945 | }; |
---|
916 | 946 | }; |
---|
| 947 | + |
---|
| 948 | +/* Flags for ndo_xsk_wakeup. */ |
---|
| 949 | +#define XDP_WAKEUP_RX (1 << 0) |
---|
| 950 | +#define XDP_WAKEUP_TX (1 << 1) |
---|
917 | 951 | |
---|
918 | 952 | #ifdef CONFIG_XFRM_OFFLOAD |
---|
919 | 953 | struct xfrmdev_ops { |
---|
.. | .. |
---|
931 | 965 | }; |
---|
932 | 966 | #endif |
---|
933 | 967 | |
---|
934 | | -#if IS_ENABLED(CONFIG_TLS_DEVICE) |
---|
935 | | -enum tls_offload_ctx_dir { |
---|
936 | | - TLS_OFFLOAD_CTX_DIR_RX, |
---|
937 | | - TLS_OFFLOAD_CTX_DIR_TX, |
---|
938 | | -}; |
---|
939 | | - |
---|
940 | | -struct tls_crypto_info; |
---|
941 | | -struct tls_context; |
---|
942 | | - |
---|
943 | | -struct tlsdev_ops { |
---|
944 | | - int (*tls_dev_add)(struct net_device *netdev, struct sock *sk, |
---|
945 | | - enum tls_offload_ctx_dir direction, |
---|
946 | | - struct tls_crypto_info *crypto_info, |
---|
947 | | - u32 start_offload_tcp_sn); |
---|
948 | | - void (*tls_dev_del)(struct net_device *netdev, |
---|
949 | | - struct tls_context *ctx, |
---|
950 | | - enum tls_offload_ctx_dir direction); |
---|
951 | | - void (*tls_dev_resync_rx)(struct net_device *netdev, |
---|
952 | | - struct sock *sk, u32 seq, u64 rcd_sn); |
---|
953 | | - ANDROID_KABI_RESERVE(1); |
---|
954 | | - ANDROID_KABI_RESERVE(2); |
---|
955 | | - ANDROID_KABI_RESERVE(3); |
---|
956 | | - ANDROID_KABI_RESERVE(4); |
---|
957 | | -}; |
---|
958 | | -#endif |
---|
959 | | - |
---|
960 | 968 | struct dev_ifalias { |
---|
961 | 969 | struct rcu_head rcuhead; |
---|
962 | 970 | char ifalias[]; |
---|
| 971 | +}; |
---|
| 972 | + |
---|
| 973 | +struct devlink; |
---|
| 974 | +struct tlsdev_ops; |
---|
| 975 | + |
---|
| 976 | +struct netdev_name_node { |
---|
| 977 | + struct hlist_node hlist; |
---|
| 978 | + struct list_head list; |
---|
| 979 | + struct net_device *dev; |
---|
| 980 | + const char *name; |
---|
| 981 | +}; |
---|
| 982 | + |
---|
| 983 | +int netdev_name_node_alt_create(struct net_device *dev, const char *name); |
---|
| 984 | +int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); |
---|
| 985 | + |
---|
| 986 | +struct netdev_net_notifier { |
---|
| 987 | + struct list_head list; |
---|
| 988 | + struct notifier_block *nb; |
---|
963 | 989 | }; |
---|
964 | 990 | |
---|
965 | 991 | /* |
---|
.. | .. |
---|
1005 | 1031 | * those the driver believes to be appropriate. |
---|
1006 | 1032 | * |
---|
1007 | 1033 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
---|
1008 | | - * struct net_device *sb_dev, |
---|
1009 | | - * select_queue_fallback_t fallback); |
---|
| 1034 | + * struct net_device *sb_dev); |
---|
1010 | 1035 | * Called to decide which queue to use when device supports multiple |
---|
1011 | 1036 | * transmit queues. |
---|
1012 | 1037 | * |
---|
.. | .. |
---|
1041 | 1066 | * Called when a user wants to change the Maximum Transfer Unit |
---|
1042 | 1067 | * of a device. |
---|
1043 | 1068 | * |
---|
1044 | | - * void (*ndo_tx_timeout)(struct net_device *dev); |
---|
| 1069 | + * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); |
---|
1045 | 1070 | * Callback used when the transmitter has not made any progress |
---|
1046 | 1071 | * for dev->watchdog ticks. |
---|
1047 | 1072 | * |
---|
.. | .. |
---|
1160 | 1185 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); |
---|
1161 | 1186 | * Called to release previously enslaved netdev. |
---|
1162 | 1187 | * |
---|
| 1188 | + * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, |
---|
| 1189 | + * struct sk_buff *skb, |
---|
| 1190 | + * bool all_slaves); |
---|
| 1191 | + * Get the xmit slave of master device. If all_slaves is true, function |
---|
| 1192 | + * assume all the slaves can transmit. |
---|
| 1193 | + * |
---|
1163 | 1194 | * Feature/offload setting functions. |
---|
1164 | 1195 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
---|
1165 | 1196 | * netdev_features_t features); |
---|
.. | .. |
---|
1174 | 1205 | * |
---|
1175 | 1206 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
---|
1176 | 1207 | * struct net_device *dev, |
---|
1177 | | - * const unsigned char *addr, u16 vid, u16 flags) |
---|
| 1208 | + * const unsigned char *addr, u16 vid, u16 flags, |
---|
| 1209 | + * struct netlink_ext_ack *extack); |
---|
1178 | 1210 | * Adds an FDB entry to dev for addr. |
---|
1179 | 1211 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
---|
1180 | 1212 | * struct net_device *dev, |
---|
.. | .. |
---|
1187 | 1219 | * entries to skb and update idx with the number of entries. |
---|
1188 | 1220 | * |
---|
1189 | 1221 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
---|
1190 | | - * u16 flags) |
---|
| 1222 | + * u16 flags, struct netlink_ext_ack *extack) |
---|
1191 | 1223 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
---|
1192 | 1224 | * struct net_device *dev, u32 filter_mask, |
---|
1193 | 1225 | * int nlflags) |
---|
.. | .. |
---|
1207 | 1239 | * Called to get ID of physical port of this device. If driver does |
---|
1208 | 1240 | * not implement this, it is assumed that the hw is not able to have |
---|
1209 | 1241 | * multiple net devices on single physical port. |
---|
| 1242 | + * |
---|
| 1243 | + * int (*ndo_get_port_parent_id)(struct net_device *dev, |
---|
| 1244 | + * struct netdev_phys_item_id *ppid) |
---|
| 1245 | + * Called to get the parent ID of the physical port of this device. |
---|
1210 | 1246 | * |
---|
1211 | 1247 | * void (*ndo_udp_tunnel_add)(struct net_device *dev, |
---|
1212 | 1248 | * struct udp_tunnel_info *ti); |
---|
.. | .. |
---|
1265 | 1301 | * that got dropped are freed/returned via xdp_return_frame(). |
---|
1266 | 1302 | * Returns negative number, means general error invoking ndo, meaning |
---|
1267 | 1303 | * no frames were xmit'ed and core-caller will free all frames. |
---|
| 1304 | + * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); |
---|
| 1305 | + * This function is used to wake up the softirq, ksoftirqd or kthread |
---|
| 1306 | + * responsible for sending and/or receiving packets on a specific |
---|
| 1307 | + * queue id bound to an AF_XDP socket. The flags field specifies if |
---|
| 1308 | + * only RX, only Tx, or both should be woken up using the flags |
---|
| 1309 | + * XDP_WAKEUP_RX and XDP_WAKEUP_TX. |
---|
| 1310 | + * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); |
---|
| 1311 | + * Get devlink port instance associated with a given netdev. |
---|
| 1312 | + * Called with a reference on the netdevice and devlink locks only, |
---|
| 1313 | + * rtnl_lock is not held. |
---|
| 1314 | + * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, |
---|
| 1315 | + * int cmd); |
---|
| 1316 | + * Add, change, delete or get information on an IPv4 tunnel. |
---|
| 1317 | + * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); |
---|
| 1318 | + * If a device is paired with a peer device, return the peer instance. |
---|
| 1319 | + * The caller must be under RCU read context. |
---|
1268 | 1320 | */ |
---|
1269 | 1321 | struct net_device_ops { |
---|
1270 | 1322 | int (*ndo_init)(struct net_device *dev); |
---|
.. | .. |
---|
1278 | 1330 | netdev_features_t features); |
---|
1279 | 1331 | u16 (*ndo_select_queue)(struct net_device *dev, |
---|
1280 | 1332 | struct sk_buff *skb, |
---|
1281 | | - struct net_device *sb_dev, |
---|
1282 | | - select_queue_fallback_t fallback); |
---|
| 1333 | + struct net_device *sb_dev); |
---|
1283 | 1334 | void (*ndo_change_rx_flags)(struct net_device *dev, |
---|
1284 | 1335 | int flags); |
---|
1285 | 1336 | void (*ndo_set_rx_mode)(struct net_device *dev); |
---|
.. | .. |
---|
1294 | 1345 | int new_mtu); |
---|
1295 | 1346 | int (*ndo_neigh_setup)(struct net_device *dev, |
---|
1296 | 1347 | struct neigh_parms *); |
---|
1297 | | - void (*ndo_tx_timeout) (struct net_device *dev); |
---|
| 1348 | + void (*ndo_tx_timeout) (struct net_device *dev, |
---|
| 1349 | + unsigned int txqueue); |
---|
1298 | 1350 | |
---|
1299 | 1351 | void (*ndo_get_stats64)(struct net_device *dev, |
---|
1300 | 1352 | struct rtnl_link_stats64 *storage); |
---|
.. | .. |
---|
1340 | 1392 | struct nlattr *port[]); |
---|
1341 | 1393 | int (*ndo_get_vf_port)(struct net_device *dev, |
---|
1342 | 1394 | int vf, struct sk_buff *skb); |
---|
| 1395 | + int (*ndo_get_vf_guid)(struct net_device *dev, |
---|
| 1396 | + int vf, |
---|
| 1397 | + struct ifla_vf_guid *node_guid, |
---|
| 1398 | + struct ifla_vf_guid *port_guid); |
---|
1343 | 1399 | int (*ndo_set_vf_guid)(struct net_device *dev, |
---|
1344 | 1400 | int vf, u64 guid, |
---|
1345 | 1401 | int guid_type); |
---|
.. | .. |
---|
1384 | 1440 | struct netlink_ext_ack *extack); |
---|
1385 | 1441 | int (*ndo_del_slave)(struct net_device *dev, |
---|
1386 | 1442 | struct net_device *slave_dev); |
---|
| 1443 | + struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, |
---|
| 1444 | + struct sk_buff *skb, |
---|
| 1445 | + bool all_slaves); |
---|
1387 | 1446 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
---|
1388 | 1447 | netdev_features_t features); |
---|
1389 | 1448 | int (*ndo_set_features)(struct net_device *dev, |
---|
.. | .. |
---|
1398 | 1457 | struct net_device *dev, |
---|
1399 | 1458 | const unsigned char *addr, |
---|
1400 | 1459 | u16 vid, |
---|
1401 | | - u16 flags); |
---|
| 1460 | + u16 flags, |
---|
| 1461 | + struct netlink_ext_ack *extack); |
---|
1402 | 1462 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
---|
1403 | 1463 | struct nlattr *tb[], |
---|
1404 | 1464 | struct net_device *dev, |
---|
.. | .. |
---|
1409 | 1469 | struct net_device *dev, |
---|
1410 | 1470 | struct net_device *filter_dev, |
---|
1411 | 1471 | int *idx); |
---|
1412 | | - |
---|
| 1472 | + int (*ndo_fdb_get)(struct sk_buff *skb, |
---|
| 1473 | + struct nlattr *tb[], |
---|
| 1474 | + struct net_device *dev, |
---|
| 1475 | + const unsigned char *addr, |
---|
| 1476 | + u16 vid, u32 portid, u32 seq, |
---|
| 1477 | + struct netlink_ext_ack *extack); |
---|
1413 | 1478 | int (*ndo_bridge_setlink)(struct net_device *dev, |
---|
1414 | 1479 | struct nlmsghdr *nlh, |
---|
1415 | | - u16 flags); |
---|
| 1480 | + u16 flags, |
---|
| 1481 | + struct netlink_ext_ack *extack); |
---|
1416 | 1482 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
---|
1417 | 1483 | u32 pid, u32 seq, |
---|
1418 | 1484 | struct net_device *dev, |
---|
.. | .. |
---|
1425 | 1491 | bool new_carrier); |
---|
1426 | 1492 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
---|
1427 | 1493 | struct netdev_phys_item_id *ppid); |
---|
| 1494 | + int (*ndo_get_port_parent_id)(struct net_device *dev, |
---|
| 1495 | + struct netdev_phys_item_id *ppid); |
---|
1428 | 1496 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
---|
1429 | 1497 | char *name, size_t len); |
---|
1430 | 1498 | void (*ndo_udp_tunnel_add)(struct net_device *dev, |
---|
.. | .. |
---|
1436 | 1504 | void (*ndo_dfwd_del_station)(struct net_device *pdev, |
---|
1437 | 1505 | void *priv); |
---|
1438 | 1506 | |
---|
1439 | | - int (*ndo_get_lock_subclass)(struct net_device *dev); |
---|
1440 | 1507 | int (*ndo_set_tx_maxrate)(struct net_device *dev, |
---|
1441 | 1508 | int queue_index, |
---|
1442 | 1509 | u32 maxrate); |
---|
.. | .. |
---|
1452 | 1519 | int (*ndo_xdp_xmit)(struct net_device *dev, int n, |
---|
1453 | 1520 | struct xdp_frame **xdp, |
---|
1454 | 1521 | u32 flags); |
---|
1455 | | - int (*ndo_xsk_async_xmit)(struct net_device *dev, |
---|
1456 | | - u32 queue_id); |
---|
| 1522 | + int (*ndo_xsk_wakeup)(struct net_device *dev, |
---|
| 1523 | + u32 queue_id, u32 flags); |
---|
| 1524 | + struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); |
---|
| 1525 | + int (*ndo_tunnel_ctl)(struct net_device *dev, |
---|
| 1526 | + struct ip_tunnel_parm *p, int cmd); |
---|
| 1527 | + struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); |
---|
1457 | 1528 | |
---|
1458 | 1529 | ANDROID_KABI_RESERVE(1); |
---|
1459 | 1530 | ANDROID_KABI_RESERVE(2); |
---|
.. | .. |
---|
1576 | 1647 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER |
---|
1577 | 1648 | #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK |
---|
1578 | 1649 | |
---|
| 1650 | +/* Specifies the type of the struct net_device::ml_priv pointer */ |
---|
| 1651 | +enum netdev_ml_priv_type { |
---|
| 1652 | + ML_PRIV_NONE, |
---|
| 1653 | + ML_PRIV_CAN, |
---|
| 1654 | +}; |
---|
| 1655 | + |
---|
1579 | 1656 | /** |
---|
1580 | 1657 | * struct net_device - The DEVICE structure. |
---|
1581 | 1658 | * |
---|
.. | .. |
---|
1587 | 1664 | * (i.e. as seen by users in the "Space.c" file). It is the name |
---|
1588 | 1665 | * of the interface. |
---|
1589 | 1666 | * |
---|
1590 | | - * @name_hlist: Device name hash chain, please keep it close to name[] |
---|
| 1667 | + * @name_node: Name hashlist node |
---|
1591 | 1668 | * @ifalias: SNMP alias |
---|
1592 | 1669 | * @mem_end: Shared memory end |
---|
1593 | 1670 | * @mem_start: Shared memory start |
---|
.. | .. |
---|
1616 | 1693 | * and drivers will need to set them appropriately. |
---|
1617 | 1694 | * |
---|
1618 | 1695 | * @mpls_features: Mask of features inheritable by MPLS |
---|
| 1696 | + * @gso_partial_features: value(s) from NETIF_F_GSO\* |
---|
1619 | 1697 | * |
---|
1620 | 1698 | * @ifindex: interface index |
---|
1621 | 1699 | * @group: The group the device belongs to |
---|
.. | .. |
---|
1640 | 1718 | * @netdev_ops: Includes several pointers to callbacks, |
---|
1641 | 1719 | * if one wants to override the ndo_*() functions |
---|
1642 | 1720 | * @ethtool_ops: Management operations |
---|
| 1721 | + * @l3mdev_ops: Layer 3 master device operations |
---|
1643 | 1722 | * @ndisc_ops: Includes callbacks for different IPv6 neighbour |
---|
1644 | 1723 | * discovery handling. Necessary for e.g. 6LoWPAN. |
---|
| 1724 | + * @xfrmdev_ops: Transformation offload operations |
---|
| 1725 | + * @tlsdev_ops: Transport Layer Security offload operations |
---|
1645 | 1726 | * @header_ops: Includes callbacks for creating,parsing,caching,etc |
---|
1646 | 1727 | * of Layer 2 headers. |
---|
1647 | 1728 | * |
---|
.. | .. |
---|
1680 | 1761 | * @dev_port: Used to differentiate devices that share |
---|
1681 | 1762 | * the same function |
---|
1682 | 1763 | * @addr_list_lock: XXX: need comments on this one |
---|
| 1764 | + * @name_assign_type: network interface name assignment type |
---|
1683 | 1765 | * @uc_promisc: Counter that indicates promiscuous mode |
---|
1684 | 1766 | * has been enabled due to the need to listen to |
---|
1685 | 1767 | * additional unicast addresses in a device that |
---|
.. | .. |
---|
1702 | 1784 | * @ip6_ptr: IPv6 specific data |
---|
1703 | 1785 | * @ax25_ptr: AX.25 specific data |
---|
1704 | 1786 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering |
---|
| 1787 | + * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network |
---|
| 1788 | + * device struct |
---|
| 1789 | + * @mpls_ptr: mpls_dev struct pointer |
---|
1705 | 1790 | * |
---|
1706 | 1791 | * @dev_addr: Hw address (before bcast, |
---|
1707 | 1792 | * because most packets are unicast) |
---|
.. | .. |
---|
1710 | 1795 | * @num_rx_queues: Number of RX queues |
---|
1711 | 1796 | * allocated at register_netdev() time |
---|
1712 | 1797 | * @real_num_rx_queues: Number of RX queues currently active in device |
---|
| 1798 | + * @xdp_prog: XDP sockets filter program pointer |
---|
| 1799 | + * @gro_flush_timeout: timeout for GRO layer in NAPI |
---|
| 1800 | + * @napi_defer_hard_irqs: If not zero, provides a counter that would |
---|
| 1801 | + * allow to avoid NIC hard IRQ, on busy queues. |
---|
1713 | 1802 | * |
---|
1714 | 1803 | * @rx_handler: handler for received packets |
---|
1715 | 1804 | * @rx_handler_data: XXX: need comments on this one |
---|
1716 | 1805 | * @miniq_ingress: ingress/clsact qdisc specific data for |
---|
1717 | 1806 | * ingress processing |
---|
1718 | 1807 | * @ingress_queue: XXX: need comments on this one |
---|
| 1808 | + * @nf_hooks_ingress: netfilter hooks executed for ingress packets |
---|
1719 | 1809 | * @broadcast: hw bcast address |
---|
1720 | 1810 | * |
---|
1721 | 1811 | * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, |
---|
.. | .. |
---|
1730 | 1820 | * @qdisc: Root qdisc from userspace point of view |
---|
1731 | 1821 | * @tx_queue_len: Max frames per queue allowed |
---|
1732 | 1822 | * @tx_global_lock: XXX: need comments on this one |
---|
| 1823 | + * @xdp_bulkq: XDP device bulk queue |
---|
| 1824 | + * @xps_cpus_map: all CPUs map for XPS device |
---|
| 1825 | + * @xps_rxqs_map: all RXQs map for XPS device |
---|
1733 | 1826 | * |
---|
1734 | 1827 | * @xps_maps: XXX: need comments on this one |
---|
1735 | 1828 | * @miniq_egress: clsact qdisc specific data for |
---|
1736 | 1829 | * egress processing |
---|
| 1830 | + * @qdisc_hash: qdisc hash table |
---|
1737 | 1831 | * @watchdog_timeo: Represents the timeout that is used by |
---|
1738 | 1832 | * the watchdog (see dev_watchdog()) |
---|
1739 | 1833 | * @watchdog_timer: List of timers |
---|
1740 | 1834 | * |
---|
| 1835 | + * @proto_down_reason: reason a netdev interface is held down |
---|
1741 | 1836 | * @pcpu_refcnt: Number of references to this device |
---|
1742 | 1837 | * @todo_list: Delayed register/unregister |
---|
1743 | 1838 | * @link_watch_list: XXX: need comments on this one |
---|
.. | .. |
---|
1753 | 1848 | * @nd_net: Network namespace this network device is inside |
---|
1754 | 1849 | * |
---|
1755 | 1850 | * @ml_priv: Mid-layer private |
---|
| 1851 | + * @ml_priv_type: Mid-layer private type |
---|
1756 | 1852 | * @lstats: Loopback statistics |
---|
1757 | 1853 | * @tstats: Tunnel statistics |
---|
1758 | 1854 | * @dstats: Dummy statistics |
---|
.. | .. |
---|
1793 | 1889 | * |
---|
1794 | 1890 | * @wol_enabled: Wake-on-LAN is enabled |
---|
1795 | 1891 | * |
---|
| 1892 | + * @net_notifier_list: List of per-net netdev notifier block |
---|
| 1893 | + * that follow this device when it is moved |
---|
| 1894 | + * to another network namespace. |
---|
| 1895 | + * |
---|
| 1896 | + * @macsec_ops: MACsec offloading ops |
---|
| 1897 | + * |
---|
| 1898 | + * @udp_tunnel_nic_info: static structure describing the UDP tunnel |
---|
| 1899 | + * offload capabilities of the device |
---|
| 1900 | + * @udp_tunnel_nic: UDP tunnel offload state |
---|
| 1901 | + * @xdp_state: stores info on attached XDP BPF programs |
---|
| 1902 | + * |
---|
| 1903 | + * @nested_level: Used as a parameter of spin_lock_nested() of
---|
| 1904 | + * dev->addr_list_lock. |
---|
| 1905 | + * @unlink_list: As netif_addr_lock() can be called recursively, |
---|
| 1906 | + * keep a list of interfaces to be deleted. |
---|
| 1907 | + * |
---|
1796 | 1908 | * FIXME: cleanup struct net_device such that network protocol info |
---|
1797 | 1909 | * moves out. |
---|
1798 | 1910 | */ |
---|
1799 | 1911 | |
---|
1800 | 1912 | struct net_device { |
---|
1801 | 1913 | char name[IFNAMSIZ]; |
---|
1802 | | - struct hlist_node name_hlist; |
---|
| 1914 | + struct netdev_name_node *name_node; |
---|
1803 | 1915 | struct dev_ifalias __rcu *ifalias; |
---|
1804 | 1916 | /* |
---|
1805 | 1917 | * I/O specific fields |
---|
.. | .. |
---|
1857 | 1969 | #endif |
---|
1858 | 1970 | const struct net_device_ops *netdev_ops; |
---|
1859 | 1971 | const struct ethtool_ops *ethtool_ops; |
---|
1860 | | -#ifdef CONFIG_NET_SWITCHDEV |
---|
1861 | | - const struct switchdev_ops *switchdev_ops; |
---|
1862 | | -#endif |
---|
1863 | 1972 | #ifdef CONFIG_NET_L3_MASTER_DEV |
---|
1864 | 1973 | const struct l3mdev_ops *l3mdev_ops; |
---|
1865 | 1974 | #endif |
---|
.. | .. |
---|
1900 | 2009 | unsigned short type; |
---|
1901 | 2010 | unsigned short hard_header_len; |
---|
1902 | 2011 | unsigned char min_header_len; |
---|
| 2012 | + unsigned char name_assign_type; |
---|
1903 | 2013 | |
---|
1904 | 2014 | unsigned short needed_headroom; |
---|
1905 | 2015 | unsigned short needed_tailroom; |
---|
.. | .. |
---|
1910 | 2020 | unsigned char addr_len; |
---|
1911 | 2021 | unsigned char upper_level; |
---|
1912 | 2022 | unsigned char lower_level; |
---|
| 2023 | + |
---|
1913 | 2024 | unsigned short neigh_priv_len; |
---|
1914 | 2025 | unsigned short dev_id; |
---|
1915 | 2026 | unsigned short dev_port; |
---|
1916 | 2027 | spinlock_t addr_list_lock; |
---|
1917 | | - unsigned char name_assign_type; |
---|
1918 | | - bool uc_promisc; |
---|
| 2028 | + |
---|
1919 | 2029 | struct netdev_hw_addr_list uc; |
---|
1920 | 2030 | struct netdev_hw_addr_list mc; |
---|
1921 | 2031 | struct netdev_hw_addr_list dev_addrs; |
---|
.. | .. |
---|
1923 | 2033 | #ifdef CONFIG_SYSFS |
---|
1924 | 2034 | struct kset *queues_kset; |
---|
1925 | 2035 | #endif |
---|
| 2036 | +#ifdef CONFIG_LOCKDEP |
---|
| 2037 | + struct list_head unlink_list; |
---|
| 2038 | +#endif |
---|
1926 | 2039 | unsigned int promiscuity; |
---|
1927 | 2040 | unsigned int allmulti; |
---|
| 2041 | + bool uc_promisc; |
---|
| 2042 | +#ifdef CONFIG_LOCKDEP |
---|
| 2043 | + unsigned char nested_level; |
---|
| 2044 | +#endif |
---|
1928 | 2045 | |
---|
1929 | 2046 | |
---|
1930 | 2047 | /* Protocol-specific pointers */ |
---|
.. | .. |
---|
1967 | 2084 | |
---|
1968 | 2085 | struct bpf_prog __rcu *xdp_prog; |
---|
1969 | 2086 | unsigned long gro_flush_timeout; |
---|
| 2087 | + int napi_defer_hard_irqs; |
---|
1970 | 2088 | rx_handler_func_t __rcu *rx_handler; |
---|
1971 | 2089 | void __rcu *rx_handler_data; |
---|
1972 | 2090 | |
---|
.. | .. |
---|
1990 | 2108 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
---|
1991 | 2109 | unsigned int num_tx_queues; |
---|
1992 | 2110 | unsigned int real_num_tx_queues; |
---|
1993 | | - struct Qdisc *qdisc; |
---|
1994 | | -#ifdef CONFIG_NET_SCHED |
---|
1995 | | - DECLARE_HASHTABLE (qdisc_hash, 4); |
---|
1996 | | -#endif |
---|
| 2111 | + struct Qdisc __rcu *qdisc; |
---|
1997 | 2112 | unsigned int tx_queue_len; |
---|
1998 | 2113 | spinlock_t tx_global_lock; |
---|
1999 | | - int watchdog_timeo; |
---|
| 2114 | + |
---|
| 2115 | + struct xdp_dev_bulk_queue __percpu *xdp_bulkq; |
---|
2000 | 2116 | |
---|
2001 | 2117 | #ifdef CONFIG_XPS |
---|
2002 | 2118 | struct xps_dev_maps __rcu *xps_cpus_map; |
---|
.. | .. |
---|
2006 | 2122 | struct mini_Qdisc __rcu *miniq_egress; |
---|
2007 | 2123 | #endif |
---|
2008 | 2124 | |
---|
| 2125 | +#ifdef CONFIG_NET_SCHED |
---|
| 2126 | + DECLARE_HASHTABLE (qdisc_hash, 4); |
---|
| 2127 | +#endif |
---|
2009 | 2128 | /* These may be needed for future network-power-down code. */ |
---|
2010 | 2129 | struct timer_list watchdog_timer; |
---|
| 2130 | + int watchdog_timeo; |
---|
2011 | 2131 | |
---|
2012 | | - int __percpu *pcpu_refcnt; |
---|
| 2132 | + u32 proto_down_reason; |
---|
| 2133 | + |
---|
2013 | 2134 | struct list_head todo_list; |
---|
| 2135 | + int __percpu *pcpu_refcnt; |
---|
2014 | 2136 | |
---|
2015 | 2137 | struct list_head link_watch_list; |
---|
2016 | 2138 | |
---|
.. | .. |
---|
2039 | 2161 | possible_net_t nd_net; |
---|
2040 | 2162 | |
---|
2041 | 2163 | /* mid-layer private */ |
---|
| 2164 | + void *ml_priv; |
---|
| 2165 | + enum netdev_ml_priv_type ml_priv_type; |
---|
| 2166 | + |
---|
2042 | 2167 | union { |
---|
2043 | | - void *ml_priv; |
---|
2044 | 2168 | struct pcpu_lstats __percpu *lstats; |
---|
2045 | 2169 | struct pcpu_sw_netstats __percpu *tstats; |
---|
2046 | 2170 | struct pcpu_dstats __percpu *dstats; |
---|
2047 | | - struct pcpu_vstats __percpu *vstats; |
---|
2048 | 2171 | }; |
---|
2049 | 2172 | |
---|
2050 | 2173 | #if IS_ENABLED(CONFIG_GARP) |
---|
.. | .. |
---|
2086 | 2209 | bool proto_down; |
---|
2087 | 2210 | unsigned wol_enabled:1; |
---|
2088 | 2211 | |
---|
| 2212 | + struct list_head net_notifier_list; |
---|
| 2213 | + |
---|
| 2214 | +#if IS_ENABLED(CONFIG_MACSEC) |
---|
| 2215 | + /* MACsec management functions */ |
---|
| 2216 | + const struct macsec_ops *macsec_ops; |
---|
| 2217 | +#endif |
---|
| 2218 | + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; |
---|
| 2219 | + struct udp_tunnel_nic *udp_tunnel_nic; |
---|
| 2220 | + |
---|
| 2221 | + /* protected by rtnl_lock */ |
---|
| 2222 | + struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; |
---|
| 2223 | + |
---|
2089 | 2224 | ANDROID_KABI_RESERVE(1); |
---|
2090 | 2225 | ANDROID_KABI_RESERVE(2); |
---|
2091 | 2226 | ANDROID_KABI_RESERVE(3); |
---|
.. | .. |
---|
2094 | 2229 | ANDROID_KABI_RESERVE(6); |
---|
2095 | 2230 | ANDROID_KABI_RESERVE(7); |
---|
2096 | 2231 | ANDROID_KABI_RESERVE(8); |
---|
2097 | | - |
---|
2098 | 2232 | }; |
---|
2099 | 2233 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
---|
2100 | 2234 | |
---|
.. | .. |
---|
2132 | 2266 | int netdev_get_num_tc(struct net_device *dev) |
---|
2133 | 2267 | { |
---|
2134 | 2268 | return dev->num_tc; |
---|
| 2269 | +} |
---|
| 2270 | + |
---|
/* Prefetch the first 128 bytes at @p for reading: the cache line at @p
 * and, when L1 cache lines are shorter than 128 bytes, the following
 * line as well.
 */
static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
---|
| 2278 | + |
---|
/* Like net_prefetch(), but with write intent (prefetchw): cover the
 * first 128 bytes at @p, using two prefetches when L1 cache lines are
 * shorter than 128 bytes.
 */
static inline void net_prefetchw(void *p)
{
	prefetchw(p);
#if L1_CACHE_BYTES < 128
	prefetchw((u8 *)p + L1_CACHE_BYTES);
#endif
}
---|
2136 | 2286 | |
---|
2137 | 2287 | void netdev_unbind_sb_channel(struct net_device *dev, |
---|
.. | .. |
---|
2181 | 2331 | (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ |
---|
2182 | 2332 | (dev)->qdisc_running_key = &qdisc_running_key; \ |
---|
2183 | 2333 | lockdep_set_class(&(dev)->addr_list_lock, \ |
---|
2184 | | - &dev_addr_list_lock_key); \ |
---|
| 2334 | + &dev_addr_list_lock_key); \ |
---|
2185 | 2335 | for (i = 0; i < (dev)->num_tx_queues; i++) \ |
---|
2186 | 2336 | lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ |
---|
2187 | 2337 | &qdisc_xmit_lock_key); \ |
---|
2188 | 2338 | } |
---|
2189 | 2339 | |
---|
2190 | | -struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
---|
2191 | | - struct sk_buff *skb, |
---|
2192 | | - struct net_device *sb_dev); |
---|
| 2340 | +u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, |
---|
| 2341 | + struct net_device *sb_dev); |
---|
| 2342 | +struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, |
---|
| 2343 | + struct sk_buff *skb, |
---|
| 2344 | + struct net_device *sb_dev); |
---|
2193 | 2345 | |
---|
2194 | 2346 | /* returns the headroom that the master device needs to take in account |
---|
2195 | 2347 | * when forwarding to this dev |
---|
.. | .. |
---|
/* Drop any previously requested RX headroom for @dev by passing the
 * reset sentinel -1 to netdev_set_rx_headroom().
 */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}
---|
| 2365 | + |
---|
| 2366 | +static inline void *netdev_get_ml_priv(struct net_device *dev, |
---|
| 2367 | + enum netdev_ml_priv_type type) |
---|
| 2368 | +{ |
---|
| 2369 | + if (dev->ml_priv_type != type) |
---|
| 2370 | + return NULL; |
---|
| 2371 | + |
---|
| 2372 | + return dev->ml_priv; |
---|
| 2373 | +} |
---|
| 2374 | + |
---|
/* Attach mid-layer private data @ml_priv, tagged with @type, to @dev.
 *
 * Warns (without failing) when ml_priv_type is already set to a
 * different type, or when ml_priv was populated while the type is
 * still ML_PRIV_NONE — both indicate two users fighting over ml_priv.
 */
static inline void netdev_set_ml_priv(struct net_device *dev,
				      void *ml_priv,
				      enum netdev_ml_priv_type type)
{
	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
	     dev->ml_priv_type, type);
	WARN(!dev->ml_priv_type && dev->ml_priv,
	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");

	dev->ml_priv = ml_priv;
	dev->ml_priv_type = type;
}
---|
2213 | 2388 | |
---|
2214 | 2389 | /* |
---|
.. | .. |
---|
2287 | 2462 | } |
---|
2288 | 2463 | |
---|
2289 | 2464 | /** |
---|
| 2465 | + * __netif_napi_del - remove a NAPI context |
---|
| 2466 | + * @napi: NAPI context |
---|
| 2467 | + * |
---|
| 2468 | + * Warning: caller must observe RCU grace period before freeing memory |
---|
| 2469 | + * containing @napi. Drivers might want to call this helper to combine |
---|
| 2470 | + * all the needed RCU grace periods into a single one. |
---|
| 2471 | + */ |
---|
| 2472 | +void __netif_napi_del(struct napi_struct *napi); |
---|
| 2473 | + |
---|
| 2474 | +/** |
---|
2290 | 2475 | * netif_napi_del - remove a NAPI context |
---|
2291 | 2476 | * @napi: NAPI context |
---|
2292 | 2477 | * |
---|
2293 | 2478 | * netif_napi_del() removes a NAPI context from the network device NAPI list |
---|
2294 | 2479 | */ |
---|
2295 | | -void netif_napi_del(struct napi_struct *napi); |
---|
static inline void netif_napi_del(struct napi_struct *napi)
{
	__netif_napi_del(napi);
	/* Wait out the RCU grace period here so the caller may free the
	 * memory containing @napi immediately on return (see the
	 * __netif_napi_del() kernel-doc).
	 */
	synchronize_net();
}
---|
2296 | 2485 | |
---|
2297 | 2486 | struct napi_gro_cb { |
---|
2298 | 2487 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
---|
.. | .. |
---|
2351 | 2540 | /* Number of gro_receive callbacks this packet already went through */ |
---|
2352 | 2541 | u8 recursion_counter:4; |
---|
2353 | 2542 | |
---|
2354 | | - /* 1 bit hole */ |
---|
| 2543 | + /* GRO is done by frag_list pointer chaining. */ |
---|
| 2544 | + u8 is_flist:1; |
---|
2355 | 2545 | |
---|
2356 | 2546 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
---|
2357 | 2547 | __wsum csum; |
---|
.. | .. |
---|
2398 | 2588 | |
---|
2399 | 2589 | struct packet_type { |
---|
2400 | 2590 | __be16 type; /* This is really htons(ether_type). */ |
---|
| 2591 | + bool ignore_outgoing; |
---|
2401 | 2592 | struct net_device *dev; /* NULL is wildcarded here */ |
---|
2402 | 2593 | int (*func) (struct sk_buff *, |
---|
2403 | 2594 | struct net_device *, |
---|
.. | .. |
---|
2440 | 2631 | u64 tx_packets; |
---|
2441 | 2632 | u64 tx_bytes; |
---|
2442 | 2633 | struct u64_stats_sync syncp; |
---|
2443 | | -}; |
---|
| 2634 | +} __aligned(4 * sizeof(u64)); |
---|
| 2635 | + |
---|
/* Per-CPU loopback packet/byte counters, updated by dev_lstats_add()
 * under @syncp and aggregated by dev_lstats_read().
 */
struct pcpu_lstats {
	u64_stats_t packets;	/* frames accounted via dev_lstats_add() */
	u64_stats_t bytes;	/* bytes accounted via dev_lstats_add() */
	struct u64_stats_sync syncp;	/* guards the two counters above */
} __aligned(2 * sizeof(u64));
---|
| 2641 | + |
---|
| 2642 | +void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); |
---|
| 2643 | + |
---|
/* Account one received packet of @len bytes in this CPU's slice of
 * @dev->tstats, inside a u64_stats update section.
 */
static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_bytes += len;
	tstats->rx_packets++;
	u64_stats_update_end(&tstats->syncp);
}
---|
| 2653 | + |
---|
/* Account one frame of @len bytes in this CPU's slice of @dev->lstats
 * (loopback statistics), inside a u64_stats update section.
 */
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

	u64_stats_update_begin(&lstats->syncp);
	u64_stats_add(&lstats->bytes, len);
	u64_stats_inc(&lstats->packets);
	u64_stats_update_end(&lstats->syncp);
}
---|
2444 | 2663 | |
---|
2445 | 2664 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
---|
2446 | 2665 | ({ \ |
---|
.. | .. |
---|
2505 | 2724 | NETDEV_REGISTER, |
---|
2506 | 2725 | NETDEV_UNREGISTER, |
---|
2507 | 2726 | NETDEV_CHANGEMTU, /* notify after mtu change happened */ |
---|
2508 | | - NETDEV_CHANGEADDR, |
---|
| 2727 | + NETDEV_CHANGEADDR, /* notify after the address change */ |
---|
| 2728 | + NETDEV_PRE_CHANGEADDR, /* notify before the address change */ |
---|
2509 | 2729 | NETDEV_GOING_DOWN, |
---|
2510 | 2730 | NETDEV_CHANGENAME, |
---|
2511 | 2731 | NETDEV_FEAT_CHANGE, |
---|
.. | .. |
---|
2536 | 2756 | |
---|
2537 | 2757 | int register_netdevice_notifier(struct notifier_block *nb); |
---|
2538 | 2758 | int unregister_netdevice_notifier(struct notifier_block *nb); |
---|
| 2759 | +int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); |
---|
| 2760 | +int unregister_netdevice_notifier_net(struct net *net, |
---|
| 2761 | + struct notifier_block *nb); |
---|
| 2762 | +int register_netdevice_notifier_dev_net(struct net_device *dev, |
---|
| 2763 | + struct notifier_block *nb, |
---|
| 2764 | + struct netdev_net_notifier *nn); |
---|
| 2765 | +int unregister_netdevice_notifier_dev_net(struct net_device *dev, |
---|
| 2766 | + struct notifier_block *nb, |
---|
| 2767 | + struct netdev_net_notifier *nn); |
---|
2539 | 2768 | |
---|
2540 | 2769 | struct netdev_notifier_info { |
---|
2541 | 2770 | struct net_device *dev; |
---|
.. | .. |
---|
2565 | 2794 | struct netdev_notifier_changelowerstate_info { |
---|
2566 | 2795 | struct netdev_notifier_info info; /* must be first */ |
---|
2567 | 2796 | void *lower_state_info; /* is lower dev state */ |
---|
| 2797 | +}; |
---|
| 2798 | + |
---|
| 2799 | +struct netdev_notifier_pre_changeaddr_info { |
---|
| 2800 | + struct netdev_notifier_info info; /* must be first */ |
---|
| 2801 | + const unsigned char *dev_addr; |
---|
2568 | 2802 | }; |
---|
2569 | 2803 | |
---|
2570 | 2804 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
---|
.. | .. |
---|
2601 | 2835 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) |
---|
2602 | 2836 | #define for_each_netdev_continue(net, d) \ |
---|
2603 | 2837 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) |
---|
| 2838 | +#define for_each_netdev_continue_reverse(net, d) \ |
---|
| 2839 | + list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ |
---|
| 2840 | + dev_list) |
---|
2604 | 2841 | #define for_each_netdev_continue_rcu(net, d) \ |
---|
2605 | 2842 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) |
---|
2606 | 2843 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
---|
.. | .. |
---|
2661 | 2898 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); |
---|
2662 | 2899 | struct net_device *__dev_get_by_name(struct net *net, const char *name); |
---|
2663 | 2900 | int dev_alloc_name(struct net_device *dev, const char *name); |
---|
2664 | | -int dev_open(struct net_device *dev); |
---|
| 2901 | +int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); |
---|
2665 | 2902 | void dev_close(struct net_device *dev); |
---|
2666 | 2903 | void dev_close_many(struct list_head *head, bool unlink); |
---|
2667 | 2904 | void dev_disable_lro(struct net_device *dev); |
---|
2668 | 2905 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
---|
2669 | 2906 | u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, |
---|
2670 | | - struct net_device *sb_dev, |
---|
2671 | | - select_queue_fallback_t fallback); |
---|
| 2907 | + struct net_device *sb_dev); |
---|
2672 | 2908 | u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, |
---|
2673 | | - struct net_device *sb_dev, |
---|
2674 | | - select_queue_fallback_t fallback); |
---|
| 2909 | + struct net_device *sb_dev); |
---|
| 2910 | + |
---|
2675 | 2911 | int dev_queue_xmit(struct sk_buff *skb); |
---|
2676 | 2912 | int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); |
---|
2677 | | -int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
---|
| 2913 | +int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
---|
| 2914 | + |
---|
| 2915 | +static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) |
---|
| 2916 | +{ |
---|
| 2917 | + int ret; |
---|
| 2918 | + |
---|
| 2919 | + ret = __dev_direct_xmit(skb, queue_id); |
---|
| 2920 | + if (!dev_xmit_complete(ret)) |
---|
| 2921 | + kfree_skb(skb); |
---|
| 2922 | + return ret; |
---|
| 2923 | +} |
---|
| 2924 | + |
---|
2678 | 2925 | int register_netdevice(struct net_device *dev); |
---|
2679 | 2926 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); |
---|
2680 | 2927 | void unregister_netdevice_many(struct list_head *head); |
---|
.. | .. |
---|
2686 | 2933 | int netdev_refcnt_read(const struct net_device *dev); |
---|
2687 | 2934 | void free_netdev(struct net_device *dev); |
---|
2688 | 2935 | void netdev_freemem(struct net_device *dev); |
---|
2689 | | -void synchronize_net(void); |
---|
2690 | 2936 | int init_dummy_netdev(struct net_device *dev); |
---|
2691 | 2937 | |
---|
| 2938 | +struct net_device *netdev_get_xmit_slave(struct net_device *dev, |
---|
| 2939 | + struct sk_buff *skb, |
---|
| 2940 | + bool all_slaves); |
---|
2692 | 2941 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
---|
2693 | 2942 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); |
---|
2694 | 2943 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
---|
.. | .. |
---|
2696 | 2945 | int netdev_get_name(struct net *net, char *name, int ifindex); |
---|
2697 | 2946 | int dev_restart(struct net_device *dev); |
---|
2698 | 2947 | int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); |
---|
| 2948 | +int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); |
---|
2699 | 2949 | |
---|
2700 | 2950 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) |
---|
2701 | 2951 | { |
---|
.. | .. |
---|
2832 | 3082 | } |
---|
2833 | 3083 | |
---|
/* Record the complement of the @pseudo header checksum in @skb's GRO
 * control block and mark it valid (supports CHECKSUM_COMPLETE handling
 * for tunneling protocols — see napi_gro_cb's csum field).
 */
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}
---|
2840 | 3090 | |
---|
2841 | | -#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ |
---|
| 3091 | +#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \ |
---|
2842 | 3092 | do { \ |
---|
2843 | 3093 | if (__skb_gro_checksum_convert_check(skb)) \ |
---|
2844 | | - __skb_gro_checksum_convert(skb, check, \ |
---|
| 3094 | + __skb_gro_checksum_convert(skb, \ |
---|
2845 | 3095 | compute_pseudo(skb, proto)); \ |
---|
2846 | 3096 | } while (0) |
---|
2847 | 3097 | |
---|
.. | .. |
---|
2964 | 3214 | return dev->header_ops->parse(skb, haddr); |
---|
2965 | 3215 | } |
---|
2966 | 3216 | |
---|
| 3217 | +static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) |
---|
| 3218 | +{ |
---|
| 3219 | + const struct net_device *dev = skb->dev; |
---|
| 3220 | + |
---|
| 3221 | + if (!dev->header_ops || !dev->header_ops->parse_protocol) |
---|
| 3222 | + return 0; |
---|
| 3223 | + return dev->header_ops->parse_protocol(skb); |
---|
| 3224 | +} |
---|
| 3225 | + |
---|
2967 | 3226 | /* ll_header must have at least hard_header_len allocated */ |
---|
2968 | 3227 | static inline bool dev_validate_header(const struct net_device *dev, |
---|
2969 | 3228 | char *ll_header, int len) |
---|
.. | .. |
---|
2984 | 3243 | return false; |
---|
2985 | 3244 | } |
---|
2986 | 3245 | |
---|
2987 | | -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, |
---|
2988 | | - int len, int size); |
---|
2989 | | -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
---|
2990 | | -static inline int unregister_gifconf(unsigned int family) |
---|
| 3246 | +static inline bool dev_has_header(const struct net_device *dev) |
---|
2991 | 3247 | { |
---|
2992 | | - return register_gifconf(family, NULL); |
---|
| 3248 | + return dev->header_ops && dev->header_ops->create; |
---|
2993 | 3249 | } |
---|
2994 | 3250 | |
---|
2995 | 3251 | #ifdef CONFIG_NET_FLOW_LIMIT |
---|
.. | .. |
---|
3010 | 3266 | */ |
---|
3011 | 3267 | struct softnet_data { |
---|
3012 | 3268 | struct list_head poll_list; |
---|
3013 | | - struct napi_struct *current_napi; |
---|
3014 | 3269 | struct sk_buff_head process_queue; |
---|
3015 | 3270 | |
---|
3016 | 3271 | /* stats */ |
---|
3017 | 3272 | unsigned int processed; |
---|
3018 | 3273 | unsigned int time_squeeze; |
---|
3019 | 3274 | unsigned int received_rps; |
---|
3020 | | - /* unused partner variable for ABI alignment */ |
---|
3021 | | - unsigned int gro_coalesced; |
---|
3022 | | - |
---|
3023 | 3275 | #ifdef CONFIG_RPS |
---|
3024 | 3276 | struct softnet_data *rps_ipi_list; |
---|
3025 | 3277 | #endif |
---|
.. | .. |
---|
3052 | 3304 | unsigned int dropped; |
---|
3053 | 3305 | struct sk_buff_head input_pkt_queue; |
---|
3054 | 3306 | struct napi_struct backlog; |
---|
3055 | | - struct sk_buff_head tofree_queue; |
---|
3056 | 3307 | |
---|
3057 | 3308 | }; |
---|
3058 | 3309 | |
---|
.. | .. |
---|
3071 | 3322 | #endif |
---|
3072 | 3323 | } |
---|
3073 | 3324 | |
---|
3074 | | -#define XMIT_RECURSION_LIMIT 8 |
---|
3075 | 3325 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
---|
3076 | | - |
---|
3077 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
3078 | | -static inline int dev_recursion_level(void) |
---|
3079 | | -{ |
---|
3080 | | - return current->xmit_recursion; |
---|
3081 | | -} |
---|
3082 | | - |
---|
3083 | | -static inline bool dev_xmit_recursion(void) |
---|
3084 | | -{ |
---|
3085 | | - return unlikely(current->xmit_recursion > |
---|
3086 | | - XMIT_RECURSION_LIMIT); |
---|
3087 | | -} |
---|
3088 | | - |
---|
3089 | | -static inline void dev_xmit_recursion_inc(void) |
---|
3090 | | -{ |
---|
3091 | | - current->xmit_recursion++; |
---|
3092 | | -} |
---|
3093 | | - |
---|
3094 | | -static inline void dev_xmit_recursion_dec(void) |
---|
3095 | | -{ |
---|
3096 | | - current->xmit_recursion--; |
---|
3097 | | -} |
---|
3098 | | - |
---|
3099 | | -#else |
---|
3100 | 3326 | |
---|
/* Current dev_queue_xmit() recursion depth recorded in this CPU's
 * softnet_data (compared against XMIT_RECURSION_LIMIT below).
 */
static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}
---|
3105 | 3331 | |
---|
| 3332 | +#define XMIT_RECURSION_LIMIT 8 |
---|
3106 | 3333 | static inline bool dev_xmit_recursion(void) |
---|
3107 | 3334 | { |
---|
3108 | 3335 | return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > |
---|
.. | .. |
---|
3118 | 3345 | { |
---|
3119 | 3346 | __this_cpu_dec(softnet_data.xmit.recursion); |
---|
3120 | 3347 | } |
---|
3121 | | -#endif |
---|
3122 | 3348 | |
---|
3123 | 3349 | void __netif_schedule(struct Qdisc *q); |
---|
3124 | 3350 | void netif_schedule_queue(struct netdev_queue *txq); |
---|
.. | .. |
---|
3285 | 3511 | #endif |
---|
3286 | 3512 | } |
---|
3287 | 3513 | |
---|
| 3514 | +/* Variant of netdev_tx_sent_queue() for drivers that are aware |
---|
| 3515 | + * that they should not test BQL status themselves. |
---|
| 3516 | + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last |
---|
| 3517 | + * skb of a batch. |
---|
| 3518 | + * Returns true if the doorbell must be used to kick the NIC. |
---|
| 3519 | + */ |
---|
| 3520 | +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
---|
| 3521 | + unsigned int bytes, |
---|
| 3522 | + bool xmit_more) |
---|
| 3523 | +{ |
---|
| 3524 | + if (xmit_more) { |
---|
| 3525 | +#ifdef CONFIG_BQL |
---|
| 3526 | + dql_queued(&dev_queue->dql, bytes); |
---|
| 3527 | +#endif |
---|
| 3528 | + return netif_tx_queue_stopped(dev_queue); |
---|
| 3529 | + } |
---|
| 3530 | + netdev_tx_sent_queue(dev_queue, bytes); |
---|
| 3531 | + return true; |
---|
| 3532 | +} |
---|
| 3533 | + |
---|
3288 | 3534 | /** |
---|
3289 | 3535 | * netdev_sent_queue - report the number of bytes queued to hardware |
---|
3290 | 3536 | * @dev: network device |
---|
.. | .. |
---|
3297 | 3543 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
---|
3298 | 3544 | { |
---|
3299 | 3545 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); |
---|
| 3546 | +} |
---|
| 3547 | + |
---|
| 3548 | +static inline bool __netdev_sent_queue(struct net_device *dev, |
---|
| 3549 | + unsigned int bytes, |
---|
| 3550 | + bool xmit_more) |
---|
| 3551 | +{ |
---|
| 3552 | + return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, |
---|
| 3553 | + xmit_more); |
---|
3300 | 3554 | } |
---|
3301 | 3555 | |
---|
3302 | 3556 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, |
---|
.. | .. |
---|
3315 | 3569 | */ |
---|
3316 | 3570 | smp_mb(); |
---|
3317 | 3571 | |
---|
3318 | | - if (dql_avail(&dev_queue->dql) < 0) |
---|
| 3572 | + if (unlikely(dql_avail(&dev_queue->dql) < 0)) |
---|
3319 | 3573 | return; |
---|
3320 | 3574 | |
---|
3321 | 3575 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) |
---|
.. | .. |
---|
3523 | 3777 | } |
---|
3524 | 3778 | |
---|
3525 | 3779 | /** |
---|
3526 | | - * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p |
---|
| 3780 | + * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p |
---|
3527 | 3781 | * @n: CPU/Rx queue index |
---|
3528 | 3782 | * @src1p: the first CPUs/Rx queues mask pointer |
---|
3529 | 3783 | * @src2p: the second CPUs/Rx queues mask pointer |
---|
.. | .. |
---|
3660 | 3914 | int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); |
---|
3661 | 3915 | int netif_rx(struct sk_buff *skb); |
---|
3662 | 3916 | int netif_rx_ni(struct sk_buff *skb); |
---|
| 3917 | +int netif_rx_any_context(struct sk_buff *skb); |
---|
3663 | 3918 | int netif_receive_skb(struct sk_buff *skb); |
---|
3664 | 3919 | int netif_receive_skb_core(struct sk_buff *skb); |
---|
3665 | 3920 | void netif_receive_skb_list(struct list_head *head); |
---|
.. | .. |
---|
3669 | 3924 | gro_result_t napi_gro_frags(struct napi_struct *napi); |
---|
3670 | 3925 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
---|
3671 | 3926 | struct packet_offload *gro_find_complete_by_type(__be16 type); |
---|
3672 | | -extern struct napi_struct *get_current_napi_context(void); |
---|
3673 | 3927 | |
---|
3674 | 3928 | static inline void napi_free_frags(struct napi_struct *napi) |
---|
3675 | 3929 | { |
---|
.. | .. |
---|
3693 | 3947 | int dev_ifconf(struct net *net, struct ifconf *, int); |
---|
3694 | 3948 | int dev_ethtool(struct net *net, struct ifreq *); |
---|
3695 | 3949 | unsigned int dev_get_flags(const struct net_device *); |
---|
3696 | | -int __dev_change_flags(struct net_device *, unsigned int flags); |
---|
3697 | | -int dev_change_flags(struct net_device *, unsigned int); |
---|
| 3950 | +int __dev_change_flags(struct net_device *dev, unsigned int flags, |
---|
| 3951 | + struct netlink_ext_ack *extack); |
---|
| 3952 | +int dev_change_flags(struct net_device *dev, unsigned int flags, |
---|
| 3953 | + struct netlink_ext_ack *extack); |
---|
3698 | 3954 | void __dev_notify_flags(struct net_device *, unsigned int old_flags, |
---|
3699 | 3955 | unsigned int gchanges); |
---|
3700 | 3956 | int dev_change_name(struct net_device *, const char *); |
---|
.. | .. |
---|
3709 | 3965 | int dev_set_mtu(struct net_device *, int); |
---|
3710 | 3966 | int dev_change_tx_queue_len(struct net_device *, unsigned long); |
---|
3711 | 3967 | void dev_set_group(struct net_device *, int); |
---|
3712 | | -int dev_set_mac_address(struct net_device *, struct sockaddr *); |
---|
| 3968 | +int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, |
---|
| 3969 | + struct netlink_ext_ack *extack); |
---|
| 3970 | +int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, |
---|
| 3971 | + struct netlink_ext_ack *extack); |
---|
| 3972 | +int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, |
---|
| 3973 | + struct netlink_ext_ack *extack); |
---|
| 3974 | +int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); |
---|
3713 | 3975 | int dev_change_carrier(struct net_device *, bool new_carrier); |
---|
3714 | 3976 | int dev_get_phys_port_id(struct net_device *dev, |
---|
3715 | 3977 | struct netdev_phys_item_id *ppid); |
---|
3716 | 3978 | int dev_get_phys_port_name(struct net_device *dev, |
---|
3717 | 3979 | char *name, size_t len); |
---|
| 3980 | +int dev_get_port_parent_id(struct net_device *dev, |
---|
| 3981 | + struct netdev_phys_item_id *ppid, bool recurse); |
---|
| 3982 | +bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); |
---|
3718 | 3983 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
---|
| 3984 | +int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); |
---|
| 3985 | +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, |
---|
| 3986 | + u32 value); |
---|
3719 | 3987 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
---|
3720 | 3988 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
---|
3721 | 3989 | struct netdev_queue *txq, int *ret); |
---|
3722 | 3990 | |
---|
3723 | 3991 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); |
---|
3724 | 3992 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
---|
3725 | | - int fd, u32 flags); |
---|
3726 | | -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, |
---|
3727 | | - enum bpf_netdev_command cmd); |
---|
| 3993 | + int fd, int expected_fd, u32 flags); |
---|
| 3994 | +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
---|
| 3995 | +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); |
---|
| 3996 | + |
---|
3728 | 3997 | int xdp_umem_query(struct net_device *dev, u16 queue_id); |
---|
3729 | 3998 | |
---|
3730 | 3999 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
---|
.. | .. |
---|
3747 | 4016 | return 0; |
---|
3748 | 4017 | } |
---|
3749 | 4018 | |
---|
| 4019 | +bool dev_nit_active(struct net_device *dev); |
---|
3750 | 4020 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
---|
3751 | 4021 | |
---|
3752 | 4022 | extern int netdev_budget; |
---|
.. | .. |
---|
3763 | 4033 | */ |
---|
static inline void dev_put(struct net_device *dev)
{
	/* Refcount is per-CPU for scalability; tolerating NULL lets
	 * callers do an unconditional put on optional device pointers.
	 */
	if (dev)
		this_cpu_dec(*dev->pcpu_refcnt);
}
---|
3768 | 4039 | |
---|
3769 | 4040 | /** |
---|
.. | .. |
---|
3774 | 4045 | */ |
---|
static inline void dev_hold(struct net_device *dev)
{
	/* Mirror of dev_put(): per-CPU increment, NULL is a no-op so
	 * hold/put pairs stay balanced for optional device pointers.
	 */
	if (dev)
		this_cpu_inc(*dev->pcpu_refcnt);
}
---|
3779 | 4051 | |
---|
3780 | 4052 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
---|
.. | .. |
---|
3852 | 4124 | |
---|
3853 | 4125 | |
---|
3854 | 4126 | /** |
---|
| 4127 | + * netif_testing_on - mark device as under test. |
---|
| 4128 | + * @dev: network device |
---|
| 4129 | + * |
---|
| 4130 | + * Mark device as under test (as per RFC2863). |
---|
| 4131 | + * |
---|
| 4132 | + * The testing state indicates that some test(s) must be performed on |
---|
| 4133 | + * the interface. After completion, of the test, the interface state |
---|
| 4134 | + * will change to up, dormant, or down, as appropriate. |
---|
| 4135 | + */ |
---|
| 4136 | +static inline void netif_testing_on(struct net_device *dev) |
---|
| 4137 | +{ |
---|
| 4138 | + if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) |
---|
| 4139 | + linkwatch_fire_event(dev); |
---|
| 4140 | +} |
---|
| 4141 | + |
---|
| 4142 | +/** |
---|
| 4143 | + * netif_testing_off - set device as not under test. |
---|
| 4144 | + * @dev: network device |
---|
| 4145 | + * |
---|
| 4146 | + * Device is not in testing state. |
---|
| 4147 | + */ |
---|
| 4148 | +static inline void netif_testing_off(struct net_device *dev) |
---|
| 4149 | +{ |
---|
| 4150 | + if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) |
---|
| 4151 | + linkwatch_fire_event(dev); |
---|
| 4152 | +} |
---|
| 4153 | + |
---|
| 4154 | +/** |
---|
| 4155 | + * netif_testing - test if device is under test |
---|
| 4156 | + * @dev: network device |
---|
| 4157 | + * |
---|
| 4158 | + * Check if device is under test |
---|
| 4159 | + */ |
---|
| 4160 | +static inline bool netif_testing(const struct net_device *dev) |
---|
| 4161 | +{ |
---|
| 4162 | + return test_bit(__LINK_STATE_TESTING, &dev->state); |
---|
| 4163 | +} |
---|
| 4164 | + |
---|
| 4165 | + |
---|
| 4166 | +/** |
---|
3855 | 4167 | * netif_oper_up - test if device is operational |
---|
3856 | 4168 | * @dev: network device |
---|
3857 | 4169 | * |
---|
.. | .. |
---|
3883 | 4195 | */ |
---|
3884 | 4196 | |
---|
/* Driver message-level classes used with dev->msg_enable. The enum names
 * bit positions; the NETIF_MSG_* masks below are derived from them so the
 * two can never drift apart.
 */
enum {
	NETIF_MSG_DRV_BIT,
	NETIF_MSG_PROBE_BIT,
	NETIF_MSG_LINK_BIT,
	NETIF_MSG_TIMER_BIT,
	NETIF_MSG_IFDOWN_BIT,
	NETIF_MSG_IFUP_BIT,
	NETIF_MSG_RX_ERR_BIT,
	NETIF_MSG_TX_ERR_BIT,
	NETIF_MSG_TX_QUEUED_BIT,
	NETIF_MSG_INTR_BIT,
	NETIF_MSG_TX_DONE_BIT,
	NETIF_MSG_RX_STATUS_BIT,
	NETIF_MSG_PKTDATA_BIT,
	NETIF_MSG_HW_BIT,
	NETIF_MSG_WOL_BIT,

	/* When you add a new bit above, update netif_msg_class_names array
	 * in net/ethtool/common.c
	 */
	NETIF_MSG_CLASS_COUNT,
};
/* Both ethtool_ops interface and internal driver implementation use u32 */
static_assert(NETIF_MSG_CLASS_COUNT <= 32);

/* Convert a bit number to its u32 mask. */
#define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
#define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)

#define NETIF_MSG_DRV		__NETIF_MSG(DRV)
#define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
#define NETIF_MSG_LINK		__NETIF_MSG(LINK)
#define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
#define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
#define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
#define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
#define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
#define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
#define NETIF_MSG_INTR		__NETIF_MSG(INTR)
#define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
#define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
#define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
#define NETIF_MSG_HW		__NETIF_MSG(HW)
#define NETIF_MSG_WOL		__NETIF_MSG(WOL)
---|
3902 | 4240 | |
---|
3903 | 4241 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) |
---|
3904 | 4242 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) |
---|
.. | .. |
---|
3927 | 4265 | return (1U << debug_value) - 1; |
---|
3928 | 4266 | } |
---|
3929 | 4267 | |
---|
3930 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
3931 | | -static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) |
---|
3932 | | -{ |
---|
3933 | | - WRITE_ONCE(txq->xmit_lock_owner, current); |
---|
3934 | | -} |
---|
3935 | | - |
---|
3936 | | -static inline void netdev_queue_clear_owner(struct netdev_queue *txq) |
---|
3937 | | -{ |
---|
3938 | | - WRITE_ONCE(txq->xmit_lock_owner, NULL); |
---|
3939 | | -} |
---|
3940 | | - |
---|
3941 | | -static inline bool netdev_queue_has_owner(struct netdev_queue *txq) |
---|
3942 | | -{ |
---|
3943 | | - if (READ_ONCE(txq->xmit_lock_owner) != NULL) |
---|
3944 | | - return true; |
---|
3945 | | - return false; |
---|
3946 | | -} |
---|
3947 | | - |
---|
3948 | | -#else |
---|
3949 | | - |
---|
3950 | | -static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) |
---|
3951 | | -{ |
---|
3952 | | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
---|
3953 | | - WRITE_ONCE(txq->xmit_lock_owner, cpu); |
---|
3954 | | -} |
---|
3955 | | - |
---|
3956 | | -static inline void netdev_queue_clear_owner(struct netdev_queue *txq) |
---|
3957 | | -{ |
---|
3958 | | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
---|
3959 | | - WRITE_ONCE(txq->xmit_lock_owner, -1); |
---|
3960 | | -} |
---|
3961 | | - |
---|
3962 | | -static inline bool netdev_queue_has_owner(struct netdev_queue *txq) |
---|
3963 | | -{ |
---|
3964 | | - if (READ_ONCE(txq->xmit_lock_owner) != -1) |
---|
3965 | | - return true; |
---|
3966 | | - return false; |
---|
3967 | | -} |
---|
3968 | | -#endif |
---|
3969 | | - |
---|
/* Take the Tx queue lock (non-BH context) and record the owning CPU so
 * __dev_queue_xmit() can detect recursive locking on the same CPU.
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
---|
3975 | 4274 | |
---|
3976 | 4275 | static inline bool __netif_tx_acquire(struct netdev_queue *txq) |
---|
.. | .. |
---|
/* As __netif_tx_lock(), but also disables bottom halves; the owner is
 * the current CPU, safe to sample after the lock is held.
 */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
---|
3992 | 4292 | |
---|
3993 | 4293 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
---|
.. | .. |
---|
3995 | 4295 | bool ok = spin_trylock(&txq->_xmit_lock); |
---|
3996 | 4296 | |
---|
3997 | 4297 | if (likely(ok)) { |
---|
3998 | | - netdev_queue_set_owner(txq, smp_processor_id()); |
---|
| 4298 | + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
---|
| 4299 | + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); |
---|
3999 | 4300 | } |
---|
4000 | 4301 | return ok; |
---|
4001 | 4302 | } |
---|
4002 | 4303 | |
---|
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	/* Clear the owner (-1 == unowned) before releasing the lock.
	 * Pairs with READ_ONCE() in __dev_queue_xmit().
	 */
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock(&txq->_xmit_lock);
}
---|
4008 | 4310 | |
---|
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	/* BH-enabled counterpart of __netif_tx_unlock(): clear owner,
	 * then drop the lock and re-enable bottom halves.
	 * Pairs with READ_ONCE() in __dev_queue_xmit().
	 */
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock_bh(&txq->_xmit_lock);
}
---|
4014 | 4317 | |
---|
static inline void txq_trans_update(struct netdev_queue *txq)
{
	/* Refresh the last-transmit timestamp only while the Tx lock is
	 * held (owner != -1).
	 * NOTE(review): this is a plain read while writers use
	 * WRITE_ONCE(); presumably callers hold the lock here so the
	 * value is stable — confirm against call sites.
	 */
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
---|
4020 | 4323 | |
---|
.. | .. |
---|
4126 | 4429 | |
---|
static inline void netif_addr_lock(struct net_device *dev)
{
	/* Default lockdep subclass 0; only when lockdep is enabled do we
	 * use the device's nesting depth so stacked devices (vlan over
	 * bond, etc.) don't trigger false recursive-lock reports.
	 */
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;
#endif
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}
---|
4141 | 4439 | |
---|
static inline void netif_addr_lock_bh(struct net_device *dev)
{
	/* Same lockdep nesting scheme as netif_addr_lock(); open-coded
	 * local_bh_disable() + spin_lock_nested() because there is no
	 * spin_lock_bh_nested() helper.
	 */
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;
#endif
	local_bh_disable();
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}
---|
4146 | 4450 | |
---|
4147 | 4451 | static inline void netif_addr_unlock(struct net_device *dev) |
---|
.. | .. |
---|
4170 | 4474 | unsigned char name_assign_type, |
---|
4171 | 4475 | void (*setup)(struct net_device *), |
---|
4172 | 4476 | unsigned int txqs, unsigned int rxqs); |
---|
4173 | | -int dev_get_valid_name(struct net *net, struct net_device *dev, |
---|
4174 | | - const char *name); |
---|
4175 | | - |
---|
4176 | 4477 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
---|
4177 | 4478 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) |
---|
4178 | 4479 | |
---|
.. | .. |
---|
4182 | 4483 | |
---|
4183 | 4484 | int register_netdev(struct net_device *dev); |
---|
4184 | 4485 | void unregister_netdev(struct net_device *dev); |
---|
| 4486 | + |
---|
| 4487 | +int devm_register_netdev(struct device *dev, struct net_device *ndev); |
---|
4185 | 4488 | |
---|
4186 | 4489 | /* General hardware address lists handling functions */ |
---|
4187 | 4490 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
---|
.. | .. |
---|
4193 | 4496 | int (*sync)(struct net_device *, const unsigned char *), |
---|
4194 | 4497 | int (*unsync)(struct net_device *, |
---|
4195 | 4498 | const unsigned char *)); |
---|
| 4499 | +int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, |
---|
| 4500 | + struct net_device *dev, |
---|
| 4501 | + int (*sync)(struct net_device *, |
---|
| 4502 | + const unsigned char *, int), |
---|
| 4503 | + int (*unsync)(struct net_device *, |
---|
| 4504 | + const unsigned char *, int)); |
---|
| 4505 | +void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, |
---|
| 4506 | + struct net_device *dev, |
---|
| 4507 | + int (*unsync)(struct net_device *, |
---|
| 4508 | + const unsigned char *, int)); |
---|
4196 | 4509 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, |
---|
4197 | 4510 | struct net_device *dev, |
---|
4198 | 4511 | int (*unsync)(struct net_device *, |
---|
.. | .. |
---|
4307 | 4620 | struct rtnl_link_stats64 *storage); |
---|
4308 | 4621 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, |
---|
4309 | 4622 | const struct net_device_stats *netdev_stats); |
---|
| 4623 | +void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, |
---|
| 4624 | + const struct pcpu_sw_netstats __percpu *netstats); |
---|
4310 | 4625 | |
---|
4311 | 4626 | extern int netdev_max_backlog; |
---|
4312 | 4627 | extern int netdev_tstamp_prequeue; |
---|
.. | .. |
---|
4315 | 4630 | extern int dev_weight_tx_bias; |
---|
4316 | 4631 | extern int dev_rx_weight; |
---|
4317 | 4632 | extern int dev_tx_weight; |
---|
| 4633 | +extern int gro_normal_batch; |
---|
| 4634 | + |
---|
| 4635 | +enum { |
---|
| 4636 | + NESTED_SYNC_IMM_BIT, |
---|
| 4637 | + NESTED_SYNC_TODO_BIT, |
---|
| 4638 | +}; |
---|
| 4639 | + |
---|
| 4640 | +#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) |
---|
| 4641 | +#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) |
---|
| 4642 | + |
---|
| 4643 | +#define NESTED_SYNC_IMM __NESTED_SYNC(IMM) |
---|
| 4644 | +#define NESTED_SYNC_TODO __NESTED_SYNC(TODO) |
---|
| 4645 | + |
---|
| 4646 | +struct netdev_nested_priv { |
---|
| 4647 | + unsigned char flags; |
---|
| 4648 | + void *data; |
---|
| 4649 | +}; |
---|
4318 | 4650 | |
---|
4319 | 4651 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
---|
4320 | 4652 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
---|
4321 | 4653 | struct list_head **iter); |
---|
4322 | 4654 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
---|
4323 | 4655 | struct list_head **iter); |
---|
| 4656 | + |
---|
#ifdef CONFIG_LOCKDEP
/* List of devices pending unlink processing (lockdep builds only).
 * NOTE(review): a static definition in a header gives each translation
 * unit its own list head — presumably only one TU walks it; verify.
 */
static LIST_HEAD(net_unlink_list);

/* Queue @dev for deferred unlink handling; idempotent — a device
 * already on the list is not added twice.
 */
static inline void net_unlink_todo(struct net_device *dev)
{
	if (list_empty(&dev->unlink_list))
		list_add_tail(&dev->unlink_list, &net_unlink_list);
}
#endif
---|
4324 | 4666 | |
---|
4325 | 4667 | /* iterate through upper list, must be called under RCU read lock */ |
---|
4326 | 4668 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ |
---|
.. | .. |
---|
4331 | 4673 | |
---|
4332 | 4674 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
---|
4333 | 4675 | int (*fn)(struct net_device *upper_dev, |
---|
4334 | | - void *data), |
---|
4335 | | - void *data); |
---|
| 4676 | + struct netdev_nested_priv *priv), |
---|
| 4677 | + struct netdev_nested_priv *priv); |
---|
4336 | 4678 | |
---|
4337 | 4679 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, |
---|
4338 | 4680 | struct net_device *upper_dev); |
---|
.. | .. |
---|
4365 | 4707 | ldev; \ |
---|
4366 | 4708 | ldev = netdev_lower_get_next(dev, &(iter))) |
---|
4367 | 4709 | |
---|
4368 | | -struct net_device *netdev_all_lower_get_next(struct net_device *dev, |
---|
| 4710 | +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, |
---|
4369 | 4711 | struct list_head **iter); |
---|
4370 | | -struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, |
---|
4371 | | - struct list_head **iter); |
---|
4372 | | - |
---|
4373 | 4712 | int netdev_walk_all_lower_dev(struct net_device *dev, |
---|
4374 | 4713 | int (*fn)(struct net_device *lower_dev, |
---|
4375 | | - void *data), |
---|
4376 | | - void *data); |
---|
| 4714 | + struct netdev_nested_priv *priv), |
---|
| 4715 | + struct netdev_nested_priv *priv); |
---|
4377 | 4716 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, |
---|
4378 | 4717 | int (*fn)(struct net_device *lower_dev, |
---|
4379 | | - void *data), |
---|
4380 | | - void *data); |
---|
| 4718 | + struct netdev_nested_priv *priv), |
---|
| 4719 | + struct netdev_nested_priv *priv); |
---|
4381 | 4720 | |
---|
4382 | 4721 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
---|
4383 | 4722 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
---|
.. | .. |
---|
4391 | 4730 | struct netlink_ext_ack *extack); |
---|
4392 | 4731 | void netdev_upper_dev_unlink(struct net_device *dev, |
---|
4393 | 4732 | struct net_device *upper_dev); |
---|
| 4733 | +int netdev_adjacent_change_prepare(struct net_device *old_dev, |
---|
| 4734 | + struct net_device *new_dev, |
---|
| 4735 | + struct net_device *dev, |
---|
| 4736 | + struct netlink_ext_ack *extack); |
---|
| 4737 | +void netdev_adjacent_change_commit(struct net_device *old_dev, |
---|
| 4738 | + struct net_device *new_dev, |
---|
| 4739 | + struct net_device *dev); |
---|
| 4740 | +void netdev_adjacent_change_abort(struct net_device *old_dev, |
---|
| 4741 | + struct net_device *new_dev, |
---|
| 4742 | + struct net_device *dev); |
---|
4394 | 4743 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
---|
4395 | 4744 | void *netdev_lower_dev_get_private(struct net_device *dev, |
---|
4396 | 4745 | struct net_device *lower_dev); |
---|
.. | .. |
---|
4402 | 4751 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; |
---|
4403 | 4752 | void netdev_rss_key_fill(void *buffer, size_t len); |
---|
4404 | 4753 | |
---|
4405 | | -int dev_get_nest_level(struct net_device *dev); |
---|
4406 | 4754 | int skb_checksum_help(struct sk_buff *skb); |
---|
4407 | 4755 | int skb_crc32c_csum_help(struct sk_buff *skb); |
---|
4408 | 4756 | int skb_csum_hwoffload_help(struct sk_buff *skb, |
---|
.. | .. |
---|
4425 | 4773 | |
---|
4426 | 4774 | void netdev_bonding_info_change(struct net_device *dev, |
---|
4427 | 4775 | struct netdev_bonding_info *bonding_info); |
---|
| 4776 | + |
---|
| 4777 | +#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) |
---|
| 4778 | +void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); |
---|
| 4779 | +#else |
---|
| 4780 | +static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, |
---|
| 4781 | + const void *data) |
---|
| 4782 | +{ |
---|
| 4783 | +} |
---|
| 4784 | +#endif |
---|
4428 | 4785 | |
---|
4429 | 4786 | static inline |
---|
4430 | 4787 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) |
---|
.. | .. |
---|
4457 | 4814 | } |
---|
4458 | 4815 | |
---|
#ifdef CONFIG_BUG
/* Report a hardware RX checksum failure for @dev/@skb; real
 * implementation lives in net/core (CONFIG_BUG builds only).
 */
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
/* No-op stub when CONFIG_BUG is disabled. */
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif
---|
.. | .. |
---|
4477 | 4835 | struct sk_buff *skb, struct net_device *dev, |
---|
4478 | 4836 | bool more) |
---|
4479 | 4837 | { |
---|
4480 | | - skb->xmit_more = more ? 1 : 0; |
---|
| 4838 | + __this_cpu_write(softnet_data.xmit.more, more); |
---|
4481 | 4839 | return ops->ndo_start_xmit(skb, dev); |
---|
4482 | 4840 | } |
---|
4483 | 4841 | |
---|
.. | .. |
---|
4490 | 4848 | struct netdev_queue *txq, bool more) |
---|
4491 | 4849 | { |
---|
4492 | 4850 | const struct net_device_ops *ops = dev->netdev_ops; |
---|
4493 | | - int rc; |
---|
| 4851 | + netdev_tx_t rc; |
---|
4494 | 4852 | |
---|
4495 | 4853 | rc = __netdev_start_xmit(ops, skb, dev, more); |
---|
4496 | 4854 | if (rc == NETDEV_TX_OK) |
---|
.. | .. |
---|
4503 | 4861 | const void *ns); |
---|
4504 | 4862 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
---|
4505 | 4863 | const void *ns); |
---|
4506 | | - |
---|
4507 | | -static inline int netdev_class_create_file(const struct class_attribute *class_attr) |
---|
4508 | | -{ |
---|
4509 | | - return netdev_class_create_file_ns(class_attr, NULL); |
---|
4510 | | -} |
---|
4511 | | - |
---|
4512 | | -static inline void netdev_class_remove_file(const struct class_attribute *class_attr) |
---|
4513 | | -{ |
---|
4514 | | - netdev_class_remove_file_ns(class_attr, NULL); |
---|
4515 | | -} |
---|
4516 | 4864 | |
---|
4517 | 4865 | extern const struct kobj_ns_type_operations net_ns_type_operations; |
---|
4518 | 4866 | |
---|
.. | .. |
---|
4586 | 4934 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
---|
4587 | 4935 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
---|
4588 | 4936 | BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); |
---|
| 4937 | + BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); |
---|
4589 | 4938 | |
---|
4590 | 4939 | return (features & feature) == feature; |
---|
4591 | 4940 | } |
---|
.. | .. |
---|
4688 | 5037 | return dev->priv_flags & IFF_OVS_DATAPATH; |
---|
4689 | 5038 | } |
---|
4690 | 5039 | |
---|
| 5040 | +static inline bool netif_is_any_bridge_port(const struct net_device *dev) |
---|
| 5041 | +{ |
---|
| 5042 | + return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); |
---|
| 5043 | +} |
---|
| 5044 | + |
---|
4691 | 5045 | static inline bool netif_is_team_master(const struct net_device *dev) |
---|
4692 | 5046 | { |
---|
4693 | 5047 | return dev->priv_flags & IFF_TEAM; |
---|
.. | .. |
---|
4769 | 5123 | return " (unknown)"; |
---|
4770 | 5124 | } |
---|
4771 | 5125 | |
---|
4772 | | -__printf(3, 4) |
---|
| 5126 | +__printf(3, 4) __cold |
---|
4773 | 5127 | void netdev_printk(const char *level, const struct net_device *dev, |
---|
4774 | 5128 | const char *format, ...); |
---|
4775 | | -__printf(2, 3) |
---|
| 5129 | +__printf(2, 3) __cold |
---|
4776 | 5130 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
---|
4777 | | -__printf(2, 3) |
---|
| 5131 | +__printf(2, 3) __cold |
---|
4778 | 5132 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
---|
4779 | | -__printf(2, 3) |
---|
| 5133 | +__printf(2, 3) __cold |
---|
4780 | 5134 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
---|
4781 | | -__printf(2, 3) |
---|
| 5135 | +__printf(2, 3) __cold |
---|
4782 | 5136 | void netdev_err(const struct net_device *dev, const char *format, ...); |
---|
4783 | | -__printf(2, 3) |
---|
| 5137 | +__printf(2, 3) __cold |
---|
4784 | 5138 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
---|
4785 | | -__printf(2, 3) |
---|
| 5139 | +__printf(2, 3) __cold |
---|
4786 | 5140 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
---|
4787 | | -__printf(2, 3) |
---|
| 5141 | +__printf(2, 3) __cold |
---|
4788 | 5142 | void netdev_info(const struct net_device *dev, const char *format, ...); |
---|
4789 | 5143 | |
---|
4790 | 5144 | #define netdev_level_once(level, dev, fmt, ...) \ |
---|
.. | .. |
---|
4815 | 5169 | #define MODULE_ALIAS_NETDEV(device) \ |
---|
4816 | 5170 | MODULE_ALIAS("netdev-" device) |
---|
4817 | 5171 | |
---|
4818 | | -#if defined(CONFIG_DYNAMIC_DEBUG) |
---|
| 5172 | +#if defined(CONFIG_DYNAMIC_DEBUG) || \ |
---|
| 5173 | + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
---|
4819 | 5174 | #define netdev_dbg(__dev, format, args...) \ |
---|
4820 | 5175 | do { \ |
---|
4821 | 5176 | dynamic_netdev_dbg(__dev, format, ##args); \ |
---|
.. | .. |
---|
4885 | 5240 | #define netif_info(priv, type, dev, fmt, args...) \ |
---|
4886 | 5241 | netif_level(info, priv, type, dev, fmt, ##args) |
---|
4887 | 5242 | |
---|
4888 | | -#if defined(CONFIG_DYNAMIC_DEBUG) |
---|
| 5243 | +#if defined(CONFIG_DYNAMIC_DEBUG) || \ |
---|
| 5244 | + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
---|
4889 | 5245 | #define netif_dbg(priv, type, netdev, format, args...) \ |
---|
4890 | 5246 | do { \ |
---|
4891 | 5247 | if (netif_msg_##type(priv)) \ |
---|
.. | .. |
---|
4945 | 5301 | #define PTYPE_HASH_SIZE (16) |
---|
4946 | 5302 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) |
---|
4947 | 5303 | |
---|
| 5304 | +extern struct net_device *blackhole_netdev; |
---|
| 5305 | + |
---|
4948 | 5306 | #endif /* _LINUX_NETDEVICE_H */ |
---|