..
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
..
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Bjorn Ekwall. <bj0rn@blox.se>
* Pekka Riikonen <priikone@poseidon.pspt.fi>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*
* Moved to /usr/include/linux for NET3
*/
..
struct device;
struct phy_device;
struct dsa_port;
+struct ip_tunnel_parm;
+struct macsec_context;
+struct macsec_ops;

struct sfp_bus;
/* 802.11 specific */
..
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
+struct udp_tunnel_nic_info;
+struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

+void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+
+#define MAX_NEST_DEV 8

/*
* Transmit return codes: transmit return codes originate from three different
..

#ifdef CONFIG_RPS
#include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
#endif

struct neighbour;
..
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
-#define NETDEV_HW_ADDR_T_SLAVE 3
-#define NETDEV_HW_ADDR_T_UNICAST 4
-#define NETDEV_HW_ADDR_T_MULTICAST 5
+#define NETDEV_HW_ADDR_T_UNICAST 3
+#define NETDEV_HW_ADDR_T_MULTICAST 4
bool global_use;
int sync_cnt;
int refcount;
..
const struct net_device *dev,
const unsigned char *haddr);
bool (*validate)(const char *ll_header, unsigned int len);
+ __be16 (*parse_protocol)(const struct sk_buff *skb);

ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
..
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
+ __LINK_STATE_TESTING,
};


..

unsigned long state;
int weight;
+ int defer_hard_irqs_count;
unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
..
struct net_device *dev;
struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
+ struct list_head rx_list; /* Pending GRO_NORMAL skbs */
+ int rx_count; /* length of rx_list */
struct hrtimer timer;
struct list_head dev_list;
struct hlist_node napi_hash_node;
..
NAPI_STATE_MISSED, /* reschedule a napi */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};
..
NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
- NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};
..
}

/**
- * napi_hash_del - remove a NAPI from global table
- * @napi: NAPI context
- *
- * Warning: caller must observe RCU grace period
- * before freeing memory containing @napi, if
- * this function returns true.
- * Note: core networking stack automatically calls it
- * from netif_napi_del().
- * Drivers might want to call this helper to combine all
- * the needed RCU grace periods into a single one.
- */
-bool napi_hash_del(struct napi_struct *napi);
-
-/**
* napi_disable - prevent NAPI from scheduling
* @n: NAPI context
*
..
msleep(1);
else
barrier();
+}
+
+/**
+ * napi_if_scheduled_mark_missed - if napi is running, set the
+ * NAPIF_STATE_MISSED
+ * @n: NAPI context
+ *
+ * If napi is running, set the NAPIF_STATE_MISSED, and return true if
+ * NAPI is scheduled.
+ **/
+static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
+{
+ unsigned long val, new;
+
+ do {
+ val = READ_ONCE(n->state);
+ if (val & NAPIF_STATE_DISABLE)
+ return true;
+
+ if (!(val & NAPIF_STATE_SCHED))
+ return false;
+
+ new = val | NAPIF_STATE_MISSED;
+ } while (cmpxchg(&n->state, val, new) != val);
+
+ return true;
}
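/*
 * Illustrative sketch (not part of this header, driver identifiers are
 * hypothetical): a wakeup path can use napi_if_scheduled_mark_missed() to
 * avoid a redundant reschedule when the poll loop is already running.
 *
 *	static int my_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 *	{
 *		struct my_ring *ring = my_ring_lookup(dev, qid);
 *
 *		if (!napi_if_scheduled_mark_missed(&ring->napi))
 *			napi_schedule(&ring->napi);
 *		return 0;
 *	}
 */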

enum netdev_queue_state_t {
..

/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xsk_buff_pool *pool;
+#endif
/*
* write-mostly part
*/
..
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
+extern int sysctl_devconf_inherit_init_net;

+/*
+ * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
+ * == 1 : For initns only
+ * == 2 : For none.
+ */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return net == &init_net ||
- !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net;
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
}
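/*
 * Illustrative sketch (assumption about typical usage, identifiers are
 * hypothetical): a tunnel module's per-netns init would only create its
 * fallback device when the sysctl policy above allows it for that namespace.
 *
 *	static int __net_init my_tunnel_init_net(struct net *net)
 *	{
 *		if (!net_has_fallback_tunnels(net))
 *			return 0;
 *		return my_tunnel_create_fallback_dev(net);
 *	}
 */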

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
..
struct rps_map {
unsigned int len;
struct rcu_head rcu;
- u16 cpus[0];
+ u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

..
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
- struct rps_dev_flow flows[0];
+ struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
..
struct rps_sock_flow_table {
u32 mask;

- u32 ents[0] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

..
struct kobject kobj;
struct net_device *dev;
struct xdp_rxq_info xdp_rxq;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xsk_buff_pool *pool;
+#endif

ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
..
unsigned int len;
unsigned int alloc_len;
struct rcu_head rcu;
- u16 queues[0];
+ u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
..
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
+ struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
..
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
+ TC_SETUP_ROOT_QDISC,
+ TC_SETUP_QDISC_GRED,
+ TC_SETUP_QDISC_TAPRIO,
+ TC_SETUP_FT,
+ TC_SETUP_QDISC_ETS,
+ TC_SETUP_QDISC_TBF,
+ TC_SETUP_QDISC_FIFO,
};

/* These structures hold the attributes of bpf state that are being passed
..
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- XDP_QUERY_PROG,
- XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
- BPF_OFFLOAD_VERIFIER_PREP,
- BPF_OFFLOAD_TRANSLATE,
- BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
- XDP_QUERY_XSK_UMEM,
- XDP_SETUP_XSK_UMEM,
+ XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
+struct xdp_dev_bulk_queue;
+struct bpf_xdp_link;
+
+enum bpf_xdp_mode {
+ XDP_MODE_SKB = 0,
+ XDP_MODE_DRV = 1,
+ XDP_MODE_HW = 2,
+ __MAX_XDP_MODE
+};
+
+struct bpf_xdp_entity {
+ struct bpf_prog *prog;
+ struct bpf_xdp_link *link;
+};

struct netdev_bpf {
enum bpf_netdev_command command;
..
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
- struct {
- u32 prog_id;
- /* flags with which program was installed */
- u32 prog_flags;
- };
- /* BPF_OFFLOAD_VERIFIER_PREP */
- struct {
- struct bpf_prog *prog;
- const struct bpf_prog_offload_ops *ops; /* callee set */
- } verifier;
- /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
- struct {
- struct bpf_prog *prog;
- } offload;
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
+ /* XDP_SETUP_XSK_POOL */
struct {
- struct xdp_umem *umem; /* out for query*/
- u16 queue_id; /* in for query */
+ struct xsk_buff_pool *pool;
+ u16 queue_id;
} xsk;
};
};
+
+/* Flags for ndo_xsk_wakeup. */
+#define XDP_WAKEUP_RX (1 << 0)
+#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
..
};
#endif

-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-enum tls_offload_ctx_dir {
- TLS_OFFLOAD_CTX_DIR_RX,
- TLS_OFFLOAD_CTX_DIR_TX,
-};
-
-struct tls_crypto_info;
-struct tls_context;
-
-struct tlsdev_ops {
- int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn);
- void (*tls_dev_del)(struct net_device *netdev,
- struct tls_context *ctx,
- enum tls_offload_ctx_dir direction);
- void (*tls_dev_resync_rx)(struct net_device *netdev,
- struct sock *sk, u32 seq, u64 rcd_sn);
- ANDROID_KABI_RESERVE(1);
- ANDROID_KABI_RESERVE(2);
- ANDROID_KABI_RESERVE(3);
- ANDROID_KABI_RESERVE(4);
-};
-#endif
-
struct dev_ifalias {
struct rcu_head rcuhead;
char ifalias[];
+};
+
+struct devlink;
+struct tlsdev_ops;
+
+struct netdev_name_node {
+ struct hlist_node hlist;
+ struct list_head list;
+ struct net_device *dev;
+ const char *name;
+};
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+
+struct netdev_net_notifier {
+ struct list_head list;
+ struct notifier_block *nb;
};

/*
..
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * struct net_device *sb_dev,
- * select_queue_fallback_t fallback);
+ * struct net_device *sb_dev);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
..
* Called when a user wants to change the Maximum Transfer Unit
* of a device.
*
- * void (*ndo_tx_timeout)(struct net_device *dev);
+ * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
..
* int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
* Called to release previously enslaved netdev.
*
+ * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
+ * struct sk_buff *skb,
+ * bool all_slaves);
+ * Get the xmit slave of master device. If all_slaves is true, function
+ * assume all the slaves can transmit.
+ *
* Feature/offload setting functions.
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
..
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid, u16 flags)
+ * const unsigned char *addr, u16 vid, u16 flags,
+ * struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
..
* entries to skb and update idx with the number of entries.
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
- * u16 flags)
+ * u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev, u32 filter_mask,
* int nlflags)
..
* Called to get ID of physical port of this device. If driver does
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
+ *
+ * int (*ndo_get_port_parent_id)(struct net_device *dev,
+ * struct netdev_phys_item_id *ppid)
+ * Called to get the parent ID of the physical port of this device.
*
* void (*ndo_udp_tunnel_add)(struct net_device *dev,
* struct udp_tunnel_info *ti);
..
* that got dropped are freed/returned via xdp_return_frame().
* Returns negative number, means general error invoking ndo, meaning
* no frames were xmit'ed and core-caller will free all frames.
+ * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
+ * This function is used to wake up the softirq, ksoftirqd or kthread
+ * responsible for sending and/or receiving packets on a specific
+ * queue id bound to an AF_XDP socket. The flags field specifies if
+ * only RX, only Tx, or both should be woken up using the flags
+ * XDP_WAKEUP_RX and XDP_WAKEUP_TX.
+ * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
+ * Get devlink port instance associated with a given netdev.
+ * Called with a reference on the netdevice and devlink locks only,
+ * rtnl_lock is not held.
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
+ * int cmd);
+ * Add, change, delete or get information on an IPv4 tunnel.
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
+ * If a device is paired with a peer device, return the peer instance.
+ * The caller must be under RCU read context.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
..
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
..
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
- void (*ndo_tx_timeout) (struct net_device *dev);
+ void (*ndo_tx_timeout) (struct net_device *dev,
+ unsigned int txqueue);

void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
..
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_get_vf_guid)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
..
struct netlink_ext_ack *extack);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
+ struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
+ struct sk_buff *skb,
+ bool all_slaves);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
..
struct net_device *dev,
const unsigned char *addr,
u16 vid,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
..
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
-
+ int (*ndo_fdb_get)(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
..
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_port_parent_id)(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
void (*ndo_udp_tunnel_add)(struct net_device *dev,
..
void (*ndo_dfwd_del_station)(struct net_device *pdev,
void *priv);

- int (*ndo_get_lock_subclass)(struct net_device *dev);
int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
..
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
- int (*ndo_xsk_async_xmit)(struct net_device *dev,
- u32 queue_id);
+ int (*ndo_xsk_wakeup)(struct net_device *dev,
+ u32 queue_id, u32 flags);
+ struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
+ int (*ndo_tunnel_ctl)(struct net_device *dev,
+ struct ip_tunnel_parm *p, int cmd);
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);

ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
..
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK

+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+ ML_PRIV_NONE,
+ ML_PRIV_CAN,
+};
+
/**
* struct net_device - The DEVICE structure.
*
..
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
*
- * @name_hlist: Device name hash chain, please keep it close to name[]
+ * @name_node: Name hashlist node
* @ifalias: SNMP alias
* @mem_end: Shared memory end
* @mem_start: Shared memory start
..
* and drivers will need to set them appropriately.
*
* @mpls_features: Mask of features inheritable by MPLS
+ * @gso_partial_features: value(s) from NETIF_F_GSO\*
*
* @ifindex: interface index
* @group: The group the device belongs to
..
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
+ * @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
* discovery handling. Necessary for e.g. 6LoWPAN.
+ * @xfrmdev_ops: Transformation offload operations
+ * @tlsdev_ops: Transport Layer Security offload operations
* @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
..
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
+ * @name_assign_type: network interface name assignment type
* @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
..
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
+ * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
+ * device struct
+ * @mpls_ptr: mpls_dev struct pointer
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
..
* @num_rx_queues: Number of RX queues
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
+ * @xdp_prog: XDP sockets filter program pointer
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
+ * @napi_defer_hard_irqs: If not zero, provides a counter that would
+ * allow to avoid NIC hard IRQ, on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
* @miniq_ingress: ingress/clsact qdisc specific data for
* ingress processing
* @ingress_queue: XXX: need comments on this one
+ * @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
*
* @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
..
* @qdisc: Root qdisc from userspace point of view
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
+ * @xdp_bulkq: XDP device bulk queue
+ * @xps_cpus_map: all CPUs map for XPS device
+ * @xps_rxqs_map: all RXQs map for XPS device
*
* @xps_maps: XXX: need comments on this one
* @miniq_egress: clsact qdisc specific data for
* egress processing
+ * @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
+ * @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
..
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
..
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @net_notifier_list: List of per-net netdev notifier block
+ * that follow this device when it is moved
+ * to another network namespace.
+ *
+ * @macsec_ops: MACsec offloading ops
+ *
+ * @udp_tunnel_nic_info: static structure describing the UDP tunnel
+ * offload capabilities of the device
+ * @udp_tunnel_nic: UDP tunnel offload state
+ * @xdp_state: stores info on attached XDP BPF programs
+ *
+ * @nested_level: Used as as a parameter of spin_lock_nested() of
+ * dev->addr_list_lock.
+ * @unlink_list: As netif_addr_lock() can be called recursively,
+ * keep a list of interfaces to be deleted.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/

struct net_device {
char name[IFNAMSIZ];
- struct hlist_node name_hlist;
+ struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
/*
* I/O specific fields
..
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
-#ifdef CONFIG_NET_SWITCHDEV
- const struct switchdev_ops *switchdev_ops;
-#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
#endif
..
unsigned short type;
unsigned short hard_header_len;
unsigned char min_header_len;
+ unsigned char name_assign_type;

unsigned short needed_headroom;
unsigned short needed_tailroom;
..
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
+
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
spinlock_t addr_list_lock;
- unsigned char name_assign_type;
- bool uc_promisc;
+
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
..
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct list_head unlink_list;
+#endif
unsigned int promiscuity;
unsigned int allmulti;
+ bool uc_promisc;
+#ifdef CONFIG_LOCKDEP
+ unsigned char nested_level;
+#endif


/* Protocol-specific pointers */
..

struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
+ int napi_defer_hard_irqs;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;

..
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
-#ifdef CONFIG_NET_SCHED
- DECLARE_HASHTABLE (qdisc_hash, 4);
-#endif
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
- int watchdog_timeo;
+
+ struct xdp_dev_bulk_queue __percpu *xdp_bulkq;

#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_cpus_map;
..
struct mini_Qdisc __rcu *miniq_egress;
#endif

+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
+ int watchdog_timeo;

- int __percpu *pcpu_refcnt;
+ u32 proto_down_reason;
+
struct list_head todo_list;
+ int __percpu *pcpu_refcnt;

struct list_head link_watch_list;

..
possible_net_t nd_net;

/* mid-layer private */
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
union {
- void *ml_priv;
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
- struct pcpu_vstats __percpu *vstats;
};

#if IS_ENABLED(CONFIG_GARP)
..
bool proto_down;
unsigned wol_enabled:1;

+ struct list_head net_notifier_list;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
+ const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
+ struct udp_tunnel_nic *udp_tunnel_nic;
+
+ /* protected by rtnl_lock */
+ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
+
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
..
ANDROID_KABI_RESERVE(6);
ANDROID_KABI_RESERVE(7);
ANDROID_KABI_RESERVE(8);
-
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

..
int netdev_get_num_tc(struct net_device *dev)
{
return dev->num_tc;
+}
+
+static inline void net_prefetch(void *p)
+{
+ prefetch(p);
+#if L1_CACHE_BYTES < 128
+ prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+static inline void net_prefetchw(void *p)
+{
+ prefetchw(p);
+#if L1_CACHE_BYTES < 128
+ prefetchw((u8 *)p + L1_CACHE_BYTES);
+#endif
}
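/*
 * Illustrative sketch (hypothetical receive loop, rx_buf and truesize are
 * assumptions): net_prefetch() pulls in a second cache line when
 * L1_CACHE_BYTES < 128, so a following header parse is less likely to stall
 * on platforms with small cache lines.
 *
 *	void *data = page_address(rx_buf->page) + rx_buf->offset;
 *
 *	net_prefetch(data);
 *	skb = build_skb(data, truesize);
 */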

void netdev_unbind_sb_channel(struct net_device *dev,
..
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
(dev)->qdisc_running_key = &qdisc_running_key; \
lockdep_set_class(&(dev)->addr_list_lock, \
- &dev_addr_list_lock_key); \
+ &dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
&qdisc_xmit_lock_key); \
}

-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb,
- struct net_device *sb_dev);
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev);

/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
..
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
netdev_set_rx_headroom(dev, -1);
+}
+
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+ enum netdev_ml_priv_type type)
+{
+ if (dev->ml_priv_type != type)
+ return NULL;
+
+ return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+ void *ml_priv,
+ enum netdev_ml_priv_type type)
+{
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+ dev->ml_priv_type, type);
+ WARN(!dev->ml_priv_type && dev->ml_priv,
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+ dev->ml_priv = ml_priv;
+ dev->ml_priv_type = type;
}
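/*
 * Illustrative sketch (hypothetical CAN driver, my_can_priv is an
 * assumption): the typed accessors replace open-coded dev->ml_priv
 * dereferences.
 *
 *	netdev_set_ml_priv(dev, priv, ML_PRIV_CAN);
 *	...
 *	struct my_can_priv *priv = netdev_get_ml_priv(dev, ML_PRIV_CAN);
 *
 *	if (!priv)
 *		return -ENODEV;
 */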

/*
..
}

/**
+ * __netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
+ *
+ * Warning: caller must observe RCU grace period before freeing memory
+ * containing @napi. Drivers might want to call this helper to combine
+ * all the needed RCU grace periods into a single one.
+ */
+void __netif_napi_del(struct napi_struct *napi);
+
+/**
* netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* netif_napi_del() removes a NAPI context from the network device NAPI list
*/
-void netif_napi_del(struct napi_struct *napi);
+static inline void netif_napi_del(struct napi_struct *napi)
+{
+ __netif_napi_del(napi);
+ synchronize_net();
+}
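/*
 * Illustrative sketch (hypothetical teardown path): a driver with many
 * queues can batch the removals and pay for a single RCU grace period
 * instead of one per netif_napi_del() call.
 *
 *	for (i = 0; i < priv->num_queues; i++)
 *		__netif_napi_del(&priv->queue[i].napi);
 *	synchronize_net();
 *	kfree(priv->queue);
 */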

struct napi_gro_cb {
/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
..
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;

- /* 1 bit hole */
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;

/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
..

struct packet_type {
__be16 type; /* This is really htons(ether_type). */
+ bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
int (*func) (struct sk_buff *,
struct net_device *,
..
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(4 * sizeof(u64));
+
+struct pcpu_lstats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_bytes += len;
+ tstats->rx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+}
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&lstats->syncp);
+ u64_stats_add(&lstats->bytes, len);
+ u64_stats_inc(&lstats->packets);
+ u64_stats_update_end(&lstats->syncp);
+}
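/*
 * Illustrative sketch (hypothetical tunnel receive path): drivers that use
 * per-cpu dev->tstats can account a received packet with a single call
 * instead of open-coding the u64_stats sequence.
 *
 *	dev_sw_netstats_rx_add(tunnel->dev, skb->len);
 *	return netif_rx(skb);
 */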

#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
..
NETDEV_REGISTER,
NETDEV_UNREGISTER,
NETDEV_CHANGEMTU, /* notify after mtu change happened */
- NETDEV_CHANGEADDR,
+ NETDEV_CHANGEADDR, /* notify after the address change */
+ NETDEV_PRE_CHANGEADDR, /* notify before the address change */
NETDEV_GOING_DOWN,
NETDEV_CHANGENAME,
NETDEV_FEAT_CHANGE,
..

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
+int unregister_netdevice_notifier_net(struct net *net,
+ struct notifier_block *nb);
+int register_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
+int unregister_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);

struct netdev_notifier_info {
struct net_device *dev;
..
struct netdev_notifier_changelowerstate_info {
struct netdev_notifier_info info; /* must be first */
void *lower_state_info; /* is lower dev state */
+};
+
+struct netdev_notifier_pre_changeaddr_info {
+ struct netdev_notifier_info info; /* must be first */
+ const unsigned char *dev_addr;
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
..
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue_reverse(net, d) \
+ list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
+ dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
..
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
-int dev_open(struct net_device *dev);
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
+
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+ int ret;
+
+ ret = __dev_direct_xmit(skb, queue_id);
+ if (!dev_xmit_complete(ret))
+ kfree_skb(skb);
+ return ret;
+}
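/*
 * Illustrative sketch (hypothetical caller, xs and my_tx_errors are
 * assumptions): dev_direct_xmit() now frees the skb itself when the
 * transmit did not complete, so the caller only inspects the return code.
 *
 *	err = dev_direct_xmit(skb, xs->queue_id);
 *	if (err != NET_XMIT_SUCCESS)
 *		my_tx_errors++;
 */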
+
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
..
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

+struct net_device *netdev_get_xmit_slave(struct net_device *dev,
+ struct sk_buff *skb,
+ bool all_slaves);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
..
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
..
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __sum16 check, __wsum pseudo)
+ __wsum pseudo)
{
NAPI_GRO_CB(skb)->csum = ~pseudo;
NAPI_GRO_CB(skb)->csum_valid = 1;
}

-#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
do { \
if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, check, \
+ __skb_gro_checksum_convert(skb, \
compute_pseudo(skb, proto)); \
} while (0)

..
return dev->header_ops->parse(skb, haddr);
}

+static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->parse_protocol)
+ return 0;
+ return dev->header_ops->parse_protocol(skb);
+}
+
/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
char *ll_header, int len)
..
return false;
}

-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
- int len, int size);
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
-static inline int unregister_gifconf(unsigned int family)
+static inline bool dev_has_header(const struct net_device *dev)
{
- return register_gifconf(family, NULL);
+ return dev->header_ops && dev->header_ops->create;
}

#ifdef CONFIG_NET_FLOW_LIMIT
..
*/
struct softnet_data {
struct list_head poll_list;
- struct napi_struct *current_napi;
struct sk_buff_head process_queue;

/* stats */
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;
- /* unused partner variable for ABI alignment */
- unsigned int gro_coalesced;
-
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
#endif
..
#endif
}

+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
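/*
 * Illustrative sketch (hypothetical transmit path, my_hw_ring_doorbell is an
 * assumption): the return value tells the driver whether it must ring the
 * hardware doorbell for this batch.
 *
 *	if (__netdev_tx_sent_queue(txq, skb->len, xmit_more))
 *		my_hw_ring_doorbell(tx_ring);
 */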
+
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
..
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline bool __netdev_sent_queue(struct net_device *dev,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
+ xmit_more);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
..
*/
smp_mb();

- if (dql_avail(&dev_queue->dql) < 0)
+ if (unlikely(dql_avail(&dev_queue->dql) < 0))
return;

if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
..
}

/**
- * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
* @n: CPU/Rx queue index
* @src1p: the first CPUs/Rx queues mask pointer
* @src2p: the second CPUs/Rx queues mask pointer
..
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
+int netif_rx_any_context(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list(struct list_head *head);
..
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
-extern struct napi_struct *get_current_napi_context(void);

static inline void napi_free_frags(struct napi_struct *napi)
{
..
int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
-int __dev_change_flags(struct net_device *, unsigned int flags);
-int dev_change_flags(struct net_device *, unsigned int);
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
..
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
-int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
---|
| 3973 | + struct netlink_ext_ack *extack); |
---|
| 3974 | +int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); |
---|
3671 | 3975 | int dev_change_carrier(struct net_device *, bool new_carrier); |
---|
3672 | 3976 | int dev_get_phys_port_id(struct net_device *dev, |
---|
3673 | 3977 | struct netdev_phys_item_id *ppid); |
---|
3674 | 3978 | int dev_get_phys_port_name(struct net_device *dev, |
---|
3675 | 3979 | char *name, size_t len); |
---|
| 3980 | +int dev_get_port_parent_id(struct net_device *dev, |
---|
| 3981 | + struct netdev_phys_item_id *ppid, bool recurse); |
---|
| 3982 | +bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); |
---|
3676 | 3983 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
---|
| 3984 | +int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); |
---|
| 3985 | +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, |
---|
| 3986 | + u32 value); |
---|
3677 | 3987 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
---|
3678 | 3988 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
---|
3679 | 3989 | struct netdev_queue *txq, int *ret); |
---|
3680 | 3990 | |
---|
3681 | 3991 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); |
---|
3682 | 3992 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
---|
3683 | | - int fd, u32 flags); |
---|
3684 | | -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, |
---|
3685 | | - enum bpf_netdev_command cmd); |
---|
| 3993 | + int fd, int expected_fd, u32 flags); |
---|
| 3994 | +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
---|
| 3995 | +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); |
---|
| 3996 | + |
---|
3686 | 3997 | int xdp_umem_query(struct net_device *dev, u16 queue_id); |
---|
3687 | 3998 | |
---|
3688 | 3999 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
---|
.. | .. |
---|
3705 | 4016 | return 0; |
---|
3706 | 4017 | } |
---|
3707 | 4018 | |
---|
| 4019 | +bool dev_nit_active(struct net_device *dev); |
---|
3708 | 4020 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
---|
3709 | 4021 | |
---|
3710 | 4022 | extern int netdev_budget; |
---|
.. | .. |
---|
3721 | 4033 | */ |
---|
3722 | 4034 | static inline void dev_put(struct net_device *dev) |
---|
3723 | 4035 | { |
---|
3724 | | - this_cpu_dec(*dev->pcpu_refcnt); |
---|
| 4036 | + if (dev) |
---|
| 4037 | + this_cpu_dec(*dev->pcpu_refcnt); |
---|
3725 | 4038 | } |
---|
3726 | 4039 | |
---|
3727 | 4040 | /** |
---|
.. | .. |
---|
3732 | 4045 | */ |
---|
3733 | 4046 | static inline void dev_hold(struct net_device *dev) |
---|
3734 | 4047 | { |
---|
3735 | | - this_cpu_inc(*dev->pcpu_refcnt); |
---|
| 4048 | + if (dev) |
---|
| 4049 | + this_cpu_inc(*dev->pcpu_refcnt); |
---|
3736 | 4050 | } |
---|
3737 | 4051 | |
---|
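Since dev_hold() and dev_put() now accept a NULL device, error-unwind code can drop the reference unconditionally; a brief sketch with a hypothetical context structure:

/* example_ctx is illustrative; the point is that the unwind no longer
 * needs its own NULL check.
 */
static void example_release(struct example_ctx *ctx)
{
	dev_put(ctx->dev);	/* no-op when ctx->dev is NULL */
	ctx->dev = NULL;
}
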
3738 | 4052 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
---|
.. | .. |
---|
3810 | 4124 | |
---|
3811 | 4125 | |
---|
3812 | 4126 | /** |
---|
| 4127 | + * netif_testing_on - mark device as under test. |
---|
| 4128 | + * @dev: network device |
---|
| 4129 | + * |
---|
| 4130 | + * Mark device as under test (as per RFC2863). |
---|
| 4131 | + * |
---|
| 4132 | + * The testing state indicates that some test(s) must be performed on |
---|
| 4133 | + * the interface. After completion of the test, the interface state
---|
| 4134 | + * will change to up, dormant, or down, as appropriate. |
---|
| 4135 | + */ |
---|
| 4136 | +static inline void netif_testing_on(struct net_device *dev) |
---|
| 4137 | +{ |
---|
| 4138 | + if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) |
---|
| 4139 | + linkwatch_fire_event(dev); |
---|
| 4140 | +} |
---|
| 4141 | + |
---|
| 4142 | +/** |
---|
| 4143 | + * netif_testing_off - set device as not under test. |
---|
| 4144 | + * @dev: network device |
---|
| 4145 | + * |
---|
| 4146 | + * Device is not in testing state. |
---|
| 4147 | + */ |
---|
| 4148 | +static inline void netif_testing_off(struct net_device *dev) |
---|
| 4149 | +{ |
---|
| 4150 | + if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) |
---|
| 4151 | + linkwatch_fire_event(dev); |
---|
| 4152 | +} |
---|
| 4153 | + |
---|
| 4154 | +/** |
---|
| 4155 | + * netif_testing - test if device is under test |
---|
| 4156 | + * @dev: network device |
---|
| 4157 | + * |
---|
| 4158 | + * Check if device is under test |
---|
| 4159 | + */ |
---|
| 4160 | +static inline bool netif_testing(const struct net_device *dev) |
---|
| 4161 | +{ |
---|
| 4162 | + return test_bit(__LINK_STATE_TESTING, &dev->state); |
---|
| 4163 | +} |
---|
| 4164 | + |
---|
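A sketch of how a driver's ethtool self-test handler might bracket its test with these helpers; the callback shape matches struct ethtool_ops::self_test, while hw_run_selftest() is a hypothetical hardware routine:

static void example_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *data)
{
	netif_testing_on(dev);		/* operstate -> testing (RFC 2863) */

	data[0] = hw_run_selftest(dev);	/* hypothetical hardware test */
	if (data[0])
		etest->flags |= ETH_TEST_FL_FAILED;

	netif_testing_off(dev);		/* back to up/dormant/down */
}
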
| 4165 | + |
---|
| 4166 | +/** |
---|
3813 | 4167 | * netif_oper_up - test if device is operational |
---|
3814 | 4168 | * @dev: network device |
---|
3815 | 4169 | * |
---|
.. | .. |
---|
3841 | 4195 | */ |
---|
3842 | 4196 | |
---|
3843 | 4197 | enum { |
---|
3844 | | - NETIF_MSG_DRV = 0x0001, |
---|
3845 | | - NETIF_MSG_PROBE = 0x0002, |
---|
3846 | | - NETIF_MSG_LINK = 0x0004, |
---|
3847 | | - NETIF_MSG_TIMER = 0x0008, |
---|
3848 | | - NETIF_MSG_IFDOWN = 0x0010, |
---|
3849 | | - NETIF_MSG_IFUP = 0x0020, |
---|
3850 | | - NETIF_MSG_RX_ERR = 0x0040, |
---|
3851 | | - NETIF_MSG_TX_ERR = 0x0080, |
---|
3852 | | - NETIF_MSG_TX_QUEUED = 0x0100, |
---|
3853 | | - NETIF_MSG_INTR = 0x0200, |
---|
3854 | | - NETIF_MSG_TX_DONE = 0x0400, |
---|
3855 | | - NETIF_MSG_RX_STATUS = 0x0800, |
---|
3856 | | - NETIF_MSG_PKTDATA = 0x1000, |
---|
3857 | | - NETIF_MSG_HW = 0x2000, |
---|
3858 | | - NETIF_MSG_WOL = 0x4000, |
---|
| 4198 | + NETIF_MSG_DRV_BIT, |
---|
| 4199 | + NETIF_MSG_PROBE_BIT, |
---|
| 4200 | + NETIF_MSG_LINK_BIT, |
---|
| 4201 | + NETIF_MSG_TIMER_BIT, |
---|
| 4202 | + NETIF_MSG_IFDOWN_BIT, |
---|
| 4203 | + NETIF_MSG_IFUP_BIT, |
---|
| 4204 | + NETIF_MSG_RX_ERR_BIT, |
---|
| 4205 | + NETIF_MSG_TX_ERR_BIT, |
---|
| 4206 | + NETIF_MSG_TX_QUEUED_BIT, |
---|
| 4207 | + NETIF_MSG_INTR_BIT, |
---|
| 4208 | + NETIF_MSG_TX_DONE_BIT, |
---|
| 4209 | + NETIF_MSG_RX_STATUS_BIT, |
---|
| 4210 | + NETIF_MSG_PKTDATA_BIT, |
---|
| 4211 | + NETIF_MSG_HW_BIT, |
---|
| 4212 | + NETIF_MSG_WOL_BIT, |
---|
| 4213 | + |
---|
| 4214 | + /* When you add a new bit above, update netif_msg_class_names array |
---|
| 4215 | + * in net/ethtool/common.c |
---|
| 4216 | + */ |
---|
| 4217 | + NETIF_MSG_CLASS_COUNT, |
---|
3859 | 4218 | }; |
---|
| 4219 | +/* Both ethtool_ops interface and internal driver implementation use u32 */ |
---|
| 4220 | +static_assert(NETIF_MSG_CLASS_COUNT <= 32); |
---|
| 4221 | + |
---|
| 4222 | +#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) |
---|
| 4223 | +#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) |
---|
| 4224 | + |
---|
| 4225 | +#define NETIF_MSG_DRV __NETIF_MSG(DRV) |
---|
| 4226 | +#define NETIF_MSG_PROBE __NETIF_MSG(PROBE) |
---|
| 4227 | +#define NETIF_MSG_LINK __NETIF_MSG(LINK) |
---|
| 4228 | +#define NETIF_MSG_TIMER __NETIF_MSG(TIMER) |
---|
| 4229 | +#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) |
---|
| 4230 | +#define NETIF_MSG_IFUP __NETIF_MSG(IFUP) |
---|
| 4231 | +#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) |
---|
| 4232 | +#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) |
---|
| 4233 | +#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) |
---|
| 4234 | +#define NETIF_MSG_INTR __NETIF_MSG(INTR) |
---|
| 4235 | +#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) |
---|
| 4236 | +#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) |
---|
| 4237 | +#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) |
---|
| 4238 | +#define NETIF_MSG_HW __NETIF_MSG(HW) |
---|
| 4239 | +#define NETIF_MSG_WOL __NETIF_MSG(WOL) |
---|
3860 | 4240 | |
---|
3861 | 4241 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) |
---|
3862 | 4242 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) |
---|
.. | .. |
---|
4049 | 4429 | |
---|
4050 | 4430 | static inline void netif_addr_lock(struct net_device *dev) |
---|
4051 | 4431 | { |
---|
4052 | | - spin_lock(&dev->addr_list_lock); |
---|
4053 | | -} |
---|
| 4432 | + unsigned char nest_level = 0; |
---|
4054 | 4433 | |
---|
4055 | | -static inline void netif_addr_lock_nested(struct net_device *dev) |
---|
4056 | | -{ |
---|
4057 | | - int subclass = SINGLE_DEPTH_NESTING; |
---|
4058 | | - |
---|
4059 | | - if (dev->netdev_ops->ndo_get_lock_subclass) |
---|
4060 | | - subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); |
---|
4061 | | - |
---|
4062 | | - spin_lock_nested(&dev->addr_list_lock, subclass); |
---|
| 4434 | +#ifdef CONFIG_LOCKDEP |
---|
| 4435 | + nest_level = dev->nested_level; |
---|
| 4436 | +#endif |
---|
| 4437 | + spin_lock_nested(&dev->addr_list_lock, nest_level); |
---|
4063 | 4438 | } |
---|
4064 | 4439 | |
---|
4065 | 4440 | static inline void netif_addr_lock_bh(struct net_device *dev) |
---|
4066 | 4441 | { |
---|
4067 | | - spin_lock_bh(&dev->addr_list_lock); |
---|
| 4442 | + unsigned char nest_level = 0; |
---|
| 4443 | + |
---|
| 4444 | +#ifdef CONFIG_LOCKDEP |
---|
| 4445 | + nest_level = dev->nested_level; |
---|
| 4446 | +#endif |
---|
| 4447 | + local_bh_disable(); |
---|
| 4448 | + spin_lock_nested(&dev->addr_list_lock, nest_level); |
---|
4068 | 4449 | } |
---|
4069 | 4450 | |
---|
4070 | 4451 | static inline void netif_addr_unlock(struct net_device *dev) |
---|
.. | .. |
---|
4093 | 4474 | unsigned char name_assign_type, |
---|
4094 | 4475 | void (*setup)(struct net_device *), |
---|
4095 | 4476 | unsigned int txqs, unsigned int rxqs); |
---|
4096 | | -int dev_get_valid_name(struct net *net, struct net_device *dev, |
---|
4097 | | - const char *name); |
---|
4098 | | - |
---|
4099 | 4477 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
---|
4100 | 4478 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) |
---|
4101 | 4479 | |
---|
.. | .. |
---|
4105 | 4483 | |
---|
4106 | 4484 | int register_netdev(struct net_device *dev); |
---|
4107 | 4485 | void unregister_netdev(struct net_device *dev); |
---|
| 4486 | + |
---|
| 4487 | +int devm_register_netdev(struct device *dev, struct net_device *ndev); |
---|
4108 | 4488 | |
---|
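A probe-time sketch of the device-managed variant; the platform driver shape and example_priv are illustrative. The registration is torn down automatically when the backing struct device is unbound, so remove() does not need an explicit unregister_netdev():

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	/* devm_alloc_etherdev() pairs naturally with devm_register_netdev(). */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct example_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	/* ... fill in netdev_ops, features, MAC address ... */

	return devm_register_netdev(&pdev->dev, ndev);
}
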
4109 | 4489 | /* General hardware address lists handling functions */ |
---|
4110 | 4490 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
---|
.. | .. |
---|
4116 | 4496 | int (*sync)(struct net_device *, const unsigned char *), |
---|
4117 | 4497 | int (*unsync)(struct net_device *, |
---|
4118 | 4498 | const unsigned char *)); |
---|
| 4499 | +int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, |
---|
| 4500 | + struct net_device *dev, |
---|
| 4501 | + int (*sync)(struct net_device *, |
---|
| 4502 | + const unsigned char *, int), |
---|
| 4503 | + int (*unsync)(struct net_device *, |
---|
| 4504 | + const unsigned char *, int)); |
---|
| 4505 | +void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, |
---|
| 4506 | + struct net_device *dev, |
---|
| 4507 | + int (*unsync)(struct net_device *, |
---|
| 4508 | + const unsigned char *, int)); |
---|
4119 | 4509 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, |
---|
4120 | 4510 | struct net_device *dev, |
---|
4121 | 4511 | int (*unsync)(struct net_device *, |
---|
.. | .. |
---|
4230 | 4620 | struct rtnl_link_stats64 *storage); |
---|
4231 | 4621 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, |
---|
4232 | 4622 | const struct net_device_stats *netdev_stats); |
---|
| 4623 | +void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, |
---|
| 4624 | + const struct pcpu_sw_netstats __percpu *netstats); |
---|
4233 | 4625 | |
---|
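A sketch of the intended use from a driver's ndo_get_stats64(), assuming the driver keeps its per-CPU software counters in dev->tstats, as tunnel drivers commonly do:

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	/* Base counters kept in dev->stats ... */
	netdev_stats_to_stats64(stats, &dev->stats);
	/* ... plus the per-CPU rx/tx packet and byte counts. */
	dev_fetch_sw_netstats(stats, dev->tstats);
}
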
4234 | 4626 | extern int netdev_max_backlog; |
---|
4235 | 4627 | extern int netdev_tstamp_prequeue; |
---|
.. | .. |
---|
4238 | 4630 | extern int dev_weight_tx_bias; |
---|
4239 | 4631 | extern int dev_rx_weight; |
---|
4240 | 4632 | extern int dev_tx_weight; |
---|
| 4633 | +extern int gro_normal_batch; |
---|
| 4634 | + |
---|
| 4635 | +enum { |
---|
| 4636 | + NESTED_SYNC_IMM_BIT, |
---|
| 4637 | + NESTED_SYNC_TODO_BIT, |
---|
| 4638 | +}; |
---|
| 4639 | + |
---|
| 4640 | +#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) |
---|
| 4641 | +#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) |
---|
| 4642 | + |
---|
| 4643 | +#define NESTED_SYNC_IMM __NESTED_SYNC(IMM) |
---|
| 4644 | +#define NESTED_SYNC_TODO __NESTED_SYNC(TODO) |
---|
| 4645 | + |
---|
| 4646 | +struct netdev_nested_priv { |
---|
| 4647 | + unsigned char flags; |
---|
| 4648 | + void *data; |
---|
| 4649 | +}; |
---|
4241 | 4650 | |
---|
4242 | 4651 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
---|
4243 | 4652 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
---|
4244 | 4653 | struct list_head **iter); |
---|
4245 | 4654 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
---|
4246 | 4655 | struct list_head **iter); |
---|
| 4656 | + |
---|
| 4657 | +#ifdef CONFIG_LOCKDEP |
---|
| 4658 | +static LIST_HEAD(net_unlink_list); |
---|
| 4659 | + |
---|
| 4660 | +static inline void net_unlink_todo(struct net_device *dev) |
---|
| 4661 | +{ |
---|
| 4662 | + if (list_empty(&dev->unlink_list)) |
---|
| 4663 | + list_add_tail(&dev->unlink_list, &net_unlink_list); |
---|
| 4664 | +} |
---|
| 4665 | +#endif |
---|
4247 | 4666 | |
---|
4248 | 4667 | /* iterate through upper list, must be called under RCU read lock */ |
---|
4249 | 4668 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ |
---|
.. | .. |
---|
4254 | 4673 | |
---|
4255 | 4674 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
---|
4256 | 4675 | int (*fn)(struct net_device *upper_dev, |
---|
4257 | | - void *data), |
---|
4258 | | - void *data); |
---|
| 4676 | + struct netdev_nested_priv *priv), |
---|
| 4677 | + struct netdev_nested_priv *priv); |
---|
4259 | 4678 | |
---|
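With the walker callbacks now taking struct netdev_nested_priv, per-call state travels through priv->data; a hypothetical counting callback as a sketch:

static int example_count_uppers(struct net_device *upper_dev,
				struct netdev_nested_priv *priv)
{
	unsigned int *count = priv->data;

	(*count)++;
	return 0;	/* a non-zero return stops the walk */
}

static unsigned int example_upper_count(struct net_device *dev)
{
	unsigned int count = 0;
	struct netdev_nested_priv priv = {
		.flags	= 0,
		.data	= &count,
	};

	/* Caller must hold rcu_read_lock(). */
	netdev_walk_all_upper_dev_rcu(dev, example_count_uppers, &priv);
	return count;
}
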
4260 | 4679 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, |
---|
4261 | 4680 | struct net_device *upper_dev); |
---|
.. | .. |
---|
4288 | 4707 | ldev; \ |
---|
4289 | 4708 | ldev = netdev_lower_get_next(dev, &(iter))) |
---|
4290 | 4709 | |
---|
4291 | | -struct net_device *netdev_all_lower_get_next(struct net_device *dev, |
---|
| 4710 | +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, |
---|
4292 | 4711 | struct list_head **iter); |
---|
4293 | | -struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, |
---|
4294 | | - struct list_head **iter); |
---|
4295 | | - |
---|
4296 | 4712 | int netdev_walk_all_lower_dev(struct net_device *dev, |
---|
4297 | 4713 | int (*fn)(struct net_device *lower_dev, |
---|
4298 | | - void *data), |
---|
4299 | | - void *data); |
---|
| 4714 | + struct netdev_nested_priv *priv), |
---|
| 4715 | + struct netdev_nested_priv *priv); |
---|
4300 | 4716 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, |
---|
4301 | 4717 | int (*fn)(struct net_device *lower_dev, |
---|
4302 | | - void *data), |
---|
4303 | | - void *data); |
---|
| 4718 | + struct netdev_nested_priv *priv), |
---|
| 4719 | + struct netdev_nested_priv *priv); |
---|
4304 | 4720 | |
---|
4305 | 4721 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
---|
4306 | 4722 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
---|
.. | .. |
---|
4314 | 4730 | struct netlink_ext_ack *extack); |
---|
4315 | 4731 | void netdev_upper_dev_unlink(struct net_device *dev, |
---|
4316 | 4732 | struct net_device *upper_dev); |
---|
| 4733 | +int netdev_adjacent_change_prepare(struct net_device *old_dev, |
---|
| 4734 | + struct net_device *new_dev, |
---|
| 4735 | + struct net_device *dev, |
---|
| 4736 | + struct netlink_ext_ack *extack); |
---|
| 4737 | +void netdev_adjacent_change_commit(struct net_device *old_dev, |
---|
| 4738 | + struct net_device *new_dev, |
---|
| 4739 | + struct net_device *dev); |
---|
| 4740 | +void netdev_adjacent_change_abort(struct net_device *old_dev, |
---|
| 4741 | + struct net_device *new_dev, |
---|
| 4742 | + struct net_device *dev); |
---|
4317 | 4743 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
---|
4318 | 4744 | void *netdev_lower_dev_get_private(struct net_device *dev, |
---|
4319 | 4745 | struct net_device *lower_dev); |
---|
.. | .. |
---|
4325 | 4751 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; |
---|
4326 | 4752 | void netdev_rss_key_fill(void *buffer, size_t len); |
---|
4327 | 4753 | |
---|
4328 | | -int dev_get_nest_level(struct net_device *dev); |
---|
4329 | 4754 | int skb_checksum_help(struct sk_buff *skb); |
---|
4330 | 4755 | int skb_crc32c_csum_help(struct sk_buff *skb); |
---|
4331 | 4756 | int skb_csum_hwoffload_help(struct sk_buff *skb, |
---|
.. | .. |
---|
4348 | 4773 | |
---|
4349 | 4774 | void netdev_bonding_info_change(struct net_device *dev, |
---|
4350 | 4775 | struct netdev_bonding_info *bonding_info); |
---|
| 4776 | + |
---|
| 4777 | +#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) |
---|
| 4778 | +void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); |
---|
| 4779 | +#else |
---|
| 4780 | +static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, |
---|
| 4781 | + const void *data) |
---|
| 4782 | +{ |
---|
| 4783 | +} |
---|
| 4784 | +#endif |
---|
4351 | 4785 | |
---|
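A sketch of a caller: after a setting changes outside the ethtool netlink path, the corresponding notification can be emitted; with CONFIG_ETHTOOL_NETLINK disabled the call resolves to the empty stub above. ETHTOOL_MSG_WOL_NTF is assumed to come from uapi/linux/ethtool_netlink.h.

static void example_wol_changed(struct net_device *dev)
{
	/* Tell ethtool netlink listeners that WoL configuration changed. */
	ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
}
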
4352 | 4786 | static inline |
---|
4353 | 4787 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) |
---|
.. | .. |
---|
4380 | 4814 | } |
---|
4381 | 4815 | |
---|
4382 | 4816 | #ifdef CONFIG_BUG |
---|
4383 | | -void netdev_rx_csum_fault(struct net_device *dev); |
---|
| 4817 | +void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); |
---|
4384 | 4818 | #else |
---|
4385 | | -static inline void netdev_rx_csum_fault(struct net_device *dev) |
---|
| 4819 | +static inline void netdev_rx_csum_fault(struct net_device *dev, |
---|
| 4820 | + struct sk_buff *skb) |
---|
4386 | 4821 | { |
---|
4387 | 4822 | } |
---|
4388 | 4823 | #endif |
---|
.. | .. |
---|
4400 | 4835 | struct sk_buff *skb, struct net_device *dev, |
---|
4401 | 4836 | bool more) |
---|
4402 | 4837 | { |
---|
4403 | | - skb->xmit_more = more ? 1 : 0; |
---|
| 4838 | + __this_cpu_write(softnet_data.xmit.more, more); |
---|
4404 | 4839 | return ops->ndo_start_xmit(skb, dev); |
---|
4405 | 4840 | } |
---|
4406 | 4841 | |
---|
.. | .. |
---|
4413 | 4848 | struct netdev_queue *txq, bool more) |
---|
4414 | 4849 | { |
---|
4415 | 4850 | const struct net_device_ops *ops = dev->netdev_ops; |
---|
4416 | | - int rc; |
---|
| 4851 | + netdev_tx_t rc; |
---|
4417 | 4852 | |
---|
4418 | 4853 | rc = __netdev_start_xmit(ops, skb, dev, more); |
---|
4419 | 4854 | if (rc == NETDEV_TX_OK) |
---|
.. | .. |
---|
4426 | 4861 | const void *ns); |
---|
4427 | 4862 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
---|
4428 | 4863 | const void *ns); |
---|
4429 | | - |
---|
4430 | | -static inline int netdev_class_create_file(const struct class_attribute *class_attr) |
---|
4431 | | -{ |
---|
4432 | | - return netdev_class_create_file_ns(class_attr, NULL); |
---|
4433 | | -} |
---|
4434 | | - |
---|
4435 | | -static inline void netdev_class_remove_file(const struct class_attribute *class_attr) |
---|
4436 | | -{ |
---|
4437 | | - netdev_class_remove_file_ns(class_attr, NULL); |
---|
4438 | | -} |
---|
4439 | 4864 | |
---|
4440 | 4865 | extern const struct kobj_ns_type_operations net_ns_type_operations; |
---|
4441 | 4866 | |
---|
.. | .. |
---|
4509 | 4934 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
---|
4510 | 4935 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
---|
4511 | 4936 | BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); |
---|
| 4937 | + BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); |
---|
4512 | 4938 | |
---|
4513 | 4939 | return (features & feature) == feature; |
---|
4514 | 4940 | } |
---|
.. | .. |
---|
4611 | 5037 | return dev->priv_flags & IFF_OVS_DATAPATH; |
---|
4612 | 5038 | } |
---|
4613 | 5039 | |
---|
| 5040 | +static inline bool netif_is_any_bridge_port(const struct net_device *dev) |
---|
| 5041 | +{ |
---|
| 5042 | + return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); |
---|
| 5043 | +} |
---|
| 5044 | + |
---|
4614 | 5045 | static inline bool netif_is_team_master(const struct net_device *dev) |
---|
4615 | 5046 | { |
---|
4616 | 5047 | return dev->priv_flags & IFF_TEAM; |
---|
.. | .. |
---|
4692 | 5123 | return " (unknown)"; |
---|
4693 | 5124 | } |
---|
4694 | 5125 | |
---|
4695 | | -__printf(3, 4) |
---|
| 5126 | +__printf(3, 4) __cold |
---|
4696 | 5127 | void netdev_printk(const char *level, const struct net_device *dev, |
---|
4697 | 5128 | const char *format, ...); |
---|
4698 | | -__printf(2, 3) |
---|
| 5129 | +__printf(2, 3) __cold |
---|
4699 | 5130 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
---|
4700 | | -__printf(2, 3) |
---|
| 5131 | +__printf(2, 3) __cold |
---|
4701 | 5132 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
---|
4702 | | -__printf(2, 3) |
---|
| 5133 | +__printf(2, 3) __cold |
---|
4703 | 5134 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
---|
4704 | | -__printf(2, 3) |
---|
| 5135 | +__printf(2, 3) __cold |
---|
4705 | 5136 | void netdev_err(const struct net_device *dev, const char *format, ...); |
---|
4706 | | -__printf(2, 3) |
---|
| 5137 | +__printf(2, 3) __cold |
---|
4707 | 5138 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
---|
4708 | | -__printf(2, 3) |
---|
| 5139 | +__printf(2, 3) __cold |
---|
4709 | 5140 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
---|
4710 | | -__printf(2, 3) |
---|
| 5141 | +__printf(2, 3) __cold |
---|
4711 | 5142 | void netdev_info(const struct net_device *dev, const char *format, ...); |
---|
4712 | 5143 | |
---|
4713 | 5144 | #define netdev_level_once(level, dev, fmt, ...) \ |
---|
.. | .. |
---|
4738 | 5169 | #define MODULE_ALIAS_NETDEV(device) \ |
---|
4739 | 5170 | MODULE_ALIAS("netdev-" device) |
---|
4740 | 5171 | |
---|
4741 | | -#if defined(CONFIG_DYNAMIC_DEBUG) |
---|
| 5172 | +#if defined(CONFIG_DYNAMIC_DEBUG) || \ |
---|
| 5173 | + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
---|
4742 | 5174 | #define netdev_dbg(__dev, format, args...) \ |
---|
4743 | 5175 | do { \ |
---|
4744 | 5176 | dynamic_netdev_dbg(__dev, format, ##args); \ |
---|
.. | .. |
---|
4808 | 5240 | #define netif_info(priv, type, dev, fmt, args...) \ |
---|
4809 | 5241 | netif_level(info, priv, type, dev, fmt, ##args) |
---|
4810 | 5242 | |
---|
4811 | | -#if defined(CONFIG_DYNAMIC_DEBUG) |
---|
| 5243 | +#if defined(CONFIG_DYNAMIC_DEBUG) || \ |
---|
| 5244 | + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
---|
4812 | 5245 | #define netif_dbg(priv, type, netdev, format, args...) \ |
---|
4813 | 5246 | do { \ |
---|
4814 | 5247 | if (netif_msg_##type(priv)) \ |
---|
.. | .. |
---|
4868 | 5301 | #define PTYPE_HASH_SIZE (16) |
---|
4869 | 5302 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) |
---|
4870 | 5303 | |
---|
| 5304 | +extern struct net_device *blackhole_netdev; |
---|
| 5305 | + |
---|
4871 | 5306 | #endif /* _LINUX_NETDEVICE_H */ |
---|