2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/include/linux/netdevice.h
....@@ -1,3 +1,4 @@
1
+/* SPDX-License-Identifier: GPL-2.0-or-later */
12 /*
23 * INET An implementation of the TCP/IP protocol suite for the LINUX
34 * operating system. INET is implemented using the BSD Socket
....@@ -14,11 +15,6 @@
1415 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
1516 * Bjorn Ekwall. <bj0rn@blox.se>
1617 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17
- *
18
- * This program is free software; you can redistribute it and/or
19
- * modify it under the terms of the GNU General Public License
20
- * as published by the Free Software Foundation; either version
21
- * 2 of the License, or (at your option) any later version.
2218 *
2319 * Moved to /usr/include/linux for NET3
2420 */
....@@ -58,6 +54,9 @@
5854 struct device;
5955 struct phy_device;
6056 struct dsa_port;
57
+struct ip_tunnel_parm;
58
+struct macsec_context;
59
+struct macsec_ops;
6160
6261 struct sfp_bus;
6362 /* 802.11 specific */
....@@ -67,15 +66,20 @@
6766 struct mpls_dev;
6867 /* UDP Tunnel offloads */
6968 struct udp_tunnel_info;
69
+struct udp_tunnel_nic_info;
70
+struct udp_tunnel_nic;
7071 struct bpf_prog;
7172 struct xdp_buff;
7273
74
+void synchronize_net(void);
7375 void netdev_set_default_ethtool_ops(struct net_device *dev,
7476 const struct ethtool_ops *ops);
7577
7678 /* Backlog congestion levels */
7779 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
7880 #define NET_RX_DROP 1 /* packet dropped */
81
+
82
+#define MAX_NEST_DEV 8
7983
8084 /*
8185 * Transmit return codes: transmit return codes originate from three different
....@@ -195,8 +199,8 @@
195199
196200 #ifdef CONFIG_RPS
197201 #include <linux/static_key.h>
198
-extern struct static_key rps_needed;
199
-extern struct static_key rfs_needed;
202
+extern struct static_key_false rps_needed;
203
+extern struct static_key_false rfs_needed;
200204 #endif
201205
202206 struct neighbour;
....@@ -209,9 +213,8 @@
209213 unsigned char type;
210214 #define NETDEV_HW_ADDR_T_LAN 1
211215 #define NETDEV_HW_ADDR_T_SAN 2
212
-#define NETDEV_HW_ADDR_T_SLAVE 3
213
-#define NETDEV_HW_ADDR_T_UNICAST 4
214
-#define NETDEV_HW_ADDR_T_MULTICAST 5
216
+#define NETDEV_HW_ADDR_T_UNICAST 3
217
+#define NETDEV_HW_ADDR_T_MULTICAST 4
215218 bool global_use;
216219 int sync_cnt;
217220 int refcount;
....@@ -261,9 +264,11 @@
261264 * relationship HH alignment <= LL alignment.
262265 */
263266 #define LL_RESERVED_SPACE(dev) \
264
- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
267
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
268
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
265269 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
266
- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
270
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
271
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
267272
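
The READ_ONCE() pairs with writers that may update needed_headroom while packets are in flight. A minimal sketch of how callers size headroom with these macros; the function and payload names below are mine, not part of this patch:

static struct sk_buff *example_alloc_output_skb(struct net_device *dev,
						unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;
	/* Leave room for the link-layer header the device will prepend. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	return skb;
}
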
268273 struct header_ops {
269274 int (*create) (struct sk_buff *skb, struct net_device *dev,
....@@ -275,6 +280,7 @@
275280 const struct net_device *dev,
276281 const unsigned char *haddr);
277282 bool (*validate)(const char *ll_header, unsigned int len);
283
+ __be16 (*parse_protocol)(const struct sk_buff *skb);
278284
279285 ANDROID_KABI_RESERVE(1);
280286 ANDROID_KABI_RESERVE(2);
....@@ -291,6 +297,7 @@
291297 __LINK_STATE_NOCARRIER,
292298 __LINK_STATE_LINKWATCH_PENDING,
293299 __LINK_STATE_DORMANT,
300
+ __LINK_STATE_TESTING,
294301 };
295302
296303
....@@ -331,6 +338,7 @@
331338
332339 unsigned long state;
333340 int weight;
341
+ int defer_hard_irqs_count;
334342 unsigned long gro_bitmask;
335343 int (*poll)(struct napi_struct *, int);
336344 #ifdef CONFIG_NETPOLL
....@@ -339,6 +347,8 @@
339347 struct net_device *dev;
340348 struct gro_list gro_hash[GRO_HASH_BUCKETS];
341349 struct sk_buff *skb;
350
+ struct list_head rx_list; /* Pending GRO_NORMAL skbs */
351
+ int rx_count; /* length of rx_list */
342352 struct hrtimer timer;
343353 struct list_head dev_list;
344354 struct hlist_node napi_hash_node;
....@@ -355,7 +365,7 @@
355365 NAPI_STATE_MISSED, /* reschedule a napi */
356366 NAPI_STATE_DISABLE, /* Disable pending */
357367 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
358
- NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
368
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
359369 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
360370 NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
361371 };
....@@ -365,7 +375,7 @@
365375 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
366376 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
367377 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
368
- NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
378
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
369379 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
370380 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
371381 };
....@@ -490,20 +500,6 @@
490500 }
491501
492502 /**
493
- * napi_hash_del - remove a NAPI from global table
494
- * @napi: NAPI context
495
- *
496
- * Warning: caller must observe RCU grace period
497
- * before freeing memory containing @napi, if
498
- * this function returns true.
499
- * Note: core networking stack automatically calls it
500
- * from netif_napi_del().
501
- * Drivers might want to call this helper to combine all
502
- * the needed RCU grace periods into a single one.
503
- */
504
-bool napi_hash_del(struct napi_struct *napi);
505
-
506
-/**
507503 * napi_disable - prevent NAPI from scheduling
508504 * @n: NAPI context
509505 *
....@@ -542,6 +538,32 @@
542538 msleep(1);
543539 else
544540 barrier();
541
+}
542
+
543
+/**
544
+ * napi_if_scheduled_mark_missed - if napi is running, set the
545
+ * NAPIF_STATE_MISSED
546
+ * @n: NAPI context
547
+ *
548
+ * If napi is running, set the NAPIF_STATE_MISSED, and return true if
549
+ * NAPI is scheduled.
550
+ **/
551
+static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
552
+{
553
+ unsigned long val, new;
554
+
555
+ do {
556
+ val = READ_ONCE(n->state);
557
+ if (val & NAPIF_STATE_DISABLE)
558
+ return true;
559
+
560
+ if (!(val & NAPIF_STATE_SCHED))
561
+ return false;
562
+
563
+ new = val | NAPIF_STATE_MISSED;
564
+ } while (cmpxchg(&n->state, val, new) != val);
565
+
566
+ return true;
545567 }
546568
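
A minimal sketch of the intended calling pattern (the wrapper name is hypothetical): if the NAPI is already scheduled, marking it MISSED guarantees another poll pass, so the caller only schedules it when it was idle.

static void example_kick_napi(struct napi_struct *napi)
{
	/* Already scheduled (or disabled): the MISSED bit, or the disable
	 * path, will take care of the pending work - don't reschedule.
	 */
	if (!napi_if_scheduled_mark_missed(napi))
		napi_schedule(napi);
}
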
547569 enum netdev_queue_state_t {
....@@ -592,6 +614,9 @@
592614
593615 /* Subordinate device that the queue has been assigned to */
594616 struct net_device *sb_dev;
617
+#ifdef CONFIG_XDP_SOCKETS
618
+ struct xsk_buff_pool *pool;
619
+#endif
595620 /*
596621 * write-mostly part
597622 */
....@@ -615,12 +640,32 @@
615640 } ____cacheline_aligned_in_smp;
616641
617642 extern int sysctl_fb_tunnels_only_for_init_net;
643
+extern int sysctl_devconf_inherit_init_net;
618644
645
+/*
646
+ * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
647
+ * == 1 : For initns only
648
+ * == 2 : For none.
649
+ */
619650 static inline bool net_has_fallback_tunnels(const struct net *net)
620651 {
621
- return net == &init_net ||
622
- !IS_ENABLED(CONFIG_SYSCTL) ||
623
- !sysctl_fb_tunnels_only_for_init_net;
652
+#if IS_ENABLED(CONFIG_SYSCTL)
653
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
654
+
655
+ return !fb_tunnels_only_for_init_net ||
656
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
657
+#else
658
+ return true;
659
+#endif
660
+}
661
+
662
+static inline int net_inherit_devconf(void)
663
+{
664
+#if IS_ENABLED(CONFIG_SYSCTL)
665
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
666
+#else
667
+ return 0;
668
+#endif
624669 }
625670
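
Sketch of how a tunnel module's per-netns init is expected to consult this knob; the foo_* names are placeholders, not part of this patch:

static int __net_init foo_tunnel_init_net(struct net *net)
{
	/* == 0: fallback device in every netns, == 1: init_net only, == 2: never */
	if (!net_has_fallback_tunnels(net))
		return 0;

	return foo_create_fallback_dev(net);	/* placeholder helper */
}
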
626671 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
....@@ -647,7 +692,7 @@
647692 struct rps_map {
648693 unsigned int len;
649694 struct rcu_head rcu;
650
- u16 cpus[0];
695
+ u16 cpus[];
651696 };
652697 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
653698
....@@ -669,7 +714,7 @@
669714 struct rps_dev_flow_table {
670715 unsigned int mask;
671716 struct rcu_head rcu;
672
- struct rps_dev_flow flows[0];
717
+ struct rps_dev_flow flows[];
673718 };
674719 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
675720 ((_num) * sizeof(struct rps_dev_flow)))
....@@ -687,7 +732,7 @@
687732 struct rps_sock_flow_table {
688733 u32 mask;
689734
690
- u32 ents[0] ____cacheline_aligned_in_smp;
735
+ u32 ents[] ____cacheline_aligned_in_smp;
691736 };
692737 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
693738
....@@ -706,8 +751,11 @@
706751 /* We only give a hint, preemption can change CPU under us */
707752 val |= raw_smp_processor_id();
708753
709
- if (table->ents[index] != val)
710
- table->ents[index] = val;
754
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
755
+ * here, and another one in get_rps_cpu().
756
+ */
757
+ if (READ_ONCE(table->ents[index]) != val)
758
+ WRITE_ONCE(table->ents[index], val);
711759 }
712760 }
713761
....@@ -726,6 +774,9 @@
726774 struct kobject kobj;
727775 struct net_device *dev;
728776 struct xdp_rxq_info xdp_rxq;
777
+#ifdef CONFIG_XDP_SOCKETS
778
+ struct xsk_buff_pool *pool;
779
+#endif
729780
730781 ANDROID_KABI_RESERVE(1);
731782 ANDROID_KABI_RESERVE(2);
....@@ -752,7 +803,7 @@
752803 unsigned int len;
753804 unsigned int alloc_len;
754805 struct rcu_head rcu;
755
- u16 queues[0];
806
+ u16 queues[];
756807 };
757808 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
758809 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
....@@ -763,7 +814,7 @@
763814 */
764815 struct xps_dev_maps {
765816 struct rcu_head rcu;
766
- struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
817
+ struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
767818 };
768819
769820 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
....@@ -832,6 +883,13 @@
832883 TC_SETUP_QDISC_PRIO,
833884 TC_SETUP_QDISC_MQ,
834885 TC_SETUP_QDISC_ETF,
886
+ TC_SETUP_ROOT_QDISC,
887
+ TC_SETUP_QDISC_GRED,
888
+ TC_SETUP_QDISC_TAPRIO,
889
+ TC_SETUP_FT,
890
+ TC_SETUP_QDISC_ETS,
891
+ TC_SETUP_QDISC_TBF,
892
+ TC_SETUP_QDISC_FIFO,
835893 };
836894
837895 /* These structures hold the attributes of bpf state that are being passed
....@@ -847,21 +905,29 @@
847905 */
848906 XDP_SETUP_PROG,
849907 XDP_SETUP_PROG_HW,
850
- XDP_QUERY_PROG,
851
- XDP_QUERY_PROG_HW,
852908 /* BPF program for offload callbacks, invoked at program load time. */
853
- BPF_OFFLOAD_VERIFIER_PREP,
854
- BPF_OFFLOAD_TRANSLATE,
855
- BPF_OFFLOAD_DESTROY,
856909 BPF_OFFLOAD_MAP_ALLOC,
857910 BPF_OFFLOAD_MAP_FREE,
858
- XDP_QUERY_XSK_UMEM,
859
- XDP_SETUP_XSK_UMEM,
911
+ XDP_SETUP_XSK_POOL,
860912 };
861913
862914 struct bpf_prog_offload_ops;
863915 struct netlink_ext_ack;
864916 struct xdp_umem;
917
+struct xdp_dev_bulk_queue;
918
+struct bpf_xdp_link;
919
+
920
+enum bpf_xdp_mode {
921
+ XDP_MODE_SKB = 0,
922
+ XDP_MODE_DRV = 1,
923
+ XDP_MODE_HW = 2,
924
+ __MAX_XDP_MODE
925
+};
926
+
927
+struct bpf_xdp_entity {
928
+ struct bpf_prog *prog;
929
+ struct bpf_xdp_link *link;
930
+};
865931
866932 struct netdev_bpf {
867933 enum bpf_netdev_command command;
....@@ -872,32 +938,21 @@
872938 struct bpf_prog *prog;
873939 struct netlink_ext_ack *extack;
874940 };
875
- /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
876
- struct {
877
- u32 prog_id;
878
- /* flags with which program was installed */
879
- u32 prog_flags;
880
- };
881
- /* BPF_OFFLOAD_VERIFIER_PREP */
882
- struct {
883
- struct bpf_prog *prog;
884
- const struct bpf_prog_offload_ops *ops; /* callee set */
885
- } verifier;
886
- /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
887
- struct {
888
- struct bpf_prog *prog;
889
- } offload;
890941 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
891942 struct {
892943 struct bpf_offloaded_map *offmap;
893944 };
894
- /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
945
+ /* XDP_SETUP_XSK_POOL */
895946 struct {
896
- struct xdp_umem *umem; /* out for query*/
897
- u16 queue_id; /* in for query */
947
+ struct xsk_buff_pool *pool;
948
+ u16 queue_id;
898949 } xsk;
899950 };
900951 };
952
+
953
+/* Flags for ndo_xsk_wakeup. */
954
+#define XDP_WAKEUP_RX (1 << 0)
955
+#define XDP_WAKEUP_TX (1 << 1)
901956
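
A hedged sketch of an ndo_xsk_wakeup() handler interpreting these flags; the foo_* queue lookup is an assumption, not part of this patch:

static int foo_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct napi_struct *napi = foo_queue_napi(dev, queue_id);	/* assumed lookup */

	if (!netif_running(dev))
		return -ENETDOWN;

	/* The AF_XDP socket asked for RX and/or TX service on this queue;
	 * make sure the poll routine that handles it runs soon.
	 */
	if (flags & (XDP_WAKEUP_RX | XDP_WAKEUP_TX))
		if (!napi_if_scheduled_mark_missed(napi))
			napi_schedule(napi);
	return 0;
}
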
902957 #ifdef CONFIG_XFRM_OFFLOAD
903958 struct xfrmdev_ops {
....@@ -915,35 +970,27 @@
915970 };
916971 #endif
917972
918
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
919
-enum tls_offload_ctx_dir {
920
- TLS_OFFLOAD_CTX_DIR_RX,
921
- TLS_OFFLOAD_CTX_DIR_TX,
922
-};
923
-
924
-struct tls_crypto_info;
925
-struct tls_context;
926
-
927
-struct tlsdev_ops {
928
- int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
929
- enum tls_offload_ctx_dir direction,
930
- struct tls_crypto_info *crypto_info,
931
- u32 start_offload_tcp_sn);
932
- void (*tls_dev_del)(struct net_device *netdev,
933
- struct tls_context *ctx,
934
- enum tls_offload_ctx_dir direction);
935
- void (*tls_dev_resync_rx)(struct net_device *netdev,
936
- struct sock *sk, u32 seq, u64 rcd_sn);
937
- ANDROID_KABI_RESERVE(1);
938
- ANDROID_KABI_RESERVE(2);
939
- ANDROID_KABI_RESERVE(3);
940
- ANDROID_KABI_RESERVE(4);
941
-};
942
-#endif
943
-
944973 struct dev_ifalias {
945974 struct rcu_head rcuhead;
946975 char ifalias[];
976
+};
977
+
978
+struct devlink;
979
+struct tlsdev_ops;
980
+
981
+struct netdev_name_node {
982
+ struct hlist_node hlist;
983
+ struct list_head list;
984
+ struct net_device *dev;
985
+ const char *name;
986
+};
987
+
988
+int netdev_name_node_alt_create(struct net_device *dev, const char *name);
989
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
990
+
991
+struct netdev_net_notifier {
992
+ struct list_head list;
993
+ struct notifier_block *nb;
947994 };
948995
949996 /*
....@@ -989,8 +1036,7 @@
9891036 * those the driver believes to be appropriate.
9901037 *
9911038 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
992
- * struct net_device *sb_dev,
993
- * select_queue_fallback_t fallback);
1039
+ * struct net_device *sb_dev);
9941040 * Called to decide which queue to use when device supports multiple
9951041 * transmit queues.
9961042 *
....@@ -1025,7 +1071,7 @@
10251071 * Called when a user wants to change the Maximum Transfer Unit
10261072 * of a device.
10271073 *
1028
- * void (*ndo_tx_timeout)(struct net_device *dev);
1074
+ * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
10291075 * Callback used when the transmitter has not made any progress
10301076 * for dev->watchdog ticks.
10311077 *
....@@ -1144,6 +1190,12 @@
11441190 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
11451191 * Called to release previously enslaved netdev.
11461192 *
1193
+ * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
1194
+ * struct sk_buff *skb,
1195
+ * bool all_slaves);
1196
+ * Get the xmit slave of master device. If all_slaves is true, function
1197
+ * assume all the slaves can transmit.
1198
+ *
11471199 * Feature/offload setting functions.
11481200 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
11491201 * netdev_features_t features);
....@@ -1158,7 +1210,8 @@
11581210 *
11591211 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
11601212 * struct net_device *dev,
1161
- * const unsigned char *addr, u16 vid, u16 flags)
1213
+ * const unsigned char *addr, u16 vid, u16 flags,
1214
+ * struct netlink_ext_ack *extack);
11621215 * Adds an FDB entry to dev for addr.
11631216 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
11641217 * struct net_device *dev,
....@@ -1171,7 +1224,7 @@
11711224 * entries to skb and update idx with the number of entries.
11721225 *
11731226 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1174
- * u16 flags)
1227
+ * u16 flags, struct netlink_ext_ack *extack)
11751228 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
11761229 * struct net_device *dev, u32 filter_mask,
11771230 * int nlflags)
....@@ -1191,6 +1244,10 @@
11911244 * Called to get ID of physical port of this device. If driver does
11921245 * not implement this, it is assumed that the hw is not able to have
11931246 * multiple net devices on single physical port.
1247
+ *
1248
+ * int (*ndo_get_port_parent_id)(struct net_device *dev,
1249
+ * struct netdev_phys_item_id *ppid)
1250
+ * Called to get the parent ID of the physical port of this device.
11941251 *
11951252 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
11961253 * struct udp_tunnel_info *ti);
....@@ -1249,6 +1306,22 @@
12491306 * that got dropped are freed/returned via xdp_return_frame().
12501307 * Returns negative number, means general error invoking ndo, meaning
12511308 * no frames were xmit'ed and core-caller will free all frames.
1309
+ * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
1310
+ * This function is used to wake up the softirq, ksoftirqd or kthread
1311
+ * responsible for sending and/or receiving packets on a specific
1312
+ * queue id bound to an AF_XDP socket. The flags field specifies if
1313
+ * only RX, only Tx, or both should be woken up using the flags
1314
+ * XDP_WAKEUP_RX and XDP_WAKEUP_TX.
1315
+ * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
1316
+ * Get devlink port instance associated with a given netdev.
1317
+ * Called with a reference on the netdevice and devlink locks only,
1318
+ * rtnl_lock is not held.
1319
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
1320
+ * int cmd);
1321
+ * Add, change, delete or get information on an IPv4 tunnel.
1322
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
1323
+ * If a device is paired with a peer device, return the peer instance.
1324
+ * The caller must be under RCU read context.
12521325 */
12531326 struct net_device_ops {
12541327 int (*ndo_init)(struct net_device *dev);
....@@ -1262,8 +1335,7 @@
12621335 netdev_features_t features);
12631336 u16 (*ndo_select_queue)(struct net_device *dev,
12641337 struct sk_buff *skb,
1265
- struct net_device *sb_dev,
1266
- select_queue_fallback_t fallback);
1338
+ struct net_device *sb_dev);
12671339 void (*ndo_change_rx_flags)(struct net_device *dev,
12681340 int flags);
12691341 void (*ndo_set_rx_mode)(struct net_device *dev);
....@@ -1278,7 +1350,8 @@
12781350 int new_mtu);
12791351 int (*ndo_neigh_setup)(struct net_device *dev,
12801352 struct neigh_parms *);
1281
- void (*ndo_tx_timeout) (struct net_device *dev);
1353
+ void (*ndo_tx_timeout) (struct net_device *dev,
1354
+ unsigned int txqueue);
12821355
12831356 void (*ndo_get_stats64)(struct net_device *dev,
12841357 struct rtnl_link_stats64 *storage);
....@@ -1324,6 +1397,10 @@
13241397 struct nlattr *port[]);
13251398 int (*ndo_get_vf_port)(struct net_device *dev,
13261399 int vf, struct sk_buff *skb);
1400
+ int (*ndo_get_vf_guid)(struct net_device *dev,
1401
+ int vf,
1402
+ struct ifla_vf_guid *node_guid,
1403
+ struct ifla_vf_guid *port_guid);
13271404 int (*ndo_set_vf_guid)(struct net_device *dev,
13281405 int vf, u64 guid,
13291406 int guid_type);
....@@ -1368,6 +1445,9 @@
13681445 struct netlink_ext_ack *extack);
13691446 int (*ndo_del_slave)(struct net_device *dev,
13701447 struct net_device *slave_dev);
1448
+ struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
1449
+ struct sk_buff *skb,
1450
+ bool all_slaves);
13711451 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
13721452 netdev_features_t features);
13731453 int (*ndo_set_features)(struct net_device *dev,
....@@ -1382,7 +1462,8 @@
13821462 struct net_device *dev,
13831463 const unsigned char *addr,
13841464 u16 vid,
1385
- u16 flags);
1465
+ u16 flags,
1466
+ struct netlink_ext_ack *extack);
13861467 int (*ndo_fdb_del)(struct ndmsg *ndm,
13871468 struct nlattr *tb[],
13881469 struct net_device *dev,
....@@ -1393,10 +1474,16 @@
13931474 struct net_device *dev,
13941475 struct net_device *filter_dev,
13951476 int *idx);
1396
-
1477
+ int (*ndo_fdb_get)(struct sk_buff *skb,
1478
+ struct nlattr *tb[],
1479
+ struct net_device *dev,
1480
+ const unsigned char *addr,
1481
+ u16 vid, u32 portid, u32 seq,
1482
+ struct netlink_ext_ack *extack);
13971483 int (*ndo_bridge_setlink)(struct net_device *dev,
13981484 struct nlmsghdr *nlh,
1399
- u16 flags);
1485
+ u16 flags,
1486
+ struct netlink_ext_ack *extack);
14001487 int (*ndo_bridge_getlink)(struct sk_buff *skb,
14011488 u32 pid, u32 seq,
14021489 struct net_device *dev,
....@@ -1409,6 +1496,8 @@
14091496 bool new_carrier);
14101497 int (*ndo_get_phys_port_id)(struct net_device *dev,
14111498 struct netdev_phys_item_id *ppid);
1499
+ int (*ndo_get_port_parent_id)(struct net_device *dev,
1500
+ struct netdev_phys_item_id *ppid);
14121501 int (*ndo_get_phys_port_name)(struct net_device *dev,
14131502 char *name, size_t len);
14141503 void (*ndo_udp_tunnel_add)(struct net_device *dev,
....@@ -1420,7 +1509,6 @@
14201509 void (*ndo_dfwd_del_station)(struct net_device *pdev,
14211510 void *priv);
14221511
1423
- int (*ndo_get_lock_subclass)(struct net_device *dev);
14241512 int (*ndo_set_tx_maxrate)(struct net_device *dev,
14251513 int queue_index,
14261514 u32 maxrate);
....@@ -1436,8 +1524,12 @@
14361524 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
14371525 struct xdp_frame **xdp,
14381526 u32 flags);
1439
- int (*ndo_xsk_async_xmit)(struct net_device *dev,
1440
- u32 queue_id);
1527
+ int (*ndo_xsk_wakeup)(struct net_device *dev,
1528
+ u32 queue_id, u32 flags);
1529
+ struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
1530
+ int (*ndo_tunnel_ctl)(struct net_device *dev,
1531
+ struct ip_tunnel_parm *p, int cmd);
1532
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
14411533
14421534 ANDROID_KABI_RESERVE(1);
14431535 ANDROID_KABI_RESERVE(2);
....@@ -1560,6 +1652,12 @@
15601652 #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
15611653 #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
15621654
1655
+/* Specifies the type of the struct net_device::ml_priv pointer */
1656
+enum netdev_ml_priv_type {
1657
+ ML_PRIV_NONE,
1658
+ ML_PRIV_CAN,
1659
+};
1660
+
15631661 /**
15641662 * struct net_device - The DEVICE structure.
15651663 *
....@@ -1571,7 +1669,7 @@
15711669 * (i.e. as seen by users in the "Space.c" file). It is the name
15721670 * of the interface.
15731671 *
1574
- * @name_hlist: Device name hash chain, please keep it close to name[]
1672
+ * @name_node: Name hashlist node
15751673 * @ifalias: SNMP alias
15761674 * @mem_end: Shared memory end
15771675 * @mem_start: Shared memory start
....@@ -1600,6 +1698,7 @@
16001698 * and drivers will need to set them appropriately.
16011699 *
16021700 * @mpls_features: Mask of features inheritable by MPLS
1701
+ * @gso_partial_features: value(s) from NETIF_F_GSO\*
16031702 *
16041703 * @ifindex: interface index
16051704 * @group: The group the device belongs to
....@@ -1624,8 +1723,11 @@
16241723 * @netdev_ops: Includes several pointers to callbacks,
16251724 * if one wants to override the ndo_*() functions
16261725 * @ethtool_ops: Management operations
1726
+ * @l3mdev_ops: Layer 3 master device operations
16271727 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
16281728 * discovery handling. Necessary for e.g. 6LoWPAN.
1729
+ * @xfrmdev_ops: Transformation offload operations
1730
+ * @tlsdev_ops: Transport Layer Security offload operations
16291731 * @header_ops: Includes callbacks for creating,parsing,caching,etc
16301732 * of Layer 2 headers.
16311733 *
....@@ -1664,6 +1766,7 @@
16641766 * @dev_port: Used to differentiate devices that share
16651767 * the same function
16661768 * @addr_list_lock: XXX: need comments on this one
1769
+ * @name_assign_type: network interface name assignment type
16671770 * @uc_promisc: Counter that indicates promiscuous mode
16681771 * has been enabled due to the need to listen to
16691772 * additional unicast addresses in a device that
....@@ -1682,10 +1785,12 @@
16821785 * @tipc_ptr: TIPC specific data
16831786 * @atalk_ptr: AppleTalk link
16841787 * @ip_ptr: IPv4 specific data
1685
- * @dn_ptr: DECnet specific data
16861788 * @ip6_ptr: IPv6 specific data
16871789 * @ax25_ptr: AX.25 specific data
16881790 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1791
+ * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1792
+ * device struct
1793
+ * @mpls_ptr: mpls_dev struct pointer
16891794 *
16901795 * @dev_addr: Hw address (before bcast,
16911796 * because most packets are unicast)
....@@ -1694,12 +1799,17 @@
16941799 * @num_rx_queues: Number of RX queues
16951800 * allocated at register_netdev() time
16961801 * @real_num_rx_queues: Number of RX queues currently active in device
1802
+ * @xdp_prog: XDP sockets filter program pointer
1803
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
1804
+ * @napi_defer_hard_irqs: If not zero, provides a counter that
1805
+ * allows avoiding NIC hard IRQs on busy queues.
16971806 *
16981807 * @rx_handler: handler for received packets
16991808 * @rx_handler_data: XXX: need comments on this one
17001809 * @miniq_ingress: ingress/clsact qdisc specific data for
17011810 * ingress processing
17021811 * @ingress_queue: XXX: need comments on this one
1812
+ * @nf_hooks_ingress: netfilter hooks executed for ingress packets
17031813 * @broadcast: hw bcast address
17041814 *
17051815 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
....@@ -1714,14 +1824,19 @@
17141824 * @qdisc: Root qdisc from userspace point of view
17151825 * @tx_queue_len: Max frames per queue allowed
17161826 * @tx_global_lock: XXX: need comments on this one
1827
+ * @xdp_bulkq: XDP device bulk queue
1828
+ * @xps_cpus_map: all CPUs map for XPS device
1829
+ * @xps_rxqs_map: all RXQs map for XPS device
17171830 *
17181831 * @xps_maps: XXX: need comments on this one
17191832 * @miniq_egress: clsact qdisc specific data for
17201833 * egress processing
1834
+ * @qdisc_hash: qdisc hash table
17211835 * @watchdog_timeo: Represents the timeout that is used by
17221836 * the watchdog (see dev_watchdog())
17231837 * @watchdog_timer: List of timers
17241838 *
1839
+ * @proto_down_reason: reason a netdev interface is held down
17251840 * @pcpu_refcnt: Number of references to this device
17261841 * @todo_list: Delayed register/unregister
17271842 * @link_watch_list: XXX: need comments on this one
....@@ -1737,6 +1852,7 @@
17371852 * @nd_net: Network namespace this network device is inside
17381853 *
17391854 * @ml_priv: Mid-layer private
1855
+ * @ml_priv_type: Mid-layer private type
17401856 * @lstats: Loopback statistics
17411857 * @tstats: Tunnel statistics
17421858 * @dstats: Dummy statistics
....@@ -1777,13 +1893,29 @@
17771893 *
17781894 * @wol_enabled: Wake-on-LAN is enabled
17791895 *
1896
+ * @net_notifier_list: List of per-net netdev notifier block
1897
+ * that follow this device when it is moved
1898
+ * to another network namespace.
1899
+ *
1900
+ * @macsec_ops: MACsec offloading ops
1901
+ *
1902
+ * @udp_tunnel_nic_info: static structure describing the UDP tunnel
1903
+ * offload capabilities of the device
1904
+ * @udp_tunnel_nic: UDP tunnel offload state
1905
+ * @xdp_state: stores info on attached XDP BPF programs
1906
+ *
1907
+ * @nested_level: Used as a parameter of spin_lock_nested() of
1908
+ * dev->addr_list_lock.
1909
+ * @unlink_list: As netif_addr_lock() can be called recursively,
1910
+ * keep a list of interfaces to be deleted.
1911
+ *
17801912 * FIXME: cleanup struct net_device such that network protocol info
17811913 * moves out.
17821914 */
17831915
17841916 struct net_device {
17851917 char name[IFNAMSIZ];
1786
- struct hlist_node name_hlist;
1918
+ struct netdev_name_node *name_node;
17871919 struct dev_ifalias __rcu *ifalias;
17881920 /*
17891921 * I/O specific fields
....@@ -1841,9 +1973,6 @@
18411973 #endif
18421974 const struct net_device_ops *netdev_ops;
18431975 const struct ethtool_ops *ethtool_ops;
1844
-#ifdef CONFIG_NET_SWITCHDEV
1845
- const struct switchdev_ops *switchdev_ops;
1846
-#endif
18471976 #ifdef CONFIG_NET_L3_MASTER_DEV
18481977 const struct l3mdev_ops *l3mdev_ops;
18491978 #endif
....@@ -1884,6 +2013,7 @@
18842013 unsigned short type;
18852014 unsigned short hard_header_len;
18862015 unsigned char min_header_len;
2016
+ unsigned char name_assign_type;
18872017
18882018 unsigned short needed_headroom;
18892019 unsigned short needed_tailroom;
....@@ -1894,12 +2024,12 @@
18942024 unsigned char addr_len;
18952025 unsigned char upper_level;
18962026 unsigned char lower_level;
2027
+
18972028 unsigned short neigh_priv_len;
18982029 unsigned short dev_id;
18992030 unsigned short dev_port;
19002031 spinlock_t addr_list_lock;
1901
- unsigned char name_assign_type;
1902
- bool uc_promisc;
2032
+
19032033 struct netdev_hw_addr_list uc;
19042034 struct netdev_hw_addr_list mc;
19052035 struct netdev_hw_addr_list dev_addrs;
....@@ -1907,8 +2037,15 @@
19072037 #ifdef CONFIG_SYSFS
19082038 struct kset *queues_kset;
19092039 #endif
2040
+#ifdef CONFIG_LOCKDEP
2041
+ struct list_head unlink_list;
2042
+#endif
19102043 unsigned int promiscuity;
19112044 unsigned int allmulti;
2045
+ bool uc_promisc;
2046
+#ifdef CONFIG_LOCKDEP
2047
+ unsigned char nested_level;
2048
+#endif
19122049
19132050
19142051 /* Protocol-specific pointers */
....@@ -1926,9 +2063,6 @@
19262063 void *atalk_ptr;
19272064 #endif
19282065 struct in_device __rcu *ip_ptr;
1929
-#if IS_ENABLED(CONFIG_DECNET)
1930
- struct dn_dev __rcu *dn_ptr;
1931
-#endif
19322066 struct inet6_dev __rcu *ip6_ptr;
19332067 #if IS_ENABLED(CONFIG_AX25)
19342068 void *ax25_ptr;
....@@ -1951,6 +2085,7 @@
19512085
19522086 struct bpf_prog __rcu *xdp_prog;
19532087 unsigned long gro_flush_timeout;
2088
+ int napi_defer_hard_irqs;
19542089 rx_handler_func_t __rcu *rx_handler;
19552090 void __rcu *rx_handler_data;
19562091
....@@ -1974,13 +2109,11 @@
19742109 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
19752110 unsigned int num_tx_queues;
19762111 unsigned int real_num_tx_queues;
1977
- struct Qdisc *qdisc;
1978
-#ifdef CONFIG_NET_SCHED
1979
- DECLARE_HASHTABLE (qdisc_hash, 4);
1980
-#endif
2112
+ struct Qdisc __rcu *qdisc;
19812113 unsigned int tx_queue_len;
19822114 spinlock_t tx_global_lock;
1983
- int watchdog_timeo;
2115
+
2116
+ struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
19842117
19852118 #ifdef CONFIG_XPS
19862119 struct xps_dev_maps __rcu *xps_cpus_map;
....@@ -1990,11 +2123,17 @@
19902123 struct mini_Qdisc __rcu *miniq_egress;
19912124 #endif
19922125
2126
+#ifdef CONFIG_NET_SCHED
2127
+ DECLARE_HASHTABLE (qdisc_hash, 4);
2128
+#endif
19932129 /* These may be needed for future network-power-down code. */
19942130 struct timer_list watchdog_timer;
2131
+ int watchdog_timeo;
19952132
1996
- int __percpu *pcpu_refcnt;
2133
+ u32 proto_down_reason;
2134
+
19972135 struct list_head todo_list;
2136
+ int __percpu *pcpu_refcnt;
19982137
19992138 struct list_head link_watch_list;
20002139
....@@ -2023,12 +2162,13 @@
20232162 possible_net_t nd_net;
20242163
20252164 /* mid-layer private */
2165
+ void *ml_priv;
2166
+ enum netdev_ml_priv_type ml_priv_type;
2167
+
20262168 union {
2027
- void *ml_priv;
20282169 struct pcpu_lstats __percpu *lstats;
20292170 struct pcpu_sw_netstats __percpu *tstats;
20302171 struct pcpu_dstats __percpu *dstats;
2031
- struct pcpu_vstats __percpu *vstats;
20322172 };
20332173
20342174 #if IS_ENABLED(CONFIG_GARP)
....@@ -2070,6 +2210,18 @@
20702210 bool proto_down;
20712211 unsigned wol_enabled:1;
20722212
2213
+ struct list_head net_notifier_list;
2214
+
2215
+#if IS_ENABLED(CONFIG_MACSEC)
2216
+ /* MACsec management functions */
2217
+ const struct macsec_ops *macsec_ops;
2218
+#endif
2219
+ const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
2220
+ struct udp_tunnel_nic *udp_tunnel_nic;
2221
+
2222
+ /* protected by rtnl_lock */
2223
+ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
2224
+
20732225 ANDROID_KABI_RESERVE(1);
20742226 ANDROID_KABI_RESERVE(2);
20752227 ANDROID_KABI_RESERVE(3);
....@@ -2078,7 +2230,6 @@
20782230 ANDROID_KABI_RESERVE(6);
20792231 ANDROID_KABI_RESERVE(7);
20802232 ANDROID_KABI_RESERVE(8);
2081
-
20822233 };
20832234 #define to_net_dev(d) container_of(d, struct net_device, dev)
20842235
....@@ -2116,6 +2267,22 @@
21162267 int netdev_get_num_tc(struct net_device *dev)
21172268 {
21182269 return dev->num_tc;
2270
+}
2271
+
2272
+static inline void net_prefetch(void *p)
2273
+{
2274
+ prefetch(p);
2275
+#if L1_CACHE_BYTES < 128
2276
+ prefetch((u8 *)p + L1_CACHE_BYTES);
2277
+#endif
2278
+}
2279
+
2280
+static inline void net_prefetchw(void *p)
2281
+{
2282
+ prefetchw(p);
2283
+#if L1_CACHE_BYTES < 128
2284
+ prefetchw((u8 *)p + L1_CACHE_BYTES);
2285
+#endif
21192286 }
21202287
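
These helpers pull in a second cache line when frames or descriptors span more than L1_CACHE_BYTES. A sketch of the intended use in an RX path; the next_desc argument is an assumption:

static inline void example_prefetch_rx(struct sk_buff *skb, void *next_desc)
{
	net_prefetch(skb->data);	/* headers are about to be read */
	net_prefetchw(next_desc);	/* descriptor is about to be rewritten */
}
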
21212288 void netdev_unbind_sb_channel(struct net_device *dev,
....@@ -2165,15 +2332,17 @@
21652332 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
21662333 (dev)->qdisc_running_key = &qdisc_running_key; \
21672334 lockdep_set_class(&(dev)->addr_list_lock, \
2168
- &dev_addr_list_lock_key); \
2335
+ &dev_addr_list_lock_key); \
21692336 for (i = 0; i < (dev)->num_tx_queues; i++) \
21702337 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
21712338 &qdisc_xmit_lock_key); \
21722339 }
21732340
2174
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2175
- struct sk_buff *skb,
2176
- struct net_device *sb_dev);
2341
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2342
+ struct net_device *sb_dev);
2343
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2344
+ struct sk_buff *skb,
2345
+ struct net_device *sb_dev);
21772346
21782347 /* returns the headroom that the master device needs to take in account
21792348 * when forwarding to this dev
....@@ -2193,6 +2362,29 @@
21932362 static inline void netdev_reset_rx_headroom(struct net_device *dev)
21942363 {
21952364 netdev_set_rx_headroom(dev, -1);
2365
+}
2366
+
2367
+static inline void *netdev_get_ml_priv(struct net_device *dev,
2368
+ enum netdev_ml_priv_type type)
2369
+{
2370
+ if (dev->ml_priv_type != type)
2371
+ return NULL;
2372
+
2373
+ return dev->ml_priv;
2374
+}
2375
+
2376
+static inline void netdev_set_ml_priv(struct net_device *dev,
2377
+ void *ml_priv,
2378
+ enum netdev_ml_priv_type type)
2379
+{
2380
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2381
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2382
+ dev->ml_priv_type, type);
2383
+ WARN(!dev->ml_priv_type && dev->ml_priv,
2384
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2385
+
2386
+ dev->ml_priv = ml_priv;
2387
+ dev->ml_priv_type = type;
21962388 }
21972389
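
Sketch of the accessor pattern these helpers enforce (mirrors the CAN use case; the foo_* names are placeholders):

struct foo_can_priv {
	int restart_ms;
};

static void foo_attach_ml_priv(struct net_device *dev, struct foo_can_priv *priv)
{
	netdev_set_ml_priv(dev, priv, ML_PRIV_CAN);
}

static struct foo_can_priv *foo_ml_priv(struct net_device *dev)
{
	/* Returns NULL if ml_priv was set with a different type. */
	return netdev_get_ml_priv(dev, ML_PRIV_CAN);
}
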
21982390 /*
....@@ -2271,12 +2463,26 @@
22712463 }
22722464
22732465 /**
2466
+ * __netif_napi_del - remove a NAPI context
2467
+ * @napi: NAPI context
2468
+ *
2469
+ * Warning: caller must observe RCU grace period before freeing memory
2470
+ * containing @napi. Drivers might want to call this helper to combine
2471
+ * all the needed RCU grace periods into a single one.
2472
+ */
2473
+void __netif_napi_del(struct napi_struct *napi);
2474
+
2475
+/**
22742476 * netif_napi_del - remove a NAPI context
22752477 * @napi: NAPI context
22762478 *
22772479 * netif_napi_del() removes a NAPI context from the network device NAPI list
22782480 */
2279
-void netif_napi_del(struct napi_struct *napi);
2481
+static inline void netif_napi_del(struct napi_struct *napi)
2482
+{
2483
+ __netif_napi_del(napi);
2484
+ synchronize_net();
2485
+}
22802486
22812487 struct napi_gro_cb {
22822488 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
....@@ -2335,7 +2541,8 @@
23352541 /* Number of gro_receive callbacks this packet already went through */
23362542 u8 recursion_counter:4;
23372543
2338
- /* 1 bit hole */
2544
+ /* GRO is done by frag_list pointer chaining. */
2545
+ u8 is_flist:1;
23392546
23402547 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
23412548 __wsum csum;
....@@ -2382,6 +2589,7 @@
23822589
23832590 struct packet_type {
23842591 __be16 type; /* This is really htons(ether_type). */
2592
+ bool ignore_outgoing;
23852593 struct net_device *dev; /* NULL is wildcarded here */
23862594 int (*func) (struct sk_buff *,
23872595 struct net_device *,
....@@ -2424,7 +2632,35 @@
24242632 u64 tx_packets;
24252633 u64 tx_bytes;
24262634 struct u64_stats_sync syncp;
2427
-};
2635
+} __aligned(4 * sizeof(u64));
2636
+
2637
+struct pcpu_lstats {
2638
+ u64_stats_t packets;
2639
+ u64_stats_t bytes;
2640
+ struct u64_stats_sync syncp;
2641
+} __aligned(2 * sizeof(u64));
2642
+
2643
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2644
+
2645
+static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2646
+{
2647
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2648
+
2649
+ u64_stats_update_begin(&tstats->syncp);
2650
+ tstats->rx_bytes += len;
2651
+ tstats->rx_packets++;
2652
+ u64_stats_update_end(&tstats->syncp);
2653
+}
2654
+
2655
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2656
+{
2657
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2658
+
2659
+ u64_stats_update_begin(&lstats->syncp);
2660
+ u64_stats_add(&lstats->bytes, len);
2661
+ u64_stats_inc(&lstats->packets);
2662
+ u64_stats_update_end(&lstats->syncp);
2663
+}
24282664
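
Sketch of a tunnel-style receive path updating the per-CPU counters that dev->tstats points at; allocation of dev->tstats via netdev_alloc_pcpu_stats() is assumed and not shown:

static void foo_tunnel_rcv(struct net_device *dev, struct sk_buff *skb)
{
	/* Count before handing the skb off; it may be freed afterwards. */
	dev_sw_netstats_rx_add(dev, skb->len);
	skb->dev = dev;
	netif_rx(skb);
}
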
24292665 #define __netdev_alloc_pcpu_stats(type, gfp) \
24302666 ({ \
....@@ -2489,7 +2725,8 @@
24892725 NETDEV_REGISTER,
24902726 NETDEV_UNREGISTER,
24912727 NETDEV_CHANGEMTU, /* notify after mtu change happened */
2492
- NETDEV_CHANGEADDR,
2728
+ NETDEV_CHANGEADDR, /* notify after the address change */
2729
+ NETDEV_PRE_CHANGEADDR, /* notify before the address change */
24932730 NETDEV_GOING_DOWN,
24942731 NETDEV_CHANGENAME,
24952732 NETDEV_FEAT_CHANGE,
....@@ -2520,6 +2757,15 @@
25202757
25212758 int register_netdevice_notifier(struct notifier_block *nb);
25222759 int unregister_netdevice_notifier(struct notifier_block *nb);
2760
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2761
+int unregister_netdevice_notifier_net(struct net *net,
2762
+ struct notifier_block *nb);
2763
+int register_netdevice_notifier_dev_net(struct net_device *dev,
2764
+ struct notifier_block *nb,
2765
+ struct netdev_net_notifier *nn);
2766
+int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2767
+ struct notifier_block *nb,
2768
+ struct netdev_net_notifier *nn);
25232769
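
Sketch of the new *_dev_net variants: the netdev_net_notifier is typically embedded in driver private data so the core can move the notifier along when the device changes network namespace (foo_* names are placeholders):

struct foo_priv {
	struct notifier_block nb;
	struct netdev_net_notifier nn;
};

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	netdev_dbg(dev, "netdev event %lu\n", event);
	/* react to NETDEV_UP, NETDEV_PRE_CHANGEADDR, ... */
	return NOTIFY_DONE;
}

static int foo_register_notifier(struct net_device *dev, struct foo_priv *priv)
{
	priv->nb.notifier_call = foo_netdev_event;
	return register_netdevice_notifier_dev_net(dev, &priv->nb, &priv->nn);
}
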
25242770 struct netdev_notifier_info {
25252771 struct net_device *dev;
....@@ -2549,6 +2795,11 @@
25492795 struct netdev_notifier_changelowerstate_info {
25502796 struct netdev_notifier_info info; /* must be first */
25512797 void *lower_state_info; /* is lower dev state */
2798
+};
2799
+
2800
+struct netdev_notifier_pre_changeaddr_info {
2801
+ struct netdev_notifier_info info; /* must be first */
2802
+ const unsigned char *dev_addr;
25522803 };
25532804
25542805 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
....@@ -2585,6 +2836,9 @@
25852836 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
25862837 #define for_each_netdev_continue(net, d) \
25872838 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2839
+#define for_each_netdev_continue_reverse(net, d) \
2840
+ list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
2841
+ dev_list)
25882842 #define for_each_netdev_continue_rcu(net, d) \
25892843 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
25902844 #define for_each_netdev_in_bond_rcu(bond, slave) \
....@@ -2645,20 +2899,30 @@
26452899 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
26462900 struct net_device *__dev_get_by_name(struct net *net, const char *name);
26472901 int dev_alloc_name(struct net_device *dev, const char *name);
2648
-int dev_open(struct net_device *dev);
2902
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
26492903 void dev_close(struct net_device *dev);
26502904 void dev_close_many(struct list_head *head, bool unlink);
26512905 void dev_disable_lro(struct net_device *dev);
26522906 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
26532907 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2654
- struct net_device *sb_dev,
2655
- select_queue_fallback_t fallback);
2908
+ struct net_device *sb_dev);
26562909 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2657
- struct net_device *sb_dev,
2658
- select_queue_fallback_t fallback);
2910
+ struct net_device *sb_dev);
2911
+
26592912 int dev_queue_xmit(struct sk_buff *skb);
26602913 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2661
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2914
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2915
+
2916
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
2917
+{
2918
+ int ret;
2919
+
2920
+ ret = __dev_direct_xmit(skb, queue_id);
2921
+ if (!dev_xmit_complete(ret))
2922
+ kfree_skb(skb);
2923
+ return ret;
2924
+}
2925
+
26622926 int register_netdevice(struct net_device *dev);
26632927 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
26642928 void unregister_netdevice_many(struct list_head *head);
....@@ -2670,9 +2934,11 @@
26702934 int netdev_refcnt_read(const struct net_device *dev);
26712935 void free_netdev(struct net_device *dev);
26722936 void netdev_freemem(struct net_device *dev);
2673
-void synchronize_net(void);
26742937 int init_dummy_netdev(struct net_device *dev);
26752938
2939
+struct net_device *netdev_get_xmit_slave(struct net_device *dev,
2940
+ struct sk_buff *skb,
2941
+ bool all_slaves);
26762942 struct net_device *dev_get_by_index(struct net *net, int ifindex);
26772943 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
26782944 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
....@@ -2680,6 +2946,7 @@
26802946 int netdev_get_name(struct net *net, char *name, int ifindex);
26812947 int dev_restart(struct net_device *dev);
26822948 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2949
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
26832950
26842951 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
26852952 {
....@@ -2816,16 +3083,16 @@
28163083 }
28173084
28183085 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2819
- __sum16 check, __wsum pseudo)
3086
+ __wsum pseudo)
28203087 {
28213088 NAPI_GRO_CB(skb)->csum = ~pseudo;
28223089 NAPI_GRO_CB(skb)->csum_valid = 1;
28233090 }
28243091
2825
-#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
3092
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
28263093 do { \
28273094 if (__skb_gro_checksum_convert_check(skb)) \
2828
- __skb_gro_checksum_convert(skb, check, \
3095
+ __skb_gro_checksum_convert(skb, \
28293096 compute_pseudo(skb, proto)); \
28303097 } while (0)
28313098
....@@ -2948,6 +3215,15 @@
29483215 return dev->header_ops->parse(skb, haddr);
29493216 }
29503217
3218
+static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3219
+{
3220
+ const struct net_device *dev = skb->dev;
3221
+
3222
+ if (!dev->header_ops || !dev->header_ops->parse_protocol)
3223
+ return 0;
3224
+ return dev->header_ops->parse_protocol(skb);
3225
+}
3226
+
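
Sketch of the fallback that callers such as packet sockets can now use when no EtherType was supplied; the wrapper name is mine:

static __be16 example_skb_protocol(struct sk_buff *skb, __be16 proto)
{
	if (proto)
		return proto;
	/* Returns 0 when the device has no parse_protocol hook. */
	return dev_parse_header_protocol(skb);
}
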
29513227 /* ll_header must have at least hard_header_len allocated */
29523228 static inline bool dev_validate_header(const struct net_device *dev,
29533229 char *ll_header, int len)
....@@ -2968,12 +3244,9 @@
29683244 return false;
29693245 }
29703246
2971
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
2972
- int len, int size);
2973
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2974
-static inline int unregister_gifconf(unsigned int family)
3247
+static inline bool dev_has_header(const struct net_device *dev)
29753248 {
2976
- return register_gifconf(family, NULL);
3249
+ return dev->header_ops && dev->header_ops->create;
29773250 }
29783251
29793252 #ifdef CONFIG_NET_FLOW_LIMIT
....@@ -2994,16 +3267,12 @@
29943267 */
29953268 struct softnet_data {
29963269 struct list_head poll_list;
2997
- struct napi_struct *current_napi;
29983270 struct sk_buff_head process_queue;
29993271
30003272 /* stats */
30013273 unsigned int processed;
30023274 unsigned int time_squeeze;
30033275 unsigned int received_rps;
3004
- /* unused partner variable for ABI alignment */
3005
- unsigned int gro_coalesced;
3006
-
30073276 #ifdef CONFIG_RPS
30083277 struct softnet_data *rps_ipi_list;
30093278 #endif
....@@ -3243,6 +3512,26 @@
32433512 #endif
32443513 }
32453514
3515
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
3516
+ * that they should not test BQL status themselves.
3517
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3518
+ * skb of a batch.
3519
+ * Returns true if the doorbell must be used to kick the NIC.
3520
+ */
3521
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3522
+ unsigned int bytes,
3523
+ bool xmit_more)
3524
+{
3525
+ if (xmit_more) {
3526
+#ifdef CONFIG_BQL
3527
+ dql_queued(&dev_queue->dql, bytes);
3528
+#endif
3529
+ return netif_tx_queue_stopped(dev_queue);
3530
+ }
3531
+ netdev_tx_sent_queue(dev_queue, bytes);
3532
+ return true;
3533
+}
3534
+
32463535 /**
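
Sketch of the intended xmit-path pattern: BQL is charged for every skb, but the queue state is only tested - and the doorbell rung - for the last skb of a batch. The foo_* helpers are placeholders, and how xmit_more is obtained (skb->xmit_more vs. netdev_xmit_more()) depends on the kernel version.

static void foo_tx_one(struct net_device *dev, struct netdev_queue *txq,
		       struct sk_buff *skb, bool xmit_more)
{
	foo_post_tx_descriptor(dev, skb);		/* placeholder HW fill */

	if (__netdev_tx_sent_queue(txq, skb->len, xmit_more))
		foo_ring_doorbell(dev);			/* placeholder doorbell */
}
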
32473536 * netdev_sent_queue - report the number of bytes queued to hardware
32483537 * @dev: network device
....@@ -3255,6 +3544,14 @@
32553544 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
32563545 {
32573546 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3547
+}
3548
+
3549
+static inline bool __netdev_sent_queue(struct net_device *dev,
3550
+ unsigned int bytes,
3551
+ bool xmit_more)
3552
+{
3553
+ return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3554
+ xmit_more);
32583555 }
32593556
32603557 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
....@@ -3273,7 +3570,7 @@
32733570 */
32743571 smp_mb();
32753572
3276
- if (dql_avail(&dev_queue->dql) < 0)
3573
+ if (unlikely(dql_avail(&dev_queue->dql) < 0))
32773574 return;
32783575
32793576 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
....@@ -3481,7 +3778,7 @@
34813778 }
34823779
34833780 /**
3484
- * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
3781
+ * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
34853782 * @n: CPU/Rx queue index
34863783 * @src1p: the first CPUs/Rx queues mask pointer
34873784 * @src2p: the second CPUs/Rx queues mask pointer
....@@ -3618,6 +3915,7 @@
36183915 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
36193916 int netif_rx(struct sk_buff *skb);
36203917 int netif_rx_ni(struct sk_buff *skb);
3918
+int netif_rx_any_context(struct sk_buff *skb);
36213919 int netif_receive_skb(struct sk_buff *skb);
36223920 int netif_receive_skb_core(struct sk_buff *skb);
36233921 void netif_receive_skb_list(struct list_head *head);
....@@ -3627,7 +3925,6 @@
36273925 gro_result_t napi_gro_frags(struct napi_struct *napi);
36283926 struct packet_offload *gro_find_receive_by_type(__be16 type);
36293927 struct packet_offload *gro_find_complete_by_type(__be16 type);
3630
-extern struct napi_struct *get_current_napi_context(void);
36313928
36323929 static inline void napi_free_frags(struct napi_struct *napi)
36333930 {
....@@ -3651,8 +3948,10 @@
36513948 int dev_ifconf(struct net *net, struct ifconf *, int);
36523949 int dev_ethtool(struct net *net, struct ifreq *);
36533950 unsigned int dev_get_flags(const struct net_device *);
3654
-int __dev_change_flags(struct net_device *, unsigned int flags);
3655
-int dev_change_flags(struct net_device *, unsigned int);
3951
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
3952
+ struct netlink_ext_ack *extack);
3953
+int dev_change_flags(struct net_device *dev, unsigned int flags,
3954
+ struct netlink_ext_ack *extack);
36563955 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
36573956 unsigned int gchanges);
36583957 int dev_change_name(struct net_device *, const char *);
....@@ -3667,22 +3966,35 @@
36673966 int dev_set_mtu(struct net_device *, int);
36683967 int dev_change_tx_queue_len(struct net_device *, unsigned long);
36693968 void dev_set_group(struct net_device *, int);
3670
-int dev_set_mac_address(struct net_device *, struct sockaddr *);
3969
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3970
+ struct netlink_ext_ack *extack);
3971
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3972
+ struct netlink_ext_ack *extack);
3973
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
3974
+ struct netlink_ext_ack *extack);
3975
+int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
36713976 int dev_change_carrier(struct net_device *, bool new_carrier);
36723977 int dev_get_phys_port_id(struct net_device *dev,
36733978 struct netdev_phys_item_id *ppid);
36743979 int dev_get_phys_port_name(struct net_device *dev,
36753980 char *name, size_t len);
3981
+int dev_get_port_parent_id(struct net_device *dev,
3982
+ struct netdev_phys_item_id *ppid, bool recurse);
3983
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
36763984 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3985
+int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3986
+void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
3987
+ u32 value);
36773988 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
36783989 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
36793990 struct netdev_queue *txq, int *ret);
36803991
36813992 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
36823993 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3683
- int fd, u32 flags);
3684
-u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3685
- enum bpf_netdev_command cmd);
3994
+ int fd, int expected_fd, u32 flags);
3995
+int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
3996
+u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
3997
+
36863998 int xdp_umem_query(struct net_device *dev, u16 queue_id);
36873999
36884000 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
....@@ -3705,6 +4017,7 @@
37054017 return 0;
37064018 }
37074019
4020
+bool dev_nit_active(struct net_device *dev);
37084021 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
37094022
37104023 extern int netdev_budget;
....@@ -3721,7 +4034,8 @@
37214034 */
37224035 static inline void dev_put(struct net_device *dev)
37234036 {
3724
- this_cpu_dec(*dev->pcpu_refcnt);
4037
+ if (dev)
4038
+ this_cpu_dec(*dev->pcpu_refcnt);
37254039 }
37264040
37274041 /**
....@@ -3732,7 +4046,8 @@
37324046 */
37334047 static inline void dev_hold(struct net_device *dev)
37344048 {
3735
- this_cpu_inc(*dev->pcpu_refcnt);
4049
+ if (dev)
4050
+ this_cpu_inc(*dev->pcpu_refcnt);
37364051 }
37374052
37384053 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
....@@ -3810,6 +4125,46 @@
38104125
38114126
38124127 /**
4128
+ * netif_testing_on - mark device as under test.
4129
+ * @dev: network device
4130
+ *
4131
+ * Mark device as under test (as per RFC2863).
4132
+ *
4133
+ * The testing state indicates that some test(s) must be performed on
4134
+ * the interface. After completion of the test, the interface state
4135
+ * will change to up, dormant, or down, as appropriate.
4136
+ */
4137
+static inline void netif_testing_on(struct net_device *dev)
4138
+{
4139
+ if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4140
+ linkwatch_fire_event(dev);
4141
+}
4142
+
4143
+/**
4144
+ * netif_testing_off - set device as not under test.
4145
+ * @dev: network device
4146
+ *
4147
+ * Device is not in testing state.
4148
+ */
4149
+static inline void netif_testing_off(struct net_device *dev)
4150
+{
4151
+ if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4152
+ linkwatch_fire_event(dev);
4153
+}
4154
+
4155
+/**
4156
+ * netif_testing - test if device is under test
4157
+ * @dev: network device
4158
+ *
4159
+ * Check if device is under test
4160
+ */
4161
+static inline bool netif_testing(const struct net_device *dev)
4162
+{
4163
+ return test_bit(__LINK_STATE_TESTING, &dev->state);
4164
+}
4165
+
4166
+
4167
+/**
38134168 * netif_oper_up - test if device is operational
38144169 * @dev: network device
38154170 *
....@@ -3841,22 +4196,48 @@
38414196 */
38424197
38434198 enum {
3844
- NETIF_MSG_DRV = 0x0001,
3845
- NETIF_MSG_PROBE = 0x0002,
3846
- NETIF_MSG_LINK = 0x0004,
3847
- NETIF_MSG_TIMER = 0x0008,
3848
- NETIF_MSG_IFDOWN = 0x0010,
3849
- NETIF_MSG_IFUP = 0x0020,
3850
- NETIF_MSG_RX_ERR = 0x0040,
3851
- NETIF_MSG_TX_ERR = 0x0080,
3852
- NETIF_MSG_TX_QUEUED = 0x0100,
3853
- NETIF_MSG_INTR = 0x0200,
3854
- NETIF_MSG_TX_DONE = 0x0400,
3855
- NETIF_MSG_RX_STATUS = 0x0800,
3856
- NETIF_MSG_PKTDATA = 0x1000,
3857
- NETIF_MSG_HW = 0x2000,
3858
- NETIF_MSG_WOL = 0x4000,
4199
+ NETIF_MSG_DRV_BIT,
4200
+ NETIF_MSG_PROBE_BIT,
4201
+ NETIF_MSG_LINK_BIT,
4202
+ NETIF_MSG_TIMER_BIT,
4203
+ NETIF_MSG_IFDOWN_BIT,
4204
+ NETIF_MSG_IFUP_BIT,
4205
+ NETIF_MSG_RX_ERR_BIT,
4206
+ NETIF_MSG_TX_ERR_BIT,
4207
+ NETIF_MSG_TX_QUEUED_BIT,
4208
+ NETIF_MSG_INTR_BIT,
4209
+ NETIF_MSG_TX_DONE_BIT,
4210
+ NETIF_MSG_RX_STATUS_BIT,
4211
+ NETIF_MSG_PKTDATA_BIT,
4212
+ NETIF_MSG_HW_BIT,
4213
+ NETIF_MSG_WOL_BIT,
4214
+
4215
+ /* When you add a new bit above, update netif_msg_class_names array
4216
+ * in net/ethtool/common.c
4217
+ */
4218
+ NETIF_MSG_CLASS_COUNT,
38594219 };
4220
+/* Both ethtool_ops interface and internal driver implementation use u32 */
4221
+static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4222
+
4223
+#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
4224
+#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4225
+
4226
+#define NETIF_MSG_DRV __NETIF_MSG(DRV)
4227
+#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
4228
+#define NETIF_MSG_LINK __NETIF_MSG(LINK)
4229
+#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
4230
+#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
4231
+#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
4232
+#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
4233
+#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
4234
+#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
4235
+#define NETIF_MSG_INTR __NETIF_MSG(INTR)
4236
+#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
4237
+#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
4238
+#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
4239
+#define NETIF_MSG_HW __NETIF_MSG(HW)
4240
+#define NETIF_MSG_WOL __NETIF_MSG(WOL)
38604241
38614242 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
38624243 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
....@@ -4049,22 +4430,23 @@
40494430
40504431 static inline void netif_addr_lock(struct net_device *dev)
40514432 {
4052
- spin_lock(&dev->addr_list_lock);
4053
-}
4433
+ unsigned char nest_level = 0;
40544434
4055
-static inline void netif_addr_lock_nested(struct net_device *dev)
4056
-{
4057
- int subclass = SINGLE_DEPTH_NESTING;
4058
-
4059
- if (dev->netdev_ops->ndo_get_lock_subclass)
4060
- subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
4061
-
4062
- spin_lock_nested(&dev->addr_list_lock, subclass);
4435
+#ifdef CONFIG_LOCKDEP
4436
+ nest_level = dev->nested_level;
4437
+#endif
4438
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
40634439 }
40644440
40654441 static inline void netif_addr_lock_bh(struct net_device *dev)
40664442 {
4067
- spin_lock_bh(&dev->addr_list_lock);
4443
+ unsigned char nest_level = 0;
4444
+
4445
+#ifdef CONFIG_LOCKDEP
4446
+ nest_level = dev->nested_level;
4447
+#endif
4448
+ local_bh_disable();
4449
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
40684450 }
40694451
40704452 static inline void netif_addr_unlock(struct net_device *dev)
....@@ -4093,9 +4475,6 @@
40934475 unsigned char name_assign_type,
40944476 void (*setup)(struct net_device *),
40954477 unsigned int txqs, unsigned int rxqs);
4096
-int dev_get_valid_name(struct net *net, struct net_device *dev,
4097
- const char *name);
4098
-
40994478 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
41004479 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
41014480
....@@ -4105,6 +4484,8 @@
41054484
41064485 int register_netdev(struct net_device *dev);
41074486 void unregister_netdev(struct net_device *dev);
4487
+
4488
+int devm_register_netdev(struct device *dev, struct net_device *ndev);
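devm_register_netdev() pairs with a device-managed net_device: the registration is undone automatically when the owning struct device is unbound, so remove() needs no unregister_netdev(). A sketch of the intended pairing in a hypothetical platform driver (the myeth_* names and private struct are illustrative):

static int myeth_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	/* The netdev itself must also be devres-managed. */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct myeth_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &myeth_netdev_ops;	/* hypothetical ops table */

	return devm_register_netdev(&pdev->dev, ndev);
}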
41084489
41094490 /* General hardware address lists handling functions */
41104491 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
....@@ -4116,6 +4497,16 @@
41164497 int (*sync)(struct net_device *, const unsigned char *),
41174498 int (*unsync)(struct net_device *,
41184499 const unsigned char *));
4500
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4501
+ struct net_device *dev,
4502
+ int (*sync)(struct net_device *,
4503
+ const unsigned char *, int),
4504
+ int (*unsync)(struct net_device *,
4505
+ const unsigned char *, int));
4506
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4507
+ struct net_device *dev,
4508
+ int (*unsync)(struct net_device *,
4509
+ const unsigned char *, int));
41194510 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
41204511 struct net_device *dev,
41214512 int (*unsync)(struct net_device *,
....@@ -4123,6 +4514,24 @@
41234514 void __hw_addr_init(struct netdev_hw_addr_list *list);
41244515
41254516 /* Functions used for device addresses handling */
4517
+static inline void
4518
+__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len)
4519
+{
4520
+ memcpy(dev->dev_addr, addr, len);
4521
+}
4522
+
4523
+static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
4524
+{
4525
+ __dev_addr_set(dev, addr, dev->addr_len);
4526
+}
4527
+
4528
+static inline void
4529
+dev_addr_mod(struct net_device *dev, unsigned int offset,
4530
+ const u8 *addr, size_t len)
4531
+{
4532
+ memcpy(&dev->dev_addr[offset], addr, len);
4533
+}
4534
+
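These helpers give MAC address updates a single choke point instead of open-coded memcpy() into dev->dev_addr. A sketch of a typical probe-time use (hypothetical driver; is_valid_ether_addr() and eth_hw_addr_random() come from <linux/etherdevice.h>):

static void myeth_set_initial_mac(struct net_device *dev, const u8 *mac)
{
	if (is_valid_ether_addr(mac))
		dev_addr_set(dev, mac);	/* replaces memcpy(dev->dev_addr, mac, dev->addr_len) */
	else
		eth_hw_addr_random(dev);
}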
41264535 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
41274536 unsigned char addr_type);
41284537 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
....@@ -4230,6 +4639,8 @@
42304639 struct rtnl_link_stats64 *storage);
42314640 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
42324641 const struct net_device_stats *netdev_stats);
4642
+void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4643
+ const struct pcpu_sw_netstats __percpu *netstats);
42334644
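dev_fetch_sw_netstats() folds per-CPU pcpu_sw_netstats counters into an rtnl_link_stats64, which shrinks most software-stats ndo_get_stats64 implementations to a few lines. A sketch assuming the driver keeps its own per-CPU counters (allocation elsewhere is implied; the mytun_* names are hypothetical):

struct mytun_priv {
	struct pcpu_sw_netstats __percpu *stats;
};

static void mytun_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct mytun_priv *priv = netdev_priv(dev);

	netdev_stats_to_stats64(stats, &dev->stats);	/* error/drop counters */
	dev_fetch_sw_netstats(stats, priv->stats);	/* rx/tx packets and bytes */
}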
42344645 extern int netdev_max_backlog;
42354646 extern int netdev_tstamp_prequeue;
....@@ -4238,12 +4649,39 @@
42384649 extern int dev_weight_tx_bias;
42394650 extern int dev_rx_weight;
42404651 extern int dev_tx_weight;
4652
+extern int gro_normal_batch;
4653
+
4654
+enum {
4655
+ NESTED_SYNC_IMM_BIT,
4656
+ NESTED_SYNC_TODO_BIT,
4657
+};
4658
+
4659
+#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
4660
+#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4661
+
4662
+#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
4663
+#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
4664
+
4665
+struct netdev_nested_priv {
4666
+ unsigned char flags;
4667
+ void *data;
4668
+};
42414669
42424670 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
42434671 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
42444672 struct list_head **iter);
42454673 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
42464674 struct list_head **iter);
4675
+
4676
+#ifdef CONFIG_LOCKDEP
4677
+static LIST_HEAD(net_unlink_list);
4678
+
4679
+static inline void net_unlink_todo(struct net_device *dev)
4680
+{
4681
+ if (list_empty(&dev->unlink_list))
4682
+ list_add_tail(&dev->unlink_list, &net_unlink_list);
4683
+}
4684
+#endif
42474685
42484686 /* iterate through upper list, must be called under RCU read lock */
42494687 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
....@@ -4254,8 +4692,8 @@
42544692
42554693 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
42564694 int (*fn)(struct net_device *upper_dev,
4257
- void *data),
4258
- void *data);
4695
+ struct netdev_nested_priv *priv),
4696
+ struct netdev_nested_priv *priv);
42594697
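The walk callbacks now take a struct netdev_nested_priv instead of a bare void *; the old cookie simply moves into ->data, while ->flags carries the NESTED_SYNC_* bits introduced earlier in this patch. A hedged sketch of a caller counting VLAN uppers (is_vlan_dev() is from <linux/if_vlan.h>):

static int count_vlan_upper(struct net_device *upper,
			    struct netdev_nested_priv *priv)
{
	int *count = priv->data;

	if (is_vlan_dev(upper))
		(*count)++;

	return 0;	/* non-zero would stop the walk */
}

static int count_vlan_uppers(struct net_device *dev)
{
	int count = 0;
	struct netdev_nested_priv priv = { .data = &count };

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, count_vlan_upper, &priv);
	rcu_read_unlock();

	return count;
}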
42604698 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
42614699 struct net_device *upper_dev);
....@@ -4288,19 +4726,16 @@
42884726 ldev; \
42894727 ldev = netdev_lower_get_next(dev, &(iter)))
42904728
4291
-struct net_device *netdev_all_lower_get_next(struct net_device *dev,
4729
+struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
42924730 struct list_head **iter);
4293
-struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
4294
- struct list_head **iter);
4295
-
42964731 int netdev_walk_all_lower_dev(struct net_device *dev,
42974732 int (*fn)(struct net_device *lower_dev,
4298
- void *data),
4299
- void *data);
4733
+ struct netdev_nested_priv *priv),
4734
+ struct netdev_nested_priv *priv);
43004735 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
43014736 int (*fn)(struct net_device *lower_dev,
4302
- void *data),
4303
- void *data);
4737
+ struct netdev_nested_priv *priv),
4738
+ struct netdev_nested_priv *priv);
43044739
43054740 void *netdev_adjacent_get_private(struct list_head *adj_list);
43064741 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
....@@ -4314,6 +4749,16 @@
43144749 struct netlink_ext_ack *extack);
43154750 void netdev_upper_dev_unlink(struct net_device *dev,
43164751 struct net_device *upper_dev);
4752
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
4753
+ struct net_device *new_dev,
4754
+ struct net_device *dev,
4755
+ struct netlink_ext_ack *extack);
4756
+void netdev_adjacent_change_commit(struct net_device *old_dev,
4757
+ struct net_device *new_dev,
4758
+ struct net_device *dev);
4759
+void netdev_adjacent_change_abort(struct net_device *old_dev,
4760
+ struct net_device *new_dev,
4761
+ struct net_device *dev);
43174762 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
43184763 void *netdev_lower_dev_get_private(struct net_device *dev,
43194764 struct net_device *lower_dev);
....@@ -4325,7 +4770,6 @@
43254770 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
43264771 void netdev_rss_key_fill(void *buffer, size_t len);
43274772
4328
-int dev_get_nest_level(struct net_device *dev);
43294773 int skb_checksum_help(struct sk_buff *skb);
43304774 int skb_crc32c_csum_help(struct sk_buff *skb);
43314775 int skb_csum_hwoffload_help(struct sk_buff *skb,
....@@ -4348,6 +4792,15 @@
43484792
43494793 void netdev_bonding_info_change(struct net_device *dev,
43504794 struct netdev_bonding_info *bonding_info);
4795
+
4796
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
4797
+void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
4798
+#else
4799
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
4800
+ const void *data)
4801
+{
4802
+}
4803
+#endif
43514804
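The empty stub lets callers emit ethtool netlink notifications unconditionally instead of wrapping each call site in #ifdef CONFIG_ETHTOOL_NETLINK. A sketch, assuming the ETHTOOL_MSG_WOL_NTF message id from <linux/ethtool_netlink.h> is available in this tree:

static void myeth_wol_state_changed(struct net_device *dev)
{
	/* A no-op when ethtool netlink is not built. */
	ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
}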
43524805 static inline
43534806 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
....@@ -4380,9 +4833,10 @@
43804833 }
43814834
43824835 #ifdef CONFIG_BUG
4383
-void netdev_rx_csum_fault(struct net_device *dev);
4836
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
43844837 #else
4385
-static inline void netdev_rx_csum_fault(struct net_device *dev)
4838
+static inline void netdev_rx_csum_fault(struct net_device *dev,
4839
+ struct sk_buff *skb)
43864840 {
43874841 }
43884842 #endif
....@@ -4400,7 +4854,7 @@
44004854 struct sk_buff *skb, struct net_device *dev,
44014855 bool more)
44024856 {
4403
- skb->xmit_more = more ? 1 : 0;
4857
+ __this_cpu_write(softnet_data.xmit.more, more);
44044858 return ops->ndo_start_xmit(skb, dev);
44054859 }
44064860
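The "more packets are on the way" hint now lives in per-CPU softnet_data rather than in the skb, so drivers read it through the netdev_xmit_more() helper (present in the new header) instead of skb->xmit_more. A sketch of the usual doorbell-coalescing pattern with a hypothetical myeth_ring_doorbell():

static void myeth_ring_doorbell(struct net_device *dev);	/* hypothetical */

static netdev_tx_t myeth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... post the skb to the hardware ring here ... */

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		myeth_ring_doorbell(dev);

	return NETDEV_TX_OK;
}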
....@@ -4413,7 +4867,7 @@
44134867 struct netdev_queue *txq, bool more)
44144868 {
44154869 const struct net_device_ops *ops = dev->netdev_ops;
4416
- int rc;
4870
+ netdev_tx_t rc;
44174871
44184872 rc = __netdev_start_xmit(ops, skb, dev, more);
44194873 if (rc == NETDEV_TX_OK)
....@@ -4426,16 +4880,6 @@
44264880 const void *ns);
44274881 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
44284882 const void *ns);
4429
-
4430
-static inline int netdev_class_create_file(const struct class_attribute *class_attr)
4431
-{
4432
- return netdev_class_create_file_ns(class_attr, NULL);
4433
-}
4434
-
4435
-static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
4436
-{
4437
- netdev_class_remove_file_ns(class_attr, NULL);
4438
-}
44394883
44404884 extern const struct kobj_ns_type_operations net_ns_type_operations;
44414885
....@@ -4509,6 +4953,7 @@
45094953 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
45104954 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
45114955 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
4956
+ BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
45124957
45134958 return (features & feature) == feature;
45144959 }
....@@ -4611,6 +5056,11 @@
46115056 return dev->priv_flags & IFF_OVS_DATAPATH;
46125057 }
46135058
5059
+static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5060
+{
5061
+ return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5062
+}
5063
+
46145064 static inline bool netif_is_team_master(const struct net_device *dev)
46155065 {
46165066 return dev->priv_flags & IFF_TEAM;
....@@ -4692,22 +5142,22 @@
46925142 return " (unknown)";
46935143 }
46945144
4695
-__printf(3, 4)
5145
+__printf(3, 4) __cold
46965146 void netdev_printk(const char *level, const struct net_device *dev,
46975147 const char *format, ...);
4698
-__printf(2, 3)
5148
+__printf(2, 3) __cold
46995149 void netdev_emerg(const struct net_device *dev, const char *format, ...);
4700
-__printf(2, 3)
5150
+__printf(2, 3) __cold
47015151 void netdev_alert(const struct net_device *dev, const char *format, ...);
4702
-__printf(2, 3)
5152
+__printf(2, 3) __cold
47035153 void netdev_crit(const struct net_device *dev, const char *format, ...);
4704
-__printf(2, 3)
5154
+__printf(2, 3) __cold
47055155 void netdev_err(const struct net_device *dev, const char *format, ...);
4706
-__printf(2, 3)
5156
+__printf(2, 3) __cold
47075157 void netdev_warn(const struct net_device *dev, const char *format, ...);
4708
-__printf(2, 3)
5158
+__printf(2, 3) __cold
47095159 void netdev_notice(const struct net_device *dev, const char *format, ...);
4710
-__printf(2, 3)
5160
+__printf(2, 3) __cold
47115161 void netdev_info(const struct net_device *dev, const char *format, ...);
47125162
47135163 #define netdev_level_once(level, dev, fmt, ...) \
....@@ -4738,7 +5188,8 @@
47385188 #define MODULE_ALIAS_NETDEV(device) \
47395189 MODULE_ALIAS("netdev-" device)
47405190
4741
-#if defined(CONFIG_DYNAMIC_DEBUG)
5191
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
5192
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
47425193 #define netdev_dbg(__dev, format, args...) \
47435194 do { \
47445195 dynamic_netdev_dbg(__dev, format, ##args); \
....@@ -4808,7 +5259,8 @@
48085259 #define netif_info(priv, type, dev, fmt, args...) \
48095260 netif_level(info, priv, type, dev, fmt, ##args)
48105261
4811
-#if defined(CONFIG_DYNAMIC_DEBUG)
5262
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
5263
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
48125264 #define netif_dbg(priv, type, netdev, format, args...) \
48135265 do { \
48145266 if (netif_msg_##type(priv)) \
....@@ -4868,4 +5320,6 @@
48685320 #define PTYPE_HASH_SIZE (16)
48695321 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
48705322
5323
+extern struct net_device *blackhole_netdev;
5324
+
48715325 #endif /* _LINUX_NETDEVICE_H */