.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
---|
1 | 2 | /* |
---|
2 | 3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
---|
3 | 4 | * operating system. INET is implemented using the BSD Socket |
---|
.. | .. |
---|
14 | 15 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
---|
15 | 16 | * Bjorn Ekwall. <bj0rn@blox.se> |
---|
16 | 17 | * Pekka Riikonen <priikone@poseidon.pspt.fi> |
---|
17 | | - * |
---|
18 | | - * This program is free software; you can redistribute it and/or |
---|
19 | | - * modify it under the terms of the GNU General Public License |
---|
20 | | - * as published by the Free Software Foundation; either version |
---|
21 | | - * 2 of the License, or (at your option) any later version. |
---|
22 | 18 | * |
---|
23 | 19 | * Moved to /usr/include/linux for NET3 |
---|
24 | 20 | */ |
---|
.. | .. |
---|
58 | 54 | struct device; |
---|
59 | 55 | struct phy_device; |
---|
60 | 56 | struct dsa_port; |
---|
| 57 | +struct ip_tunnel_parm; |
---|
| 58 | +struct macsec_context; |
---|
| 59 | +struct macsec_ops; |
---|
61 | 60 | |
---|
62 | 61 | struct sfp_bus; |
---|
63 | 62 | /* 802.11 specific */ |
---|
.. | .. |
---|
67 | 66 | struct mpls_dev; |
---|
68 | 67 | /* UDP Tunnel offloads */ |
---|
69 | 68 | struct udp_tunnel_info; |
---|
| 69 | +struct udp_tunnel_nic_info; |
---|
| 70 | +struct udp_tunnel_nic; |
---|
70 | 71 | struct bpf_prog; |
---|
71 | 72 | struct xdp_buff; |
---|
72 | 73 | |
---|
| 74 | +void synchronize_net(void); |
---|
73 | 75 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
---|
74 | 76 | const struct ethtool_ops *ops); |
---|
75 | 77 | |
---|
76 | 78 | /* Backlog congestion levels */ |
---|
77 | 79 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ |
---|
78 | 80 | #define NET_RX_DROP 1 /* packet dropped */ |
---|
| 81 | + |
---|
| 82 | +#define MAX_NEST_DEV 8 |
---|
79 | 83 | |
---|
80 | 84 | /* |
---|
81 | 85 | * Transmit return codes: transmit return codes originate from three different |
---|
.. | .. |
---|
195 | 199 | |
---|
196 | 200 | #ifdef CONFIG_RPS |
---|
197 | 201 | #include <linux/static_key.h> |
---|
198 | | -extern struct static_key rps_needed; |
---|
199 | | -extern struct static_key rfs_needed; |
---|
| 202 | +extern struct static_key_false rps_needed; |
---|
| 203 | +extern struct static_key_false rfs_needed; |
---|
200 | 204 | #endif |
---|
201 | 205 | |
---|
202 | 206 | struct neighbour; |
---|
.. | .. |
---|
209 | 213 | unsigned char type; |
---|
210 | 214 | #define NETDEV_HW_ADDR_T_LAN 1 |
---|
211 | 215 | #define NETDEV_HW_ADDR_T_SAN 2 |
---|
212 | | -#define NETDEV_HW_ADDR_T_SLAVE 3 |
---|
213 | | -#define NETDEV_HW_ADDR_T_UNICAST 4 |
---|
214 | | -#define NETDEV_HW_ADDR_T_MULTICAST 5 |
---|
| 216 | +#define NETDEV_HW_ADDR_T_UNICAST 3 |
---|
| 217 | +#define NETDEV_HW_ADDR_T_MULTICAST 4 |
---|
215 | 218 | bool global_use; |
---|
216 | 219 | int sync_cnt; |
---|
217 | 220 | int refcount; |
---|
.. | .. |
---|
261 | 264 | * relationship HH alignment <= LL alignment. |
---|
262 | 265 | */ |
---|
263 | 266 | #define LL_RESERVED_SPACE(dev) \ |
---|
264 | | - ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
---|
| 267 | + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ |
---|
| 268 | + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
---|
265 | 269 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
---|
266 | | - ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
---|
| 270 | + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ |
---|
| 271 | + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
---|
267 | 272 | |
---|
268 | 273 | struct header_ops { |
---|
269 | 274 | int (*create) (struct sk_buff *skb, struct net_device *dev, |
---|
.. | .. |
---|
275 | 280 | const struct net_device *dev, |
---|
276 | 281 | const unsigned char *haddr); |
---|
277 | 282 | bool (*validate)(const char *ll_header, unsigned int len); |
---|
| 283 | + __be16 (*parse_protocol)(const struct sk_buff *skb); |
---|
278 | 284 | |
---|
279 | 285 | ANDROID_KABI_RESERVE(1); |
---|
280 | 286 | ANDROID_KABI_RESERVE(2); |
---|
.. | .. |
---|
291 | 297 | __LINK_STATE_NOCARRIER, |
---|
292 | 298 | __LINK_STATE_LINKWATCH_PENDING, |
---|
293 | 299 | __LINK_STATE_DORMANT, |
---|
| 300 | + __LINK_STATE_TESTING, |
---|
294 | 301 | }; |
---|
295 | 302 | |
---|
296 | 303 | |
---|
.. | .. |
---|
331 | 338 | |
---|
332 | 339 | unsigned long state; |
---|
333 | 340 | int weight; |
---|
| 341 | + int defer_hard_irqs_count; |
---|
334 | 342 | unsigned long gro_bitmask; |
---|
335 | 343 | int (*poll)(struct napi_struct *, int); |
---|
336 | 344 | #ifdef CONFIG_NETPOLL |
---|
.. | .. |
---|
339 | 347 | struct net_device *dev; |
---|
340 | 348 | struct gro_list gro_hash[GRO_HASH_BUCKETS]; |
---|
341 | 349 | struct sk_buff *skb; |
---|
| 350 | + struct list_head rx_list; /* Pending GRO_NORMAL skbs */ |
---|
| 351 | + int rx_count; /* length of rx_list */ |
---|
342 | 352 | struct hrtimer timer; |
---|
343 | 353 | struct list_head dev_list; |
---|
344 | 354 | struct hlist_node napi_hash_node; |
---|
.. | .. |
---|
355 | 365 | NAPI_STATE_MISSED, /* reschedule a napi */ |
---|
356 | 366 | NAPI_STATE_DISABLE, /* Disable pending */ |
---|
357 | 367 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
---|
358 | | - NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ |
---|
| 368 | + NAPI_STATE_LISTED, /* NAPI added to system lists */ |
---|
359 | 369 | NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ |
---|
360 | 370 | NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ |
---|
361 | 371 | }; |
---|
.. | .. |
---|
365 | 375 | NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), |
---|
366 | 376 | NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), |
---|
367 | 377 | NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), |
---|
368 | | - NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), |
---|
| 378 | + NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), |
---|
369 | 379 | NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), |
---|
370 | 380 | NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), |
---|
371 | 381 | }; |
---|
.. | .. |
---|
431 | 441 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); |
---|
432 | 442 | |
---|
433 | 443 | void __napi_schedule(struct napi_struct *n); |
---|
434 | | - |
---|
435 | | -/* |
---|
436 | | - * When PREEMPT_RT_FULL is defined, all device interrupt handlers |
---|
437 | | - * run as threads, and they can also be preempted (without PREEMPT_RT |
---|
438 | | - * interrupt threads can not be preempted). Which means that calling |
---|
439 | | - * __napi_schedule_irqoff() from an interrupt handler can be preempted |
---|
440 | | - * and can corrupt the napi->poll_list. |
---|
441 | | - */ |
---|
442 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
443 | | -#define __napi_schedule_irqoff(n) __napi_schedule(n) |
---|
444 | | -#else |
---|
445 | 444 | void __napi_schedule_irqoff(struct napi_struct *n); |
---|
446 | | -#endif |
---|
447 | 445 | |
---|
448 | 446 | static inline bool napi_disable_pending(struct napi_struct *n) |
---|
449 | 447 | { |
---|
.. | .. |
---|
502 | 500 | } |
---|
503 | 501 | |
---|
504 | 502 | /** |
---|
505 | | - * napi_hash_del - remove a NAPI from global table |
---|
506 | | - * @napi: NAPI context |
---|
507 | | - * |
---|
508 | | - * Warning: caller must observe RCU grace period |
---|
509 | | - * before freeing memory containing @napi, if |
---|
510 | | - * this function returns true. |
---|
511 | | - * Note: core networking stack automatically calls it |
---|
512 | | - * from netif_napi_del(). |
---|
513 | | - * Drivers might want to call this helper to combine all |
---|
514 | | - * the needed RCU grace periods into a single one. |
---|
515 | | - */ |
---|
516 | | -bool napi_hash_del(struct napi_struct *napi); |
---|
517 | | - |
---|
518 | | -/** |
---|
519 | 503 | * napi_disable - prevent NAPI from scheduling |
---|
520 | 504 | * @n: NAPI context |
---|
521 | 505 | * |
---|
.. | .. |
---|
554 | 538 | msleep(1); |
---|
555 | 539 | else |
---|
556 | 540 | barrier(); |
---|
| 541 | +} |
---|
| 542 | + |
---|
| 543 | +/** |
---|
| 544 | + * napi_if_scheduled_mark_missed - if napi is running, set the |
---|
| 545 | + * NAPIF_STATE_MISSED |
---|
| 546 | + * @n: NAPI context |
---|
| 547 | + * |
---|
| 548 | + * If napi is running, set the NAPIF_STATE_MISSED, and return true if |
---|
| 549 | + * NAPI is scheduled. |
---|
| 550 | + **/ |
---|
| 551 | +static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) |
---|
| 552 | +{ |
---|
| 553 | + unsigned long val, new; |
---|
| 554 | + |
---|
| 555 | + do { |
---|
| 556 | + val = READ_ONCE(n->state); |
---|
| 557 | + if (val & NAPIF_STATE_DISABLE) |
---|
| 558 | + return true; |
---|
| 559 | + |
---|
| 560 | + if (!(val & NAPIF_STATE_SCHED)) |
---|
| 561 | + return false; |
---|
| 562 | + |
---|
| 563 | + new = val | NAPIF_STATE_MISSED; |
---|
| 564 | + } while (cmpxchg(&n->state, val, new) != val); |
---|
| 565 | + |
---|
| 566 | + return true; |
---|
557 | 567 | } |
---|
558 | 568 | |
---|
559 | 569 | enum netdev_queue_state_t { |
---|
.. | .. |
---|
604 | 614 | |
---|
605 | 615 | /* Subordinate device that the queue has been assigned to */ |
---|
606 | 616 | struct net_device *sb_dev; |
---|
| 617 | +#ifdef CONFIG_XDP_SOCKETS |
---|
| 618 | + struct xsk_buff_pool *pool; |
---|
| 619 | +#endif |
---|
607 | 620 | /* |
---|
608 | 621 | * write-mostly part |
---|
609 | 622 | */ |
---|
610 | 623 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; |
---|
611 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
612 | | - struct task_struct *xmit_lock_owner; |
---|
613 | | -#else |
---|
614 | 624 | int xmit_lock_owner; |
---|
615 | | -#endif |
---|
616 | 625 | /* |
---|
617 | 626 | * Time (in jiffies) of last Tx |
---|
618 | 627 | */ |
---|
.. | .. |
---|
631 | 640 | } ____cacheline_aligned_in_smp; |
---|
632 | 641 | |
---|
633 | 642 | extern int sysctl_fb_tunnels_only_for_init_net; |
---|
| 643 | +extern int sysctl_devconf_inherit_init_net; |
---|
634 | 644 | |
---|
| 645 | +/* |
---|
| 646 | + * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns |
---|
| 647 | + * == 1 : For initns only |
---|
| 648 | + * == 2 : For none. |
---|
| 649 | + */ |
---|
635 | 650 | static inline bool net_has_fallback_tunnels(const struct net *net) |
---|
636 | 651 | { |
---|
637 | | - return net == &init_net || |
---|
638 | | - !IS_ENABLED(CONFIG_SYSCTL) || |
---|
639 | | - !sysctl_fb_tunnels_only_for_init_net; |
---|
| 652 | +#if IS_ENABLED(CONFIG_SYSCTL) |
---|
| 653 | + int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); |
---|
| 654 | + |
---|
| 655 | + return !fb_tunnels_only_for_init_net || |
---|
| 656 | + (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1); |
---|
| 657 | +#else |
---|
| 658 | + return true; |
---|
| 659 | +#endif |
---|
| 660 | +} |
---|
| 661 | + |
---|
| 662 | +static inline int net_inherit_devconf(void) |
---|
| 663 | +{ |
---|
| 664 | +#if IS_ENABLED(CONFIG_SYSCTL) |
---|
| 665 | + return READ_ONCE(sysctl_devconf_inherit_init_net); |
---|
| 666 | +#else |
---|
| 667 | + return 0; |
---|
| 668 | +#endif |
---|
640 | 669 | } |
---|
641 | 670 | |
---|
642 | 671 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
---|
.. | .. |
---|
663 | 692 | struct rps_map { |
---|
664 | 693 | unsigned int len; |
---|
665 | 694 | struct rcu_head rcu; |
---|
666 | | - u16 cpus[0]; |
---|
| 695 | + u16 cpus[]; |
---|
667 | 696 | }; |
---|
668 | 697 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
---|
669 | 698 | |
---|
.. | .. |
---|
685 | 714 | struct rps_dev_flow_table { |
---|
686 | 715 | unsigned int mask; |
---|
687 | 716 | struct rcu_head rcu; |
---|
688 | | - struct rps_dev_flow flows[0]; |
---|
| 717 | + struct rps_dev_flow flows[]; |
---|
689 | 718 | }; |
---|
690 | 719 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ |
---|
691 | 720 | ((_num) * sizeof(struct rps_dev_flow))) |
---|
.. | .. |
---|
703 | 732 | struct rps_sock_flow_table { |
---|
704 | 733 | u32 mask; |
---|
705 | 734 | |
---|
706 | | - u32 ents[0] ____cacheline_aligned_in_smp; |
---|
| 735 | + u32 ents[] ____cacheline_aligned_in_smp; |
---|
707 | 736 | }; |
---|
708 | 737 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) |
---|
709 | 738 | |
---|
.. | .. |
---|
722 | 751 | /* We only give a hint, preemption can change CPU under us */ |
---|
723 | 752 | val |= raw_smp_processor_id(); |
---|
724 | 753 | |
---|
725 | | - if (table->ents[index] != val) |
---|
726 | | - table->ents[index] = val; |
---|
| 754 | + /* The following WRITE_ONCE() is paired with the READ_ONCE() |
---|
| 755 | + * here, and another one in get_rps_cpu(). |
---|
| 756 | + */ |
---|
| 757 | + if (READ_ONCE(table->ents[index]) != val) |
---|
| 758 | + WRITE_ONCE(table->ents[index], val); |
---|
727 | 759 | } |
---|
728 | 760 | } |
---|
729 | 761 | |
---|
.. | .. |
---|
742 | 774 | struct kobject kobj; |
---|
743 | 775 | struct net_device *dev; |
---|
744 | 776 | struct xdp_rxq_info xdp_rxq; |
---|
| 777 | +#ifdef CONFIG_XDP_SOCKETS |
---|
| 778 | + struct xsk_buff_pool *pool; |
---|
| 779 | +#endif |
---|
745 | 780 | |
---|
746 | 781 | ANDROID_KABI_RESERVE(1); |
---|
747 | 782 | ANDROID_KABI_RESERVE(2); |
---|
.. | .. |
---|
768 | 803 | unsigned int len; |
---|
769 | 804 | unsigned int alloc_len; |
---|
770 | 805 | struct rcu_head rcu; |
---|
771 | | - u16 queues[0]; |
---|
| 806 | + u16 queues[]; |
---|
772 | 807 | }; |
---|
773 | 808 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
---|
774 | 809 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
---|
.. | .. |
---|
779 | 814 | */ |
---|
780 | 815 | struct xps_dev_maps { |
---|
781 | 816 | struct rcu_head rcu; |
---|
782 | | - struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ |
---|
| 817 | + struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ |
---|
783 | 818 | }; |
---|
784 | 819 | |
---|
785 | 820 | #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ |
---|
.. | .. |
---|
848 | 883 | TC_SETUP_QDISC_PRIO, |
---|
849 | 884 | TC_SETUP_QDISC_MQ, |
---|
850 | 885 | TC_SETUP_QDISC_ETF, |
---|
| 886 | + TC_SETUP_ROOT_QDISC, |
---|
| 887 | + TC_SETUP_QDISC_GRED, |
---|
| 888 | + TC_SETUP_QDISC_TAPRIO, |
---|
| 889 | + TC_SETUP_FT, |
---|
| 890 | + TC_SETUP_QDISC_ETS, |
---|
| 891 | + TC_SETUP_QDISC_TBF, |
---|
| 892 | + TC_SETUP_QDISC_FIFO, |
---|
851 | 893 | }; |
---|
852 | 894 | |
---|
853 | 895 | /* These structures hold the attributes of bpf state that are being passed |
---|
.. | .. |
---|
863 | 905 | */ |
---|
864 | 906 | XDP_SETUP_PROG, |
---|
865 | 907 | XDP_SETUP_PROG_HW, |
---|
866 | | - XDP_QUERY_PROG, |
---|
867 | | - XDP_QUERY_PROG_HW, |
---|
868 | 908 | /* BPF program for offload callbacks, invoked at program load time. */ |
---|
869 | | - BPF_OFFLOAD_VERIFIER_PREP, |
---|
870 | | - BPF_OFFLOAD_TRANSLATE, |
---|
871 | | - BPF_OFFLOAD_DESTROY, |
---|
872 | 909 | BPF_OFFLOAD_MAP_ALLOC, |
---|
873 | 910 | BPF_OFFLOAD_MAP_FREE, |
---|
874 | | - XDP_QUERY_XSK_UMEM, |
---|
875 | | - XDP_SETUP_XSK_UMEM, |
---|
| 911 | + XDP_SETUP_XSK_POOL, |
---|
876 | 912 | }; |
---|
877 | 913 | |
---|
878 | 914 | struct bpf_prog_offload_ops; |
---|
879 | 915 | struct netlink_ext_ack; |
---|
880 | 916 | struct xdp_umem; |
---|
| 917 | +struct xdp_dev_bulk_queue; |
---|
| 918 | +struct bpf_xdp_link; |
---|
| 919 | + |
---|
| 920 | +enum bpf_xdp_mode { |
---|
| 921 | + XDP_MODE_SKB = 0, |
---|
| 922 | + XDP_MODE_DRV = 1, |
---|
| 923 | + XDP_MODE_HW = 2, |
---|
| 924 | + __MAX_XDP_MODE |
---|
| 925 | +}; |
---|
| 926 | + |
---|
| 927 | +struct bpf_xdp_entity { |
---|
| 928 | + struct bpf_prog *prog; |
---|
| 929 | + struct bpf_xdp_link *link; |
---|
| 930 | +}; |
---|
881 | 931 | |
---|
882 | 932 | struct netdev_bpf { |
---|
883 | 933 | enum bpf_netdev_command command; |
---|
.. | .. |
---|
888 | 938 | struct bpf_prog *prog; |
---|
889 | 939 | struct netlink_ext_ack *extack; |
---|
890 | 940 | }; |
---|
891 | | - /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ |
---|
892 | | - struct { |
---|
893 | | - u32 prog_id; |
---|
894 | | - /* flags with which program was installed */ |
---|
895 | | - u32 prog_flags; |
---|
896 | | - }; |
---|
897 | | - /* BPF_OFFLOAD_VERIFIER_PREP */ |
---|
898 | | - struct { |
---|
899 | | - struct bpf_prog *prog; |
---|
900 | | - const struct bpf_prog_offload_ops *ops; /* callee set */ |
---|
901 | | - } verifier; |
---|
902 | | - /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ |
---|
903 | | - struct { |
---|
904 | | - struct bpf_prog *prog; |
---|
905 | | - } offload; |
---|
906 | 941 | /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ |
---|
907 | 942 | struct { |
---|
908 | 943 | struct bpf_offloaded_map *offmap; |
---|
909 | 944 | }; |
---|
910 | | - /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */ |
---|
| 945 | + /* XDP_SETUP_XSK_POOL */ |
---|
911 | 946 | struct { |
---|
912 | | - struct xdp_umem *umem; /* out for query*/ |
---|
913 | | - u16 queue_id; /* in for query */ |
---|
| 947 | + struct xsk_buff_pool *pool; |
---|
| 948 | + u16 queue_id; |
---|
914 | 949 | } xsk; |
---|
915 | 950 | }; |
---|
916 | 951 | }; |
---|
| 952 | + |
---|
| 953 | +/* Flags for ndo_xsk_wakeup. */ |
---|
| 954 | +#define XDP_WAKEUP_RX (1 << 0) |
---|
| 955 | +#define XDP_WAKEUP_TX (1 << 1) |
---|
917 | 956 | |
---|
918 | 957 | #ifdef CONFIG_XFRM_OFFLOAD |
---|
919 | 958 | struct xfrmdev_ops { |
---|
.. | .. |
---|
931 | 970 | }; |
---|
932 | 971 | #endif |
---|
933 | 972 | |
---|
934 | | -#if IS_ENABLED(CONFIG_TLS_DEVICE) |
---|
935 | | -enum tls_offload_ctx_dir { |
---|
936 | | - TLS_OFFLOAD_CTX_DIR_RX, |
---|
937 | | - TLS_OFFLOAD_CTX_DIR_TX, |
---|
938 | | -}; |
---|
939 | | - |
---|
940 | | -struct tls_crypto_info; |
---|
941 | | -struct tls_context; |
---|
942 | | - |
---|
943 | | -struct tlsdev_ops { |
---|
944 | | - int (*tls_dev_add)(struct net_device *netdev, struct sock *sk, |
---|
945 | | - enum tls_offload_ctx_dir direction, |
---|
946 | | - struct tls_crypto_info *crypto_info, |
---|
947 | | - u32 start_offload_tcp_sn); |
---|
948 | | - void (*tls_dev_del)(struct net_device *netdev, |
---|
949 | | - struct tls_context *ctx, |
---|
950 | | - enum tls_offload_ctx_dir direction); |
---|
951 | | - void (*tls_dev_resync_rx)(struct net_device *netdev, |
---|
952 | | - struct sock *sk, u32 seq, u64 rcd_sn); |
---|
953 | | - ANDROID_KABI_RESERVE(1); |
---|
954 | | - ANDROID_KABI_RESERVE(2); |
---|
955 | | - ANDROID_KABI_RESERVE(3); |
---|
956 | | - ANDROID_KABI_RESERVE(4); |
---|
957 | | -}; |
---|
958 | | -#endif |
---|
959 | | - |
---|
960 | 973 | struct dev_ifalias { |
---|
961 | 974 | struct rcu_head rcuhead; |
---|
962 | 975 | char ifalias[]; |
---|
| 976 | +}; |
---|
| 977 | + |
---|
| 978 | +struct devlink; |
---|
| 979 | +struct tlsdev_ops; |
---|
| 980 | + |
---|
| 981 | +struct netdev_name_node { |
---|
| 982 | + struct hlist_node hlist; |
---|
| 983 | + struct list_head list; |
---|
| 984 | + struct net_device *dev; |
---|
| 985 | + const char *name; |
---|
| 986 | +}; |
---|
| 987 | + |
---|
| 988 | +int netdev_name_node_alt_create(struct net_device *dev, const char *name); |
---|
| 989 | +int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); |
---|
| 990 | + |
---|
| 991 | +struct netdev_net_notifier { |
---|
| 992 | + struct list_head list; |
---|
| 993 | + struct notifier_block *nb; |
---|
963 | 994 | }; |
---|
964 | 995 | |
---|
965 | 996 | /* |
---|
.. | .. |
---|
1005 | 1036 | * those the driver believes to be appropriate. |
---|
1006 | 1037 | * |
---|
1007 | 1038 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
---|
1008 | | - * struct net_device *sb_dev, |
---|
1009 | | - * select_queue_fallback_t fallback); |
---|
| 1039 | + * struct net_device *sb_dev); |
---|
1010 | 1040 | * Called to decide which queue to use when device supports multiple |
---|
1011 | 1041 | * transmit queues. |
---|
1012 | 1042 | * |
---|
.. | .. |
---|
1041 | 1071 | * Called when a user wants to change the Maximum Transfer Unit |
---|
1042 | 1072 | * of a device. |
---|
1043 | 1073 | * |
---|
1044 | | - * void (*ndo_tx_timeout)(struct net_device *dev); |
---|
| 1074 | + * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); |
---|
1045 | 1075 | * Callback used when the transmitter has not made any progress |
---|
1046 | 1076 | * for dev->watchdog ticks. |
---|
1047 | 1077 | * |
---|
.. | .. |
---|
1160 | 1190 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); |
---|
1161 | 1191 | * Called to release previously enslaved netdev. |
---|
1162 | 1192 | * |
---|
| 1193 | + * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, |
---|
| 1194 | + * struct sk_buff *skb, |
---|
| 1195 | + * bool all_slaves); |
---|
| 1196 | + * Get the xmit slave of master device. If all_slaves is true, function |
---|
| 1197 | + * assume all the slaves can transmit. |
---|
| 1198 | + * |
---|
1163 | 1199 | * Feature/offload setting functions. |
---|
1164 | 1200 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
---|
1165 | 1201 | * netdev_features_t features); |
---|
.. | .. |
---|
1174 | 1210 | * |
---|
1175 | 1211 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
---|
1176 | 1212 | * struct net_device *dev, |
---|
1177 | | - * const unsigned char *addr, u16 vid, u16 flags) |
---|
| 1213 | + * const unsigned char *addr, u16 vid, u16 flags, |
---|
| 1214 | + * struct netlink_ext_ack *extack); |
---|
1178 | 1215 | * Adds an FDB entry to dev for addr. |
---|
1179 | 1216 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
---|
1180 | 1217 | * struct net_device *dev, |
---|
.. | .. |
---|
1187 | 1224 | * entries to skb and update idx with the number of entries. |
---|
1188 | 1225 | * |
---|
1189 | 1226 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
---|
1190 | | - * u16 flags) |
---|
| 1227 | + * u16 flags, struct netlink_ext_ack *extack) |
---|
1191 | 1228 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
---|
1192 | 1229 | * struct net_device *dev, u32 filter_mask, |
---|
1193 | 1230 | * int nlflags) |
---|
.. | .. |
---|
1207 | 1244 | * Called to get ID of physical port of this device. If driver does |
---|
1208 | 1245 | * not implement this, it is assumed that the hw is not able to have |
---|
1209 | 1246 | * multiple net devices on single physical port. |
---|
| 1247 | + * |
---|
| 1248 | + * int (*ndo_get_port_parent_id)(struct net_device *dev, |
---|
| 1249 | + * struct netdev_phys_item_id *ppid) |
---|
| 1250 | + * Called to get the parent ID of the physical port of this device. |
---|
1210 | 1251 | * |
---|
1211 | 1252 | * void (*ndo_udp_tunnel_add)(struct net_device *dev, |
---|
1212 | 1253 | * struct udp_tunnel_info *ti); |
---|
.. | .. |
---|
1265 | 1306 | * that got dropped are freed/returned via xdp_return_frame(). |
---|
1266 | 1307 | * Returns negative number, means general error invoking ndo, meaning |
---|
1267 | 1308 | * no frames were xmit'ed and core-caller will free all frames. |
---|
| 1309 | + * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); |
---|
| 1310 | + * This function is used to wake up the softirq, ksoftirqd or kthread |
---|
| 1311 | + * responsible for sending and/or receiving packets on a specific |
---|
| 1312 | + * queue id bound to an AF_XDP socket. The flags field specifies if |
---|
| 1313 | + * only RX, only Tx, or both should be woken up using the flags |
---|
| 1314 | + * XDP_WAKEUP_RX and XDP_WAKEUP_TX. |
---|
| 1315 | + * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); |
---|
| 1316 | + * Get devlink port instance associated with a given netdev. |
---|
| 1317 | + * Called with a reference on the netdevice and devlink locks only, |
---|
| 1318 | + * rtnl_lock is not held. |
---|
| 1319 | + * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, |
---|
| 1320 | + * int cmd); |
---|
| 1321 | + * Add, change, delete or get information on an IPv4 tunnel. |
---|
| 1322 | + * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); |
---|
| 1323 | + * If a device is paired with a peer device, return the peer instance. |
---|
| 1324 | + * The caller must be under RCU read context. |
---|
1268 | 1325 | */ |
---|
1269 | 1326 | struct net_device_ops { |
---|
1270 | 1327 | int (*ndo_init)(struct net_device *dev); |
---|
.. | .. |
---|
1278 | 1335 | netdev_features_t features); |
---|
1279 | 1336 | u16 (*ndo_select_queue)(struct net_device *dev, |
---|
1280 | 1337 | struct sk_buff *skb, |
---|
1281 | | - struct net_device *sb_dev, |
---|
1282 | | - select_queue_fallback_t fallback); |
---|
| 1338 | + struct net_device *sb_dev); |
---|
1283 | 1339 | void (*ndo_change_rx_flags)(struct net_device *dev, |
---|
1284 | 1340 | int flags); |
---|
1285 | 1341 | void (*ndo_set_rx_mode)(struct net_device *dev); |
---|
.. | .. |
---|
1294 | 1350 | int new_mtu); |
---|
1295 | 1351 | int (*ndo_neigh_setup)(struct net_device *dev, |
---|
1296 | 1352 | struct neigh_parms *); |
---|
1297 | | - void (*ndo_tx_timeout) (struct net_device *dev); |
---|
| 1353 | + void (*ndo_tx_timeout) (struct net_device *dev, |
---|
| 1354 | + unsigned int txqueue); |
---|
1298 | 1355 | |
---|
1299 | 1356 | void (*ndo_get_stats64)(struct net_device *dev, |
---|
1300 | 1357 | struct rtnl_link_stats64 *storage); |
---|
.. | .. |
---|
1340 | 1397 | struct nlattr *port[]); |
---|
1341 | 1398 | int (*ndo_get_vf_port)(struct net_device *dev, |
---|
1342 | 1399 | int vf, struct sk_buff *skb); |
---|
| 1400 | + int (*ndo_get_vf_guid)(struct net_device *dev, |
---|
| 1401 | + int vf, |
---|
| 1402 | + struct ifla_vf_guid *node_guid, |
---|
| 1403 | + struct ifla_vf_guid *port_guid); |
---|
1343 | 1404 | int (*ndo_set_vf_guid)(struct net_device *dev, |
---|
1344 | 1405 | int vf, u64 guid, |
---|
1345 | 1406 | int guid_type); |
---|
.. | .. |
---|
1384 | 1445 | struct netlink_ext_ack *extack); |
---|
1385 | 1446 | int (*ndo_del_slave)(struct net_device *dev, |
---|
1386 | 1447 | struct net_device *slave_dev); |
---|
| 1448 | + struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, |
---|
| 1449 | + struct sk_buff *skb, |
---|
| 1450 | + bool all_slaves); |
---|
1387 | 1451 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
---|
1388 | 1452 | netdev_features_t features); |
---|
1389 | 1453 | int (*ndo_set_features)(struct net_device *dev, |
---|
.. | .. |
---|
1398 | 1462 | struct net_device *dev, |
---|
1399 | 1463 | const unsigned char *addr, |
---|
1400 | 1464 | u16 vid, |
---|
1401 | | - u16 flags); |
---|
| 1465 | + u16 flags, |
---|
| 1466 | + struct netlink_ext_ack *extack); |
---|
1402 | 1467 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
---|
1403 | 1468 | struct nlattr *tb[], |
---|
1404 | 1469 | struct net_device *dev, |
---|
.. | .. |
---|
1409 | 1474 | struct net_device *dev, |
---|
1410 | 1475 | struct net_device *filter_dev, |
---|
1411 | 1476 | int *idx); |
---|
1412 | | - |
---|
| 1477 | + int (*ndo_fdb_get)(struct sk_buff *skb, |
---|
| 1478 | + struct nlattr *tb[], |
---|
| 1479 | + struct net_device *dev, |
---|
| 1480 | + const unsigned char *addr, |
---|
| 1481 | + u16 vid, u32 portid, u32 seq, |
---|
| 1482 | + struct netlink_ext_ack *extack); |
---|
1413 | 1483 | int (*ndo_bridge_setlink)(struct net_device *dev, |
---|
1414 | 1484 | struct nlmsghdr *nlh, |
---|
1415 | | - u16 flags); |
---|
| 1485 | + u16 flags, |
---|
| 1486 | + struct netlink_ext_ack *extack); |
---|
1416 | 1487 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
---|
1417 | 1488 | u32 pid, u32 seq, |
---|
1418 | 1489 | struct net_device *dev, |
---|
.. | .. |
---|
1425 | 1496 | bool new_carrier); |
---|
1426 | 1497 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
---|
1427 | 1498 | struct netdev_phys_item_id *ppid); |
---|
| 1499 | + int (*ndo_get_port_parent_id)(struct net_device *dev, |
---|
| 1500 | + struct netdev_phys_item_id *ppid); |
---|
1428 | 1501 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
---|
1429 | 1502 | char *name, size_t len); |
---|
1430 | 1503 | void (*ndo_udp_tunnel_add)(struct net_device *dev, |
---|
.. | .. |
---|
1436 | 1509 | void (*ndo_dfwd_del_station)(struct net_device *pdev, |
---|
1437 | 1510 | void *priv); |
---|
1438 | 1511 | |
---|
1439 | | - int (*ndo_get_lock_subclass)(struct net_device *dev); |
---|
1440 | 1512 | int (*ndo_set_tx_maxrate)(struct net_device *dev, |
---|
1441 | 1513 | int queue_index, |
---|
1442 | 1514 | u32 maxrate); |
---|
.. | .. |
---|
1452 | 1524 | int (*ndo_xdp_xmit)(struct net_device *dev, int n, |
---|
1453 | 1525 | struct xdp_frame **xdp, |
---|
1454 | 1526 | u32 flags); |
---|
1455 | | - int (*ndo_xsk_async_xmit)(struct net_device *dev, |
---|
1456 | | - u32 queue_id); |
---|
| 1527 | + int (*ndo_xsk_wakeup)(struct net_device *dev, |
---|
| 1528 | + u32 queue_id, u32 flags); |
---|
| 1529 | + struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); |
---|
| 1530 | + int (*ndo_tunnel_ctl)(struct net_device *dev, |
---|
| 1531 | + struct ip_tunnel_parm *p, int cmd); |
---|
| 1532 | + struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); |
---|
1457 | 1533 | |
---|
1458 | 1534 | ANDROID_KABI_RESERVE(1); |
---|
1459 | 1535 | ANDROID_KABI_RESERVE(2); |
---|
.. | .. |
---|
1576 | 1652 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER |
---|
1577 | 1653 | #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK |
---|
1578 | 1654 | |
---|
| 1655 | +/* Specifies the type of the struct net_device::ml_priv pointer */ |
---|
| 1656 | +enum netdev_ml_priv_type { |
---|
| 1657 | + ML_PRIV_NONE, |
---|
| 1658 | + ML_PRIV_CAN, |
---|
| 1659 | +}; |
---|
| 1660 | + |
---|
1579 | 1661 | /** |
---|
1580 | 1662 | * struct net_device - The DEVICE structure. |
---|
1581 | 1663 | * |
---|
.. | .. |
---|
1587 | 1669 | * (i.e. as seen by users in the "Space.c" file). It is the name |
---|
1588 | 1670 | * of the interface. |
---|
1589 | 1671 | * |
---|
1590 | | - * @name_hlist: Device name hash chain, please keep it close to name[] |
---|
| 1672 | + * @name_node: Name hashlist node |
---|
1591 | 1673 | * @ifalias: SNMP alias |
---|
1592 | 1674 | * @mem_end: Shared memory end |
---|
1593 | 1675 | * @mem_start: Shared memory start |
---|
.. | .. |
---|
1616 | 1698 | * and drivers will need to set them appropriately. |
---|
1617 | 1699 | * |
---|
1618 | 1700 | * @mpls_features: Mask of features inheritable by MPLS |
---|
| 1701 | + * @gso_partial_features: value(s) from NETIF_F_GSO\* |
---|
1619 | 1702 | * |
---|
1620 | 1703 | * @ifindex: interface index |
---|
1621 | 1704 | * @group: The group the device belongs to |
---|
.. | .. |
---|
1640 | 1723 | * @netdev_ops: Includes several pointers to callbacks, |
---|
1641 | 1724 | * if one wants to override the ndo_*() functions |
---|
1642 | 1725 | * @ethtool_ops: Management operations |
---|
| 1726 | + * @l3mdev_ops: Layer 3 master device operations |
---|
1643 | 1727 | * @ndisc_ops: Includes callbacks for different IPv6 neighbour |
---|
1644 | 1728 | * discovery handling. Necessary for e.g. 6LoWPAN. |
---|
| 1729 | + * @xfrmdev_ops: Transformation offload operations |
---|
| 1730 | + * @tlsdev_ops: Transport Layer Security offload operations |
---|
1645 | 1731 | * @header_ops: Includes callbacks for creating,parsing,caching,etc |
---|
1646 | 1732 | * of Layer 2 headers. |
---|
1647 | 1733 | * |
---|
.. | .. |
---|
1680 | 1766 | * @dev_port: Used to differentiate devices that share |
---|
1681 | 1767 | * the same function |
---|
1682 | 1768 | * @addr_list_lock: XXX: need comments on this one |
---|
| 1769 | + * @name_assign_type: network interface name assignment type |
---|
1683 | 1770 | * @uc_promisc: Counter that indicates promiscuous mode |
---|
1684 | 1771 | * has been enabled due to the need to listen to |
---|
1685 | 1772 | * additional unicast addresses in a device that |
---|
.. | .. |
---|
1698 | 1785 | * @tipc_ptr: TIPC specific data |
---|
1699 | 1786 | * @atalk_ptr: AppleTalk link |
---|
1700 | 1787 | * @ip_ptr: IPv4 specific data |
---|
1701 | | - * @dn_ptr: DECnet specific data |
---|
1702 | 1788 | * @ip6_ptr: IPv6 specific data |
---|
1703 | 1789 | * @ax25_ptr: AX.25 specific data |
---|
1704 | 1790 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering |
---|
| 1791 | + * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network |
---|
| 1792 | + * device struct |
---|
| 1793 | + * @mpls_ptr: mpls_dev struct pointer |
---|
1705 | 1794 | * |
---|
1706 | 1795 | * @dev_addr: Hw address (before bcast, |
---|
1707 | 1796 | * because most packets are unicast) |
---|
.. | .. |
---|
1710 | 1799 | * @num_rx_queues: Number of RX queues |
---|
1711 | 1800 | * allocated at register_netdev() time |
---|
1712 | 1801 | * @real_num_rx_queues: Number of RX queues currently active in device |
---|
| 1802 | + * @xdp_prog: XDP sockets filter program pointer |
---|
| 1803 | + * @gro_flush_timeout: timeout for GRO layer in NAPI |
---|
| 1804 | + * @napi_defer_hard_irqs: If not zero, provides a counter that would |
---|
| 1805 | + * allow to avoid NIC hard IRQ, on busy queues. |
---|
1713 | 1806 | * |
---|
1714 | 1807 | * @rx_handler: handler for received packets |
---|
1715 | 1808 | * @rx_handler_data: XXX: need comments on this one |
---|
1716 | 1809 | * @miniq_ingress: ingress/clsact qdisc specific data for |
---|
1717 | 1810 | * ingress processing |
---|
1718 | 1811 | * @ingress_queue: XXX: need comments on this one |
---|
| 1812 | + * @nf_hooks_ingress: netfilter hooks executed for ingress packets |
---|
1719 | 1813 | * @broadcast: hw bcast address |
---|
1720 | 1814 | * |
---|
1721 | 1815 | * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, |
---|
.. | .. |
---|
1730 | 1824 | * @qdisc: Root qdisc from userspace point of view |
---|
1731 | 1825 | * @tx_queue_len: Max frames per queue allowed |
---|
1732 | 1826 | * @tx_global_lock: XXX: need comments on this one |
---|
| 1827 | + * @xdp_bulkq: XDP device bulk queue |
---|
| 1828 | + * @xps_cpus_map: all CPUs map for XPS device |
---|
| 1829 | + * @xps_rxqs_map: all RXQs map for XPS device |
---|
1733 | 1830 | * |
---|
1734 | 1831 | * @xps_maps: XXX: need comments on this one |
---|
1735 | 1832 | * @miniq_egress: clsact qdisc specific data for |
---|
1736 | 1833 | * egress processing |
---|
| 1834 | + * @qdisc_hash: qdisc hash table |
---|
1737 | 1835 | * @watchdog_timeo: Represents the timeout that is used by |
---|
1738 | 1836 | * the watchdog (see dev_watchdog()) |
---|
1739 | 1837 | * @watchdog_timer: List of timers |
---|
1740 | 1838 | * |
---|
| 1839 | + * @proto_down_reason: reason a netdev interface is held down |
---|
1741 | 1840 | * @pcpu_refcnt: Number of references to this device |
---|
1742 | 1841 | * @todo_list: Delayed register/unregister |
---|
1743 | 1842 | * @link_watch_list: XXX: need comments on this one |
---|
.. | .. |
---|
1753 | 1852 | * @nd_net: Network namespace this network device is inside |
---|
1754 | 1853 | * |
---|
1755 | 1854 | * @ml_priv: Mid-layer private |
---|
| 1855 | + * @ml_priv_type: Mid-layer private type |
---|
1756 | 1856 | * @lstats: Loopback statistics |
---|
1757 | 1857 | * @tstats: Tunnel statistics |
---|
1758 | 1858 | * @dstats: Dummy statistics |
---|
.. | .. |
---|
1793 | 1893 | * |
---|
1794 | 1894 | * @wol_enabled: Wake-on-LAN is enabled |
---|
1795 | 1895 | * |
---|
| 1896 | + * @net_notifier_list: List of per-net netdev notifier block |
---|
| 1897 | + * that follow this device when it is moved |
---|
| 1898 | + * to another network namespace. |
---|
| 1899 | + * |
---|
| 1900 | + * @macsec_ops: MACsec offloading ops |
---|
| 1901 | + * |
---|
| 1902 | + * @udp_tunnel_nic_info: static structure describing the UDP tunnel |
---|
| 1903 | + * offload capabilities of the device |
---|
| 1904 | + * @udp_tunnel_nic: UDP tunnel offload state |
---|
| 1905 | + * @xdp_state: stores info on attached XDP BPF programs |
---|
| 1906 | + * |
---|
| 1907 | + * @nested_level: Used as as a parameter of spin_lock_nested() of |
---|
| 1908 | + * dev->addr_list_lock. |
---|
| 1909 | + * @unlink_list: As netif_addr_lock() can be called recursively, |
---|
| 1910 | + * keep a list of interfaces to be deleted. |
---|
| 1911 | + * |
---|
1796 | 1912 | * FIXME: cleanup struct net_device such that network protocol info |
---|
1797 | 1913 | * moves out. |
---|
1798 | 1914 | */ |
---|
1799 | 1915 | |
---|
1800 | 1916 | struct net_device { |
---|
1801 | 1917 | char name[IFNAMSIZ]; |
---|
1802 | | - struct hlist_node name_hlist; |
---|
| 1918 | + struct netdev_name_node *name_node; |
---|
1803 | 1919 | struct dev_ifalias __rcu *ifalias; |
---|
1804 | 1920 | /* |
---|
1805 | 1921 | * I/O specific fields |
---|
.. | .. |
---|
1857 | 1973 | #endif |
---|
1858 | 1974 | const struct net_device_ops *netdev_ops; |
---|
1859 | 1975 | const struct ethtool_ops *ethtool_ops; |
---|
1860 | | -#ifdef CONFIG_NET_SWITCHDEV |
---|
1861 | | - const struct switchdev_ops *switchdev_ops; |
---|
1862 | | -#endif |
---|
1863 | 1976 | #ifdef CONFIG_NET_L3_MASTER_DEV |
---|
1864 | 1977 | const struct l3mdev_ops *l3mdev_ops; |
---|
1865 | 1978 | #endif |
---|
.. | .. |
---|
1900 | 2013 | unsigned short type; |
---|
1901 | 2014 | unsigned short hard_header_len; |
---|
1902 | 2015 | unsigned char min_header_len; |
---|
| 2016 | + unsigned char name_assign_type; |
---|
1903 | 2017 | |
---|
1904 | 2018 | unsigned short needed_headroom; |
---|
1905 | 2019 | unsigned short needed_tailroom; |
---|
.. | .. |
---|
1910 | 2024 | unsigned char addr_len; |
---|
1911 | 2025 | unsigned char upper_level; |
---|
1912 | 2026 | unsigned char lower_level; |
---|
| 2027 | + |
---|
1913 | 2028 | unsigned short neigh_priv_len; |
---|
1914 | 2029 | unsigned short dev_id; |
---|
1915 | 2030 | unsigned short dev_port; |
---|
1916 | 2031 | spinlock_t addr_list_lock; |
---|
1917 | | - unsigned char name_assign_type; |
---|
1918 | | - bool uc_promisc; |
---|
| 2032 | + |
---|
1919 | 2033 | struct netdev_hw_addr_list uc; |
---|
1920 | 2034 | struct netdev_hw_addr_list mc; |
---|
1921 | 2035 | struct netdev_hw_addr_list dev_addrs; |
---|
.. | .. |
---|
1923 | 2037 | #ifdef CONFIG_SYSFS |
---|
1924 | 2038 | struct kset *queues_kset; |
---|
1925 | 2039 | #endif |
---|
| 2040 | +#ifdef CONFIG_LOCKDEP |
---|
| 2041 | + struct list_head unlink_list; |
---|
| 2042 | +#endif |
---|
1926 | 2043 | unsigned int promiscuity; |
---|
1927 | 2044 | unsigned int allmulti; |
---|
| 2045 | + bool uc_promisc; |
---|
| 2046 | +#ifdef CONFIG_LOCKDEP |
---|
| 2047 | + unsigned char nested_level; |
---|
| 2048 | +#endif |
---|
1928 | 2049 | |
---|
1929 | 2050 | |
---|
1930 | 2051 | /* Protocol-specific pointers */ |
---|
.. | .. |
---|
1942 | 2063 | void *atalk_ptr; |
---|
1943 | 2064 | #endif |
---|
1944 | 2065 | struct in_device __rcu *ip_ptr; |
---|
1945 | | -#if IS_ENABLED(CONFIG_DECNET) |
---|
1946 | | - struct dn_dev __rcu *dn_ptr; |
---|
1947 | | -#endif |
---|
1948 | 2066 | struct inet6_dev __rcu *ip6_ptr; |
---|
1949 | 2067 | #if IS_ENABLED(CONFIG_AX25) |
---|
1950 | 2068 | void *ax25_ptr; |
---|
.. | .. |
---|
1967 | 2085 | |
---|
1968 | 2086 | struct bpf_prog __rcu *xdp_prog; |
---|
1969 | 2087 | unsigned long gro_flush_timeout; |
---|
| 2088 | + int napi_defer_hard_irqs; |
---|
1970 | 2089 | rx_handler_func_t __rcu *rx_handler; |
---|
1971 | 2090 | void __rcu *rx_handler_data; |
---|
1972 | 2091 | |
---|
.. | .. |
---|
1990 | 2109 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
---|
1991 | 2110 | unsigned int num_tx_queues; |
---|
1992 | 2111 | unsigned int real_num_tx_queues; |
---|
1993 | | - struct Qdisc *qdisc; |
---|
1994 | | -#ifdef CONFIG_NET_SCHED |
---|
1995 | | - DECLARE_HASHTABLE (qdisc_hash, 4); |
---|
1996 | | -#endif |
---|
| 2112 | + struct Qdisc __rcu *qdisc; |
---|
1997 | 2113 | unsigned int tx_queue_len; |
---|
1998 | 2114 | spinlock_t tx_global_lock; |
---|
1999 | | - int watchdog_timeo; |
---|
| 2115 | + |
---|
| 2116 | + struct xdp_dev_bulk_queue __percpu *xdp_bulkq; |
---|
2000 | 2117 | |
---|
2001 | 2118 | #ifdef CONFIG_XPS |
---|
2002 | 2119 | struct xps_dev_maps __rcu *xps_cpus_map; |
---|
.. | .. |
---|
2006 | 2123 | struct mini_Qdisc __rcu *miniq_egress; |
---|
2007 | 2124 | #endif |
---|
2008 | 2125 | |
---|
| 2126 | +#ifdef CONFIG_NET_SCHED |
---|
| 2127 | + DECLARE_HASHTABLE (qdisc_hash, 4); |
---|
| 2128 | +#endif |
---|
2009 | 2129 | /* These may be needed for future network-power-down code. */ |
---|
2010 | 2130 | struct timer_list watchdog_timer; |
---|
| 2131 | + int watchdog_timeo; |
---|
2011 | 2132 | |
---|
2012 | | - int __percpu *pcpu_refcnt; |
---|
| 2133 | + u32 proto_down_reason; |
---|
| 2134 | + |
---|
2013 | 2135 | struct list_head todo_list; |
---|
| 2136 | + int __percpu *pcpu_refcnt; |
---|
2014 | 2137 | |
---|
2015 | 2138 | struct list_head link_watch_list; |
---|
2016 | 2139 | |
---|
.. | .. |
---|
2039 | 2162 | possible_net_t nd_net; |
---|
2040 | 2163 | |
---|
2041 | 2164 | /* mid-layer private */ |
---|
| 2165 | + void *ml_priv; |
---|
| 2166 | + enum netdev_ml_priv_type ml_priv_type; |
---|
| 2167 | + |
---|
2042 | 2168 | union { |
---|
2043 | | - void *ml_priv; |
---|
2044 | 2169 | struct pcpu_lstats __percpu *lstats; |
---|
2045 | 2170 | struct pcpu_sw_netstats __percpu *tstats; |
---|
2046 | 2171 | struct pcpu_dstats __percpu *dstats; |
---|
2047 | | - struct pcpu_vstats __percpu *vstats; |
---|
2048 | 2172 | }; |
---|
2049 | 2173 | |
---|
2050 | 2174 | #if IS_ENABLED(CONFIG_GARP) |
---|
.. | .. |
---|
2086 | 2210 | bool proto_down; |
---|
2087 | 2211 | unsigned wol_enabled:1; |
---|
2088 | 2212 | |
---|
| 2213 | + struct list_head net_notifier_list; |
---|
| 2214 | + |
---|
| 2215 | +#if IS_ENABLED(CONFIG_MACSEC) |
---|
| 2216 | + /* MACsec management functions */ |
---|
| 2217 | + const struct macsec_ops *macsec_ops; |
---|
| 2218 | +#endif |
---|
| 2219 | + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; |
---|
| 2220 | + struct udp_tunnel_nic *udp_tunnel_nic; |
---|
| 2221 | + |
---|
| 2222 | + /* protected by rtnl_lock */ |
---|
| 2223 | + struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; |
---|
| 2224 | + |
---|
2089 | 2225 | ANDROID_KABI_RESERVE(1); |
---|
2090 | 2226 | ANDROID_KABI_RESERVE(2); |
---|
2091 | 2227 | ANDROID_KABI_RESERVE(3); |
---|
.. | .. |
---|
2094 | 2230 | ANDROID_KABI_RESERVE(6); |
---|
2095 | 2231 | ANDROID_KABI_RESERVE(7); |
---|
2096 | 2232 | ANDROID_KABI_RESERVE(8); |
---|
2097 | | - |
---|
2098 | 2233 | }; |
---|
2099 | 2234 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
---|
2100 | 2235 | |
---|
.. | .. |
---|
2132 | 2267 | int netdev_get_num_tc(struct net_device *dev) |
---|
2133 | 2268 | { |
---|
2134 | 2269 | return dev->num_tc; |
---|
| 2270 | +} |
---|
| 2271 | + |
---|
| 2272 | +static inline void net_prefetch(void *p) |
---|
| 2273 | +{ |
---|
| 2274 | + prefetch(p); |
---|
| 2275 | +#if L1_CACHE_BYTES < 128 |
---|
| 2276 | + prefetch((u8 *)p + L1_CACHE_BYTES); |
---|
| 2277 | +#endif |
---|
| 2278 | +} |
---|
| 2279 | + |
---|
| 2280 | +static inline void net_prefetchw(void *p) |
---|
| 2281 | +{ |
---|
| 2282 | + prefetchw(p); |
---|
| 2283 | +#if L1_CACHE_BYTES < 128 |
---|
| 2284 | + prefetchw((u8 *)p + L1_CACHE_BYTES); |
---|
| 2285 | +#endif |
---|
2135 | 2286 | } |
---|
2136 | 2287 | |
---|
2137 | 2288 | void netdev_unbind_sb_channel(struct net_device *dev, |
---|
.. | .. |
---|
2181 | 2332 | (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ |
---|
2182 | 2333 | (dev)->qdisc_running_key = &qdisc_running_key; \ |
---|
2183 | 2334 | lockdep_set_class(&(dev)->addr_list_lock, \ |
---|
2184 | | - &dev_addr_list_lock_key); \ |
---|
| 2335 | + &dev_addr_list_lock_key); \ |
---|
2185 | 2336 | for (i = 0; i < (dev)->num_tx_queues; i++) \ |
---|
2186 | 2337 | lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ |
---|
2187 | 2338 | &qdisc_xmit_lock_key); \ |
---|
2188 | 2339 | } |
---|
2189 | 2340 | |
---|
2190 | | -struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
---|
2191 | | - struct sk_buff *skb, |
---|
2192 | | - struct net_device *sb_dev); |
---|
| 2341 | +u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, |
---|
| 2342 | + struct net_device *sb_dev); |
---|
| 2343 | +struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, |
---|
| 2344 | + struct sk_buff *skb, |
---|
| 2345 | + struct net_device *sb_dev); |
---|
2193 | 2346 | |
---|
2194 | 2347 | /* returns the headroom that the master device needs to take in account |
---|
2195 | 2348 | * when forwarding to this dev |
---|
.. | .. |
---|
2209 | 2362 | static inline void netdev_reset_rx_headroom(struct net_device *dev) |
---|
2210 | 2363 | { |
---|
2211 | 2364 | netdev_set_rx_headroom(dev, -1); |
---|
| 2365 | +} |
---|
| 2366 | + |
---|
| 2367 | +static inline void *netdev_get_ml_priv(struct net_device *dev, |
---|
| 2368 | + enum netdev_ml_priv_type type) |
---|
| 2369 | +{ |
---|
| 2370 | + if (dev->ml_priv_type != type) |
---|
| 2371 | + return NULL; |
---|
| 2372 | + |
---|
| 2373 | + return dev->ml_priv; |
---|
| 2374 | +} |
---|
| 2375 | + |
---|
| 2376 | +static inline void netdev_set_ml_priv(struct net_device *dev, |
---|
| 2377 | + void *ml_priv, |
---|
| 2378 | + enum netdev_ml_priv_type type) |
---|
| 2379 | +{ |
---|
| 2380 | + WARN(dev->ml_priv_type && dev->ml_priv_type != type, |
---|
| 2381 | + "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", |
---|
| 2382 | + dev->ml_priv_type, type); |
---|
| 2383 | + WARN(!dev->ml_priv_type && dev->ml_priv, |
---|
| 2384 | + "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); |
---|
| 2385 | + |
---|
| 2386 | + dev->ml_priv = ml_priv; |
---|
| 2387 | + dev->ml_priv_type = type; |
---|
2212 | 2388 | } |
---|
2213 | 2389 | |
---|
2214 | 2390 | /* |
---|
.. | .. |
---|
2287 | 2463 | } |
---|
2288 | 2464 | |
---|
2289 | 2465 | /** |
---|
| 2466 | + * __netif_napi_del - remove a NAPI context |
---|
| 2467 | + * @napi: NAPI context |
---|
| 2468 | + * |
---|
| 2469 | + * Warning: caller must observe RCU grace period before freeing memory |
---|
| 2470 | + * containing @napi. Drivers might want to call this helper to combine |
---|
| 2471 | + * all the needed RCU grace periods into a single one. |
---|
| 2472 | + */ |
---|
| 2473 | +void __netif_napi_del(struct napi_struct *napi); |
---|
| 2474 | + |
---|
| 2475 | +/** |
---|
2290 | 2476 | * netif_napi_del - remove a NAPI context |
---|
2291 | 2477 | * @napi: NAPI context |
---|
2292 | 2478 | * |
---|
2293 | 2479 | * netif_napi_del() removes a NAPI context from the network device NAPI list |
---|
2294 | 2480 | */ |
---|
2295 | | -void netif_napi_del(struct napi_struct *napi); |
---|
| 2481 | +static inline void netif_napi_del(struct napi_struct *napi) |
---|
| 2482 | +{ |
---|
| 2483 | + __netif_napi_del(napi); |
---|
| 2484 | + synchronize_net(); |
---|
| 2485 | +} |
---|
2296 | 2486 | |
---|
2297 | 2487 | struct napi_gro_cb { |
---|
2298 | 2488 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
---|
.. | .. |
---|
2351 | 2541 | /* Number of gro_receive callbacks this packet already went through */ |
---|
2352 | 2542 | u8 recursion_counter:4; |
---|
2353 | 2543 | |
---|
2354 | | - /* 1 bit hole */ |
---|
| 2544 | + /* GRO is done by frag_list pointer chaining. */ |
---|
| 2545 | + u8 is_flist:1; |
---|
2355 | 2546 | |
---|
2356 | 2547 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
---|
2357 | 2548 | __wsum csum; |
---|
.. | .. |
---|
2398 | 2589 | |
---|
2399 | 2590 | struct packet_type { |
---|
2400 | 2591 | __be16 type; /* This is really htons(ether_type). */ |
---|
| 2592 | + bool ignore_outgoing; |
---|
2401 | 2593 | struct net_device *dev; /* NULL is wildcarded here */ |
---|
2402 | 2594 | int (*func) (struct sk_buff *, |
---|
2403 | 2595 | struct net_device *, |
---|
.. | .. |
---|
2440 | 2632 | u64 tx_packets; |
---|
2441 | 2633 | u64 tx_bytes; |
---|
2442 | 2634 | struct u64_stats_sync syncp; |
---|
2443 | | -}; |
---|
| 2635 | +} __aligned(4 * sizeof(u64)); |
---|
| 2636 | + |
---|
| 2637 | +struct pcpu_lstats { |
---|
| 2638 | + u64_stats_t packets; |
---|
| 2639 | + u64_stats_t bytes; |
---|
| 2640 | + struct u64_stats_sync syncp; |
---|
| 2641 | +} __aligned(2 * sizeof(u64)); |
---|
| 2642 | + |
---|
| 2643 | +void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); |
---|
| 2644 | + |
---|
| 2645 | +static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) |
---|
| 2646 | +{ |
---|
| 2647 | + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); |
---|
| 2648 | + |
---|
| 2649 | + u64_stats_update_begin(&tstats->syncp); |
---|
| 2650 | + tstats->rx_bytes += len; |
---|
| 2651 | + tstats->rx_packets++; |
---|
| 2652 | + u64_stats_update_end(&tstats->syncp); |
---|
| 2653 | +} |
---|
| 2654 | + |
---|
| 2655 | +static inline void dev_lstats_add(struct net_device *dev, unsigned int len) |
---|
| 2656 | +{ |
---|
| 2657 | + struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); |
---|
| 2658 | + |
---|
| 2659 | + u64_stats_update_begin(&lstats->syncp); |
---|
| 2660 | + u64_stats_add(&lstats->bytes, len); |
---|
| 2661 | + u64_stats_inc(&lstats->packets); |
---|
| 2662 | + u64_stats_update_end(&lstats->syncp); |
---|
| 2663 | +} |
---|
2444 | 2664 | |
---|
2445 | 2665 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
---|
2446 | 2666 | ({ \ |
---|
.. | .. |
---|
2505 | 2725 | NETDEV_REGISTER, |
---|
2506 | 2726 | NETDEV_UNREGISTER, |
---|
2507 | 2727 | NETDEV_CHANGEMTU, /* notify after mtu change happened */ |
---|
2508 | | - NETDEV_CHANGEADDR, |
---|
| 2728 | + NETDEV_CHANGEADDR, /* notify after the address change */ |
---|
| 2729 | + NETDEV_PRE_CHANGEADDR, /* notify before the address change */ |
---|
2509 | 2730 | NETDEV_GOING_DOWN, |
---|
2510 | 2731 | NETDEV_CHANGENAME, |
---|
2511 | 2732 | NETDEV_FEAT_CHANGE, |
---|
.. | .. |
---|
2536 | 2757 | |
---|
2537 | 2758 | int register_netdevice_notifier(struct notifier_block *nb); |
---|
2538 | 2759 | int unregister_netdevice_notifier(struct notifier_block *nb); |
---|
| 2760 | +int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); |
---|
| 2761 | +int unregister_netdevice_notifier_net(struct net *net, |
---|
| 2762 | + struct notifier_block *nb); |
---|
| 2763 | +int register_netdevice_notifier_dev_net(struct net_device *dev, |
---|
| 2764 | + struct notifier_block *nb, |
---|
| 2765 | + struct netdev_net_notifier *nn); |
---|
| 2766 | +int unregister_netdevice_notifier_dev_net(struct net_device *dev, |
---|
| 2767 | + struct notifier_block *nb, |
---|
| 2768 | + struct netdev_net_notifier *nn); |
---|
2539 | 2769 | |
---|
2540 | 2770 | struct netdev_notifier_info { |
---|
2541 | 2771 | struct net_device *dev; |
---|
.. | .. |
---|
2565 | 2795 | struct netdev_notifier_changelowerstate_info { |
---|
2566 | 2796 | struct netdev_notifier_info info; /* must be first */ |
---|
2567 | 2797 | void *lower_state_info; /* is lower dev state */ |
---|
| 2798 | +}; |
---|
| 2799 | + |
---|
| 2800 | +struct netdev_notifier_pre_changeaddr_info { |
---|
| 2801 | + struct netdev_notifier_info info; /* must be first */ |
---|
| 2802 | + const unsigned char *dev_addr; |
---|
2568 | 2803 | }; |
---|
2569 | 2804 | |
---|
2570 | 2805 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
---|
.. | .. |
---|
2601 | 2836 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) |
---|
2602 | 2837 | #define for_each_netdev_continue(net, d) \ |
---|
2603 | 2838 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) |
---|
| 2839 | +#define for_each_netdev_continue_reverse(net, d) \ |
---|
| 2840 | + list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ |
---|
| 2841 | + dev_list) |
---|
2604 | 2842 | #define for_each_netdev_continue_rcu(net, d) \ |
---|
2605 | 2843 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) |
---|
2606 | 2844 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
---|
.. | .. |
---|
2661 | 2899 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); |
---|
2662 | 2900 | struct net_device *__dev_get_by_name(struct net *net, const char *name); |
---|
2663 | 2901 | int dev_alloc_name(struct net_device *dev, const char *name); |
---|
2664 | | -int dev_open(struct net_device *dev); |
---|
| 2902 | +int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); |
---|
2665 | 2903 | void dev_close(struct net_device *dev); |
---|
2666 | 2904 | void dev_close_many(struct list_head *head, bool unlink); |
---|
2667 | 2905 | void dev_disable_lro(struct net_device *dev); |
---|
2668 | 2906 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
---|
2669 | 2907 | u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, |
---|
2670 | | - struct net_device *sb_dev, |
---|
2671 | | - select_queue_fallback_t fallback); |
---|
| 2908 | + struct net_device *sb_dev); |
---|
2672 | 2909 | u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, |
---|
2673 | | - struct net_device *sb_dev, |
---|
2674 | | - select_queue_fallback_t fallback); |
---|
| 2910 | + struct net_device *sb_dev); |
---|
| 2911 | + |
---|
2675 | 2912 | int dev_queue_xmit(struct sk_buff *skb); |
---|
2676 | 2913 | int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); |
---|
2677 | | -int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
---|
| 2914 | +int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
---|
| 2915 | + |
---|
| 2916 | +static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) |
---|
| 2917 | +{ |
---|
| 2918 | + int ret; |
---|
| 2919 | + |
---|
| 2920 | + ret = __dev_direct_xmit(skb, queue_id); |
---|
| 2921 | + if (!dev_xmit_complete(ret)) |
---|
| 2922 | + kfree_skb(skb); |
---|
| 2923 | + return ret; |
---|
| 2924 | +} |
---|
| 2925 | + |
---|
2678 | 2926 | int register_netdevice(struct net_device *dev); |
---|
2679 | 2927 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); |
---|
2680 | 2928 | void unregister_netdevice_many(struct list_head *head); |
---|
.. | .. |
---|
2686 | 2934 | int netdev_refcnt_read(const struct net_device *dev); |
---|
2687 | 2935 | void free_netdev(struct net_device *dev); |
---|
2688 | 2936 | void netdev_freemem(struct net_device *dev); |
---|
2689 | | -void synchronize_net(void); |
---|
2690 | 2937 | int init_dummy_netdev(struct net_device *dev); |
---|
2691 | 2938 | |
---|
| 2939 | +struct net_device *netdev_get_xmit_slave(struct net_device *dev, |
---|
| 2940 | + struct sk_buff *skb, |
---|
| 2941 | + bool all_slaves); |
---|
2692 | 2942 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
---|
2693 | 2943 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); |
---|
2694 | 2944 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
---|
.. | .. |
---|
2696 | 2946 | int netdev_get_name(struct net *net, char *name, int ifindex); |
---|
2697 | 2947 | int dev_restart(struct net_device *dev); |
---|
2698 | 2948 | int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); |
---|
| 2949 | +int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); |
---|
2699 | 2950 | |
---|
2700 | 2951 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) |
---|
2701 | 2952 | { |
---|
.. | .. |
---|
2832 | 3083 | } |
---|
2833 | 3084 | |
---|
2834 | 3085 | static inline void __skb_gro_checksum_convert(struct sk_buff *skb, |
---|
2835 | | - __sum16 check, __wsum pseudo) |
---|
| 3086 | + __wsum pseudo) |
---|
2836 | 3087 | { |
---|
2837 | 3088 | NAPI_GRO_CB(skb)->csum = ~pseudo; |
---|
2838 | 3089 | NAPI_GRO_CB(skb)->csum_valid = 1; |
---|
2839 | 3090 | } |
---|
2840 | 3091 | |
---|
2841 | | -#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ |
---|
| 3092 | +#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \ |
---|
2842 | 3093 | do { \ |
---|
2843 | 3094 | if (__skb_gro_checksum_convert_check(skb)) \ |
---|
2844 | | - __skb_gro_checksum_convert(skb, check, \ |
---|
| 3095 | + __skb_gro_checksum_convert(skb, \ |
---|
2845 | 3096 | compute_pseudo(skb, proto)); \ |
---|
2846 | 3097 | } while (0) |
---|
2847 | 3098 | |
---|
.. | .. |
---|
2964 | 3215 | return dev->header_ops->parse(skb, haddr); |
---|
2965 | 3216 | } |
---|
2966 | 3217 | |
---|
| 3218 | +static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) |
---|
| 3219 | +{ |
---|
| 3220 | + const struct net_device *dev = skb->dev; |
---|
| 3221 | + |
---|
| 3222 | + if (!dev->header_ops || !dev->header_ops->parse_protocol) |
---|
| 3223 | + return 0; |
---|
| 3224 | + return dev->header_ops->parse_protocol(skb); |
---|
| 3225 | +} |
---|
| 3226 | + |
---|
2967 | 3227 | /* ll_header must have at least hard_header_len allocated */ |
---|
2968 | 3228 | static inline bool dev_validate_header(const struct net_device *dev, |
---|
2969 | 3229 | char *ll_header, int len) |
---|
.. | .. |
---|
2984 | 3244 | return false; |
---|
2985 | 3245 | } |
---|
2986 | 3246 | |
---|
2987 | | -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, |
---|
2988 | | - int len, int size); |
---|
2989 | | -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
---|
2990 | | -static inline int unregister_gifconf(unsigned int family) |
---|
| 3247 | +static inline bool dev_has_header(const struct net_device *dev) |
---|
2991 | 3248 | { |
---|
2992 | | - return register_gifconf(family, NULL); |
---|
| 3249 | + return dev->header_ops && dev->header_ops->create; |
---|
2993 | 3250 | } |
---|
2994 | 3251 | |
---|
2995 | 3252 | #ifdef CONFIG_NET_FLOW_LIMIT |
---|
.. | .. |
---|
3010 | 3267 | */ |
---|
3011 | 3268 | struct softnet_data { |
---|
3012 | 3269 | struct list_head poll_list; |
---|
3013 | | - struct napi_struct *current_napi; |
---|
3014 | 3270 | struct sk_buff_head process_queue; |
---|
3015 | 3271 | |
---|
3016 | 3272 | /* stats */ |
---|
3017 | 3273 | unsigned int processed; |
---|
3018 | 3274 | unsigned int time_squeeze; |
---|
3019 | 3275 | unsigned int received_rps; |
---|
3020 | | - /* unused partner variable for ABI alignment */ |
---|
3021 | | - unsigned int gro_coalesced; |
---|
3022 | | - |
---|
3023 | 3276 | #ifdef CONFIG_RPS |
---|
3024 | 3277 | struct softnet_data *rps_ipi_list; |
---|
3025 | 3278 | #endif |
---|
.. | .. |
---|
3052 | 3305 | unsigned int dropped; |
---|
3053 | 3306 | struct sk_buff_head input_pkt_queue; |
---|
3054 | 3307 | struct napi_struct backlog; |
---|
3055 | | - struct sk_buff_head tofree_queue; |
---|
3056 | 3308 | |
---|
3057 | 3309 | }; |
---|
3058 | 3310 | |
---|
.. | .. |
---|
3071 | 3323 | #endif |
---|
3072 | 3324 | } |
---|
3073 | 3325 | |
---|
3074 | | -#define XMIT_RECURSION_LIMIT 8 |
---|
3075 | 3326 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
---|
3076 | | - |
---|
3077 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
3078 | | -static inline int dev_recursion_level(void) |
---|
3079 | | -{ |
---|
3080 | | - return current->xmit_recursion; |
---|
3081 | | -} |
---|
3082 | | - |
---|
3083 | | -static inline bool dev_xmit_recursion(void) |
---|
3084 | | -{ |
---|
3085 | | - return unlikely(current->xmit_recursion > |
---|
3086 | | - XMIT_RECURSION_LIMIT); |
---|
3087 | | -} |
---|
3088 | | - |
---|
3089 | | -static inline void dev_xmit_recursion_inc(void) |
---|
3090 | | -{ |
---|
3091 | | - current->xmit_recursion++; |
---|
3092 | | -} |
---|
3093 | | - |
---|
3094 | | -static inline void dev_xmit_recursion_dec(void) |
---|
3095 | | -{ |
---|
3096 | | - current->xmit_recursion--; |
---|
3097 | | -} |
---|
3098 | | - |
---|
3099 | | -#else |
---|
3100 | 3327 | |
---|
3101 | 3328 | static inline int dev_recursion_level(void) |
---|
3102 | 3329 | { |
---|
3103 | 3330 | return this_cpu_read(softnet_data.xmit.recursion); |
---|
3104 | 3331 | } |
---|
3105 | 3332 | |
---|
| 3333 | +#define XMIT_RECURSION_LIMIT 8 |
---|
3106 | 3334 | static inline bool dev_xmit_recursion(void) |
---|
3107 | 3335 | { |
---|
3108 | 3336 | return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > |
---|
.. | .. |
---|
3118 | 3346 | { |
---|
3119 | 3347 | __this_cpu_dec(softnet_data.xmit.recursion); |
---|
3120 | 3348 | } |
---|
3121 | | -#endif |
---|
3122 | 3349 | |
---|
3123 | 3350 | void __netif_schedule(struct Qdisc *q); |
---|
3124 | 3351 | void netif_schedule_queue(struct netdev_queue *txq); |
---|
.. | .. |
---|
3285 | 3512 | #endif |
---|
3286 | 3513 | } |
---|
3287 | 3514 | |
---|
| 3515 | +/* Variant of netdev_tx_sent_queue() for drivers that are aware |
---|
| 3516 | + * that they should not test BQL status themselves. |
---|
| 3517 | + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last |
---|
| 3518 | + * skb of a batch. |
---|
| 3519 | + * Returns true if the doorbell must be used to kick the NIC. |
---|
| 3520 | + */ |
---|
| 3521 | +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
---|
| 3522 | + unsigned int bytes, |
---|
| 3523 | + bool xmit_more) |
---|
| 3524 | +{ |
---|
| 3525 | + if (xmit_more) { |
---|
| 3526 | +#ifdef CONFIG_BQL |
---|
| 3527 | + dql_queued(&dev_queue->dql, bytes); |
---|
| 3528 | +#endif |
---|
| 3529 | + return netif_tx_queue_stopped(dev_queue); |
---|
| 3530 | + } |
---|
| 3531 | + netdev_tx_sent_queue(dev_queue, bytes); |
---|
| 3532 | + return true; |
---|
| 3533 | +} |
---|
| 3534 | + |
---|
3288 | 3535 | /** |
---|
3289 | 3536 | * netdev_sent_queue - report the number of bytes queued to hardware |
---|
3290 | 3537 | * @dev: network device |
---|
.. | .. |
---|
3297 | 3544 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
---|
3298 | 3545 | { |
---|
3299 | 3546 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); |
---|
| 3547 | +} |
---|
| 3548 | + |
---|
| 3549 | +static inline bool __netdev_sent_queue(struct net_device *dev, |
---|
| 3550 | + unsigned int bytes, |
---|
| 3551 | + bool xmit_more) |
---|
| 3552 | +{ |
---|
| 3553 | + return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, |
---|
| 3554 | + xmit_more); |
---|
3300 | 3555 | } |
---|
3301 | 3556 | |
---|
3302 | 3557 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, |
---|
.. | .. |
---|
3315 | 3570 | */ |
---|
3316 | 3571 | smp_mb(); |
---|
3317 | 3572 | |
---|
3318 | | - if (dql_avail(&dev_queue->dql) < 0) |
---|
| 3573 | + if (unlikely(dql_avail(&dev_queue->dql) < 0)) |
---|
3319 | 3574 | return; |
---|
3320 | 3575 | |
---|
3321 | 3576 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) |
---|
.. | .. |
---|
3523 | 3778 | } |
---|
3524 | 3779 | |
---|
3525 | 3780 | /** |
---|
3526 | | - * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p |
---|
| 3781 | + * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p |
---|
3527 | 3782 | * @n: CPU/Rx queue index |
---|
3528 | 3783 | * @src1p: the first CPUs/Rx queues mask pointer |
---|
3529 | 3784 | * @src2p: the second CPUs/Rx queues mask pointer |
---|
.. | .. |
---|
3660 | 3915 | int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); |
---|
3661 | 3916 | int netif_rx(struct sk_buff *skb); |
---|
3662 | 3917 | int netif_rx_ni(struct sk_buff *skb); |
---|
| 3918 | +int netif_rx_any_context(struct sk_buff *skb); |
---|
3663 | 3919 | int netif_receive_skb(struct sk_buff *skb); |
---|
3664 | 3920 | int netif_receive_skb_core(struct sk_buff *skb); |
---|
3665 | 3921 | void netif_receive_skb_list(struct list_head *head); |
---|
.. | .. |
---|
3669 | 3925 | gro_result_t napi_gro_frags(struct napi_struct *napi); |
---|
3670 | 3926 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
---|
3671 | 3927 | struct packet_offload *gro_find_complete_by_type(__be16 type); |
---|
3672 | | -extern struct napi_struct *get_current_napi_context(void); |
---|
3673 | 3928 | |
---|
3674 | 3929 | static inline void napi_free_frags(struct napi_struct *napi) |
---|
3675 | 3930 | { |
---|
.. | .. |
---|
3693 | 3948 | int dev_ifconf(struct net *net, struct ifconf *, int); |
---|
3694 | 3949 | int dev_ethtool(struct net *net, struct ifreq *); |
---|
3695 | 3950 | unsigned int dev_get_flags(const struct net_device *); |
---|
3696 | | -int __dev_change_flags(struct net_device *, unsigned int flags); |
---|
3697 | | -int dev_change_flags(struct net_device *, unsigned int); |
---|
| 3951 | +int __dev_change_flags(struct net_device *dev, unsigned int flags, |
---|
| 3952 | + struct netlink_ext_ack *extack); |
---|
| 3953 | +int dev_change_flags(struct net_device *dev, unsigned int flags, |
---|
| 3954 | + struct netlink_ext_ack *extack); |
---|
3698 | 3955 | void __dev_notify_flags(struct net_device *, unsigned int old_flags, |
---|
3699 | 3956 | unsigned int gchanges); |
---|
3700 | 3957 | int dev_change_name(struct net_device *, const char *); |
---|
.. | .. |
---|
3709 | 3966 | int dev_set_mtu(struct net_device *, int); |
---|
3710 | 3967 | int dev_change_tx_queue_len(struct net_device *, unsigned long); |
---|
3711 | 3968 | void dev_set_group(struct net_device *, int); |
---|
3712 | | -int dev_set_mac_address(struct net_device *, struct sockaddr *); |
---|
| 3969 | +int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, |
---|
| 3970 | + struct netlink_ext_ack *extack); |
---|
| 3971 | +int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, |
---|
| 3972 | + struct netlink_ext_ack *extack); |
---|
| 3973 | +int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, |
---|
| 3974 | + struct netlink_ext_ack *extack); |
---|
| 3975 | +int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); |
---|
3713 | 3976 | int dev_change_carrier(struct net_device *, bool new_carrier); |
---|
3714 | 3977 | int dev_get_phys_port_id(struct net_device *dev, |
---|
3715 | 3978 | struct netdev_phys_item_id *ppid); |
---|
3716 | 3979 | int dev_get_phys_port_name(struct net_device *dev, |
---|
3717 | 3980 | char *name, size_t len); |
---|
| 3981 | +int dev_get_port_parent_id(struct net_device *dev, |
---|
| 3982 | + struct netdev_phys_item_id *ppid, bool recurse); |
---|
| 3983 | +bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); |
---|
3718 | 3984 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
---|
| 3985 | +int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); |
---|
| 3986 | +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, |
---|
| 3987 | + u32 value); |
---|
3719 | 3988 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
---|
3720 | 3989 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
---|
3721 | 3990 | struct netdev_queue *txq, int *ret); |
---|
3722 | 3991 | |
---|
3723 | 3992 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); |
---|
3724 | 3993 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
---|
3725 | | - int fd, u32 flags); |
---|
3726 | | -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, |
---|
3727 | | - enum bpf_netdev_command cmd); |
---|
| 3994 | + int fd, int expected_fd, u32 flags); |
---|
| 3995 | +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
---|
| 3996 | +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); |
---|
| 3997 | + |
---|
3728 | 3998 | int xdp_umem_query(struct net_device *dev, u16 queue_id); |
---|
3729 | 3999 | |
---|
3730 | 4000 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
---|
.. | .. |
---|
3747 | 4017 | return 0; |
---|
3748 | 4018 | } |
---|
3749 | 4019 | |
---|
| 4020 | +bool dev_nit_active(struct net_device *dev); |
---|
3750 | 4021 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
---|
3751 | 4022 | |
---|
3752 | 4023 | extern int netdev_budget; |
---|
.. | .. |
---|
3763 | 4034 | */ |
---|
3764 | 4035 | static inline void dev_put(struct net_device *dev) |
---|
3765 | 4036 | { |
---|
3766 | | - this_cpu_dec(*dev->pcpu_refcnt); |
---|
| 4037 | + if (dev) |
---|
| 4038 | + this_cpu_dec(*dev->pcpu_refcnt); |
---|
3767 | 4039 | } |
---|
3768 | 4040 | |
---|
3769 | 4041 | /** |
---|
.. | .. |
---|
3774 | 4046 | */ |
---|
3775 | 4047 | static inline void dev_hold(struct net_device *dev) |
---|
3776 | 4048 | { |
---|
3777 | | - this_cpu_inc(*dev->pcpu_refcnt); |
---|
| 4049 | + if (dev) |
---|
| 4050 | + this_cpu_inc(*dev->pcpu_refcnt); |
---|
3778 | 4051 | } |
---|
3779 | 4052 | |
---|
3780 | 4053 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
---|
.. | .. |
---|
3852 | 4125 | |
---|
3853 | 4126 | |
---|
/**
 * netif_testing_on - mark device as under test.
 * @dev: network device
 *
 * Mark device as under test (as per RFC2863).
 *
 * The testing state indicates that some test(s) must be performed on
 * the interface. After completion of the test, the interface state
 * will change to up, dormant, or down, as appropriate.
 */
static inline void netif_testing_on(struct net_device *dev)
{
	/* Fire a linkwatch event only on the 0 -> 1 transition of the bit. */
	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}
---|
| 4142 | + |
---|
/**
 * netif_testing_off - set device as not under test.
 * @dev: network device
 *
 * Device is not in testing state.
 */
static inline void netif_testing_off(struct net_device *dev)
{
	/* Fire a linkwatch event only on the 1 -> 0 transition of the bit. */
	if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}
---|
| 4154 | + |
---|
/**
 * netif_testing - test if device is under test
 * @dev: network device
 *
 * Check if device is under test (__LINK_STATE_TESTING set in dev->state).
 */
static inline bool netif_testing(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_TESTING, &dev->state);
}
---|
| 4165 | + |
---|
| 4166 | + |
---|
| 4167 | +/** |
---|
3855 | 4168 | * netif_oper_up - test if device is operational |
---|
3856 | 4169 | * @dev: network device |
---|
3857 | 4170 | * |
---|
.. | .. |
---|
3883 | 4196 | */ |
---|
3884 | 4197 | |
---|
/* Message-class bit numbers for the driver msg_enable bitmap. */
enum {
	NETIF_MSG_DRV_BIT,
	NETIF_MSG_PROBE_BIT,
	NETIF_MSG_LINK_BIT,
	NETIF_MSG_TIMER_BIT,
	NETIF_MSG_IFDOWN_BIT,
	NETIF_MSG_IFUP_BIT,
	NETIF_MSG_RX_ERR_BIT,
	NETIF_MSG_TX_ERR_BIT,
	NETIF_MSG_TX_QUEUED_BIT,
	NETIF_MSG_INTR_BIT,
	NETIF_MSG_TX_DONE_BIT,
	NETIF_MSG_RX_STATUS_BIT,
	NETIF_MSG_PKTDATA_BIT,
	NETIF_MSG_HW_BIT,
	NETIF_MSG_WOL_BIT,

	/* When you add a new bit above, update netif_msg_class_names array
	 * in net/ethtool/common.c
	 */
	NETIF_MSG_CLASS_COUNT,
};
/* Both ethtool_ops interface and internal driver implementation use u32 */
static_assert(NETIF_MSG_CLASS_COUNT <= 32);

/* Turn a message-class bit number into its u32 mask. */
#define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
#define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)

/* Per-class masks; bit order preserves the historical 0x0001..0x4000
 * values this enum replaced.
 */
#define NETIF_MSG_DRV		__NETIF_MSG(DRV)
#define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
#define NETIF_MSG_LINK		__NETIF_MSG(LINK)
#define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
#define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
#define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
#define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
#define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
#define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
#define NETIF_MSG_INTR		__NETIF_MSG(INTR)
#define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
#define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
#define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
#define NETIF_MSG_HW		__NETIF_MSG(HW)
#define NETIF_MSG_WOL		__NETIF_MSG(WOL)
---|
3902 | 4241 | |
---|
3903 | 4242 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) |
---|
3904 | 4243 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) |
---|
.. | .. |
---|
3927 | 4266 | return (1U << debug_value) - 1; |
---|
3928 | 4267 | } |
---|
3929 | 4268 | |
---|
3930 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
3931 | | -static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) |
---|
3932 | | -{ |
---|
3933 | | - WRITE_ONCE(txq->xmit_lock_owner, current); |
---|
3934 | | -} |
---|
3935 | | - |
---|
3936 | | -static inline void netdev_queue_clear_owner(struct netdev_queue *txq) |
---|
3937 | | -{ |
---|
3938 | | - WRITE_ONCE(txq->xmit_lock_owner, NULL); |
---|
3939 | | -} |
---|
3940 | | - |
---|
3941 | | -static inline bool netdev_queue_has_owner(struct netdev_queue *txq) |
---|
3942 | | -{ |
---|
3943 | | - if (READ_ONCE(txq->xmit_lock_owner) != NULL) |
---|
3944 | | - return true; |
---|
3945 | | - return false; |
---|
3946 | | -} |
---|
3947 | | - |
---|
3948 | | -#else |
---|
3949 | | - |
---|
3950 | | -static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) |
---|
3951 | | -{ |
---|
3952 | | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
---|
3953 | | - WRITE_ONCE(txq->xmit_lock_owner, cpu); |
---|
3954 | | -} |
---|
3955 | | - |
---|
3956 | | -static inline void netdev_queue_clear_owner(struct netdev_queue *txq) |
---|
3957 | | -{ |
---|
3958 | | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
---|
3959 | | - WRITE_ONCE(txq->xmit_lock_owner, -1); |
---|
3960 | | -} |
---|
3961 | | - |
---|
3962 | | -static inline bool netdev_queue_has_owner(struct netdev_queue *txq) |
---|
3963 | | -{ |
---|
3964 | | - if (READ_ONCE(txq->xmit_lock_owner) != -1) |
---|
3965 | | - return true; |
---|
3966 | | - return false; |
---|
3967 | | -} |
---|
3968 | | -#endif |
---|
3969 | | - |
---|
/* Take the per-queue xmit lock and record @cpu as the lock owner. */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
---|
3975 | 4275 | |
---|
3976 | 4276 | static inline bool __netif_tx_acquire(struct netdev_queue *txq) |
---|
.. | .. |
---|
/* BH-disabling variant of __netif_tx_lock(); owner is the current CPU. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
---|
3992 | 4293 | |
---|
3993 | 4294 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
---|
.. | .. |
---|
3995 | 4296 | bool ok = spin_trylock(&txq->_xmit_lock); |
---|
3996 | 4297 | |
---|
3997 | 4298 | if (likely(ok)) { |
---|
3998 | | - netdev_queue_set_owner(txq, smp_processor_id()); |
---|
| 4299 | + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
---|
| 4300 | + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); |
---|
3999 | 4301 | } |
---|
4000 | 4302 | return ok; |
---|
4001 | 4303 | } |
---|
4002 | 4304 | |
---|
/* Clear the owner (-1 = unowned) before dropping the per-queue xmit lock. */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock(&txq->_xmit_lock);
}
---|
4008 | 4311 | |
---|
/* BH-re-enabling variant of __netif_tx_unlock(). */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock_bh(&txq->_xmit_lock);
}
---|
4014 | 4318 | |
---|
/* Refresh the queue's last-transmit timestamp, but only while the queue
 * appears locked (xmit_lock_owner != -1).  NOTE(review): the plain read
 * here (no READ_ONCE) presumably relies on the caller holding the xmit
 * lock — confirm at call sites.
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
---|
4020 | 4324 | |
---|
.. | .. |
---|
4126 | 4430 | |
---|
/* Lock the device's address list.  Under lockdep the lock is taken with
 * the device's nesting depth as subclass (dev->nested_level presumably
 * tracks stacking depth for vlan/bond-style uppers — confirm where it is
 * maintained), so locking upper and lower devices together does not
 * raise false deadlock reports.
 */
static inline void netif_addr_lock(struct net_device *dev)
{
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;
#endif
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}
---|
4141 | 4440 | |
---|
/* BH-disabling variant of netif_addr_lock(): equivalent to spin_lock_bh()
 * on addr_list_lock, but split into local_bh_disable() +
 * spin_lock_nested() so the lockdep subclass annotation can be applied.
 */
static inline void netif_addr_lock_bh(struct net_device *dev)
{
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;
#endif
	local_bh_disable();
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}
---|
4146 | 4451 | |
---|
4147 | 4452 | static inline void netif_addr_unlock(struct net_device *dev) |
---|
.. | .. |
---|
4170 | 4475 | unsigned char name_assign_type, |
---|
4171 | 4476 | void (*setup)(struct net_device *), |
---|
4172 | 4477 | unsigned int txqs, unsigned int rxqs); |
---|
4173 | | -int dev_get_valid_name(struct net *net, struct net_device *dev, |
---|
4174 | | - const char *name); |
---|
4175 | | - |
---|
4176 | 4478 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
---|
4177 | 4479 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) |
---|
4178 | 4480 | |
---|
.. | .. |
---|
4182 | 4484 | |
---|
4183 | 4485 | int register_netdev(struct net_device *dev); |
---|
4184 | 4486 | void unregister_netdev(struct net_device *dev); |
---|
| 4487 | + |
---|
| 4488 | +int devm_register_netdev(struct device *dev, struct net_device *ndev); |
---|
4185 | 4489 | |
---|
4186 | 4490 | /* General hardware address lists handling functions */ |
---|
4187 | 4491 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
---|
.. | .. |
---|
4193 | 4497 | int (*sync)(struct net_device *, const unsigned char *), |
---|
4194 | 4498 | int (*unsync)(struct net_device *, |
---|
4195 | 4499 | const unsigned char *)); |
---|
| 4500 | +int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, |
---|
| 4501 | + struct net_device *dev, |
---|
| 4502 | + int (*sync)(struct net_device *, |
---|
| 4503 | + const unsigned char *, int), |
---|
| 4504 | + int (*unsync)(struct net_device *, |
---|
| 4505 | + const unsigned char *, int)); |
---|
| 4506 | +void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, |
---|
| 4507 | + struct net_device *dev, |
---|
| 4508 | + int (*unsync)(struct net_device *, |
---|
| 4509 | + const unsigned char *, int)); |
---|
4196 | 4510 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, |
---|
4197 | 4511 | struct net_device *dev, |
---|
4198 | 4512 | int (*unsync)(struct net_device *, |
---|
.. | .. |
---|
4200 | 4514 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
---|
4201 | 4515 | |
---|
4202 | 4516 | /* Functions used for device addresses handling */ |
---|
/* Copy @len bytes of @addr into dev->dev_addr.  NOTE(review): no bounds
 * check here — caller is responsible for @len fitting the address buffer.
 */
static inline void
__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len)
{
	memcpy(dev->dev_addr, addr, len);
}

/* Set the device's hardware address using its configured addr_len. */
static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, dev->addr_len);
}

/* Overwrite @len bytes of the hardware address starting at @offset. */
static inline void
dev_addr_mod(struct net_device *dev, unsigned int offset,
	     const u8 *addr, size_t len)
{
	memcpy(&dev->dev_addr[offset], addr, len);
}
---|
| 4534 | + |
---|
4203 | 4535 | int dev_addr_add(struct net_device *dev, const unsigned char *addr, |
---|
4204 | 4536 | unsigned char addr_type); |
---|
4205 | 4537 | int dev_addr_del(struct net_device *dev, const unsigned char *addr, |
---|
.. | .. |
---|
4307 | 4639 | struct rtnl_link_stats64 *storage); |
---|
4308 | 4640 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, |
---|
4309 | 4641 | const struct net_device_stats *netdev_stats); |
---|
| 4642 | +void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, |
---|
| 4643 | + const struct pcpu_sw_netstats __percpu *netstats); |
---|
4310 | 4644 | |
---|
4311 | 4645 | extern int netdev_max_backlog; |
---|
4312 | 4646 | extern int netdev_tstamp_prequeue; |
---|
.. | .. |
---|
4315 | 4649 | extern int dev_weight_tx_bias; |
---|
4316 | 4650 | extern int dev_rx_weight; |
---|
4317 | 4651 | extern int dev_tx_weight; |
---|
| 4652 | +extern int gro_normal_batch; |
---|
| 4653 | + |
---|
/* Bit numbers for the NESTED_SYNC_* flag masks below. */
enum {
	NESTED_SYNC_IMM_BIT,
	NESTED_SYNC_TODO_BIT,
};

#define __NESTED_SYNC_BIT(bit)	((u32)1 << (bit))
#define __NESTED_SYNC(name)	__NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)

#define NESTED_SYNC_IMM		__NESTED_SYNC(IMM)
#define NESTED_SYNC_TODO	__NESTED_SYNC(TODO)

/* Cookie passed to the netdev_walk_all_{upper,lower}_dev*() callbacks:
 * @flags presumably carries the NESTED_SYNC_* bits above (confirm at the
 * walkers in net/core/dev.c); @data is the caller's opaque payload.
 */
struct netdev_nested_priv {
	unsigned char flags;
	void *data;
};
---|
4318 | 4669 | |
---|
4319 | 4670 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
---|
4320 | 4671 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
---|
4321 | 4672 | struct list_head **iter); |
---|
4322 | 4673 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
---|
4323 | 4674 | struct list_head **iter); |
---|
| 4675 | + |
---|
#ifdef CONFIG_LOCKDEP
/* Lockdep-only bookkeeping: devices with a pending unlink are collected
 * here (the consumer of this list is outside this header).
 */
static LIST_HEAD(net_unlink_list);

static inline void net_unlink_todo(struct net_device *dev)
{
	/* Queue at most once: an empty node means it is not listed yet. */
	if (list_empty(&dev->unlink_list))
		list_add_tail(&dev->unlink_list, &net_unlink_list);
}
#endif
---|
4324 | 4685 | |
---|
4325 | 4686 | /* iterate through upper list, must be called under RCU read lock */ |
---|
4326 | 4687 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ |
---|
.. | .. |
---|
4331 | 4692 | |
---|
4332 | 4693 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
---|
4333 | 4694 | int (*fn)(struct net_device *upper_dev, |
---|
4334 | | - void *data), |
---|
4335 | | - void *data); |
---|
| 4695 | + struct netdev_nested_priv *priv), |
---|
| 4696 | + struct netdev_nested_priv *priv); |
---|
4336 | 4697 | |
---|
4337 | 4698 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, |
---|
4338 | 4699 | struct net_device *upper_dev); |
---|
.. | .. |
---|
4365 | 4726 | ldev; \ |
---|
4366 | 4727 | ldev = netdev_lower_get_next(dev, &(iter))) |
---|
4367 | 4728 | |
---|
4368 | | -struct net_device *netdev_all_lower_get_next(struct net_device *dev, |
---|
| 4729 | +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, |
---|
4369 | 4730 | struct list_head **iter); |
---|
4370 | | -struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, |
---|
4371 | | - struct list_head **iter); |
---|
4372 | | - |
---|
4373 | 4731 | int netdev_walk_all_lower_dev(struct net_device *dev, |
---|
4374 | 4732 | int (*fn)(struct net_device *lower_dev, |
---|
4375 | | - void *data), |
---|
4376 | | - void *data); |
---|
| 4733 | + struct netdev_nested_priv *priv), |
---|
| 4734 | + struct netdev_nested_priv *priv); |
---|
4377 | 4735 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, |
---|
4378 | 4736 | int (*fn)(struct net_device *lower_dev, |
---|
4379 | | - void *data), |
---|
4380 | | - void *data); |
---|
| 4737 | + struct netdev_nested_priv *priv), |
---|
| 4738 | + struct netdev_nested_priv *priv); |
---|
4381 | 4739 | |
---|
4382 | 4740 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
---|
4383 | 4741 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
---|
.. | .. |
---|
4391 | 4749 | struct netlink_ext_ack *extack); |
---|
4392 | 4750 | void netdev_upper_dev_unlink(struct net_device *dev, |
---|
4393 | 4751 | struct net_device *upper_dev); |
---|
| 4752 | +int netdev_adjacent_change_prepare(struct net_device *old_dev, |
---|
| 4753 | + struct net_device *new_dev, |
---|
| 4754 | + struct net_device *dev, |
---|
| 4755 | + struct netlink_ext_ack *extack); |
---|
| 4756 | +void netdev_adjacent_change_commit(struct net_device *old_dev, |
---|
| 4757 | + struct net_device *new_dev, |
---|
| 4758 | + struct net_device *dev); |
---|
| 4759 | +void netdev_adjacent_change_abort(struct net_device *old_dev, |
---|
| 4760 | + struct net_device *new_dev, |
---|
| 4761 | + struct net_device *dev); |
---|
4394 | 4762 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
---|
4395 | 4763 | void *netdev_lower_dev_get_private(struct net_device *dev, |
---|
4396 | 4764 | struct net_device *lower_dev); |
---|
.. | .. |
---|
4402 | 4770 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; |
---|
4403 | 4771 | void netdev_rss_key_fill(void *buffer, size_t len); |
---|
4404 | 4772 | |
---|
4405 | | -int dev_get_nest_level(struct net_device *dev); |
---|
4406 | 4773 | int skb_checksum_help(struct sk_buff *skb); |
---|
4407 | 4774 | int skb_crc32c_csum_help(struct sk_buff *skb); |
---|
4408 | 4775 | int skb_csum_hwoffload_help(struct sk_buff *skb, |
---|
.. | .. |
---|
4425 | 4792 | |
---|
4426 | 4793 | void netdev_bonding_info_change(struct net_device *dev, |
---|
4427 | 4794 | struct netdev_bonding_info *bonding_info); |
---|
| 4795 | + |
---|
| 4796 | +#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) |
---|
| 4797 | +void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); |
---|
| 4798 | +#else |
---|
| 4799 | +static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, |
---|
| 4800 | + const void *data) |
---|
| 4801 | +{ |
---|
| 4802 | +} |
---|
| 4803 | +#endif |
---|
4428 | 4804 | |
---|
4429 | 4805 | static inline |
---|
4430 | 4806 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) |
---|
.. | .. |
---|
4457 | 4833 | } |
---|
4458 | 4834 | |
---|
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
/* Without CONFIG_BUG, an RX hardware-checksum fault is silently ignored. */
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif
---|
.. | .. |
---|
4477 | 4854 | struct sk_buff *skb, struct net_device *dev, |
---|
4478 | 4855 | bool more) |
---|
4479 | 4856 | { |
---|
4480 | | - skb->xmit_more = more ? 1 : 0; |
---|
| 4857 | + __this_cpu_write(softnet_data.xmit.more, more); |
---|
4481 | 4858 | return ops->ndo_start_xmit(skb, dev); |
---|
4482 | 4859 | } |
---|
4483 | 4860 | |
---|
.. | .. |
---|
4490 | 4867 | struct netdev_queue *txq, bool more) |
---|
4491 | 4868 | { |
---|
4492 | 4869 | const struct net_device_ops *ops = dev->netdev_ops; |
---|
4493 | | - int rc; |
---|
| 4870 | + netdev_tx_t rc; |
---|
4494 | 4871 | |
---|
4495 | 4872 | rc = __netdev_start_xmit(ops, skb, dev, more); |
---|
4496 | 4873 | if (rc == NETDEV_TX_OK) |
---|
.. | .. |
---|
4503 | 4880 | const void *ns); |
---|
4504 | 4881 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
---|
4505 | 4882 | const void *ns); |
---|
4506 | | - |
---|
4507 | | -static inline int netdev_class_create_file(const struct class_attribute *class_attr) |
---|
4508 | | -{ |
---|
4509 | | - return netdev_class_create_file_ns(class_attr, NULL); |
---|
4510 | | -} |
---|
4511 | | - |
---|
4512 | | -static inline void netdev_class_remove_file(const struct class_attribute *class_attr) |
---|
4513 | | -{ |
---|
4514 | | - netdev_class_remove_file_ns(class_attr, NULL); |
---|
4515 | | -} |
---|
4516 | 4883 | |
---|
4517 | 4884 | extern const struct kobj_ns_type_operations net_ns_type_operations; |
---|
4518 | 4885 | |
---|
.. | .. |
---|
4586 | 4953 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
---|
4587 | 4954 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
---|
4588 | 4955 | BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); |
---|
| 4956 | + BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); |
---|
4589 | 4957 | |
---|
4590 | 4958 | return (features & feature) == feature; |
---|
4591 | 4959 | } |
---|
.. | .. |
---|
4688 | 5056 | return dev->priv_flags & IFF_OVS_DATAPATH; |
---|
4689 | 5057 | } |
---|
4690 | 5058 | |
---|
/* True if @dev is a port of either a Linux bridge or an OVS datapath. */
static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}
---|
| 5063 | + |
---|
4691 | 5064 | static inline bool netif_is_team_master(const struct net_device *dev) |
---|
4692 | 5065 | { |
---|
4693 | 5066 | return dev->priv_flags & IFF_TEAM; |
---|
.. | .. |
---|
4769 | 5142 | return " (unknown)"; |
---|
4770 | 5143 | } |
---|
4771 | 5144 | |
---|
4772 | | -__printf(3, 4) |
---|
| 5145 | +__printf(3, 4) __cold |
---|
4773 | 5146 | void netdev_printk(const char *level, const struct net_device *dev, |
---|
4774 | 5147 | const char *format, ...); |
---|
4775 | | -__printf(2, 3) |
---|
| 5148 | +__printf(2, 3) __cold |
---|
4776 | 5149 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
---|
4777 | | -__printf(2, 3) |
---|
| 5150 | +__printf(2, 3) __cold |
---|
4778 | 5151 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
---|
4779 | | -__printf(2, 3) |
---|
| 5152 | +__printf(2, 3) __cold |
---|
4780 | 5153 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
---|
4781 | | -__printf(2, 3) |
---|
| 5154 | +__printf(2, 3) __cold |
---|
4782 | 5155 | void netdev_err(const struct net_device *dev, const char *format, ...); |
---|
4783 | | -__printf(2, 3) |
---|
| 5156 | +__printf(2, 3) __cold |
---|
4784 | 5157 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
---|
4785 | | -__printf(2, 3) |
---|
| 5158 | +__printf(2, 3) __cold |
---|
4786 | 5159 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
---|
4787 | | -__printf(2, 3) |
---|
| 5160 | +__printf(2, 3) __cold |
---|
4788 | 5161 | void netdev_info(const struct net_device *dev, const char *format, ...); |
---|
4789 | 5162 | |
---|
4790 | 5163 | #define netdev_level_once(level, dev, fmt, ...) \ |
---|
.. | .. |
---|
4815 | 5188 | #define MODULE_ALIAS_NETDEV(device) \ |
---|
4816 | 5189 | MODULE_ALIAS("netdev-" device) |
---|
4817 | 5190 | |
---|
4818 | | -#if defined(CONFIG_DYNAMIC_DEBUG) |
---|
| 5191 | +#if defined(CONFIG_DYNAMIC_DEBUG) || \ |
---|
| 5192 | + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
---|
4819 | 5193 | #define netdev_dbg(__dev, format, args...) \ |
---|
4820 | 5194 | do { \ |
---|
4821 | 5195 | dynamic_netdev_dbg(__dev, format, ##args); \ |
---|
.. | .. |
---|
4885 | 5259 | #define netif_info(priv, type, dev, fmt, args...) \ |
---|
4886 | 5260 | netif_level(info, priv, type, dev, fmt, ##args) |
---|
4887 | 5261 | |
---|
4888 | | -#if defined(CONFIG_DYNAMIC_DEBUG) |
---|
| 5262 | +#if defined(CONFIG_DYNAMIC_DEBUG) || \ |
---|
| 5263 | + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
---|
4889 | 5264 | #define netif_dbg(priv, type, netdev, format, args...) \ |
---|
4890 | 5265 | do { \ |
---|
4891 | 5266 | if (netif_msg_##type(priv)) \ |
---|
.. | .. |
---|
4945 | 5320 | #define PTYPE_HASH_SIZE (16) |
---|
4946 | 5321 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) |
---|
4947 | 5322 | |
---|
| 5323 | +extern struct net_device *blackhole_netdev; |
---|
| 5324 | + |
---|
4948 | 5325 | #endif /* _LINUX_NETDEVICE_H */ |
---|