@@ -3111,6 +3111,10 @@
 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
+
+	if (recycle_oob_skb(skb))
+		return;
+
 	get_kfree_skb_cb(skb)->reason = reason;
 	local_irq_save(flags);
 	skb->next = __this_cpu_read(softnet_data.completion_queue);
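This hunk gives the out-of-band stage first refusal on freed skbs: before a buffer is queued for deferred free on the per-CPU completion queue, `recycle_oob_skb()` may reclaim it. The patch does not show that function; a minimal sketch of what a consuming implementation could look like, assuming the companion core can recognize its own buffers (via `skb_is_oob()`, used later in this patch) and keeps them in a private pool (`oob_pool_return()` is a hypothetical name):

```c
/* Hedged sketch, not the actual implementation. */
static bool recycle_oob_skb(struct sk_buff *skb)
{
	if (!skb_is_oob(skb))
		return false;		/* not ours: normal deferred free */

	oob_pool_return(skb);		/* hypothetical private-pool recycle */
	return true;			/* consumed: skip the completion queue */
}
```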
@@ -3584,7 +3588,12 @@
 	unsigned int len;
 	int rc;
 
-	if (dev_nit_active(dev))
+	/*
+	 * Clone-relay outgoing packet to listening taps. Network taps
+	 * interested in out-of-band traffic should be handled by the
+	 * companion core.
+	 */
+	if (dev_nit_active(dev) && !skb_is_oob(skb))
 		dev_queue_xmit_nit(skb, dev);
 
 	len = skb->len;
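The new guard keeps out-of-band traffic away from in-band taps (tcpdump and friends); per the added comment, taps interested in OOB traffic are the companion core's concern. `skb_is_oob()` is not defined in this hunk; a plausible reading, assuming the series adds an `oob` flag bit to `struct sk_buff` elsewhere, would be:

```c
/* Assumption: the series adds an "oob" bit to struct sk_buff;
 * this accessor is a sketch, not taken from the patch itself. */
static inline bool skb_is_oob(const struct sk_buff *skb)
{
	return skb->oob;
}
```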
@@ -4797,6 +4806,81 @@
 }
 EXPORT_SYMBOL_GPL(do_xdp_generic);
 
+#ifdef CONFIG_NET_OOB
+
+__weak bool netif_oob_deliver(struct sk_buff *skb)
+{
+	return false;
+}
+
+__weak int netif_xmit_oob(struct sk_buff *skb)
+{
+	return NET_XMIT_DROP;
+}
+
+static bool netif_receive_oob(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+
+	if (dev && netif_oob_diversion(dev))
+		return netif_oob_deliver(skb);
+
+	return false;
+}
+
+static bool netif_receive_oob_list(struct list_head *head)
+{
+	struct sk_buff *skb, *next;
+	struct net_device *dev;
+
+	if (list_empty(head))
+		return false;
+
+	dev = list_first_entry(head, struct sk_buff, list)->dev;
+	if (!dev || !netif_oob_diversion(dev))
+		return false;
+
+	/* Callee dequeues every skb it consumes. */
+	list_for_each_entry_safe(skb, next, head, list)
+		netif_oob_deliver(skb);
+
+	return list_empty(head);
+}
+
+__weak void netif_oob_run(struct net_device *dev)
+{ }
+
+static void napi_complete_oob(struct napi_struct *n)
+{
+	struct net_device *dev = n->dev;
+
+	if (netif_oob_diversion(dev))
+		netif_oob_run(dev);
+}
+
+__weak void skb_inband_xmit_backlog(void)
+{ }
+
+#else
+
+static inline bool netif_receive_oob(struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline bool netif_receive_oob_list(struct list_head *head)
+{
+	return false;
+}
+
+static inline void napi_complete_oob(struct napi_struct *n)
+{ }
+
+static inline void skb_inband_xmit_backlog(void)
+{ }
+
+#endif
+
 static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
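The `__weak` stubs are the linkage trick that makes the whole diversion optional: with `CONFIG_NET_OOB` enabled but no companion core linked in, the weak defaults apply (deliver nothing, drop OOB transmits, do nothing on NAPI completion); a companion core provides strong definitions of the same symbols and the linker silently prefers those. `netif_oob_diversion(dev)` then gates everything per device, presumably a device-state flag added elsewhere in the series. A self-contained userspace illustration of the weak/strong mechanism, with hypothetical names:

```c
/* weak_demo.c -- the weak/strong override pattern used above,
 * reduced to plain C. Build: cc weak_demo.c [strong.c] -o demo */
#include <stdio.h>

/* Weak default, analogous to the netif_oob_deliver() stub:
 * used only if no strong definition is linked in. */
__attribute__((weak)) int deliver(int pkt)
{
	return 0;	/* "not consumed": fall back to the normal path */
}

int main(void)
{
	/* Prints 0 when built alone; prints 1 once a second file
	 * providing a non-weak deliver() joins the link. */
	printf("consumed: %d\n", deliver(42));
	return 0;
}
```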
@@ -4895,6 +4979,8 @@
 static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	skb_inband_xmit_backlog();
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
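Hooking `skb_inband_xmit_backlog()` at the top of `net_tx_action()` suggests the OOB stage defers transmissions it cannot perform from out-of-band context and has them drained from the regular TX softirq. A hedged sketch of a strong override, where `oob_dequeue_deferred_xmit()` is an assumed helper rather than anything defined by this patch:

```c
/* Sketch only: drain packets the OOB stage queued for in-band
 * transmission. oob_dequeue_deferred_xmit() is hypothetical;
 * dev_queue_xmit() is the regular in-band transmit entry point. */
void skb_inband_xmit_backlog(void)
{
	struct sk_buff *skb;

	while ((skb = oob_dequeue_deferred_xmit()) != NULL)
		dev_queue_xmit(skb);
}
```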
@@ -5639,6 +5725,9 @@
 {
 	int ret;
 
+	if (netif_receive_oob(skb))
+		return NET_RX_SUCCESS;
+
 	trace_netif_receive_skb_entry(skb);
 
 	ret = netif_receive_skb_internal(skb);
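Note the ordering in `netif_receive_skb()`: the diversion runs before `trace_netif_receive_skb_entry()`, so packets consumed out-of-band never appear in the in-band receive tracepoints, and the caller simply sees `NET_RX_SUCCESS`.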
@@ -5662,6 +5751,8 @@
 {
 	struct sk_buff *skb;
 
+	if (netif_receive_oob_list(head))
+		return;
 	if (list_empty(head))
 		return;
 	if (trace_netif_receive_skb_list_entry_enabled()) {
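The list variant spells out its contract in `netif_receive_oob_list()` above: the callee must unlink every skb it consumes, and only a fully drained list skips the in-band path (a partial take falls through so the leftovers still reach the stack). A consuming `netif_oob_deliver()` override would therefore look roughly like this, with `oob_wants_packet()` and `oob_input()` as assumed companion-core hooks:

```c
/* Hedged sketch of a consuming override honoring the contract:
 * dequeue with skb_list_del_init() before keeping an skb. */
bool netif_oob_deliver(struct sk_buff *skb)
{
	if (!oob_wants_packet(skb))	/* hypothetical filter */
		return false;		/* stays listed: in-band path */

	skb_list_del_init(skb);		/* unlink, per the contract */
	oob_input(skb);			/* hypothetical OOB input stage */
	return true;
}
```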
@@ -6152,6 +6243,9 @@
 {
 	gro_result_t ret;
 
+	if (netif_receive_oob(skb))
+		return GRO_NORMAL;
+
 	skb_mark_napi_id(skb, napi);
 	trace_napi_gro_receive_entry(skb);
 
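The GRO path gets the same early diversion; a consumed skb is reported as `GRO_NORMAL`, the same result a plain pass-through to the stack would produce, so NAPI drivers need no OOB-specific handling of the return value.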
@@ -6489,6 +6583,8 @@
 	unsigned long flags, val, new, timeout = 0;
 	bool ret = true;
 
+	napi_complete_oob(n);
+
 	/*
 	 * 1) Don't let napi dequeue from the cpu poll list
 	 * just in case its running on a different cpu.
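Finally, the hook in `napi_complete_done()` closes the loop: whenever an in-band NAPI poll completes on a diverted device, `netif_oob_run()` gives the companion core a chance to run its own receive work. The weak default is empty; a strong override might be as small as kicking a per-device OOB worker, for example (`oob_kick_rx()` is an assumed name):

```c
/* Sketch: wake the companion core's per-device RX stage once
 * in-band polling is done. oob_kick_rx() is hypothetical. */
void netif_oob_run(struct net_device *dev)
{
	oob_kick_rx(dev);
}
```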
---|