From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] net: core: drop PREEMPT_RT-specific paths in dev.c and pick up mainline fixes
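
Drop the PREEMPT_RT-specific code paths in net/core/dev.c and switch back
to the mainline primitives:

- take input_pkt_queue.lock with spin_lock()/spin_unlock() in
  rps_lock()/rps_unlock(), and initialize the backlog queue with
  skb_queue_head_init() instead of the raw variants
- drop the preempt_check_resched_rt() calls that followed
  raise_softirq_irqoff()/local_irq_restore()/local_irq_enable()
- use preempt_disable()/get_cpu() instead of migrate_disable()/
  get_cpu_light() around enqueue_to_backlog(), and let netif_rx_ni()
  run pending softirqs itself instead of relying on local_bh_enable()
- always check qdisc_is_running() for busylock contention instead of
  forcing contended = true

Also pick up several changes that match later mainline code: bounds-check
the queue index in the XPS setup path (WARN_ON_ONCE), use
vlan_get_protocol_and_depth() and skb_csum_is_sctp(), free dropped skbs
with kfree_skb() and consumed ones with consume_skb() in
__dev_kfree_skb_any(), annotate the RFS sock flow table lookup with
READ_ONCE(), call nf_reset_ct() in napi_skb_free_stolen_head(), remove
the DECnet dn_ptr warning, and drain the old input_pkt_queue with
skb_dequeue() (taking the queue lock) on CPU unplug.

For reference, the WRITE_ONCE() side that the new READ_ONCE() on
sock_flow_table->ents[] pairs with lives in rps_record_sock_flow(); a
rough sketch of that helper (paraphrased from include/linux/netdevice.h,
not a verbatim quote) looks like:

  /* Sketch only, not a verbatim copy: record the current CPU for a flow
   * hash so the RPS/RFS lookup can steer follow-up packets to it.
   */
  static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
                                          u32 hash)
  {
          if (table && hash) {
                  unsigned int index = hash & table->mask;
                  u32 val = hash & ~rps_cpu_mask;

                  /* Only a hint: preemption can move us to another CPU. */
                  val |= raw_smp_processor_id();

                  if (READ_ONCE(table->ents[index]) != val)
                          WRITE_ONCE(table->ents[index], val);
          }
  }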
---
kernel/net/core/dev.c | 54 +++++++++++++++++++++++++-----------------------------
1 file changed, 25 insertions(+), 29 deletions(-)
diff --git a/kernel/net/core/dev.c b/kernel/net/core/dev.c
index d3bc4e8..c34511b 100644
--- a/kernel/net/core/dev.c
+++ b/kernel/net/core/dev.c
@@ -222,14 +222,14 @@
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ spin_lock(&sd->input_pkt_queue.lock);
#endif
}
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
@@ -2635,6 +2635,8 @@
bool active = false;
unsigned int nr_ids;
+ WARN_ON_ONCE(index >= dev->num_tx_queues);
+
if (dev->num_tc) {
/* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
@@ -3055,7 +3057,6 @@
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
void __netif_schedule(struct Qdisc *q)
@@ -3118,7 +3119,6 @@
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
@@ -3126,8 +3126,10 @@
{
if (in_irq() || irqs_disabled())
__dev_kfree_skb_irq(skb, reason);
+ else if (unlikely(reason == SKB_REASON_DROPPED))
+ kfree_skb(skb);
else
- dev_kfree_skb(skb);
+ consume_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
@@ -3325,7 +3327,7 @@
type = eth->h_proto;
}
- return __vlan_get_protocol(skb, type, depth);
+ return vlan_get_protocol_and_depth(skb, type, depth);
}
/**
@@ -3638,7 +3640,7 @@
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features)
{
- if (unlikely(skb->csum_not_inet))
+ if (unlikely(skb_csum_is_sctp(skb)))
return !!(features & NETIF_F_SCTP_CRC) ? 0 :
skb_crc32c_csum_help(skb);
@@ -3797,11 +3799,7 @@
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
-#ifdef CONFIG_PREEMPT_RT
- contended = true;
-#else
contended = qdisc_is_running(q);
-#endif
if (unlikely(contended))
spin_lock(&q->busylock);
@@ -4392,8 +4390,10 @@
u32 next_cpu;
u32 ident;
- /* First check into global flow table if there is a match */
- ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+ /* First check into global flow table if there is a match.
+ * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+ */
+ ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
if ((ident ^ hash) & ~rps_cpu_mask)
goto try_rps;
@@ -4601,7 +4601,6 @@
rps_unlock(sd);
local_irq_restore(flags);
- preempt_check_resched_rt();
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -4817,7 +4816,7 @@
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
- migrate_disable();
+ preempt_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -4827,14 +4826,14 @@
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- migrate_enable();
+ preempt_enable();
} else
#endif
{
unsigned int qtail;
- ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
- put_cpu_light();
+ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+ put_cpu();
}
return ret;
}
@@ -4873,9 +4872,11 @@
trace_netif_rx_ni_entry(skb);
- local_bh_disable();
+ preempt_disable();
err = netif_rx_internal(skb);
- local_bh_enable();
+ if (local_softirq_pending())
+ do_softirq();
+ preempt_enable();
trace_netif_rx_ni_exit(err);
return err;
@@ -6119,6 +6120,7 @@
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
+ nf_reset_ct(skb);
skb_dst_drop(skb);
skb_ext_put(skb);
kmem_cache_free(skbuff_head_cache, skb);
@@ -6351,14 +6353,12 @@
sd->rps_ipi_list = NULL;
local_irq_enable();
- preempt_check_resched_rt();
/* Send pending IPI's to kick RPS processing on remote cpus. */
net_rps_send_ipi(remsd);
} else
#endif
local_irq_enable();
- preempt_check_resched_rt();
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -6436,7 +6436,6 @@
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);
@@ -10306,9 +10305,7 @@
BUG_ON(!list_empty(&dev->ptype_specific));
WARN_ON(rcu_access_pointer(dev->ip_ptr));
WARN_ON(rcu_access_pointer(dev->ip6_ptr));
-#if IS_ENABLED(CONFIG_DECNET)
- WARN_ON(dev->dn_ptr);
-#endif
+
if (dev->priv_destructor)
dev->priv_destructor(dev);
if (dev->needs_free_netdev)
@@ -10978,7 +10975,6 @@
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
@@ -10992,7 +10988,7 @@
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -11308,7 +11304,7 @@
INIT_WORK(flush, flush_backlog);
- skb_queue_head_init_raw(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
skb_queue_head_init(&sd->xfrm_backlog);
--
Gitblit v1.6.2