2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
--- a/kernel/net/core/dev.c
+++ b/kernel/net/core/dev.c
@@ -222,14 +222,14 @@
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+	spin_lock(&sd->input_pkt_queue.lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+	spin_unlock(&sd->input_pkt_queue.lock);
 #endif
 }
 
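The hunk above swaps the PREEMPT_RT raw spinlock back to the ordinary spinlock embedded in input_pkt_queue. Below is a rough userspace sketch of the same pattern (a lock embedded in the queue it protects, behind helpers that compile away when the feature is off), with pthreads standing in for kernel spinlocks; all names are illustrative, not kernel API:

```c
#include <pthread.h>

#define CONFIG_RPS 1	/* undefine to make the helpers compile to nothing */

/* Hypothetical stand-in for the kernel's input_pkt_queue. */
struct pkt_queue {
	pthread_mutex_t lock;	/* lock embedded in the queue it protects */
	unsigned int qlen;
};

static inline void rps_lock(struct pkt_queue *q)
{
#ifdef CONFIG_RPS
	pthread_mutex_lock(&q->lock);
#endif
}

static inline void rps_unlock(struct pkt_queue *q)
{
#ifdef CONFIG_RPS
	pthread_mutex_unlock(&q->lock);
#endif
}

int main(void)
{
	struct pkt_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	rps_lock(&q);
	q.qlen++;
	rps_unlock(&q);
	return 0;
}
```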
@@ -2635,6 +2635,8 @@
 	bool active = false;
 	unsigned int nr_ids;
 
+	WARN_ON_ONCE(index >= dev->num_tx_queues);
+
 	if (dev->num_tc) {
 		/* Do not allow XPS on subordinate device directly */
 		num_tc = dev->num_tc;
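The added WARN_ON_ONCE() flags an out-of-range tx queue index the first time it happens and then stays quiet, without stopping execution. A minimal userspace imitation of that warn-once pattern (not the kernel's actual implementation):

```c
#include <stdio.h>
#include <stdbool.h>

/* GCC/Clang statement expression; each expansion site gets its own
 * static flag, so each call site warns at most once. */
#define WARN_ON_ONCE(cond) ({				\
	static bool __warned;				\
	bool __ret = (cond);				\
	if (__ret && !__warned) {			\
		__warned = true;			\
		fprintf(stderr, "WARNING: %s:%d: %s\n",	\
			__FILE__, __LINE__, #cond);	\
	}						\
	__ret;						\
})

int main(void)
{
	unsigned int num_tx_queues = 4;

	for (unsigned int index = 0; index < 8; index++)
		WARN_ON_ONCE(index >= num_tx_queues);	/* fires once, at index 4 */

	return 0;
}
```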
@@ -3055,7 +3057,6 @@
 	sd->output_queue_tailp = &q->next_sched;
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
-	preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
@@ -3118,7 +3119,6 @@
 	__this_cpu_write(softnet_data.completion_queue, skb);
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
-	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
@@ -3126,8 +3126,10 @@
 {
 	if (in_irq() || irqs_disabled())
 		__dev_kfree_skb_irq(skb, reason);
+	else if (unlikely(reason == SKB_REASON_DROPPED))
+		kfree_skb(skb);
 	else
-		dev_kfree_skb(skb);
+		consume_skb(skb);
 }
 EXPORT_SYMBOL(__dev_kfree_skb_any);
 
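This hunk restores the distinction between dropping and consuming a buffer: kfree_skb() marks the free as a packet drop (visible to drop monitors), while consume_skb() is the normal end-of-life path. A toy userspace sketch of that dispatch, with stand-in types; nothing here is kernel API:

```c
#include <stdio.h>
#include <stdlib.h>

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

struct sk_buff { int len; };

static unsigned long rx_dropped;	/* what a drop monitor would see */

static void kfree_skb(struct sk_buff *skb)	/* freed as a *drop* */
{
	rx_dropped++;
	free(skb);
}

static void consume_skb(struct sk_buff *skb)	/* freed after normal use */
{
	free(skb);
}

static void dev_kfree_skb_any_reason(struct sk_buff *skb,
				     enum skb_free_reason reason)
{
	if (reason == SKB_REASON_DROPPED)
		kfree_skb(skb);
	else
		consume_skb(skb);
}

int main(void)
{
	dev_kfree_skb_any_reason(calloc(1, sizeof(struct sk_buff)),
				 SKB_REASON_DROPPED);
	printf("dropped: %lu\n", rx_dropped);	/* prints 1 */
	return 0;
}
```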
@@ -3325,7 +3327,7 @@
 		type = eth->h_proto;
 	}
 
-	return __vlan_get_protocol(skb, type, depth);
+	return vlan_get_protocol_and_depth(skb, type, depth);
 }
 
 /**
@@ -3638,7 +3640,7 @@
 int skb_csum_hwoffload_help(struct sk_buff *skb,
 			    const netdev_features_t features)
 {
-	if (unlikely(skb->csum_not_inet))
+	if (unlikely(skb_csum_is_sctp(skb)))
 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
 			skb_crc32c_csum_help(skb);
 
@@ -3797,11 +3799,7 @@
 	 * This permits qdisc->running owner to get the lock more
 	 * often and dequeue packets faster.
 	 */
-#ifdef CONFIG_PREEMPT_RT
-	contended = true;
-#else
 	contended = qdisc_is_running(q);
-#endif
 	if (unlikely(contended))
 		spin_lock(&q->busylock);
 
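With the CONFIG_PREEMPT_RT special case gone, contention on the qdisc is again detected opportunistically via qdisc_is_running(), and only then do contenders take the secondary busylock. A pthread sketch of the busylock idea, assuming a simplified qdisc with one owner flag; the unlocked read of `running` is deliberately racy and, as in the kernel, used only as a hint:

```c
#include <pthread.h>
#include <stdbool.h>

struct qdisc {
	pthread_mutex_t lock;		/* main queue lock */
	pthread_mutex_t busylock;	/* contention shunt */
	bool running;			/* set while an owner is dequeueing */
};

static struct qdisc the_qdisc = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.busylock = PTHREAD_MUTEX_INITIALIZER,
};

static void qdisc_xmit(struct qdisc *q)
{
	bool contended = q->running;	/* racy read: only a hint */

	/* Contenders park on busylock so only one of them at a time
	 * fights the current owner for the main lock. */
	if (contended)
		pthread_mutex_lock(&q->busylock);

	pthread_mutex_lock(&q->lock);
	q->running = true;
	/* ... enqueue + try to dequeue under q->lock ... */
	q->running = false;
	pthread_mutex_unlock(&q->lock);

	if (contended)
		pthread_mutex_unlock(&q->busylock);
}

int main(void)
{
	qdisc_xmit(&the_qdisc);
	return 0;
}
```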
@@ -4392,8 +4390,10 @@
 		u32 next_cpu;
 		u32 ident;
 
-		/* First check into global flow table if there is a match */
-		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+		/* First check into global flow table if there is a match.
+		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+		 */
+		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
 		if ((ident ^ hash) & ~rps_cpu_mask)
 			goto try_rps;
 
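The new comment and READ_ONCE() make the lockless read of the flow-table entry explicit: paired with WRITE_ONCE() on the writer side, each access is a single non-torn load or store even though reader and writer race. A userspace sketch of the same pairing using C11 relaxed atomics (the closest portable analogue to the kernel's READ_ONCE/WRITE_ONCE); names are illustrative:

```c
#include <stdatomic.h>
#include <stdint.h>

#define TABLE_MASK 255u

static _Atomic uint32_t flow_ents[TABLE_MASK + 1];

/* Writer side, cf. rps_record_sock_flow(): single non-torn store. */
static void record_flow(uint32_t hash, uint32_t ident)
{
	atomic_store_explicit(&flow_ents[hash & TABLE_MASK], ident,
			      memory_order_relaxed);
}

/* Reader side, cf. get_rps_cpu(): single non-torn load, then the
 * upper bits of the entry must match the hash for a hit. */
static int flow_matches(uint32_t hash, uint32_t cpu_mask)
{
	uint32_t ident = atomic_load_explicit(&flow_ents[hash & TABLE_MASK],
					      memory_order_relaxed);

	return !((ident ^ hash) & ~cpu_mask);
}

int main(void)
{
	record_flow(0x1234, 0x1200 | 3);	/* upper hash bits + cpu 3 */
	return !flow_matches(0x1234, 0xff);	/* exits 0 on a hit */
}
```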
@@ -4601,7 +4601,6 @@
 	rps_unlock(sd);
 
 	local_irq_restore(flags);
-	preempt_check_resched_rt();
 
 	atomic_long_inc(&skb->dev->rx_dropped);
 	kfree_skb(skb);
@@ -4817,7 +4816,7 @@
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
-		migrate_disable();
+		preempt_disable();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -4827,14 +4826,14 @@
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
 		rcu_read_unlock();
-		migrate_enable();
+		preempt_enable();
 	} else
 #endif
 	{
 		unsigned int qtail;
 
-		ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
-		put_cpu_light();
+		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+		put_cpu();
 	}
 	return ret;
 }
@@ -4873,9 +4872,11 @@
 
 	trace_netif_rx_ni_entry(skb);
 
-	local_bh_disable();
+	preempt_disable();
 	err = netif_rx_internal(skb);
-	local_bh_enable();
+	if (local_softirq_pending())
+		do_softirq();
+	preempt_enable();
 	trace_netif_rx_ni_exit(err);
 
 	return err;
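netif_rx_ni() now disables preemption and flushes any softirqs it raised itself instead of relying on local_bh_enable() to do so. The toy sketch below mimics only that control flow (check a pending flag, run the deferred work before leaving); it has none of the real softirq machinery and every name is a stand-in:

```c
#include <stdbool.h>
#include <stdio.h>

static __thread bool softirq_pending;	/* per-thread pending-work flag */

static void do_softirq(void)		/* stand-in for deferred work */
{
	softirq_pending = false;
	puts("ran deferred work inline");
}

static int netif_rx_internal(void)	/* stand-in: queues work, sets flag */
{
	softirq_pending = true;
	return 0;
}

static int netif_rx_ni(void)
{
	int err;

	/* preempt_disable() would go here in the kernel */
	err = netif_rx_internal();
	if (softirq_pending)		/* cf. local_softirq_pending() */
		do_softirq();
	/* preempt_enable() */
	return err;
}

int main(void)
{
	return netif_rx_ni();
}
```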
@@ -6119,6 +6120,7 @@
 
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
+	nf_reset_ct(skb);
 	skb_dst_drop(skb);
 	skb_ext_put(skb);
 	kmem_cache_free(skbuff_head_cache, skb);
@@ -6351,14 +6353,12 @@
 		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
-		preempt_check_resched_rt();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
 		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
-	preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -6436,7 +6436,6 @@
 	local_irq_save(flags);
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
-	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
@@ -10306,9 +10305,7 @@
 	BUG_ON(!list_empty(&dev->ptype_specific));
 	WARN_ON(rcu_access_pointer(dev->ip_ptr));
 	WARN_ON(rcu_access_pointer(dev->ip6_ptr));
-#if IS_ENABLED(CONFIG_DECNET)
-	WARN_ON(dev->dn_ptr);
-#endif
+
 	if (dev->priv_destructor)
 		dev->priv_destructor(dev);
 	if (dev->needs_free_netdev)
@@ -10978,7 +10975,6 @@
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
-	preempt_check_resched_rt();
 
 #ifdef CONFIG_RPS
 	remsd = oldsd->rps_ipi_list;
@@ -10992,7 +10988,7 @@
 		netif_rx_ni(skb);
 		input_queue_head_incr(oldsd);
 	}
-	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx_ni(skb);
 		input_queue_head_incr(oldsd);
 	}
@@ -11308,7 +11304,7 @@
 
 		INIT_WORK(flush, flush_backlog);
 
-		skb_queue_head_init_raw(&sd->input_pkt_queue);
+		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
 #ifdef CONFIG_XFRM_OFFLOAD
 		skb_queue_head_init(&sd->xfrm_backlog);