hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/net/core/dev.c
....@@ -222,14 +222,14 @@
222222 static inline void rps_lock(struct softnet_data *sd)
223223 {
224224 #ifdef CONFIG_RPS
225 -	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
225 +	spin_lock(&sd->input_pkt_queue.lock);
226226 #endif
227227 }
228228
229229 static inline void rps_unlock(struct softnet_data *sd)
230230 {
231231 #ifdef CONFIG_RPS
232 -	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
232 +	spin_unlock(&sd->input_pkt_queue.lock);
233233 #endif
234234 }
235235
....@@ -3055,7 +3055,6 @@
30553055 sd->output_queue_tailp = &q->next_sched;
30563056 raise_softirq_irqoff(NET_TX_SOFTIRQ);
30573057 local_irq_restore(flags);
3058 -	preempt_check_resched_rt();
30593058 }
30603059
30613060 void __netif_schedule(struct Qdisc *q)
....@@ -3118,7 +3117,6 @@
31183117 __this_cpu_write(softnet_data.completion_queue, skb);
31193118 raise_softirq_irqoff(NET_TX_SOFTIRQ);
31203119 local_irq_restore(flags);
3121 -	preempt_check_resched_rt();
31223120 }
31233121 EXPORT_SYMBOL(__dev_kfree_skb_irq);
31243122
....@@ -3797,11 +3795,7 @@
37973795 * This permits qdisc->running owner to get the lock more
37983796 * often and dequeue packets faster.
37993797 */
3800 -#ifdef CONFIG_PREEMPT_RT
3801 -	contended = true;
3802 -#else
38033798 	contended = qdisc_is_running(q);
3804 -#endif
38053799 if (unlikely(contended))
38063800 spin_lock(&q->busylock);
38073801
....@@ -4601,7 +4595,6 @@
46014595 rps_unlock(sd);
46024596
46034597 local_irq_restore(flags);
4604 -	preempt_check_resched_rt();
46054598
46064599 atomic_long_inc(&skb->dev->rx_dropped);
46074600 kfree_skb(skb);
....@@ -4817,7 +4810,7 @@
48174810 struct rps_dev_flow voidflow, *rflow = &voidflow;
48184811 int cpu;
48194812
4820 -	migrate_disable();
4813 +	preempt_disable();
48214814 rcu_read_lock();
48224815
48234816 cpu = get_rps_cpu(skb->dev, skb, &rflow);
....@@ -4827,14 +4820,14 @@
48274820 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
48284821
48294822 rcu_read_unlock();
4830 -	migrate_enable();
4823 +	preempt_enable();
48314824 } else
48324825 #endif
48334826 {
48344827 unsigned int qtail;
48354828
4836 -	ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
4837 -	put_cpu_light();
4829 +	ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4830 +	put_cpu();
48384831 }
48394832 return ret;
48404833 }
....@@ -4873,9 +4866,11 @@
48734866
48744867 trace_netif_rx_ni_entry(skb);
48754868
4876 -	local_bh_disable();
4869 +	preempt_disable();
48774870 	err = netif_rx_internal(skb);
4878 -	local_bh_enable();
4871 +	if (local_softirq_pending())
4872 +		do_softirq();
4873 +	preempt_enable();
48794874 trace_netif_rx_ni_exit(err);
48804875
48814876 return err;
....@@ -6351,14 +6346,12 @@
63516346 sd->rps_ipi_list = NULL;
63526347
63536348 local_irq_enable();
6354 -		preempt_check_resched_rt();
63556349
63566350 /* Send pending IPI's to kick RPS processing on remote cpus. */
63576351 net_rps_send_ipi(remsd);
63586352 } else
63596353 #endif
63606354 local_irq_enable();
6361 -	preempt_check_resched_rt();
63626355 }
63636356
63646357 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
....@@ -6436,7 +6429,6 @@
64366429 local_irq_save(flags);
64376430 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
64386431 local_irq_restore(flags);
6439 -	preempt_check_resched_rt();
64406432 }
64416433 EXPORT_SYMBOL(__napi_schedule);
64426434
....@@ -10978,7 +10970,6 @@
1097810970
1097910971 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1098010972 local_irq_enable();
10981 -	preempt_check_resched_rt();
1098210973
1098310974 #ifdef CONFIG_RPS
1098410975 remsd = oldsd->rps_ipi_list;
....@@ -10992,7 +10983,7 @@
1099210983 netif_rx_ni(skb);
1099310984 input_queue_head_incr(oldsd);
1099410985 }
10995 -	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
10986 +	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
1099610987 netif_rx_ni(skb);
1099710988 input_queue_head_incr(oldsd);
1099810989 }
....@@ -11308,7 +11299,7 @@
1130811299
1130911300 INIT_WORK(flush, flush_backlog);
1131011301
11311 -	skb_queue_head_init_raw(&sd->input_pkt_queue);
11302 +	skb_queue_head_init(&sd->input_pkt_queue);
1131211303 skb_queue_head_init(&sd->process_queue);
1131311304 #ifdef CONFIG_XFRM_OFFLOAD
1131411305 skb_queue_head_init(&sd->xfrm_backlog);