2023-11-20 69d6da3c1c63675524a25e7dc92a4f43c4164cef
kernel/net/core/dev.c
@@ -219,14 +219,14 @@
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_lock(&sd->input_pkt_queue.lock);
+	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_unlock(&sd->input_pkt_queue.lock);
+	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
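Note: the .raw_lock member taken above is not part of mainline's struct sk_buff_head; it comes from the companion -rt change to include/linux/skbuff.h (the skbufhead-raw-lock patch). A hedged sketch of that companion change, reproduced from memory rather than from this diff:

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
	raw_spinlock_t	raw_lock;	/* RT: backlog queues are protected by this one */
};

static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
{
	raw_spin_lock_init(&list->raw_lock);
	__skb_queue_head_init(list);
}

On RT, spinlock_t is a sleeping lock, so rps_lock()/rps_unlock() must use the raw variant to remain safe with interrupts disabled; queues set up with skb_queue_head_init_raw() (see the final hunk below) are the ones driven through raw_lock.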
@@ -2723,6 +2723,7 @@
 	sd->output_queue_tailp = &q->next_sched;
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
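preempt_check_resched_rt() is an RT-only helper added elsewhere in the -rt series (include/linux/preempt.h): the softirq is raised with interrupts off, and once they are re-enabled a resched check lets ksoftirqd run promptly on RT, while compiling away otherwise. A hedged sketch of the helper, from memory of the -rt series rather than from this diff:

#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_check_resched_rt()	preempt_check_resched()
#else
# define preempt_check_resched_rt()	barrier()
#endif

The same call is inserted after each local_irq_restore()/local_irq_enable() in the hunks below for the same reason.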
@@ -2785,6 +2786,7 @@
 	__this_cpu_write(softnet_data.completion_queue, skb);
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
@@ -3468,7 +3470,11 @@
 	 * This permits qdisc->running owner to get the lock more
 	 * often and dequeue packets faster.
 	 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+	contended = true;
+#else
 	contended = qdisc_is_running(q);
+#endif
 	if (unlikely(contended))
 		spin_lock(&q->busylock);
 
@@ -3829,10 +3835,14 @@
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		if (READ_ONCE(txq->xmit_lock_owner) != current) {
+#else
 		/* Other cpus might concurrently change txq->xmit_lock_owner
 		 * to -1 or to their cpu id, but not to our id.
 		 */
 		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
+#endif
 			if (dev_xmit_recursion())
 				goto recursion_alert;
 
@@ -4267,6 +4277,7 @@
 	rps_unlock(sd);
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 
 	atomic_long_inc(&skb->dev->rx_dropped);
 	kfree_skb(skb);
@@ -4481,7 +4492,7 @@
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
-		preempt_disable();
+		migrate_disable();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -4491,14 +4502,14 @@
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
 		rcu_read_unlock();
-		preempt_enable();
+		migrate_enable();
 	} else
 #endif
 	{
 		unsigned int qtail;
 
-		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
-		put_cpu();
+		ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
+		put_cpu_light();
 	}
 	return ret;
 }
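migrate_disable()/migrate_enable() and get_cpu_light()/put_cpu_light() pin the task to its current CPU without disabling preemption, which matters here because enqueue_to_backlog() takes locks that sleep on RT. A hedged sketch of the _light helpers as the -rt series defines them in include/linux/smp.h (from memory, not from this diff):

#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#endif

On non-RT kernels the behaviour is unchanged; on RT the caller stays preemptible while still seeing a stable CPU id.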
@@ -4532,11 +4543,9 @@
 
 	trace_netif_rx_ni_entry(skb);
 
-	preempt_disable();
+	local_bh_disable();
 	err = netif_rx_internal(skb);
-	if (local_softirq_pending())
-		do_softirq();
-	preempt_enable();
+	local_bh_enable();
 
 	return err;
 }
@@ -5818,12 +5827,14 @@
 		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
+		preempt_check_resched_rt();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
 		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -5853,16 +5864,18 @@
 	while (again) {
 		struct sk_buff *skb;
 
+		local_irq_disable();
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			local_irq_enable();
 			rcu_read_lock();
 			__netif_receive_skb(skb);
 			rcu_read_unlock();
 			input_queue_head_incr(sd);
 			if (++work >= quota)
 				goto state_changed;
+			local_irq_disable();
 		}
 
-		local_irq_disable();
 		rps_lock(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
@@ -5904,6 +5917,7 @@
 	local_irq_save(flags);
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
@@ -5940,6 +5954,7 @@
 }
 EXPORT_SYMBOL(napi_schedule_prep);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
@@ -5958,6 +5973,7 @@
 	__napi_schedule(n);
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
+#endif
 
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
@@ -6352,12 +6368,20 @@
 	unsigned long time_limit = jiffies +
 		usecs_to_jiffies(netdev_budget_usecs);
 	int budget = netdev_budget;
+	struct sk_buff_head tofree_q;
+	struct sk_buff *skb;
 	LIST_HEAD(list);
 	LIST_HEAD(repoll);
 
+	__skb_queue_head_init(&tofree_q);
+
 	local_irq_disable();
+	skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
 	list_splice_init(&sd->poll_list, &list);
 	local_irq_enable();
+
+	while ((skb = __skb_dequeue(&tofree_q)))
+		kfree_skb(skb);
 
 	for (;;) {
 		struct napi_struct *n;
@@ -6388,7 +6412,7 @@
 	list_splice_tail(&repoll, &list);
 	list_splice(&list, &sd->poll_list);
 	if (!list_empty(&sd->poll_list))
-		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+		__raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
 
 	net_rps_action_and_irq_enable(sd);
 out:
@@ -8595,7 +8619,7 @@
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(queue);
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
 #ifdef CONFIG_BQL
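netdev_queue_clear_owner() hides the owner reset because, under CONFIG_PREEMPT_RT_FULL, xmit_lock_owner is tracked as the owning task rather than a CPU id (matching the "!= current" check in the HARD_TX_LOCK hunk at 3829 above). A hedged sketch of the companion netdevice.h helpers this diff assumes; the exact definitions in the -rt series may differ:

#ifdef CONFIG_PREEMPT_RT_FULL
static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
	WRITE_ONCE(txq->xmit_lock_owner, current);	/* owner is a task on RT */
}

static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, NULL);
}
#else
static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
	WRITE_ONCE(txq->xmit_lock_owner, cpu);		/* owner is a CPU id otherwise */
}

static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, -1);
}
#endif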
@@ -9542,6 +9566,7 @@
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 #ifdef CONFIG_RPS
 	remsd = oldsd->rps_ipi_list;
@@ -9555,9 +9580,12 @@
 		netif_rx_ni(skb);
 		input_queue_head_incr(oldsd);
 	}
-	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx_ni(skb);
 		input_queue_head_incr(oldsd);
+	}
+	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+		kfree_skb(skb);
 	}
 
 	return 0;
@@ -9869,8 +9897,9 @@
 
 	INIT_WORK(flush, flush_backlog);
 
-	skb_queue_head_init(&sd->input_pkt_queue);
-	skb_queue_head_init(&sd->process_queue);
+	skb_queue_head_init_raw(&sd->input_pkt_queue);
+	skb_queue_head_init_raw(&sd->process_queue);
+	skb_queue_head_init_raw(&sd->tofree_queue);
 #ifdef CONFIG_XFRM_OFFLOAD
 	skb_queue_head_init(&sd->xfrm_backlog);
 #endif
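tofree_queue is likewise an RT-only addition to struct softnet_data from the same companion change in include/linux/netdevice.h: skbs that cannot be freed safely in the current context are queued there and drained at the top of net_rx_action(), as seen in the hunk at 6352 above. A hedged sketch of the assumed member addition (placement within the struct is illustrative only):

struct softnet_data {
	/* ... existing members ... */
	struct sk_buff_head	input_pkt_queue;
	struct sk_buff_head	tofree_queue;	/* RT only: skbs deferred for kfree_skb() */
	/* ... */
};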