2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/xen-netback/interface.c
@@ -70,7 +70,7 @@
 	wake_up(&queue->dealloc_wq);
 }
 
-int xenvif_schedulable(struct xenvif *vif)
+static int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) &&
 		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@@ -178,23 +178,8 @@
 	return IRQ_HANDLED;
 }
 
-int xenvif_queue_stopped(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-	unsigned int id = queue->id;
-	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
-}
-
-void xenvif_wake_queue(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-	unsigned int id = queue->id;
-	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
-}
-
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
@@ -207,7 +192,8 @@
 		return 0;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+		return netdev_pick_tx(dev, skb, NULL) %
+		       dev->real_num_tx_queues;
 
 	xenvif_set_skb_hash(vif, skb);
 
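Context for the two hunks above: since Linux v5.2 the ndo_select_queue callback no longer receives a select_queue_fallback_t pointer; drivers call netdev_pick_tx() for the core's default queue selection instead. xenvif_queue_stopped() and xenvif_wake_queue() are dropped as they were apparently left without callers. A minimal sketch of the newer callback shape (demo_select_queue is a hypothetical name, not driver code):

	#include <linux/netdevice.h>

	/* Hypothetical ndo_select_queue implementation: defer to the core's
	 * default pick unless the device steers the packet itself. */
	static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
				     struct net_device *sb_dev)
	{
		/* netdev_pick_tx() replaces the fallback() pointer that the
		 * pre-v5.2 callback signature used to pass in. */
		return netdev_pick_tx(dev, skb, sb_dev) % dev->real_num_tx_queues;
	}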
@@ -269,14 +255,16 @@
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
 		skb_clear_hash(skb);
 
-	xenvif_rx_queue_tail(queue, skb);
+	if (!xenvif_rx_queue_tail(queue, skb))
+		goto drop;
+
 	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
 drop:
 	vif->dev->stats.tx_dropped++;
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
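Two hardening fixes in the hunk above: xenvif_rx_queue_tail() now returns whether the skb was actually queued, so a stalled or full internal RX queue drops the packet instead of growing without bound, and the drop path switches to dev_kfree_skb_any(), which is safe from any context (it defers to dev_kfree_skb_irq() in hardirq context or with interrupts disabled). An illustrative bounded-admission helper; the limit parameter is hypothetical, not the driver's actual accounting:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Sketch: refuse new work when the consumer has fallen behind. */
	static bool demo_queue_tail(struct sk_buff_head *q, struct sk_buff *skb,
				    unsigned int limit)
	{
		unsigned long flags;
		bool queued = false;

		spin_lock_irqsave(&q->lock, flags);
		if (skb_queue_len(q) < limit) {
			__skb_queue_tail(q, skb);
			queued = true;
		}
		spin_unlock_irqrestore(&q->lock, flags);

		return queued;
	}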
@@ -528,6 +516,8 @@
 	vif->queues = NULL;
 	vif->num_queues = 0;
 
+	vif->xdp_headroom = 0;
+
 	spin_lock_init(&vif->lock);
 	INIT_LIST_HEAD(&vif->fe_mcast_addr);
 
@@ -630,6 +620,7 @@
 	struct net_device *dev = vif->dev;
 	void *addr;
 	struct xen_netif_ctrl_sring *shared;
+	RING_IDX rsp_prod, req_prod;
 	int err;
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
@@ -638,7 +629,14 @@
 		goto err;
 
 	shared = (struct xen_netif_ctrl_sring *)addr;
-	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+	rsp_prod = READ_ONCE(shared->rsp_prod);
+	req_prod = READ_ONCE(shared->req_prod);
+
+	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
+
+	err = -EIO;
+	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
+		goto err_unmap;
 
 	err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
 	if (err < 0)
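The control-ring hunk above replaces BACK_RING_INIT(), which would have reset indices living in memory the frontend also writes, with a snapshot-and-validate sequence: both producer indices are read exactly once (READ_ONCE() stops the compiler from re-loading guest-writable memory), the ring is attached at the observed rsp_prod, and the connection is refused with -EIO if the frontend claims more outstanding requests than the ring can hold. The unsigned subtraction stays correct across index wraparound because ring producers are free-running counters. A self-contained demonstration of that property (values are made up; Xen's RING_IDX is an unsigned int, modelled here as uint32_t):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t RING_IDX;	/* stand-in for Xen's typedef */

	int main(void)
	{
		/* req_prod has wrapped past UINT32_MAX; rsp_prod has not. */
		RING_IDX rsp_prod = 0xfffffffeu;
		RING_IDX req_prod = 0x00000003u;
		unsigned int ring_size = 64;

		RING_IDX outstanding = req_prod - rsp_prod;	/* == 5 despite wrap */

		printf("outstanding=%" PRIu32 " sane=%d\n", outstanding,
		       outstanding <= ring_size);
		return 0;
	}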
@@ -671,6 +669,39 @@
 	return err;
 }
 
+static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+{
+	if (queue->task) {
+		kthread_stop(queue->task);
+		put_task_struct(queue->task);
+		queue->task = NULL;
+	}
+
+	if (queue->dealloc_task) {
+		kthread_stop(queue->dealloc_task);
+		queue->dealloc_task = NULL;
+	}
+
+	if (queue->napi.poll) {
+		netif_napi_del(&queue->napi);
+		queue->napi.poll = NULL;
+	}
+
+	if (queue->tx_irq) {
+		unbind_from_irqhandler(queue->tx_irq, queue);
+		if (queue->tx_irq == queue->rx_irq)
+			queue->rx_irq = 0;
+		queue->tx_irq = 0;
+	}
+
+	if (queue->rx_irq) {
+		unbind_from_irqhandler(queue->rx_irq, queue);
+		queue->rx_irq = 0;
+	}
+
+	xenvif_unmap_frontend_data_rings(queue);
+}
+
 int xenvif_connect_data(struct xenvif_queue *queue,
 			unsigned long tx_ring_ref,
 			unsigned long rx_ring_ref,
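xenvif_disconnect_queue() is written to be idempotent: every step checks its handle and clears it afterwards (netif_napi_del() only runs while napi.poll is set, and the shared tx/rx IRQ case zeroes both fields), so the helper is safe on a queue in any intermediate state, whether fully connected, half connected after a failed bind, or already torn down. A toy version of the pattern outside the kernel, with hypothetical resources:

	#include <stdio.h>
	#include <stdlib.h>

	struct demo_queue {
		char *rx_buf;	/* stands in for mapped rings, IRQs, ... */
		FILE *log;
	};

	/* Check each handle, release it, then clear it: safe on a partially
	 * constructed object and safe to call more than once. */
	static void demo_disconnect(struct demo_queue *q)
	{
		if (q->rx_buf) {
			free(q->rx_buf);
			q->rx_buf = NULL;
		}
		if (q->log) {
			fclose(q->log);
			q->log = NULL;
		}
	}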
@@ -678,7 +709,7 @@
 			unsigned int rx_evtchn)
 {
 	struct task_struct *task;
-	int err = -ENOMEM;
+	int err;
 
 	BUG_ON(queue->tx_irq);
 	BUG_ON(queue->task);
@@ -696,13 +727,32 @@
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 		       XENVIF_NAPI_WEIGHT);
 
+	queue->stalled = true;
+
+	task = kthread_run(xenvif_kthread_guest_rx, queue,
+			   "%s-guest-rx", queue->name);
+	if (IS_ERR(task))
+		goto kthread_err;
+	queue->task = task;
+	/*
+	 * Take a reference to the task in order to prevent it from being freed
+	 * if the thread function returns before kthread_stop is called.
+	 */
+	get_task_struct(task);
+
+	task = kthread_run(xenvif_dealloc_kthread, queue,
+			   "%s-dealloc", queue->name);
+	if (IS_ERR(task))
+		goto kthread_err;
+	queue->dealloc_task = task;
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
 			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
 			queue->name, queue);
 		if (err < 0)
-			goto err_unmap;
+			goto err;
 		queue->tx_irq = queue->rx_irq = err;
 		disable_irq(queue->tx_irq);
 	} else {
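The kthreads now start before the event channels are bound, and kthread_run() replaces the old kthread_create() plus deferred wake_up_process() pair, so by the time any bind can fail the threads already exist and the common error path may stop them unconditionally. The get_task_struct() reference is load-bearing: kthread_stop() touches the task_struct, which could already be freed if the thread function returned early. A minimal sketch of that pairing; the demo_* names are hypothetical:

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/sched/task.h>

	static struct task_struct *demo_task;

	static int demo_thread_fn(void *data)
	{
		/* Sleep until someone calls kthread_stop(). */
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		return 0;
	}

	static int demo_start(void)
	{
		/* kthread_run() == kthread_create() + wake_up_process() */
		demo_task = kthread_run(demo_thread_fn, NULL, "demo-worker");
		if (IS_ERR(demo_task))
			return PTR_ERR(demo_task);

		/* Pin the task_struct so kthread_stop() stays valid even if
		 * demo_thread_fn() returns first. */
		get_task_struct(demo_task);
		return 0;
	}

	static void demo_stop(void)
	{
		kthread_stop(demo_task);
		put_task_struct(demo_task);
		demo_task = NULL;
	}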
@@ -713,7 +763,7 @@
 			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
 			queue->tx_irq_name, queue);
 		if (err < 0)
-			goto err_unmap;
+			goto err;
 		queue->tx_irq = err;
 		disable_irq(queue->tx_irq);
 
@@ -723,47 +773,18 @@
 			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
 			queue->rx_irq_name, queue);
 		if (err < 0)
-			goto err_tx_unbind;
+			goto err;
 		queue->rx_irq = err;
 		disable_irq(queue->rx_irq);
 	}
 
-	queue->stalled = true;
-
-	task = kthread_create(xenvif_kthread_guest_rx,
-			      (void *)queue, "%s-guest-rx", queue->name);
-	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", queue->name);
-		err = PTR_ERR(task);
-		goto err_rx_unbind;
-	}
-	queue->task = task;
-	get_task_struct(task);
-
-	task = kthread_create(xenvif_dealloc_kthread,
-			      (void *)queue, "%s-dealloc", queue->name);
-	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", queue->name);
-		err = PTR_ERR(task);
-		goto err_rx_unbind;
-	}
-	queue->dealloc_task = task;
-
-	wake_up_process(queue->task);
-	wake_up_process(queue->dealloc_task);
-
 	return 0;
 
-err_rx_unbind:
-	unbind_from_irqhandler(queue->rx_irq, queue);
-	queue->rx_irq = 0;
-err_tx_unbind:
-	unbind_from_irqhandler(queue->tx_irq, queue);
-	queue->tx_irq = 0;
-err_unmap:
-	xenvif_unmap_frontend_data_rings(queue);
-	netif_napi_del(&queue->napi);
+kthread_err:
+	pr_warn("Could not allocate kthread for %s\n", queue->name);
+	err = PTR_ERR(task);
 err:
+	xenvif_disconnect_queue(queue);
 	return err;
 }
 
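With setup reordered and teardown centralized, the old err_rx_unbind/err_tx_unbind/err_unmap label ladder collapses into a single err: label that calls the idempotent xenvif_disconnect_queue(), and both kthread failures share one kthread_err: that keeps the warning and the PTR_ERR() conversion in one place. This is also why the earlier hunk could drop the 'int err = -ENOMEM' preset: every failing branch now assigns err before jumping. The shape in miniature, with hypothetical step functions:

	#include <stdio.h>

	struct demo_queue { int a_up, b_up; };

	/* Hypothetical setup steps standing in for thread starts and binds. */
	static int step_a(struct demo_queue *q) { q->a_up = 1; return 0; }
	static int step_b(struct demo_queue *q) { (void)q; return -5; /* fails */ }

	/* Idempotent unwind: only undoes what was actually set up. */
	static void demo_disconnect(struct demo_queue *q)
	{
		q->a_up = 0;
		q->b_up = 0;
	}

	static int demo_connect(struct demo_queue *q)
	{
		int err;

		err = step_a(q);
		if (err)
			goto err;
		err = step_b(q);
		if (err)
			goto err;
		return 0;

	err:	/* single label, regardless of how far setup got */
		demo_disconnect(q);
		return err;
	}

	int main(void)
	{
		struct demo_queue q = { 0, 0 };
		printf("connect -> %d\n", demo_connect(&q));
		return 0;
	}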
@@ -791,30 +812,7 @@
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
-		netif_napi_del(&queue->napi);
-
-		if (queue->task) {
-			kthread_stop(queue->task);
-			put_task_struct(queue->task);
-			queue->task = NULL;
-		}
-
-		if (queue->dealloc_task) {
-			kthread_stop(queue->dealloc_task);
-			queue->dealloc_task = NULL;
-		}
-
-		if (queue->tx_irq) {
-			if (queue->tx_irq == queue->rx_irq)
-				unbind_from_irqhandler(queue->tx_irq, queue);
-			else {
-				unbind_from_irqhandler(queue->tx_irq, queue);
-				unbind_from_irqhandler(queue->rx_irq, queue);
-			}
-			queue->tx_irq = 0;
-		}
-
-		xenvif_unmap_frontend_data_rings(queue);
+		xenvif_disconnect_queue(queue);
 	}
 
 	xenvif_mcast_addr_list_free(vif);