.. | .. |
---|
646 | 646 | int page_off, |
---|
647 | 647 | unsigned int *len) |
---|
648 | 648 | { |
---|
649 | | - struct page *page = alloc_page(GFP_ATOMIC); |
---|
| 649 | + int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
---|
| 650 | + struct page *page; |
---|
650 | 651 | |
---|
| 652 | + if (page_off + *len + tailroom > PAGE_SIZE) |
---|
| 653 | + return NULL; |
---|
| 654 | + |
---|
| 655 | + page = alloc_page(GFP_ATOMIC); |
---|
651 | 656 | if (!page) |
---|
652 | 657 | return NULL; |
---|
653 | 658 | |
---|
.. | .. |
---|
655 | 660 | page_off += *len; |
---|
656 | 661 | |
---|
657 | 662 | while (--*num_buf) { |
---|
658 | | - int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
---|
659 | 663 | unsigned int buflen; |
---|
660 | 664 | void *buf; |
---|
661 | 665 | int off; |
---|
.. | .. |
---|
1525 | 1529 | |
---|
1526 | 1530 | received = virtnet_receive(rq, budget, &xdp_xmit); |
---|
1527 | 1531 | |
---|
| 1532 | + if (xdp_xmit & VIRTIO_XDP_REDIR) |
---|
| 1533 | + xdp_do_flush(); |
---|
| 1534 | + |
---|
1528 | 1535 | /* Out of packets? */ |
---|
1529 | 1536 | if (received < budget) |
---|
1530 | 1537 | virtqueue_napi_complete(napi, rq->vq, received); |
---|
1531 | | - |
---|
1532 | | - if (xdp_xmit & VIRTIO_XDP_REDIR) |
---|
1533 | | - xdp_do_flush(); |
---|
1534 | 1538 | |
---|
1535 | 1539 | if (xdp_xmit & VIRTIO_XDP_TX) { |
---|
1536 | 1540 | sq = virtnet_xdp_get_sq(vi); |
---|
.. | .. |
---|
1928 | 1932 | cancel_delayed_work_sync(&vi->refill); |
---|
1929 | 1933 | |
---|
1930 | 1934 | for (i = 0; i < vi->max_queue_pairs; i++) { |
---|
1931 | | - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
---|
1932 | 1935 | napi_disable(&vi->rq[i].napi); |
---|
| 1936 | + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
---|
1933 | 1937 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
---|
1934 | 1938 | } |
---|
1935 | 1939 | |
---|
.. | .. |
---|
2746 | 2750 | put_page(vi->rq[i].alloc_frag.page); |
---|
2747 | 2751 | } |
---|
2748 | 2752 | |
---|
/* Release one buffer detached from a send queue.
 *
 * TX descriptors carry either an skb or a tagged xdp_frame pointer
 * (distinguished by is_xdp_frame()); free it through the matching API.
 * The @vq argument keeps the signature symmetric with the RX variant.
 */
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
{
	if (is_xdp_frame(buf))
		xdp_return_frame(ptr_to_xdp(buf));
	else
		dev_kfree_skb(buf);
}
---|
| 2760 | + |
---|
| 2761 | +static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) |
---|
| 2762 | +{ |
---|
| 2763 | + struct virtnet_info *vi = vq->vdev->priv; |
---|
| 2764 | + int i = vq2rxq(vq); |
---|
| 2765 | + |
---|
| 2766 | + if (vi->mergeable_rx_bufs) |
---|
| 2767 | + put_page(virt_to_head_page(buf)); |
---|
| 2768 | + else if (vi->big_packets) |
---|
| 2769 | + give_pages(&vi->rq[i], buf); |
---|
| 2770 | + else |
---|
| 2771 | + put_page(virt_to_head_page(buf)); |
---|
| 2772 | +} |
---|
| 2773 | + |
---|
2749 | 2774 | static void free_unused_bufs(struct virtnet_info *vi) |
---|
2750 | 2775 | { |
---|
2751 | 2776 | void *buf; |
---|
.. | .. |
---|
2753 | 2778 | |
---|
2754 | 2779 | for (i = 0; i < vi->max_queue_pairs; i++) { |
---|
2755 | 2780 | struct virtqueue *vq = vi->sq[i].vq; |
---|
2756 | | - while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
---|
2757 | | - if (!is_xdp_frame(buf)) |
---|
2758 | | - dev_kfree_skb(buf); |
---|
2759 | | - else |
---|
2760 | | - xdp_return_frame(ptr_to_xdp(buf)); |
---|
2761 | | - } |
---|
| 2781 | + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
---|
| 2782 | + virtnet_sq_free_unused_buf(vq, buf); |
---|
| 2783 | + cond_resched(); |
---|
2762 | 2784 | } |
---|
2763 | 2785 | |
---|
2764 | 2786 | for (i = 0; i < vi->max_queue_pairs; i++) { |
---|
2765 | 2787 | struct virtqueue *vq = vi->rq[i].vq; |
---|
2766 | | - |
---|
2767 | | - while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
---|
2768 | | - if (vi->mergeable_rx_bufs) { |
---|
2769 | | - put_page(virt_to_head_page(buf)); |
---|
2770 | | - } else if (vi->big_packets) { |
---|
2771 | | - give_pages(&vi->rq[i], buf); |
---|
2772 | | - } else { |
---|
2773 | | - put_page(virt_to_head_page(buf)); |
---|
2774 | | - } |
---|
2775 | | - } |
---|
| 2788 | + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
---|
| 2789 | + virtnet_rq_free_unused_buf(vq, buf); |
---|
| 2790 | + cond_resched(); |
---|
2776 | 2791 | } |
---|
2777 | 2792 | } |
---|
2778 | 2793 | |
---|
.. | .. |
---|
3220 | 3235 | |
---|
3221 | 3236 | virtio_device_ready(vdev); |
---|
3222 | 3237 | |
---|
| 3238 | + _virtnet_set_queues(vi, vi->curr_queue_pairs); |
---|
| 3239 | + |
---|
3223 | 3240 | rtnl_unlock(); |
---|
3224 | 3241 | |
---|
3225 | 3242 | err = virtnet_cpu_notif_add(vi); |
---|
.. | .. |
---|
3227 | 3244 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
---|
3228 | 3245 | goto free_unregister_netdev; |
---|
3229 | 3246 | } |
---|
3230 | | - |
---|
3231 | | - virtnet_set_queues(vi, vi->curr_queue_pairs); |
---|
3232 | 3247 | |
---|
3233 | 3248 | /* Assume link up if device can't report link status, |
---|
3234 | 3249 | otherwise get link status from config. */ |
---|