  .. |   .. |
 832 |  832 | 				int xdp_count, int xdp_idx,
 833 |  833 | 				int rxr_count, int rxr_idx)
 834 |  834 | {
     |  835 | +	int node = dev_to_node(&adapter->pdev->dev);
 835 |  836 | 	struct ixgbe_q_vector *q_vector;
 836 |  837 | 	struct ixgbe_ring *ring;
 837 |      | -	int node = NUMA_NO_NODE;
 838 |  838 | 	int cpu = -1;
 839 |      | -	int ring_count, size;
     |  839 | +	int ring_count;
 840 |  840 | 	u8 tcs = adapter->hw_tcs;
 841 |  841 |
 842 |  842 | 	ring_count = txr_count + rxr_count + xdp_count;
 843 |      | -	size = sizeof(struct ixgbe_q_vector) +
 844 |      | -	       (sizeof(struct ixgbe_ring) * ring_count);
 845 |  843 |
 846 |  844 | 	/* customize cpu for Flow Director mapping */
 847 |  845 | 	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
 848 |  846 | 		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 849 |  847 | 		if (rss_i > 1 && adapter->atr_sample_rate) {
 850 |      | -			if (cpu_online(v_idx)) {
 851 |      | -				cpu = v_idx;
 852 |      | -				node = cpu_to_node(cpu);
 853 |      | -			}
     |  848 | +			cpu = cpumask_local_spread(v_idx, node);
     |  849 | +			node = cpu_to_node(cpu);
 854 |  850 | 		}
 855 |  851 | 	}
 856 |  852 |
 857 |  853 | 	/* allocate q_vector and rings */
 858 |      | -	q_vector = kzalloc_node(size, GFP_KERNEL, node);
     |  854 | +	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
     |  855 | +				GFP_KERNEL, node);
 859 |  856 | 	if (!q_vector)
 860 |      | -		q_vector = kzalloc(size, GFP_KERNEL);
     |  857 | +		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
     |  858 | +				   GFP_KERNEL);
 861 |  859 | 	if (!q_vector)
 862 |  860 | 		return -ENOMEM;
 863 |  861 |
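For context, here is a minimal sketch of the two idioms this hunk adopts, using invented `foo_*` names rather than ixgbe's own structures: `dev_to_node()` gives the NUMA node the PCI device is attached to, `cpumask_local_spread()` picks the v_idx-th CPU while preferring CPUs on that node, and `struct_size()` replaces the open-coded `sizeof()` arithmetic for a structure ending in a flexible array, with overflow checking. The kernel helpers are real; everything else below is an assumption for illustration.

```c
/* Hypothetical sketch only -- foo_* names are illustrative, not ixgbe code. */
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct foo_ring {
	void *desc;			/* per-queue state (illustrative) */
};

struct foo_vector {
	int cpu;			/* CPU expected to service this vector */
	struct foo_ring ring[];		/* flexible array, sized at allocation */
};

static struct foo_vector *foo_alloc_vector(struct device *dev,
					   unsigned int v_idx,
					   unsigned int ring_count)
{
	int node = dev_to_node(dev);	/* NUMA node the device sits on */
	int cpu = cpumask_local_spread(v_idx, node);
	struct foo_vector *v;

	/*
	 * struct_size() is sizeof(*v) plus ring_count trailing ring elements,
	 * computed with overflow checking; prefer memory local to the CPU
	 * that will service this vector, fall back to any node.
	 */
	v = kzalloc_node(struct_size(v, ring, ring_count),
			 GFP_KERNEL, cpu_to_node(cpu));
	if (!v)
		v = kzalloc(struct_size(v, ring, ring_count), GFP_KERNEL);
	if (!v)
		return NULL;

	v->cpu = cpu;
	return v;
}
```

Deriving the allocation size from the flexible-array declaration itself keeps the size expression from drifting out of sync with the structure layout, which the old `sizeof()` arithmetic could not guarantee.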
  .. |   .. |
1031 | 1029 | 	WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
1032 | 1030 |
1033 | 1031 | 	adapter->q_vector[v_idx] = NULL;
1034 |      | -	napi_hash_del(&q_vector->napi);
1035 |      | -	netif_napi_del(&q_vector->napi);
     | 1032 | +	__netif_napi_del(&q_vector->napi);
1036 | 1033 |
1037 | 1034 | 	/*
     | 1035 | +	 * after a call to __netif_napi_del() napi may still be used and
1038 | 1036 | 	 * ixgbe_get_stats64() might access the rings on this vector,
1039 | 1037 | 	 * we must wait a grace period before freeing it.
1040 | 1038 | 	 */
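As a hedged aside on the NAPI change (again with invented `foo_*` names, not the driver's code): `__netif_napi_del()` folds the old `napi_hash_del()`/`netif_napi_del()` pair into one call but does not wait for an RCU grace period, so the memory containing the `napi_struct` must only be freed after one, e.g. via `kfree_rcu()`, which matches the comment the hunk extends.

```c
/* Hypothetical sketch only -- foo_* names are illustrative, not ixgbe code. */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_vector {
	struct napi_struct napi;
	struct rcu_head rcu;		/* used to defer the free */
	/* rings, stats, ... */
};

static void foo_free_vector(struct foo_vector *v)
{
	/* unregister the NAPI instance without waiting for a grace period */
	__netif_napi_del(&v->napi);

	/*
	 * Readers (e.g. a stats path walking the rings) may still hold an
	 * RCU reference to this vector; free it only after a grace period.
	 */
	kfree_rcu(v, rcu);
}
```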
  .. |   .. |
1055 | 1053 | 	int txr_remaining = adapter->num_tx_queues;
1056 | 1054 | 	int xdp_remaining = adapter->num_xdp_queues;
1057 | 1055 | 	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
1058 |      | -	int err;
     | 1056 | +	int err, i;
1059 | 1057 |
1060 | 1058 | 	/* only one q_vector if MSI-X is disabled. */
1061 | 1059 | 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  .. |   .. |
1097 | 1095 | 		xdp_idx += xqpv;
1098 | 1096 | 	}
1099 | 1097 |
     | 1098 | +	for (i = 0; i < adapter->num_rx_queues; i++) {
     | 1099 | +		if (adapter->rx_ring[i])
     | 1100 | +			adapter->rx_ring[i]->ring_idx = i;
     | 1101 | +	}
     | 1102 | +
     | 1103 | +	for (i = 0; i < adapter->num_tx_queues; i++) {
     | 1104 | +		if (adapter->tx_ring[i])
     | 1105 | +			adapter->tx_ring[i]->ring_idx = i;
     | 1106 | +	}
     | 1107 | +
     | 1108 | +	for (i = 0; i < adapter->num_xdp_queues; i++) {
     | 1109 | +		if (adapter->xdp_ring[i])
     | 1110 | +			adapter->xdp_ring[i]->ring_idx = i;
     | 1111 | +	}
     | 1112 | +
1100 | 1113 | 	return 0;
1101 | 1114 |
1102 | 1115 | err_out:
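Finally, a small illustrative sketch (hypothetical `foo_*` names, not ixgbe code) of what the new `ring_idx` bookkeeping provides: each ring remembers its slot in the adapter's ring array, so code that only holds a ring pointer can locate that slot again without assuming it equals the hardware `queue_index`.

```c
/* Hypothetical sketch only -- foo_* names are illustrative, not ixgbe code. */
#include <linux/types.h>

#define FOO_MAX_RX_QUEUES 64

struct foo_ring {
	u16 ring_idx;			/* slot in adapter->rx_ring[] */
	u16 queue_index;		/* hardware queue, may differ */
};

struct foo_adapter {
	struct foo_ring *rx_ring[FOO_MAX_RX_QUEUES];
	int num_rx_queues;
};

/* record each ring's slot once, right after the rings are created */
static void foo_record_rx_ring_indices(struct foo_adapter *ad)
{
	int i;

	for (i = 0; i < ad->num_rx_queues; i++)
		if (ad->rx_ring[i])
			ad->rx_ring[i]->ring_idx = i;
}

/* later, map a ring pointer back to its slot, e.g. to swap it out */
static struct foo_ring **foo_ring_slot(struct foo_adapter *ad,
				       struct foo_ring *ring)
{
	return &ad->rx_ring[ring->ring_idx];
}
```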