@@ -32,7 +32,10 @@
 
 #include <linux/irq.h>
 #include "en.h"
+#include "en/txrx.h"
 #include "en/xdp.h"
+#include "en/xsk/rx.h"
+#include "en/xsk/tx.h"
 
 static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
 {
@@ -48,27 +51,66 @@
 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
 {
         struct mlx5e_sq_stats *stats = sq->stats;
-        struct net_dim_sample dim_sample;
+        struct dim_sample dim_sample = {};
 
         if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
                 return;
 
-        net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
-                       &dim_sample);
+        dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
         net_dim(&sq->dim, dim_sample);
 }
 
 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
 {
         struct mlx5e_rq_stats *stats = rq->stats;
-        struct net_dim_sample dim_sample;
+        struct dim_sample dim_sample = {};
 
         if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
                 return;
 
-        net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
-                       &dim_sample);
+        dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
         net_dim(&rq->dim, dim_sample);
+}
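
Both helpers now feed the generic DIM library (include/linux/dim.h) instead of the old net_dim-private types: the sample is zero-initialized, and dim_update_sample() replaces net_dim_sample(). A minimal consumer sketch of that pattern, assuming a hypothetical per-queue type my_queue; only struct dim, struct dim_sample, dim_update_sample() and net_dim() are the real API:

#include <linux/dim.h>

/* Hypothetical per-queue state; only 'struct dim' comes from the real API. */
struct my_queue {
        struct dim dim;    /* moderation state; dim.work rewrites the profile */
        u16 event_ctr;     /* completion interrupts seen so far */
        u64 packets;
        u64 bytes;
};

/* Feed the current counters to DIM once per NAPI cycle. The library
 * timestamps the sample, diffs it against the previous one, and may
 * schedule dim.work to adjust the queue's interrupt moderation profile.
 */
static void my_queue_dim(struct my_queue *q)
{
        struct dim_sample sample = {};

        dim_update_sample(q->event_ctr, q->packets, q->bytes, &sample);
        net_dim(&q->dim, sample);
}
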
+
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
+{
+        struct mlx5_wq_cyc *wq = &sq->wq;
+        struct mlx5e_tx_wqe *nopwqe;
+        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+                .wqe_type   = MLX5E_ICOSQ_WQE_NOP,
+                .num_wqebbs = 1,
+        };
+
+        nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+        mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+}
+
+static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
+{
+        bool busy_xsk = false, xsk_rx_alloc_err;
+
+        /* Handle the race between the application querying need_wakeup and the
+         * driver setting it:
+         * 1. Update need_wakeup both before and after the TX. If it goes to
+         * "yes", it can only happen with the first update.
+         * 2. If the application queried need_wakeup before we set it, the
+         * packets will be transmitted anyway, even w/o a wakeup.
+         * 3. Give a chance to clear need_wakeup after new packets were queued
+         * for TX.
+         */
+        mlx5e_xsk_update_tx_wakeup(xsksq);
+        busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
+        mlx5e_xsk_update_tx_wakeup(xsksq);
+
+        xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
+                                           mlx5e_post_rx_mpwqes,
+                                           mlx5e_post_rx_wqes,
+                                           xskrq);
+        busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
+
+        return busy_xsk;
 }
 
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
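
The need_wakeup protocol described in the comment above has a userspace counterpart: the application checks the flag on its TX ring and only pays for a syscall kick when the driver asked for one. A sketch of that side using libbpf's AF_XDP helpers; xsk_ring_prod__needs_wakeup() and xsk_socket__fd() are the real API, while the wrapper function itself is illustrative:

#include <sys/socket.h>
#include <bpf/xsk.h>

/* After producing descriptors to the TX ring, kick the kernel only if
 * the driver set need_wakeup. If the driver raced with us (case 2 in
 * the comment above), the frames go out anyway and the kick is skipped.
 */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
        if (xsk_ring_prod__needs_wakeup(tx))
                sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}
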
@@ -76,37 +118,73 @@
         struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                                napi);
         struct mlx5e_ch_stats *ch_stats = c->stats;
+        struct mlx5e_xdpsq *xsksq = &c->xsksq;
+        struct mlx5e_rq *xskrq = &c->xskrq;
+        struct mlx5e_rq *rq = &c->rq;
+        bool aff_change = false;
+        bool busy_xsk = false;
         bool busy = false;
         int work_done = 0;
+        bool xsk_open;
         int i;
+
+        rcu_read_lock();
+
+        xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
 
         ch_stats->poll++;
 
         for (i = 0; i < c->num_tc; i++)
                 busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
 
+        /* budget=0 means we may be in IRQ context, do as little as possible */
+        if (unlikely(!budget))
+                goto out;
+
         busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
 
         if (c->xdp)
-                busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+                busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
 
-        if (likely(budget)) { /* budget=0 means: don't poll rx rings */
-                work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
-                busy |= work_done == budget;
+        if (xsk_open)
+                work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+
+        if (likely(budget - work_done))
+                work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+
+        busy |= work_done == budget;
+
+        mlx5e_poll_ico_cq(&c->icosq.cq);
+        if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
+                /* Don't clear the flag if nothing was polled to prevent
+                 * queueing more WQEs and overflowing the async ICOSQ.
+                 */
+                clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
+
+        busy |= INDIRECT_CALL_2(rq->post_wqes,
+                                mlx5e_post_rx_mpwqes,
+                                mlx5e_post_rx_wqes,
+                                rq);
+        if (xsk_open) {
+                busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
+                busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
         }
 
-        busy |= c->rq.post_wqes(&c->rq);
+        busy |= busy_xsk;
 
         if (busy) {
-                if (likely(mlx5e_channel_no_affinity_change(c)))
-                        return budget;
+                if (likely(mlx5e_channel_no_affinity_change(c))) {
+                        work_done = budget;
+                        goto out;
+                }
                 ch_stats->aff_change++;
+                aff_change = true;
                 if (budget && work_done == budget)
                         work_done--;
         }
 
         if (unlikely(!napi_complete_done(napi, work_done)))
-                return work_done;
+                goto out;
 
         ch_stats->arm++;
 
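
Both post_wqes call sites go through INDIRECT_CALL_2() from include/linux/indirect_call_wrapper.h rather than a plain indirect call: the function pointer is compared against the two expected handlers (mlx5e_post_rx_mpwqes for the striding RQ, mlx5e_post_rx_wqes for the legacy RQ) and dispatched as a direct call on a match, so retpoline-enabled kernels avoid the indirect-branch penalty on the hot path. A simplified sketch of what the wrapper amounts to under CONFIG_RETPOLINE; the real macro builds this out of nested INDIRECT_CALL_1() invocations:

/* Simplified: try the likely targets f2, then f1, as direct calls and
 * fall back to the indirect call only for an unexpected handler.
 * Without CONFIG_RETPOLINE the real macro is just f(__VA_ARGS__).
 */
#define INDIRECT_CALL_2_SKETCH(f, f2, f1, ...)                  \
        (likely((f) == (f2)) ? (f2)(__VA_ARGS__) :              \
         likely((f) == (f1)) ? (f1)(__VA_ARGS__) :              \
                               (f)(__VA_ARGS__))
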
@@ -115,16 +193,31 @@
                 mlx5e_cq_arm(&c->sq[i].cq);
         }
 
-        mlx5e_handle_rx_dim(&c->rq);
+        mlx5e_handle_rx_dim(rq);
 
-        mlx5e_cq_arm(&c->rq.cq);
+        mlx5e_cq_arm(&rq->cq);
         mlx5e_cq_arm(&c->icosq.cq);
+        mlx5e_cq_arm(&c->async_icosq.cq);
         mlx5e_cq_arm(&c->xdpsq.cq);
+
+        if (xsk_open) {
+                mlx5e_handle_rx_dim(xskrq);
+                mlx5e_cq_arm(&xsksq->cq);
+                mlx5e_cq_arm(&xskrq->cq);
+        }
+
+        if (unlikely(aff_change && busy_xsk)) {
+                mlx5e_trigger_irq(&c->icosq);
+                ch_stats->force_irq++;
+        }
+
+out:
+        rcu_read_unlock();
 
         return work_done;
 }
 
-void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
 {
         struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 