2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
--- a/kernel/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/kernel/drivers/net/wireless/mediatek/mt76/tx.c
@@ -1,86 +1,9 @@
+// SPDX-License-Identifier: ISC
 /*
  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

 #include "mt76.h"
-
-static struct mt76_txwi_cache *
-mt76_alloc_txwi(struct mt76_dev *dev)
-{
-	struct mt76_txwi_cache *t;
-	dma_addr_t addr;
-	int size;
-
-	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
-	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
-	if (!t)
-		return NULL;
-
-	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
-			      DMA_TO_DEVICE);
-	t->dma_addr = addr;
-
-	return t;
-}
-
-static struct mt76_txwi_cache *
-__mt76_get_txwi(struct mt76_dev *dev)
-{
-	struct mt76_txwi_cache *t = NULL;
-
-	spin_lock_bh(&dev->lock);
-	if (!list_empty(&dev->txwi_cache)) {
-		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
-				     list);
-		list_del(&t->list);
-	}
-	spin_unlock_bh(&dev->lock);
-
-	return t;
-}
-
-struct mt76_txwi_cache *
-mt76_get_txwi(struct mt76_dev *dev)
-{
-	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
-
-	if (t)
-		return t;
-
-	return mt76_alloc_txwi(dev);
-}
-
-void
-mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
-	if (!t)
-		return;
-
-	spin_lock_bh(&dev->lock);
-	list_add(&t->list, &dev->txwi_cache);
-	spin_unlock_bh(&dev->lock);
-}
-
-void mt76_tx_free(struct mt76_dev *dev)
-{
-	struct mt76_txwi_cache *t;
-
-	while ((t = __mt76_get_txwi(dev)) != NULL)
-		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
-				 DMA_TO_DEVICE);
-}

 static int
 mt76_txq_get_qid(struct ieee80211_txq *txq)
@@ -92,83 +15,324 @@
 }

 void
-mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
-	struct mt76_wcid *wcid, struct sk_buff *skb)
+mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_txq *txq;
+	struct mt76_txq *mtxq;
+	u8 tid;
+
+	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
+	    !ieee80211_is_data_present(hdr->frame_control))
+		return;
+
+	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+	txq = sta->txq[tid];
+	mtxq = (struct mt76_txq *)txq->drv_priv;
+	if (!mtxq->aggr)
+		return;
+
+	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
+
+void
+mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+		    __acquires(&dev->status_list.lock)
+{
+	__skb_queue_head_init(list);
+	spin_lock_bh(&dev->status_list.lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+
+void
+mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+		      __releases(&dev->status_list.lock)
+{
+	struct ieee80211_hw *hw;
+	struct sk_buff *skb;
+
+	spin_unlock_bh(&dev->status_list.lock);
+
+	while ((skb = __skb_dequeue(list)) != NULL) {
+		hw = mt76_tx_status_get_hw(dev, skb);
+		ieee80211_tx_status(hw, skb);
+	}
+
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
+
+static void
+__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+			  struct sk_buff_head *list)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
+
+	flags |= cb->flags;
+	cb->flags = flags;
+
+	if ((flags & done) != done)
+		return;
+
+	__skb_unlink(skb, &dev->status_list);
+
+	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
+	if (flags & MT_TX_CB_TXS_FAILED) {
+		ieee80211_tx_info_clear_status(info);
+		info->status.rates[0].idx = -1;
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	}
+
+	__skb_queue_tail(list, skb);
+}
+
+void
+mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
+
+int
+mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+		       struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+	int pid;
+
+	if (!wcid)
+		return MT_PACKET_ID_NO_ACK;
+
+	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+		return MT_PACKET_ID_NO_ACK;
+
+	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
+		return MT_PACKET_ID_NO_SKB;
+
+	spin_lock_bh(&dev->status_list.lock);
+
+	memset(cb, 0, sizeof(*cb));
+	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
+		wcid->packet_id = MT_PACKET_ID_FIRST;
+
+	pid = wcid->packet_id;
+	cb->wcid = wcid->idx;
+	cb->pktid = pid;
+	cb->jiffies = jiffies;
+
+	__skb_queue_tail(&dev->status_list, skb);
+	spin_unlock_bh(&dev->status_list.lock);
+
+	return pid;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
+
+struct sk_buff *
+mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
+		       struct sk_buff_head *list)
+{
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
+		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+
+		if (wcid && cb->wcid != wcid->idx)
+			continue;
+
+		if (cb->pktid == pktid)
+			return skb;
+
+		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
+					      MT_TX_STATUS_SKB_TIMEOUT))
+			continue;
+
+		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
+						    MT_TX_CB_TXS_DONE, list);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
+
+void
+mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+{
+	struct sk_buff_head list;
+
+	mt76_tx_status_lock(dev, &list);
+	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+	mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_check);
+
+static void
+mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_wcid *wcid;
+	int pending;
+
+	if (info->tx_time_est)
+		return;
+
+	if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+		return;
+
+	rcu_read_lock();
+
+	wcid = rcu_dereference(dev->wcid[wcid_idx]);
+	if (wcid) {
+		pending = atomic_dec_return(&wcid->non_aql_packets);
+		if (pending < 0)
+			atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+	}
+
+	rcu_read_unlock();
+}
+
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+{
+	struct ieee80211_hw *hw;
+	struct sk_buff_head list;
+
+#ifdef CONFIG_NL80211_TESTMODE
+	if (skb == dev->test.tx_skb) {
+		dev->test.tx_done++;
+		if (dev->test.tx_queued == dev->test.tx_done)
+			wake_up(&dev->tx_wait);
+	}
+#endif
+
+	mt76_tx_check_non_aql(dev, wcid_idx, skb);
+
+	if (!skb->prev) {
+		hw = mt76_tx_status_get_hw(dev, skb);
+		ieee80211_free_txskb(hw, skb);
+		return;
+	}
+
+	mt76_tx_status_lock(dev, &list);
+	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+	mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+
+static int
+__mt76_tx_queue_skb(struct mt76_dev *dev, int qid, struct sk_buff *skb,
+		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+		    bool *stop)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct mt76_queue *q;
+	bool non_aql;
+	int pending;
+	int idx;
+
+	non_aql = !info->tx_time_est;
+	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+	if (idx < 0 || !sta || !non_aql)
+		return idx;
+
+	wcid = (struct mt76_wcid *)sta->drv_priv;
+	q = dev->q_tx[qid];
+	q->entry[idx].wcid = wcid->idx;
+	pending = atomic_inc_return(&wcid->non_aql_packets);
+	if (stop && pending >= MT_MAX_NON_AQL_PKT)
+		*stop = true;
+
+	return idx;
+}
+
+void
+mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
+	struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+	struct mt76_dev *dev = phy->dev;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct mt76_queue *q;
 	int qid = skb_get_queue_mapping(skb);
+	bool ext_phy = phy != &dev->phy;
+
+	if (mt76_testmode_enabled(dev)) {
+		ieee80211_free_txskb(phy->hw, skb);
+		return;
+	}

 	if (WARN_ON(qid >= MT_TXQ_PSD)) {
 		qid = MT_TXQ_BE;
 		skb_set_queue_mapping(skb, qid);
 	}

-	if (!wcid->tx_rate_set)
+	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+	    !ieee80211_is_data(hdr->frame_control) &&
+	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+		qid = MT_TXQ_PSD;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
 				       info->control.rates, 1);

-	q = &dev->q_tx[qid];
+	if (ext_phy)
+		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
+	q = dev->q_tx[qid];

 	spin_lock_bh(&q->lock);
-	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
+	__mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL);
 	dev->queue_ops->kick(dev, q);

-	if (q->queued > q->ndesc - 8)
-		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+	if (q->queued > q->ndesc - 8 && !q->stopped) {
+		ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
+		q->stopped = true;
+	}
+
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);

 static struct sk_buff *
-mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
 {
 	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	struct ieee80211_tx_info *info;
+	bool ext_phy = phy != &phy->dev->phy;
 	struct sk_buff *skb;

-	skb = skb_dequeue(&mtxq->retry_q);
-	if (skb) {
-		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
-		if (ps && skb_queue_empty(&mtxq->retry_q))
-			ieee80211_sta_set_buffered(txq->sta, tid, false);
-
-		return skb;
-	}
-
-	skb = ieee80211_tx_dequeue(dev->hw, txq);
+	skb = ieee80211_tx_dequeue(phy->hw, txq);
 	if (!skb)
 		return NULL;

+	info = IEEE80211_SKB_CB(skb);
+	if (ext_phy)
+		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
 	return skb;
-}
-
-static void
-mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
-	if (!ieee80211_is_data_qos(hdr->frame_control) ||
-	    !ieee80211_is_data_present(hdr->frame_control))
-		return;
-
-	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
 }

 static void
 mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
 		  struct sk_buff *skb, bool last)
 {
-	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
+	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

 	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
 	if (last)
-		info->flags |= IEEE80211_TX_STATUS_EOSP;
+		info->flags |= IEEE80211_TX_STATUS_EOSP |
+			       IEEE80211_TX_CTL_REQ_TX_STATUS;

 	mt76_skb_set_moredata(skb, !last);
-	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
+	__mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta, NULL);
 }

 void
@@ -177,27 +341,25 @@
 			     enum ieee80211_frame_release_type reason,
 			     bool more_data)
 {
-	struct mt76_dev *dev = hw->priv;
+	struct mt76_phy *phy = hw->priv;
+	struct mt76_dev *dev = phy->dev;
 	struct sk_buff *last_skb = NULL;
-	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
 	int i;

 	spin_lock_bh(&hwq->lock);
 	for (i = 0; tids && nframes; i++, tids >>= 1) {
 		struct ieee80211_txq *txq = sta->txq[i];
-		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
 		struct sk_buff *skb;

 		if (!(tids & 1))
 			continue;

 		do {
-			skb = mt76_txq_dequeue(dev, mtxq, true);
+			skb = mt76_txq_dequeue(phy, mtxq);
 			if (!skb)
 				break;
-
-			if (mtxq->aggr)
-				mt76_check_agg_ssn(mtxq, skb);

 			nframes--;
 			if (last_skb)
@@ -210,114 +372,109 @@
 	if (last_skb) {
 		mt76_queue_ps_skb(dev, sta, last_skb, true);
 		dev->queue_ops->kick(dev, hwq);
+	} else {
+		ieee80211_sta_eosp(sta);
 	}
+
 	spin_unlock_bh(&hwq->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

 static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
-		    struct mt76_txq *mtxq, bool *empty)
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
+		    struct mt76_txq *mtxq)
 {
+	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
-	struct ieee80211_tx_info *info;
+	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
 	struct mt76_wcid *wcid = mtxq->wcid;
+	struct ieee80211_tx_info *info;
 	struct sk_buff *skb;
-	int n_frames = 1, limit;
-	struct ieee80211_tx_rate tx_rate;
-	bool ampdu;
-	bool probe;
+	int n_frames = 1;
+	bool stop = false;
 	int idx;

-	skb = mt76_txq_dequeue(dev, mtxq, false);
-	if (!skb) {
-		*empty = true;
+	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
 		return 0;
-	}
+
+	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
+		return 0;
+
+	skb = mt76_txq_dequeue(phy, mtxq);
+	if (!skb)
+		return 0;

 	info = IEEE80211_SKB_CB(skb);
-	if (!wcid->tx_rate_set)
+	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
 		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
 				       info->control.rates, 1);
-	tx_rate = info->control.rates[0];

-	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
-	limit = ampdu ? 16 : 3;
-
-	if (ampdu)
-		mt76_check_agg_ssn(mtxq, skb);
-
-	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
-
+	idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
 	if (idx < 0)
 		return idx;

 	do {
-		bool cur_ampdu;
-
-		if (probe)
-			break;
-
-		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
-		    test_bit(MT76_RESET, &dev->state))
+		if (test_bit(MT76_STATE_PM, &phy->state) ||
+		    test_bit(MT76_RESET, &phy->state))
 			return -EBUSY;

-		skb = mt76_txq_dequeue(dev, mtxq, false);
-		if (!skb) {
-			*empty = true;
+		if (stop)
 			break;
-		}
+
+		if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+			break;
+
+		skb = mt76_txq_dequeue(phy, mtxq);
+		if (!skb)
+			break;

 		info = IEEE80211_SKB_CB(skb);
-		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+					       info->control.rates, 1);

-		if (ampdu != cur_ampdu ||
-		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
-			skb_queue_tail(&mtxq->retry_q, skb);
-			break;
-		}
-
-		info->control.rates[0] = tx_rate;
-
-		if (cur_ampdu)
-			mt76_check_agg_ssn(mtxq, skb);
-
-		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
-						   txq->sta);
+		idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
 		if (idx < 0)
-			return idx;
+			break;

 		n_frames++;
-	} while (n_frames < limit);
+	} while (1);

-	if (!probe) {
-		hwq->swq_queued++;
-		hwq->entry[idx].schedule = true;
-	}
-
-	dev->queue_ops->kick(dev, hwq);
+	dev->queue_ops->kick(dev, q);

 	return n_frames;
 }

 static int
-mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
-	struct mt76_txq *mtxq, *mtxq_last;
-	int len = 0;
+	struct mt76_dev *dev = phy->dev;
+	struct mt76_queue *q = dev->q_tx[qid];
+	struct ieee80211_txq *txq;
+	struct mt76_txq *mtxq;
+	struct mt76_wcid *wcid;
+	int ret = 0;

-restart:
-	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
-	while (!list_empty(&hwq->swq)) {
-		bool empty = false;
-		int cur;
+	spin_lock_bh(&q->lock);
+	while (1) {
+		if (test_bit(MT76_STATE_PM, &phy->state) ||
+		    test_bit(MT76_RESET, &phy->state)) {
+			ret = -EBUSY;
+			break;
+		}

-		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
-		    test_bit(MT76_RESET, &dev->state))
-			return -EBUSY;
+		if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+			break;

-		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
+		txq = ieee80211_next_txq(phy->hw, qid);
+		if (!txq)
+			break;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
+		wcid = mtxq->wcid;
+		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+			continue;
+
 		if (mtxq->send_bar && mtxq->aggr) {
 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 			struct ieee80211_sta *sta = txq->sta;
@@ -326,58 +483,60 @@
 			u8 tid = txq->tid;

 			mtxq->send_bar = false;
-			spin_unlock_bh(&hwq->lock);
+			spin_unlock_bh(&q->lock);
 			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
-			spin_lock_bh(&hwq->lock);
-			goto restart;
+			spin_lock_bh(&q->lock);
 		}

-		list_del_init(&mtxq->list);
-
-		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
-		if (!empty)
-			list_add_tail(&mtxq->list, &hwq->swq);
-
-		if (cur < 0)
-			return cur;
-
-		len += cur;
-
-		if (mtxq == mtxq_last)
-			break;
+		ret += mt76_txq_send_burst(phy, q, mtxq);
+		ieee80211_return_txq(phy->hw, txq, false);
 	}
+	spin_unlock_bh(&q->lock);

-	return len;
+	return ret;
 }

-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
 	int len;

-	rcu_read_lock();
-	do {
-		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
-			break;
+	if (qid >= 4)
+		return;

-		len = mt76_txq_schedule_list(dev, hwq);
+	rcu_read_lock();
+
+	do {
+		ieee80211_txq_schedule_start(phy->hw, qid);
+		len = mt76_txq_schedule_list(phy, qid);
+		ieee80211_txq_schedule_end(phy->hw, qid);
 	} while (len > 0);
+
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);

-void mt76_txq_schedule_all(struct mt76_dev *dev)
+void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
 	int i;

-	for (i = 0; i <= MT_TXQ_BK; i++) {
-		struct mt76_queue *q = &dev->q_tx[i];
-
-		spin_lock_bh(&q->lock);
-		mt76_txq_schedule(dev, q);
-		spin_unlock_bh(&q->lock);
-	}
+	for (i = 0; i <= MT_TXQ_BK; i++)
+		mt76_txq_schedule(phy, i);
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_tx_worker(struct mt76_worker *w)
+{
+	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
+
+	mt76_txq_schedule_all(&dev->phy);
+	if (dev->phy2)
+		mt76_txq_schedule_all(dev->phy2);
+
+#ifdef CONFIG_NL80211_TESTMODE
+	if (dev->test.tx_pending)
+		mt76_testmode_tx_pending(dev);
+#endif
+}

 void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 			 bool send_bar)
@@ -386,65 +545,84 @@

 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
 		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_queue *hwq;
 		struct mt76_txq *mtxq;

 		if (!txq)
 			continue;

+		hwq = dev->q_tx[mt76_txq_get_qid(txq)];
 		mtxq = (struct mt76_txq *)txq->drv_priv;

-		spin_lock_bh(&mtxq->hwq->lock);
+		spin_lock_bh(&hwq->lock);
 		mtxq->send_bar = mtxq->aggr && send_bar;
-		if (!list_empty(&mtxq->list))
-			list_del_init(&mtxq->list);
-		spin_unlock_bh(&mtxq->hwq->lock);
+		spin_unlock_bh(&hwq->lock);
 	}
 }
 EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

 void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 {
-	struct mt76_dev *dev = hw->priv;
-	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
-	struct mt76_queue *hwq = mtxq->hwq;
+	struct mt76_phy *phy = hw->priv;
+	struct mt76_dev *dev = phy->dev;

-	spin_lock_bh(&hwq->lock);
-	if (list_empty(&mtxq->list))
-		list_add_tail(&mtxq->list, &hwq->swq);
-	mt76_txq_schedule(dev, hwq);
-	spin_unlock_bh(&hwq->lock);
+	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
+		return;
+
+	mt76_worker_schedule(&dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

-void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
+u8 mt76_ac_to_hwq(u8 ac)
 {
-	struct mt76_txq *mtxq;
-	struct mt76_queue *hwq;
-	struct sk_buff *skb;
+	static const u8 wmm_queue_map[] = {
+		[IEEE80211_AC_BE] = 0,
+		[IEEE80211_AC_BK] = 1,
+		[IEEE80211_AC_VI] = 2,
+		[IEEE80211_AC_VO] = 3,
+	};

-	if (!txq)
-		return;
+	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
+		return 0;

-	mtxq = (struct mt76_txq *) txq->drv_priv;
-	hwq = mtxq->hwq;
-
-	spin_lock_bh(&hwq->lock);
-	if (!list_empty(&mtxq->list))
-		list_del(&mtxq->list);
-	spin_unlock_bh(&hwq->lock);
-
-	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
-		ieee80211_free_txskb(dev->hw, skb);
+	return wmm_queue_map[ac];
 }
-EXPORT_SYMBOL_GPL(mt76_txq_remove);
+EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

-void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
 {
-	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+	struct sk_buff *iter, *last = skb;

-	INIT_LIST_HEAD(&mtxq->list);
-	skb_queue_head_init(&mtxq->retry_q);
+	/* First packet of a A-MSDU burst keeps track of the whole burst
+	 * length, need to update length of it and the last packet.
+	 */
+	skb_walk_frags(skb, iter) {
+		last = iter;
+		if (!iter->next) {
+			skb->data_len += pad;
+			skb->len += pad;
+			break;
+		}
+	}

-	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+	if (skb_pad(last, pad))
+		return -ENOMEM;
+
+	__skb_put(last, pad);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(mt76_txq_init);
+EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+			    struct mt76_queue_entry *e)
+{
+	if (e->skb)
+		dev->drv->tx_complete_skb(dev, e);
+
+	spin_lock_bh(&q->lock);
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->queued--;
+	spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
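
Usage note: the status-tracking helpers introduced above (mt76_tx_status_skb_add(),
mt76_tx_status_skb_get(), mt76_tx_status_skb_done() and the lock/unlock pair) are
meant to be driven from a driver's TX-status handling path. The sketch below shows
one plausible consumer; it is not part of this patch. The function name, the acked
flag, and the way wcid/pktid are obtained are hypothetical placeholders, only the
mt76 calls themselves come from the code above.

/* Sketch only: a driver-side TX-status handler built on the helpers above.
 * Assumes "mt76.h" is included and that wcid/pktid/acked were parsed out of
 * a hardware TX-status event by driver-specific code (not shown).
 */
static void example_handle_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
			       int pktid, bool acked)
{
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Initializes the local list and takes dev->status_list.lock */
	mt76_tx_status_lock(dev, &list);

	/* Find the frame queued earlier via mt76_tx_status_skb_add();
	 * stale entries are timed out and flushed as a side effect.
	 */
	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (acked)
			info->flags |= IEEE80211_TX_STAT_ACK;

		/* Sets MT_TX_CB_TXS_DONE; the skb moves onto the local list
		 * once the DMA completion path has also marked it done.
		 */
		mt76_tx_status_skb_done(dev, skb, &list);
	}

	/* Drops the lock and reports every completed frame to mac80211 */
	mt76_tx_status_unlock(dev, &list);
}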