forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/net/wireless/mediatek/mt76/dma.c
@@ -1,33 +1,98 @@
+// SPDX-License-Identifier: ISC
 /*
  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

 #include <linux/dma-mapping.h>
 #include "mt76.h"
 #include "dma.h"

-#define DMA_DUMMY_TXWI	((void *) ~0)
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+	dma_addr_t addr;
+	u8 *txwi;
+	int size;
+
+	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!txwi)
+		return NULL;
+
+	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+			      DMA_TO_DEVICE);
+	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+	t->dma_addr = addr;
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = NULL;
+
+	spin_lock(&dev->lock);
+	if (!list_empty(&dev->txwi_cache)) {
+		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+				     list);
+		list_del(&t->list);
+	}
+	spin_unlock(&dev->lock);
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+	if (t)
+		return t;
+
+	return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	if (!t)
+		return;
+
+	spin_lock(&dev->lock);
+	list_add(&t->list, &dev->txwi_cache);
+	spin_unlock(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+static void
+mt76_free_pending_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+
+	local_bh_disable();
+	while ((t = __mt76_get_txwi(dev)) != NULL)
+		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				 DMA_TO_DEVICE);
+	local_bh_enable();
+}

 static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc, int bufsize,
+		     u32 ring_base)
 {
 	int size;
 	int i;

 	spin_lock_init(&q->lock);
-	INIT_LIST_HEAD(&q->swq);
+
+	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+	q->ndesc = n_desc;
+	q->buf_size = bufsize;
+	q->hw_idx = idx;

 	size = q->ndesc * sizeof(struct mt76_desc);
 	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
@@ -43,10 +108,10 @@
 	for (i = 0; i < q->ndesc; i++)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

-	iowrite32(q->desc_dma, &q->regs->desc_base);
-	iowrite32(0, &q->regs->cpu_idx);
-	iowrite32(0, &q->regs->dma_idx);
-	iowrite32(q->ndesc, &q->regs->ring_size);
+	writel(q->desc_dma, &q->regs->desc_base);
+	writel(0, &q->regs->cpu_idx);
+	writel(0, &q->regs->dma_idx);
+	writel(q->ndesc, &q->regs->ring_size);

 	return 0;
 }
@@ -56,31 +121,46 @@
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
 		 struct sk_buff *skb, void *txwi)
 {
+	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
 	u32 ctrl;
 	int i, idx = -1;

-	if (txwi)
-		q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+	if (txwi) {
+		q->entry[q->head].txwi = DMA_DUMMY_DATA;
+		q->entry[q->head].skip_buf0 = true;
+	}

 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;

+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+		entry = &q->entry[idx];
+
+		if (buf[0].skip_unmap)
+			entry->skip_buf0 = true;
+		entry->skip_buf1 = i == nbufs - 1;
+
+		entry->dma_addr[0] = buf[0].addr;
+		entry->dma_len[0] = buf[0].len;
+
 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 		if (i < nbufs - 1) {
+			entry->dma_addr[1] = buf[1].addr;
+			entry->dma_len[1] = buf[1].len;
 			buf1 = buf[1].addr;
 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+			if (buf[1].skip_unmap)
+				entry->skip_buf1 = true;
 		}

 		if (i == nbufs - 1)
 			ctrl |= MT_DMA_CTL_LAST_SEC0;
 		else if (i == nbufs - 2)
 			ctrl |= MT_DMA_CTL_LAST_SEC1;
-
-		idx = q->head;
-		q->head = (q->head + 1) % q->ndesc;
-
-		desc = &q->desc[idx];

 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
@@ -101,27 +181,20 @@
 			struct mt76_queue_entry *prev_e)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
-	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
-	u32 ctrl = le32_to_cpu(__ctrl);

-	if (!e->txwi || !e->skb) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf0);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf0)
+		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
-	}

-	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf1);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf1)
+		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
-	}

-	if (e->txwi == DMA_DUMMY_TXWI)
+	if (e->txwi == DMA_DUMMY_DATA)
 		e->txwi = NULL;
+
+	if (e->skb == DMA_DUMMY_DATA)
+		e->skb = NULL;

 	*prev_e = *e;
 	memset(e, 0, sizeof(*e));
@@ -130,59 +203,64 @@
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
-	q->head = ioread32(&q->regs->dma_idx);
+	writel(q->desc_dma, &q->regs->desc_base);
+	writel(q->ndesc, &q->regs->ring_size);
+	q->head = readl(&q->regs->dma_idx);
 	q->tail = q->head;
-	iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	wmb();
+	writel(q->head, &q->regs->cpu_idx);
 }

 static void
 mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 {
-	struct mt76_queue *q = &dev->q_tx[qid];
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_queue_entry entry;
 	bool wake = false;
 	int last;

-	if (!q->ndesc)
+	if (!q)
 		return;

-	spin_lock_bh(&q->lock);
 	if (flush)
 		last = -1;
 	else
-		last = ioread32(&q->regs->dma_idx);
+		last = readl(&q->regs->dma_idx);

-	while (q->queued && q->tail != last) {
+	while (q->queued > 0 && q->tail != last) {
 		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
-		if (entry.schedule)
-			q->swq_queued--;
-
-		if (entry.skb)
-			dev->drv->tx_complete_skb(dev, q, &entry, flush);
+		mt76_queue_tx_complete(dev, q, &entry);

 		if (entry.txwi) {
-			mt76_put_txwi(dev, entry.txwi);
-			wake = true;
+			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
+				mt76_put_txwi(dev, entry.txwi);
+			wake = !flush;
 		}

-		q->tail = (q->tail + 1) % q->ndesc;
-		q->queued--;
-
 		if (!flush && q->tail == last)
-			last = ioread32(&q->regs->dma_idx);
+			last = readl(&q->regs->dma_idx);
+
 	}

-	if (!flush)
-		mt76_txq_schedule(dev, q);
-	else
+	if (flush) {
+		spin_lock_bh(&q->lock);
 		mt76_dma_sync_idx(dev, q);
+		mt76_dma_kick_queue(dev, q);
+		spin_unlock_bh(&q->lock);
+	}

-	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+	wake = wake && q->stopped &&
+	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+	if (wake)
+		q->stopped = false;

 	if (!q->queued)
 		wake_up(&dev->tx_wait);
-
-	spin_unlock_bh(&q->lock);

 	if (wake)
 		ieee80211_wake_queue(dev->hw, qid);
@@ -198,7 +276,7 @@
 	void *buf = e->buf;
 	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

-	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	buf_addr = e->dma_addr[0];
 	if (len) {
 		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
@@ -224,7 +302,9 @@
 	if (!q->queued)
 		return NULL;

-	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+	if (flush)
+		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
 		return NULL;

 	q->tail = (q->tail + 1) % q->ndesc;
@@ -233,120 +313,150 @@
 	return mt76_dma_get_buf(dev, q, idx, len, info, more);
 }

-static void
-mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+static int
+mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+			  struct sk_buff *skb, u32 tx_info)
 {
-	iowrite32(q->head, &q->regs->cpu_idx);
+	struct mt76_queue *q = dev->q_tx[qid];
+	struct mt76_queue_buf buf = {};
+	dma_addr_t addr;
+
+	if (q->queued + 1 >= q->ndesc - 1)
+		goto error;
+
+	addr = dma_map_single(dev->dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev, addr)))
+		goto error;
+
+	buf.addr = addr;
+	buf.len = skb->len;
+
+	spin_lock_bh(&q->lock);
+	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_dma_kick_queue(dev, q);
+	spin_unlock_bh(&q->lock);
+
+	return 0;
+
+error:
+	dev_kfree_skb(skb);
+	return -ENOMEM;
 }

-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
-			  struct sk_buff *skb, struct mt76_wcid *wcid,
-			  struct ieee80211_sta *sta)
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+		      struct sk_buff *skb, struct mt76_wcid *wcid,
+		      struct ieee80211_sta *sta)
 {
-	struct mt76_queue_entry e;
+	struct mt76_queue *q = dev->q_tx[qid];
+	struct mt76_tx_info tx_info = {
+		.skb = skb,
+	};
+	struct ieee80211_hw *hw;
+	int len, n = 0, ret = -ENOMEM;
 	struct mt76_txwi_cache *t;
-	struct mt76_queue_buf buf[32];
 	struct sk_buff *iter;
 	dma_addr_t addr;
-	int len;
-	u32 tx_info = 0;
-	int n, ret;
+	u8 *txwi;

 	t = mt76_get_txwi(dev);
 	if (!t) {
-		ieee80211_free_txskb(dev->hw, skb);
+		hw = mt76_tx_status_get_hw(dev, skb);
+		ieee80211_free_txskb(hw, skb);
 		return -ENOMEM;
 	}
+	txwi = mt76_get_txwi_ptr(dev, t);

-	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
-				DMA_TO_DEVICE);
-	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
-				       &tx_info);
-	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
-				   DMA_TO_DEVICE);
-	if (ret < 0)
-		goto free;
+	skb->prev = skb->next = NULL;
+	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
+		mt76_insert_hdr_pad(skb);

-	len = skb->len - skb->data_len;
+	len = skb_headlen(skb);
 	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev->dev, addr)) {
-		ret = -ENOMEM;
+	if (unlikely(dma_mapping_error(dev->dev, addr)))
 		goto free;
-	}

-	n = 0;
-	buf[n].addr = t->dma_addr;
-	buf[n++].len = dev->drv->txwi_size;
-	buf[n].addr = addr;
-	buf[n++].len = len;
+	tx_info.buf[n].addr = t->dma_addr;
+	tx_info.buf[n++].len = dev->drv->txwi_size;
+	tx_info.buf[n].addr = addr;
+	tx_info.buf[n++].len = len;

 	skb_walk_frags(skb, iter) {
-		if (n == ARRAY_SIZE(buf))
+		if (n == ARRAY_SIZE(tx_info.buf))
 			goto unmap;

 		addr = dma_map_single(dev->dev, iter->data, iter->len,
 				      DMA_TO_DEVICE);
-		if (dma_mapping_error(dev->dev, addr))
+		if (unlikely(dma_mapping_error(dev->dev, addr)))
 			goto unmap;

-		buf[n].addr = addr;
-		buf[n++].len = iter->len;
+		tx_info.buf[n].addr = addr;
+		tx_info.buf[n++].len = iter->len;
+	}
+	tx_info.nbuf = n;
+
+	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+		ret = -ENOMEM;
+		goto unmap;
 	}

-	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				DMA_TO_DEVICE);
+	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				   DMA_TO_DEVICE);
+	if (ret < 0)
 		goto unmap;

-	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+				tx_info.info, tx_info.skb, t);

 unmap:
-	ret = -ENOMEM;
 	for (n--; n > 0; n--)
-		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
-				 DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+				 tx_info.buf[n].len, DMA_TO_DEVICE);

 free:
-	e.skb = skb;
-	e.txwi = t;
-	dev->drv->tx_complete_skb(dev, q, &e, true);
+#ifdef CONFIG_NL80211_TESTMODE
+	/* fix tx_done accounting on queue overflow */
+	if (tx_info.skb == dev->test.tx_skb)
+		dev->test.tx_done--;
+#endif
+
+	dev_kfree_skb(tx_info.skb);
 	mt76_put_txwi(dev, t);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);

 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	dma_addr_t addr;
 	void *buf;
 	int frames = 0;
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 	int offset = q->buf_offset;
-	int idx;
-	void *(*alloc)(unsigned int fragsz);
-
-	if (napi)
-		alloc = napi_alloc_frag;
-	else
-		alloc = netdev_alloc_frag;

 	spin_lock_bh(&q->lock);

 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf;

-		buf = alloc(q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;

 		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
-		if (dma_mapping_error(dev->dev, addr)) {
+		if (unlikely(dma_mapping_error(dev->dev, addr))) {
 			skb_free_frag(buf);
 			break;
 		}

 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
-		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+		qbuf.skip_unmap = false;
+		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
 		frames++;
 	}

@@ -361,10 +471,12 @@
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;

 	spin_lock_bh(&q->lock);
+
 	do {
 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
 		if (!buf)
@@ -372,7 +484,20 @@

 		skb_free_frag(buf);
 	} while (1);
+
+	if (q->rx_head) {
+		dev_kfree_skb(q->rx_head);
+		q->rx_head = NULL;
+	}
+
 	spin_unlock_bh(&q->lock);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }

 static void
@@ -382,11 +507,11 @@
 	int i;

 	for (i = 0; i < q->ndesc; i++)
-		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

 	mt76_dma_rx_cleanup(dev, q);
 	mt76_dma_sync_idx(dev, q);
-	mt76_dma_rx_fill(dev, q, false);
+	mt76_dma_rx_fill(dev, q);
 }

 static void
@@ -419,10 +544,9 @@
 static int
 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 {
+	int len, data_len, done = 0;
 	struct sk_buff *skb;
 	unsigned char *data;
-	int len;
-	int done = 0;
 	bool more;

 	while (done < budget) {
@@ -431,6 +555,19 @@
 		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
 		if (!data)
 			break;
+
+		if (q->rx_head)
+			data_len = q->buf_size;
+		else
+			data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+		if (data_len < len + q->buf_offset) {
+			dev_kfree_skb(q->rx_head);
+			q->rx_head = NULL;
+
+			skb_free_frag(data);
+			continue;
+		}

 		if (q->rx_head) {
 			mt76_add_fragment(dev, q, data, len, more);
@@ -442,15 +579,10 @@
 			skb_free_frag(data);
 			continue;
 		}
-
 		skb_reserve(skb, q->buf_offset);
-		if (skb->tail + len > skb->end) {
-			dev_kfree_skb(skb);
-			continue;
-		}

 		if (q == &dev->q_rx[MT_RXQ_MCU]) {
-			u32 *rxfce = (u32 *) skb->cb;
+			u32 *rxfce = (u32 *)skb->cb;
 			*rxfce = info;
 		}

@@ -465,7 +597,7 @@
 		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
 	}

-	mt76_dma_rx_fill(dev, q, true);
+	mt76_dma_rx_fill(dev, q);
 	return done;
 }

@@ -478,6 +610,7 @@
 	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
 	qid = napi - dev->napi;

+	local_bh_disable();
 	rcu_read_lock();

 	do {
@@ -487,11 +620,10 @@
 	} while (cur && done < budget);

 	rcu_read_unlock();
+	local_bh_enable();

-	if (done < budget) {
-		napi_complete(napi);
+	if (done < budget && napi_complete(napi))
 		dev->drv->rx_poll_complete(dev, qid);
-	}

 	return done;
 }
@@ -503,11 +635,10 @@

 	init_dummy_netdev(&dev->napi_dev);

-	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
 			       64);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
-		skb_queue_head_init(&dev->rx_skb[i]);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
 		napi_enable(&dev->napi[i]);
 	}

@@ -517,17 +648,16 @@
 static const struct mt76_queue_ops mt76_dma_ops = {
 	.init = mt76_dma_init,
 	.alloc = mt76_dma_alloc_queue,
-	.add_buf = mt76_dma_add_buf,
+	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
 	.tx_queue_skb = mt76_dma_tx_queue_skb,
 	.tx_cleanup = mt76_dma_tx_cleanup,
 	.rx_reset = mt76_dma_rx_reset,
 	.kick = mt76_dma_kick_queue,
 };

-int mt76_dma_attach(struct mt76_dev *dev)
+void mt76_dma_attach(struct mt76_dev *dev)
 {
 	dev->queue_ops = &mt76_dma_ops;
-	return 0;
 }
 EXPORT_SYMBOL_GPL(mt76_dma_attach);

@@ -535,12 +665,16 @@
 {
 	int i;

+	mt76_worker_disable(&dev->tx_worker);
+	netif_napi_del(&dev->tx_napi);
 	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
 		mt76_dma_tx_cleanup(dev, i, true);

-	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+	mt76_for_each_q_rx(dev, i) {
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
 	}
+
+	mt76_free_pending_txwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);