.. | .. |
| 1 | +// SPDX-License-Identifier: ISC |
1 | 2 | /* |
2 | 3 | * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> |
3 | | - * |
4 | | - * Permission to use, copy, modify, and/or distribute this software for any |
5 | | - * purpose with or without fee is hereby granted, provided that the above |
6 | | - * copyright notice and this permission notice appear in all copies. |
7 | | - * |
8 | | - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
9 | | - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
10 | | - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
11 | | - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
12 | | - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
13 | | - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
14 | | - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
15 | 4 | */ |
16 | 5 | |
17 | 6 | #include <linux/dma-mapping.h> |
18 | 7 | #include "mt76.h" |
19 | 8 | #include "dma.h" |
20 | 9 | |
21 | | -#define DMA_DUMMY_TXWI ((void *) ~0) |
| 10 | +static struct mt76_txwi_cache * |
| 11 | +mt76_alloc_txwi(struct mt76_dev *dev) |
| 12 | +{ |
| 13 | + struct mt76_txwi_cache *t; |
| 14 | + dma_addr_t addr; |
| 15 | + u8 *txwi; |
| 16 | + int size; |
| 17 | + |
| 18 | + size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); |
| 19 | + txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC); |
| 20 | + if (!txwi) |
| 21 | + return NULL; |
| 22 | + |
| 23 | + addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size, |
| 24 | + DMA_TO_DEVICE); |
| 25 | + t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); |
| 26 | + t->dma_addr = addr; |
| 27 | + |
| 28 | + return t; |
| 29 | +} |
| 30 | + |
| 31 | +static struct mt76_txwi_cache * |
| 32 | +__mt76_get_txwi(struct mt76_dev *dev) |
| 33 | +{ |
| 34 | + struct mt76_txwi_cache *t = NULL; |
| 35 | + |
| 36 | + spin_lock(&dev->lock); |
| 37 | + if (!list_empty(&dev->txwi_cache)) { |
| 38 | + t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, |
| 39 | + list); |
| 40 | + list_del(&t->list); |
| 41 | + } |
| 42 | + spin_unlock(&dev->lock); |
| 43 | + |
| 44 | + return t; |
| 45 | +} |
| 46 | + |
| 47 | +static struct mt76_txwi_cache * |
| 48 | +mt76_get_txwi(struct mt76_dev *dev) |
| 49 | +{ |
| 50 | + struct mt76_txwi_cache *t = __mt76_get_txwi(dev); |
| 51 | + |
| 52 | + if (t) |
| 53 | + return t; |
| 54 | + |
| 55 | + return mt76_alloc_txwi(dev); |
| 56 | +} |
| 57 | + |
| 58 | +void |
| 59 | +mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) |
| 60 | +{ |
| 61 | + if (!t) |
| 62 | + return; |
| 63 | + |
| 64 | + spin_lock(&dev->lock); |
| 65 | + list_add(&t->list, &dev->txwi_cache); |
| 66 | + spin_unlock(&dev->lock); |
| 67 | +} |
| 68 | +EXPORT_SYMBOL_GPL(mt76_put_txwi); |
| 69 | + |
| 70 | +static void |
| 71 | +mt76_free_pending_txwi(struct mt76_dev *dev) |
| 72 | +{ |
| 73 | + struct mt76_txwi_cache *t; |
| 74 | + |
| 75 | + local_bh_disable(); |
| 76 | + while ((t = __mt76_get_txwi(dev)) != NULL) |
| 77 | + dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, |
| 78 | + DMA_TO_DEVICE); |
| 79 | + local_bh_enable(); |
| 80 | +} |
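The block above replaces the old `DMA_DUMMY_TXWI` marker with a small recycling pool: each TXWI buffer is allocated once (devm-managed), DMA-mapped once, and then circulated through a spinlock-protected free list, so the hot TX path never allocates or maps memory per packet. Note how `mt76_alloc_txwi()` carves the bookkeeping struct out of the tail of the same cache-aligned allocation (`txwi + dev->drv->txwi_size`). A minimal, self-contained sketch of the free-list pattern, using hypothetical names (`my_txwi_pool`, `my_txwi`) rather than the mt76 API:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_txwi {
	struct list_head list;
	dma_addr_t dma_addr;		/* mapping kept for the buffer's lifetime */
};

struct my_txwi_pool {
	spinlock_t lock;
	struct list_head free;		/* idle, still-mapped entries */
};

/* Take a cached entry if one exists; the caller falls back to allocating. */
static struct my_txwi *my_txwi_get(struct my_txwi_pool *pool)
{
	struct my_txwi *t = NULL;

	spin_lock(&pool->lock);
	if (!list_empty(&pool->free)) {
		t = list_first_entry(&pool->free, struct my_txwi, list);
		list_del(&t->list);
	}
	spin_unlock(&pool->lock);

	return t;
}

/* Return an entry after TX completion; LIFO reuse keeps it cache-hot. */
static void my_txwi_put(struct my_txwi_pool *pool, struct my_txwi *t)
{
	spin_lock(&pool->lock);
	list_add(&t->list, &pool->free);
	spin_unlock(&pool->lock);
}
```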
22 | 81 | |
23 | 82 | static int |
24 | | -mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q) |
| 83 | +mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, |
| 84 | + int idx, int n_desc, int bufsize, |
| 85 | + u32 ring_base) |
25 | 86 | { |
26 | 87 | int size; |
27 | 88 | int i; |
28 | 89 | |
29 | 90 | spin_lock_init(&q->lock); |
30 | | - INIT_LIST_HEAD(&q->swq); |
| 91 | + |
| 92 | + q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; |
| 93 | + q->ndesc = n_desc; |
| 94 | + q->buf_size = bufsize; |
| 95 | + q->hw_idx = idx; |
31 | 96 | |
32 | 97 | size = q->ndesc * sizeof(struct mt76_desc); |
33 | 98 | q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL); |
.. | .. |
43 | 108 | for (i = 0; i < q->ndesc; i++) |
44 | 109 | q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
45 | 110 | |
46 | | - iowrite32(q->desc_dma, &q->regs->desc_base); |
47 | | - iowrite32(0, &q->regs->cpu_idx); |
48 | | - iowrite32(0, &q->regs->dma_idx); |
49 | | - iowrite32(q->ndesc, &q->regs->ring_size); |
| 111 | + writel(q->desc_dma, &q->regs->desc_base); |
| 112 | + writel(0, &q->regs->cpu_idx); |
| 113 | + writel(0, &q->regs->dma_idx); |
| 114 | + writel(q->ndesc, &q->regs->ring_size); |
50 | 115 | |
51 | 116 | return 0; |
52 | 117 | } |
.. | .. |
56 | 121 | struct mt76_queue_buf *buf, int nbufs, u32 info, |
57 | 122 | struct sk_buff *skb, void *txwi) |
58 | 123 | { |
| 124 | + struct mt76_queue_entry *entry; |
59 | 125 | struct mt76_desc *desc; |
60 | 126 | u32 ctrl; |
61 | 127 | int i, idx = -1; |
62 | 128 | |
63 | | - if (txwi) |
64 | | - q->entry[q->head].txwi = DMA_DUMMY_TXWI; |
| 129 | + if (txwi) { |
| 130 | + q->entry[q->head].txwi = DMA_DUMMY_DATA; |
| 131 | + q->entry[q->head].skip_buf0 = true; |
| 132 | + } |
65 | 133 | |
66 | 134 | for (i = 0; i < nbufs; i += 2, buf += 2) { |
67 | 135 | u32 buf0 = buf[0].addr, buf1 = 0; |
68 | 136 | |
| 137 | + idx = q->head; |
| 138 | + q->head = (q->head + 1) % q->ndesc; |
| 139 | + |
| 140 | + desc = &q->desc[idx]; |
| 141 | + entry = &q->entry[idx]; |
| 142 | + |
| 143 | + if (buf[0].skip_unmap) |
| 144 | + entry->skip_buf0 = true; |
| 145 | + entry->skip_buf1 = i == nbufs - 1; |
| 146 | + |
| 147 | + entry->dma_addr[0] = buf[0].addr; |
| 148 | + entry->dma_len[0] = buf[0].len; |
| 149 | + |
69 | 150 | ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); |
70 | 151 | if (i < nbufs - 1) { |
| 152 | + entry->dma_addr[1] = buf[1].addr; |
| 153 | + entry->dma_len[1] = buf[1].len; |
71 | 154 | buf1 = buf[1].addr; |
72 | 155 | ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); |
| 156 | + if (buf[1].skip_unmap) |
| 157 | + entry->skip_buf1 = true; |
73 | 158 | } |
74 | 159 | |
75 | 160 | if (i == nbufs - 1) |
76 | 161 | ctrl |= MT_DMA_CTL_LAST_SEC0; |
77 | 162 | else if (i == nbufs - 2) |
78 | 163 | ctrl |= MT_DMA_CTL_LAST_SEC1; |
79 | | - |
80 | | - idx = q->head; |
81 | | - q->head = (q->head + 1) % q->ndesc; |
82 | | - |
83 | | - desc = &q->desc[idx]; |
84 | 164 | |
85 | 165 | WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); |
86 | 166 | WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); |
.. | .. |
101 | 181 | struct mt76_queue_entry *prev_e) |
102 | 182 | { |
103 | 183 | struct mt76_queue_entry *e = &q->entry[idx]; |
104 | | - __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl); |
105 | | - u32 ctrl = le32_to_cpu(__ctrl); |
106 | 184 | |
107 | | - if (!e->txwi || !e->skb) { |
108 | | - __le32 addr = READ_ONCE(q->desc[idx].buf0); |
109 | | - u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); |
110 | | - |
111 | | - dma_unmap_single(dev->dev, le32_to_cpu(addr), len, |
| 185 | + if (!e->skip_buf0) |
| 186 | + dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0], |
112 | 187 | DMA_TO_DEVICE); |
113 | | - } |
114 | 188 | |
115 | | - if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) { |
116 | | - __le32 addr = READ_ONCE(q->desc[idx].buf1); |
117 | | - u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl); |
118 | | - |
119 | | - dma_unmap_single(dev->dev, le32_to_cpu(addr), len, |
| 189 | + if (!e->skip_buf1) |
| 190 | + dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1], |
120 | 191 | DMA_TO_DEVICE); |
121 | | - } |
122 | 192 | |
123 | | - if (e->txwi == DMA_DUMMY_TXWI) |
| 193 | + if (e->txwi == DMA_DUMMY_DATA) |
124 | 194 | e->txwi = NULL; |
| 195 | + |
| 196 | + if (e->skb == DMA_DUMMY_DATA) |
| 197 | + e->skb = NULL; |
125 | 198 | |
126 | 199 | *prev_e = *e; |
127 | 200 | memset(e, 0, sizeof(*e)); |
.. | .. |
130 | 203 | static void |
131 | 204 | mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) |
132 | 205 | { |
133 | | - q->head = ioread32(&q->regs->dma_idx); |
| 206 | + writel(q->desc_dma, &q->regs->desc_base); |
| 207 | + writel(q->ndesc, &q->regs->ring_size); |
| 208 | + q->head = readl(&q->regs->dma_idx); |
134 | 209 | q->tail = q->head; |
135 | | - iowrite32(q->head, &q->regs->cpu_idx); |
| 210 | +} |
| 211 | + |
| 212 | +static void |
| 213 | +mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) |
| 214 | +{ |
| 215 | + wmb(); |
| 216 | + writel(q->head, &q->regs->cpu_idx); |
136 | 217 | } |
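`mt76_dma_kick_queue()` now carries the write barrier itself: the descriptor stores have to be visible before the CPU index (the ring doorbell) is written, otherwise the DMA engine could fetch a half-written descriptor. The usual calling pattern, mirroring `mt76_dma_tx_queue_skb_raw()` further down in this patch, is to post descriptors and kick once under the queue lock:

```c
/* Sketch of the enqueue pattern: fill one or more descriptors, then
 * ring the doorbell once so the device sees a consistent ring. */
spin_lock_bh(&q->lock);
mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_dma_kick_queue(dev, q);	/* wmb() + writel(q->head, &q->regs->cpu_idx) */
spin_unlock_bh(&q->lock);
```

`mt76_dma_sync_idx()` also reprograms `desc_base` and `ring_size` now, so the flush path in `mt76_dma_tx_cleanup()` below can re-seed the hardware registers (for instance after a queue reset) before kicking the ring.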
137 | 218 | |
138 | 219 | static void |
139 | 220 | mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) |
140 | 221 | { |
141 | | - struct mt76_queue *q = &dev->q_tx[qid]; |
| 222 | + struct mt76_queue *q = dev->q_tx[qid]; |
142 | 223 | struct mt76_queue_entry entry; |
143 | 224 | bool wake = false; |
144 | 225 | int last; |
145 | 226 | |
146 | | - if (!q->ndesc) |
| 227 | + if (!q) |
147 | 228 | return; |
148 | 229 | |
149 | | - spin_lock_bh(&q->lock); |
150 | 230 | if (flush) |
151 | 231 | last = -1; |
152 | 232 | else |
153 | | - last = ioread32(&q->regs->dma_idx); |
| 233 | + last = readl(&q->regs->dma_idx); |
154 | 234 | |
155 | | - while (q->queued && q->tail != last) { |
| 235 | + while (q->queued > 0 && q->tail != last) { |
156 | 236 | mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); |
157 | | - if (entry.schedule) |
158 | | - q->swq_queued--; |
159 | | - |
160 | | - if (entry.skb) |
161 | | - dev->drv->tx_complete_skb(dev, q, &entry, flush); |
| 237 | + mt76_queue_tx_complete(dev, q, &entry); |
162 | 238 | |
163 | 239 | if (entry.txwi) { |
164 | | - mt76_put_txwi(dev, entry.txwi); |
165 | | - wake = true; |
| 240 | + if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) |
| 241 | + mt76_put_txwi(dev, entry.txwi); |
| 242 | + wake = !flush; |
166 | 243 | } |
167 | 244 | |
168 | | - q->tail = (q->tail + 1) % q->ndesc; |
169 | | - q->queued--; |
170 | | - |
171 | 245 | if (!flush && q->tail == last) |
172 | | - last = ioread32(&q->regs->dma_idx); |
| 246 | + last = readl(&q->regs->dma_idx); |
| 247 | + |
173 | 248 | } |
174 | 249 | |
175 | | - if (!flush) |
176 | | - mt76_txq_schedule(dev, q); |
177 | | - else |
| 250 | + if (flush) { |
| 251 | + spin_lock_bh(&q->lock); |
178 | 252 | mt76_dma_sync_idx(dev, q); |
| 253 | + mt76_dma_kick_queue(dev, q); |
| 254 | + spin_unlock_bh(&q->lock); |
| 255 | + } |
179 | 256 | |
180 | | - wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; |
| 257 | + wake = wake && q->stopped && |
| 258 | + qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; |
| 259 | + if (wake) |
| 260 | + q->stopped = false; |
181 | 261 | |
182 | 262 | if (!q->queued) |
183 | 263 | wake_up(&dev->tx_wait); |
184 | | - |
185 | | - spin_unlock_bh(&q->lock); |
186 | 264 | |
187 | 265 | if (wake) |
188 | 266 | ieee80211_wake_queue(dev->hw, qid); |
.. | .. |
198 | 276 | void *buf = e->buf; |
199 | 277 | int buf_len = SKB_WITH_OVERHEAD(q->buf_size); |
200 | 278 | |
201 | | - buf_addr = le32_to_cpu(READ_ONCE(desc->buf0)); |
| 279 | + buf_addr = e->dma_addr[0]; |
202 | 280 | if (len) { |
203 | 281 | u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
204 | 282 | *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl); |
.. | .. |
224 | 302 | if (!q->queued) |
225 | 303 | return NULL; |
226 | 304 | |
227 | | - if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) |
| 305 | + if (flush) |
| 306 | + q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
| 307 | + else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) |
228 | 308 | return NULL; |
229 | 309 | |
230 | 310 | q->tail = (q->tail + 1) % q->ndesc; |
.. | .. |
233 | 313 | return mt76_dma_get_buf(dev, q, idx, len, info, more); |
234 | 314 | } |
235 | 315 | |
236 | | -static void |
237 | | -mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) |
| 316 | +static int |
| 317 | +mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, |
| 318 | + struct sk_buff *skb, u32 tx_info) |
238 | 319 | { |
239 | | - iowrite32(q->head, &q->regs->cpu_idx); |
| 320 | + struct mt76_queue *q = dev->q_tx[qid]; |
| 321 | + struct mt76_queue_buf buf = {}; |
| 322 | + dma_addr_t addr; |
| 323 | + |
| 324 | + if (q->queued + 1 >= q->ndesc - 1) |
| 325 | + goto error; |
| 326 | + |
| 327 | + addr = dma_map_single(dev->dev, skb->data, skb->len, |
| 328 | + DMA_TO_DEVICE); |
| 329 | + if (unlikely(dma_mapping_error(dev->dev, addr))) |
| 330 | + goto error; |
| 331 | + |
| 332 | + buf.addr = addr; |
| 333 | + buf.len = skb->len; |
| 334 | + |
| 335 | + spin_lock_bh(&q->lock); |
| 336 | + mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); |
| 337 | + mt76_dma_kick_queue(dev, q); |
| 338 | + spin_unlock_bh(&q->lock); |
| 339 | + |
| 340 | + return 0; |
| 341 | + |
| 342 | +error: |
| 343 | + dev_kfree_skb(skb); |
| 344 | + return -ENOMEM; |
240 | 345 | } |
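Both TX entry points check ring occupancy before touching the ring. Each hardware descriptor carries up to two buffers, so `nbuf` scatter buffers consume `(nbuf + 1) / 2` slots, and headroom is kept so the ring never fills completely (a completely full ring would be indistinguishable from an empty one with head == tail). A hypothetical helper restating the check used here and in `mt76_dma_tx_queue_skb()` below:

```c
/* Hypothetical helper, not part of the patch: true if the queue can
 * still accept nbuf scatter buffers. */
static bool mt76_dma_ring_has_room(const struct mt76_queue *q, int nbuf)
{
	int needed = (nbuf + 1) / 2;	/* two buffers packed per descriptor */

	return q->queued + needed < q->ndesc - 1;
}
```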
241 | 346 | |
242 | | -int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, |
243 | | - struct sk_buff *skb, struct mt76_wcid *wcid, |
244 | | - struct ieee80211_sta *sta) |
| 347 | +static int |
| 348 | +mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, |
| 349 | + struct sk_buff *skb, struct mt76_wcid *wcid, |
| 350 | + struct ieee80211_sta *sta) |
245 | 351 | { |
246 | | - struct mt76_queue_entry e; |
| 352 | + struct mt76_queue *q = dev->q_tx[qid]; |
| 353 | + struct mt76_tx_info tx_info = { |
| 354 | + .skb = skb, |
| 355 | + }; |
| 356 | + struct ieee80211_hw *hw; |
| 357 | + int len, n = 0, ret = -ENOMEM; |
247 | 358 | struct mt76_txwi_cache *t; |
248 | | - struct mt76_queue_buf buf[32]; |
249 | 359 | struct sk_buff *iter; |
250 | 360 | dma_addr_t addr; |
251 | | - int len; |
252 | | - u32 tx_info = 0; |
253 | | - int n, ret; |
| 361 | + u8 *txwi; |
254 | 362 | |
255 | 363 | t = mt76_get_txwi(dev); |
256 | 364 | if (!t) { |
257 | | - ieee80211_free_txskb(dev->hw, skb); |
| 365 | + hw = mt76_tx_status_get_hw(dev, skb); |
| 366 | + ieee80211_free_txskb(hw, skb); |
258 | 367 | return -ENOMEM; |
259 | 368 | } |
| 369 | + txwi = mt76_get_txwi_ptr(dev, t); |
260 | 370 | |
261 | | - dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi), |
262 | | - DMA_TO_DEVICE); |
263 | | - ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta, |
264 | | - &tx_info); |
265 | | - dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi), |
266 | | - DMA_TO_DEVICE); |
267 | | - if (ret < 0) |
268 | | - goto free; |
| 371 | + skb->prev = skb->next = NULL; |
| 372 | + if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS) |
| 373 | + mt76_insert_hdr_pad(skb); |
269 | 374 | |
270 | | - len = skb->len - skb->data_len; |
| 375 | + len = skb_headlen(skb); |
271 | 376 | addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE); |
272 | | - if (dma_mapping_error(dev->dev, addr)) { |
273 | | - ret = -ENOMEM; |
| 377 | + if (unlikely(dma_mapping_error(dev->dev, addr))) |
274 | 378 | goto free; |
275 | | - } |
276 | 379 | |
277 | | - n = 0; |
278 | | - buf[n].addr = t->dma_addr; |
279 | | - buf[n++].len = dev->drv->txwi_size; |
280 | | - buf[n].addr = addr; |
281 | | - buf[n++].len = len; |
| 380 | + tx_info.buf[n].addr = t->dma_addr; |
| 381 | + tx_info.buf[n++].len = dev->drv->txwi_size; |
| 382 | + tx_info.buf[n].addr = addr; |
| 383 | + tx_info.buf[n++].len = len; |
282 | 384 | |
283 | 385 | skb_walk_frags(skb, iter) { |
284 | | - if (n == ARRAY_SIZE(buf)) |
| 386 | + if (n == ARRAY_SIZE(tx_info.buf)) |
285 | 387 | goto unmap; |
286 | 388 | |
287 | 389 | addr = dma_map_single(dev->dev, iter->data, iter->len, |
288 | 390 | DMA_TO_DEVICE); |
289 | | - if (dma_mapping_error(dev->dev, addr)) |
| 391 | + if (unlikely(dma_mapping_error(dev->dev, addr))) |
290 | 392 | goto unmap; |
291 | 393 | |
292 | | - buf[n].addr = addr; |
293 | | - buf[n++].len = iter->len; |
| 394 | + tx_info.buf[n].addr = addr; |
| 395 | + tx_info.buf[n++].len = iter->len; |
| 396 | + } |
| 397 | + tx_info.nbuf = n; |
| 398 | + |
| 399 | + if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { |
| 400 | + ret = -ENOMEM; |
| 401 | + goto unmap; |
294 | 402 | } |
295 | 403 | |
296 | | - if (q->queued + (n + 1) / 2 >= q->ndesc - 1) |
| 404 | + dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size, |
| 405 | + DMA_TO_DEVICE); |
| 406 | + ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info); |
| 407 | + dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size, |
| 408 | + DMA_TO_DEVICE); |
| 409 | + if (ret < 0) |
297 | 410 | goto unmap; |
298 | 411 | |
299 | | - return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t); |
| 412 | + return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, |
| 413 | + tx_info.info, tx_info.skb, t); |
300 | 414 | |
301 | 415 | unmap: |
302 | | - ret = -ENOMEM; |
303 | 416 | for (n--; n > 0; n--) |
304 | | - dma_unmap_single(dev->dev, buf[n].addr, buf[n].len, |
305 | | - DMA_TO_DEVICE); |
| 417 | + dma_unmap_single(dev->dev, tx_info.buf[n].addr, |
| 418 | + tx_info.buf[n].len, DMA_TO_DEVICE); |
306 | 419 | |
307 | 420 | free: |
308 | | - e.skb = skb; |
309 | | - e.txwi = t; |
310 | | - dev->drv->tx_complete_skb(dev, q, &e, true); |
| 421 | +#ifdef CONFIG_NL80211_TESTMODE |
| 422 | + /* fix tx_done accounting on queue overflow */ |
| 423 | + if (tx_info.skb == dev->test.tx_skb) |
| 424 | + dev->test.tx_done--; |
| 425 | +#endif |
| 426 | + |
| 427 | + dev_kfree_skb(tx_info.skb); |
311 | 428 | mt76_put_txwi(dev, t); |
312 | 429 | return ret; |
313 | 430 | } |
314 | | -EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb); |
315 | 431 | |
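The reordered `mt76_dma_tx_queue_skb()` maps the skb head and its fragments and verifies ring space first, and only then calls `tx_prepare_skb()` to build the TXWI; it also syncs the full `dev->drv->txwi_size` rather than `sizeof(t->txwi)`. The sync pair around the CPU write is the standard pattern for a buffer that stays DMA-mapped with `DMA_TO_DEVICE` for its whole lifetime:

```c
/* Pattern used above: temporarily hand a long-lived DMA_TO_DEVICE
 * mapping back to the CPU, fill it, then return it to the device
 * before the descriptor is posted. */
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
			DMA_TO_DEVICE);
/* ... write the TXWI through the txwi pointer ... */
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
			   DMA_TO_DEVICE);
```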
316 | 432 | static int |
317 | | -mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) |
| 433 | +mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) |
318 | 434 | { |
319 | 435 | dma_addr_t addr; |
320 | 436 | void *buf; |
321 | 437 | int frames = 0; |
322 | 438 | int len = SKB_WITH_OVERHEAD(q->buf_size); |
323 | 439 | int offset = q->buf_offset; |
324 | | - int idx; |
325 | | - void *(*alloc)(unsigned int fragsz); |
326 | | - |
327 | | - if (napi) |
328 | | - alloc = napi_alloc_frag; |
329 | | - else |
330 | | - alloc = netdev_alloc_frag; |
331 | 440 | |
332 | 441 | spin_lock_bh(&q->lock); |
333 | 442 | |
334 | 443 | while (q->queued < q->ndesc - 1) { |
335 | 444 | struct mt76_queue_buf qbuf; |
336 | 445 | |
337 | | - buf = alloc(q->buf_size); |
| 446 | + buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); |
338 | 447 | if (!buf) |
339 | 448 | break; |
340 | 449 | |
341 | 450 | addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE); |
342 | | - if (dma_mapping_error(dev->dev, addr)) { |
| 451 | + if (unlikely(dma_mapping_error(dev->dev, addr))) { |
343 | 452 | skb_free_frag(buf); |
344 | 453 | break; |
345 | 454 | } |
346 | 455 | |
347 | 456 | qbuf.addr = addr + offset; |
348 | 457 | qbuf.len = len - offset; |
349 | | - idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); |
| 458 | + qbuf.skip_unmap = false; |
| 459 | + mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); |
350 | 460 | frames++; |
351 | 461 | } |
352 | 462 | |
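The refill loop hands the device only the region past `q->buf_offset` and caps the DMA length at `SKB_WITH_OVERHEAD(q->buf_size)`, so the tail of every page fragment stays free for the `skb_shared_info` that skb construction needs later. A sketch of the assumed buffer layout (illustration, not code from this file):

```c
/*
 * One RX fragment of q->buf_size bytes:
 *
 *   buf           buf + q->buf_offset
 *    |<-- offset -->|<-- device-writable payload -->|<-- skb_shared_info -->|
 *
 * len       = SKB_WITH_OVERHEAD(q->buf_size);  usable bytes before the tail
 * qbuf.addr = addr + offset;                   device skips the host headroom
 * qbuf.len  = len - offset;                    max bytes the device may write
 */
```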
.. | .. |
361 | 471 | static void |
362 | 472 | mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) |
363 | 473 | { |
| 474 | + struct page *page; |
364 | 475 | void *buf; |
365 | 476 | bool more; |
366 | 477 | |
367 | 478 | spin_lock_bh(&q->lock); |
| 479 | + |
368 | 480 | do { |
369 | 481 | buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more); |
370 | 482 | if (!buf) |
.. | .. |
372 | 484 | |
373 | 485 | skb_free_frag(buf); |
374 | 486 | } while (1); |
| 487 | + |
| 488 | + if (q->rx_head) { |
| 489 | + dev_kfree_skb(q->rx_head); |
| 490 | + q->rx_head = NULL; |
| 491 | + } |
| 492 | + |
375 | 493 | spin_unlock_bh(&q->lock); |
| 494 | + |
| 495 | + if (!q->rx_page.va) |
| 496 | + return; |
| 497 | + |
| 498 | + page = virt_to_page(q->rx_page.va); |
| 499 | + __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); |
| 500 | + memset(&q->rx_page, 0, sizeof(q->rx_page)); |
376 | 501 | } |
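`mt76_dma_rx_cleanup()` now also drops a partially reassembled frame (`q->rx_head`) and releases the per-queue page-fragment cache. A compressed, self-contained sketch of the alloc/free/drain pairing it relies on (hypothetical helper, lifetimes squeezed into one function purely for illustration):

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void demo_page_frag_lifetime(struct page_frag_cache *pfc)
{
	/* refill path: one reference handed out per allocated fragment */
	void *buf = page_frag_alloc(pfc, 2048, GFP_ATOMIC);

	if (buf)
		skb_free_frag(buf);		/* per-buffer release */

	/* teardown: drop the references the cache itself still holds */
	if (pfc->va) {
		__page_frag_cache_drain(virt_to_page(pfc->va),
					pfc->pagecnt_bias);
		memset(pfc, 0, sizeof(*pfc));
	}
}
```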
377 | 502 | |
378 | 503 | static void |
.. | .. |
382 | 507 | int i; |
383 | 508 | |
384 | 509 | for (i = 0; i < q->ndesc; i++) |
385 | | - q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
| 510 | + q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); |
386 | 511 | |
387 | 512 | mt76_dma_rx_cleanup(dev, q); |
388 | 513 | mt76_dma_sync_idx(dev, q); |
389 | | - mt76_dma_rx_fill(dev, q, false); |
| 514 | + mt76_dma_rx_fill(dev, q); |
390 | 515 | } |
391 | 516 | |
392 | 517 | static void |
.. | .. |
419 | 544 | static int |
420 | 545 | mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) |
421 | 546 | { |
| 547 | + int len, data_len, done = 0; |
422 | 548 | struct sk_buff *skb; |
423 | 549 | unsigned char *data; |
424 | | - int len; |
425 | | - int done = 0; |
426 | 550 | bool more; |
427 | 551 | |
428 | 552 | while (done < budget) { |
.. | .. |
431 | 555 | data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); |
432 | 556 | if (!data) |
433 | 557 | break; |
| 558 | + |
| 559 | + if (q->rx_head) |
| 560 | + data_len = q->buf_size; |
| 561 | + else |
| 562 | + data_len = SKB_WITH_OVERHEAD(q->buf_size); |
| 563 | + |
| 564 | + if (data_len < len + q->buf_offset) { |
| 565 | + dev_kfree_skb(q->rx_head); |
| 566 | + q->rx_head = NULL; |
| 567 | + |
| 568 | + skb_free_frag(data); |
| 569 | + continue; |
| 570 | + } |
434 | 571 | |
435 | 572 | if (q->rx_head) { |
436 | 573 | mt76_add_fragment(dev, q, data, len, more); |
.. | .. |
442 | 579 | skb_free_frag(data); |
443 | 580 | continue; |
444 | 581 | } |
445 | | - |
446 | 582 | skb_reserve(skb, q->buf_offset); |
447 | | - if (skb->tail + len > skb->end) { |
448 | | - dev_kfree_skb(skb); |
449 | | - continue; |
450 | | - } |
451 | 583 | |
452 | 584 | if (q == &dev->q_rx[MT_RXQ_MCU]) { |
453 | | - u32 *rxfce = (u32 *) skb->cb; |
| 585 | + u32 *rxfce = (u32 *)skb->cb; |
454 | 586 | *rxfce = info; |
455 | 587 | } |
456 | 588 | |
.. | .. |
465 | 597 | dev->drv->rx_skb(dev, q - dev->q_rx, skb); |
466 | 598 | } |
467 | 599 | |
468 | | - mt76_dma_rx_fill(dev, q, true); |
| 600 | + mt76_dma_rx_fill(dev, q); |
469 | 601 | return done; |
470 | 602 | } |
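The new length check in `mt76_dma_rx_process()` drops oversized frames before any skb work is done, replacing the old `skb->tail`/`skb->end` test that ran only after an skb had already been built. The bound differs between the first buffer of a frame and follow-up fragments: only the first buffer becomes an skb head and must keep the `skb_shared_info` tail room free, while fragments appended via `mt76_add_fragment()` may use the whole buffer. A hypothetical helper restating that selection:

```c
/* Hypothetical helper mirroring the data_len selection above; the
 * usable room must still cover q->buf_offset plus the payload. */
static int mt76_dma_rx_frag_room(const struct mt76_queue *q, bool first_frag)
{
	return first_frag ? SKB_WITH_OVERHEAD(q->buf_size) : q->buf_size;
}
```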
471 | 603 | |
.. | .. |
478 | 610 | dev = container_of(napi->dev, struct mt76_dev, napi_dev); |
479 | 611 | qid = napi - dev->napi; |
480 | 612 | |
| 613 | + local_bh_disable(); |
481 | 614 | rcu_read_lock(); |
482 | 615 | |
483 | 616 | do { |
.. | .. |
487 | 620 | } while (cur && done < budget); |
488 | 621 | |
489 | 622 | rcu_read_unlock(); |
| 623 | + local_bh_enable(); |
490 | 624 | |
491 | | - if (done < budget) { |
492 | | - napi_complete(napi); |
| 625 | + if (done < budget && napi_complete(napi)) |
493 | 626 | dev->drv->rx_poll_complete(dev, qid); |
494 | | - } |
495 | 627 | |
496 | 628 | return done; |
497 | 629 | } |
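Two small changes in the poll loop: bottom halves are disabled explicitly around the RX processing, so the networking code invoked from here always runs with BHs off regardless of the calling context, and the device interrupt is only re-armed when `napi_complete()` actually finished the poll. `napi_complete()` returns false when polling has to continue (for example under busy polling), and re-enabling the interrupt in that case would be wrong. The resulting completion pattern:

```c
/* Only re-enable the device interrupt if NAPI really came off the
 * poll list; rx_poll_complete() is the driver hook that typically
 * unmasks the RX IRQ. */
if (done < budget && napi_complete(napi))
	dev->drv->rx_poll_complete(dev, qid);
```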
.. | .. |
503 | 635 | |
504 | 636 | init_dummy_netdev(&dev->napi_dev); |
505 | 637 | |
506 | | - for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { |
| 638 | + mt76_for_each_q_rx(dev, i) { |
507 | 639 | netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, |
508 | 640 | 64); |
509 | | - mt76_dma_rx_fill(dev, &dev->q_rx[i], false); |
510 | | - skb_queue_head_init(&dev->rx_skb[i]); |
| 641 | + mt76_dma_rx_fill(dev, &dev->q_rx[i]); |
511 | 642 | napi_enable(&dev->napi[i]); |
512 | 643 | } |
513 | 644 | |
.. | .. |
517 | 648 | static const struct mt76_queue_ops mt76_dma_ops = { |
518 | 649 | .init = mt76_dma_init, |
519 | 650 | .alloc = mt76_dma_alloc_queue, |
520 | | - .add_buf = mt76_dma_add_buf, |
| 651 | + .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw, |
521 | 652 | .tx_queue_skb = mt76_dma_tx_queue_skb, |
522 | 653 | .tx_cleanup = mt76_dma_tx_cleanup, |
523 | 654 | .rx_reset = mt76_dma_rx_reset, |
524 | 655 | .kick = mt76_dma_kick_queue, |
525 | 656 | }; |
526 | 657 | |
527 | | -int mt76_dma_attach(struct mt76_dev *dev) |
| 658 | +void mt76_dma_attach(struct mt76_dev *dev) |
528 | 659 | { |
529 | 660 | dev->queue_ops = &mt76_dma_ops; |
530 | | - return 0; |
531 | 661 | } |
532 | 662 | EXPORT_SYMBOL_GPL(mt76_dma_attach); |
533 | 663 | |
.. | .. |
535 | 665 | { |
536 | 666 | int i; |
537 | 667 | |
| 668 | + mt76_worker_disable(&dev->tx_worker); |
| 669 | + netif_napi_del(&dev->tx_napi); |
538 | 670 | for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) |
539 | 671 | mt76_dma_tx_cleanup(dev, i, true); |
540 | 672 | |
541 | | - for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { |
| 673 | + mt76_for_each_q_rx(dev, i) { |
542 | 674 | netif_napi_del(&dev->napi[i]); |
543 | 675 | mt76_dma_rx_cleanup(dev, &dev->q_rx[i]); |
544 | 676 | } |
| 677 | + |
| 678 | + mt76_free_pending_txwi(dev); |
545 | 679 | } |
546 | 680 | EXPORT_SYMBOL_GPL(mt76_dma_cleanup); |