@@ -22,17 +22,49 @@
 #include <linux/iopoll.h>
 
 #include "mtu3.h"
+#include "mtu3_trace.h"
 
 #define QMU_CHECKSUM_LEN 16
 
 #define GPD_FLAGS_HWO BIT(0)
 #define GPD_FLAGS_BDP BIT(1)
 #define GPD_FLAGS_BPS BIT(2)
+#define GPD_FLAGS_ZLP BIT(6)
 #define GPD_FLAGS_IOC BIT(7)
+#define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)
 
-#define GPD_EXT_FLAG_ZLP BIT(5)
-#define GPD_EXT_NGP(x) (((x) & 0xf) << 4)
-#define GPD_EXT_BUF(x) (((x) & 0xf) << 0)
+#define GPD_RX_BUF_LEN_OG(x) (((x) & 0xffff) << 16)
+#define GPD_RX_BUF_LEN_EL(x) (((x) & 0xfffff) << 12)
+#define GPD_RX_BUF_LEN(mtu, x) \
+({ \
+        typeof(x) x_ = (x); \
+        ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
+})
+
+#define GPD_DATA_LEN_OG(x) ((x) & 0xffff)
+#define GPD_DATA_LEN_EL(x) ((x) & 0xfffff)
+#define GPD_DATA_LEN(mtu, x) \
+({ \
+        typeof(x) x_ = (x); \
+        ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
+})
+
+#define GPD_EXT_FLAG_ZLP BIT(29)
+#define GPD_EXT_NGP_OG(x) (((x) & 0xf) << 20)
+#define GPD_EXT_BUF_OG(x) (((x) & 0xf) << 16)
+#define GPD_EXT_NGP_EL(x) (((x) & 0xf) << 28)
+#define GPD_EXT_BUF_EL(x) (((x) & 0xf) << 24)
+#define GPD_EXT_NGP(mtu, x) \
+({ \
+        typeof(x) x_ = (x); \
+        ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
+})
+
+#define GPD_EXT_BUF(mtu, x) \
+({ \
+        typeof(x) x_ = (x); \
+        ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
+})
 
 #define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
 #define HILO_DMA(hi, lo) \
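The OG/EL pairs above encode the same GPD fields at two offsets: the original IP keeps 16-bit lengths and 4-bit address extensions low in the word, while the gen2 controller ((mtu)->gen2cp) widens the length to 20 bits and shifts the extension nibbles up. The wrapper macros choose a layout at run time, and the ({ ... }) statement expressions evaluate x only once. Below is a minimal userspace sketch of that selection pattern; the struct and function names are illustrative and not part of the driver, only the bit widths come from the defines above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors GPD_DATA_LEN_OG()/_EL() from the defines above. */
#define DATA_LEN_OG(x)  ((x) & 0xffff)   /* original IP: 16-bit data length   */
#define DATA_LEN_EL(x)  ((x) & 0xfffff)  /* enhanced (gen2cp) IP: 20-bit field */

struct fake_mtu3 {              /* hypothetical stand-in for struct mtu3 */
        int gen2cp;             /* true when the enhanced GPD layout is in use */
};

/* Pack a transfer length the way GPD_DATA_LEN(mtu, x) selects a layout. */
static uint32_t pack_data_len(const struct fake_mtu3 *mtu, uint32_t len)
{
        return mtu->gen2cp ? DATA_LEN_EL(len) : DATA_LEN_OG(len);
}

int main(void)
{
        struct fake_mtu3 og = { .gen2cp = 0 }, el = { .gen2cp = 1 };

        /* 0x12345 fits the 20-bit EL field but is truncated by the OG mask. */
        printf("OG: %#x  EL: %#x\n",
               pack_data_len(&og, 0x12345), pack_data_len(&el, 0x12345));
        return 0;
}

The typeof(x) temporary in the real macros serves the same purpose as passing len once here: the argument is evaluated a single time even though it appears in both layout branches.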
@@ -125,7 +157,7 @@
         struct qmu_gpd *gpd = ring->start;
 
         if (gpd) {
-                gpd->flag &= ~GPD_FLAGS_HWO;
+                gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
                 gpd_ring_init(ring, gpd);
         }
 }
@@ -154,27 +186,6 @@
         memset(ring, 0, sizeof(*ring));
 }
 
-/*
- * calculate check sum of a gpd or bd
- * add "noinline" and "mb" to prevent wrong calculation
- */
-static noinline u8 qmu_calc_checksum(u8 *data)
-{
-        u8 chksum = 0;
-        int i;
-
-        data[1] = 0x0; /* set checksum to 0 */
-
-        mb(); /* ensure the gpd/bd is really up-to-date */
-        for (i = 0; i < QMU_CHECKSUM_LEN; i++)
-                chksum += data[i];
-
-        /* Default: HWO=1, @flag[bit0] */
-        chksum += 1;
-
-        return 0xFF - chksum;
-}
-
 void mtu3_qmu_resume(struct mtu3_ep *mep)
 {
         struct mtu3 *mtu = mep->mtu;
@@ -199,6 +210,7 @@
         return ring->enqueue;
 }
 
+/* @dequeue may be NULL if ring is unallocated or freed */
 static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
 {
         if (ring->dequeue < ring->end)
@@ -235,16 +247,14 @@
         struct mtu3_gpd_ring *ring = &mep->gpd_ring;
         struct qmu_gpd *gpd = ring->enqueue;
         struct usb_request *req = &mreq->request;
+        struct mtu3 *mtu = mep->mtu;
         dma_addr_t enq_dma;
-        u16 ext_addr;
+        u32 ext_addr;
 
-        /* set all fields to zero as default value */
-        memset(gpd, 0, sizeof(*gpd));
-
+        gpd->dw0_info = 0; /* SW own it */
         gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
-        ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
-        gpd->buf_len = cpu_to_le16(req->length);
-        gpd->flag |= GPD_FLAGS_IOC;
+        ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+        gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));
 
         /* get the next GPD */
         enq = advance_enq_gpd(ring);
@@ -252,18 +262,24 @@
         dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
                 mep->epnum, gpd, enq, &enq_dma);
 
-        enq->flag &= ~GPD_FLAGS_HWO;
+        enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
         gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
-        ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
-        gpd->tx_ext_addr = cpu_to_le16(ext_addr);
+        ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+        gpd->dw0_info = cpu_to_le32(ext_addr);
 
-        if (req->zero)
-                gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+        if (req->zero) {
+                if (mtu->gen2cp)
+                        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
+                else
+                        gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
+        }
 
-        gpd->chksum = qmu_calc_checksum((u8 *)gpd);
-        gpd->flag |= GPD_FLAGS_HWO;
+        /* prevent reorder, make sure GPD's HWO is set last */
+        mb();
+        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
         mreq->gpd = gpd;
+        trace_mtu3_prepare_gpd(mep, gpd);
 
         return 0;
 }
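With the GPD checksum gone, descriptor hand-off relies purely on write ordering: both the TX and RX prepare paths above fill every field while software still owns the GPD, then issue mb() and set IOC and HWO in one final store. The following is a compressed, generic userspace sketch of that publish pattern, not the driver's structures; __atomic_thread_fence stands in for the kernel's mb().

#include <stdint.h>
#include <stdio.h>

/* Cut-down descriptor with the same ownership bit idea as qmu_gpd's dw0_info. */
struct desc {
        uint32_t dw0_info;      /* bit 0 = HWO (hardware owns), bit 7 = IOC */
        uint32_t buffer;
        uint32_t next;
        uint32_t dw3_info;
};

#define FLAG_HWO  (1u << 0)
#define FLAG_IOC  (1u << 7)

/* Fill every field while software owns the descriptor, then publish it with
 * a single store that flips HWO, fenced so earlier stores cannot be reordered
 * past the ownership hand-off. */
static void publish_desc(struct desc *d, uint32_t dma, uint32_t len, uint32_t next)
{
        d->dw0_info = 0;        /* SW own it */
        d->buffer = dma;
        d->dw3_info = len;
        d->next = next;

        __atomic_thread_fence(__ATOMIC_SEQ_CST);   /* stand-in for mb() */
        d->dw0_info |= FLAG_IOC | FLAG_HWO;        /* HWO flips last */
}

int main(void)
{
        struct desc d;

        publish_desc(&d, 0x1000, 512, 0x2000);
        printf("dw0_info=%#x\n", d.dw0_info);
        return 0;
}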
@@ -274,16 +290,14 @@
         struct mtu3_gpd_ring *ring = &mep->gpd_ring;
         struct qmu_gpd *gpd = ring->enqueue;
         struct usb_request *req = &mreq->request;
+        struct mtu3 *mtu = mep->mtu;
         dma_addr_t enq_dma;
-        u16 ext_addr;
+        u32 ext_addr;
 
-        /* set all fields to zero as default value */
-        memset(gpd, 0, sizeof(*gpd));
-
+        gpd->dw0_info = 0; /* SW own it */
         gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
-        ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
-        gpd->data_buf_len = cpu_to_le16(req->length);
-        gpd->flag |= GPD_FLAGS_IOC;
+        ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
+        gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));
 
         /* get the next GPD */
         enq = advance_enq_gpd(ring);
@@ -291,14 +305,16 @@
         dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
                 mep->epnum, gpd, enq, &enq_dma);
 
-        enq->flag &= ~GPD_FLAGS_HWO;
+        enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
         gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
-        ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
-        gpd->rx_ext_addr = cpu_to_le16(ext_addr);
-        gpd->chksum = qmu_calc_checksum((u8 *)gpd);
-        gpd->flag |= GPD_FLAGS_HWO;
+        ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+        gpd->dw3_info = cpu_to_le32(ext_addr);
+        /* prevent reorder, make sure GPD's HWO is set last */
+        mb();
+        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
         mreq->gpd = gpd;
+        trace_mtu3_prepare_gpd(mep, gpd);
 
         return 0;
 }
@@ -323,7 +339,6 @@
                 /* set QMU start address */
                 write_txq_start_addr(mbase, epnum, ring->dma);
                 mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
-                mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
                 /* send zero length packet according to ZLP flag in GPD */
                 mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
                 mtu3_writel(mbase, U3D_TQERRIESR0,
@@ -338,7 +353,6 @@
         } else {
                 write_rxq_start_addr(mbase, epnum, ring->dma);
                 mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
-                mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
                 /* don't expect ZLP */
                 mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
                 /* move to next GPD when receive ZLP */
@@ -407,27 +421,25 @@
         struct mtu3_gpd_ring *ring = &mep->gpd_ring;
         void __iomem *mbase = mtu->mac_base;
         struct qmu_gpd *gpd_current = NULL;
-        struct usb_request *req = NULL;
         struct mtu3_request *mreq;
         dma_addr_t cur_gpd_dma;
         u32 txcsr = 0;
         int ret;
 
         mreq = next_request(mep);
-        if (mreq && mreq->request.length == 0)
-                req = &mreq->request;
-        else
+        if (mreq && mreq->request.length != 0)
                 return;
 
         cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
         gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
 
-        if (le16_to_cpu(gpd_current->buf_len) != 0) {
+        if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
                 dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
                 return;
         }
 
-        dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, req);
+        dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
+        trace_mtu3_zlp_exp_gpd(mep, gpd_current);
 
         mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
@@ -438,11 +450,10 @@
                 return;
         }
         mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
-
+        /* prevent reorder, make sure GPD's HWO is set last */
+        mb();
         /* by pass the current GDP */
-        gpd_current->flag |= GPD_FLAGS_BPS;
-        gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
-        gpd_current->flag |= GPD_FLAGS_HWO;
+        gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
 
         /*enable DMAREQEN, switch back to QMU mode */
         mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
@@ -474,7 +485,7 @@
         dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                 __func__, epnum, gpd, gpd_current, ring->enqueue);
 
-        while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+        while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
                 mreq = next_request(mep);
 
@@ -484,7 +495,8 @@
                 }
 
                 request = &mreq->request;
-                request->actual = le16_to_cpu(gpd->buf_len);
+                request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+                trace_mtu3_complete_gpd(mep, gpd);
                 mtu3_req_complete(mep, request, 0);
 
                 gpd = advance_deq_gpd(ring);
@@ -512,7 +524,7 @@
         dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
                 __func__, epnum, gpd, gpd_current, ring->enqueue);
 
-        while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+        while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
                 mreq = next_request(mep);
 
@@ -522,7 +534,8 @@
                 }
                 req = &mreq->request;
 
-                req->actual = le16_to_cpu(gpd->buf_len);
+                req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
+                trace_mtu3_complete_gpd(mep, gpd);
                 mtu3_req_complete(mep, req, 0);
 
                 gpd = advance_deq_gpd(ring);
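The two completion walks above now also guard against an empty ring (NULL dequeue) and read ownership through GET_GPD_HWO() on the little-endian dw0_info, completing requests until they reach the descriptor the hardware is currently on or one it still owns. A small self-contained sketch of the same stop conditions follows; the array-backed ring, names, and EL-style 20-bit length are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for struct qmu_gpd; only the fields the walk needs. */
struct gpd {
        uint32_t dw0_info;      /* bit 0 = HWO: set while hardware owns the GPD */
        uint32_t dw3_info;      /* low 20 bits = transferred length (EL layout) */
};

#define GPD_HWO(g)   ((g)->dw0_info & 1u)
#define GPD_LEN(g)   ((g)->dw3_info & 0xfffff)

int main(void)
{
        struct gpd ring[4] = {
                { .dw0_info = 0, .dw3_info = 512 },  /* done, SW owns it again */
                { .dw0_info = 0, .dw3_info = 64  },  /* done */
                { .dw0_info = 1, .dw3_info = 0   },  /* still owned by HW */
                { 0 },
        };
        struct gpd *dequeue = &ring[0];
        struct gpd *cur = &ring[2];   /* what the queue's "current GPD" register reports */

        /* Same stop conditions as the patched completion loops. */
        while (dequeue && dequeue != cur && !GPD_HWO(dequeue)) {
                printf("complete request, actual=%u\n", GPD_LEN(dequeue));
                dequeue++;            /* the driver wraps via advance_deq_gpd() */
        }
        return 0;
}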
@@ -600,6 +613,7 @@
         dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
                 (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
                 qmu_status);
+        trace_mtu3_qmu_isr(qmu_done_status, qmu_status);
 
         if (qmu_done_status)
                 qmu_done_isr(mtu, qmu_done_status);