  .. |   .. |
  65 |   65 |         return seg;
  66 |   66 | }
  67 |   67 |
  68 |      | -static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
     |   68 | +void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  69 |   69 | {
  70 |   70 |         if (seg->trbs) {
  71 |   71 |                 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
  .. |   .. |
  74 |   74 |         kfree(seg->bounce_buf);
  75 |   75 |         kfree(seg);
  76 |   76 | }
     |   77 | +EXPORT_SYMBOL_GPL(xhci_segment_free);
  77 |   78 |
  78 |      | -static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
     |   79 | +void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
  79 |   80 |                                 struct xhci_segment *first)
  80 |   81 | {
  81 |   82 |         struct xhci_segment *seg;
  .. |   .. |
  96 |   97 |  * DMA address of the next segment.  The caller needs to set any Link TRB
  97 |   98 |  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  98 |   99 |  */
  99 |      | -static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 100 |      | -                struct xhci_segment *next, enum xhci_ring_type type)
     |  100 | +void xhci_link_segments(struct xhci_segment *prev,
     |  101 | +                struct xhci_segment *next,
     |  102 | +                enum xhci_ring_type type, bool chain_links)
 101 |  103 | {
 102 |  104 |         u32 val;
 103 |  105 |
  .. |   .. |
 112 |  114 |                 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 113 |  115 |                 val &= ~TRB_TYPE_BITMASK;
 114 |  116 |                 val |= TRB_TYPE(TRB_LINK);
 115 |      | -                /* Always set the chain bit with 0.95 hardware */
 116 |      | -                /* Set chain bit for isoc rings on AMD 0.96 host */
 117 |      | -                if (xhci_link_trb_quirk(xhci) ||
 118 |      | -                                (type == TYPE_ISOC &&
 119 |      | -                                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
     |  117 | +                if (chain_links)
 120 |  118 |                         val |= TRB_CHAIN;
 121 |  119 |                 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 122 |  120 |         }
 123 |  121 | }
     |  122 | +EXPORT_SYMBOL_GPL(xhci_link_segments);
 124 |  123 |
 125 |  124 | /*
 126 |  125 |  * Link the ring to the new segments.
  .. |   .. |
 131 |  130 |                 unsigned int num_segs)
 132 |  131 | {
 133 |  132 |         struct xhci_segment *next;
     |  133 | +        bool chain_links;
 134 |  134 |
 135 |  135 |         if (!ring || !first || !last)
 136 |  136 |                 return;
 137 |  137 |
     |  138 | +        /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
     |  139 | +        chain_links = !!(xhci_link_trb_quirk(xhci) ||
     |  140 | +                         (ring->type == TYPE_ISOC &&
     |  141 | +                          (xhci->quirks & XHCI_AMD_0x96_HOST)));
     |  142 | +
 138 |  143 |         next = ring->enq_seg->next;
 139 |      | -        xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
 140 |      | -        xhci_link_segments(xhci, last, next, ring->type);
     |  144 | +        xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
     |  145 | +        xhci_link_segments(last, next, ring->type, chain_links);
 141 |  146 |         ring->num_segs += num_segs;
 142 |  147 |         ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
 143 |  148 |
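
The chain_links value computed above is duplicated verbatim in xhci_alloc_segments_for_ring() further down. A minimal sketch of a shared helper both call sites could use (hypothetical; not part of this patch):

/* Hypothetical helper, not in this patch: 0.95 hosts always chain
 * Link TRBs, and AMD 0.96 hosts chain them on isoc rings.
 */
static bool xhci_ring_chain_links(struct xhci_hcd *xhci,
                                  enum xhci_ring_type type)
{
        return xhci_link_trb_quirk(xhci) ||
               (type == TYPE_ISOC &&
                (xhci->quirks & XHCI_AMD_0x96_HOST));
}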
---|
  .. |   .. |
 289 |  294 |
 290 |  295 |         kfree(ring);
 291 |  296 | }
     |  297 | +EXPORT_SYMBOL_GPL(xhci_ring_free);
 292 |  298 |
 293 |      | -static void xhci_initialize_ring_info(struct xhci_ring *ring,
 294 |      | -                        unsigned int cycle_state)
     |  299 | +void xhci_initialize_ring_info(struct xhci_ring *ring,
     |  300 | +                        unsigned int cycle_state)
 295 |  301 | {
 296 |  302 |         /* The ring is empty, so the enqueue pointer == dequeue pointer */
 297 |  303 |         ring->enqueue = ring->first_seg->trbs;
  .. |   .. |
 313 |  319 |          */
 314 |  320 |         ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
 315 |  321 | }
     |  322 | +EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
 316 |  323 |
 317 |  324 | /* Allocate segments and link them for a ring */
 318 |  325 | static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  .. |   .. |
 321 |  328 |                 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 322 |  329 | {
 323 |  330 |         struct xhci_segment *prev;
     |  331 | +        bool chain_links;
     |  332 | +
     |  333 | +        /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
     |  334 | +        chain_links = !!(xhci_link_trb_quirk(xhci) ||
     |  335 | +                         (type == TYPE_ISOC &&
     |  336 | +                          (xhci->quirks & XHCI_AMD_0x96_HOST)));
 324 |  337 |
 325 |  338 |         prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 326 |  339 |         if (!prev)
  .. |   .. |
 341 |  354 |                         }
 342 |  355 |                         return -ENOMEM;
 343 |  356 |                 }
 344 |      | -                xhci_link_segments(xhci, prev, next, type);
     |  357 | +                xhci_link_segments(prev, next, type, chain_links);
 345 |  358 |
 346 |  359 |                 prev = next;
 347 |  360 |                 num_segs--;
 348 |  361 |         }
 349 |      | -        xhci_link_segments(xhci, prev, *first, type);
     |  362 | +        xhci_link_segments(prev, *first, type, chain_links);
 350 |  363 |         *last = prev;
 351 |  364 |
 352 |  365 |         return 0;
 353 |  366 | }
 354 |  367 |
 355 |      | -/**
     |  368 | +static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
     |  369 | +{
     |  370 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
     |  371 | +
     |  372 | +        if (ops && ops->free_container_ctx)
     |  373 | +                ops->free_container_ctx(xhci, ctx);
     |  374 | +}
     |  375 | +
     |  376 | +static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
     |  377 | +                                            int type, gfp_t flags)
     |  378 | +{
     |  379 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
     |  380 | +
     |  381 | +        if (ops && ops->alloc_container_ctx)
     |  382 | +                ops->alloc_container_ctx(xhci, ctx, type, flags);
     |  383 | +}
     |  384 | +
     |  385 | +static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
     |  386 | +                u32 endpoint_type, enum xhci_ring_type ring_type,
     |  387 | +                unsigned int max_packet, gfp_t mem_flags)
     |  388 | +{
     |  389 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
     |  390 | +
     |  391 | +        if (ops && ops->alloc_transfer_ring)
     |  392 | +                return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
     |  393 | +                                max_packet, mem_flags);
     |  394 | +        return 0;
     |  395 | +}
     |  396 | +
     |  397 | +void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
     |  398 | +                struct xhci_virt_device *virt_dev, unsigned int ep_index)
     |  399 | +{
     |  400 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
     |  401 | +
     |  402 | +        if (ops && ops->free_transfer_ring)
     |  403 | +                ops->free_transfer_ring(xhci, virt_dev, ep_index);
     |  404 | +}
     |  405 | +
     |  406 | +bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
     |  407 | +                struct xhci_virt_device *virt_dev, unsigned int ep_index)
     |  408 | +{
     |  409 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
     |  410 | +
     |  411 | +        if (ops && ops->is_usb_offload_enabled)
     |  412 | +                return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
     |  413 | +        return false;
     |  414 | +}
     |  415 | +
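The wrappers above all dispatch through a vendor-supplied ops table and fall back to the default DMA-pool path when a callback is absent. A sketch of how an offload driver might populate the table (the my_* callbacks are assumptions; only the hooks exercised in this file are shown, including the DCBAA pair used further down):

/* Hypothetical vendor hook-up; xhci_vendor_get_ops() is assumed to
 * return this table once the platform glue has installed it.
 */
static struct xhci_vendor_ops my_offload_ops = {
        .is_usb_offload_enabled = my_is_offload_enabled,
        .alloc_dcbaa            = my_alloc_dcbaa,
        .free_dcbaa             = my_free_dcbaa,
        .alloc_transfer_ring    = my_alloc_transfer_ring,
        .free_transfer_ring     = my_free_transfer_ring,
        .alloc_container_ctx    = my_alloc_container_ctx,
        .free_container_ctx     = my_free_container_ctx,
};

Once the table is registered, isoc transfer rings, container contexts, and the DCBAA come from the vendor hooks instead of the host controller's DMA pools.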
---|
     |  416 | +/*
 356 |  417 |  * Create a new ring with zero or more segments.
 357 |  418 |  *
 358 |  419 |  * Link each segment together into a ring.
  .. |   .. |
 398 |  459 |         kfree(ring);
 399 |  460 |         return NULL;
 400 |  461 | }
     |  462 | +EXPORT_SYMBOL_GPL(xhci_ring_alloc);
 401 |  463 |
 402 |  464 | void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 403 |  465 |                 struct xhci_virt_device *virt_dev,
 404 |  466 |                 unsigned int ep_index)
 405 |  467 | {
 406 |      | -        xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
     |  468 | +        if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
     |  469 | +                xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
     |  470 | +        else
     |  471 | +                xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
     |  472 | +
 407 |  473 |         virt_dev->eps[ep_index].ring = NULL;
 408 |  474 | }
 409 |  475 |
  .. |   .. |
 462 |  528 | {
 463 |  529 |         struct xhci_container_ctx *ctx;
 464 |  530 |         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
     |  531 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
 465 |  532 |
 466 |  533 |         if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
 467 |  534 |                 return NULL;
  .. |   .. |
 475 |  542 |         if (type == XHCI_CTX_TYPE_INPUT)
 476 |  543 |                 ctx->size += CTX_SIZE(xhci->hcc_params);
 477 |  544 |
 478 |      | -        ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
     |  545 | +        if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
     |  546 | +            (ops && ops->alloc_container_ctx))
     |  547 | +                xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
     |  548 | +        else
     |  549 | +                ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
     |  550 | +
 479 |  551 |         if (!ctx->bytes) {
 480 |  552 |                 kfree(ctx);
 481 |  553 |                 return NULL;
  .. |   .. |
 486 |  558 | void xhci_free_container_ctx(struct xhci_hcd *xhci,
 487 |  559 |                              struct xhci_container_ctx *ctx)
 488 |  560 | {
     |  561 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
     |  562 | +
 489 |  563 |         if (!ctx)
 490 |  564 |                 return;
 491 |      | -        dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
     |  565 | +        if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
     |  566 | +            (ops && ops->free_container_ctx))
     |  567 | +                xhci_vendor_free_container_ctx(xhci, ctx);
     |  568 | +        else
     |  569 | +                dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
     |  570 | +
 492 |  571 |         kfree(ctx);
 493 |  572 | }
 494 |  573 |
  .. |   .. |
 510 |  589 |         return (struct xhci_slot_ctx *)
 511 |  590 |                 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
 512 |  591 | }
     |  592 | +EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);
 513 |  593 |
 514 |  594 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 515 |  595 |                                     struct xhci_container_ctx *ctx,
  .. |   .. |
 523 |  603 |         return (struct xhci_ep_ctx *)
 524 |  604 |                 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 525 |  605 | }
     |  606 | +EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
 526 |  607 |
 527 |  608 |
 528 |  609 | /***************** Streams structures manipulation *************************/
  .. |   .. |
 583 |  664 |         return ep->ring;
 584 |  665 | }
 585 |  666 |
 586 |      | -struct xhci_ring *xhci_stream_id_to_ring(
 587 |      | -                struct xhci_virt_device *dev,
 588 |      | -                unsigned int ep_index,
 589 |      | -                unsigned int stream_id)
 590 |      | -{
 591 |      | -        struct xhci_virt_ep *ep = &dev->eps[ep_index];
 592 |      | -
 593 |      | -        if (stream_id == 0)
 594 |      | -                return ep->ring;
 595 |      | -        if (!ep->stream_info)
 596 |      | -                return NULL;
 597 |      | -
 598 |      | -        if (stream_id >= ep->stream_info->num_streams)
 599 |      | -                return NULL;
 600 |      | -        return ep->stream_info->stream_rings[stream_id];
 601 |      | -}
 602 |      | -
 603 |  667 | /*
 604 |  668 |  * Change an endpoint's internal structure so it supports stream IDs.  The
 605 |  669 |  * number of requested streams includes stream 0, which cannot be used by device
  .. |   .. |
 650 |  714 |                         num_stream_ctxs, &stream_info->ctx_array_dma,
 651 |  715 |                         mem_flags);
 652 |  716 |         if (!stream_info->stream_ctx_array)
 653 |      | -                goto cleanup_ctx;
     |  717 | +                goto cleanup_ring_array;
 654 |  718 |         memset(stream_info->stream_ctx_array, 0,
 655 |  719 |                         sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
 656 |  720 |
  .. |   .. |
 711 |  775 |         }
 712 |  776 |         xhci_free_command(xhci, stream_info->free_streams_command);
 713 |  777 | cleanup_ctx:
     |  778 | +        xhci_free_stream_ctx(xhci,
     |  779 | +                stream_info->num_stream_ctxs,
     |  780 | +                stream_info->stream_ctx_array,
     |  781 | +                stream_info->ctx_array_dma);
     |  782 | +cleanup_ring_array:
 714 |  783 |         kfree(stream_info->stream_rings);
 715 |  784 | cleanup_info:
 716 |  785 |         kfree(stream_info);
  .. |   .. |
 897 |  966 |
 898 |  967 |         for (i = 0; i < 31; i++) {
 899 |  968 |                 if (dev->eps[i].ring)
 900 |      | -                        xhci_ring_free(xhci, dev->eps[i].ring);
     |  969 | +                        xhci_free_endpoint_ring(xhci, dev, i);
 901 |  970 |                 if (dev->eps[i].stream_info)
 902 |  971 |                         xhci_free_stream_info(xhci,
 903 |  972 |                                         dev->eps[i].stream_info);
 904 |      | -                /* Endpoints on the TT/root port lists should have been removed
 905 |      | -                 * when usb_disable_device() was called for the device.
 906 |      | -                 * We can't drop them anyway, because the udev might have gone
 907 |      | -                 * away by this point, and we can't tell what speed it was.
     |  973 | +                /*
     |  974 | +                 * Endpoints are normally deleted from the bandwidth list when
     |  975 | +                 * endpoints are dropped, before device is freed.
     |  976 | +                 * If host is dying or being removed then endpoints aren't
     |  977 | +                 * dropped cleanly, so delete the endpoint from list here.
     |  978 | +                 * Only applicable for hosts with software bandwidth checking.
 908 |  979 |                  */
 909 |      | -                if (!list_empty(&dev->eps[i].bw_endpoint_list))
 910 |      | -                        xhci_warn(xhci, "Slot %u endpoint %u "
 911 |      | -                                        "not removed from BW list!\n",
 912 |      | -                                        slot_id, i);
     |  980 | +
     |  981 | +                if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
     |  982 | +                        list_del_init(&dev->eps[i].bw_endpoint_list);
     |  983 | +                        xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
     |  984 | +                                 slot_id, i);
     |  985 | +                }
 913 |  986 |         }
 914 |  987 |         /* If this is a hub, free the TT(s) from the TT list */
 915 |  988 |         xhci_free_tt_info(xhci, dev, slot_id);
  .. |   .. |
 933 | 1006 |  * that tt_info, then free the child first. Recursive.
 934 | 1007 |  * We can't rely on udev at this point to find child-parent relationships.
 935 | 1008 |  */
 936 |      | -void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
     | 1009 | +static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
 937 | 1010 | {
 938 | 1011 |         struct xhci_virt_device *vdev;
 939 | 1012 |         struct list_head *tt_list_head;
  .. |   .. |
 985 | 1058 |         if (!dev)
 986 | 1059 |                 return 0;
 987 | 1060 |
     | 1061 | +        dev->slot_id = slot_id;
     | 1062 | +
 988 | 1063 |         /* Allocate the (output) device context that will be used in the HC. */
 989 | 1064 |         dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 990 | 1065 |         if (!dev->out_ctx)
  .. |   .. |
1003 | 1078 |
1004 | 1079 |         /* Initialize the cancellation list and watchdog timers for each ep */
1005 | 1080 |         for (i = 0; i < 31; i++) {
     | 1081 | +                dev->eps[i].ep_index = i;
     | 1082 | +                dev->eps[i].vdev = dev;
1006 | 1083 |                 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
1007 | 1084 |                 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
1008 | 1085 |                 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
  .. |   .. |
1302 | 1379 |                         interval = xhci_parse_microframe_interval(udev, ep);
1303 | 1380 |                         break;
1304 | 1381 |                 }
1305 |      | -                /* Fall through - SS and HS isoc/int have same decoding */
     | 1382 | +                fallthrough;    /* SS and HS isoc/int have same decoding */
1306 | 1383 |
1307 | 1384 |         case USB_SPEED_SUPER_PLUS:
1308 | 1385 |         case USB_SPEED_SUPER:
  .. |   .. |
1322 | 1399 |                  * since it uses the same rules as low speed interrupt
1323 | 1400 |                  * endpoints.
1324 | 1401 |                  */
1325 |      | -                /* fall through */
     | 1402 | +                fallthrough;
1326 | 1403 |
1327 | 1404 |         case USB_SPEED_LOW:
1328 | 1405 |                 if (usb_endpoint_xfer_int(&ep->desc) ||
  .. |   .. |
1492 | 1569 |                 mult = 0;
1493 | 1570 |
1494 | 1571 |         /* Set up the endpoint ring */
1495 |      | -        virt_dev->eps[ep_index].new_ring =
1496 |      | -                xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
     | 1572 | +        if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
     | 1573 | +            usb_endpoint_xfer_isoc(&ep->desc)) {
     | 1574 | +                virt_dev->eps[ep_index].new_ring =
     | 1575 | +                        xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
     | 1576 | +                                                        max_packet, mem_flags);
     | 1577 | +        } else {
     | 1578 | +                virt_dev->eps[ep_index].new_ring =
     | 1579 | +                        xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
     | 1580 | +        }
     | 1581 | +
1497 | 1582 |         if (!virt_dev->eps[ep_index].new_ring)
1498 | 1583 |                 return -ENOMEM;
1499 | 1584 |
  .. |   .. |
1678 | 1763 |         xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1679 | 1764 |         for (i = 0; i < num_sp; i++) {
1680 | 1765 |                 dma_addr_t dma;
1681 |      | -                void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
1682 |      | -                                flags);
     | 1766 | +                void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
     | 1767 | +                                               flags);
1683 | 1768 |                 if (!buf)
1684 | 1769 |                         goto fail_sp4;
1685 | 1770 |
  .. |   .. |
1760 | 1845 |         INIT_LIST_HEAD(&command->cmd_list);
1761 | 1846 |         return command;
1762 | 1847 | }
     | 1848 | +EXPORT_SYMBOL_GPL(xhci_alloc_command);
1763 | 1849 |
1764 | 1850 | struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1765 | 1851 |                 bool allocate_completion, gfp_t mem_flags)
  .. |   .. |
1793 | 1879 |         kfree(command->completion);
1794 | 1880 |         kfree(command);
1795 | 1881 | }
     | 1882 | +EXPORT_SYMBOL_GPL(xhci_free_command);
1796 | 1883 |
1797 | 1884 | int xhci_alloc_erst(struct xhci_hcd *xhci,
1798 | 1885 |                 struct xhci_ring *evt_ring,
  .. |   .. |
1805 | 1892 |         struct xhci_erst_entry *entry;
1806 | 1893 |
1807 | 1894 |         size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1808 |      | -        erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1809 |      | -                        size, &erst->erst_dma_addr, flags);
     | 1895 | +        erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
     | 1896 | +                        size, &erst->erst_dma_addr, flags);
1810 | 1897 |         if (!erst->entries)
1811 | 1898 |                 return -ENOMEM;
1812 | 1899 |
  .. |   .. |
1823 | 1910 |
1824 | 1911 |         return 0;
1825 | 1912 | }
     | 1913 | +EXPORT_SYMBOL_GPL(xhci_alloc_erst);
1826 | 1914 |
1827 | 1915 | void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
1828 | 1916 | {
  .. |   .. |
1836 | 1924 |                                 erst->erst_dma_addr);
1837 | 1925 |         erst->entries = NULL;
1838 | 1926 | }
     | 1927 | +EXPORT_SYMBOL_GPL(xhci_free_erst);
1839 | 1928 |
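xhci_alloc_erst() and xhci_free_erst() are now exported as a pair. A usage sketch (error handling trimmed; it mirrors the primary-interrupter path that xhci_mem_init() gains below):

struct xhci_ring *er;
struct xhci_erst erst;

er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, GFP_KERNEL);
if (er && !xhci_alloc_erst(xhci, er, &erst, GFP_KERNEL)) {
        /* ... program erst_size, erst_base and erst_dequeue ... */
        xhci_free_erst(xhci, &erst);
}
if (er)
        xhci_ring_free(xhci, er);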
---|
1840 |      | -void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
     | 1929 | +static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
     | 1930 | +                struct xhci_hcd *xhci, gfp_t flags)
1841 | 1931 | {
1842 |      | -        union xhci_trb *erdp_trb, *current_trb;
1843 |      | -        struct xhci_segment *seg;
1844 |      | -        u64 erdp_reg;
1845 |      | -        u32 iman_reg;
1846 |      | -        dma_addr_t deq;
1847 |      | -        unsigned long segment_offset;
     | 1932 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
1848 | 1933 |
1849 |      | -        /* disable irq, ack pending interrupt and ack all pending events */
1850 |      | -
1851 |      | -        iman_reg =
1852 |      | -                readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
1853 |      | -        iman_reg &= ~IMAN_IE;
1854 |      | -        writel_relaxed(iman_reg,
1855 |      | -                &xhci->sec_ir_set[intr_num]->irq_pending);
1856 |      | -        iman_reg =
1857 |      | -                readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
1858 |      | -        if (iman_reg & IMAN_IP)
1859 |      | -                writel_relaxed(iman_reg,
1860 |      | -                        &xhci->sec_ir_set[intr_num]->irq_pending);
1861 |      | -
1862 |      | -        /* last acked event trb is in erdp reg */
1863 |      | -        erdp_reg =
1864 |      | -                xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
1865 |      | -        deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
1866 |      | -        if (!deq) {
1867 |      | -                pr_debug("%s: event ring handling not required\n", __func__);
1868 |      | -                return;
1869 |      | -        }
1870 |      | -
1871 |      | -        seg = xhci->sec_event_ring[intr_num]->first_seg;
1872 |      | -        segment_offset = deq - seg->dma;
1873 |      | -
1874 |      | -        /* find out virtual address of the last acked event trb */
1875 |      | -        erdp_trb = current_trb = &seg->trbs[0] +
1876 |      | -                (segment_offset/sizeof(*current_trb));
1877 |      | -
1878 |      | -        /* read cycle state of the last acked trb to find out CCS */
1879 |      | -        xhci->sec_event_ring[intr_num]->cycle_state =
1880 |      | -                (current_trb->event_cmd.flags & TRB_CYCLE);
1881 |      | -
1882 |      | -        while (1) {
1883 |      | -                /* last trb of the event ring: toggle cycle state */
1884 |      | -                if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
1885 |      | -                        xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
1886 |      | -                        current_trb = &seg->trbs[0];
1887 |      | -                } else {
1888 |      | -                        current_trb++;
1889 |      | -                }
1890 |      | -
1891 |      | -                /* cycle state transition */
1892 |      | -                if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
1893 |      | -                    xhci->sec_event_ring[intr_num]->cycle_state)
1894 |      | -                        break;
1895 |      | -        }
1896 |      | -
1897 |      | -        if (erdp_trb != current_trb) {
1898 |      | -                deq =
1899 |      | -                        xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
1900 |      | -                                        current_trb);
1901 |      | -                if (deq == 0)
1902 |      | -                        xhci_warn(xhci,
1903 |      | -                                "WARN invalid SW event ring dequeue ptr.\n");
1904 |      | -                /* Update HC event ring dequeue pointer */
1905 |      | -                erdp_reg &= ERST_PTR_MASK;
1906 |      | -                erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
1907 |      | -        }
1908 |      | -
1909 |      | -        /* Clear the event handler busy flag (RW1C); event ring is empty. */
1910 |      | -        erdp_reg |= ERST_EHB;
1911 |      | -        xhci_write_64(xhci, erdp_reg,
1912 |      | -                &xhci->sec_ir_set[intr_num]->erst_dequeue);
1913 |      | -}
1914 |      | -
1915 |      | -int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num)
1916 |      | -{
1917 |      | -        int size;
1918 |      | -        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1919 |      | -        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1920 |      | -
1921 |      | -        if (intr_num >= xhci->max_interrupters) {
1922 |      | -                xhci_err(xhci, "invalid secondary interrupter num %d\n",
1923 |      | -                        intr_num);
1924 |      | -                return -EINVAL;
1925 |      | -        }
1926 |      | -
1927 |      | -        size =
1928 |      | -                sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
1929 |      | -        if (xhci->sec_erst[intr_num].entries) {
1930 |      | -                xhci_handle_sec_intr_events(xhci, intr_num);
1931 |      | -                dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
1932 |      | -                                xhci->sec_erst[intr_num].erst_dma_addr);
1933 |      | -                xhci->sec_erst[intr_num].entries = NULL;
1934 |      | -        }
1935 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
1936 |      | -                intr_num);
1937 |      | -        if (xhci->sec_event_ring[intr_num])
1938 |      | -                xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
1939 |      | -
1940 |      | -        xhci->sec_event_ring[intr_num] = NULL;
1941 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1942 |      | -                "Freed sec event ring");
1943 |      | -
     | 1934 | +        if (ops && ops->alloc_dcbaa)
     | 1935 | +                return ops->alloc_dcbaa(xhci, flags);
1944 | 1936 |         return 0;
1945 | 1937 | }
1946 | 1938 |
1947 |      | -void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
     | 1939 | +static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
1948 | 1940 | {
1949 |      | -        unsigned int i;
     | 1941 | +        struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
1950 | 1942 |
1951 |      | -        /* sec event ring clean up */
1952 |      | -        for (i = 1; i < xhci->max_interrupters; i++)
1953 |      | -                xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
1954 |      | -
1955 |      | -        kfree(xhci->sec_ir_set);
1956 |      | -        xhci->sec_ir_set = NULL;
1957 |      | -        kfree(xhci->sec_erst);
1958 |      | -        xhci->sec_erst = NULL;
1959 |      | -        kfree(xhci->sec_event_ring);
1960 |      | -        xhci->sec_event_ring = NULL;
1961 |      | -
1962 |      | -        /* primary event ring clean up */
1963 |      | -        xhci_free_erst(xhci, &xhci->erst);
1964 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
1965 |      | -        if (xhci->event_ring)
1966 |      | -                xhci_ring_free(xhci, xhci->event_ring);
1967 |      | -        xhci->event_ring = NULL;
1968 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed priamry event ring");
     | 1943 | +        if (ops && ops->free_dcbaa)
     | 1944 | +                ops->free_dcbaa(xhci);
1969 | 1945 | }
1970 | 1946 |
1971 | 1947 | void xhci_mem_cleanup(struct xhci_hcd *xhci)
  .. |   .. |
1975 | 1951 |
1976 | 1952 |         cancel_delayed_work_sync(&xhci->cmd_timer);
1977 | 1953 |
1978 |      | -        xhci_event_ring_cleanup(xhci);
     | 1954 | +        xhci_free_erst(xhci, &xhci->erst);
     | 1955 | +
     | 1956 | +        if (xhci->event_ring)
     | 1957 | +                xhci_ring_free(xhci, xhci->event_ring);
     | 1958 | +        xhci->event_ring = NULL;
     | 1959 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1979 | 1960 |
1980 | 1961 |         if (xhci->lpm_command)
1981 | 1962 |                 xhci_free_command(xhci, xhci->lpm_command);
  .. |   .. |
2017 | 1998 |         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2018 | 1999 |                         "Freed medium stream array pool");
2019 | 2000 |
2020 |      | -        if (xhci->dcbaa)
2021 |      | -                dma_free_coherent(dev, sizeof(*xhci->dcbaa),
2022 |      | -                                xhci->dcbaa, xhci->dcbaa->dma);
     | 2001 | +        if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
     | 2002 | +                xhci_vendor_free_dcbaa(xhci);
     | 2003 | +        } else {
     | 2004 | +                if (xhci->dcbaa)
     | 2005 | +                        dma_free_coherent(dev, sizeof(*xhci->dcbaa),
     | 2006 | +                                          xhci->dcbaa, xhci->dcbaa->dma);
     | 2007 | +        }
2023 | 2008 |         xhci->dcbaa = NULL;
2024 | 2009 |
2025 | 2010 |         scratchpad_free(xhci);
  .. |   .. |
2059 | 2044 |
2060 | 2045 |         xhci->page_size = 0;
2061 | 2046 |         xhci->page_shift = 0;
2062 |      | -        xhci->bus_state[0].bus_suspended = 0;
2063 |      | -        xhci->bus_state[1].bus_suspended = 0;
     | 2047 | +        xhci->usb2_rhub.bus_state.bus_suspended = 0;
     | 2048 | +        xhci->usb3_rhub.bus_state.bus_suspended = 0;
2064 | 2049 | }
2065 | 2050 |
2066 | 2051 | static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
  .. |   .. |
2100 | 2085 | }
2101 | 2086 |
2102 | 2087 | /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
2103 |      | -static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
     | 2088 | +int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
2104 | 2089 | {
2105 | 2090 |         struct {
2106 | 2091 |                 dma_addr_t              input_dma;
  .. |   .. |
2220 | 2205 |         xhci_dbg(xhci, "TRB math tests passed.\n");
2221 | 2206 |         return 0;
2222 | 2207 | }
     | 2208 | +EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);
     | 2209 | +
     | 2210 | +static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
     | 2211 | +{
     | 2212 | +        u64 temp;
     | 2213 | +        dma_addr_t deq;
     | 2214 | +
     | 2215 | +        deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
     | 2216 | +                        xhci->event_ring->dequeue);
     | 2217 | +        if (deq == 0 && !in_interrupt())
     | 2218 | +                xhci_warn(xhci, "WARN something wrong with SW event ring "
     | 2219 | +                                "dequeue ptr.\n");
     | 2220 | +        /* Update HC event ring dequeue pointer */
     | 2221 | +        temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
     | 2222 | +        temp &= ERST_PTR_MASK;
     | 2223 | +        /* Don't clear the EHB bit (which is RW1C) because
     | 2224 | +         * there might be more events to service.
     | 2225 | +         */
     | 2226 | +        temp &= ~ERST_EHB;
     | 2227 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
     | 2228 | +                        "// Write event ring dequeue pointer, "
     | 2229 | +                        "preserving EHB bit");
     | 2230 | +        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
     | 2231 | +                        &xhci->ir_set->erst_dequeue);
     | 2232 | +}
2223 | 2233 |
2224 | 2234 | static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2225 | 2235 |                 __le32 __iomem *addr, int max_caps)
2226 | 2236 | {
2227 | 2237 |         u32 temp, port_offset, port_count;
2228 | 2238 |         int i;
2229 |      | -        u8 major_revision, minor_revision;
     | 2239 | +        u8 major_revision, minor_revision, tmp_minor_revision;
2230 | 2240 |         struct xhci_hub *rhub;
2231 | 2241 |         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2232 | 2242 |         struct xhci_port_cap *port_cap;
  .. |   .. |
2246 | 2256 |                  */
2247 | 2257 |                 if (minor_revision > 0x00 && minor_revision < 0x10)
2248 | 2258 |                         minor_revision <<= 4;
     | 2259 | +                /*
     | 2260 | +                 * Some zhaoxin's xHCI controller that follow usb3.1 spec
     | 2261 | +                 * but only support Gen1.
     | 2262 | +                 */
     | 2263 | +                if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
     | 2264 | +                        tmp_minor_revision = minor_revision;
     | 2265 | +                        minor_revision = 0;
     | 2266 | +                }
     | 2267 | +
2249 | 2268 |         } else if (major_revision <= 0x02) {
2250 | 2269 |                 rhub = &xhci->usb2_rhub;
2251 | 2270 |         } else {
  .. |   .. |
2255 | 2274 |                 /* Ignoring port protocol we can't understand. FIXME */
2256 | 2275 |                 return;
2257 | 2276 |         }
2258 |      | -        rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2259 |      | -
2260 |      | -        if (rhub->min_rev < minor_revision)
2261 |      | -                rhub->min_rev = minor_revision;
2262 | 2277 |
2263 | 2278 |         /* Port offset and count in the third dword, see section 7.2 */
2264 | 2279 |         temp = readl(addr + 2);
  .. |   .. |
2277 | 2292 |         if (xhci->num_port_caps > max_caps)
2278 | 2293 |                 return;
2279 | 2294 |
2280 |      | -        port_cap->maj_rev = major_revision;
2281 |      | -        port_cap->min_rev = minor_revision;
2282 | 2295 |         port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2283 | 2296 |
2284 | 2297 |         if (port_cap->psi_count) {
  .. |   .. |
2299 | 2312 |                              XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2300 | 2313 |                                 port_cap->psi_uid_count++;
2301 | 2314 |
     | 2315 | +                        if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
     | 2316 | +                            major_revision == 0x03 &&
     | 2317 | +                            XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
     | 2318 | +                                minor_revision = tmp_minor_revision;
     | 2319 | +
2302 | 2320 |                         xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2303 | 2321 |                                   XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2304 | 2322 |                                   XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
  .. |   .. |
2308 | 2326 |                                   XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2309 | 2327 |                 }
2310 | 2328 |         }
     | 2329 | +
     | 2330 | +        rhub->maj_rev = major_revision;
     | 2331 | +
     | 2332 | +        if (rhub->min_rev < minor_revision)
     | 2333 | +                rhub->min_rev = minor_revision;
     | 2334 | +
     | 2335 | +        port_cap->maj_rev = major_revision;
     | 2336 | +        port_cap->min_rev = minor_revision;
     | 2337 | +
2311 | 2338 |         /* cache usb2 port capabilities */
2312 | 2339 |         if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2313 | 2340 |                 xhci->ext_caps[xhci->num_ext_caps++] = temp;
2314 | 2341 |
2315 |      | -        /* Check the host's USB2 LPM capability */
2316 |      | -        if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2317 |      | -                        (temp & XHCI_L1C)) {
     | 2342 | +        if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
     | 2343 | +            (temp & XHCI_HLC)) {
2318 | 2344 |                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2319 |      | -                                "xHCI 0.96: support USB2 software lpm");
2320 |      | -                xhci->sw_lpm_support = 1;
2321 |      | -        }
2322 |      | -
2323 |      | -        if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2324 |      | -                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2325 |      | -                                "xHCI 1.0: support USB2 software lpm");
2326 |      | -                xhci->sw_lpm_support = 1;
2327 |      | -                if (temp & XHCI_HLC) {
2328 |      | -                        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2329 |      | -                                        "xHCI 1.0: support USB2 hardware lpm");
2330 |      | -                        xhci->hw_lpm_support = 1;
2331 |      | -                }
     | 2345 | +                               "xHCI 1.0: support USB2 hardware lpm");
     | 2346 | +                xhci->hw_lpm_support = 1;
2332 | 2347 |         }
2333 | 2348 |
2334 | 2349 |         port_offset--;
  .. |   .. |
2367 | 2382 |
2368 | 2383 |         if (!rhub->num_ports)
2369 | 2384 |                 return;
2370 |      | -        rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
2371 |      | -                        dev_to_node(dev));
     | 2385 | +        rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
     | 2386 | +                        flags, dev_to_node(dev));
     | 2387 | +        if (!rhub->ports)
     | 2388 | +                return;
     | 2389 | +
2372 | 2390 |         for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2373 | 2391 |                 if (xhci->hw_ports[i].rhub != rhub ||
2374 | 2392 |                     xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
  .. |   .. |
2493 | 2511 |         return 0;
2494 | 2512 | }
2495 | 2513 |
2496 |      | -int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
2497 |      | -        struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
2498 |      | -        unsigned int intr_num, gfp_t flags)
2499 |      | -{
2500 |      | -        dma_addr_t deq;
2501 |      | -        u64 val_64;
2502 |      | -        unsigned int val;
2503 |      | -        int ret;
2504 |      | -
2505 |      | -        *er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
2506 |      | -        if (!*er)
2507 |      | -                return -ENOMEM;
2508 |      | -
2509 |      | -        ret = xhci_alloc_erst(xhci, *er, erst, flags);
2510 |      | -        if (ret)
2511 |      | -                return ret;
2512 |      | -
2513 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2514 |      | -                "intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
2515 |      | -                intr_num,
2516 |      | -                erst->num_entries,
2517 |      | -                erst->entries,
2518 |      | -                (unsigned long long)erst->erst_dma_addr);
2519 |      | -
2520 |      | -        /* set ERST count with the number of entries in the segment table */
2521 |      | -        val = readl_relaxed(&ir_set->erst_size);
2522 |      | -        val &= ERST_SIZE_MASK;
2523 |      | -        val |= ERST_NUM_SEGS;
2524 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2525 |      | -                "Write ERST size = %i to ir_set %d (some bits preserved)", val,
2526 |      | -                intr_num);
2527 |      | -        writel_relaxed(val, &ir_set->erst_size);
2528 |      | -
2529 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2530 |      | -                "intr# %d: Set ERST entries to point to event ring.",
2531 |      | -                intr_num);
2532 |      | -        /* set the segment table base address */
2533 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2534 |      | -                "Set ERST base address for ir_set %d = 0x%llx",
2535 |      | -                intr_num,
2536 |      | -                (unsigned long long)erst->erst_dma_addr);
2537 |      | -        val_64 = xhci_read_64(xhci, &ir_set->erst_base);
2538 |      | -        val_64 &= ERST_PTR_MASK;
2539 |      | -        val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
2540 |      | -        xhci_write_64(xhci, val_64, &ir_set->erst_base);
2541 |      | -
2542 |      | -        /* Set the event ring dequeue address */
2543 |      | -        deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
2544 |      | -        if (deq == 0 && !in_interrupt())
2545 |      | -                xhci_warn(xhci,
2546 |      | -                        "intr# %d:WARN something wrong with SW event ring deq ptr.\n",
2547 |      | -                        intr_num);
2548 |      | -        /* Update HC event ring dequeue pointer */
2549 |      | -        val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
2550 |      | -        val_64 &= ERST_PTR_MASK;
2551 |      | -        /* Don't clear the EHB bit (which is RW1C) because
2552 |      | -         * there might be more events to service.
2553 |      | -         */
2554 |      | -        val_64 &= ~ERST_EHB;
2555 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2556 |      | -                "intr# %d:Write event ring dequeue pointer, preserving EHB bit",
2557 |      | -                intr_num);
2558 |      | -        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
2559 |      | -                &ir_set->erst_dequeue);
2560 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2561 |      | -                "Wrote ERST address to ir_set %d.", intr_num);
2562 |      | -
2563 |      | -        return 0;
2564 |      | -}
2565 |      | -
2566 |      | -int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num)
2567 |      | -{
2568 |      | -        int ret;
2569 |      | -        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2570 |      | -
2571 |      | -        if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
2572 |      | -                || !xhci->sec_event_ring || !xhci->sec_erst ||
2573 |      | -                intr_num >= xhci->max_interrupters) {
2574 |      | -                xhci_err(xhci,
2575 |      | -                        "%s:state %x ir_set %pK evt_ring %pK erst %pK intr# %d\n",
2576 |      | -                        __func__, xhci->xhc_state, xhci->sec_ir_set,
2577 |      | -                        xhci->sec_event_ring, xhci->sec_erst, intr_num);
2578 |      | -                return -EINVAL;
2579 |      | -        }
2580 |      | -
2581 |      | -        if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
2582 |      | -                && xhci->sec_event_ring[intr_num]->first_seg)
2583 |      | -                goto done;
2584 |      | -
2585 |      | -        xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
2586 |      | -        ret = xhci_event_ring_setup(xhci,
2587 |      | -                &xhci->sec_event_ring[intr_num],
2588 |      | -                xhci->sec_ir_set[intr_num],
2589 |      | -                &xhci->sec_erst[intr_num],
2590 |      | -                intr_num, GFP_KERNEL);
2591 |      | -        if (ret) {
2592 |      | -                xhci_err(xhci, "sec event ring setup failed inter#%d\n",
2593 |      | -                        intr_num);
2594 |      | -                return ret;
2595 |      | -        }
2596 |      | -done:
2597 |      | -        return 0;
2598 |      | -}
2599 |      | -
2600 |      | -int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
2601 |      | -{
2602 |      | -        int ret = 0;
2603 |      | -
2604 |      | -        /* primary + secondary */
2605 |      | -        xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
2606 |      | -
2607 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2608 |      | -                "// Allocating primary event ring");
2609 |      | -
2610 |      | -        /* Set ir_set to interrupt register set 0 */
2611 |      | -        xhci->ir_set = &xhci->run_regs->ir_set[0];
2612 |      | -        ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
2613 |      | -                &xhci->erst, 0, flags);
2614 |      | -        if (ret) {
2615 |      | -                xhci_err(xhci, "failed to setup primary event ring\n");
2616 |      | -                goto fail;
2617 |      | -        }
2618 |      | -
2619 |      | -        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2620 |      | -                "// Allocating sec event ring related pointers");
2621 |      | -
2622 |      | -        xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
2623 |      | -                sizeof(*xhci->sec_ir_set), flags);
2624 |      | -        if (!xhci->sec_ir_set) {
2625 |      | -                ret = -ENOMEM;
2626 |      | -                goto fail;
2627 |      | -        }
2628 |      | -
2629 |      | -        xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
2630 |      | -                sizeof(*xhci->sec_event_ring), flags);
2631 |      | -        if (!xhci->sec_event_ring) {
2632 |      | -                ret = -ENOMEM;
2633 |      | -                goto fail;
2634 |      | -        }
2635 |      | -
2636 |      | -        xhci->sec_erst = kcalloc(xhci->max_interrupters,
2637 |      | -                sizeof(*xhci->sec_erst), flags);
2638 |      | -        if (!xhci->sec_erst)
2639 |      | -                ret = -ENOMEM;
2640 |      | -fail:
2641 |      | -        return ret;
2642 |      | -}
2643 |      | -
2644 | 2514 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2645 | 2515 | {
  .. |   .. |
2648 | 2518 |         unsigned int val, val2;
2649 | 2519 |         u64 val_64;
2650 | 2520 |         u32 page_size, temp;
2651 |      | -        int i;
     | 2521 | +        int i, ret;
2652 | 2522 |
2653 | 2523 |         INIT_LIST_HEAD(&xhci->cmd_list);
2654 | 2524 |
  .. |   .. |
2692 | 2562 |          * xHCI section 5.4.6 - doorbell array must be
2693 | 2563 |          * "physically contiguous and 64-byte (cache line) aligned".
2694 | 2564 |          */
2695 |      | -        xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2696 |      | -                        flags);
2697 |      | -        if (!xhci->dcbaa)
2698 |      | -                goto fail;
2699 |      | -        memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2700 |      | -        xhci->dcbaa->dma = dma;
     | 2565 | +        if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
     | 2566 | +                xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
     | 2567 | +                if (!xhci->dcbaa)
     | 2568 | +                        goto fail;
     | 2569 | +        } else {
     | 2570 | +                xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
     | 2571 | +                                flags);
     | 2572 | +                if (!xhci->dcbaa)
     | 2573 | +                        goto fail;
     | 2574 | +                xhci->dcbaa->dma = dma;
     | 2575 | +        }
2701 | 2576 |         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2702 | 2577 |                         "// Device context base array address = 0x%llx (DMA), %p (virt)",
2703 | 2578 |                         (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2704 |      | -        xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
     | 2579 | +        xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
2705 | 2580 |
2706 | 2581 |         /*
2707 | 2582 |          * Initialize the ring segment pool.  The ring must be a contiguous
  .. |   .. |
2710 | 2585 |          * and our use of dma addresses in the trb_address_map radix tree needs
2711 | 2586 |          * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2712 | 2587 |          */
2713 |      | -        xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2714 |      | -                        TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
     | 2588 | +        if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
     | 2589 | +                xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
     | 2590 | +                                TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
     | 2591 | +        else
     | 2592 | +                xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
     | 2593 | +                                TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2715 | 2594 |
2716 | 2595 |         /* See Table 46 and Note on Figure 55 */
2717 | 2596 |         xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
  .. |   .. |
2769 | 2648 |                         "// Doorbell array is located at offset 0x%x"
2770 | 2649 |                         " from cap regs base addr", val);
2771 | 2650 |         xhci->dba = (void __iomem *) xhci->cap_regs + val;
     | 2651 | +        /* Set ir_set to interrupt register set 0 */
     | 2652 | +        xhci->ir_set = &xhci->run_regs->ir_set[0];
2772 | 2653 |
2773 | 2654 |         /*
2774 | 2655 |          * Event ring setup: Allocate a normal ring, but also setup
2775 | 2656 |          * the event ring segment table (ERST).  Section 4.9.3.
2776 | 2657 |          */
2777 |      | -        if (xhci_event_ring_init(xhci, GFP_KERNEL))
     | 2658 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
     | 2659 | +        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
     | 2660 | +                                           0, flags);
     | 2661 | +        if (!xhci->event_ring)
2778 | 2662 |                 goto fail;
2779 |      | -
2780 | 2663 |         if (xhci_check_trb_in_td_math(xhci) < 0)
2781 | 2664 |                 goto fail;
     | 2665 | +
     | 2666 | +        ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
     | 2667 | +        if (ret)
     | 2668 | +                goto fail;
     | 2669 | +
     | 2670 | +        /* set ERST count with the number of entries in the segment table */
     | 2671 | +        val = readl(&xhci->ir_set->erst_size);
     | 2672 | +        val &= ERST_SIZE_MASK;
     | 2673 | +        val |= ERST_NUM_SEGS;
     | 2674 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
     | 2675 | +                        "// Write ERST size = %i to ir_set 0 (some bits preserved)",
     | 2676 | +                        val);
     | 2677 | +        writel(val, &xhci->ir_set->erst_size);
     | 2678 | +
     | 2679 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
     | 2680 | +                        "// Set ERST entries to point to event ring.");
     | 2681 | +        /* set the segment table base address */
     | 2682 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
     | 2683 | +                        "// Set ERST base address for ir_set 0 = 0x%llx",
     | 2684 | +                        (unsigned long long)xhci->erst.erst_dma_addr);
     | 2685 | +        val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
     | 2686 | +        val_64 &= ERST_PTR_MASK;
     | 2687 | +        val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
     | 2688 | +        xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
     | 2689 | +
     | 2690 | +        /* Set the event ring dequeue address */
     | 2691 | +        xhci_set_hc_event_deq(xhci);
     | 2692 | +        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
     | 2693 | +                        "Wrote ERST address to ir_set 0.");
2782 | 2694 |
2783 | 2695 |         /*
2784 | 2696 |          * XXX: Might need to set the Interrupter Moderation Register to
  .. |   .. |
2788 | 2700 |         for (i = 0; i < MAX_HC_SLOTS; i++)
2789 | 2701 |                 xhci->devs[i] = NULL;
2790 | 2702 |         for (i = 0; i < USB_MAXCHILDREN; i++) {
2791 |      | -                xhci->bus_state[0].resume_done[i] = 0;
2792 |      | -                xhci->bus_state[1].resume_done[i] = 0;
     | 2703 | +                xhci->usb2_rhub.bus_state.resume_done[i] = 0;
     | 2704 | +                xhci->usb3_rhub.bus_state.resume_done[i] = 0;
2793 | 2705 |                 /* Only the USB 2.0 completions will ever be used. */
2794 |      | -                init_completion(&xhci->bus_state[1].rexit_done[i]);
     | 2706 | +                init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
     | 2707 | +                init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
2795 | 2708 |         }
2796 | 2709 |
2797 | 2710 |         if (scratchpad_alloc(xhci, flags))
  .. |   .. |
2812 | 2725 |
2813 | 2726 | fail:
2814 | 2727 |         xhci_halt(xhci);
2815 |      | -        xhci_reset(xhci);
     | 2728 | +        xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
2816 | 2729 |         xhci_mem_cleanup(xhci);
2817 | 2730 |         return -ENOMEM;
2818 | 2731 | }