2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/usb/host/xhci-mem.c
@@ -65,7 +65,7 @@
 	return seg;
 }
 
-static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 {
 	if (seg->trbs) {
 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
@@ -74,8 +74,9 @@
 		kfree(seg->bounce_buf);
 	kfree(seg);
 }
+EXPORT_SYMBOL_GPL(xhci_segment_free);
 
-static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
+void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
 				struct xhci_segment *first)
 {
 	struct xhci_segment *seg;
@@ -96,8 +97,9 @@
  * DMA address of the next segment.  The caller needs to set any Link TRB
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
-static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, enum xhci_ring_type type)
+void xhci_link_segments(struct xhci_segment *prev,
+			struct xhci_segment *next,
+			enum xhci_ring_type type, bool chain_links)
 {
 	u32 val;
 
@@ -112,15 +114,12 @@
 		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
-		/* Always set the chain bit with 0.95 hardware */
-		/* Set chain bit for isoc rings on AMD 0.96 host */
-		if (xhci_link_trb_quirk(xhci) ||
-				(type == TYPE_ISOC &&
-				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
+		if (chain_links)
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
 }
+EXPORT_SYMBOL_GPL(xhci_link_segments);
 
 /*
  * Link the ring to the new segments.
@@ -131,13 +130,19 @@
 		unsigned int num_segs)
 {
 	struct xhci_segment *next;
+	bool chain_links;
 
 	if (!ring || !first || !last)
 		return;
 
+	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
+	chain_links = !!(xhci_link_trb_quirk(xhci) ||
+			 (ring->type == TYPE_ISOC &&
+			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
+
 	next = ring->enq_seg->next;
-	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
-	xhci_link_segments(xhci, last, next, ring->type);
+	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
+	xhci_link_segments(last, next, ring->type, chain_links);
 	ring->num_segs += num_segs;
 	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
 
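A note on the two hunks above: the chain-bit policy that xhci_link_segments() used to evaluate on every link is now computed once per operation by the caller and passed down as chain_links, so ring expansion (here) and initial segment allocation (below) share one rule. Factored out for illustration, the predicate both call sites open-code is the following (hypothetical helper name, not part of the patch):

	/* Sketch only: the chain-bit rule used by both call sites.
	 * xhci_link_trb_quirk() is true on 0.95 hosts, which must always
	 * set the chain bit in Link TRBs; AMD 0.96 hosts need it on
	 * isochronous rings.
	 */
	static bool chain_links_for(struct xhci_hcd *xhci,
				    enum xhci_ring_type type)
	{
		return !!(xhci_link_trb_quirk(xhci) ||
			  (type == TYPE_ISOC &&
			   (xhci->quirks & XHCI_AMD_0x96_HOST)));
	}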
@@ -289,9 +294,10 @@
 
 	kfree(ring);
 }
+EXPORT_SYMBOL_GPL(xhci_ring_free);
 
-static void xhci_initialize_ring_info(struct xhci_ring *ring,
-			unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring,
+			       unsigned int cycle_state)
 {
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
@@ -313,6 +319,7 @@
 	 */
 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
 }
+EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
 
 /* Allocate segments and link them for a ring */
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
@@ -321,6 +328,12 @@
 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
+	bool chain_links;
+
+	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
+	chain_links = !!(xhci_link_trb_quirk(xhci) ||
+			 (type == TYPE_ISOC &&
+			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
 
 	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 	if (!prev)
@@ -341,18 +354,66 @@
 			}
 			return -ENOMEM;
 		}
-		xhci_link_segments(xhci, prev, next, type);
+		xhci_link_segments(prev, next, type, chain_links);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, *first, type);
+	xhci_link_segments(prev, *first, type, chain_links);
 	*last = prev;
 
 	return 0;
 }
 
-/**
+static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+{
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+	if (ops && ops->free_container_ctx)
+		ops->free_container_ctx(xhci, ctx);
+}
+
+static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+		int type, gfp_t flags)
+{
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+	if (ops && ops->alloc_container_ctx)
+		ops->alloc_container_ctx(xhci, ctx, type, flags);
+}
+
+static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
+		u32 endpoint_type, enum xhci_ring_type ring_type,
+		unsigned int max_packet, gfp_t mem_flags)
+{
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+	if (ops && ops->alloc_transfer_ring)
+		return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
+				max_packet, mem_flags);
+	return 0;
+}
+
+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+	if (ops && ops->free_transfer_ring)
+		ops->free_transfer_ring(xhci, virt_dev, ep_index);
+}
+
+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+	if (ops && ops->is_usb_offload_enabled)
+		return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
+	return false;
+}
+
+/*
  * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
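The xhci_vendor_*() wrappers above dispatch through an optional per-platform ops table returned by xhci_vendor_get_ops(), which is declared elsewhere (in xhci.h) and not shown in this diff. Collecting only the callbacks this file exercises, the table is shaped roughly as follows; this sketch is inferred from the calls above, and the real declaration may carry additional hooks:

	/* Sketch, inferred from the wrapper calls in this diff */
	struct xhci_vendor_ops {
		bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci,
					       struct xhci_virt_device *virt_dev,
					       unsigned int ep_index);
		struct xhci_device_context_array *(*alloc_dcbaa)(
				struct xhci_hcd *xhci, gfp_t flags);
		void (*free_dcbaa)(struct xhci_hcd *xhci);
		void (*alloc_container_ctx)(struct xhci_hcd *xhci,
				struct xhci_container_ctx *ctx,
				int type, gfp_t flags);
		void (*free_container_ctx)(struct xhci_hcd *xhci,
				struct xhci_container_ctx *ctx);
		struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
				u32 endpoint_type, enum xhci_ring_type ring_type,
				unsigned int max_packet, gfp_t mem_flags);
		void (*free_transfer_ring)(struct xhci_hcd *xhci,
				struct xhci_virt_device *virt_dev,
				unsigned int ep_index);
	};

Every wrapper checks both ops and the individual callback before dispatching, so a platform may register any subset and the core silently falls back to stock behavior for the rest.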
@@ -398,12 +459,17 @@
 	kfree(ring);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(xhci_ring_alloc);
 
 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index)
 {
-	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+	if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
+		xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
+	else
+		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+
 	virt_dev->eps[ep_index].ring = NULL;
 }
 
@@ -462,6 +528,7 @@
 {
 	struct xhci_container_ctx *ctx;
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
 
 	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
 		return NULL;
@@ -475,7 +542,12 @@
 	if (type == XHCI_CTX_TYPE_INPUT)
 		ctx->size += CTX_SIZE(xhci->hcc_params);
 
-	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
+	    (ops && ops->alloc_container_ctx))
+		xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
+	else
+		ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+
 	if (!ctx->bytes) {
 		kfree(ctx);
 		return NULL;
@@ -486,9 +558,16 @@
 void xhci_free_container_ctx(struct xhci_hcd *xhci,
 			     struct xhci_container_ctx *ctx)
 {
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
 	if (!ctx)
 		return;
-	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
+	    (ops && ops->free_container_ctx))
+		xhci_vendor_free_container_ctx(xhci, ctx);
+	else
+		dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+
 	kfree(ctx);
 }
 
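In the two container-context hunks above, the vendor path is taken only when offload is enabled and the callback pair exists; otherwise contexts keep coming from device_pool. The common code still tests ctx->bytes afterwards, so a vendor callback is expected to fill in ctx->bytes and ctx->dma exactly as dma_pool_zalloc() would. A vendor implementation might look like this sketch (all example_* names hypothetical):

	/* Hypothetical vendor callback: contexts come from a reserved DMA
	 * region visible to the offload coprocessor. On failure ctx->bytes
	 * is left NULL, which the caller in xhci_alloc_container_ctx()
	 * turns into an allocation error.
	 */
	static void example_alloc_container_ctx(struct xhci_hcd *xhci,
			struct xhci_container_ctx *ctx, int type, gfp_t flags)
	{
		ctx->bytes = example_region_zalloc(ctx->size, &ctx->dma, flags);
	}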
@@ -510,6 +589,7 @@
 	return (struct xhci_slot_ctx *)
 		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
 }
+EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);
 
 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 				    struct xhci_container_ctx *ctx,
@@ -523,6 +603,7 @@
 	return (struct xhci_ep_ctx *)
 		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 }
+EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
 
 
 /***************** Streams structures manipulation *************************/
@@ -583,23 +664,6 @@
 	return ep->ring;
 }
 
-struct xhci_ring *xhci_stream_id_to_ring(
-		struct xhci_virt_device *dev,
-		unsigned int ep_index,
-		unsigned int stream_id)
-{
-	struct xhci_virt_ep *ep = &dev->eps[ep_index];
-
-	if (stream_id == 0)
-		return ep->ring;
-	if (!ep->stream_info)
-		return NULL;
-
-	if (stream_id >= ep->stream_info->num_streams)
-		return NULL;
-	return ep->stream_info->stream_rings[stream_id];
-}
-
 /*
  * Change an endpoint's internal structure so it supports stream IDs.  The
  * number of requested streams includes stream 0, which cannot be used by device
@@ -650,7 +714,7 @@
 			num_stream_ctxs, &stream_info->ctx_array_dma,
 			mem_flags);
 	if (!stream_info->stream_ctx_array)
-		goto cleanup_ctx;
+		goto cleanup_ring_array;
 	memset(stream_info->stream_ctx_array, 0,
 			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
@@ -711,6 +775,11 @@
 	}
 	xhci_free_command(xhci, stream_info->free_streams_command);
 cleanup_ctx:
+	xhci_free_stream_ctx(xhci,
+		stream_info->num_stream_ctxs,
+		stream_info->stream_ctx_array,
+		stream_info->ctx_array_dma);
+cleanup_ring_array:
 	kfree(stream_info->stream_rings);
 cleanup_info:
 	kfree(stream_info);
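The two stream hunks above are an error-path fix: on stream-context-array allocation failure the code used to jump to cleanup_ctx, which freed only the ring-pointer array, and later failure paths that fell through to cleanup_ctx leaked the DMA'd context array entirely. With the new cleanup_ring_array label the unwind mirrors the allocation order:

	/* Resulting unwind, labels as in the hunk above; later failures
	 * enter higher up and fall through, freeing in reverse order of
	 * allocation.
	 */
	cleanup_ctx:		/* ctx array was allocated: free it first */
		xhci_free_stream_ctx(xhci,
			stream_info->num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
	cleanup_ring_array:	/* ring-pointer array is always freed */
		kfree(stream_info->stream_rings);
	cleanup_info:
		kfree(stream_info);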
@@ -897,19 +966,23 @@
 
 	for (i = 0; i < 31; i++) {
 		if (dev->eps[i].ring)
-			xhci_ring_free(xhci, dev->eps[i].ring);
+			xhci_free_endpoint_ring(xhci, dev, i);
 		if (dev->eps[i].stream_info)
 			xhci_free_stream_info(xhci,
 					dev->eps[i].stream_info);
-		/* Endpoints on the TT/root port lists should have been removed
-		 * when usb_disable_device() was called for the device.
-		 * We can't drop them anyway, because the udev might have gone
-		 * away by this point, and we can't tell what speed it was.
+		/*
+		 * Endpoints are normally deleted from the bandwidth list when
+		 * endpoints are dropped, before device is freed.
+		 * If host is dying or being removed then endpoints aren't
+		 * dropped cleanly, so delete the endpoint from list here.
+		 * Only applicable for hosts with software bandwidth checking.
 		 */
-		if (!list_empty(&dev->eps[i].bw_endpoint_list))
-			xhci_warn(xhci, "Slot %u endpoint %u "
-					"not removed from BW list!\n",
-					slot_id, i);
+
+		if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
+			list_del_init(&dev->eps[i].bw_endpoint_list);
+			xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
+				 slot_id, i);
+		}
 	}
 	/* If this is a hub, free the TT(s) from the TT list */
 	xhci_free_tt_info(xhci, dev, slot_id);
@@ -933,7 +1006,7 @@
 * that tt_info, then free the child first. Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
-void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
 {
 	struct xhci_virt_device *vdev;
 	struct list_head *tt_list_head;
@@ -985,6 +1058,8 @@
 	if (!dev)
 		return 0;
 
+	dev->slot_id = slot_id;
+
 	/* Allocate the (output) device context that will be used in the HC. */
 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 	if (!dev->out_ctx)
@@ -1003,6 +1078,8 @@
 
 	/* Initialize the cancellation list and watchdog timers for each ep */
 	for (i = 0; i < 31; i++) {
+		dev->eps[i].ep_index = i;
+		dev->eps[i].vdev = dev;
 		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
 		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
@@ -1302,7 +1379,7 @@
 			interval = xhci_parse_microframe_interval(udev, ep);
 			break;
 		}
-		/* Fall through - SS and HS isoc/int have same decoding */
+		fallthrough;	/* SS and HS isoc/int have same decoding */
 
 	case USB_SPEED_SUPER_PLUS:
 	case USB_SPEED_SUPER:
@@ -1322,7 +1399,7 @@
 		 * since it uses the same rules as low speed interrupt
 		 * endpoints.
 		 */
-		/* fall through */
+		fallthrough;
 
 	case USB_SPEED_LOW:
 		if (usb_endpoint_xfer_int(&ep->desc) ||
@@ -1492,8 +1569,16 @@
 		mult = 0;
 
 	/* Set up the endpoint ring */
-	virt_dev->eps[ep_index].new_ring =
-		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+	if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
+	    usb_endpoint_xfer_isoc(&ep->desc)) {
+		virt_dev->eps[ep_index].new_ring =
+			xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
+					max_packet, mem_flags);
+	} else {
+		virt_dev->eps[ep_index].new_ring =
+			xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+	}
+
 	if (!virt_dev->eps[ep_index].new_ring)
 		return -ENOMEM;
 
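The decision in the endpoint-ring hunk above, spelled out:

	/* Ring allocation in the hunk above:
	 *
	 *   offload enabled for this ep AND isochronous endpoint
	 *       -> xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ...)
	 *   anything else
	 *       -> xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags)
	 *
	 * Both results land in virt_dev->eps[ep_index].new_ring and are
	 * NULL-checked. Note xhci_vendor_alloc_transfer_ring() returns 0
	 * (i.e. NULL) when no ops table or callback is registered, so that
	 * case degrades to -ENOMEM rather than a crash; the matching free
	 * side is the xhci_free_endpoint_ring() hunk earlier in this diff.
	 */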
@@ -1678,8 +1763,8 @@
 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
-		void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
-				flags);
+		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+					       flags);
 		if (!buf)
 			goto fail_sp4;
 
@@ -1760,6 +1845,7 @@
 	INIT_LIST_HEAD(&command->cmd_list);
 	return command;
 }
+EXPORT_SYMBOL_GPL(xhci_alloc_command);
 
 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
 		bool allocate_completion, gfp_t mem_flags)
@@ -1793,6 +1879,7 @@
 	kfree(command->completion);
 	kfree(command);
 }
+EXPORT_SYMBOL_GPL(xhci_free_command);
 
 int xhci_alloc_erst(struct xhci_hcd *xhci,
 		struct xhci_ring *evt_ring,
@@ -1805,8 +1892,8 @@
 	struct xhci_erst_entry *entry;
 
 	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
-	erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
-			size, &erst->erst_dma_addr, flags);
+	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+					   size, &erst->erst_dma_addr, flags);
 	if (!erst->entries)
 		return -ENOMEM;
 
@@ -1823,6 +1910,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xhci_alloc_erst);
 
 void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 {
@@ -1836,136 +1924,24 @@
 			erst->erst_dma_addr);
 	erst->entries = NULL;
 }
+EXPORT_SYMBOL_GPL(xhci_free_erst);
 
-void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
+static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
+		struct xhci_hcd *xhci, gfp_t flags)
 {
-	union xhci_trb *erdp_trb, *current_trb;
-	struct xhci_segment *seg;
-	u64 erdp_reg;
-	u32 iman_reg;
-	dma_addr_t deq;
-	unsigned long segment_offset;
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
 
-	/* disable irq, ack pending interrupt and ack all pending events */
-
-	iman_reg =
-		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
-	iman_reg &= ~IMAN_IE;
-	writel_relaxed(iman_reg,
-			&xhci->sec_ir_set[intr_num]->irq_pending);
-	iman_reg =
-		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
-	if (iman_reg & IMAN_IP)
-		writel_relaxed(iman_reg,
-			&xhci->sec_ir_set[intr_num]->irq_pending);
-
-	/* last acked event trb is in erdp reg */
-	erdp_reg =
-		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
-	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
-	if (!deq) {
-		pr_debug("%s: event ring handling not required\n", __func__);
-		return;
-	}
-
-	seg = xhci->sec_event_ring[intr_num]->first_seg;
-	segment_offset = deq - seg->dma;
-
-	/* find out virtual address of the last acked event trb */
-	erdp_trb = current_trb = &seg->trbs[0] +
-				(segment_offset/sizeof(*current_trb));
-
-	/* read cycle state of the last acked trb to find out CCS */
-	xhci->sec_event_ring[intr_num]->cycle_state =
-				(current_trb->event_cmd.flags & TRB_CYCLE);
-
-	while (1) {
-		/* last trb of the event ring: toggle cycle state */
-		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
-			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
-			current_trb = &seg->trbs[0];
-		} else {
-			current_trb++;
-		}
-
-		/* cycle state transition */
-		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
-		    xhci->sec_event_ring[intr_num]->cycle_state)
-			break;
-	}
-
-	if (erdp_trb != current_trb) {
-		deq =
-		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
-					current_trb);
-		if (deq == 0)
-			xhci_warn(xhci,
-				"WARN invalid SW event ring dequeue ptr.\n");
-		/* Update HC event ring dequeue pointer */
-		erdp_reg &= ERST_PTR_MASK;
-		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
-	}
-
-	/* Clear the event handler busy flag (RW1C); event ring is empty. */
-	erdp_reg |= ERST_EHB;
-	xhci_write_64(xhci, erdp_reg,
-			&xhci->sec_ir_set[intr_num]->erst_dequeue);
-}
-
-int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num)
-{
-	int size;
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
-
-	if (intr_num >= xhci->max_interrupters) {
-		xhci_err(xhci, "invalid secondary interrupter num %d\n",
-			intr_num);
-		return -EINVAL;
-	}
-
-	size =
-	sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
-	if (xhci->sec_erst[intr_num].entries) {
-		xhci_handle_sec_intr_events(xhci, intr_num);
-		dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
-				xhci->sec_erst[intr_num].erst_dma_addr);
-		xhci->sec_erst[intr_num].entries = NULL;
-	}
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
-		intr_num);
-	if (xhci->sec_event_ring[intr_num])
-		xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
-
-	xhci->sec_event_ring[intr_num] = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"Freed sec event ring");
-
+	if (ops && ops->alloc_dcbaa)
+		return ops->alloc_dcbaa(xhci, flags);
 	return 0;
 }
 
-void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
+static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
 {
-	unsigned int i;
+	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
 
-	/* sec event ring clean up */
-	for (i = 1; i < xhci->max_interrupters; i++)
-		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
-
-	kfree(xhci->sec_ir_set);
-	xhci->sec_ir_set = NULL;
-	kfree(xhci->sec_erst);
-	xhci->sec_erst = NULL;
-	kfree(xhci->sec_event_ring);
-	xhci->sec_event_ring = NULL;
-
-	/* primary event ring clean up */
-	xhci_free_erst(xhci, &xhci->erst);
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
-	if (xhci->event_ring)
-		xhci_ring_free(xhci, xhci->event_ring);
-	xhci->event_ring = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed priamry event ring");
+	if (ops && ops->free_dcbaa)
+		ops->free_dcbaa(xhci);
 }
 
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
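xhci_vendor_alloc_dcbaa()/xhci_vendor_free_dcbaa(), added above in place of this tree's old secondary-event-ring machinery, let a platform supply the device context base address array itself. Whatever the callback returns must behave like the dma_alloc_coherent() copy that xhci_mem_init() otherwise creates; in particular dcbaa->dma must hold the address to program into DCBAAP. A sketch of a provider (example_* names hypothetical):

	/* Hypothetical vendor DCBAA provider backed by a firmware-shared
	 * carveout. xhci_mem_init() below writes dcbaa->dma into the
	 * DCBAAP register, so it must be a valid device-visible address.
	 */
	static struct xhci_device_context_array *example_alloc_dcbaa(
			struct xhci_hcd *xhci, gfp_t flags)
	{
		dma_addr_t dma;
		struct xhci_device_context_array *dcbaa;

		dcbaa = example_carveout_zalloc(sizeof(*dcbaa), &dma);
		if (dcbaa)
			dcbaa->dma = dma;
		return dcbaa;
	}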
@@ -1975,7 +1951,12 @@
 
 	cancel_delayed_work_sync(&xhci->cmd_timer);
-	xhci_event_ring_cleanup(xhci);
+	xhci_free_erst(xhci, &xhci->erst);
+
+	if (xhci->event_ring)
+		xhci_ring_free(xhci, xhci->event_ring);
+	xhci->event_ring = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -2017,9 +1998,13 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"Freed medium stream array pool");
 
-	if (xhci->dcbaa)
-		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
-				xhci->dcbaa, xhci->dcbaa->dma);
+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
+		xhci_vendor_free_dcbaa(xhci);
+	} else {
+		if (xhci->dcbaa)
+			dma_free_coherent(dev, sizeof(*xhci->dcbaa),
+					xhci->dcbaa, xhci->dcbaa->dma);
+	}
 	xhci->dcbaa = NULL;
 
 	scratchpad_free(xhci);
@@ -2059,8 +2044,8 @@
 
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
-	xhci->bus_state[0].bus_suspended = 0;
-	xhci->bus_state[1].bus_suspended = 0;
+	xhci->usb2_rhub.bus_state.bus_suspended = 0;
+	xhci->usb3_rhub.bus_state.bus_suspended = 0;
 }
 
 static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
@@ -2100,7 +2085,7 @@
 }
 
 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
+int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
 {
 	struct {
 		dma_addr_t input_dma;
@@ -2220,13 +2205,38 @@
 	xhci_dbg(xhci, "TRB math tests passed.\n");
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);
+
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+	u64 temp;
+	dma_addr_t deq;
+
+	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+			xhci->event_ring->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci, "WARN something wrong with SW event ring "
+				"dequeue ptr.\n");
+	/* Update HC event ring dequeue pointer */
+	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp &= ERST_PTR_MASK;
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	temp &= ~ERST_EHB;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write event ring dequeue pointer, "
+			"preserving EHB bit");
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue);
+}
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		__le32 __iomem *addr, int max_caps)
 {
 	u32 temp, port_offset, port_count;
 	int i;
-	u8 major_revision, minor_revision;
+	u8 major_revision, minor_revision, tmp_minor_revision;
 	struct xhci_hub *rhub;
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	struct xhci_port_cap *port_cap;
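xhci_set_hc_event_deq(), introduced in the hunk above, centralizes the event-ring dequeue-pointer update that this tree previously open-coded per interrupter. Since ERDP packs flag bits into its low nibble, the masking deserves a worked example (assuming this tree's usual definitions, ERST_PTR_MASK == 0xf and ERST_EHB == bit 3):

	/* Worked example of the ERDP update in xhci_set_hc_event_deq():
	 *
	 *   read back:  0x12345008   (pointer bits 0x12345000, EHB set)
	 *   temp &= ERST_PTR_MASK    -> 0x8  (keep only the flag nibble)
	 *   temp &= ~ERST_EHB        -> 0x0  (EHB is RW1C: writing 0 leaves it)
	 *   new dequeue DMA:          0x12345040
	 *   written: (0x12345040 & ~0xfULL) | 0x0 = 0x12345040
	 *
	 * A still-pending Event Handler Busy flag therefore survives the
	 * pointer update, matching the comment in the code above.
	 */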
@@ -2246,6 +2256,15 @@
 		 */
 		if (minor_revision > 0x00 && minor_revision < 0x10)
 			minor_revision <<= 4;
+		/*
+		 * Some zhaoxin's xHCI controller that follow usb3.1 spec
+		 * but only support Gen1.
+		 */
+		if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
+			tmp_minor_revision = minor_revision;
+			minor_revision = 0;
+		}
+
 	} else if (major_revision <= 0x02) {
 		rhub = &xhci->usb2_rhub;
 	} else {
@@ -2255,10 +2274,6 @@
 		/* Ignoring port protocol we can't understand. FIXME */
 		return;
 	}
-	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-
-	if (rhub->min_rev < minor_revision)
-		rhub->min_rev = minor_revision;
 
 	/* Port offset and count in the third dword, see section 7.2 */
 	temp = readl(addr + 2);
@@ -2277,8 +2292,6 @@
 	if (xhci->num_port_caps > max_caps)
 		return;
 
-	port_cap->maj_rev = major_revision;
-	port_cap->min_rev = minor_revision;
 	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
 
 	if (port_cap->psi_count) {
@@ -2299,6 +2312,11 @@
 				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
 				port_cap->psi_uid_count++;
 
+			if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
+			    major_revision == 0x03 &&
+			    XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
+				minor_revision = tmp_minor_revision;
+
 			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
 				  XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
 				  XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
@@ -2308,27 +2326,24 @@
 				  XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
 		}
 	}
+
+	rhub->maj_rev = major_revision;
+
+	if (rhub->min_rev < minor_revision)
+		rhub->min_rev = minor_revision;
+
+	port_cap->maj_rev = major_revision;
+	port_cap->min_rev = minor_revision;
+
 	/* cache usb2 port capabilities */
 	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
 		xhci->ext_caps[xhci->num_ext_caps++] = temp;
 
-	/* Check the host's USB2 LPM capability */
-	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
-			(temp & XHCI_L1C)) {
+	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
+	    (temp & XHCI_HLC)) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-				"xHCI 0.96: support USB2 software lpm");
-		xhci->sw_lpm_support = 1;
-	}
-
-	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-				"xHCI 1.0: support USB2 software lpm");
-		xhci->sw_lpm_support = 1;
-		if (temp & XHCI_HLC) {
-			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-					"xHCI 1.0: support USB2 hardware lpm");
-			xhci->hw_lpm_support = 1;
-		}
+			       "xHCI 1.0: support USB2 hardware lpm");
+		xhci->hw_lpm_support = 1;
 	}
 
 	port_offset--;
@@ -2367,8 +2382,11 @@
 
 	if (!rhub->num_ports)
 		return;
-	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
-			dev_to_node(dev));
+	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+			flags, dev_to_node(dev));
+	if (!rhub->ports)
+		return;
+
 	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
 		if (xhci->hw_ports[i].rhub != rhub ||
 		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
@@ -2493,154 +2511,6 @@
 	return 0;
 }
 
-int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
-	struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
-	unsigned int intr_num, gfp_t flags)
-{
-	dma_addr_t deq;
-	u64 val_64;
-	unsigned int val;
-	int ret;
-
-	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
-	if (!*er)
-		return -ENOMEM;
-
-	ret = xhci_alloc_erst(xhci, *er, erst, flags);
-	if (ret)
-		return ret;
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
-		intr_num,
-		erst->num_entries,
-		erst->entries,
-		(unsigned long long)erst->erst_dma_addr);
-
-	/* set ERST count with the number of entries in the segment table */
-	val = readl_relaxed(&ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"Write ERST size = %i to ir_set %d (some bits preserved)", val,
-		intr_num);
-	writel_relaxed(val, &ir_set->erst_size);
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"intr# %d: Set ERST entries to point to event ring.",
-			intr_num);
-	/* set the segment table base address */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Set ERST base address for ir_set %d = 0x%llx",
-			intr_num,
-			(unsigned long long)erst->erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
-	if (deq == 0 && !in_interrupt())
-		xhci_warn(xhci,
-		"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
-		intr_num);
-	/* Update HC event ring dequeue pointer */
-	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
-	val_64 &= ERST_PTR_MASK;
-	/* Don't clear the EHB bit (which is RW1C) because
-	 * there might be more events to service.
-	 */
-	val_64 &= ~ERST_EHB;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
-		intr_num);
-	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
-			&ir_set->erst_dequeue);
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Wrote ERST address to ir_set %d.", intr_num);
-
-	return 0;
-}
-
-int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num)
-{
-	int ret;
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
-		|| !xhci->sec_event_ring || !xhci->sec_erst ||
-		intr_num >= xhci->max_interrupters) {
-		xhci_err(xhci,
-			"%s:state %x ir_set %pK evt_ring %pK erst %pK intr# %d\n",
-			__func__, xhci->xhc_state, xhci->sec_ir_set,
-			xhci->sec_event_ring, xhci->sec_erst, intr_num);
-		return -EINVAL;
-	}
-
-	if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
-		&& xhci->sec_event_ring[intr_num]->first_seg)
-		goto done;
-
-	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
-	ret = xhci_event_ring_setup(xhci,
-				&xhci->sec_event_ring[intr_num],
-				xhci->sec_ir_set[intr_num],
-				&xhci->sec_erst[intr_num],
-				intr_num, GFP_KERNEL);
-	if (ret) {
-		xhci_err(xhci, "sec event ring setup failed inter#%d\n",
-			intr_num);
-		return ret;
-	}
-done:
-	return 0;
-}
-
-int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
-{
-	int ret = 0;
-
-	/* primary + secondary */
-	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"// Allocating primary event ring");
-
-	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = &xhci->run_regs->ir_set[0];
-	ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
-		&xhci->erst, 0, flags);
-	if (ret) {
-		xhci_err(xhci, "failed to setup primary event ring\n");
-		goto fail;
-	}
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"// Allocating sec event ring related pointers");
-
-	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
-				sizeof(*xhci->sec_ir_set), flags);
-	if (!xhci->sec_ir_set) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
-				sizeof(*xhci->sec_event_ring), flags);
-	if (!xhci->sec_event_ring) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	xhci->sec_erst = kcalloc(xhci->max_interrupters,
-				sizeof(*xhci->sec_erst), flags);
-	if (!xhci->sec_erst)
-		ret = -ENOMEM;
-fail:
-	return ret;
-}
-
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t dma;
@@ -2648,7 +2518,7 @@
 	unsigned int val, val2;
 	u64 val_64;
 	u32 page_size, temp;
-	int i;
+	int i, ret;
 
 	INIT_LIST_HEAD(&xhci->cmd_list);
 
@@ -2692,16 +2562,21 @@
 	 * xHCI section 5.4.6 - doorbell array must be
 	 * "physically contiguous and 64-byte (cache line) aligned".
 	 */
-	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
-			flags);
-	if (!xhci->dcbaa)
-		goto fail;
-	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
-	xhci->dcbaa->dma = dma;
+	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
+		xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
+		if (!xhci->dcbaa)
+			goto fail;
+	} else {
+		xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+						 flags);
+		if (!xhci->dcbaa)
+			goto fail;
+		xhci->dcbaa->dma = dma;
+	}
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"// Device context base array address = 0x%llx (DMA), %p (virt)",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
 
 	/*
 	 * Initialize the ring segment pool.  The ring must be a contiguous
@@ -2710,8 +2585,12 @@
 	 * and our use of dma addresses in the trb_address_map radix tree needs
 	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
 	 */
-	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
-			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+	if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+				TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
+	else
+		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+				TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
 
 	/* See Table 46 and Note on Figure 55 */
 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
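On the XHCI_ZHAOXIN_TRB_FETCH branch above: per the quirk's upstream description, these controllers prefetch TRBs beyond the end of the current segment, so the pool hands out double-sized, double-aligned blocks; only the first half is used as the ring segment and the second half is a guard the prefetcher can harmlessly read.

	/* Pool geometry after the hunk above:
	 *
	 *   default:                size/align = TRB_SEGMENT_SIZE,
	 *                           boundary   = xhci->page_size
	 *   XHCI_ZHAOXIN_TRB_FETCH: size/align = TRB_SEGMENT_SIZE * 2,
	 *                           boundary   = xhci->page_size * 2
	 *
	 * TRBS_PER_SEGMENT is unchanged, so ring math elsewhere in the
	 * driver is unaffected; only the allocation is padded.
	 */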
@@ -2769,16 +2648,49 @@
 			"// Doorbell array is located at offset 0x%x"
 			" from cap regs base addr", val);
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	if (xhci_event_ring_init(xhci, GFP_KERNEL))
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
+					   0, flags);
+	if (!xhci->event_ring)
 		goto fail;
-
 	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
+
+	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+	if (ret)
+		goto fail;
+
+	/* set ERST count with the number of entries in the segment table */
+	val = readl(&xhci->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
+			val);
+	writel(val, &xhci->ir_set->erst_size);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST entries to point to event ring.");
+	/* set the segment table base address */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST base address for ir_set 0 = 0x%llx",
+			(unsigned long long)xhci->erst.erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	xhci_set_hc_event_deq(xhci);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set 0.");
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
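With xhci_event_ring_init() and the secondary-interrupter plumbing removed earlier in this diff, primary-interrupter bring-up is now inlined in xhci_mem_init() above. The sequence, as performed:

	/* Primary event ring bring-up as inlined above:
	 *
	 *   1. xhci_ring_alloc(..., ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags)
	 *   2. xhci_check_trb_in_td_math()            sanity-check TRB math
	 *   3. xhci_alloc_erst()                      build the segment table
	 *   4. writel(val, &ir_set->erst_size)        program ERSTSZ
	 *   5. xhci_write_64(..., &ir_set->erst_base) program ERSTBA
	 *   6. xhci_set_hc_event_deq()                program ERDP
	 *
	 * Steps 4-6 preserve reserved bits by read-modify-write, as the
	 * masking with ERST_SIZE_MASK / ERST_PTR_MASK shows.
	 */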
@@ -2788,10 +2700,11 @@
 	for (i = 0; i < MAX_HC_SLOTS; i++)
 		xhci->devs[i] = NULL;
 	for (i = 0; i < USB_MAXCHILDREN; i++) {
-		xhci->bus_state[0].resume_done[i] = 0;
-		xhci->bus_state[1].resume_done[i] = 0;
+		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
+		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
 		/* Only the USB 2.0 completions will ever be used. */
-		init_completion(&xhci->bus_state[1].rexit_done[i]);
+		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
+		init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
 	}
 
 	if (scratchpad_alloc(xhci, flags))
@@ -2812,7 +2725,7 @@
 
 fail:
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	xhci_mem_cleanup(xhci);
 	return -ENOMEM;
 }