2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/drivers/usb/host/xhci.c
....@@ -17,11 +17,9 @@
1717 #include <linux/slab.h>
1818 #include <linux/dmi.h>
1919 #include <linux/dma-mapping.h>
20
-#include <linux/usb/quirks.h>
2120
2221 #include "xhci.h"
2322 #include "xhci-trace.h"
24
-#include "xhci-mtk.h"
2523 #include "xhci-debugfs.h"
2624 #include "xhci-dbgcap.h"
2725
....@@ -67,7 +65,7 @@
6765 * handshake done). There are two failure modes: "usec" have passed (major
6866 * hardware flakeout), or the register reads as all-ones (hardware removed).
6967 */
70
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
68
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
7169 {
7270 u32 result;
7371 int ret;
....@@ -75,7 +73,7 @@
7573 ret = readl_poll_timeout_atomic(ptr, result,
7674 (result & mask) == done ||
7775 result == U32_MAX,
78
- 1, usec);
76
+ 1, timeout_us);
7977 if (result == U32_MAX) /* card removed */
8078 return -ENODEV;
8179
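xhci_handshake() is a thin wrapper around readl_poll_timeout_atomic(): it re-reads the register every 1 us until the masked value equals "done", the read returns all-ones (controller gone), or timeout_us expires. A hypothetical caller, waiting up to 16 ms for the halt bit to latch (the timeout value here is illustrative):

    ret = xhci_handshake(&xhci->op_regs->status,
                         STS_HALT, STS_HALT, 16 * 1000);
    if (ret)
            xhci_warn(xhci, "Host controller failed to halt: %d\n", ret);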
....@@ -164,11 +162,11 @@
164162 * Transactions will be terminated immediately, and operational registers
165163 * will be set to their defaults.
166164 */
167
-int xhci_reset(struct xhci_hcd *xhci)
165
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
168166 {
169167 u32 command;
170168 u32 state;
171
- int ret, i;
169
+ int ret;
172170
173171 state = readl(&xhci->op_regs->status);
174172
....@@ -197,8 +195,7 @@
197195 if (xhci->quirks & XHCI_INTEL_HOST)
198196 udelay(1000);
199197
200
- ret = xhci_handshake(&xhci->op_regs->command,
201
- CMD_RESET, 0, 10 * 1000 * 1000);
198
+ ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
202199 if (ret)
203200 return ret;
204201
....@@ -211,14 +208,14 @@
211208 * xHCI cannot write to any doorbells or operational registers other
212209 * than status until the "Controller Not Ready" flag is cleared.
213210 */
214
- ret = xhci_handshake(&xhci->op_regs->status,
215
- STS_CNR, 0, 10 * 1000 * 1000);
211
+ ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
216212
217
- for (i = 0; i < 2; i++) {
218
- xhci->bus_state[i].port_c_suspend = 0;
219
- xhci->bus_state[i].suspended_ports = 0;
220
- xhci->bus_state[i].resuming_ports = 0;
221
- }
213
+ xhci->usb2_rhub.bus_state.port_c_suspend = 0;
214
+ xhci->usb2_rhub.bus_state.suspended_ports = 0;
215
+ xhci->usb2_rhub.bus_state.resuming_ports = 0;
216
+ xhci->usb3_rhub.bus_state.port_c_suspend = 0;
217
+ xhci->usb3_rhub.bus_state.suspended_ports = 0;
218
+ xhci->usb3_rhub.bus_state.resuming_ports = 0;
222219
223220 return ret;
224221 }
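The two timeout constants passed to xhci_reset() below live in xhci.h, not in this hunk; assuming the usual upstream values:

    /* assumed definitions from xhci.h, not part of this diff */
    #define XHCI_RESET_LONG_USEC   (10 * 1000 * 1000)  /* 10 s: probe/resume */
    #define XHCI_RESET_SHORT_USEC  (250 * 1000)        /* 250 ms: stop/shutdown */

Shutdown and removal paths must not stall for the full 10 seconds a cold init is allowed, which is why the hardcoded 10 * 1000 * 1000 above became a parameter.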
....@@ -244,7 +241,7 @@
244241 * an iommu. Doing anything when there is no iommu is definitely
245242 * unsafe...
246243 */
247
- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
244
+ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
248245 return;
249246
250247 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
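device_iommu_mapped() replaces the open-coded dev->iommu_group test with the supported helper; at this point in time it is a trivial wrapper in <linux/device.h>, roughly:

    static inline bool device_iommu_mapped(struct device *dev)
    {
            return dev->iommu_group != NULL;
    }

The semantics are unchanged, but callers no longer reach into IOMMU internals directly.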
....@@ -732,7 +729,7 @@
732729 xhci->xhc_state |= XHCI_STATE_HALTED;
733730 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
734731 xhci_halt(xhci);
735
- xhci_reset(xhci);
732
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
736733 spin_unlock_irq(&xhci->lock);
737734
738735 xhci_cleanup_msix(xhci);
....@@ -778,23 +775,31 @@
778775 {
779776 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
780777
781
- if (!hcd->rh_registered)
782
- return;
783
-
784
- /* Don't poll the roothubs on shutdown */
785
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
786
- del_timer_sync(&hcd->rh_timer);
787
- clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
788
- del_timer_sync(&xhci->shared_hcd->rh_timer);
789
-
790778 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
791779 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
792780
781
+ /* Don't poll the roothubs after shutdown. */
782
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
783
+ __func__, hcd->self.busnum);
784
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
785
+ del_timer_sync(&hcd->rh_timer);
786
+
787
+ if (xhci->shared_hcd) {
788
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
789
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
790
+ }
791
+
793792 spin_lock_irq(&xhci->lock);
794793 xhci_halt(xhci);
795
- /* Workaround for spurious wakeups at shutdown with HSW */
796
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
797
- xhci_reset(xhci);
794
+
795
+ /*
796
+ * Workaround for spurious wakeups at shutdown with HSW, and for boot
797
+ * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
798
+ */
799
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
800
+ xhci->quirks & XHCI_RESET_TO_DEFAULT)
801
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
802
+
798803 spin_unlock_irq(&xhci->lock);
799804
800805 xhci_cleanup_msix(xhci);
....@@ -896,37 +901,44 @@
896901 xhci_set_cmd_ring_deq(xhci);
897902 }
898903
899
-static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
904
+/*
905
+ * Disable port wake bits if do_wakeup is not set.
906
+ *
907
+ * Also clear a possible internal port wake state left hanging for ports that
908
+ * detected termination but never successfully enumerated (trained to 0U).
909
+ * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done
910
+ * at enumeration clears this wake; force one here as well for unconnected ports.
911
+ */
912
+
913
+static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
914
+ struct xhci_hub *rhub,
915
+ bool do_wakeup)
900916 {
901
- struct xhci_port **ports;
902
- int port_index;
903917 unsigned long flags;
904
- u32 t1, t2;
918
+ u32 t1, t2, portsc;
919
+ int i;
905920
906921 spin_lock_irqsave(&xhci->lock, flags);
907922
908
- /* disable usb3 ports Wake bits */
909
- port_index = xhci->usb3_rhub.num_ports;
910
- ports = xhci->usb3_rhub.ports;
911
- while (port_index--) {
912
- t1 = readl(ports[port_index]->addr);
913
- t1 = xhci_port_state_to_neutral(t1);
914
- t2 = t1 & ~PORT_WAKE_BITS;
915
- if (t1 != t2)
916
- writel(t2, ports[port_index]->addr);
917
- }
923
+ for (i = 0; i < rhub->num_ports; i++) {
924
+ portsc = readl(rhub->ports[i]->addr);
925
+ t1 = xhci_port_state_to_neutral(portsc);
926
+ t2 = t1;
918927
919
- /* disable usb2 ports Wake bits */
920
- port_index = xhci->usb2_rhub.num_ports;
921
- ports = xhci->usb2_rhub.ports;
922
- while (port_index--) {
923
- t1 = readl(ports[port_index]->addr);
924
- t1 = xhci_port_state_to_neutral(t1);
925
- t2 = t1 & ~PORT_WAKE_BITS;
926
- if (t1 != t2)
927
- writel(t2, ports[port_index]->addr);
928
- }
928
+ /* clear wake bits if do_wakeup is not set */
929
+ if (!do_wakeup)
930
+ t2 &= ~PORT_WAKE_BITS;
929931
932
+ /* Don't touch csc bit if connected or connect change is set */
933
+ if (!(portsc & (PORT_CSC | PORT_CONNECT)))
934
+ t2 |= PORT_CSC;
935
+
936
+ if (t1 != t2) {
937
+ writel(t2, rhub->ports[i]->addr);
938
+ xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
939
+ rhub->hcd->self.busnum, i + 1, portsc, t2);
940
+ }
941
+ }
930942 spin_unlock_irqrestore(&xhci->lock, flags);
931943 }
932944
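Since PORTSC change bits are write-1-to-clear, the !(portsc & (PORT_CSC | PORT_CONNECT)) guard above works out to:

    /*
     * PORT_CONNECT set -> device present, leave csc alone
     * PORT_CSC set     -> change event pending, let the hub thread
     *                     consume and clear it
     * neither          -> write 1 to PORT_CSC to flush any latched
     *                     internal wake state
     */

so only idle, unconnected ports receive the forced PORT_CSC write.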
....@@ -965,26 +977,6 @@
965977 return false;
966978 }
967979
968
-static void xhci_warm_port_reset_quirk(struct xhci_hcd *xhci)
969
-{
970
- struct xhci_port **ports;
971
- int port_index;
972
- u32 portsc;
973
-
974
- port_index = xhci->usb3_rhub.num_ports;
975
- ports = xhci->usb3_rhub.ports;
976
- while (port_index--) {
977
- portsc = readl(ports[port_index]->addr);
978
- /* Do warm port reset if no USB3 device connected */
979
- if (!(portsc & PORT_CONNECT)) {
980
- portsc |= PORT_WR;
981
- writel(portsc, ports[port_index]->addr);
982
- /* flush write */
983
- readl(ports[port_index]->addr);
984
- }
985
- }
986
-}
987
-
988980 /*
989981 * Stop HC (not bus-specific)
990982 *
....@@ -1007,15 +999,8 @@
1007999 return -EINVAL;
10081000
10091001 /* Clear root port wake on bits if wakeup not allowed. */
1010
- if (!do_wakeup)
1011
- xhci_disable_port_wake_on_bits(xhci);
1012
-
1013
- /*
1014
- * Do a warm reset for USB3 port to resets the USB3 link,
1015
- * forcing the link to enter the Rx.Detect state.
1016
- */
1017
- if (xhci->quirks & XHCI_WARM_RESET_ON_SUSPEND)
1018
- xhci_warm_port_reset_quirk(xhci);
1002
+ xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
1003
+ xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
10191004
10201005 if (!HCD_HW_ACCESSIBLE(hcd))
10211006 return 0;
....@@ -1023,7 +1008,8 @@
10231008 xhci_dbc_suspend(xhci);
10241009
10251010 /* Don't poll the roothubs on bus suspend. */
1026
- xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
1011
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
1012
+ __func__, hcd->self.busnum);
10271013 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
10281014 del_timer_sync(&hcd->rh_timer);
10291015 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
....@@ -1128,9 +1114,9 @@
11281114 /* Wait a bit if either of the roothubs needs to settle from the
11291115 * transition into bus suspend.
11301116 */
1131
- if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
1132
- time_before(jiffies,
1133
- xhci->bus_state[1].next_statechange))
1117
+
1118
+ if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1119
+ time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
11341120 msleep(100);
11351121
11361122 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
....@@ -1181,7 +1167,8 @@
11811167 /* re-initialize the HC on Restore Error, or Host Controller Error */
11821168 if (temp & (STS_SRE | STS_HCE)) {
11831169 reinit_xhc = true;
1184
- xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
1170
+ if (!xhci->broken_suspend)
1171
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
11851172 }
11861173
11871174 if (reinit_xhc) {
....@@ -1199,7 +1186,7 @@
11991186 xhci_dbg(xhci, "Stop HCD\n");
12001187 xhci_halt(xhci);
12011188 xhci_zero_64b_regs(xhci);
1202
- retval = xhci_reset(xhci);
1189
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
12031190 spin_unlock_irq(&xhci->lock);
12041191 if (retval)
12051192 return retval;
....@@ -1294,7 +1281,8 @@
12941281 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
12951282
12961283 /* Re-enable port polling. */
1297
- xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1284
+ xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
1285
+ __func__, hcd->self.busnum);
12981286 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
12991287 usb_hcd_poll_rh_status(xhci->shared_hcd);
13001288 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
....@@ -1307,7 +1295,22 @@
13071295
13081296 /*-------------------------------------------------------------------------*/
13091297
1310
-/**
1298
+/*
1299
+ * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
1300
+ * we'll copy the actual data into the TRB address register. This is limited to
1301
+ * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
1302
+ * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
1303
+ */
1304
+static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1305
+ gfp_t mem_flags)
1306
+{
1307
+ if (xhci_urb_suitable_for_idt(urb))
1308
+ return 0;
1309
+
1310
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1311
+}
1312
+
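The eligibility helper, xhci_urb_suitable_for_idt(), is defined in xhci.h rather than in this hunk; a sketch of its logic, assuming the upstream TRB_IDT_MAX_SIZE of 8 bytes:

    static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
    {
            /* OUT only, non-isoc, whole payload must fit in one TRB */
            return usb_urb_dir_out(urb) &&
                   !usb_endpoint_xfer_isoc(&urb->ep->desc) &&
                   usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
                   urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE;
    }

When it returns true, xhci_map_urb_for_dma() skips DMA mapping entirely and the ring code later copies the payload into the TRB's data-pointer field.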
1313
+/*
13111314 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
13121315 * HCDs. Find the index for an endpoint given its descriptor. Use the return
13131316 * value to right shift 1 for the bitmask.
....@@ -1327,6 +1330,7 @@
13271330 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
13281331 return index;
13291332 }
1333
+EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
13301334
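Worked example of the index math: endpoint 0 maps to index 0, and for endpoint number n > 0 the index is 2n - 1 for OUT and 2n for IN:

    /*
     * ep1 OUT: (1 * 2) + 0 - 1 = 1
     * ep1 IN:  (1 * 2) + 1 - 1 = 2
     * ep2 OUT: (2 * 2) + 0 - 1 = 3
     * ep2 IN:  (2 * 2) + 1 - 1 = 4
     */

The result is the xHCI Device Context Index (DCI) minus one, which is why xhci_get_endpoint_flag() below shifts by index + 1: bit 0 of the context flags belongs to the slot context.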
13311335 /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
13321336 * address from the XHCI endpoint index.
....@@ -1345,15 +1349,6 @@
13451349 static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
13461350 {
13471351 return 1 << (xhci_get_endpoint_index(desc) + 1);
1348
-}
1349
-
1350
-/* Find the flag for this endpoint (for use in the control context). Use the
1351
- * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1352
- * bit 1, etc.
1353
- */
1354
-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1355
-{
1356
- return 1 << (ep_index + 1);
13571352 }
13581353
13591354 /* Compute the last valid endpoint context index. Basically, this is the
....@@ -1522,6 +1517,11 @@
15221517 return -ENODEV;
15231518 }
15241519
1520
+ if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
1521
+ xhci_dbg(xhci, "skip urb for usb offload\n");
1522
+ return -EOPNOTSUPP;
1523
+ }
1524
+
15251525 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
15261526 num_tds = urb->number_of_packets;
15271527 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
....@@ -1532,8 +1532,7 @@
15321532 else
15331533 num_tds = 1;
15341534
1535
- urb_priv = kzalloc(sizeof(struct urb_priv) +
1536
- num_tds * sizeof(struct xhci_td), mem_flags);
1535
+ urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
15371536 if (!urb_priv)
15381537 return -ENOMEM;
15391538
....@@ -1723,7 +1722,12 @@
17231722
17241723 for (; i < urb_priv->num_tds; i++) {
17251724 td = &urb_priv->td[i];
1726
- list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1725
+ /* TD can already be on cancelled list if ep halted on it */
1726
+ if (list_empty(&td->cancelled_td_list)) {
1727
+ td->cancel_status = TD_DIRTY;
1728
+ list_add_tail(&td->cancelled_td_list,
1729
+ &ep->cancelled_td_list);
1730
+ }
17271731 }
17281732
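The list_empty() test above is only a valid "not yet on a cancel list" check because TD list nodes are assumed to be kept self-pointing while off-list:

    INIT_LIST_HEAD(&td->cancelled_td_list);  /* at TD setup */
    list_del_init(&td->cancelled_td_list);   /* on removal: node reads empty */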
17291733 /* Queue a stop endpoint command, but only if this is
....@@ -1769,8 +1773,8 @@
17691773 * disabled, so there's no need for mutual exclusion to protect
17701774 * the xhci->devs[slot_id] structure.
17711775 */
1772
-static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1773
- struct usb_host_endpoint *ep)
1776
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1777
+ struct usb_host_endpoint *ep)
17741778 {
17751779 struct xhci_hcd *xhci;
17761780 struct xhci_container_ctx *in_ctx, *out_ctx;
....@@ -1830,9 +1834,6 @@
18301834
18311835 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
18321836
1833
- if (xhci->quirks & XHCI_MTK_HOST)
1834
- xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1835
-
18361837 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
18371838 (unsigned int) ep->desc.bEndpointAddress,
18381839 udev->slot_id,
....@@ -1840,6 +1841,7 @@
18401841 (unsigned int) new_add_flags);
18411842 return 0;
18421843 }
1844
+EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
18431845
18441846 /* Add an endpoint to a new possible bandwidth configuration for this device.
18451847 * Only one call to this function is allowed per endpoint before
....@@ -1854,13 +1856,14 @@
18541856 * configuration or alt setting is installed in the device, so there's no need
18551857 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
18561858 */
1857
-static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1858
- struct usb_host_endpoint *ep)
1859
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1860
+ struct usb_host_endpoint *ep)
18591861 {
18601862 struct xhci_hcd *xhci;
18611863 struct xhci_container_ctx *in_ctx;
18621864 unsigned int ep_index;
18631865 struct xhci_input_control_ctx *ctrl_ctx;
1866
+ struct xhci_ep_ctx *ep_ctx;
18641867 u32 added_ctxs;
18651868 u32 new_add_flags, new_drop_flags;
18661869 struct xhci_virt_device *virt_dev;
....@@ -1928,15 +1931,6 @@
19281931 return -ENOMEM;
19291932 }
19301933
1931
- if (xhci->quirks & XHCI_MTK_HOST) {
1932
- ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1933
- if (ret < 0) {
1934
- xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1935
- virt_dev->eps[ep_index].new_ring = NULL;
1936
- return ret;
1937
- }
1938
- }
1939
-
19401934 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
19411935 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
19421936
....@@ -1951,7 +1945,8 @@
19511945 /* Store the usb_device pointer for later use */
19521946 ep->hcpriv = udev;
19531947
1954
- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
1948
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1949
+ trace_xhci_add_endpoint(ep_ctx);
19551950
19561951 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
19571952 (unsigned int) ep->desc.bEndpointAddress,
....@@ -1960,6 +1955,7 @@
19601955 (unsigned int) new_add_flags);
19611956 return 0;
19621957 }
1958
+EXPORT_SYMBOL_GPL(xhci_add_endpoint);
19631959
19641960 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
19651961 {
....@@ -2825,6 +2821,8 @@
28252821 }
28262822
28272823 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2824
+
2825
+ trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
28282826 trace_xhci_configure_endpoint(slot_ctx);
28292827
28302828 if (!ctx_change)
....@@ -2867,6 +2865,14 @@
28672865 xhci_finish_resource_reservation(xhci, ctrl_ctx);
28682866 spin_unlock_irqrestore(&xhci->lock, flags);
28692867 }
2868
+ if (ret)
2869
+ goto failed;
2870
+
2871
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
2872
+ if (ret)
2873
+ xhci_warn(xhci, "sync device context failed, ret=%d", ret);
2874
+
2875
+failed:
28702876 return ret;
28712877 }
28722878
....@@ -2894,7 +2900,7 @@
28942900 * else should be touching the xhci->devs[slot_id] structure, so we
28952901 * don't need to take the xhci->lock for manipulating that.
28962902 */
2897
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2903
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
28982904 {
28992905 int i;
29002906 int ret = 0;
....@@ -2983,6 +2989,7 @@
29832989 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
29842990 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
29852991 virt_dev->eps[i].new_ring = NULL;
2992
+ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
29862993 }
29872994 command_cleanup:
29882995 kfree(command->completion);
....@@ -2990,8 +2997,9 @@
29902997
29912998 return ret;
29922999 }
3000
+EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
29933001
2994
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3002
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
29953003 {
29963004 struct xhci_hcd *xhci;
29973005 struct xhci_virt_device *virt_dev;
....@@ -3008,12 +3016,17 @@
30083016 for (i = 0; i < 31; i++) {
30093017 if (virt_dev->eps[i].new_ring) {
30103018 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3011
- xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3019
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
3020
+ xhci_vendor_free_transfer_ring(xhci, virt_dev, i);
3021
+ else
3022
+ xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3023
+
30123024 virt_dev->eps[i].new_ring = NULL;
30133025 }
30143026 }
30153027 xhci_zero_in_ctx(xhci, virt_dev);
30163028 }
3029
+EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
30173030
30183031 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
30193032 struct xhci_container_ctx *in_ctx,
....@@ -3027,82 +3040,46 @@
30273040 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
30283041 }
30293042
3030
-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
3031
- unsigned int slot_id, unsigned int ep_index,
3032
- struct xhci_dequeue_state *deq_state)
3043
+static void xhci_endpoint_disable(struct usb_hcd *hcd,
3044
+ struct usb_host_endpoint *host_ep)
30333045 {
3034
- struct xhci_input_control_ctx *ctrl_ctx;
3035
- struct xhci_container_ctx *in_ctx;
3036
- struct xhci_ep_ctx *ep_ctx;
3037
- u32 added_ctxs;
3038
- dma_addr_t addr;
3046
+ struct xhci_hcd *xhci;
3047
+ struct xhci_virt_device *vdev;
3048
+ struct xhci_virt_ep *ep;
3049
+ struct usb_device *udev;
3050
+ unsigned long flags;
3051
+ unsigned int ep_index;
30393052
3040
- in_ctx = xhci->devs[slot_id]->in_ctx;
3041
- ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
3042
- if (!ctrl_ctx) {
3043
- xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3044
- __func__);
3045
- return;
3053
+ xhci = hcd_to_xhci(hcd);
3054
+rescan:
3055
+ spin_lock_irqsave(&xhci->lock, flags);
3056
+
3057
+ udev = (struct usb_device *)host_ep->hcpriv;
3058
+ if (!udev || !udev->slot_id)
3059
+ goto done;
3060
+
3061
+ vdev = xhci->devs[udev->slot_id];
3062
+ if (!vdev)
3063
+ goto done;
3064
+
3065
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
3066
+ ep = &vdev->eps[ep_index];
3067
+ if (!ep)
3068
+ goto done;
3069
+
3070
+ /* wait for hub_tt_work to finish clearing hub TT */
3071
+ if (ep->ep_state & EP_CLEARING_TT) {
3072
+ spin_unlock_irqrestore(&xhci->lock, flags);
3073
+ schedule_timeout_uninterruptible(1);
3074
+ goto rescan;
30463075 }
30473076
3048
- xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
3049
- xhci->devs[slot_id]->out_ctx, ep_index);
3050
- ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
3051
- addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3052
- deq_state->new_deq_ptr);
3053
- if (addr == 0) {
3054
- xhci_warn(xhci, "WARN Cannot submit config ep after "
3055
- "reset ep command\n");
3056
- xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
3057
- deq_state->new_deq_seg,
3058
- deq_state->new_deq_ptr);
3059
- return;
3060
- }
3061
- ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
3062
-
3063
- added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
3064
- xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
3065
- xhci->devs[slot_id]->out_ctx, ctrl_ctx,
3066
- added_ctxs, added_ctxs);
3067
-}
3068
-
3069
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
3070
- unsigned int stream_id, struct xhci_td *td)
3071
-{
3072
- struct xhci_dequeue_state deq_state;
3073
- struct usb_device *udev = td->urb->dev;
3074
-
3075
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3076
- "Cleaning up stalled endpoint ring");
3077
- /* We need to move the HW's dequeue pointer past this TD,
3078
- * or it will attempt to resend it on the next doorbell ring.
3079
- */
3080
- xhci_find_new_dequeue_state(xhci, udev->slot_id,
3081
- ep_index, stream_id, td, &deq_state);
3082
-
3083
- if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
3084
- return;
3085
-
3086
- /* HW with the reset endpoint quirk will use the saved dequeue state to
3087
- * issue a configure endpoint command later.
3088
- */
3089
- if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
3090
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3091
- "Queueing new dequeue state");
3092
- xhci_queue_new_dequeue_state(xhci, udev->slot_id,
3093
- ep_index, &deq_state);
3094
- } else {
3095
- /* Better hope no one uses the input context between now and the
3096
- * reset endpoint completion!
3097
- * XXX: No idea how this hardware will react when stream rings
3098
- * are enabled.
3099
- */
3100
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3101
- "Setting up input context for "
3102
- "configure endpoint command");
3103
- xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
3104
- ep_index, &deq_state);
3105
- }
3077
+ if (ep->ep_state)
3078
+ xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3079
+ ep->ep_state);
3080
+done:
3081
+ host_ep->hcpriv = NULL;
3082
+ spin_unlock_irqrestore(&xhci->lock, flags);
31063083 }
31073084
31083085 /*
....@@ -3136,8 +3113,18 @@
31363113 return;
31373114 udev = (struct usb_device *) host_ep->hcpriv;
31383115 vdev = xhci->devs[udev->slot_id];
3116
+
3117
+ /*
3118
+ * vdev may be lost due to xHC restore error and re-initialization
3119
+ * during S3/S4 resume. A new vdev will be allocated later by
3120
+ * xhci_discover_or_reset_device()
3121
+ */
3122
+ if (!udev->slot_id || !vdev)
3123
+ return;
31393124 ep_index = xhci_get_endpoint_index(&host_ep->desc);
31403125 ep = &vdev->eps[ep_index];
3126
+ if (!ep)
3127
+ return;
31413128
31423129 /* Bail out if toggle is already being cleared by an endpoint reset */
31433130 spin_lock_irqsave(&xhci->lock, flags);
....@@ -3198,6 +3185,13 @@
31983185
31993186 wait_for_completion(stop_cmd->completion);
32003187
3188
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
3189
+ if (err) {
3190
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
3191
+ __func__, err);
3192
+ goto cleanup;
3193
+ }
3194
+
32013195 spin_lock_irqsave(&xhci->lock, flags);
32023196
32033197 /* config ep command clears toggle if add and drop ep flags are set */
....@@ -3228,6 +3222,11 @@
32283222 spin_unlock_irqrestore(&xhci->lock, flags);
32293223
32303224 wait_for_completion(cfg_cmd->completion);
3225
+
3226
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
3227
+ if (err)
3228
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
3229
+ __func__, err);
32313230
32323231 xhci_free_command(xhci, cfg_cmd);
32333232 cleanup:
....@@ -3530,6 +3529,10 @@
35303529 xhci_free_command(xhci, config_cmd);
35313530 spin_unlock_irqrestore(&xhci->lock, flags);
35323531
3532
+ for (i = 0; i < num_eps; i++) {
3533
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3534
+ xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3535
+ }
35333536 /* Subtract 1 for stream 0, which drivers can't use */
35343537 return num_streams - 1;
35353538
....@@ -3770,6 +3773,13 @@
37703773 /* Wait for the Reset Device command to finish */
37713774 wait_for_completion(reset_device_cmd->completion);
37723775
3776
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
3777
+ if (ret) {
3778
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
3779
+ __func__, ret);
3780
+ goto command_cleanup;
3781
+ }
3782
+
37733783 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
37743784 * unless we tried to reset a slot ID that wasn't enabled,
37753785 * or the device wasn't in the addressed or configured state.
....@@ -3881,7 +3891,6 @@
38813891 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
38823892 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
38833893 }
3884
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
38853894 virt_dev->udev = NULL;
38863895 xhci_disable_slot(xhci, udev->slot_id);
38873896 xhci_free_virt_device(xhci, udev->slot_id);
....@@ -3897,6 +3906,8 @@
38973906 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
38983907 if (!command)
38993908 return -ENOMEM;
3909
+
3910
+ xhci_debugfs_remove_slot(xhci, slot_id);
39003911
39013912 spin_lock_irqsave(&xhci->lock, flags);
39023913 /* Don't disable the slot if the host controller is dead. */
....@@ -4014,6 +4025,14 @@
40144025 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
40154026 goto disable_slot;
40164027 }
4028
+
4029
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
4030
+ if (ret) {
4031
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
4032
+ __func__, ret);
4033
+ goto disable_slot;
4034
+ }
4035
+
40174036 vdev = xhci->devs[slot_id];
40184037 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
40194038 trace_xhci_alloc_dev(slot_ctx);
....@@ -4127,6 +4146,7 @@
41274146 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
41284147 le32_to_cpu(slot_ctx->dev_info) >> 27);
41294148
4149
+ trace_xhci_address_ctrl_ctx(ctrl_ctx);
41304150 spin_lock_irqsave(&xhci->lock, flags);
41314151 trace_xhci_setup_device(virt_dev);
41324152 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
....@@ -4142,6 +4162,13 @@
41424162
41434163 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
41444164 wait_for_completion(command->completion);
4165
+
4166
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
4167
+ if (ret) {
4168
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
4169
+ __func__, ret);
4170
+ goto out;
4171
+ }
41454172
41464173 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
41474174 * the SetAddress() "recovery interval" required by USB and aborting the
....@@ -4212,6 +4239,8 @@
42124239 /* Zero the input context control for later use */
42134240 ctrl_ctx->add_flags = 0;
42144241 ctrl_ctx->drop_flags = 0;
4242
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4243
+ udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
42154244
42164245 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
42174246 "Internal device address = %d",
....@@ -4225,10 +4254,11 @@
42254254 return ret;
42264255 }
42274256
4228
-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4257
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
42294258 {
42304259 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
42314260 }
4261
+EXPORT_SYMBOL_GPL(xhci_address_device);
42324262
42334263 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
42344264 {
....@@ -4288,6 +4318,14 @@
42884318 return -ENOMEM;
42894319 }
42904320
4321
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
4322
+ if (ret) {
4323
+ spin_unlock_irqrestore(&xhci->lock, flags);
4324
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
4325
+ __func__, ret);
4326
+ return ret;
4327
+ }
4328
+
42914329 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
42924330 spin_unlock_irqrestore(&xhci->lock, flags);
42934331
....@@ -4310,6 +4348,30 @@
43104348 spin_unlock_irqrestore(&xhci->lock, flags);
43114349 }
43124350 return ret;
4351
+}
4352
+
4353
+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
4354
+{
4355
+ return xhci->vendor_ops;
4356
+}
4357
+EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);
4358
+
4359
+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
4360
+{
4361
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
4362
+
4363
+ if (ops && ops->sync_dev_ctx)
4364
+ return ops->sync_dev_ctx(xhci, slot_id);
4365
+ return 0;
4366
+}
4367
+
4368
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
4369
+{
4370
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
4371
+
4372
+ if (ops && ops->usb_offload_skip_urb)
4373
+ return ops->usb_offload_skip_urb(xhci, urb);
4374
+ return false;
43134375 }
43144376
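These accessors dispatch through a per-controller ops table (xhci->vendor_ops, declared in xhci.h). A hypothetical offload driver would populate it before the hcd is added; every name below is illustrative, not part of this patch:

    static struct xhci_vendor_ops my_offload_ops = {
            .sync_dev_ctx         = my_sync_dev_ctx,
            .usb_offload_skip_urb = my_skip_urb,
    };

    /* in platform probe, before usb_add_hcd(): */
    xhci->vendor_ops = &my_offload_ops;

Unset hooks fall back to the defaults above (return 0 / return false), so the vendor layer is a strict no-op on stock hardware.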
43154377 #ifdef CONFIG_PM
....@@ -4510,8 +4572,7 @@
45104572 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
45114573 int portnum = udev->portnum - 1;
45124574
4513
- if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4514
- !udev->lpm_capable)
4575
+ if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
45154576 return 0;
45164577
45174578 /* we only support lpm for non-hub device connected to root hub yet */
....@@ -4615,7 +4676,7 @@
46154676 break;
46164677 }
46174678 /* Otherwise the calculation is the same as isoc eps */
4618
- /* fall through */
4679
+ fallthrough;
46194680 case USB_ENDPOINT_XFER_ISOC:
46204681 timeout_ns = xhci_service_interval_to_ns(desc);
46214682 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
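Worked example for the isoc/interrupt path, assuming the usual SuperSpeed service-interval encoding of 2^(bInterval - 1) * 125 us: bInterval = 4 gives 8 * 125 us = 1,000,000 ns, and the 5 % margin then yields

    timeout_ns = DIV_ROUND_UP_ULL(1000000 * 105, 100);  /* = 1050000 ns */

so the computed LPM timeout always leaves a little headroom over the endpoint's service interval.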
....@@ -5052,6 +5113,15 @@
50525113 return -ENOMEM;
50535114 }
50545115
5116
+ ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
5117
+ if (ret) {
5118
+ xhci_warn(xhci, "%s: Failed to sync device context, err=%d",
5119
+ __func__, ret);
5120
+ xhci_free_command(xhci, config_cmd);
5121
+ spin_unlock_irqrestore(&xhci->lock, flags);
5122
+ return ret;
5123
+ }
5124
+
50555125 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
50565126 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
50575127 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
....@@ -5188,9 +5258,6 @@
51885258 /* xHCI private pointer was set in xhci_pci_probe for the second
51895259 * registered roothub.
51905260 */
5191
- if (xhci->quirks & XHCI_DIS_AUTOSUSPEND)
5192
- xhci->shared_hcd->self.root_hub->quirks |=
5193
- USB_QUIRK_AUTO_SUSPEND;
51945261 return 0;
51955262 }
51965263
....@@ -5230,7 +5297,7 @@
52305297
52315298 xhci_dbg(xhci, "Resetting HCD\n");
52325299 /* Reset the internal HC memory state and registers. */
5233
- retval = xhci_reset(xhci);
5300
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
52345301 if (retval)
52355302 return retval;
52365303 xhci_dbg(xhci, "Reset complete\n");
....@@ -5277,134 +5344,25 @@
52775344 }
52785345 EXPORT_SYMBOL_GPL(xhci_gen_setup);
52795346
5280
-static phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_hcd *hcd,
5281
- unsigned int intr_num, dma_addr_t *dma)
5282
-{
5283
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5284
- struct device *dev = hcd->self.sysdev;
5285
- struct sg_table sgt;
5286
- phys_addr_t pa;
5287
-
5288
- if (intr_num > xhci->max_interrupters) {
5289
- xhci_err(xhci, "intr num %d > max intrs %d\n", intr_num,
5290
- xhci->max_interrupters);
5291
- return 0;
5292
- }
5293
-
5294
- if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
5295
- xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
5296
- && xhci->sec_event_ring[intr_num]->first_seg) {
5297
-
5298
- dma_get_sgtable(dev, &sgt,
5299
- xhci->sec_event_ring[intr_num]->first_seg->trbs,
5300
- xhci->sec_event_ring[intr_num]->first_seg->dma,
5301
- TRB_SEGMENT_SIZE);
5302
-
5303
- *dma = xhci->sec_event_ring[intr_num]->first_seg->dma;
5304
-
5305
- pa = page_to_phys(sg_page(sgt.sgl));
5306
- sg_free_table(&sgt);
5307
-
5308
- return pa;
5309
- }
5310
-
5311
- return 0;
5312
-}
5313
-
5314
-static phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_hcd *hcd,
5315
- struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma)
5316
-{
5317
- int ret;
5318
- unsigned int ep_index;
5319
- struct xhci_virt_device *virt_dev;
5320
- struct device *dev = hcd->self.sysdev;
5321
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5322
- struct sg_table sgt;
5323
- phys_addr_t pa;
5324
-
5325
- ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
5326
- if (ret <= 0) {
5327
- xhci_err(xhci, "%s: invalid args\n", __func__);
5328
- return 0;
5329
- }
5330
-
5331
- virt_dev = xhci->devs[udev->slot_id];
5332
- ep_index = xhci_get_endpoint_index(&ep->desc);
5333
-
5334
- if (virt_dev->eps[ep_index].ring &&
5335
- virt_dev->eps[ep_index].ring->first_seg) {
5336
-
5337
- dma_get_sgtable(dev, &sgt,
5338
- virt_dev->eps[ep_index].ring->first_seg->trbs,
5339
- virt_dev->eps[ep_index].ring->first_seg->dma,
5340
- TRB_SEGMENT_SIZE);
5341
-
5342
- *dma = virt_dev->eps[ep_index].ring->first_seg->dma;
5343
-
5344
- pa = page_to_phys(sg_page(sgt.sgl));
5345
- sg_free_table(&sgt);
5346
-
5347
- return pa;
5348
- }
5349
-
5350
- return 0;
5351
-}
5352
-
5353
-static int xhci_stop_endpoint(struct usb_hcd *hcd,
5354
- struct usb_device *udev, struct usb_host_endpoint *ep)
5347
+static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5348
+ struct usb_host_endpoint *ep)
53555349 {
53565350 struct xhci_hcd *xhci;
5351
+ struct usb_device *udev;
5352
+ unsigned int slot_id;
53575353 unsigned int ep_index;
5358
- struct xhci_virt_device *virt_dev;
5359
- struct xhci_command *cmd;
53605354 unsigned long flags;
5361
- int ret = 0;
5362
-
5363
- if (!hcd || !udev || !ep)
5364
- return -EINVAL;
53655355
53665356 xhci = hcd_to_xhci(hcd);
5367
- cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
5368
- if (!cmd)
5369
- return -ENOMEM;
53705357
53715358 spin_lock_irqsave(&xhci->lock, flags);
5372
- virt_dev = xhci->devs[udev->slot_id];
5373
- if (!virt_dev) {
5374
- ret = -ENODEV;
5375
- goto err;
5376
- }
5377
-
5359
+ udev = (struct usb_device *)ep->hcpriv;
5360
+ slot_id = udev->slot_id;
53785361 ep_index = xhci_get_endpoint_index(&ep->desc);
5379
- if (virt_dev->eps[ep_index].ring &&
5380
- virt_dev->eps[ep_index].ring->dequeue) {
5381
- ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id,
5382
- ep_index, 0);
5383
- if (ret)
5384
- goto err;
53855362
5386
- xhci_ring_cmd_db(xhci);
5387
- spin_unlock_irqrestore(&xhci->lock, flags);
5388
-
5389
- /* Wait for stop endpoint command to finish */
5390
- wait_for_completion(cmd->completion);
5391
-
5392
- if (cmd->status == COMP_COMMAND_ABORTED ||
5393
- cmd->status == COMP_STOPPED) {
5394
- xhci_warn(xhci,
5395
- "stop endpoint command timeout for ep%d%s\n",
5396
- usb_endpoint_num(&ep->desc),
5397
- usb_endpoint_dir_in(&ep->desc) ? "in" : "out");
5398
- ret = -ETIME;
5399
- }
5400
- goto free_cmd;
5401
- }
5402
-
5403
-err:
5363
+ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5364
+ xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
54045365 spin_unlock_irqrestore(&xhci->lock, flags);
5405
-free_cmd:
5406
- xhci_free_command(xhci, cmd);
5407
- return ret;
54085366 }
54095367
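xhci_clear_tt_buffer_complete() is the completion half of the EP_CLEARING_TT handshake: xhci_endpoint_disable() above busy-waits while the flag is set (its rescan loop, restated):

    /* waiting side, restated as a loop */
    while (ep->ep_state & EP_CLEARING_TT) {
            spin_unlock_irqrestore(&xhci->lock, flags);
            schedule_timeout_uninterruptible(1);
            spin_lock_irqsave(&xhci->lock, flags);
    }

Once the hub driver reports the TT buffer clear finished, this callback drops the flag and rings the doorbell so TDs queued in the meantime are serviced.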
54105368 static const struct hc_driver xhci_hc_driver = {
....@@ -5416,7 +5374,8 @@
54165374 * generic hardware linkage
54175375 */
54185376 .irq = xhci_irq,
5419
- .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
5377
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5378
+ HCD_BH,
54205379
54215380 /*
54225381 * basic lifecycle operations
....@@ -5429,6 +5388,7 @@
54295388 /*
54305389 * managing i/o requests and associated device resources
54315390 */
5391
+ .map_urb_for_dma = xhci_map_urb_for_dma,
54325392 .urb_enqueue = xhci_urb_enqueue,
54335393 .urb_dequeue = xhci_urb_dequeue,
54345394 .alloc_dev = xhci_alloc_dev,
....@@ -5437,6 +5397,7 @@
54375397 .free_streams = xhci_free_streams,
54385398 .add_endpoint = xhci_add_endpoint,
54395399 .drop_endpoint = xhci_drop_endpoint,
5400
+ .endpoint_disable = xhci_endpoint_disable,
54405401 .endpoint_reset = xhci_endpoint_reset,
54415402 .check_bandwidth = xhci_check_bandwidth,
54425403 .reset_bandwidth = xhci_reset_bandwidth,
....@@ -5467,11 +5428,7 @@
54675428 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
54685429 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
54695430 .find_raw_port_number = xhci_find_raw_port_number,
5470
- .sec_event_ring_setup = xhci_sec_event_ring_setup,
5471
- .sec_event_ring_cleanup = xhci_sec_event_ring_cleanup,
5472
- .get_sec_event_ring_phys_addr = xhci_get_sec_event_ring_phys_addr,
5473
- .get_xfer_ring_phys_addr = xhci_get_xfer_ring_phys_addr,
5474
- .stop_endpoint = xhci_stop_endpoint,
5431
+ .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
54755432 };
54765433
54775434 void xhci_init_driver(struct hc_driver *drv,
....@@ -5488,6 +5445,20 @@
54885445 drv->reset = over->reset;
54895446 if (over->start)
54905447 drv->start = over->start;
5448
+ if (over->add_endpoint)
5449
+ drv->add_endpoint = over->add_endpoint;
5450
+ if (over->drop_endpoint)
5451
+ drv->drop_endpoint = over->drop_endpoint;
5452
+ if (over->check_bandwidth)
5453
+ drv->check_bandwidth = over->check_bandwidth;
5454
+ if (over->reset_bandwidth)
5455
+ drv->reset_bandwidth = over->reset_bandwidth;
5456
+ if (over->address_device)
5457
+ drv->address_device = over->address_device;
5458
+ if (over->bus_suspend)
5459
+ drv->bus_suspend = over->bus_suspend;
5460
+ if (over->bus_resume)
5461
+ drv->bus_resume = over->bus_resume;
54915462 }
54925463 }
54935464 EXPORT_SYMBOL_GPL(xhci_init_driver);
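For context, a hypothetical platform glue driver would hand its overrides in like so (field names assumed to mirror the hooks copied above; see struct xhci_driver_overrides in xhci.h):

    /* all names here are illustrative */
    static const struct xhci_driver_overrides my_overrides __initconst = {
            .reset          = my_plat_xhci_setup,
            .address_device = my_plat_address_device,
    };

    xhci_init_driver(&my_plat_hc_driver, &my_overrides);

Only non-NULL hooks replace the generic xhci_hc_driver defaults.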