forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/usb/host/xhci.c
....@@ -9,6 +9,9 @@
99 */
1010
1111 #include <linux/pci.h>
12
+#ifndef __GENKSYMS__ /* ANDROID: KABI CRC preservation hack */
13
+#include <linux/iommu.h>
14
+#endif
1215 #include <linux/iopoll.h>
1316 #include <linux/irq.h>
1417 #include <linux/log2.h>
....@@ -17,11 +20,9 @@
1720 #include <linux/slab.h>
1821 #include <linux/dmi.h>
1922 #include <linux/dma-mapping.h>
20
-#include <linux/usb/quirks.h>
2123
2224 #include "xhci.h"
2325 #include "xhci-trace.h"
24
-#include "xhci-mtk.h"
2526 #include "xhci-debugfs.h"
2627 #include "xhci-dbgcap.h"
2728
....@@ -67,7 +68,7 @@
6768 * handshake done). There are two failure modes: "usec" have passed (major
6869 * hardware flakeout), or the register reads as all-ones (hardware removed).
6970 */
70
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
71
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
7172 {
7273 u32 result;
7374 int ret;
....@@ -75,7 +76,7 @@
7576 ret = readl_poll_timeout_atomic(ptr, result,
7677 (result & mask) == done ||
7778 result == U32_MAX,
78
- 1, usec);
79
+ 1, timeout_us);
7980 if (result == U32_MAX) /* card removed */
8081 return -ENODEV;
8182
....@@ -164,11 +165,11 @@
164165 * Transactions will be terminated immediately, and operational registers
165166 * will be set to their defaults.
166167 */
167
-int xhci_reset(struct xhci_hcd *xhci)
168
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
168169 {
169170 u32 command;
170171 u32 state;
171
- int ret, i;
172
+ int ret;
172173
173174 state = readl(&xhci->op_regs->status);
174175
....@@ -197,8 +198,7 @@
197198 if (xhci->quirks & XHCI_INTEL_HOST)
198199 udelay(1000);
199200
200
- ret = xhci_handshake(&xhci->op_regs->command,
201
- CMD_RESET, 0, 10 * 1000 * 1000);
201
+ ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
202202 if (ret)
203203 return ret;
204204
....@@ -211,14 +211,14 @@
211211 * xHCI cannot write to any doorbells or operational registers other
212212 * than status until the "Controller Not Ready" flag is cleared.
213213 */
214
- ret = xhci_handshake(&xhci->op_regs->status,
215
- STS_CNR, 0, 10 * 1000 * 1000);
214
+ ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
216215
217
- for (i = 0; i < 2; i++) {
218
- xhci->bus_state[i].port_c_suspend = 0;
219
- xhci->bus_state[i].suspended_ports = 0;
220
- xhci->bus_state[i].resuming_ports = 0;
221
- }
216
+ xhci->usb2_rhub.bus_state.port_c_suspend = 0;
217
+ xhci->usb2_rhub.bus_state.suspended_ports = 0;
218
+ xhci->usb2_rhub.bus_state.resuming_ports = 0;
219
+ xhci->usb3_rhub.bus_state.port_c_suspend = 0;
220
+ xhci->usb3_rhub.bus_state.suspended_ports = 0;
221
+ xhci->usb3_rhub.bus_state.resuming_ports = 0;
222222
223223 return ret;
224224 }
....@@ -226,6 +226,7 @@
226226 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
227227 {
228228 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
229
+ struct iommu_domain *domain;
229230 int err, i;
230231 u64 val;
231232 u32 intrs;
....@@ -244,7 +245,9 @@
244245 * an iommu. Doing anything when there is no iommu is definitely
245246 * unsafe...
246247 */
247
- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
248
+ domain = iommu_get_domain_for_dev(dev);
249
+ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
250
+ domain->type == IOMMU_DOMAIN_IDENTITY)
248251 return;
249252
250253 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
....@@ -696,6 +699,8 @@
696699 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
697700 "Finished xhci_run for USB2 roothub");
698701
702
+ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
703
+
699704 xhci_dbc_init(xhci);
700705
701706 xhci_debugfs_init(xhci);
....@@ -732,7 +737,7 @@
732737 xhci->xhc_state |= XHCI_STATE_HALTED;
733738 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
734739 xhci_halt(xhci);
735
- xhci_reset(xhci);
740
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
736741 spin_unlock_irq(&xhci->lock);
737742
738743 xhci_cleanup_msix(xhci);
....@@ -778,23 +783,31 @@
778783 {
779784 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
780785
781
- if (!hcd->rh_registered)
782
- return;
783
-
784
- /* Don't poll the roothubs on shutdown */
785
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
786
- del_timer_sync(&hcd->rh_timer);
787
- clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
788
- del_timer_sync(&xhci->shared_hcd->rh_timer);
789
-
790786 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
791787 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
792788
789
+ /* Don't poll the roothubs after shutdown. */
790
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
791
+ __func__, hcd->self.busnum);
792
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
793
+ del_timer_sync(&hcd->rh_timer);
794
+
795
+ if (xhci->shared_hcd) {
796
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
797
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
798
+ }
799
+
793800 spin_lock_irq(&xhci->lock);
794801 xhci_halt(xhci);
795
- /* Workaround for spurious wakeups at shutdown with HSW */
796
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
797
- xhci_reset(xhci);
802
+
803
+ /*
804
+	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
805
+	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
806
+ */
807
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
808
+ xhci->quirks & XHCI_RESET_TO_DEFAULT)
809
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
810
+
798811 spin_unlock_irq(&xhci->lock);
799812
800813 xhci_cleanup_msix(xhci);
....@@ -896,37 +909,44 @@
896909 xhci_set_cmd_ring_deq(xhci);
897910 }
898911
899
-static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
912
+/*
913
+ * Disable port wake bits if do_wakeup is not set.
914
+ *
915
+ * Also clear a possible internal port wake state left hanging for ports that
916
+ * detected termination but never successfully enumerated (trained to 0U).
917
+ * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done
918
+ * at enumeration clears this wake, force one here as well for unconnected ports
919
+ */
920
+
921
+static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
922
+ struct xhci_hub *rhub,
923
+ bool do_wakeup)
900924 {
901
- struct xhci_port **ports;
902
- int port_index;
903925 unsigned long flags;
904
- u32 t1, t2;
926
+ u32 t1, t2, portsc;
927
+ int i;
905928
906929 spin_lock_irqsave(&xhci->lock, flags);
907930
908
- /* disable usb3 ports Wake bits */
909
- port_index = xhci->usb3_rhub.num_ports;
910
- ports = xhci->usb3_rhub.ports;
911
- while (port_index--) {
912
- t1 = readl(ports[port_index]->addr);
913
- t1 = xhci_port_state_to_neutral(t1);
914
- t2 = t1 & ~PORT_WAKE_BITS;
915
- if (t1 != t2)
916
- writel(t2, ports[port_index]->addr);
917
- }
931
+ for (i = 0; i < rhub->num_ports; i++) {
932
+ portsc = readl(rhub->ports[i]->addr);
933
+ t1 = xhci_port_state_to_neutral(portsc);
934
+ t2 = t1;
918935
919
- /* disable usb2 ports Wake bits */
920
- port_index = xhci->usb2_rhub.num_ports;
921
- ports = xhci->usb2_rhub.ports;
922
- while (port_index--) {
923
- t1 = readl(ports[port_index]->addr);
924
- t1 = xhci_port_state_to_neutral(t1);
925
- t2 = t1 & ~PORT_WAKE_BITS;
926
- if (t1 != t2)
927
- writel(t2, ports[port_index]->addr);
928
- }
936
+ /* clear wake bits if do_wake is not set */
937
+ if (!do_wakeup)
938
+ t2 &= ~PORT_WAKE_BITS;
929939
940
+ /* Don't touch csc bit if connected or connect change is set */
941
+ if (!(portsc & (PORT_CSC | PORT_CONNECT)))
942
+ t2 |= PORT_CSC;
943
+
944
+ if (t1 != t2) {
945
+ writel(t2, rhub->ports[i]->addr);
946
+ xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
947
+ rhub->hcd->self.busnum, i + 1, portsc, t2);
948
+ }
949
+ }
930950 spin_unlock_irqrestore(&xhci->lock, flags);
931951 }
932952
....@@ -965,26 +985,6 @@
965985 return false;
966986 }
967987
968
-static void xhci_warm_port_reset_quirk(struct xhci_hcd *xhci)
969
-{
970
- struct xhci_port **ports;
971
- int port_index;
972
- u32 portsc;
973
-
974
- port_index = xhci->usb3_rhub.num_ports;
975
- ports = xhci->usb3_rhub.ports;
976
- while (port_index--) {
977
- portsc = readl(ports[port_index]->addr);
978
- /* Do warm port reset if no USB3 device connected */
979
- if (!(portsc & PORT_CONNECT)) {
980
- portsc |= PORT_WR;
981
- writel(portsc, ports[port_index]->addr);
982
- /* flush write */
983
- readl(ports[port_index]->addr);
984
- }
985
- }
986
-}
987
-
988988 /*
989989 * Stop HC (not bus-specific)
990990 *
....@@ -1007,15 +1007,8 @@
10071007 return -EINVAL;
10081008
10091009 /* Clear root port wake on bits if wakeup not allowed. */
1010
- if (!do_wakeup)
1011
- xhci_disable_port_wake_on_bits(xhci);
1012
-
1013
- /*
1014
- * Do a warm reset for USB3 port to resets the USB3 link,
1015
- * forcing the link to enter the Rx.Detect state.
1016
- */
1017
- if (xhci->quirks & XHCI_WARM_RESET_ON_SUSPEND)
1018
- xhci_warm_port_reset_quirk(xhci);
1010
+ xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
1011
+ xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
10191012
10201013 if (!HCD_HW_ACCESSIBLE(hcd))
10211014 return 0;
....@@ -1023,7 +1016,8 @@
10231016 xhci_dbc_suspend(xhci);
10241017
10251018 /* Don't poll the roothubs on bus suspend. */
1026
- xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
1019
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
1020
+ __func__, hcd->self.busnum);
10271021 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
10281022 del_timer_sync(&hcd->rh_timer);
10291023 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
....@@ -1128,9 +1122,9 @@
11281122 /* Wait a bit if either of the roothubs need to settle from the
11291123 * transition into bus suspend.
11301124 */
1131
- if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
1132
- time_before(jiffies,
1133
- xhci->bus_state[1].next_statechange))
1125
+
1126
+ if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1127
+ time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
11341128 msleep(100);
11351129
11361130 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
....@@ -1181,7 +1175,8 @@
11811175 /* re-initialize the HC on Restore Error, or Host Controller Error */
11821176 if (temp & (STS_SRE | STS_HCE)) {
11831177 reinit_xhc = true;
1184
- xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
1178
+ if (!xhci->broken_suspend)
1179
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
11851180 }
11861181
11871182 if (reinit_xhc) {
....@@ -1199,7 +1194,7 @@
11991194 xhci_dbg(xhci, "Stop HCD\n");
12001195 xhci_halt(xhci);
12011196 xhci_zero_64b_regs(xhci);
1202
- retval = xhci_reset(xhci);
1197
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
12031198 spin_unlock_irq(&xhci->lock);
12041199 if (retval)
12051200 return retval;
....@@ -1271,7 +1266,7 @@
12711266 * the first wake signalling failed, give it that chance.
12721267 */
12731268 pending_portevent = xhci_pending_portevent(xhci);
1274
- if (!pending_portevent) {
1269
+ if (!pending_portevent && !IS_ENABLED(CONFIG_ARCH_ROCKCHIP)) {
12751270 msleep(120);
12761271 pending_portevent = xhci_pending_portevent(xhci);
12771272 }
....@@ -1294,7 +1289,8 @@
12941289 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
12951290
12961291 /* Re-enable port polling. */
1297
- xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1292
+ xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
1293
+ __func__, hcd->self.busnum);
12981294 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
12991295 usb_hcd_poll_rh_status(xhci->shared_hcd);
13001296 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
....@@ -1307,7 +1303,22 @@
13071303
13081304 /*-------------------------------------------------------------------------*/
13091305
1310
-/**
1306
+/*
1307
+ * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
1308
+ * we'll copy the actual data into the TRB address register. This is limited to
1309
+ * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
1310
+ * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
1311
+ */
1312
+static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1313
+ gfp_t mem_flags)
1314
+{
1315
+ if (xhci_urb_suitable_for_idt(urb))
1316
+ return 0;
1317
+
1318
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1319
+}
1320
+
1321
+/*
13111322 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
13121323 * HCDs. Find the index for an endpoint given its descriptor. Use the return
13131324 * value to right shift 1 for the bitmask.
....@@ -1327,6 +1338,7 @@
13271338 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
13281339 return index;
13291340 }
1341
+EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
13301342
13311343 /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
13321344 * address from the XHCI endpoint index.
....@@ -1345,15 +1357,6 @@
13451357 static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
13461358 {
13471359 return 1 << (xhci_get_endpoint_index(desc) + 1);
1348
-}
1349
-
1350
-/* Find the flag for this endpoint (for use in the control context). Use the
1351
- * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1352
- * bit 1, etc.
1353
- */
1354
-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1355
-{
1356
- return 1 << (ep_index + 1);
13571360 }
13581361
13591362 /* Compute the last valid endpoint context index. Basically, this is the
....@@ -1522,6 +1525,11 @@
15221525 return -ENODEV;
15231526 }
15241527
1528
+ if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
1529
+ xhci_dbg(xhci, "skip urb for usb offload\n");
1530
+ return -EOPNOTSUPP;
1531
+ }
1532
+
15251533 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
15261534 num_tds = urb->number_of_packets;
15271535 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
....@@ -1532,8 +1540,7 @@
15321540 else
15331541 num_tds = 1;
15341542
1535
- urb_priv = kzalloc(sizeof(struct urb_priv) +
1536
- num_tds * sizeof(struct xhci_td), mem_flags);
1543
+ urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
15371544 if (!urb_priv)
15381545 return -ENOMEM;
15391546
....@@ -1723,7 +1730,12 @@
17231730
17241731 for (; i < urb_priv->num_tds; i++) {
17251732 td = &urb_priv->td[i];
1726
- list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1733
+ /* TD can already be on cancelled list if ep halted on it */
1734
+ if (list_empty(&td->cancelled_td_list)) {
1735
+ td->cancel_status = TD_DIRTY;
1736
+ list_add_tail(&td->cancelled_td_list,
1737
+ &ep->cancelled_td_list);
1738
+ }
17271739 }
17281740
17291741 /* Queue a stop endpoint command, but only if this is
....@@ -1769,8 +1781,8 @@
17691781 * disabled, so there's no need for mutual exclusion to protect
17701782 * the xhci->devs[slot_id] structure.
17711783 */
1772
-static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1773
- struct usb_host_endpoint *ep)
1784
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1785
+ struct usb_host_endpoint *ep)
17741786 {
17751787 struct xhci_hcd *xhci;
17761788 struct xhci_container_ctx *in_ctx, *out_ctx;
....@@ -1830,9 +1842,6 @@
18301842
18311843 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
18321844
1833
- if (xhci->quirks & XHCI_MTK_HOST)
1834
- xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1835
-
18361845 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
18371846 (unsigned int) ep->desc.bEndpointAddress,
18381847 udev->slot_id,
....@@ -1840,6 +1849,7 @@
18401849 (unsigned int) new_add_flags);
18411850 return 0;
18421851 }
1852
+EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
18431853
18441854 /* Add an endpoint to a new possible bandwidth configuration for this device.
18451855 * Only one call to this function is allowed per endpoint before
....@@ -1854,13 +1864,14 @@
18541864 * configuration or alt setting is installed in the device, so there's no need
18551865 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
18561866 */
1857
-static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1858
- struct usb_host_endpoint *ep)
1867
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1868
+ struct usb_host_endpoint *ep)
18591869 {
18601870 struct xhci_hcd *xhci;
18611871 struct xhci_container_ctx *in_ctx;
18621872 unsigned int ep_index;
18631873 struct xhci_input_control_ctx *ctrl_ctx;
1874
+ struct xhci_ep_ctx *ep_ctx;
18641875 u32 added_ctxs;
18651876 u32 new_add_flags, new_drop_flags;
18661877 struct xhci_virt_device *virt_dev;
....@@ -1928,15 +1939,6 @@
19281939 return -ENOMEM;
19291940 }
19301941
1931
- if (xhci->quirks & XHCI_MTK_HOST) {
1932
- ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1933
- if (ret < 0) {
1934
- xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1935
- virt_dev->eps[ep_index].new_ring = NULL;
1936
- return ret;
1937
- }
1938
- }
1939
-
19401942 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
19411943 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
19421944
....@@ -1951,7 +1953,8 @@
19511953 /* Store the usb_device pointer for later use */
19521954 ep->hcpriv = udev;
19531955
1954
- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
1956
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1957
+ trace_xhci_add_endpoint(ep_ctx);
19551958
19561959 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
19571960 (unsigned int) ep->desc.bEndpointAddress,
....@@ -1960,6 +1963,7 @@
19601963 (unsigned int) new_add_flags);
19611964 return 0;
19621965 }
1966
+EXPORT_SYMBOL_GPL(xhci_add_endpoint);
19631967
19641968 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
19651969 {
....@@ -2825,6 +2829,8 @@
28252829 }
28262830
28272831 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2832
+
2833
+ trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
28282834 trace_xhci_configure_endpoint(slot_ctx);
28292835
28302836 if (!ctx_change)
....@@ -2867,6 +2873,14 @@
28672873 xhci_finish_resource_reservation(xhci, ctrl_ctx);
28682874 spin_unlock_irqrestore(&xhci->lock, flags);
28692875 }
2876
+ if (ret)
2877
+ goto failed;
2878
+
2879
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
2880
+ if (ret)
2881
+ xhci_warn(xhci, "sync device context failed, ret=%d", ret);
2882
+
2883
+failed:
28702884 return ret;
28712885 }
28722886
....@@ -2894,7 +2908,7 @@
28942908 * else should be touching the xhci->devs[slot_id] structure, so we
28952909 * don't need to take the xhci->lock for manipulating that.
28962910 */
2897
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2911
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
28982912 {
28992913 int i;
29002914 int ret = 0;
....@@ -2983,6 +2997,7 @@
29832997 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
29842998 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
29852999 virt_dev->eps[i].new_ring = NULL;
3000
+ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
29863001 }
29873002 command_cleanup:
29883003 kfree(command->completion);
....@@ -2990,8 +3005,9 @@
29903005
29913006 return ret;
29923007 }
3008
+EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
29933009
2994
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3010
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
29953011 {
29963012 struct xhci_hcd *xhci;
29973013 struct xhci_virt_device *virt_dev;
....@@ -3008,12 +3024,17 @@
30083024 for (i = 0; i < 31; i++) {
30093025 if (virt_dev->eps[i].new_ring) {
30103026 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3011
- xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3027
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
3028
+ xhci_vendor_free_transfer_ring(xhci, virt_dev, i);
3029
+ else
3030
+ xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3031
+
30123032 virt_dev->eps[i].new_ring = NULL;
30133033 }
30143034 }
30153035 xhci_zero_in_ctx(xhci, virt_dev);
30163036 }
3037
+EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
30173038
30183039 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
30193040 struct xhci_container_ctx *in_ctx,
....@@ -3027,82 +3048,46 @@
30273048 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
30283049 }
30293050
3030
-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
3031
- unsigned int slot_id, unsigned int ep_index,
3032
- struct xhci_dequeue_state *deq_state)
3051
+static void xhci_endpoint_disable(struct usb_hcd *hcd,
3052
+ struct usb_host_endpoint *host_ep)
30333053 {
3034
- struct xhci_input_control_ctx *ctrl_ctx;
3035
- struct xhci_container_ctx *in_ctx;
3036
- struct xhci_ep_ctx *ep_ctx;
3037
- u32 added_ctxs;
3038
- dma_addr_t addr;
3054
+ struct xhci_hcd *xhci;
3055
+ struct xhci_virt_device *vdev;
3056
+ struct xhci_virt_ep *ep;
3057
+ struct usb_device *udev;
3058
+ unsigned long flags;
3059
+ unsigned int ep_index;
30393060
3040
- in_ctx = xhci->devs[slot_id]->in_ctx;
3041
- ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
3042
- if (!ctrl_ctx) {
3043
- xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3044
- __func__);
3045
- return;
3061
+ xhci = hcd_to_xhci(hcd);
3062
+rescan:
3063
+ spin_lock_irqsave(&xhci->lock, flags);
3064
+
3065
+ udev = (struct usb_device *)host_ep->hcpriv;
3066
+ if (!udev || !udev->slot_id)
3067
+ goto done;
3068
+
3069
+ vdev = xhci->devs[udev->slot_id];
3070
+ if (!vdev)
3071
+ goto done;
3072
+
3073
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
3074
+ ep = &vdev->eps[ep_index];
3075
+ if (!ep)
3076
+ goto done;
3077
+
3078
+ /* wait for hub_tt_work to finish clearing hub TT */
3079
+ if (ep->ep_state & EP_CLEARING_TT) {
3080
+ spin_unlock_irqrestore(&xhci->lock, flags);
3081
+ schedule_timeout_uninterruptible(1);
3082
+ goto rescan;
30463083 }
30473084
3048
- xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
3049
- xhci->devs[slot_id]->out_ctx, ep_index);
3050
- ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
3051
- addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3052
- deq_state->new_deq_ptr);
3053
- if (addr == 0) {
3054
- xhci_warn(xhci, "WARN Cannot submit config ep after "
3055
- "reset ep command\n");
3056
- xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
3057
- deq_state->new_deq_seg,
3058
- deq_state->new_deq_ptr);
3059
- return;
3060
- }
3061
- ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
3062
-
3063
- added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
3064
- xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
3065
- xhci->devs[slot_id]->out_ctx, ctrl_ctx,
3066
- added_ctxs, added_ctxs);
3067
-}
3068
-
3069
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
3070
- unsigned int stream_id, struct xhci_td *td)
3071
-{
3072
- struct xhci_dequeue_state deq_state;
3073
- struct usb_device *udev = td->urb->dev;
3074
-
3075
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3076
- "Cleaning up stalled endpoint ring");
3077
- /* We need to move the HW's dequeue pointer past this TD,
3078
- * or it will attempt to resend it on the next doorbell ring.
3079
- */
3080
- xhci_find_new_dequeue_state(xhci, udev->slot_id,
3081
- ep_index, stream_id, td, &deq_state);
3082
-
3083
- if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
3084
- return;
3085
-
3086
- /* HW with the reset endpoint quirk will use the saved dequeue state to
3087
- * issue a configure endpoint command later.
3088
- */
3089
- if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
3090
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3091
- "Queueing new dequeue state");
3092
- xhci_queue_new_dequeue_state(xhci, udev->slot_id,
3093
- ep_index, &deq_state);
3094
- } else {
3095
- /* Better hope no one uses the input context between now and the
3096
- * reset endpoint completion!
3097
- * XXX: No idea how this hardware will react when stream rings
3098
- * are enabled.
3099
- */
3100
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3101
- "Setting up input context for "
3102
- "configure endpoint command");
3103
- xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
3104
- ep_index, &deq_state);
3105
- }
3085
+ if (ep->ep_state)
3086
+ xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3087
+ ep->ep_state);
3088
+done:
3089
+ host_ep->hcpriv = NULL;
3090
+ spin_unlock_irqrestore(&xhci->lock, flags);
31063091 }
31073092
31083093 /*
....@@ -3136,8 +3121,18 @@
31363121 return;
31373122 udev = (struct usb_device *) host_ep->hcpriv;
31383123 vdev = xhci->devs[udev->slot_id];
3124
+
3125
+ /*
3126
+ * vdev may be lost due to xHC restore error and re-initialization
3127
+ * during S3/S4 resume. A new vdev will be allocated later by
3128
+ * xhci_discover_or_reset_device()
3129
+ */
3130
+ if (!udev->slot_id || !vdev)
3131
+ return;
31393132 ep_index = xhci_get_endpoint_index(&host_ep->desc);
31403133 ep = &vdev->eps[ep_index];
3134
+ if (!ep)
3135
+ return;
31413136
31423137 /* Bail out if toggle is already being cleared by a endpoint reset */
31433138 spin_lock_irqsave(&xhci->lock, flags);
....@@ -3198,6 +3193,13 @@
31983193
31993194 wait_for_completion(stop_cmd->completion);
32003195
3196
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
3197
+ if (err) {
3198
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
3199
+ __func__, err);
3200
+ goto cleanup;
3201
+ }
3202
+
32013203 spin_lock_irqsave(&xhci->lock, flags);
32023204
32033205 /* config ep command clears toggle if add and drop ep flags are set */
....@@ -3228,6 +3230,11 @@
32283230 spin_unlock_irqrestore(&xhci->lock, flags);
32293231
32303232 wait_for_completion(cfg_cmd->completion);
3233
+
3234
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
3235
+ if (err)
3236
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
3237
+ __func__, err);
32313238
32323239 xhci_free_command(xhci, cfg_cmd);
32333240 cleanup:
....@@ -3530,6 +3537,10 @@
35303537 xhci_free_command(xhci, config_cmd);
35313538 spin_unlock_irqrestore(&xhci->lock, flags);
35323539
3540
+ for (i = 0; i < num_eps; i++) {
3541
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3542
+ xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3543
+ }
35333544 /* Subtract 1 for stream 0, which drivers can't use */
35343545 return num_streams - 1;
35353546
....@@ -3770,6 +3781,13 @@
37703781 /* Wait for the Reset Device command to finish */
37713782 wait_for_completion(reset_device_cmd->completion);
37723783
3784
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
3785
+ if (ret) {
3786
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
3787
+ __func__, ret);
3788
+ goto command_cleanup;
3789
+ }
3790
+
37733791 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
37743792 * unless we tried to reset a slot ID that wasn't enabled,
37753793 * or the device wasn't in the addressed or configured state.
....@@ -3855,6 +3873,7 @@
38553873 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
38563874 struct xhci_virt_device *virt_dev;
38573875 struct xhci_slot_ctx *slot_ctx;
3876
+ unsigned long flags;
38583877 int i, ret;
38593878
38603879 /*
....@@ -3881,10 +3900,13 @@
38813900 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
38823901 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
38833902 }
3884
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
38853903 virt_dev->udev = NULL;
38863904 xhci_disable_slot(xhci, udev->slot_id);
3905
+
3906
+ spin_lock_irqsave(&xhci->lock, flags);
38873907 xhci_free_virt_device(xhci, udev->slot_id);
3908
+ spin_unlock_irqrestore(&xhci->lock, flags);
3909
+
38883910 }
38893911
38903912 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
....@@ -3897,6 +3919,8 @@
38973919 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
38983920 if (!command)
38993921 return -ENOMEM;
3922
+
3923
+ xhci_debugfs_remove_slot(xhci, slot_id);
39003924
39013925 spin_lock_irqsave(&xhci->lock, flags);
39023926 /* Don't disable the slot if the host controller is dead. */
....@@ -4014,6 +4038,14 @@
40144038 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
40154039 goto disable_slot;
40164040 }
4041
+
4042
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
4043
+ if (ret) {
4044
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
4045
+ __func__, ret);
4046
+ goto disable_slot;
4047
+ }
4048
+
40174049 vdev = xhci->devs[slot_id];
40184050 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
40194051 trace_xhci_alloc_dev(slot_ctx);
....@@ -4127,6 +4159,7 @@
41274159 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
41284160 le32_to_cpu(slot_ctx->dev_info) >> 27);
41294161
4162
+ trace_xhci_address_ctrl_ctx(ctrl_ctx);
41304163 spin_lock_irqsave(&xhci->lock, flags);
41314164 trace_xhci_setup_device(virt_dev);
41324165 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
....@@ -4142,6 +4175,13 @@
41424175
41434176 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
41444177 wait_for_completion(command->completion);
4178
+
4179
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
4180
+ if (ret) {
4181
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
4182
+ __func__, ret);
4183
+ goto out;
4184
+ }
41454185
41464186 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
41474187 * the SetAddress() "recovery interval" required by USB and aborting the
....@@ -4212,6 +4252,8 @@
42124252 /* Zero the input context control for later use */
42134253 ctrl_ctx->add_flags = 0;
42144254 ctrl_ctx->drop_flags = 0;
4255
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4256
+ udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
42154257
42164258 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
42174259 "Internal device address = %d",
....@@ -4225,10 +4267,11 @@
42254267 return ret;
42264268 }
42274269
4228
-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4270
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
42294271 {
42304272 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
42314273 }
4274
+EXPORT_SYMBOL_GPL(xhci_address_device);
42324275
42334276 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
42344277 {
....@@ -4288,6 +4331,14 @@
42884331 return -ENOMEM;
42894332 }
42904333
4334
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
4335
+ if (ret) {
4336
+ spin_unlock_irqrestore(&xhci->lock, flags);
4337
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
4338
+ __func__, ret);
4339
+ return ret;
4340
+ }
4341
+
42914342 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
42924343 spin_unlock_irqrestore(&xhci->lock, flags);
42934344
....@@ -4310,6 +4361,30 @@
43104361 spin_unlock_irqrestore(&xhci->lock, flags);
43114362 }
43124363 return ret;
4364
+}
4365
+
4366
+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
4367
+{
4368
+ return xhci->vendor_ops;
4369
+}
4370
+EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);
4371
+
4372
+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
4373
+{
4374
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
4375
+
4376
+ if (ops && ops->sync_dev_ctx)
4377
+ return ops->sync_dev_ctx(xhci, slot_id);
4378
+ return 0;
4379
+}
4380
+
4381
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
4382
+{
4383
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
4384
+
4385
+ if (ops && ops->usb_offload_skip_urb)
4386
+ return ops->usb_offload_skip_urb(xhci, urb);
4387
+ return false;
43134388 }
43144389
43154390 #ifdef CONFIG_PM
....@@ -4510,8 +4585,7 @@
45104585 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
45114586 int portnum = udev->portnum - 1;
45124587
4513
- if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4514
- !udev->lpm_capable)
4588
+ if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
45154589 return 0;
45164590
45174591 /* we only support lpm for non-hub device connected to root hub yet */
....@@ -4615,7 +4689,7 @@
46154689 break;
46164690 }
46174691 /* Otherwise the calculation is the same as isoc eps */
4618
- /* fall through */
4692
+ fallthrough;
46194693 case USB_ENDPOINT_XFER_ISOC:
46204694 timeout_ns = xhci_service_interval_to_ns(desc);
46214695 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
....@@ -5052,6 +5126,15 @@
50525126 return -ENOMEM;
50535127 }
50545128
5129
+ ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
5130
+ if (ret) {
5131
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
5132
+ __func__, ret);
5133
+ xhci_free_command(xhci, config_cmd);
5134
+ spin_unlock_irqrestore(&xhci->lock, flags);
5135
+ return ret;
5136
+ }
5137
+
50555138 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
50565139 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
50575140 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
....@@ -5188,9 +5271,6 @@
51885271 /* xHCI private pointer was set in xhci_pci_probe for the second
51895272 * registered roothub.
51905273 */
5191
- if (xhci->quirks & XHCI_DIS_AUTOSUSPEND)
5192
- xhci->shared_hcd->self.root_hub->quirks |=
5193
- USB_QUIRK_AUTO_SUSPEND;
51945274 return 0;
51955275 }
51965276
....@@ -5230,7 +5310,7 @@
52305310
52315311 xhci_dbg(xhci, "Resetting HCD\n");
52325312 /* Reset the internal HC memory state and registers. */
5233
- retval = xhci_reset(xhci);
5313
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
52345314 if (retval)
52355315 return retval;
52365316 xhci_dbg(xhci, "Reset complete\n");
....@@ -5277,134 +5357,25 @@
52775357 }
52785358 EXPORT_SYMBOL_GPL(xhci_gen_setup);
52795359
5280
-static phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_hcd *hcd,
5281
- unsigned int intr_num, dma_addr_t *dma)
5282
-{
5283
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5284
- struct device *dev = hcd->self.sysdev;
5285
- struct sg_table sgt;
5286
- phys_addr_t pa;
5287
-
5288
- if (intr_num > xhci->max_interrupters) {
5289
- xhci_err(xhci, "intr num %d > max intrs %d\n", intr_num,
5290
- xhci->max_interrupters);
5291
- return 0;
5292
- }
5293
-
5294
- if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
5295
- xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
5296
- && xhci->sec_event_ring[intr_num]->first_seg) {
5297
-
5298
- dma_get_sgtable(dev, &sgt,
5299
- xhci->sec_event_ring[intr_num]->first_seg->trbs,
5300
- xhci->sec_event_ring[intr_num]->first_seg->dma,
5301
- TRB_SEGMENT_SIZE);
5302
-
5303
- *dma = xhci->sec_event_ring[intr_num]->first_seg->dma;
5304
-
5305
- pa = page_to_phys(sg_page(sgt.sgl));
5306
- sg_free_table(&sgt);
5307
-
5308
- return pa;
5309
- }
5310
-
5311
- return 0;
5312
-}
5313
-
5314
-static phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_hcd *hcd,
5315
- struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma)
5316
-{
5317
- int ret;
5318
- unsigned int ep_index;
5319
- struct xhci_virt_device *virt_dev;
5320
- struct device *dev = hcd->self.sysdev;
5321
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5322
- struct sg_table sgt;
5323
- phys_addr_t pa;
5324
-
5325
- ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
5326
- if (ret <= 0) {
5327
- xhci_err(xhci, "%s: invalid args\n", __func__);
5328
- return 0;
5329
- }
5330
-
5331
- virt_dev = xhci->devs[udev->slot_id];
5332
- ep_index = xhci_get_endpoint_index(&ep->desc);
5333
-
5334
- if (virt_dev->eps[ep_index].ring &&
5335
- virt_dev->eps[ep_index].ring->first_seg) {
5336
-
5337
- dma_get_sgtable(dev, &sgt,
5338
- virt_dev->eps[ep_index].ring->first_seg->trbs,
5339
- virt_dev->eps[ep_index].ring->first_seg->dma,
5340
- TRB_SEGMENT_SIZE);
5341
-
5342
- *dma = virt_dev->eps[ep_index].ring->first_seg->dma;
5343
-
5344
- pa = page_to_phys(sg_page(sgt.sgl));
5345
- sg_free_table(&sgt);
5346
-
5347
- return pa;
5348
- }
5349
-
5350
- return 0;
5351
-}
5352
-
5353
-static int xhci_stop_endpoint(struct usb_hcd *hcd,
5354
- struct usb_device *udev, struct usb_host_endpoint *ep)
5360
+static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5361
+ struct usb_host_endpoint *ep)
53555362 {
53565363 struct xhci_hcd *xhci;
5364
+ struct usb_device *udev;
5365
+ unsigned int slot_id;
53575366 unsigned int ep_index;
5358
- struct xhci_virt_device *virt_dev;
5359
- struct xhci_command *cmd;
53605367 unsigned long flags;
5361
- int ret = 0;
5362
-
5363
- if (!hcd || !udev || !ep)
5364
- return -EINVAL;
53655368
53665369 xhci = hcd_to_xhci(hcd);
5367
- cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
5368
- if (!cmd)
5369
- return -ENOMEM;
53705370
53715371 spin_lock_irqsave(&xhci->lock, flags);
5372
- virt_dev = xhci->devs[udev->slot_id];
5373
- if (!virt_dev) {
5374
- ret = -ENODEV;
5375
- goto err;
5376
- }
5377
-
5372
+ udev = (struct usb_device *)ep->hcpriv;
5373
+ slot_id = udev->slot_id;
53785374 ep_index = xhci_get_endpoint_index(&ep->desc);
5379
- if (virt_dev->eps[ep_index].ring &&
5380
- virt_dev->eps[ep_index].ring->dequeue) {
5381
- ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id,
5382
- ep_index, 0);
5383
- if (ret)
5384
- goto err;
53855375
5386
- xhci_ring_cmd_db(xhci);
5387
- spin_unlock_irqrestore(&xhci->lock, flags);
5388
-
5389
- /* Wait for stop endpoint command to finish */
5390
- wait_for_completion(cmd->completion);
5391
-
5392
- if (cmd->status == COMP_COMMAND_ABORTED ||
5393
- cmd->status == COMP_STOPPED) {
5394
- xhci_warn(xhci,
5395
- "stop endpoint command timeout for ep%d%s\n",
5396
- usb_endpoint_num(&ep->desc),
5397
- usb_endpoint_dir_in(&ep->desc) ? "in" : "out");
5398
- ret = -ETIME;
5399
- }
5400
- goto free_cmd;
5401
- }
5402
-
5403
-err:
5376
+ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5377
+ xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
54045378 spin_unlock_irqrestore(&xhci->lock, flags);
5405
-free_cmd:
5406
- xhci_free_command(xhci, cmd);
5407
- return ret;
54085379 }
54095380
54105381 static const struct hc_driver xhci_hc_driver = {
....@@ -5416,7 +5387,8 @@
54165387 * generic hardware linkage
54175388 */
54185389 .irq = xhci_irq,
5419
- .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
5390
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5391
+ HCD_BH,
54205392
54215393 /*
54225394 * basic lifecycle operations
....@@ -5429,6 +5401,7 @@
54295401 /*
54305402 * managing i/o requests and associated device resources
54315403 */
5404
+ .map_urb_for_dma = xhci_map_urb_for_dma,
54325405 .urb_enqueue = xhci_urb_enqueue,
54335406 .urb_dequeue = xhci_urb_dequeue,
54345407 .alloc_dev = xhci_alloc_dev,
....@@ -5437,6 +5410,7 @@
54375410 .free_streams = xhci_free_streams,
54385411 .add_endpoint = xhci_add_endpoint,
54395412 .drop_endpoint = xhci_drop_endpoint,
5413
+ .endpoint_disable = xhci_endpoint_disable,
54405414 .endpoint_reset = xhci_endpoint_reset,
54415415 .check_bandwidth = xhci_check_bandwidth,
54425416 .reset_bandwidth = xhci_reset_bandwidth,
....@@ -5467,11 +5441,7 @@
54675441 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
54685442 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
54695443 .find_raw_port_number = xhci_find_raw_port_number,
5470
- .sec_event_ring_setup = xhci_sec_event_ring_setup,
5471
- .sec_event_ring_cleanup = xhci_sec_event_ring_cleanup,
5472
- .get_sec_event_ring_phys_addr = xhci_get_sec_event_ring_phys_addr,
5473
- .get_xfer_ring_phys_addr = xhci_get_xfer_ring_phys_addr,
5474
- .stop_endpoint = xhci_stop_endpoint,
5444
+ .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
54755445 };
54765446
54775447 void xhci_init_driver(struct hc_driver *drv,
....@@ -5488,6 +5458,20 @@
54885458 drv->reset = over->reset;
54895459 if (over->start)
54905460 drv->start = over->start;
5461
+ if (over->add_endpoint)
5462
+ drv->add_endpoint = over->add_endpoint;
5463
+ if (over->drop_endpoint)
5464
+ drv->drop_endpoint = over->drop_endpoint;
5465
+ if (over->check_bandwidth)
5466
+ drv->check_bandwidth = over->check_bandwidth;
5467
+ if (over->reset_bandwidth)
5468
+ drv->reset_bandwidth = over->reset_bandwidth;
5469
+ if (over->address_device)
5470
+ drv->address_device = over->address_device;
5471
+ if (over->bus_suspend)
5472
+ drv->bus_suspend = over->bus_suspend;
5473
+ if (over->bus_resume)
5474
+ drv->bus_resume = over->bus_resume;
54915475 }
54925476 }
54935477 EXPORT_SYMBOL_GPL(xhci_init_driver);