| .. | .. |
|---|
| 9 | 9 | */ |
|---|
| 10 | 10 | |
|---|
| 11 | 11 | #include <linux/pci.h> |
|---|
| 12 | +#ifndef __GENKSYMS__ /* ANDROID: KABI CRC preservation hack */ |
|---|
| 13 | +#include <linux/iommu.h> |
|---|
| 14 | +#endif |
|---|
| 12 | 15 | #include <linux/iopoll.h> |
|---|
| 13 | 16 | #include <linux/irq.h> |
|---|
| 14 | 17 | #include <linux/log2.h> |
|---|
| .. | .. |
|---|
| 17 | 20 | #include <linux/slab.h> |
|---|
| 18 | 21 | #include <linux/dmi.h> |
|---|
| 19 | 22 | #include <linux/dma-mapping.h> |
|---|
| 20 | | -#include <linux/usb/quirks.h> |
|---|
| 21 | 23 | |
|---|
| 22 | 24 | #include "xhci.h" |
|---|
| 23 | 25 | #include "xhci-trace.h" |
|---|
| 24 | | -#include "xhci-mtk.h" |
|---|
| 25 | 26 | #include "xhci-debugfs.h" |
|---|
| 26 | 27 | #include "xhci-dbgcap.h" |
|---|
| 27 | 28 | |
|---|
| .. | .. |
|---|
| 67 | 68 | * handshake done). There are two failure modes: "usec" have passed (major |
|---|
| 68 | 69 | * hardware flakeout), or the register reads as all-ones (hardware removed). |
|---|
| 69 | 70 | */ |
|---|
| 70 | | -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) |
|---|
| 71 | +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) |
|---|
| 71 | 72 | { |
|---|
| 72 | 73 | u32 result; |
|---|
| 73 | 74 | int ret; |
|---|
| .. | .. |
|---|
| 75 | 76 | ret = readl_poll_timeout_atomic(ptr, result, |
|---|
| 76 | 77 | (result & mask) == done || |
|---|
| 77 | 78 | result == U32_MAX, |
|---|
| 78 | | - 1, usec); |
|---|
| 79 | + 1, timeout_us); |
|---|
| 79 | 80 | if (result == U32_MAX) /* card removed */ |
|---|
| 80 | 81 | return -ENODEV; |
|---|
| 81 | 82 | |
|---|
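For context, a minimal sketch of how a caller uses the reworked handshake: poll an operational register until the masked value matches, the register reads back all-ones, or the (now 64-bit) timeout expires. The STS_HALT mask and the wrapper function below are illustrative assumptions taken from xhci.h usage elsewhere, not part of this hunk.

```c
/* Illustrative only: wait up to 16 ms for the controller to report Halted.
 * xhci_handshake() returns 0 on success, -ENODEV if the register reads as
 * all-ones (controller removed), or -ETIMEDOUT once timeout_us expires.
 */
static int example_wait_for_halt(struct xhci_hcd *xhci)
{
	return xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
			      16 * 1000 /* timeout_us */);
}
```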
| .. | .. |
|---|
| 164 | 165 | * Transactions will be terminated immediately, and operational registers |
|---|
| 165 | 166 | * will be set to their defaults. |
|---|
| 166 | 167 | */ |
|---|
| 167 | | -int xhci_reset(struct xhci_hcd *xhci) |
|---|
| 168 | +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) |
|---|
| 168 | 169 | { |
|---|
| 169 | 170 | u32 command; |
|---|
| 170 | 171 | u32 state; |
|---|
| 171 | | - int ret, i; |
|---|
| 172 | + int ret; |
|---|
| 172 | 173 | |
|---|
| 173 | 174 | state = readl(&xhci->op_regs->status); |
|---|
| 174 | 175 | |
|---|
| .. | .. |
|---|
| 197 | 198 | if (xhci->quirks & XHCI_INTEL_HOST) |
|---|
| 198 | 199 | udelay(1000); |
|---|
| 199 | 200 | |
|---|
| 200 | | - ret = xhci_handshake(&xhci->op_regs->command, |
|---|
| 201 | | - CMD_RESET, 0, 10 * 1000 * 1000); |
|---|
| 201 | + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); |
|---|
| 202 | 202 | if (ret) |
|---|
| 203 | 203 | return ret; |
|---|
| 204 | 204 | |
|---|
| .. | .. |
|---|
| 211 | 211 | * xHCI cannot write to any doorbells or operational registers other |
|---|
| 212 | 212 | * than status until the "Controller Not Ready" flag is cleared. |
|---|
| 213 | 213 | */ |
|---|
| 214 | | - ret = xhci_handshake(&xhci->op_regs->status, |
|---|
| 215 | | - STS_CNR, 0, 10 * 1000 * 1000); |
|---|
| 214 | + ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us); |
|---|
| 216 | 215 | |
|---|
| 217 | | - for (i = 0; i < 2; i++) { |
|---|
| 218 | | - xhci->bus_state[i].port_c_suspend = 0; |
|---|
| 219 | | - xhci->bus_state[i].suspended_ports = 0; |
|---|
| 220 | | - xhci->bus_state[i].resuming_ports = 0; |
|---|
| 221 | | - } |
|---|
| 216 | + xhci->usb2_rhub.bus_state.port_c_suspend = 0; |
|---|
| 217 | + xhci->usb2_rhub.bus_state.suspended_ports = 0; |
|---|
| 218 | + xhci->usb2_rhub.bus_state.resuming_ports = 0; |
|---|
| 219 | + xhci->usb3_rhub.bus_state.port_c_suspend = 0; |
|---|
| 220 | + xhci->usb3_rhub.bus_state.suspended_ports = 0; |
|---|
| 221 | + xhci->usb3_rhub.bus_state.resuming_ports = 0; |
|---|
| 222 | 222 | |
|---|
| 223 | 223 | return ret; |
|---|
| 224 | 224 | } |
|---|
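Callers later in this patch pass one of two timeout constants instead of the old hard-coded 10-second wait. Below is a hedged sketch of what those xhci.h constants are expected to look like; the exact values are an assumption and are not shown in this diff.

```c
/* Assumed values -- the real definitions live in xhci.h.  Init and resume
 * paths keep a generous budget, while halt/shutdown paths use a short one
 * so a wedged controller cannot stall reboot for ten seconds.
 */
#define XHCI_RESET_LONG_USEC	(10 * 1000 * 1000)	/* 10 s   */
#define XHCI_RESET_SHORT_USEC	(250 * 1000)		/* 250 ms */

/* e.g. xhci_reset(xhci, XHCI_RESET_LONG_USEC)  on init/resume paths,
 *      xhci_reset(xhci, XHCI_RESET_SHORT_USEC) on stop/shutdown paths. */
```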
| .. | .. |
|---|
| 226 | 226 | static void xhci_zero_64b_regs(struct xhci_hcd *xhci) |
|---|
| 227 | 227 | { |
|---|
| 228 | 228 | struct device *dev = xhci_to_hcd(xhci)->self.sysdev; |
|---|
| 229 | + struct iommu_domain *domain; |
|---|
| 229 | 230 | int err, i; |
|---|
| 230 | 231 | u64 val; |
|---|
| 231 | 232 | u32 intrs; |
|---|
| .. | .. |
|---|
| 244 | 245 | * an iommu. Doing anything when there is no iommu is definitely |
|---|
| 245 | 246 | * unsafe... |
|---|
| 246 | 247 | */ |
|---|
| 247 | | - if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group) |
|---|
| 248 | + domain = iommu_get_domain_for_dev(dev); |
|---|
| 249 | + if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain || |
|---|
| 250 | + domain->type == IOMMU_DOMAIN_IDENTITY) |
|---|
| 248 | 251 | return; |
|---|
| 249 | 252 | |
|---|
| 250 | 253 | xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); |
|---|
| .. | .. |
|---|
| 696 | 699 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
|---|
| 697 | 700 | "Finished xhci_run for USB2 roothub"); |
|---|
| 698 | 701 | |
|---|
| 702 | + set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags); |
|---|
| 703 | + |
|---|
| 699 | 704 | xhci_dbc_init(xhci); |
|---|
| 700 | 705 | |
|---|
| 701 | 706 | xhci_debugfs_init(xhci); |
|---|
| .. | .. |
|---|
| 732 | 737 | xhci->xhc_state |= XHCI_STATE_HALTED; |
|---|
| 733 | 738 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
|---|
| 734 | 739 | xhci_halt(xhci); |
|---|
| 735 | | - xhci_reset(xhci); |
|---|
| 740 | + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); |
|---|
| 736 | 741 | spin_unlock_irq(&xhci->lock); |
|---|
| 737 | 742 | |
|---|
| 738 | 743 | xhci_cleanup_msix(xhci); |
|---|
| .. | .. |
|---|
| 778 | 783 | { |
|---|
| 779 | 784 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
|---|
| 780 | 785 | |
|---|
| 781 | | - if (!hcd->rh_registered) |
|---|
| 782 | | - return; |
|---|
| 783 | | - |
|---|
| 784 | | - /* Don't poll the roothubs on shutdown */ |
|---|
| 785 | | - clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
|---|
| 786 | | - del_timer_sync(&hcd->rh_timer); |
|---|
| 787 | | - clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
|---|
| 788 | | - del_timer_sync(&xhci->shared_hcd->rh_timer); |
|---|
| 789 | | - |
|---|
| 790 | 786 | if (xhci->quirks & XHCI_SPURIOUS_REBOOT) |
|---|
| 791 | 787 | usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); |
|---|
| 792 | 788 | |
|---|
| 789 | + /* Don't poll the roothubs after shutdown. */ |
|---|
| 790 | + xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", |
|---|
| 791 | + __func__, hcd->self.busnum); |
|---|
| 792 | + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
|---|
| 793 | + del_timer_sync(&hcd->rh_timer); |
|---|
| 794 | + |
|---|
| 795 | + if (xhci->shared_hcd) { |
|---|
| 796 | + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
|---|
| 797 | + del_timer_sync(&xhci->shared_hcd->rh_timer); |
|---|
| 798 | + } |
|---|
| 799 | + |
|---|
| 793 | 800 | spin_lock_irq(&xhci->lock); |
|---|
| 794 | 801 | xhci_halt(xhci); |
|---|
| 795 | | - /* Workaround for spurious wakeups at shutdown with HSW */ |
|---|
| 796 | | - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) |
|---|
| 797 | | - xhci_reset(xhci); |
|---|
| 802 | + |
|---|
| 803 | + /* |
|---|
| 804 | + * Workaround for spurious wakeups at shutdown with HSW, and for boot |
|---|
| 805 | + * firmware delay in ADL-P PCH if ports are left in U3 at shutdown |
|---|
| 806 | + */ |
|---|
| 807 | + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP || |
|---|
| 808 | + xhci->quirks & XHCI_RESET_TO_DEFAULT) |
|---|
| 809 | + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); |
|---|
| 810 | + |
|---|
| 798 | 811 | spin_unlock_irq(&xhci->lock); |
|---|
| 799 | 812 | |
|---|
| 800 | 813 | xhci_cleanup_msix(xhci); |
|---|
| .. | .. |
|---|
| 896 | 909 | xhci_set_cmd_ring_deq(xhci); |
|---|
| 897 | 910 | } |
|---|
| 898 | 911 | |
|---|
| 899 | | -static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) |
|---|
| 912 | +/* |
|---|
| 913 | + * Disable port wake bits if do_wakeup is not set. |
|---|
| 914 | + * |
|---|
| 915 | + * Also clear a possible internal port wake state left hanging for ports that |
|---|
| 916 | + * detected termination but never successfully enumerated (trained to U0). |
|---|
| 917 | + * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done |
|---|
| 918 | + * at enumeration clears this wake; force one here as well for unconnected ports. |
|---|
| 919 | + */ |
|---|
| 920 | + |
|---|
| 921 | +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, |
|---|
| 922 | + struct xhci_hub *rhub, |
|---|
| 923 | + bool do_wakeup) |
|---|
| 900 | 924 | { |
|---|
| 901 | | - struct xhci_port **ports; |
|---|
| 902 | | - int port_index; |
|---|
| 903 | 925 | unsigned long flags; |
|---|
| 904 | | - u32 t1, t2; |
|---|
| 926 | + u32 t1, t2, portsc; |
|---|
| 927 | + int i; |
|---|
| 905 | 928 | |
|---|
| 906 | 929 | spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 907 | 930 | |
|---|
| 908 | | - /* disable usb3 ports Wake bits */ |
|---|
| 909 | | - port_index = xhci->usb3_rhub.num_ports; |
|---|
| 910 | | - ports = xhci->usb3_rhub.ports; |
|---|
| 911 | | - while (port_index--) { |
|---|
| 912 | | - t1 = readl(ports[port_index]->addr); |
|---|
| 913 | | - t1 = xhci_port_state_to_neutral(t1); |
|---|
| 914 | | - t2 = t1 & ~PORT_WAKE_BITS; |
|---|
| 915 | | - if (t1 != t2) |
|---|
| 916 | | - writel(t2, ports[port_index]->addr); |
|---|
| 917 | | - } |
|---|
| 931 | + for (i = 0; i < rhub->num_ports; i++) { |
|---|
| 932 | + portsc = readl(rhub->ports[i]->addr); |
|---|
| 933 | + t1 = xhci_port_state_to_neutral(portsc); |
|---|
| 934 | + t2 = t1; |
|---|
| 918 | 935 | |
|---|
| 919 | | - /* disable usb2 ports Wake bits */ |
|---|
| 920 | | - port_index = xhci->usb2_rhub.num_ports; |
|---|
| 921 | | - ports = xhci->usb2_rhub.ports; |
|---|
| 922 | | - while (port_index--) { |
|---|
| 923 | | - t1 = readl(ports[port_index]->addr); |
|---|
| 924 | | - t1 = xhci_port_state_to_neutral(t1); |
|---|
| 925 | | - t2 = t1 & ~PORT_WAKE_BITS; |
|---|
| 926 | | - if (t1 != t2) |
|---|
| 927 | | - writel(t2, ports[port_index]->addr); |
|---|
| 928 | | - } |
|---|
| 936 | + /* clear wake bits if do_wakeup is not set */ |
|---|
| 937 | + if (!do_wakeup) |
|---|
| 938 | + t2 &= ~PORT_WAKE_BITS; |
|---|
| 929 | 939 | |
|---|
| 940 | + /* Don't touch csc bit if connected or connect change is set */ |
|---|
| 941 | + if (!(portsc & (PORT_CSC | PORT_CONNECT))) |
|---|
| 942 | + t2 |= PORT_CSC; |
|---|
| 943 | + |
|---|
| 944 | + if (t1 != t2) { |
|---|
| 945 | + writel(t2, rhub->ports[i]->addr); |
|---|
| 946 | + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", |
|---|
| 947 | + rhub->hcd->self.busnum, i + 1, portsc, t2); |
|---|
| 948 | + } |
|---|
| 949 | + } |
|---|
| 930 | 950 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 931 | 951 | } |
|---|
| 932 | 952 | |
|---|
| .. | .. |
|---|
| 965 | 985 | return false; |
|---|
| 966 | 986 | } |
|---|
| 967 | 987 | |
|---|
| 968 | | -static void xhci_warm_port_reset_quirk(struct xhci_hcd *xhci) |
|---|
| 969 | | -{ |
|---|
| 970 | | - struct xhci_port **ports; |
|---|
| 971 | | - int port_index; |
|---|
| 972 | | - u32 portsc; |
|---|
| 973 | | - |
|---|
| 974 | | - port_index = xhci->usb3_rhub.num_ports; |
|---|
| 975 | | - ports = xhci->usb3_rhub.ports; |
|---|
| 976 | | - while (port_index--) { |
|---|
| 977 | | - portsc = readl(ports[port_index]->addr); |
|---|
| 978 | | - /* Do warm port reset if no USB3 device connected */ |
|---|
| 979 | | - if (!(portsc & PORT_CONNECT)) { |
|---|
| 980 | | - portsc |= PORT_WR; |
|---|
| 981 | | - writel(portsc, ports[port_index]->addr); |
|---|
| 982 | | - /* flush write */ |
|---|
| 983 | | - readl(ports[port_index]->addr); |
|---|
| 984 | | - } |
|---|
| 985 | | - } |
|---|
| 986 | | -} |
|---|
| 987 | | - |
|---|
| 988 | 988 | /* |
|---|
| 989 | 989 | * Stop HC (not bus-specific) |
|---|
| 990 | 990 | * |
|---|
| .. | .. |
|---|
| 1007 | 1007 | return -EINVAL; |
|---|
| 1008 | 1008 | |
|---|
| 1009 | 1009 | /* Clear root port wake on bits if wakeup not allowed. */ |
|---|
| 1010 | | - if (!do_wakeup) |
|---|
| 1011 | | - xhci_disable_port_wake_on_bits(xhci); |
|---|
| 1012 | | - |
|---|
| 1013 | | - /* |
|---|
| 1014 | | - * Do a warm reset for USB3 port to resets the USB3 link, |
|---|
| 1015 | | - * forcing the link to enter the Rx.Detect state. |
|---|
| 1016 | | - */ |
|---|
| 1017 | | - if (xhci->quirks & XHCI_WARM_RESET_ON_SUSPEND) |
|---|
| 1018 | | - xhci_warm_port_reset_quirk(xhci); |
|---|
| 1010 | + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); |
|---|
| 1011 | + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); |
|---|
| 1019 | 1012 | |
|---|
| 1020 | 1013 | if (!HCD_HW_ACCESSIBLE(hcd)) |
|---|
| 1021 | 1014 | return 0; |
|---|
| .. | .. |
|---|
| 1023 | 1016 | xhci_dbc_suspend(xhci); |
|---|
| 1024 | 1017 | |
|---|
| 1025 | 1018 | /* Don't poll the roothubs on bus suspend. */ |
|---|
| 1026 | | - xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); |
|---|
| 1019 | + xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", |
|---|
| 1020 | + __func__, hcd->self.busnum); |
|---|
| 1027 | 1021 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
|---|
| 1028 | 1022 | del_timer_sync(&hcd->rh_timer); |
|---|
| 1029 | 1023 | clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
|---|
| .. | .. |
|---|
| 1128 | 1122 | /* Wait a bit if either of the roothubs need to settle from the |
|---|
| 1129 | 1123 | * transition into bus suspend. |
|---|
| 1130 | 1124 | */ |
|---|
| 1131 | | - if (time_before(jiffies, xhci->bus_state[0].next_statechange) || |
|---|
| 1132 | | - time_before(jiffies, |
|---|
| 1133 | | - xhci->bus_state[1].next_statechange)) |
|---|
| 1125 | + |
|---|
| 1126 | + if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) || |
|---|
| 1127 | + time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange)) |
|---|
| 1134 | 1128 | msleep(100); |
|---|
| 1135 | 1129 | |
|---|
| 1136 | 1130 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
|---|
| .. | .. |
|---|
| 1181 | 1175 | /* re-initialize the HC on Restore Error, or Host Controller Error */ |
|---|
| 1182 | 1176 | if (temp & (STS_SRE | STS_HCE)) { |
|---|
| 1183 | 1177 | reinit_xhc = true; |
|---|
| 1184 | | - xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); |
|---|
| 1178 | + if (!xhci->broken_suspend) |
|---|
| 1179 | + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); |
|---|
| 1185 | 1180 | } |
|---|
| 1186 | 1181 | |
|---|
| 1187 | 1182 | if (reinit_xhc) { |
|---|
| .. | .. |
|---|
| 1199 | 1194 | xhci_dbg(xhci, "Stop HCD\n"); |
|---|
| 1200 | 1195 | xhci_halt(xhci); |
|---|
| 1201 | 1196 | xhci_zero_64b_regs(xhci); |
|---|
| 1202 | | - retval = xhci_reset(xhci); |
|---|
| 1197 | + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); |
|---|
| 1203 | 1198 | spin_unlock_irq(&xhci->lock); |
|---|
| 1204 | 1199 | if (retval) |
|---|
| 1205 | 1200 | return retval; |
|---|
| .. | .. |
|---|
| 1271 | 1266 | * the first wake signalling failed, give it that chance. |
|---|
| 1272 | 1267 | */ |
|---|
| 1273 | 1268 | pending_portevent = xhci_pending_portevent(xhci); |
|---|
| 1274 | | - if (!pending_portevent) { |
|---|
| 1269 | + if (!pending_portevent && !IS_ENABLED(CONFIG_ARCH_ROCKCHIP)) { |
|---|
| 1275 | 1270 | msleep(120); |
|---|
| 1276 | 1271 | pending_portevent = xhci_pending_portevent(xhci); |
|---|
| 1277 | 1272 | } |
|---|
| .. | .. |
|---|
| 1294 | 1289 | usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller)); |
|---|
| 1295 | 1290 | |
|---|
| 1296 | 1291 | /* Re-enable port polling. */ |
|---|
| 1297 | | - xhci_dbg(xhci, "%s: starting port polling.\n", __func__); |
|---|
| 1292 | + xhci_dbg(xhci, "%s: starting usb%d port polling.\n", |
|---|
| 1293 | + __func__, hcd->self.busnum); |
|---|
| 1298 | 1294 | set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
|---|
| 1299 | 1295 | usb_hcd_poll_rh_status(xhci->shared_hcd); |
|---|
| 1300 | 1296 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
|---|
| .. | .. |
|---|
| 1307 | 1303 | |
|---|
| 1308 | 1304 | /*-------------------------------------------------------------------------*/ |
|---|
| 1309 | 1305 | |
|---|
| 1310 | | -/** |
|---|
| 1306 | +/* |
|---|
| 1307 | + * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT), |
|---|
| 1308 | + * we'll copy the actual data into the TRB address register. This is limited to |
|---|
| 1309 | + * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize |
|---|
| 1310 | + * >= 8 bytes. If suitable for IDT, only one Transfer TRB per TD is allowed. |
|---|
| 1311 | + */ |
|---|
| 1312 | +static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
|---|
| 1313 | + gfp_t mem_flags) |
|---|
| 1314 | +{ |
|---|
| 1315 | + if (xhci_urb_suitable_for_idt(urb)) |
|---|
| 1316 | + return 0; |
|---|
| 1317 | + |
|---|
| 1318 | + return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); |
|---|
| 1319 | +} |
|---|
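Returning 0 from this hook skips usb_hcd_map_urb_for_dma() entirely, so no DMA mapping is created and the ring code later copies the payload straight into the TRB. A hypothetical sketch of the kind of check xhci_urb_suitable_for_idt() performs, derived only from the comment above (the real helper lives in xhci.h):

```c
/* Hypothetical sketch based on the comment above, not the real helper:
 * Immediate Data Transfers only apply to small OUT transfers whose payload
 * fits in the 8-byte TRB data-pointer field.
 */
static bool example_urb_fits_idt(struct urb *urb)
{
	return usb_urb_dir_out(urb) &&
	       urb->transfer_buffer_length <= 8 &&
	       usb_endpoint_maxp(&urb->ep->desc) >= 8;
}
```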
| 1320 | + |
|---|
| 1321 | +/* |
|---|
| 1311 | 1322 | * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and |
|---|
| 1312 | 1323 | * HCDs. Find the index for an endpoint given its descriptor. Use the return |
|---|
| 1313 | 1324 | * value to right shift 1 for the bitmask. |
|---|
| .. | .. |
|---|
| 1327 | 1338 | (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; |
|---|
| 1328 | 1339 | return index; |
|---|
| 1329 | 1340 | } |
|---|
| 1341 | +EXPORT_SYMBOL_GPL(xhci_get_endpoint_index); |
|---|
| 1330 | 1342 | |
|---|
| 1331 | 1343 | /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint |
|---|
| 1332 | 1344 | * address from the XHCI endpoint index. |
|---|
| .. | .. |
|---|
| 1345 | 1357 | static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) |
|---|
| 1346 | 1358 | { |
|---|
| 1347 | 1359 | return 1 << (xhci_get_endpoint_index(desc) + 1); |
|---|
| 1348 | | -} |
|---|
| 1349 | | - |
|---|
| 1350 | | -/* Find the flag for this endpoint (for use in the control context). Use the |
|---|
| 1351 | | - * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is |
|---|
| 1352 | | - * bit 1, etc. |
|---|
| 1353 | | - */ |
|---|
| 1354 | | -static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) |
|---|
| 1355 | | -{ |
|---|
| 1356 | | - return 1 << (ep_index + 1); |
|---|
| 1357 | 1360 | } |
|---|
| 1358 | 1361 | |
|---|
| 1359 | 1362 | /* Compute the last valid endpoint context index. Basically, this is the |
|---|
| .. | .. |
|---|
| 1522 | 1525 | return -ENODEV; |
|---|
| 1523 | 1526 | } |
|---|
| 1524 | 1527 | |
|---|
| 1528 | + if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) { |
|---|
| 1529 | + xhci_dbg(xhci, "skip urb for usb offload\n"); |
|---|
| 1530 | + return -EOPNOTSUPP; |
|---|
| 1531 | + } |
|---|
| 1532 | + |
|---|
| 1525 | 1533 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
|---|
| 1526 | 1534 | num_tds = urb->number_of_packets; |
|---|
| 1527 | 1535 | else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && |
|---|
| .. | .. |
|---|
| 1532 | 1540 | else |
|---|
| 1533 | 1541 | num_tds = 1; |
|---|
| 1534 | 1542 | |
|---|
| 1535 | | - urb_priv = kzalloc(sizeof(struct urb_priv) + |
|---|
| 1536 | | - num_tds * sizeof(struct xhci_td), mem_flags); |
|---|
| 1543 | + urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags); |
|---|
| 1537 | 1544 | if (!urb_priv) |
|---|
| 1538 | 1545 | return -ENOMEM; |
|---|
| 1539 | 1546 | |
|---|
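The struct_size() helper (from <linux/overflow.h>) computes the same size as the old open-coded expression but saturates on overflow instead of wrapping. A small equivalence sketch, assuming urb_priv ends in the flexible array member td[] used a few lines below:

```c
#include <linux/overflow.h>

/* struct urb_priv { ...; struct xhci_td td[]; };
 *
 * struct_size(urb_priv, td, num_tds)
 *   == sizeof(*urb_priv) + num_tds * sizeof(urb_priv->td[0])
 * except that an overflowing multiplication yields SIZE_MAX, which makes
 * the following kzalloc() fail instead of allocating a too-small buffer.
 */
urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
```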
| .. | .. |
|---|
| 1723 | 1730 | |
|---|
| 1724 | 1731 | for (; i < urb_priv->num_tds; i++) { |
|---|
| 1725 | 1732 | td = &urb_priv->td[i]; |
|---|
| 1726 | | - list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); |
|---|
| 1733 | + /* TD can already be on cancelled list if ep halted on it */ |
|---|
| 1734 | + if (list_empty(&td->cancelled_td_list)) { |
|---|
| 1735 | + td->cancel_status = TD_DIRTY; |
|---|
| 1736 | + list_add_tail(&td->cancelled_td_list, |
|---|
| 1737 | + &ep->cancelled_td_list); |
|---|
| 1738 | + } |
|---|
| 1727 | 1739 | } |
|---|
| 1728 | 1740 | |
|---|
| 1729 | 1741 | /* Queue a stop endpoint command, but only if this is |
|---|
| .. | .. |
|---|
| 1769 | 1781 | * disabled, so there's no need for mutual exclusion to protect |
|---|
| 1770 | 1782 | * the xhci->devs[slot_id] structure. |
|---|
| 1771 | 1783 | */ |
|---|
| 1772 | | -static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
|---|
| 1773 | | - struct usb_host_endpoint *ep) |
|---|
| 1784 | +int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
|---|
| 1785 | + struct usb_host_endpoint *ep) |
|---|
| 1774 | 1786 | { |
|---|
| 1775 | 1787 | struct xhci_hcd *xhci; |
|---|
| 1776 | 1788 | struct xhci_container_ctx *in_ctx, *out_ctx; |
|---|
| .. | .. |
|---|
| 1830 | 1842 | |
|---|
| 1831 | 1843 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
|---|
| 1832 | 1844 | |
|---|
| 1833 | | - if (xhci->quirks & XHCI_MTK_HOST) |
|---|
| 1834 | | - xhci_mtk_drop_ep_quirk(hcd, udev, ep); |
|---|
| 1835 | | - |
|---|
| 1836 | 1845 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
|---|
| 1837 | 1846 | (unsigned int) ep->desc.bEndpointAddress, |
|---|
| 1838 | 1847 | udev->slot_id, |
|---|
| .. | .. |
|---|
| 1840 | 1849 | (unsigned int) new_add_flags); |
|---|
| 1841 | 1850 | return 0; |
|---|
| 1842 | 1851 | } |
|---|
| 1852 | +EXPORT_SYMBOL_GPL(xhci_drop_endpoint); |
|---|
| 1843 | 1853 | |
|---|
| 1844 | 1854 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
|---|
| 1845 | 1855 | * Only one call to this function is allowed per endpoint before |
|---|
| .. | .. |
|---|
| 1854 | 1864 | * configuration or alt setting is installed in the device, so there's no need |
|---|
| 1855 | 1865 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
|---|
| 1856 | 1866 | */ |
|---|
| 1857 | | -static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
|---|
| 1858 | | - struct usb_host_endpoint *ep) |
|---|
| 1867 | +int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
|---|
| 1868 | + struct usb_host_endpoint *ep) |
|---|
| 1859 | 1869 | { |
|---|
| 1860 | 1870 | struct xhci_hcd *xhci; |
|---|
| 1861 | 1871 | struct xhci_container_ctx *in_ctx; |
|---|
| 1862 | 1872 | unsigned int ep_index; |
|---|
| 1863 | 1873 | struct xhci_input_control_ctx *ctrl_ctx; |
|---|
| 1874 | + struct xhci_ep_ctx *ep_ctx; |
|---|
| 1864 | 1875 | u32 added_ctxs; |
|---|
| 1865 | 1876 | u32 new_add_flags, new_drop_flags; |
|---|
| 1866 | 1877 | struct xhci_virt_device *virt_dev; |
|---|
| .. | .. |
|---|
| 1928 | 1939 | return -ENOMEM; |
|---|
| 1929 | 1940 | } |
|---|
| 1930 | 1941 | |
|---|
| 1931 | | - if (xhci->quirks & XHCI_MTK_HOST) { |
|---|
| 1932 | | - ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); |
|---|
| 1933 | | - if (ret < 0) { |
|---|
| 1934 | | - xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); |
|---|
| 1935 | | - virt_dev->eps[ep_index].new_ring = NULL; |
|---|
| 1936 | | - return ret; |
|---|
| 1937 | | - } |
|---|
| 1938 | | - } |
|---|
| 1939 | | - |
|---|
| 1940 | 1942 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
|---|
| 1941 | 1943 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
|---|
| 1942 | 1944 | |
|---|
| .. | .. |
|---|
| 1951 | 1953 | /* Store the usb_device pointer for later use */ |
|---|
| 1952 | 1954 | ep->hcpriv = udev; |
|---|
| 1953 | 1955 | |
|---|
| 1954 | | - xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); |
|---|
| 1956 | + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
|---|
| 1957 | + trace_xhci_add_endpoint(ep_ctx); |
|---|
| 1955 | 1958 | |
|---|
| 1956 | 1959 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
|---|
| 1957 | 1960 | (unsigned int) ep->desc.bEndpointAddress, |
|---|
| .. | .. |
|---|
| 1960 | 1963 | (unsigned int) new_add_flags); |
|---|
| 1961 | 1964 | return 0; |
|---|
| 1962 | 1965 | } |
|---|
| 1966 | +EXPORT_SYMBOL_GPL(xhci_add_endpoint); |
|---|
| 1963 | 1967 | |
|---|
| 1964 | 1968 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
|---|
| 1965 | 1969 | { |
|---|
| .. | .. |
|---|
| 2825 | 2829 | } |
|---|
| 2826 | 2830 | |
|---|
| 2827 | 2831 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
|---|
| 2832 | + |
|---|
| 2833 | + trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); |
|---|
| 2828 | 2834 | trace_xhci_configure_endpoint(slot_ctx); |
|---|
| 2829 | 2835 | |
|---|
| 2830 | 2836 | if (!ctx_change) |
|---|
| .. | .. |
|---|
| 2867 | 2873 | xhci_finish_resource_reservation(xhci, ctrl_ctx); |
|---|
| 2868 | 2874 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 2869 | 2875 | } |
|---|
| 2876 | + if (ret) |
|---|
| 2877 | + goto failed; |
|---|
| 2878 | + |
|---|
| 2879 | + ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); |
|---|
| 2880 | + if (ret) |
|---|
| 2881 | + xhci_warn(xhci, "sync device context failed, ret=%d", ret); |
|---|
| 2882 | + |
|---|
| 2883 | +failed: |
|---|
| 2870 | 2884 | return ret; |
|---|
| 2871 | 2885 | } |
|---|
| 2872 | 2886 | |
|---|
| .. | .. |
|---|
| 2894 | 2908 | * else should be touching the xhci->devs[slot_id] structure, so we |
|---|
| 2895 | 2909 | * don't need to take the xhci->lock for manipulating that. |
|---|
| 2896 | 2910 | */ |
|---|
| 2897 | | -static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 2911 | +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 2898 | 2912 | { |
|---|
| 2899 | 2913 | int i; |
|---|
| 2900 | 2914 | int ret = 0; |
|---|
| .. | .. |
|---|
| 2983 | 2997 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); |
|---|
| 2984 | 2998 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
|---|
| 2985 | 2999 | virt_dev->eps[i].new_ring = NULL; |
|---|
| 3000 | + xhci_debugfs_create_endpoint(xhci, virt_dev, i); |
|---|
| 2986 | 3001 | } |
|---|
| 2987 | 3002 | command_cleanup: |
|---|
| 2988 | 3003 | kfree(command->completion); |
|---|
| .. | .. |
|---|
| 2990 | 3005 | |
|---|
| 2991 | 3006 | return ret; |
|---|
| 2992 | 3007 | } |
|---|
| 3008 | +EXPORT_SYMBOL_GPL(xhci_check_bandwidth); |
|---|
| 2993 | 3009 | |
|---|
| 2994 | | -static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 3010 | +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 2995 | 3011 | { |
|---|
| 2996 | 3012 | struct xhci_hcd *xhci; |
|---|
| 2997 | 3013 | struct xhci_virt_device *virt_dev; |
|---|
| .. | .. |
|---|
| 3008 | 3024 | for (i = 0; i < 31; i++) { |
|---|
| 3009 | 3025 | if (virt_dev->eps[i].new_ring) { |
|---|
| 3010 | 3026 | xhci_debugfs_remove_endpoint(xhci, virt_dev, i); |
|---|
| 3011 | | - xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
|---|
| 3027 | + if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i)) |
|---|
| 3028 | + xhci_vendor_free_transfer_ring(xhci, virt_dev, i); |
|---|
| 3029 | + else |
|---|
| 3030 | + xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
|---|
| 3031 | + |
|---|
| 3012 | 3032 | virt_dev->eps[i].new_ring = NULL; |
|---|
| 3013 | 3033 | } |
|---|
| 3014 | 3034 | } |
|---|
| 3015 | 3035 | xhci_zero_in_ctx(xhci, virt_dev); |
|---|
| 3016 | 3036 | } |
|---|
| 3037 | +EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); |
|---|
| 3017 | 3038 | |
|---|
| 3018 | 3039 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
|---|
| 3019 | 3040 | struct xhci_container_ctx *in_ctx, |
|---|
| .. | .. |
|---|
| 3027 | 3048 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
|---|
| 3028 | 3049 | } |
|---|
| 3029 | 3050 | |
|---|
| 3030 | | -static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
|---|
| 3031 | | - unsigned int slot_id, unsigned int ep_index, |
|---|
| 3032 | | - struct xhci_dequeue_state *deq_state) |
|---|
| 3051 | +static void xhci_endpoint_disable(struct usb_hcd *hcd, |
|---|
| 3052 | + struct usb_host_endpoint *host_ep) |
|---|
| 3033 | 3053 | { |
|---|
| 3034 | | - struct xhci_input_control_ctx *ctrl_ctx; |
|---|
| 3035 | | - struct xhci_container_ctx *in_ctx; |
|---|
| 3036 | | - struct xhci_ep_ctx *ep_ctx; |
|---|
| 3037 | | - u32 added_ctxs; |
|---|
| 3038 | | - dma_addr_t addr; |
|---|
| 3054 | + struct xhci_hcd *xhci; |
|---|
| 3055 | + struct xhci_virt_device *vdev; |
|---|
| 3056 | + struct xhci_virt_ep *ep; |
|---|
| 3057 | + struct usb_device *udev; |
|---|
| 3058 | + unsigned long flags; |
|---|
| 3059 | + unsigned int ep_index; |
|---|
| 3039 | 3060 | |
|---|
| 3040 | | - in_ctx = xhci->devs[slot_id]->in_ctx; |
|---|
| 3041 | | - ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
|---|
| 3042 | | - if (!ctrl_ctx) { |
|---|
| 3043 | | - xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
|---|
| 3044 | | - __func__); |
|---|
| 3045 | | - return; |
|---|
| 3061 | + xhci = hcd_to_xhci(hcd); |
|---|
| 3062 | +rescan: |
|---|
| 3063 | + spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 3064 | + |
|---|
| 3065 | + udev = (struct usb_device *)host_ep->hcpriv; |
|---|
| 3066 | + if (!udev || !udev->slot_id) |
|---|
| 3067 | + goto done; |
|---|
| 3068 | + |
|---|
| 3069 | + vdev = xhci->devs[udev->slot_id]; |
|---|
| 3070 | + if (!vdev) |
|---|
| 3071 | + goto done; |
|---|
| 3072 | + |
|---|
| 3073 | + ep_index = xhci_get_endpoint_index(&host_ep->desc); |
|---|
| 3074 | + ep = &vdev->eps[ep_index]; |
|---|
| 3075 | + if (!ep) |
|---|
| 3076 | + goto done; |
|---|
| 3077 | + |
|---|
| 3078 | + /* wait for hub_tt_work to finish clearing hub TT */ |
|---|
| 3079 | + if (ep->ep_state & EP_CLEARING_TT) { |
|---|
| 3080 | + spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 3081 | + schedule_timeout_uninterruptible(1); |
|---|
| 3082 | + goto rescan; |
|---|
| 3046 | 3083 | } |
|---|
| 3047 | 3084 | |
|---|
| 3048 | | - xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
|---|
| 3049 | | - xhci->devs[slot_id]->out_ctx, ep_index); |
|---|
| 3050 | | - ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); |
|---|
| 3051 | | - addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, |
|---|
| 3052 | | - deq_state->new_deq_ptr); |
|---|
| 3053 | | - if (addr == 0) { |
|---|
| 3054 | | - xhci_warn(xhci, "WARN Cannot submit config ep after " |
|---|
| 3055 | | - "reset ep command\n"); |
|---|
| 3056 | | - xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", |
|---|
| 3057 | | - deq_state->new_deq_seg, |
|---|
| 3058 | | - deq_state->new_deq_ptr); |
|---|
| 3059 | | - return; |
|---|
| 3060 | | - } |
|---|
| 3061 | | - ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); |
|---|
| 3062 | | - |
|---|
| 3063 | | - added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
|---|
| 3064 | | - xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
|---|
| 3065 | | - xhci->devs[slot_id]->out_ctx, ctrl_ctx, |
|---|
| 3066 | | - added_ctxs, added_ctxs); |
|---|
| 3067 | | -} |
|---|
| 3068 | | - |
|---|
| 3069 | | -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, |
|---|
| 3070 | | - unsigned int stream_id, struct xhci_td *td) |
|---|
| 3071 | | -{ |
|---|
| 3072 | | - struct xhci_dequeue_state deq_state; |
|---|
| 3073 | | - struct usb_device *udev = td->urb->dev; |
|---|
| 3074 | | - |
|---|
| 3075 | | - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, |
|---|
| 3076 | | - "Cleaning up stalled endpoint ring"); |
|---|
| 3077 | | - /* We need to move the HW's dequeue pointer past this TD, |
|---|
| 3078 | | - * or it will attempt to resend it on the next doorbell ring. |
|---|
| 3079 | | - */ |
|---|
| 3080 | | - xhci_find_new_dequeue_state(xhci, udev->slot_id, |
|---|
| 3081 | | - ep_index, stream_id, td, &deq_state); |
|---|
| 3082 | | - |
|---|
| 3083 | | - if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) |
|---|
| 3084 | | - return; |
|---|
| 3085 | | - |
|---|
| 3086 | | - /* HW with the reset endpoint quirk will use the saved dequeue state to |
|---|
| 3087 | | - * issue a configure endpoint command later. |
|---|
| 3088 | | - */ |
|---|
| 3089 | | - if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { |
|---|
| 3090 | | - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, |
|---|
| 3091 | | - "Queueing new dequeue state"); |
|---|
| 3092 | | - xhci_queue_new_dequeue_state(xhci, udev->slot_id, |
|---|
| 3093 | | - ep_index, &deq_state); |
|---|
| 3094 | | - } else { |
|---|
| 3095 | | - /* Better hope no one uses the input context between now and the |
|---|
| 3096 | | - * reset endpoint completion! |
|---|
| 3097 | | - * XXX: No idea how this hardware will react when stream rings |
|---|
| 3098 | | - * are enabled. |
|---|
| 3099 | | - */ |
|---|
| 3100 | | - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
|---|
| 3101 | | - "Setting up input context for " |
|---|
| 3102 | | - "configure endpoint command"); |
|---|
| 3103 | | - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, |
|---|
| 3104 | | - ep_index, &deq_state); |
|---|
| 3105 | | - } |
|---|
| 3085 | + if (ep->ep_state) |
|---|
| 3086 | + xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", |
|---|
| 3087 | + ep->ep_state); |
|---|
| 3088 | +done: |
|---|
| 3089 | + host_ep->hcpriv = NULL; |
|---|
| 3090 | + spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 3106 | 3091 | } |
|---|
| 3107 | 3092 | |
|---|
| 3108 | 3093 | /* |
|---|
| .. | .. |
|---|
| 3136 | 3121 | return; |
|---|
| 3137 | 3122 | udev = (struct usb_device *) host_ep->hcpriv; |
|---|
| 3138 | 3123 | vdev = xhci->devs[udev->slot_id]; |
|---|
| 3124 | + |
|---|
| 3125 | + /* |
|---|
| 3126 | + * vdev may be lost due to xHC restore error and re-initialization |
|---|
| 3127 | + * during S3/S4 resume. A new vdev will be allocated later by |
|---|
| 3128 | + * xhci_discover_or_reset_device() |
|---|
| 3129 | + */ |
|---|
| 3130 | + if (!udev->slot_id || !vdev) |
|---|
| 3131 | + return; |
|---|
| 3139 | 3132 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
|---|
| 3140 | 3133 | ep = &vdev->eps[ep_index]; |
|---|
| 3134 | + if (!ep) |
|---|
| 3135 | + return; |
|---|
| 3141 | 3136 | |
|---|
| 3142 | 3137 | /* Bail out if toggle is already being cleared by an endpoint reset */ |
|---|
| 3143 | 3138 | spin_lock_irqsave(&xhci->lock, flags); |
|---|
| .. | .. |
|---|
| 3198 | 3193 | |
|---|
| 3199 | 3194 | wait_for_completion(stop_cmd->completion); |
|---|
| 3200 | 3195 | |
|---|
| 3196 | + err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); |
|---|
| 3197 | + if (err) { |
|---|
| 3198 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 3199 | + __func__, err); |
|---|
| 3200 | + goto cleanup; |
|---|
| 3201 | + } |
|---|
| 3202 | + |
|---|
| 3201 | 3203 | spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 3202 | 3204 | |
|---|
| 3203 | 3205 | /* config ep command clears toggle if add and drop ep flags are set */ |
|---|
| .. | .. |
|---|
| 3228 | 3230 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 3229 | 3231 | |
|---|
| 3230 | 3232 | wait_for_completion(cfg_cmd->completion); |
|---|
| 3233 | + |
|---|
| 3234 | + err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); |
|---|
| 3235 | + if (err) |
|---|
| 3236 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 3237 | + __func__, err); |
|---|
| 3231 | 3238 | |
|---|
| 3232 | 3239 | xhci_free_command(xhci, cfg_cmd); |
|---|
| 3233 | 3240 | cleanup: |
|---|
| .. | .. |
|---|
| 3530 | 3537 | xhci_free_command(xhci, config_cmd); |
|---|
| 3531 | 3538 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 3532 | 3539 | |
|---|
| 3540 | + for (i = 0; i < num_eps; i++) { |
|---|
| 3541 | + ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
|---|
| 3542 | + xhci_debugfs_create_stream_files(xhci, vdev, ep_index); |
|---|
| 3543 | + } |
|---|
| 3533 | 3544 | /* Subtract 1 for stream 0, which drivers can't use */ |
|---|
| 3534 | 3545 | return num_streams - 1; |
|---|
| 3535 | 3546 | |
|---|
| .. | .. |
|---|
| 3770 | 3781 | /* Wait for the Reset Device command to finish */ |
|---|
| 3771 | 3782 | wait_for_completion(reset_device_cmd->completion); |
|---|
| 3772 | 3783 | |
|---|
| 3784 | + ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); |
|---|
| 3785 | + if (ret) { |
|---|
| 3786 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 3787 | + __func__, ret); |
|---|
| 3788 | + goto command_cleanup; |
|---|
| 3789 | + } |
|---|
| 3790 | + |
|---|
| 3773 | 3791 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, |
|---|
| 3774 | 3792 | * unless we tried to reset a slot ID that wasn't enabled, |
|---|
| 3775 | 3793 | * or the device wasn't in the addressed or configured state. |
|---|
| .. | .. |
|---|
| 3855 | 3873 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
|---|
| 3856 | 3874 | struct xhci_virt_device *virt_dev; |
|---|
| 3857 | 3875 | struct xhci_slot_ctx *slot_ctx; |
|---|
| 3876 | + unsigned long flags; |
|---|
| 3858 | 3877 | int i, ret; |
|---|
| 3859 | 3878 | |
|---|
| 3860 | 3879 | /* |
|---|
| .. | .. |
|---|
| 3881 | 3900 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
|---|
| 3882 | 3901 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
|---|
| 3883 | 3902 | } |
|---|
| 3884 | | - xhci_debugfs_remove_slot(xhci, udev->slot_id); |
|---|
| 3885 | 3903 | virt_dev->udev = NULL; |
|---|
| 3886 | 3904 | xhci_disable_slot(xhci, udev->slot_id); |
|---|
| 3905 | + |
|---|
| 3906 | + spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 3887 | 3907 | xhci_free_virt_device(xhci, udev->slot_id); |
|---|
| 3908 | + spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 3909 | + |
|---|
| 3888 | 3910 | } |
|---|
| 3889 | 3911 | |
|---|
| 3890 | 3912 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
|---|
| .. | .. |
|---|
| 3897 | 3919 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
|---|
| 3898 | 3920 | if (!command) |
|---|
| 3899 | 3921 | return -ENOMEM; |
|---|
| 3922 | + |
|---|
| 3923 | + xhci_debugfs_remove_slot(xhci, slot_id); |
|---|
| 3900 | 3924 | |
|---|
| 3901 | 3925 | spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 3902 | 3926 | /* Don't disable the slot if the host controller is dead. */ |
|---|
| .. | .. |
|---|
| 4014 | 4038 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
|---|
| 4015 | 4039 | goto disable_slot; |
|---|
| 4016 | 4040 | } |
|---|
| 4041 | + |
|---|
| 4042 | + ret = xhci_vendor_sync_dev_ctx(xhci, slot_id); |
|---|
| 4043 | + if (ret) { |
|---|
| 4044 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 4045 | + __func__, ret); |
|---|
| 4046 | + goto disable_slot; |
|---|
| 4047 | + } |
|---|
| 4048 | + |
|---|
| 4017 | 4049 | vdev = xhci->devs[slot_id]; |
|---|
| 4018 | 4050 | slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); |
|---|
| 4019 | 4051 | trace_xhci_alloc_dev(slot_ctx); |
|---|
| .. | .. |
|---|
| 4127 | 4159 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
|---|
| 4128 | 4160 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
|---|
| 4129 | 4161 | |
|---|
| 4162 | + trace_xhci_address_ctrl_ctx(ctrl_ctx); |
|---|
| 4130 | 4163 | spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 4131 | 4164 | trace_xhci_setup_device(virt_dev); |
|---|
| 4132 | 4165 | ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, |
|---|
| .. | .. |
|---|
| 4142 | 4175 | |
|---|
| 4143 | 4176 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
|---|
| 4144 | 4177 | wait_for_completion(command->completion); |
|---|
| 4178 | + |
|---|
| 4179 | + ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); |
|---|
| 4180 | + if (ret) { |
|---|
| 4181 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 4182 | + __func__, ret); |
|---|
| 4183 | + goto out; |
|---|
| 4184 | + } |
|---|
| 4145 | 4185 | |
|---|
| 4146 | 4186 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
|---|
| 4147 | 4187 | * the SetAddress() "recovery interval" required by USB and aborting the |
|---|
| .. | .. |
|---|
| 4212 | 4252 | /* Zero the input context control for later use */ |
|---|
| 4213 | 4253 | ctrl_ctx->add_flags = 0; |
|---|
| 4214 | 4254 | ctrl_ctx->drop_flags = 0; |
|---|
| 4255 | + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
|---|
| 4256 | + udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
|---|
| 4215 | 4257 | |
|---|
| 4216 | 4258 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
|---|
| 4217 | 4259 | "Internal device address = %d", |
|---|
| .. | .. |
|---|
| 4225 | 4267 | return ret; |
|---|
| 4226 | 4268 | } |
|---|
| 4227 | 4269 | |
|---|
| 4228 | | -static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 4270 | +int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 4229 | 4271 | { |
|---|
| 4230 | 4272 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); |
|---|
| 4231 | 4273 | } |
|---|
| 4274 | +EXPORT_SYMBOL_GPL(xhci_address_device); |
|---|
| 4232 | 4275 | |
|---|
| 4233 | 4276 | static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) |
|---|
| 4234 | 4277 | { |
|---|
| .. | .. |
|---|
| 4288 | 4331 | return -ENOMEM; |
|---|
| 4289 | 4332 | } |
|---|
| 4290 | 4333 | |
|---|
| 4334 | + ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id); |
|---|
| 4335 | + if (ret) { |
|---|
| 4336 | + spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 4337 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 4338 | + __func__, ret); |
|---|
| 4339 | + return ret; |
|---|
| 4340 | + } |
|---|
| 4341 | + |
|---|
| 4291 | 4342 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); |
|---|
| 4292 | 4343 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 4293 | 4344 | |
|---|
| .. | .. |
|---|
| 4310 | 4361 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 4311 | 4362 | } |
|---|
| 4312 | 4363 | return ret; |
|---|
| 4364 | +} |
|---|
| 4365 | + |
|---|
| 4366 | +struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci) |
|---|
| 4367 | +{ |
|---|
| 4368 | + return xhci->vendor_ops; |
|---|
| 4369 | +} |
|---|
| 4370 | +EXPORT_SYMBOL_GPL(xhci_vendor_get_ops); |
|---|
| 4371 | + |
|---|
| 4372 | +int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id) |
|---|
| 4373 | +{ |
|---|
| 4374 | + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); |
|---|
| 4375 | + |
|---|
| 4376 | + if (ops && ops->sync_dev_ctx) |
|---|
| 4377 | + return ops->sync_dev_ctx(xhci, slot_id); |
|---|
| 4378 | + return 0; |
|---|
| 4379 | +} |
|---|
| 4380 | + |
|---|
| 4381 | +bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb) |
|---|
| 4382 | +{ |
|---|
| 4383 | + struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); |
|---|
| 4384 | + |
|---|
| 4385 | + if (ops && ops->usb_offload_skip_urb) |
|---|
| 4386 | + return ops->usb_offload_skip_urb(xhci, urb); |
|---|
| 4387 | + return false; |
|---|
| 4313 | 4388 | } |
|---|
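These wrappers let an out-of-tree offload driver observe or take over parts of the data path. A hedged sketch of how a vendor driver might populate the ops table; the field names match the calls above, but the registration point (assigning xhci->vendor_ops directly) is an assumption about driver-specific glue:

```c
/* Hypothetical vendor glue -- only sync_dev_ctx and usb_offload_skip_urb
 * are demonstrated, matching the wrappers above.  How vendor_ops actually
 * gets installed (platform data, DT match, etc.) is driver-specific.
 */
static int my_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
{
	/* copy the offload core's view of the device context back into
	 * xhci->devs[slot_id]->out_ctx so the generic code sees it */
	return 0;
}

static bool my_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
{
	/* return true for endpoints whose rings are owned by the offload core */
	return false;
}

static struct xhci_vendor_ops my_vendor_ops = {
	.sync_dev_ctx		= my_sync_dev_ctx,
	.usb_offload_skip_urb	= my_skip_urb,
};

/* e.g. in the vendor platform driver's probe:
 *	xhci->vendor_ops = &my_vendor_ops;
 */
```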
| 4314 | 4389 | |
|---|
| 4315 | 4390 | #ifdef CONFIG_PM |
|---|
| .. | .. |
|---|
| 4510 | 4585 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
|---|
| 4511 | 4586 | int portnum = udev->portnum - 1; |
|---|
| 4512 | 4587 | |
|---|
| 4513 | | - if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || |
|---|
| 4514 | | - !udev->lpm_capable) |
|---|
| 4588 | + if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) |
|---|
| 4515 | 4589 | return 0; |
|---|
| 4516 | 4590 | |
|---|
| 4517 | 4591 | /* we only support lpm for non-hub device connected to root hub yet */ |
|---|
| .. | .. |
|---|
| 4615 | 4689 | break; |
|---|
| 4616 | 4690 | } |
|---|
| 4617 | 4691 | /* Otherwise the calculation is the same as isoc eps */ |
|---|
| 4618 | | - /* fall through */ |
|---|
| 4692 | + fallthrough; |
|---|
| 4619 | 4693 | case USB_ENDPOINT_XFER_ISOC: |
|---|
| 4620 | 4694 | timeout_ns = xhci_service_interval_to_ns(desc); |
|---|
| 4621 | 4695 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
|---|
| .. | .. |
|---|
| 5052 | 5126 | return -ENOMEM; |
|---|
| 5053 | 5127 | } |
|---|
| 5054 | 5128 | |
|---|
| 5129 | + ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id); |
|---|
| 5130 | + if (ret) { |
|---|
| 5131 | + xhci_warn(xhci, "%s: Failed to sync device context, err=%d", |
|---|
| 5132 | + __func__, ret); |
|---|
| 5133 | + xhci_free_command(xhci, config_cmd); |
|---|
| 5134 | + spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 5135 | + return ret; |
|---|
| 5136 | + } |
|---|
| 5137 | + |
|---|
| 5055 | 5138 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
|---|
| 5056 | 5139 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
|---|
| 5057 | 5140 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
|---|
| .. | .. |
|---|
| 5188 | 5271 | /* xHCI private pointer was set in xhci_pci_probe for the second |
|---|
| 5189 | 5272 | * registered roothub. |
|---|
| 5190 | 5273 | */ |
|---|
| 5191 | | - if (xhci->quirks & XHCI_DIS_AUTOSUSPEND) |
|---|
| 5192 | | - xhci->shared_hcd->self.root_hub->quirks |= |
|---|
| 5193 | | - USB_QUIRK_AUTO_SUSPEND; |
|---|
| 5194 | 5274 | return 0; |
|---|
| 5195 | 5275 | } |
|---|
| 5196 | 5276 | |
|---|
| .. | .. |
|---|
| 5230 | 5310 | |
|---|
| 5231 | 5311 | xhci_dbg(xhci, "Resetting HCD\n"); |
|---|
| 5232 | 5312 | /* Reset the internal HC memory state and registers. */ |
|---|
| 5233 | | - retval = xhci_reset(xhci); |
|---|
| 5313 | + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); |
|---|
| 5234 | 5314 | if (retval) |
|---|
| 5235 | 5315 | return retval; |
|---|
| 5236 | 5316 | xhci_dbg(xhci, "Reset complete\n"); |
|---|
| .. | .. |
|---|
| 5277 | 5357 | } |
|---|
| 5278 | 5358 | EXPORT_SYMBOL_GPL(xhci_gen_setup); |
|---|
| 5279 | 5359 | |
|---|
| 5280 | | -static phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_hcd *hcd, |
|---|
| 5281 | | - unsigned int intr_num, dma_addr_t *dma) |
|---|
| 5282 | | -{ |
|---|
| 5283 | | - struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
|---|
| 5284 | | - struct device *dev = hcd->self.sysdev; |
|---|
| 5285 | | - struct sg_table sgt; |
|---|
| 5286 | | - phys_addr_t pa; |
|---|
| 5287 | | - |
|---|
| 5288 | | - if (intr_num > xhci->max_interrupters) { |
|---|
| 5289 | | - xhci_err(xhci, "intr num %d > max intrs %d\n", intr_num, |
|---|
| 5290 | | - xhci->max_interrupters); |
|---|
| 5291 | | - return 0; |
|---|
| 5292 | | - } |
|---|
| 5293 | | - |
|---|
| 5294 | | - if (!(xhci->xhc_state & XHCI_STATE_HALTED) && |
|---|
| 5295 | | - xhci->sec_event_ring && xhci->sec_event_ring[intr_num] |
|---|
| 5296 | | - && xhci->sec_event_ring[intr_num]->first_seg) { |
|---|
| 5297 | | - |
|---|
| 5298 | | - dma_get_sgtable(dev, &sgt, |
|---|
| 5299 | | - xhci->sec_event_ring[intr_num]->first_seg->trbs, |
|---|
| 5300 | | - xhci->sec_event_ring[intr_num]->first_seg->dma, |
|---|
| 5301 | | - TRB_SEGMENT_SIZE); |
|---|
| 5302 | | - |
|---|
| 5303 | | - *dma = xhci->sec_event_ring[intr_num]->first_seg->dma; |
|---|
| 5304 | | - |
|---|
| 5305 | | - pa = page_to_phys(sg_page(sgt.sgl)); |
|---|
| 5306 | | - sg_free_table(&sgt); |
|---|
| 5307 | | - |
|---|
| 5308 | | - return pa; |
|---|
| 5309 | | - } |
|---|
| 5310 | | - |
|---|
| 5311 | | - return 0; |
|---|
| 5312 | | -} |
|---|
| 5313 | | - |
|---|
| 5314 | | -static phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_hcd *hcd, |
|---|
| 5315 | | - struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma) |
|---|
| 5316 | | -{ |
|---|
| 5317 | | - int ret; |
|---|
| 5318 | | - unsigned int ep_index; |
|---|
| 5319 | | - struct xhci_virt_device *virt_dev; |
|---|
| 5320 | | - struct device *dev = hcd->self.sysdev; |
|---|
| 5321 | | - struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
|---|
| 5322 | | - struct sg_table sgt; |
|---|
| 5323 | | - phys_addr_t pa; |
|---|
| 5324 | | - |
|---|
| 5325 | | - ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
|---|
| 5326 | | - if (ret <= 0) { |
|---|
| 5327 | | - xhci_err(xhci, "%s: invalid args\n", __func__); |
|---|
| 5328 | | - return 0; |
|---|
| 5329 | | - } |
|---|
| 5330 | | - |
|---|
| 5331 | | - virt_dev = xhci->devs[udev->slot_id]; |
|---|
| 5332 | | - ep_index = xhci_get_endpoint_index(&ep->desc); |
|---|
| 5333 | | - |
|---|
| 5334 | | - if (virt_dev->eps[ep_index].ring && |
|---|
| 5335 | | - virt_dev->eps[ep_index].ring->first_seg) { |
|---|
| 5336 | | - |
|---|
| 5337 | | - dma_get_sgtable(dev, &sgt, |
|---|
| 5338 | | - virt_dev->eps[ep_index].ring->first_seg->trbs, |
|---|
| 5339 | | - virt_dev->eps[ep_index].ring->first_seg->dma, |
|---|
| 5340 | | - TRB_SEGMENT_SIZE); |
|---|
| 5341 | | - |
|---|
| 5342 | | - *dma = virt_dev->eps[ep_index].ring->first_seg->dma; |
|---|
| 5343 | | - |
|---|
| 5344 | | - pa = page_to_phys(sg_page(sgt.sgl)); |
|---|
| 5345 | | - sg_free_table(&sgt); |
|---|
| 5346 | | - |
|---|
| 5347 | | - return pa; |
|---|
| 5348 | | - } |
|---|
| 5349 | | - |
|---|
| 5350 | | - return 0; |
|---|
| 5351 | | -} |
|---|
| 5352 | | - |
|---|
| 5353 | | -static int xhci_stop_endpoint(struct usb_hcd *hcd, |
|---|
| 5354 | | - struct usb_device *udev, struct usb_host_endpoint *ep) |
|---|
| 5360 | +static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, |
|---|
| 5361 | + struct usb_host_endpoint *ep) |
|---|
| 5355 | 5362 | { |
|---|
| 5356 | 5363 | struct xhci_hcd *xhci; |
|---|
| 5364 | + struct usb_device *udev; |
|---|
| 5365 | + unsigned int slot_id; |
|---|
| 5357 | 5366 | unsigned int ep_index; |
|---|
| 5358 | | - struct xhci_virt_device *virt_dev; |
|---|
| 5359 | | - struct xhci_command *cmd; |
|---|
| 5360 | 5367 | unsigned long flags; |
|---|
| 5361 | | - int ret = 0; |
|---|
| 5362 | | - |
|---|
| 5363 | | - if (!hcd || !udev || !ep) |
|---|
| 5364 | | - return -EINVAL; |
|---|
| 5365 | 5368 | |
|---|
| 5366 | 5369 | xhci = hcd_to_xhci(hcd); |
|---|
| 5367 | | - cmd = xhci_alloc_command(xhci, true, GFP_NOIO); |
|---|
| 5368 | | - if (!cmd) |
|---|
| 5369 | | - return -ENOMEM; |
|---|
| 5370 | 5370 | |
|---|
| 5371 | 5371 | spin_lock_irqsave(&xhci->lock, flags); |
|---|
| 5372 | | - virt_dev = xhci->devs[udev->slot_id]; |
|---|
| 5373 | | - if (!virt_dev) { |
|---|
| 5374 | | - ret = -ENODEV; |
|---|
| 5375 | | - goto err; |
|---|
| 5376 | | - } |
|---|
| 5377 | | - |
|---|
| 5372 | + udev = (struct usb_device *)ep->hcpriv; |
|---|
| 5373 | + slot_id = udev->slot_id; |
|---|
| 5378 | 5374 | ep_index = xhci_get_endpoint_index(&ep->desc); |
|---|
| 5379 | | - if (virt_dev->eps[ep_index].ring && |
|---|
| 5380 | | - virt_dev->eps[ep_index].ring->dequeue) { |
|---|
| 5381 | | - ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id, |
|---|
| 5382 | | - ep_index, 0); |
|---|
| 5383 | | - if (ret) |
|---|
| 5384 | | - goto err; |
|---|
| 5385 | 5375 | |
|---|
| 5386 | | - xhci_ring_cmd_db(xhci); |
|---|
| 5387 | | - spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 5388 | | - |
|---|
| 5389 | | - /* Wait for stop endpoint command to finish */ |
|---|
| 5390 | | - wait_for_completion(cmd->completion); |
|---|
| 5391 | | - |
|---|
| 5392 | | - if (cmd->status == COMP_COMMAND_ABORTED || |
|---|
| 5393 | | - cmd->status == COMP_STOPPED) { |
|---|
| 5394 | | - xhci_warn(xhci, |
|---|
| 5395 | | - "stop endpoint command timeout for ep%d%s\n", |
|---|
| 5396 | | - usb_endpoint_num(&ep->desc), |
|---|
| 5397 | | - usb_endpoint_dir_in(&ep->desc) ? "in" : "out"); |
|---|
| 5398 | | - ret = -ETIME; |
|---|
| 5399 | | - } |
|---|
| 5400 | | - goto free_cmd; |
|---|
| 5401 | | - } |
|---|
| 5402 | | - |
|---|
| 5403 | | -err: |
|---|
| 5376 | + xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; |
|---|
| 5377 | + xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
|---|
| 5404 | 5378 | spin_unlock_irqrestore(&xhci->lock, flags); |
|---|
| 5405 | | -free_cmd: |
|---|
| 5406 | | - xhci_free_command(xhci, cmd); |
|---|
| 5407 | | - return ret; |
|---|
| 5408 | 5379 | } |
|---|
| 5409 | 5380 | |
|---|
| 5410 | 5381 | static const struct hc_driver xhci_hc_driver = { |
|---|
| .. | .. |
|---|
| 5416 | 5387 | * generic hardware linkage |
|---|
| 5417 | 5388 | */ |
|---|
| 5418 | 5389 | .irq = xhci_irq, |
|---|
| 5419 | | - .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED, |
|---|
| 5390 | + .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | |
|---|
| 5391 | + HCD_BH, |
|---|
| 5420 | 5392 | |
|---|
| 5421 | 5393 | /* |
|---|
| 5422 | 5394 | * basic lifecycle operations |
|---|
| .. | .. |
|---|
| 5429 | 5401 | /* |
|---|
| 5430 | 5402 | * managing i/o requests and associated device resources |
|---|
| 5431 | 5403 | */ |
|---|
| 5404 | + .map_urb_for_dma = xhci_map_urb_for_dma, |
|---|
| 5432 | 5405 | .urb_enqueue = xhci_urb_enqueue, |
|---|
| 5433 | 5406 | .urb_dequeue = xhci_urb_dequeue, |
|---|
| 5434 | 5407 | .alloc_dev = xhci_alloc_dev, |
|---|
| .. | .. |
|---|
| 5437 | 5410 | .free_streams = xhci_free_streams, |
|---|
| 5438 | 5411 | .add_endpoint = xhci_add_endpoint, |
|---|
| 5439 | 5412 | .drop_endpoint = xhci_drop_endpoint, |
|---|
| 5413 | + .endpoint_disable = xhci_endpoint_disable, |
|---|
| 5440 | 5414 | .endpoint_reset = xhci_endpoint_reset, |
|---|
| 5441 | 5415 | .check_bandwidth = xhci_check_bandwidth, |
|---|
| 5442 | 5416 | .reset_bandwidth = xhci_reset_bandwidth, |
|---|
| .. | .. |
|---|
| 5467 | 5441 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, |
|---|
| 5468 | 5442 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, |
|---|
| 5469 | 5443 | .find_raw_port_number = xhci_find_raw_port_number, |
|---|
| 5470 | | - .sec_event_ring_setup = xhci_sec_event_ring_setup, |
|---|
| 5471 | | - .sec_event_ring_cleanup = xhci_sec_event_ring_cleanup, |
|---|
| 5472 | | - .get_sec_event_ring_phys_addr = xhci_get_sec_event_ring_phys_addr, |
|---|
| 5473 | | - .get_xfer_ring_phys_addr = xhci_get_xfer_ring_phys_addr, |
|---|
| 5474 | | - .stop_endpoint = xhci_stop_endpoint, |
|---|
| 5444 | + .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, |
|---|
| 5475 | 5445 | }; |
|---|
| 5476 | 5446 | |
|---|
| 5477 | 5447 | void xhci_init_driver(struct hc_driver *drv, |
|---|
| .. | .. |
|---|
| 5488 | 5458 | drv->reset = over->reset; |
|---|
| 5489 | 5459 | if (over->start) |
|---|
| 5490 | 5460 | drv->start = over->start; |
|---|
| 5461 | + if (over->add_endpoint) |
|---|
| 5462 | + drv->add_endpoint = over->add_endpoint; |
|---|
| 5463 | + if (over->drop_endpoint) |
|---|
| 5464 | + drv->drop_endpoint = over->drop_endpoint; |
|---|
| 5465 | + if (over->check_bandwidth) |
|---|
| 5466 | + drv->check_bandwidth = over->check_bandwidth; |
|---|
| 5467 | + if (over->reset_bandwidth) |
|---|
| 5468 | + drv->reset_bandwidth = over->reset_bandwidth; |
|---|
| 5469 | + if (over->address_device) |
|---|
| 5470 | + drv->address_device = over->address_device; |
|---|
| 5471 | + if (over->bus_suspend) |
|---|
| 5472 | + drv->bus_suspend = over->bus_suspend; |
|---|
| 5473 | + if (over->bus_resume) |
|---|
| 5474 | + drv->bus_resume = over->bus_resume; |
|---|
| 5491 | 5475 | } |
|---|
| 5492 | 5476 | } |
|---|
| 5493 | 5477 | EXPORT_SYMBOL_GPL(xhci_init_driver); |
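With the extra override slots, a platform glue module can divert endpoint and bandwidth management as well as address handling while reusing the stock implementations. A hedged sketch of the expected usage pattern; the override struct layout is inferred from the assignments above and from the xhci_* symbols this patch exports, and the my_* names are hypothetical:

```c
/* Hypothetical platform glue.  xhci_init_driver() starts from the stock
 * xhci hc_driver and then applies only the non-NULL overrides, so unset
 * fields keep their defaults.
 */
static int my_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	/* vendor pre/post work could go here */
	return xhci_address_device(hcd, udev);
}

static const struct xhci_driver_overrides my_overrides = {
	.address_device	= my_address_device,
	/* .add_endpoint, .drop_endpoint, .check_bandwidth,
	 * .reset_bandwidth, .bus_suspend, .bus_resume as needed */
};

static struct hc_driver my_xhci_hc_driver;

static void my_glue_setup(void)
{
	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
}
```

The wrapped helpers are the ones exported with EXPORT_SYMBOL_GPL earlier in this patch, which is what makes this override pattern usable from a separate module.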
|---|