.. | .. |
---|
1 | | -/* SPDX-License-Identifier: GPL-2.0 */ |
---|
2 | 1 | /* |
---|
3 | 2 | * Linux DHD Bus Module for PCIE |
---|
4 | 3 | * |
---|
5 | | - * Copyright (C) 1999-2019, Broadcom Corporation |
---|
6 | | - * |
---|
| 4 | + * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation |
---|
| 5 | + * |
---|
| 6 | + * Copyright (C) 1999-2017, Broadcom Corporation |
---|
| 7 | + * |
---|
7 | 8 | * Unless you and Broadcom execute a separate written software license |
---|
8 | 9 | * agreement governing use of this software, this software is licensed to you |
---|
9 | 10 | * under the terms of the GNU General Public License version 2 (the "GPL"), |
---|
10 | 11 | * available at http://www.broadcom.com/licenses/GPLv2.php, with the |
---|
11 | 12 | * following added to such license: |
---|
12 | | - * |
---|
| 13 | + * |
---|
13 | 14 | * As a special exception, the copyright holders of this software give you |
---|
14 | 15 | * permission to link this software with independent modules, and to copy and |
---|
15 | 16 | * distribute the resulting executable under terms of your choice, provided that |
---|
.. | .. |
---|
17 | 18 | * the license of that module. An independent module is a module which is not |
---|
18 | 19 | * derived from this software. The special exception does not apply to any |
---|
19 | 20 | * modifications of the software. |
---|
20 | | - * |
---|
| 21 | + * |
---|
21 | 22 | * Notwithstanding the above, under no circumstances may you combine this |
---|
22 | 23 | * software in any way with any other Broadcom software provided under a license |
---|
23 | 24 | * other than the GPL, without Broadcom's express prior written consent. |
---|
.. | .. |
---|
25 | 26 | * |
---|
26 | 27 | * <<Broadcom-WL-IPTag/Open:>> |
---|
27 | 28 | * |
---|
28 | | - * $Id: dhd_pcie_linux.c 608659 2015-12-29 01:18:33Z $ |
---|
| 29 | + * $Id: dhd_pcie_linux.c 701741 2017-05-26 08:18:08Z $ |
---|
29 | 30 | */ |
---|
30 | | - |
---|
31 | 31 | |
---|
32 | 32 | /* include files */ |
---|
33 | 33 | #include <typedefs.h> |
---|
.. | .. |
---|
52 | 52 | #include <pcicfg.h> |
---|
53 | 53 | #include <dhd_pcie.h> |
---|
54 | 54 | #include <dhd_linux.h> |
---|
| 55 | +#ifdef OEM_ANDROID |
---|
55 | 56 | #ifdef CONFIG_ARCH_MSM |
---|
56 | | -#if defined(CONFIG_ARCH_MSM8994) || defined(CONFIG_ARCH_MSM8996) |
---|
| 57 | +#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996) |
---|
57 | 58 | #include <linux/msm_pcie.h> |
---|
58 | 59 | #else |
---|
59 | 60 | #include <mach/msm_pcie.h> |
---|
60 | | -#endif /* CONFIG_ARCH_MSM8994 */ |
---|
| 61 | +#endif /* CONFIG_PCI_MSM */ |
---|
61 | 62 | #endif /* CONFIG_ARCH_MSM */ |
---|
| 63 | +#endif /* OEM_ANDROID */ |
---|
| 64 | + |
---|
| 65 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 66 | +#include <linux/pm_runtime.h> |
---|
| 67 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 68 | + |
---|
| 69 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 70 | +#ifndef AUTO_SUSPEND_TIMEOUT |
---|
| 71 | +#define AUTO_SUSPEND_TIMEOUT 1000 |
---|
| 72 | +#endif /* AUTO_SUSPEND_TIMEOUT */ |
---|
| 73 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 74 | + |
---|
| 75 | +#include <linux/irq.h> |
---|
| 76 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 77 | +#include <asm/dma-iommu.h> |
---|
| 78 | +#include <linux/iommu.h> |
---|
| 79 | +#include <linux/of.h> |
---|
| 80 | +#include <linux/platform_device.h> |
---|
| 81 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
62 | 82 | |
---|
63 | 83 | #define PCI_CFG_RETRY 10 |
---|
64 | 84 | #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ |
---|
65 | 85 | #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */ |
---|
66 | 86 | |
---|
67 | | -#define OSL_PKTTAG_CLEAR(p) \ |
---|
68 | | -do { \ |
---|
69 | | - struct sk_buff *s = (struct sk_buff *)(p); \ |
---|
70 | | - ASSERT(OSL_PKTTAG_SZ == 32); \ |
---|
71 | | - *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ |
---|
72 | | - *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ |
---|
73 | | - *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ |
---|
74 | | - *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ |
---|
75 | | -} while (0) |
---|
| 87 | +#ifdef FORCE_TPOWERON |
---|
| 88 | +extern uint32 tpoweron_scale; |
---|
| 89 | +#endif /* FORCE_TPOWERON */ |
---|
76 | 90 | |
---|
| 91 | +#if defined(CONFIG_ARCH_MSM) |
---|
| 92 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) |
---|
| 93 | +#ifndef MSM_PCIE_CONFIG_NO_CFG_RESTORE |
---|
| 94 | +#define MSM_PCIE_CONFIG_NO_CFG_RESTORE 0 |
---|
| 95 | +#endif /* MSM_PCIE_CONFIG_NO_CFG_RESTORE */ |
---|
| 96 | +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) */ |
---|
| 97 | +#endif /* CONFIG_ARCH_MSM */ |
---|
77 | 98 | |
---|
78 | 99 | /* user defined data structures */ |
---|
79 | | - |
---|
80 | | -typedef struct dhd_pc_res { |
---|
81 | | - uint32 bar0_size; |
---|
82 | | - void* bar0_addr; |
---|
83 | | - uint32 bar1_size; |
---|
84 | | - void* bar1_addr; |
---|
85 | | -} pci_config_res, *pPci_config_res; |
---|
86 | 100 | |
---|
87 | 101 | typedef bool (*dhdpcie_cb_fn_t)(void *); |
---|
88 | 102 | |
---|
89 | 103 | typedef struct dhdpcie_info |
---|
90 | 104 | { |
---|
91 | 105 | dhd_bus_t *bus; |
---|
92 | | - osl_t *osh; |
---|
| 106 | + osl_t *osh; |
---|
93 | 107 | struct pci_dev *dev; /* pci device handle */ |
---|
94 | | - volatile char *regs; /* pci device memory va */ |
---|
95 | | - volatile char *tcm; /* pci device memory va */ |
---|
96 | | - uint32 tcm_size; /* pci device memory size */ |
---|
| 108 | + volatile char *regs; /* pci device memory va */ |
---|
| 109 | + volatile char *tcm; /* pci device memory va */ |
---|
| 110 | + uint32 bar1_size; /* pci device memory size */ |
---|
| 111 | + uint32 curr_bar1_win; /* current PCIEBar1Window setting */ |
---|
97 | 112 | struct pcos_info *pcos_info; |
---|
98 | 113 | uint16 last_intrstatus; /* to cache intrstatus */ |
---|
99 | 114 | int irq; |
---|
.. | .. |
---|
103 | 118 | #ifdef BCMPCIE_OOB_HOST_WAKE |
---|
104 | 119 | void *os_cxt; /* Pointer to per-OS private data */ |
---|
105 | 120 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 121 | +#ifdef DHD_WAKE_STATUS |
---|
| 122 | + spinlock_t pcie_lock; |
---|
| 123 | + unsigned int total_wake_count; |
---|
| 124 | + int pkt_wake; |
---|
| 125 | + int wake_irq; |
---|
| 126 | +#endif /* DHD_WAKE_STATUS */ |
---|
| 127 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 128 | + void *smmu_cxt; |
---|
| 129 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
106 | 130 | } dhdpcie_info_t; |
---|
107 | | - |
---|
108 | 131 | |
---|
109 | 132 | struct pcos_info { |
---|
110 | 133 | dhdpcie_info_t *pc; |
---|
.. | .. |
---|
126 | 149 | spinlock_t oob_irq_spinlock; |
---|
127 | 150 | void *dev; /* handle to the underlying device */ |
---|
128 | 151 | } dhdpcie_os_info_t; |
---|
| 152 | +static irqreturn_t wlan_oob_irq(int irq, void *data); |
---|
| 153 | +#ifdef CUSTOMER_HW2 |
---|
| 154 | +extern struct brcm_pcie_wake brcm_pcie_wake; |
---|
| 155 | +#endif /* CUSTOMER_HW2 */ |
---|
129 | 156 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 157 | + |
---|
| 158 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 159 | +typedef struct dhdpcie_smmu_info { |
---|
| 160 | + struct dma_iommu_mapping *smmu_mapping; |
---|
| 161 | + dma_addr_t smmu_iova_start; |
---|
| 162 | + size_t smmu_iova_len; |
---|
| 163 | +} dhdpcie_smmu_info_t; |
---|
| 164 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
130 | 165 | |
---|
131 | 166 | /* function declarations */ |
---|
132 | 167 | static int __devinit |
---|
.. | .. |
---|
137 | 172 | static irqreturn_t dhdpcie_isr(int irq, void *arg); |
---|
138 | 173 | /* OS Routine functions for PCI suspend/resume */ |
---|
139 | 174 | |
---|
140 | | -static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); |
---|
141 | | -static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state); |
---|
142 | | -static int dhdpcie_pci_resume(struct pci_dev *dev); |
---|
| 175 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 176 | +static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint); |
---|
| 177 | +#else |
---|
| 178 | +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state); |
---|
| 179 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 180 | +static int dhdpcie_resume_host_dev(dhd_bus_t *bus); |
---|
| 181 | +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); |
---|
143 | 182 | static int dhdpcie_resume_dev(struct pci_dev *dev); |
---|
144 | 183 | static int dhdpcie_suspend_dev(struct pci_dev *dev); |
---|
| 184 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 185 | +static int dhdpcie_pm_suspend(struct device *dev); |
---|
| 186 | +static int dhdpcie_pm_prepare(struct device *dev); |
---|
| 187 | +static int dhdpcie_pm_resume(struct device *dev); |
---|
| 188 | +static void dhdpcie_pm_complete(struct device *dev); |
---|
| 189 | +#else |
---|
| 190 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 191 | +static int dhdpcie_pm_system_suspend_noirq(struct device * dev); |
---|
| 192 | +static int dhdpcie_pm_system_resume_noirq(struct device * dev); |
---|
| 193 | +#else |
---|
| 194 | +static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); |
---|
| 195 | +static int dhdpcie_pci_resume(struct pci_dev *dev); |
---|
| 196 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 197 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 198 | + |
---|
| 199 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 200 | +static int dhdpcie_pm_runtime_suspend(struct device * dev); |
---|
| 201 | +static int dhdpcie_pm_runtime_resume(struct device * dev); |
---|
| 202 | +static int dhdpcie_pm_system_suspend_noirq(struct device * dev); |
---|
| 203 | +static int dhdpcie_pm_system_resume_noirq(struct device * dev); |
---|
| 204 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 205 | + |
---|
| 206 | +static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state); |
---|
| 207 | + |
---|
| 208 | +uint32 |
---|
| 209 | +dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write, |
---|
| 210 | + uint32 writeval); |
---|
| 211 | + |
---|
145 | 212 | static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { |
---|
146 | | - { vendor: 0x14e4, |
---|
| 213 | + { |
---|
| 214 | + vendor: VENDOR_CYPRESS, |
---|
147 | 215 | device: PCI_ANY_ID, |
---|
148 | 216 | subvendor: PCI_ANY_ID, |
---|
149 | 217 | subdevice: PCI_ANY_ID, |
---|
.. | .. |
---|
151 | 219 | class_mask: 0xffff00, |
---|
152 | 220 | driver_data: 0, |
---|
153 | 221 | }, |
---|
154 | | - { 0, } |
---|
| 222 | + { |
---|
| 223 | + vendor: VENDOR_BROADCOM, |
---|
| 224 | + device: PCI_ANY_ID, |
---|
| 225 | + subvendor: PCI_ANY_ID, |
---|
| 226 | + subdevice: PCI_ANY_ID, |
---|
| 227 | + class: PCI_CLASS_NETWORK_OTHER << 8, |
---|
| 228 | + class_mask: 0xffff00, |
---|
| 229 | + driver_data: 0, |
---|
| 230 | + }, |
---|
| 231 | + { 0, 0, 0, 0, 0, 0, 0} |
---|
155 | 232 | }; |
---|
156 | 233 | MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); |
---|
157 | 234 | |
---|
| 235 | +/* Power Management Hooks */ |
---|
| 236 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 237 | +static const struct dev_pm_ops dhd_pcie_pm_ops = { |
---|
| 238 | + .prepare = dhdpcie_pm_prepare, |
---|
| 239 | + .suspend = dhdpcie_pm_suspend, |
---|
| 240 | + .resume = dhdpcie_pm_resume, |
---|
| 241 | + .complete = dhdpcie_pm_complete, |
---|
| 242 | +}; |
---|
| 243 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 244 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 245 | +static const struct dev_pm_ops dhdpcie_pm_ops = { |
---|
| 246 | + SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL) |
---|
| 247 | + .suspend_noirq = dhdpcie_pm_system_suspend_noirq, |
---|
| 248 | + .resume_noirq = dhdpcie_pm_system_resume_noirq |
---|
| 249 | +}; |
---|
| 250 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 251 | + |
---|
158 | 252 | static struct pci_driver dhdpcie_driver = { |
---|
159 | | - node: {}, |
---|
| 253 | + node: {&dhdpcie_driver.node, &dhdpcie_driver.node}, |
---|
| 254 | +#ifndef BCMDHDX |
---|
160 | 255 | name: "pcieh", |
---|
| 256 | +#else |
---|
| 257 | + name: "pciehx", |
---|
| 258 | +#endif /* BCMDHDX */ |
---|
161 | 259 | id_table: dhdpcie_pci_devid, |
---|
162 | 260 | probe: dhdpcie_pci_probe, |
---|
163 | 261 | remove: dhdpcie_pci_remove, |
---|
164 | | -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) |
---|
165 | | - save_state: NULL, |
---|
166 | | -#endif |
---|
| 262 | +#if defined(DHD_PCIE_RUNTIMEPM) || defined(DHD_PCIE_NATIVE_RUNTIMEPM) |
---|
| 263 | + .driver.pm = &dhd_pcie_pm_ops, |
---|
| 264 | +#else |
---|
167 | 265 | suspend: dhdpcie_pci_suspend, |
---|
168 | 266 | resume: dhdpcie_pci_resume, |
---|
| 267 | +#endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
169 | 268 | }; |
---|
170 | 269 | |
---|
171 | 270 | int dhdpcie_init_succeeded = FALSE; |
---|
172 | 271 | |
---|
173 | | -static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) |
---|
| 272 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 273 | +static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt) |
---|
| 274 | +{ |
---|
| 275 | + struct dma_iommu_mapping *mapping; |
---|
| 276 | + struct device_node *root_node = NULL; |
---|
| 277 | + dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; |
---|
| 278 | + int smmu_iova_address[2]; |
---|
| 279 | + char *wlan_node = "android,bcmdhd_wlan"; |
---|
| 280 | + char *wlan_smmu_node = "wlan-smmu-iova-address"; |
---|
| 281 | + int atomic_ctx = 1; |
---|
| 282 | + int s1_bypass = 1; |
---|
| 283 | + int ret = 0; |
---|
| 284 | + |
---|
| 285 | + DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__)); |
---|
| 286 | + |
---|
| 287 | + root_node = of_find_compatible_node(NULL, NULL, wlan_node); |
---|
| 288 | + if (!root_node) { |
---|
| 289 | + WARN(1, "failed to get device node of BRCM WLAN\n"); |
---|
| 290 | + return -ENODEV; |
---|
| 291 | + } |
---|
| 292 | + |
---|
| 293 | + if (of_property_read_u32_array(root_node, wlan_smmu_node, |
---|
| 294 | + smmu_iova_address, 2) == 0) { |
---|
| 295 | + DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n", |
---|
| 296 | + __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1])); |
---|
| 297 | + smmu_info->smmu_iova_start = smmu_iova_address[0]; |
---|
| 298 | + smmu_info->smmu_iova_len = smmu_iova_address[1]; |
---|
| 299 | + } else { |
---|
| 300 | + printf("%s : can't get smmu iova address property\n", |
---|
| 301 | + __FUNCTION__); |
---|
| 302 | + return -ENODEV; |
---|
| 303 | + } |
---|
| 304 | + |
---|
| 305 | + if (smmu_info->smmu_iova_len <= 0) { |
---|
| 306 | + DHD_ERROR(("%s: Invalid smmu iova len %d\n", |
---|
| 307 | + __FUNCTION__, (int)smmu_info->smmu_iova_len)); |
---|
| 308 | + return -EINVAL; |
---|
| 309 | + } |
---|
| 310 | + |
---|
| 311 | + DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__)); |
---|
| 312 | + |
---|
| 313 | + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) || |
---|
| 314 | + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { |
---|
| 315 | + DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__)); |
---|
| 316 | + return -EINVAL; |
---|
| 317 | + } |
---|
| 318 | + |
---|
| 319 | + mapping = arm_iommu_create_mapping(&platform_bus_type, |
---|
| 320 | + smmu_info->smmu_iova_start, smmu_info->smmu_iova_len); |
---|
| 321 | + if (IS_ERR(mapping)) { |
---|
| 322 | + ret = PTR_ERR(mapping); |
---|
| 323 | + DHD_ERROR(("%s: create mapping failed, err = %d\n", |
---|
| 324 | + __FUNCTION__, ret)); |
---|
| 325 | + goto map_fail; |
---|
| 326 | + } |
---|
| 327 | + |
---|
| 328 | + ret = iommu_domain_set_attr(mapping->domain, |
---|
| 329 | + DOMAIN_ATTR_ATOMIC, &atomic_ctx); |
---|
| 330 | + if (ret) { |
---|
| 331 | + DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n", |
---|
| 332 | + __FUNCTION__, ret)); |
---|
| 333 | + goto set_attr_fail; |
---|
| 334 | + } |
---|
| 335 | + |
---|
| 336 | + ret = iommu_domain_set_attr(mapping->domain, |
---|
| 337 | + DOMAIN_ATTR_S1_BYPASS, &s1_bypass); |
---|
| 338 | + if (ret < 0) { |
---|
| 339 | + DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n", |
---|
| 340 | + __FUNCTION__, ret)); |
---|
| 341 | + goto set_attr_fail; |
---|
| 342 | + } |
---|
| 343 | + |
---|
| 344 | + ret = arm_iommu_attach_device(&pdev->dev, mapping); |
---|
| 345 | + if (ret) { |
---|
| 346 | + DHD_ERROR(("%s: attach device failed, err = %d\n", |
---|
| 347 | + __FUNCTION__, ret)); |
---|
| 348 | + goto attach_fail; |
---|
| 349 | + } |
---|
| 350 | + |
---|
| 351 | + smmu_info->smmu_mapping = mapping; |
---|
| 352 | + |
---|
| 353 | + return ret; |
---|
| 354 | + |
---|
| 355 | +attach_fail: |
---|
| 356 | +set_attr_fail: |
---|
| 357 | + arm_iommu_release_mapping(mapping); |
---|
| 358 | +map_fail: |
---|
| 359 | + return ret; |
---|
| 360 | +} |
---|
| 361 | + |
---|
| 362 | +static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt) |
---|
| 363 | +{ |
---|
| 364 | + dhdpcie_smmu_info_t *smmu_info; |
---|
| 365 | + |
---|
| 366 | + if (!smmu_cxt) { |
---|
| 367 | + return; |
---|
| 368 | + } |
---|
| 369 | + |
---|
| 370 | + smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; |
---|
| 371 | + if (smmu_info->smmu_mapping) { |
---|
| 372 | + arm_iommu_detach_device(&pdev->dev); |
---|
| 373 | + arm_iommu_release_mapping(smmu_info->smmu_mapping); |
---|
| 374 | + smmu_info->smmu_mapping = NULL; |
---|
| 375 | + } |
---|
| 376 | +} |
---|
| 377 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
| 378 | + |
---|
| 379 | +#ifdef FORCE_TPOWERON |
---|
| 380 | +static void |
---|
| 381 | +dhd_bus_get_tpoweron(dhd_bus_t *bus) |
---|
| 382 | +{ |
---|
| 383 | + |
---|
| 384 | + uint32 tpoweron_rc; |
---|
| 385 | + uint32 tpoweron_ep; |
---|
| 386 | + |
---|
| 387 | + tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, |
---|
| 388 | + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0); |
---|
| 389 | + tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, |
---|
| 390 | + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0); |
---|
| 391 | + DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n", |
---|
| 392 | + __FUNCTION__, tpoweron_rc, tpoweron_ep)); |
---|
| 393 | +} |
---|
| 394 | + |
---|
| 395 | +static void |
---|
| 396 | +dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron) |
---|
| 397 | +{ |
---|
| 398 | + |
---|
| 399 | + dhd_bus_get_tpoweron(bus); |
---|
| 400 | + /* Set the tpoweron */ |
---|
| 401 | + DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron)); |
---|
| 402 | + dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, |
---|
| 403 | + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron); |
---|
| 404 | + dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, |
---|
| 405 | + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron); |
---|
| 406 | + |
---|
| 407 | + dhd_bus_get_tpoweron(bus); |
---|
| 408 | + |
---|
| 409 | +} |
---|
| 410 | + |
---|
| 411 | +static bool |
---|
| 412 | +dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus) |
---|
| 413 | +{ |
---|
| 414 | + /* |
---|
| 415 | + * On Fire's reference platform, coming out of L1.2, |
---|
| 416 | + * there is a constant delay of 45us between CLKREQ# and stable REFCLK |
---|
| 417 | + * Due to this delay, with tPowerOn < 50us |
---|
| 418 | + * there is a chance that the refclk sense will trigger on noise. |
---|
| 419 | + * |
---|
| 420 | + * Whichever chip needs a forced tPowerOn of 50us should be listed below. |
---|
| 421 | + */ |
---|
| 422 | + if (si_chipid(bus->sih) == BCM4377_CHIP_ID) { |
---|
| 423 | + return TRUE; |
---|
| 424 | + } |
---|
| 425 | + return FALSE; |
---|
| 426 | +} |
---|
| 427 | +#endif /* FORCE_TPOWERON */ |
---|
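
A note on the encoding behind tpoweron_scale: the value written to the L1SS Control 2 register packs a T_PowerOn value and scale field. The standalone sketch below is illustrative only and assumes the standard PCIe L1 PM Substates Control 2 layout (scale in bits [1:0] with 2us/10us/100us steps, value in bits [7:3]); encode_tpoweron_us() is a hypothetical helper, not part of this driver.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: encode a T_PowerOn time (in microseconds) into the
     * assumed L1SS Control 2 format, (value << 3) | scale.
     */
    static uint32_t encode_tpoweron_us(uint32_t us)
    {
        static const uint32_t scale_us[] = { 2, 10, 100 };
        uint32_t s;

        for (s = 0; s < 3; s++) {
            if (us % scale_us[s] == 0 && (us / scale_us[s]) <= 0x1f)
                return ((us / scale_us[s]) << 3) | s;
        }
        return 0; /* not exactly representable in this sketch */
    }

    int main(void)
    {
        /* 50us encodes as value=25 with the 2us scale -> 0xc8 */
        printf("50us -> 0x%x\n", encode_tpoweron_us(50));
        return 0;
    }
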
| 428 | + |
---|
| 429 | +static bool |
---|
| 430 | +dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable) |
---|
| 431 | +{ |
---|
| 432 | + uint32 linkctrl_before; |
---|
| 433 | + uint32 linkctrl_after = 0; |
---|
| 434 | + uint8 linkctrl_asm; |
---|
| 435 | + char *device; |
---|
| 436 | + |
---|
| 437 | + device = (dev == bus->dev) ? "EP" : "RC"; |
---|
| 438 | + |
---|
| 439 | + linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, |
---|
| 440 | + FALSE, FALSE, 0); |
---|
| 441 | + linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK); |
---|
| 442 | + |
---|
| 443 | + if (enable) { |
---|
| 444 | + if (linkctrl_asm == PCIE_ASPM_L1_ENAB) { |
---|
| 445 | + DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n", |
---|
| 446 | + __FUNCTION__, device, linkctrl_before)); |
---|
| 447 | + return FALSE; |
---|
| 448 | + } |
---|
| 449 | + /* Enable only L1 ASPM (bit 1) */ |
---|
| 450 | + dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE, |
---|
| 451 | + TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB)); |
---|
| 452 | + } else { |
---|
| 453 | + if (linkctrl_asm == 0) { |
---|
| 454 | + DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n", |
---|
| 455 | + __FUNCTION__, device, linkctrl_before)); |
---|
| 456 | + return FALSE; |
---|
| 457 | + } |
---|
| 458 | + /* Disable complete ASPM (bit 1 and bit 0) */ |
---|
| 459 | + dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE, |
---|
| 460 | + TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB))); |
---|
| 461 | + } |
---|
| 462 | + |
---|
| 463 | + linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, |
---|
| 464 | + FALSE, FALSE, 0); |
---|
| 465 | + DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n", |
---|
| 466 | + __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"), |
---|
| 467 | + linkctrl_before, linkctrl_after)); |
---|
| 468 | + |
---|
| 469 | + return TRUE; |
---|
| 470 | +} |
---|
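
For reference, the same read-modify-write of the Link Control ASPM bits can be expressed with the generic kernel helper pcie_capability_clear_and_set_word(); the sketch below is an illustrative alternative, not what this driver does (the driver uses dhdpcie_access_cap() above), and example_aspm_l1_set() is a hypothetical name.

    #include <linux/pci.h>

    /* Illustrative only: enable L1-only ASPM, or clear both ASPM bits,
     * on a single device via the stock PCIe capability accessor.
     */
    static int example_aspm_l1_set(struct pci_dev *dev, bool enable)
    {
        u16 clear = enable ? 0 : PCI_EXP_LNKCTL_ASPMC;   /* disable: clear L0s and L1 */
        u16 set   = enable ? PCI_EXP_LNKCTL_ASPM_L1 : 0; /* enable: set only L1 */

        return pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, clear, set);
    }
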
| 471 | + |
---|
| 472 | +static bool |
---|
| 473 | +dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus) |
---|
| 474 | +{ |
---|
| 475 | + uint32 rc_aspm_cap; |
---|
| 476 | + uint32 ep_aspm_cap; |
---|
| 477 | + |
---|
| 478 | + /* RC ASPM capability */ |
---|
| 479 | + rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, |
---|
| 480 | + FALSE, FALSE, 0); |
---|
| 481 | + if (rc_aspm_cap == BCME_ERROR) { |
---|
| 482 | + DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__)); |
---|
| 483 | + return FALSE; |
---|
| 484 | + } |
---|
| 485 | + |
---|
| 486 | + /* EP ASPM capability */ |
---|
| 487 | + ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, |
---|
| 488 | + FALSE, FALSE, 0); |
---|
| 489 | + if (ep_aspm_cap == BCME_ERROR) { |
---|
| 490 | + DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__)); |
---|
| 491 | + return FALSE; |
---|
| 492 | + } |
---|
| 493 | + |
---|
| 494 | + return TRUE; |
---|
| 495 | +} |
---|
| 496 | + |
---|
| 497 | +bool |
---|
| 498 | +dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable) |
---|
| 499 | +{ |
---|
| 500 | + bool ret; |
---|
| 501 | + |
---|
| 502 | + if (!bus->rc_ep_aspm_cap) { |
---|
| 503 | + DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n", |
---|
| 504 | + __FUNCTION__, bus->rc_ep_aspm_cap)); |
---|
| 505 | + return FALSE; |
---|
| 506 | + } |
---|
| 507 | + |
---|
| 508 | + if (enable) { |
---|
| 509 | + /* Enable only L1 ASPM first RC then EP */ |
---|
| 510 | + ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable); |
---|
| 511 | + ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable); |
---|
| 512 | + } else { |
---|
| 513 | + /* Disable complete ASPM first EP then RC */ |
---|
| 514 | + ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable); |
---|
| 515 | + ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable); |
---|
| 516 | + } |
---|
| 517 | + |
---|
| 518 | + return ret; |
---|
| 519 | +} |
---|
| 520 | + |
---|
| 521 | +static void |
---|
| 522 | +dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable) |
---|
| 523 | +{ |
---|
| 524 | + uint32 l1ssctrl_before; |
---|
| 525 | + uint32 l1ssctrl_after = 0; |
---|
| 526 | + uint8 l1ss_ep; |
---|
| 527 | + char *device; |
---|
| 528 | + |
---|
| 529 | + device = (dev == bus->dev) ? "EP" : "RC"; |
---|
| 530 | + |
---|
| 531 | + /* Extended Capability Reg */ |
---|
| 532 | + l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, |
---|
| 533 | + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); |
---|
| 534 | + l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK); |
---|
| 535 | + |
---|
| 536 | + if (enable) { |
---|
| 537 | + if (l1ss_ep == PCIE_EXT_L1SS_ENAB) { |
---|
| 538 | + DHD_ERROR(("%s: %s already enabled, l1ssctrl: 0x%x\n", |
---|
| 539 | + __FUNCTION__, device, l1ssctrl_before)); |
---|
| 540 | + return; |
---|
| 541 | + } |
---|
| 542 | + dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET, |
---|
| 543 | + TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB)); |
---|
| 544 | + } else { |
---|
| 545 | + if (l1ss_ep == 0) { |
---|
| 546 | + DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n", |
---|
| 547 | + __FUNCTION__, device, l1ssctrl_before)); |
---|
| 548 | + return; |
---|
| 549 | + } |
---|
| 550 | + dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET, |
---|
| 551 | + TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB))); |
---|
| 552 | + } |
---|
| 553 | + l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, |
---|
| 554 | + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); |
---|
| 555 | + DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n", |
---|
| 556 | + __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"), |
---|
| 557 | + l1ssctrl_before, l1ssctrl_after)); |
---|
| 558 | + |
---|
| 559 | +} |
---|
| 560 | + |
---|
| 561 | +static bool |
---|
| 562 | +dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus) |
---|
| 563 | +{ |
---|
| 564 | + uint32 rc_l1ss_cap; |
---|
| 565 | + uint32 ep_l1ss_cap; |
---|
| 566 | + |
---|
| 567 | + /* RC Extended Capability */ |
---|
| 568 | + rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS, |
---|
| 569 | + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); |
---|
| 570 | + if (rc_l1ss_cap == BCME_ERROR) { |
---|
| 571 | + DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__)); |
---|
| 572 | + return FALSE; |
---|
| 573 | + } |
---|
| 574 | + |
---|
| 575 | + /* EP Extended Capability */ |
---|
| 576 | + ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS, |
---|
| 577 | + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); |
---|
| 578 | + if (ep_l1ss_cap == BCME_ERROR) { |
---|
| 579 | + DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__)); |
---|
| 580 | + return FALSE; |
---|
| 581 | + } |
---|
| 582 | + |
---|
| 583 | + return TRUE; |
---|
| 584 | +} |
---|
| 585 | + |
---|
| 586 | +void |
---|
| 587 | +dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable) |
---|
| 588 | +{ |
---|
| 589 | + bool ret; |
---|
| 590 | + |
---|
| 591 | + if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) { |
---|
| 592 | + DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", |
---|
| 593 | + __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap)); |
---|
| 594 | + return; |
---|
| 595 | + } |
---|
| 596 | + |
---|
| 597 | + /* Disable ASPM of RC and EP */ |
---|
| 598 | + ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE); |
---|
| 599 | + |
---|
| 600 | + if (enable) { |
---|
| 601 | + /* Enable RC then EP */ |
---|
| 602 | + dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable); |
---|
| 603 | + dhd_bus_l1ss_enable_dev(bus, bus->dev, enable); |
---|
| 604 | + } else { |
---|
| 605 | + /* Disable EP then RC */ |
---|
| 606 | + dhd_bus_l1ss_enable_dev(bus, bus->dev, enable); |
---|
| 607 | + dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable); |
---|
| 608 | + } |
---|
| 609 | + |
---|
| 610 | + /* Re-enable ASPM of RC and EP only if this API disabled it */ |
---|
| 611 | + if (ret == TRUE) { |
---|
| 612 | + dhd_bus_aspm_enable_rc_ep(bus, TRUE); |
---|
| 613 | + } |
---|
| 614 | +} |
---|
| 615 | + |
---|
| 616 | +void |
---|
| 617 | +dhd_bus_aer_config(dhd_bus_t *bus) |
---|
| 618 | +{ |
---|
| 619 | + uint32 val; |
---|
| 620 | + |
---|
| 621 | + DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__)); |
---|
| 622 | + val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID, |
---|
| 623 | + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0); |
---|
| 624 | + if (val != (uint32)-1) { |
---|
| 625 | + val &= ~CORR_ERR_AE; |
---|
| 626 | + dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID, |
---|
| 627 | + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val); |
---|
| 628 | + } else { |
---|
| 629 | + DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n", |
---|
| 630 | + __FUNCTION__, val)); |
---|
| 631 | + } |
---|
| 632 | + |
---|
| 633 | + DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__)); |
---|
| 634 | + val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID, |
---|
| 635 | + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0); |
---|
| 636 | + if (val != (uint32)-1) { |
---|
| 637 | + val &= ~CORR_ERR_AE; |
---|
| 638 | + dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID, |
---|
| 639 | + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val); |
---|
| 640 | + } else { |
---|
| 641 | + DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n", |
---|
| 642 | + __FUNCTION__, val)); |
---|
| 643 | + } |
---|
| 644 | +} |
---|
| 645 | + |
---|
| 646 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 647 | +static int dhdpcie_pm_suspend(struct device *dev) |
---|
174 | 648 | { |
---|
175 | 649 | int ret = 0; |
---|
| 650 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
176 | 651 | dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
177 | 652 | dhd_bus_t *bus = NULL; |
---|
| 653 | + unsigned long flags; |
---|
178 | 654 | |
---|
179 | 655 | if (pch) { |
---|
180 | 656 | bus = pch->bus; |
---|
181 | 657 | } |
---|
182 | | - |
---|
183 | | - /* When firmware is not loaded do the PCI bus */ |
---|
184 | | - /* suspend/resume only */ |
---|
185 | | - if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && |
---|
186 | | - !bus->dhd->dongle_reset) { |
---|
187 | | - ret = dhdpcie_pci_suspend_resume(bus, state); |
---|
| 658 | + if (!bus) { |
---|
188 | 659 | return ret; |
---|
189 | 660 | } |
---|
190 | 661 | |
---|
191 | | - if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| |
---|
192 | | - (bus->dhd->busstate == DHD_BUS_DATA)) && |
---|
193 | | - (bus->suspended != state)) { |
---|
194 | | - ret = dhdpcie_bus_suspend(bus, state); |
---|
| 662 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 663 | + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { |
---|
| 664 | + DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n", |
---|
| 665 | + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); |
---|
| 666 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 667 | + return -EBUSY; |
---|
195 | 668 | } |
---|
| 669 | + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); |
---|
| 670 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 671 | + |
---|
| 672 | + if (!bus->dhd->dongle_reset) |
---|
| 673 | + ret = dhdpcie_set_suspend_resume(bus, TRUE); |
---|
| 674 | + |
---|
| 675 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 676 | + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); |
---|
| 677 | + dhd_os_busbusy_wake(bus->dhd); |
---|
| 678 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 679 | + |
---|
| 680 | + return ret; |
---|
| 681 | + |
---|
| 682 | +} |
---|
| 683 | + |
---|
| 684 | +static int dhdpcie_pm_prepare(struct device *dev) |
---|
| 685 | +{ |
---|
| 686 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 687 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 688 | + dhd_bus_t *bus = NULL; |
---|
| 689 | + |
---|
| 690 | + if (!pch || !pch->bus) { |
---|
| 691 | + return 0; |
---|
| 692 | + } |
---|
| 693 | + |
---|
| 694 | + bus = pch->bus; |
---|
| 695 | + DHD_DISABLE_RUNTIME_PM(bus->dhd); |
---|
| 696 | + bus->chk_pm = TRUE; |
---|
| 697 | + |
---|
| 698 | + return 0; |
---|
| 699 | +} |
---|
| 700 | + |
---|
| 701 | +static int dhdpcie_pm_resume(struct device *dev) |
---|
| 702 | +{ |
---|
| 703 | + int ret = 0; |
---|
| 704 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 705 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 706 | + dhd_bus_t *bus = NULL; |
---|
| 707 | + unsigned long flags; |
---|
| 708 | + |
---|
| 709 | + if (pch) { |
---|
| 710 | + bus = pch->bus; |
---|
| 711 | + } |
---|
| 712 | + if (!bus) { |
---|
| 713 | + return ret; |
---|
| 714 | + } |
---|
| 715 | + |
---|
| 716 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 717 | + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); |
---|
| 718 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 719 | + |
---|
| 720 | + if (!bus->dhd->dongle_reset) |
---|
| 721 | + ret = dhdpcie_set_suspend_resume(bus, FALSE); |
---|
| 722 | + |
---|
| 723 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 724 | + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); |
---|
| 725 | + dhd_os_busbusy_wake(bus->dhd); |
---|
| 726 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 727 | + |
---|
196 | 728 | return ret; |
---|
197 | 729 | } |
---|
198 | 730 | |
---|
| 731 | +static void dhdpcie_pm_complete(struct device *dev) |
---|
| 732 | +{ |
---|
| 733 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 734 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 735 | + dhd_bus_t *bus = NULL; |
---|
| 736 | + |
---|
| 737 | + if (!pch || !pch->bus) { |
---|
| 738 | + return; |
---|
| 739 | + } |
---|
| 740 | + |
---|
| 741 | + bus = pch->bus; |
---|
| 742 | + DHD_ENABLE_RUNTIME_PM(bus->dhd); |
---|
| 743 | + bus->chk_pm = FALSE; |
---|
| 744 | + |
---|
| 745 | + return; |
---|
| 746 | +} |
---|
| 747 | +#else |
---|
199 | 748 | static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) |
---|
200 | 749 | { |
---|
| 750 | + int ret = 0; |
---|
| 751 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 752 | + dhd_bus_t *bus = NULL; |
---|
| 753 | + unsigned long flags; |
---|
| 754 | + |
---|
| 755 | + if (pch) { |
---|
| 756 | + bus = pch->bus; |
---|
| 757 | + } |
---|
| 758 | + if (!bus) { |
---|
| 759 | + return ret; |
---|
| 760 | + } |
---|
| 761 | + |
---|
201 | 762 | BCM_REFERENCE(state); |
---|
202 | | - return dhdpcie_set_suspend_resume(pdev, TRUE); |
---|
| 763 | + |
---|
| 764 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 765 | + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { |
---|
| 766 | + DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n", |
---|
| 767 | + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); |
---|
| 768 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 769 | + return -EBUSY; |
---|
| 770 | + } |
---|
| 771 | + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); |
---|
| 772 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 773 | + |
---|
| 774 | + if (!bus->dhd->dongle_reset) |
---|
| 775 | + ret = dhdpcie_set_suspend_resume(bus, TRUE); |
---|
| 776 | + |
---|
| 777 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 778 | + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); |
---|
| 779 | + dhd_os_busbusy_wake(bus->dhd); |
---|
| 780 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 781 | + |
---|
| 782 | + return ret; |
---|
203 | 783 | } |
---|
204 | 784 | |
---|
205 | 785 | static int dhdpcie_pci_resume(struct pci_dev *pdev) |
---|
206 | 786 | { |
---|
207 | | - return dhdpcie_set_suspend_resume(pdev, FALSE); |
---|
| 787 | + int ret = 0; |
---|
| 788 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 789 | + dhd_bus_t *bus = NULL; |
---|
| 790 | + unsigned long flags; |
---|
| 791 | + |
---|
| 792 | + if (pch) { |
---|
| 793 | + bus = pch->bus; |
---|
| 794 | + } |
---|
| 795 | + if (!bus) { |
---|
| 796 | + return ret; |
---|
| 797 | + } |
---|
| 798 | + |
---|
| 799 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 800 | + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); |
---|
| 801 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 802 | + |
---|
| 803 | + if (!bus->dhd->dongle_reset) |
---|
| 804 | + ret = dhdpcie_set_suspend_resume(bus, FALSE); |
---|
| 805 | + |
---|
| 806 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 807 | + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); |
---|
| 808 | + dhd_os_busbusy_wake(bus->dhd); |
---|
| 809 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 810 | + |
---|
| 811 | + return ret; |
---|
| 812 | +} |
---|
| 813 | + |
---|
| 814 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 815 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 816 | +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint) |
---|
| 817 | +#else |
---|
| 818 | +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state) |
---|
| 819 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 820 | +{ |
---|
| 821 | + int ret = 0; |
---|
| 822 | + |
---|
| 823 | + ASSERT(bus && !bus->dhd->dongle_reset); |
---|
| 824 | + |
---|
| 825 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 826 | + /* if wakelock is held during suspend, return failure */ |
---|
| 827 | + if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) { |
---|
| 828 | + return -EBUSY; |
---|
| 829 | + } |
---|
| 830 | + mutex_lock(&bus->pm_lock); |
---|
| 831 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 832 | + |
---|
| 833 | + /* When firmware is not loaded do the PCI bus */ |
---|
| 834 | + /* suspend/resume only */ |
---|
| 835 | + if (bus->dhd->busstate == DHD_BUS_DOWN) { |
---|
| 836 | + ret = dhdpcie_pci_suspend_resume(bus, state); |
---|
| 837 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 838 | + mutex_unlock(&bus->pm_lock); |
---|
| 839 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 840 | + return ret; |
---|
| 841 | + } |
---|
| 842 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 843 | + ret = dhdpcie_bus_suspend(bus, state, byint); |
---|
| 844 | +#else |
---|
| 845 | + ret = dhdpcie_bus_suspend(bus, state); |
---|
| 846 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 847 | + |
---|
| 848 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 849 | + mutex_unlock(&bus->pm_lock); |
---|
| 850 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 851 | + |
---|
| 852 | + return ret; |
---|
| 853 | +} |
---|
| 854 | + |
---|
| 855 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 856 | +static int dhdpcie_pm_runtime_suspend(struct device * dev) |
---|
| 857 | +{ |
---|
| 858 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 859 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 860 | + dhd_bus_t *bus = NULL; |
---|
| 861 | + int ret = 0; |
---|
| 862 | + |
---|
| 863 | + if (!pch) |
---|
| 864 | + return -EBUSY; |
---|
| 865 | + |
---|
| 866 | + bus = pch->bus; |
---|
| 867 | + |
---|
| 868 | + DHD_RPM(("%s Enter\n", __FUNCTION__)); |
---|
| 869 | + |
---|
| 870 | + if (atomic_read(&bus->dhd->block_bus)) |
---|
| 871 | + return -EHOSTDOWN; |
---|
| 872 | + |
---|
| 873 | + dhd_netif_stop_queue(bus); |
---|
| 874 | + atomic_set(&bus->dhd->block_bus, TRUE); |
---|
| 875 | + |
---|
| 876 | + if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) { |
---|
| 877 | + pm_runtime_mark_last_busy(dev); |
---|
| 878 | + ret = -EAGAIN; |
---|
| 879 | + } |
---|
| 880 | + |
---|
| 881 | + atomic_set(&bus->dhd->block_bus, FALSE); |
---|
| 882 | + dhd_bus_start_queue(bus); |
---|
| 883 | + |
---|
| 884 | + return ret; |
---|
| 885 | +} |
---|
| 886 | + |
---|
| 887 | +static int dhdpcie_pm_runtime_resume(struct device * dev) |
---|
| 888 | +{ |
---|
| 889 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 890 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 891 | + dhd_bus_t *bus = pch->bus; |
---|
| 892 | + |
---|
| 893 | + DHD_RPM(("%s Enter\n", __FUNCTION__)); |
---|
| 894 | + |
---|
| 895 | + if (atomic_read(&bus->dhd->block_bus)) |
---|
| 896 | + return -EHOSTDOWN; |
---|
| 897 | + |
---|
| 898 | + if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE)) |
---|
| 899 | + return -EAGAIN; |
---|
| 900 | + |
---|
| 901 | + return 0; |
---|
| 902 | +} |
---|
| 903 | + |
---|
| 904 | +static int dhdpcie_pm_system_suspend_noirq(struct device * dev) |
---|
| 905 | +{ |
---|
| 906 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 907 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 908 | + dhd_bus_t *bus = NULL; |
---|
| 909 | + int ret; |
---|
| 910 | + |
---|
| 911 | + DHD_RPM(("%s Enter\n", __FUNCTION__)); |
---|
| 912 | + |
---|
| 913 | + if (!pch) |
---|
| 914 | + return -EBUSY; |
---|
| 915 | + |
---|
| 916 | + bus = pch->bus; |
---|
| 917 | + |
---|
| 918 | + if (atomic_read(&bus->dhd->block_bus)) |
---|
| 919 | + return -EHOSTDOWN; |
---|
| 920 | + |
---|
| 921 | + dhd_netif_stop_queue(bus); |
---|
| 922 | + atomic_set(&bus->dhd->block_bus, TRUE); |
---|
| 923 | + |
---|
| 924 | + ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE); |
---|
| 925 | + |
---|
| 926 | + if (ret) { |
---|
| 927 | + dhd_bus_start_queue(bus); |
---|
| 928 | + atomic_set(&bus->dhd->block_bus, FALSE); |
---|
| 929 | + } |
---|
| 930 | + |
---|
| 931 | + return ret; |
---|
| 932 | +} |
---|
| 933 | + |
---|
| 934 | +static int dhdpcie_pm_system_resume_noirq(struct device * dev) |
---|
| 935 | +{ |
---|
| 936 | + struct pci_dev *pdev = to_pci_dev(dev); |
---|
| 937 | + dhdpcie_info_t *pch = pci_get_drvdata(pdev); |
---|
| 938 | + dhd_bus_t *bus = NULL; |
---|
| 939 | + int ret; |
---|
| 940 | + |
---|
| 941 | + if (!pch) |
---|
| 942 | + return -EBUSY; |
---|
| 943 | + |
---|
| 944 | + bus = pch->bus; |
---|
| 945 | + |
---|
| 946 | + DHD_RPM(("%s Enter\n", __FUNCTION__)); |
---|
| 947 | + |
---|
| 948 | + ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE); |
---|
| 949 | + |
---|
| 950 | + atomic_set(&bus->dhd->block_bus, FALSE); |
---|
| 951 | + dhd_bus_start_queue(bus); |
---|
| 952 | + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); |
---|
| 953 | + |
---|
| 954 | + return ret; |
---|
| 955 | +} |
---|
| 956 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 957 | + |
---|
| 958 | +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 959 | +extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp); |
---|
| 960 | +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
| 961 | + |
---|
| 962 | +static void |
---|
| 963 | +dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state) |
---|
| 964 | +{ |
---|
| 965 | + DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, " |
---|
| 966 | + "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n", |
---|
| 967 | + suspend_state, |
---|
| 968 | + PCIECFGREG_BASEADDR0, |
---|
| 969 | + dhd_pcie_config_read(bus->osh, |
---|
| 970 | + PCIECFGREG_BASEADDR0, sizeof(uint32)), |
---|
| 971 | + PCIECFGREG_BASEADDR1, |
---|
| 972 | + dhd_pcie_config_read(bus->osh, |
---|
| 973 | + PCIECFGREG_BASEADDR1, sizeof(uint32)), |
---|
| 974 | + PCIE_CFG_PMCSR, |
---|
| 975 | + dhd_pcie_config_read(bus->osh, |
---|
| 976 | + PCIE_CFG_PMCSR, sizeof(uint32)))); |
---|
208 | 977 | } |
---|
209 | 978 | |
---|
210 | 979 | static int dhdpcie_suspend_dev(struct pci_dev *dev) |
---|
211 | 980 | { |
---|
212 | 981 | int ret; |
---|
213 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
214 | 982 | dhdpcie_info_t *pch = pci_get_drvdata(dev); |
---|
| 983 | + dhd_bus_t *bus = pch->bus; |
---|
| 984 | + |
---|
| 985 | +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 986 | + if (bus->is_linkdown) { |
---|
| 987 | + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); |
---|
| 988 | + return BCME_ERROR; |
---|
| 989 | + } |
---|
215 | 990 | #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
216 | | - DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); |
---|
| 991 | + DHD_ERROR(("%s: Enter\n", __FUNCTION__)); |
---|
| 992 | + dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND"); |
---|
| 993 | +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 994 | + dhd_dpc_tasklet_kill(bus->dhd); |
---|
| 995 | +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
217 | 996 | pci_save_state(dev); |
---|
218 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 997 | +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
219 | 998 | pch->state = pci_store_saved_state(dev); |
---|
220 | 999 | #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
221 | 1000 | pci_enable_wake(dev, PCI_D0, TRUE); |
---|
222 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) |
---|
223 | 1001 | if (pci_is_enabled(dev)) |
---|
224 | | -#endif |
---|
225 | 1002 | pci_disable_device(dev); |
---|
226 | 1003 | |
---|
227 | 1004 | ret = pci_set_power_state(dev, PCI_D3hot); |
---|
.. | .. |
---|
229 | 1006 | DHD_ERROR(("%s: pci_set_power_state error %d\n", |
---|
230 | 1007 | __FUNCTION__, ret)); |
---|
231 | 1008 | } |
---|
232 | | - disable_irq(dev->irq); |
---|
| 1009 | +#ifdef OEM_ANDROID |
---|
| 1010 | + dev->state_saved = FALSE; |
---|
| 1011 | +#endif /* OEM_ANDROID */ |
---|
| 1012 | + dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND"); |
---|
233 | 1013 | return ret; |
---|
234 | 1014 | } |
---|
| 1015 | + |
---|
| 1016 | +#ifdef DHD_WAKE_STATUS |
---|
| 1017 | +int bcmpcie_get_total_wake(struct dhd_bus *bus) |
---|
| 1018 | +{ |
---|
| 1019 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1020 | + |
---|
| 1021 | + return pch->total_wake_count; |
---|
| 1022 | +} |
---|
| 1023 | + |
---|
| 1024 | +int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag) |
---|
| 1025 | +{ |
---|
| 1026 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1027 | + unsigned long flags; |
---|
| 1028 | + int ret; |
---|
| 1029 | + |
---|
| 1030 | + spin_lock_irqsave(&pch->pcie_lock, flags); |
---|
| 1031 | + |
---|
| 1032 | + ret = pch->pkt_wake; |
---|
| 1033 | + pch->total_wake_count += flag; |
---|
| 1034 | + pch->pkt_wake = flag; |
---|
| 1035 | + |
---|
| 1036 | + spin_unlock_irqrestore(&pch->pcie_lock, flags); |
---|
| 1037 | + return ret; |
---|
| 1038 | +} |
---|
| 1039 | +#endif /* DHD_WAKE_STATUS */ |
---|
235 | 1040 | |
---|
236 | 1041 | static int dhdpcie_resume_dev(struct pci_dev *dev) |
---|
237 | 1042 | { |
---|
238 | 1043 | int err = 0; |
---|
239 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
240 | 1044 | dhdpcie_info_t *pch = pci_get_drvdata(dev); |
---|
| 1045 | +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
241 | 1046 | pci_load_and_free_saved_state(dev, &pch->state); |
---|
242 | 1047 | #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
243 | | - DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); |
---|
| 1048 | + DHD_ERROR(("%s: Enter\n", __FUNCTION__)); |
---|
| 1049 | +#ifdef OEM_ANDROID |
---|
| 1050 | + dev->state_saved = TRUE; |
---|
| 1051 | +#endif /* OEM_ANDROID */ |
---|
244 | 1052 | pci_restore_state(dev); |
---|
| 1053 | +#ifdef FORCE_TPOWERON |
---|
| 1054 | + if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) { |
---|
| 1055 | + dhd_bus_set_tpoweron(pch->bus, tpoweron_scale); |
---|
| 1056 | + } |
---|
| 1057 | +#endif /* FORCE_TPOWERON */ |
---|
245 | 1058 | err = pci_enable_device(dev); |
---|
246 | 1059 | if (err) { |
---|
247 | 1060 | printf("%s:pci_enable_device error %d \n", __FUNCTION__, err); |
---|
.. | .. |
---|
253 | 1066 | printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err); |
---|
254 | 1067 | goto out; |
---|
255 | 1068 | } |
---|
256 | | - |
---|
| 1069 | + BCM_REFERENCE(pch); |
---|
| 1070 | + dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME"); |
---|
257 | 1071 | out: |
---|
258 | | - enable_irq(dev->irq); |
---|
259 | 1072 | return err; |
---|
260 | 1073 | } |
---|
261 | 1074 | |
---|
| 1075 | +static int dhdpcie_resume_host_dev(dhd_bus_t *bus) |
---|
| 1076 | +{ |
---|
| 1077 | + int bcmerror = 0; |
---|
| 1078 | +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH |
---|
| 1079 | + bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); |
---|
| 1080 | +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ |
---|
| 1081 | +#ifdef CONFIG_ARCH_MSM |
---|
| 1082 | + bcmerror = dhdpcie_start_host_pcieclock(bus); |
---|
| 1083 | +#endif /* CONFIG_ARCH_MSM */ |
---|
| 1084 | +#ifdef CONFIG_ARCH_TEGRA |
---|
| 1085 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)) |
---|
| 1086 | + bcmerror = tegra_pcie_pm_resume(); |
---|
| 1087 | +#endif // endif |
---|
| 1088 | +#endif /* CONFIG_ARCH_TEGRA */ |
---|
| 1089 | + if (bcmerror < 0) { |
---|
| 1090 | + DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n", |
---|
| 1091 | + __FUNCTION__, bcmerror)); |
---|
| 1092 | + bus->is_linkdown = 1; |
---|
| 1093 | +#ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
| 1094 | +#ifdef CONFIG_ARCH_MSM |
---|
| 1095 | + bus->no_cfg_restore = 1; |
---|
| 1096 | +#endif /* CONFIG_ARCH_MSM */ |
---|
| 1097 | +#endif /* SUPPORT_LINKDOWN_RECOVERY */ |
---|
| 1098 | + } |
---|
| 1099 | + |
---|
| 1100 | + return bcmerror; |
---|
| 1101 | +} |
---|
| 1102 | + |
---|
| 1103 | +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus) |
---|
| 1104 | +{ |
---|
| 1105 | + int bcmerror = 0; |
---|
| 1106 | +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH |
---|
| 1107 | + if (bus->rc_dev) { |
---|
| 1108 | + pci_save_state(bus->rc_dev); |
---|
| 1109 | + } else { |
---|
| 1110 | + DHD_ERROR(("%s: RC %x:%x handle is NULL\n", |
---|
| 1111 | + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID)); |
---|
| 1112 | + } |
---|
| 1113 | + exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM); |
---|
| 1114 | +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ |
---|
| 1115 | +#ifdef CONFIG_ARCH_MSM |
---|
| 1116 | + bcmerror = dhdpcie_stop_host_pcieclock(bus); |
---|
| 1117 | +#endif /* CONFIG_ARCH_MSM */ |
---|
| 1118 | +#ifdef CONFIG_ARCH_TEGRA |
---|
| 1119 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)) |
---|
| 1120 | + bcmerror = tegra_pcie_pm_suspend(); |
---|
| 1121 | +#endif // endif |
---|
| 1122 | +#endif /* CONFIG_ARCH_TEGRA */ |
---|
| 1123 | + return bcmerror; |
---|
| 1124 | +} |
---|
| 1125 | + |
---|
| 1126 | +/** |
---|
| 1127 | + * dhdpcie_os_setbar1win |
---|
| 1128 | + * |
---|
| 1129 | + * Interface function for setting bar1 window in order to allow |
---|
| 1130 | + * os layer to be aware of current window position. |
---|
| 1131 | + * |
---|
| 1132 | + * @bus: dhd bus context |
---|
| 1133 | + * @addr: new backplane window address for BAR1 |
---|
| 1134 | + */ |
---|
| 1135 | +void |
---|
| 1136 | +dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr) |
---|
| 1137 | +{ |
---|
| 1138 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1139 | + |
---|
| 1140 | + osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 4, addr); |
---|
| 1141 | + pch->curr_bar1_win = addr; |
---|
| 1142 | +} |
---|
| 1143 | + |
---|
| 1144 | +/** |
---|
| 1145 | + * dhdpcie_os_chkbpoffset |
---|
| 1146 | + * |
---|
| 1147 | + * Check the provided address is within the current BAR1 window, |
---|
| 1148 | + * if not, shift the window |
---|
| 1149 | + * |
---|
| 1150 | + * @bus: dhd bus context |
---|
| 1151 | + * @offset: backplane address that the caller wants to access |
---|
| 1152 | + * |
---|
| 1153 | + * Return: new offset for access |
---|
| 1154 | + */ |
---|
| 1155 | +static ulong |
---|
| 1156 | +dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset) |
---|
| 1157 | +{ |
---|
| 1158 | + /* Determine BAR1 backplane window using window size |
---|
| 1159 | + * Window address mask should be ~(size - 1) |
---|
| 1160 | + */ |
---|
| 1161 | + uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1)); |
---|
| 1162 | + |
---|
| 1163 | + if (bpwin != pch->curr_bar1_win) { |
---|
| 1164 | + /* Move BAR1 window */ |
---|
| 1165 | + dhdpcie_os_setbar1win(pch->bus, bpwin); |
---|
| 1166 | + } |
---|
| 1167 | + |
---|
| 1168 | + return offset - bpwin; |
---|
| 1169 | +} |
---|
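
A quick worked example of the window arithmetic above, as a standalone sketch (the 4 MB BAR1 size and the backplane address are made-up values for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t bar1_size = 0x400000;              /* hypothetical 4 MB BAR1 */
        uint32_t offset = 0x18006010;               /* hypothetical backplane address */
        uint32_t bpwin = offset & ~(bar1_size - 1); /* window base: 0x18000000 */

        /* The BAR1 window register would be moved to bpwin, and the access
         * then uses the in-window offset, here 0x6010.
         */
        printf("bpwin=0x%x, in-window offset=0x%x\n", bpwin, offset - bpwin);
        return 0;
    }
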
| 1170 | + |
---|
| 1171 | +/** |
---|
| 1172 | + * dhdpcie os layer tcm read/write interface |
---|
| 1173 | + */ |
---|
| 1174 | +void |
---|
| 1175 | +dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) |
---|
| 1176 | +{ |
---|
| 1177 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1178 | + |
---|
| 1179 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1180 | + W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data); |
---|
| 1181 | +} |
---|
| 1182 | + |
---|
| 1183 | +uint8 |
---|
| 1184 | +dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset) |
---|
| 1185 | +{ |
---|
| 1186 | + volatile uint8 data; |
---|
| 1187 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1188 | + |
---|
| 1189 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1190 | + data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset)); |
---|
| 1191 | + return data; |
---|
| 1192 | +} |
---|
| 1193 | + |
---|
| 1194 | +void |
---|
| 1195 | +dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) |
---|
| 1196 | +{ |
---|
| 1197 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1198 | + |
---|
| 1199 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1200 | + W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data); |
---|
| 1201 | +} |
---|
| 1202 | + |
---|
| 1203 | +uint16 |
---|
| 1204 | +dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset) |
---|
| 1205 | +{ |
---|
| 1206 | + volatile uint16 data; |
---|
| 1207 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1208 | + |
---|
| 1209 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1210 | + data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset)); |
---|
| 1211 | + return data; |
---|
| 1212 | +} |
---|
| 1213 | + |
---|
| 1214 | +void |
---|
| 1215 | +dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) |
---|
| 1216 | +{ |
---|
| 1217 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1218 | + |
---|
| 1219 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1220 | + W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data); |
---|
| 1221 | +} |
---|
| 1222 | + |
---|
| 1223 | +uint32 |
---|
| 1224 | +dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset) |
---|
| 1225 | +{ |
---|
| 1226 | + volatile uint32 data; |
---|
| 1227 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1228 | + |
---|
| 1229 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1230 | + data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset)); |
---|
| 1231 | + return data; |
---|
| 1232 | +} |
---|
| 1233 | + |
---|
| 1234 | +#ifdef DHD_SUPPORT_64BIT |
---|
| 1235 | +void |
---|
| 1236 | +dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) |
---|
| 1237 | +{ |
---|
| 1238 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1239 | + |
---|
| 1240 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1241 | + W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data); |
---|
| 1242 | +} |
---|
| 1243 | + |
---|
| 1244 | +uint64 |
---|
| 1245 | +dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset) |
---|
| 1246 | +{ |
---|
| 1247 | + volatile uint64 data; |
---|
| 1248 | + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); |
---|
| 1249 | + |
---|
| 1250 | + offset = dhdpcie_os_chkbpoffset(pch, offset); |
---|
| 1251 | + data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset)); |
---|
| 1252 | + return data; |
---|
| 1253 | +} |
---|
| 1254 | +#endif /* DHD_SUPPORT_64BIT */ |
---|
| 1255 | + |
---|
| 1256 | +uint32 |
---|
| 1257 | +dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset) |
---|
| 1258 | +{ |
---|
| 1259 | + uint val = -1; /* Initialise to 0xffffffff */ |
---|
| 1260 | + if (bus->rc_dev) { |
---|
| 1261 | + pci_read_config_dword(bus->rc_dev, offset, &val); |
---|
| 1262 | + OSL_DELAY(100); |
---|
| 1263 | + } else { |
---|
| 1264 | + DHD_ERROR(("%s: RC %x:%x handle is NULL\n", |
---|
| 1265 | + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID)); |
---|
| 1266 | + } |
---|
| 1267 | + DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n", |
---|
| 1268 | + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val)); |
---|
| 1269 | + return (val); |
---|
| 1270 | +} |
---|
| 1271 | + |
---|
| 1272 | +/* |
---|
| 1273 | + * Reads/ Writes the value of capability register |
---|
| 1274 | + * from the given CAP_ID section of PCI Root Port |
---|
| 1275 | + * |
---|
| 1276 | + * Arguments |
---|
| 1277 | + * @bus current dhd_bus_t pointer |
---|
| 1278 | + * @cap Capability or Extended Capability ID to get |
---|
| 1279 | + * @offset offset of Register to Read |
---|
| 1280 | + * @is_ext TRUE if @cap is given for Extended Capability |
---|
| 1281 | + * @is_write is set to TRUE to indicate write |
---|
| 1282 | + * @val value to write |
---|
| 1283 | + * |
---|
| 1284 | + * Return Value |
---|
| 1285 | + * Returns 0xffffffff on error |
---|
| 1286 | + * on write success returns BCME_OK (0) |
---|
| 1287 | + * on Read Success returns the value of register requested |
---|
| 1288 | + * Note: caller should ensure valid capability ID and Ext. Capability ID. |
---|
| 1289 | + */ |
---|
| 1290 | + |
---|
| 1291 | +uint32 |
---|
| 1292 | +dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write, |
---|
| 1293 | + uint32 writeval) |
---|
| 1294 | +{ |
---|
| 1295 | + int cap_ptr = 0; |
---|
| 1296 | + uint32 ret = -1; |
---|
| 1297 | + uint32 readval; |
---|
| 1298 | + |
---|
| 1299 | + if (!(pdev)) { |
---|
| 1300 | + DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__)); |
---|
| 1301 | + return ret; |
---|
| 1302 | + } |
---|
| 1303 | + |
---|
| 1304 | + /* Find Capability offset */ |
---|
| 1305 | + if (is_ext) { |
---|
| 1306 | + /* removing max EXT_CAP_ID check as |
---|
| 1307 | + * the Linux kernel definition's max value is not yet updated as per the spec
---|
| 1308 | + */ |
---|
| 1309 | + cap_ptr = pci_find_ext_capability(pdev, cap); |
---|
| 1310 | + |
---|
| 1311 | + } else { |
---|
| 1312 | + /* removing max PCI_CAP_ID_MAX check as |
---|
| 1313 | + * previous kernel versions don't have this definition
---|
| 1314 | + */ |
---|
| 1315 | + cap_ptr = pci_find_capability(pdev, cap); |
---|
| 1316 | + } |
---|
| 1317 | + |
---|
| 1318 | + /* Return if capability with given ID not found */ |
---|
| 1319 | + if (cap_ptr == 0) { |
---|
| 1320 | + DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n", |
---|
| 1321 | + __FUNCTION__, cap)); |
---|
| 1322 | + return BCME_ERROR; |
---|
| 1323 | + } |
---|
| 1324 | + |
---|
| 1325 | + if (is_write) { |
---|
| 1326 | + pci_write_config_dword(pdev, (cap_ptr + offset), writeval); |
---|
| 1327 | + ret = BCME_OK; |
---|
| 1328 | + |
---|
| 1329 | + } else { |
---|
| 1330 | + |
---|
| 1331 | + pci_read_config_dword(pdev, (cap_ptr + offset), &readval); |
---|
| 1332 | + ret = readval; |
---|
| 1333 | + } |
---|
| 1334 | + |
---|
| 1335 | + return ret; |
---|
| 1336 | +} |
---|
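As a usage illustration of the capability accessor above (the wrapper name below is hypothetical and not part of the driver), the endpoint's PCIe Link Capability register can be read with the same PCIE_CAP_ID_EXP/PCIE_CAP_LINKCAP_OFFSET constants used by dhd_debug_get_rc_linkcap() further down:

/* Minimal sketch: read the endpoint's Link Capability register through
 * dhdpcie_access_cap(). is_ext is FALSE because PCIE_CAP_ID_EXP is a
 * standard (not extended) capability. */
static uint32 dhd_example_ep_linkcap(dhd_bus_t *bus)
{
	return dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP,
		PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
}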
| 1337 | + |
---|
| 1338 | +uint32 |
---|
| 1339 | +dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write, |
---|
| 1340 | + uint32 writeval) |
---|
| 1341 | +{ |
---|
| 1342 | + if (!(bus->rc_dev)) { |
---|
| 1343 | + DHD_ERROR(("%s: RC %x:%x handle is NULL\n", |
---|
| 1344 | + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID)); |
---|
| 1345 | + return BCME_ERROR; |
---|
| 1346 | + } |
---|
| 1347 | + |
---|
| 1348 | + return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval); |
---|
| 1349 | +} |
---|
| 1350 | + |
---|
| 1351 | +uint32 |
---|
| 1352 | +dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write, |
---|
| 1353 | + uint32 writeval) |
---|
| 1354 | +{ |
---|
| 1355 | + if (!(bus->dev)) { |
---|
| 1356 | + DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__)); |
---|
| 1357 | + return BCME_ERROR; |
---|
| 1358 | + } |
---|
| 1359 | + |
---|
| 1360 | + return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval); |
---|
| 1361 | +} |
---|
| 1362 | + |
---|
| 1363 | +/* API wrapper to read Root Port link capability |
---|
| 1364 | + * Returns 2 = GEN2, 1 = GEN1, or an error value if linkcap is not found
---|
| 1365 | + */ |
---|
| 1366 | + |
---|
| 1367 | +uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus) |
---|
| 1368 | +{ |
---|
| 1369 | + uint32 linkcap = -1; |
---|
| 1370 | + linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, |
---|
| 1371 | + PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0); |
---|
| 1372 | + linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK; |
---|
| 1373 | + return linkcap; |
---|
| 1374 | +} |
---|
| 1375 | + |
---|
| 1376 | +static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state) |
---|
| 1377 | +{ |
---|
| 1378 | + if (bus->coreid == ARMCA7_CORE_ID) { |
---|
| 1379 | + if (state) { |
---|
| 1380 | + /* Sleep */ |
---|
| 1381 | + bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus, |
---|
| 1382 | + PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK; |
---|
| 1383 | + } else { |
---|
| 1384 | + uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, |
---|
| 1385 | + 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state; |
---|
| 1386 | + dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val); |
---|
| 1387 | + } |
---|
| 1388 | + } |
---|
| 1389 | +} |
---|
262 | 1390 | |
---|
263 | 1391 | int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) |
---|
264 | 1392 | { |
---|
265 | 1393 | int rc; |
---|
266 | 1394 | |
---|
267 | 1395 | struct pci_dev *dev = bus->dev; |
---|
268 | | -#ifdef USE_EXYNOS_PCIE_RC_PMPATCH |
---|
269 | | - struct pci_dev *rc_pci_dev; |
---|
270 | | -#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ |
---|
271 | 1396 | |
---|
272 | 1397 | if (state) { |
---|
273 | | -#ifndef BCMPCIE_OOB_HOST_WAKE |
---|
| 1398 | + dhdpcie_config_save_restore_coherent(bus, state); |
---|
| 1399 | +#if !defined(BCMPCIE_OOB_HOST_WAKE) |
---|
274 | 1400 | dhdpcie_pme_active(bus->osh, state); |
---|
275 | | -#endif /* !BCMPCIE_OOB_HOST_WAKE */ |
---|
| 1401 | +#endif // endif |
---|
276 | 1402 | rc = dhdpcie_suspend_dev(dev); |
---|
277 | | -#ifdef USE_EXYNOS_PCIE_RC_PMPATCH |
---|
278 | 1403 | if (!rc) { |
---|
279 | | - rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL); |
---|
280 | | - if (rc_pci_dev) { |
---|
281 | | - pci_save_state(rc_pci_dev); |
---|
282 | | - } |
---|
283 | | - exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM); |
---|
| 1404 | + dhdpcie_suspend_host_dev(bus); |
---|
284 | 1405 | } |
---|
285 | | -#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ |
---|
286 | 1406 | } else { |
---|
287 | | -#ifdef USE_EXYNOS_PCIE_RC_PMPATCH |
---|
288 | | - exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); |
---|
289 | | -#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ |
---|
290 | | - |
---|
291 | | - rc = dhdpcie_resume_dev(dev); |
---|
292 | | -#ifndef BCMPCIE_OOB_HOST_WAKE |
---|
293 | | - dhdpcie_pme_active(bus->osh, state); |
---|
294 | | -#endif /* !BCMPCIE_OOB_HOST_WAKE */ |
---|
| 1407 | + rc = dhdpcie_resume_host_dev(bus); |
---|
| 1408 | + if (!rc) { |
---|
| 1409 | + rc = dhdpcie_resume_dev(dev); |
---|
| 1410 | + if (PCIECTO_ENAB(bus)) { |
---|
| 1411 | + /* reinit CTO configuration |
---|
| 1412 | + * because cfg space got reset at D3 (PERST) |
---|
| 1413 | + */ |
---|
| 1414 | + dhdpcie_cto_cfg_init(bus, TRUE); |
---|
| 1415 | + } |
---|
| 1416 | + if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { |
---|
| 1417 | + dhdpcie_ssreset_dis_enum_rst(bus); |
---|
| 1418 | + } |
---|
| 1419 | +#if !defined(BCMPCIE_OOB_HOST_WAKE) |
---|
| 1420 | + dhdpcie_pme_active(bus->osh, state); |
---|
| 1421 | +#endif // endif |
---|
| 1422 | + } |
---|
| 1423 | + dhdpcie_config_save_restore_coherent(bus, state); |
---|
| 1424 | +#if defined(OEM_ANDROID) |
---|
| 1425 | +#if defined(DHD_HANG_SEND_UP_TEST) |
---|
| 1426 | + if (bus->is_linkdown || |
---|
| 1427 | + bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL) { |
---|
| 1428 | +#else /* DHD_HANG_SEND_UP_TEST */ |
---|
| 1429 | + if (bus->is_linkdown) { |
---|
| 1430 | +#endif /* DHD_HANG_SEND_UP_TEST */ |
---|
| 1431 | + bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; |
---|
| 1432 | + dhd_os_send_hang_message(bus->dhd); |
---|
| 1433 | + } |
---|
| 1434 | +#endif /* OEM_ANDROID */ |
---|
295 | 1435 | } |
---|
296 | 1436 | return rc; |
---|
297 | 1437 | } |
---|
298 | 1438 | |
---|
299 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) |
---|
300 | 1439 | static int dhdpcie_device_scan(struct device *dev, void *data) |
---|
301 | 1440 | { |
---|
302 | 1441 | struct pci_dev *pcidev; |
---|
303 | 1442 | int *cnt = data; |
---|
304 | 1443 | |
---|
| 1444 | +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
---|
| 1445 | +#pragma GCC diagnostic push |
---|
| 1446 | +#pragma GCC diagnostic ignored "-Wcast-qual" |
---|
| 1447 | +#endif // endif |
---|
305 | 1448 | pcidev = container_of(dev, struct pci_dev, dev); |
---|
306 | | - if (pcidev->vendor != 0x14e4) |
---|
| 1449 | +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) |
---|
| 1450 | +#pragma GCC diagnostic pop |
---|
| 1451 | +#endif // endif |
---|
| 1452 | + if (pcidev->vendor != VENDOR_BROADCOM && pcidev->vendor != VENDOR_CYPRESS) |
---|
307 | 1453 | return 0; |
---|
308 | 1454 | |
---|
309 | | - DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device)); |
---|
| 1455 | + if (pcidev->vendor == VENDOR_CYPRESS) { |
---|
| 1456 | + DHD_INFO(("Found Cypress PCI device 0x%04x\n", pcidev->device)); |
---|
| 1457 | + } else { |
---|
| 1458 | + DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device)); |
---|
| 1459 | + } |
---|
310 | 1460 | *cnt += 1; |
---|
311 | 1461 | if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name)) |
---|
312 | 1462 | DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n", |
---|
.. | .. |
---|
314 | 1464 | |
---|
315 | 1465 | return 0; |
---|
316 | 1466 | } |
---|
317 | | -#endif /* LINUX_VERSION >= 2.6.0 */ |
---|
318 | 1467 | |
---|
319 | 1468 | int |
---|
320 | 1469 | dhdpcie_bus_register(void) |
---|
321 | 1470 | { |
---|
322 | 1471 | int error = 0; |
---|
323 | 1472 | |
---|
324 | | - |
---|
325 | | -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) |
---|
326 | | - if (!(error = pci_module_init(&dhdpcie_driver))) |
---|
327 | | - return 0; |
---|
328 | | - |
---|
329 | | - DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); |
---|
330 | | -#else |
---|
331 | 1473 | if (!(error = pci_register_driver(&dhdpcie_driver))) { |
---|
332 | 1474 | bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan); |
---|
333 | 1475 | if (!error) { |
---|
.. | .. |
---|
341 | 1483 | pci_unregister_driver(&dhdpcie_driver); |
---|
342 | 1484 | error = BCME_ERROR; |
---|
343 | 1485 | } |
---|
344 | | -#endif /* LINUX_VERSION < 2.6.0 */ |
---|
345 | 1486 | |
---|
346 | 1487 | return error; |
---|
347 | 1488 | } |
---|
348 | | - |
---|
349 | 1489 | |
---|
350 | 1490 | void |
---|
351 | 1491 | dhdpcie_bus_unregister(void) |
---|
.. | .. |
---|
356 | 1496 | int __devinit |
---|
357 | 1497 | dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
---|
358 | 1498 | { |
---|
359 | | - int retry = 0; |
---|
360 | | - |
---|
| 1499 | +#ifndef MULTI_CHIP_SUPPORT |
---|
| 1500 | + /* Don't enumerate more than one device */ |
---|
| 1501 | + if (dhdpcie_init_succeeded) { |
---|
| 1502 | + DHD_TRACE(("%s: PCIe Enumeration is already done.\n", |
---|
| 1503 | + __func__)); |
---|
| 1504 | + return -ENODEV; |
---|
| 1505 | + } |
---|
| 1506 | +#endif /* MULTI_CHIP_SUPPORT */ |
---|
361 | 1507 | |
---|
362 | 1508 | if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) { |
---|
363 | 1509 | DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__)); |
---|
364 | 1510 | return -ENODEV; |
---|
365 | 1511 | } |
---|
| 1512 | + |
---|
366 | 1513 | printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X" |
---|
367 | 1514 | "(good PCI location)\n", pdev->bus->number, |
---|
368 | 1515 | PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device); |
---|
369 | 1516 | |
---|
370 | | - |
---|
371 | | - do { |
---|
372 | | - if (dhdpcie_init(pdev) == 0) |
---|
373 | | - break; |
---|
374 | | - DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__)); |
---|
375 | | - } while (retry-- >= 0); |
---|
376 | | - |
---|
377 | | - if (retry < 0) |
---|
| 1517 | + if (dhdpcie_init_succeeded == TRUE) { |
---|
| 1518 | + DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n", |
---|
| 1519 | + __FUNCTION__)); |
---|
378 | 1520 | return -ENODEV; |
---|
| 1521 | + } |
---|
| 1522 | + |
---|
| 1523 | + if (dhdpcie_init (pdev)) { |
---|
| 1524 | + DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__)); |
---|
| 1525 | + return -ENODEV; |
---|
| 1526 | + } |
---|
| 1527 | + |
---|
| 1528 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 1529 | + /* |
---|
| 1530 | + Since the MSM PCIe RC device usage count has already been incremented
---|
| 1531 | + by 2 before dhdpcie_pci_probe() is called, we inevitably have to call
---|
| 1532 | + pm_runtime_put_noidle() twice to make the count start from zero.
---|
| 1533 | + */ |
---|
| 1534 | + |
---|
| 1535 | + pm_runtime_put_noidle(&pdev->dev); |
---|
| 1536 | + pm_runtime_put_noidle(&pdev->dev); |
---|
| 1537 | + pm_runtime_set_suspended(&pdev->dev); |
---|
| 1538 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
379 | 1539 | |
---|
380 | 1540 | #ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND |
---|
381 | 1541 | /* disable async suspend */ |
---|
.. | .. |
---|
390 | 1550 | dhdpcie_detach(dhdpcie_info_t *pch) |
---|
391 | 1551 | { |
---|
392 | 1552 | if (pch) { |
---|
393 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
394 | | - if (!dhd_download_fw_on_driverload) { |
---|
| 1553 | +#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 1554 | + if (pch->default_state) { |
---|
395 | 1555 | pci_load_and_free_saved_state(pch->dev, &pch->default_state); |
---|
396 | 1556 | } |
---|
397 | 1557 | #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
.. | .. |
---|
399 | 1559 | } |
---|
400 | 1560 | return 0; |
---|
401 | 1561 | } |
---|
402 | | - |
---|
403 | 1562 | |
---|
404 | 1563 | void __devexit |
---|
405 | 1564 | dhdpcie_pci_remove(struct pci_dev *pdev) |
---|
.. | .. |
---|
413 | 1572 | bus = pch->bus; |
---|
414 | 1573 | osh = pch->osh; |
---|
415 | 1574 | |
---|
| 1575 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 1576 | + pm_runtime_get_noresume(&pdev->dev); |
---|
| 1577 | + pm_runtime_get_noresume(&pdev->dev); |
---|
| 1578 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 1579 | + |
---|
| 1580 | + if (bus) { |
---|
416 | 1581 | #ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
417 | 1582 | #ifdef CONFIG_ARCH_MSM |
---|
418 | | - if (bus) { |
---|
419 | 1583 | msm_pcie_deregister_event(&bus->pcie_event); |
---|
420 | | - } |
---|
421 | 1584 | #endif /* CONFIG_ARCH_MSM */ |
---|
| 1585 | +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY |
---|
| 1586 | +#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \ |
---|
| 1587 | + defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) |
---|
| 1588 | + exynos_pcie_deregister_event(&bus->pcie_event); |
---|
| 1589 | +#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || |
---|
| 1590 | + * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 |
---|
| 1591 | + */ |
---|
| 1592 | +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ |
---|
422 | 1593 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ |
---|
423 | | - dhdpcie_bus_release(bus); |
---|
| 1594 | + |
---|
| 1595 | + bus->rc_dev = NULL; |
---|
| 1596 | + |
---|
| 1597 | + dhdpcie_bus_release(bus); |
---|
| 1598 | + } |
---|
| 1599 | + |
---|
424 | 1600 | if (pci_is_enabled(pdev)) |
---|
425 | 1601 | pci_disable_device(pdev); |
---|
| 1602 | +#if defined(CONFIG_ARCH_MSM) |
---|
| 1603 | + msm_pcie_pm_control(MSM_PCIE_SUSPEND, pdev->bus->number, pdev, NULL, 0); |
---|
| 1604 | +#endif // endif |
---|
426 | 1605 | #ifdef BCMPCIE_OOB_HOST_WAKE |
---|
427 | 1606 | /* pcie os info detach */ |
---|
428 | 1607 | MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t)); |
---|
429 | 1608 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 1609 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 1610 | + /* smmu info detach */ |
---|
| 1611 | + dhdpcie_smmu_remove(pdev, pch->smmu_cxt); |
---|
| 1612 | + MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t)); |
---|
| 1613 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
430 | 1614 | /* pcie info detach */ |
---|
431 | 1615 | dhdpcie_detach(pch); |
---|
432 | 1616 | /* osl detach */ |
---|
433 | 1617 | osl_detach(osh); |
---|
| 1618 | + |
---|
| 1619 | +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ |
---|
| 1620 | + defined(CONFIG_ARCH_APQ8084) |
---|
| 1621 | + brcm_pcie_wake.wake_irq = NULL; |
---|
| 1622 | + brcm_pcie_wake.data = NULL; |
---|
| 1623 | +#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
---|
434 | 1624 | |
---|
435 | 1625 | dhdpcie_init_succeeded = FALSE; |
---|
436 | 1626 | |
---|
.. | .. |
---|
439 | 1629 | return; |
---|
440 | 1630 | } |
---|
441 | 1631 | |
---|
442 | | -/* Free Linux irq */ |
---|
| 1632 | +/* Enable Linux Msi */ |
---|
| 1633 | +int |
---|
| 1634 | +dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs) |
---|
| 1635 | +{ |
---|
| 1636 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) |
---|
| 1637 | + return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI); |
---|
| 1638 | +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) |
---|
| 1639 | + return pci_enable_msi_range(pdev, min_vecs, max_vecs); |
---|
| 1640 | +#else |
---|
| 1641 | + return pci_enable_msi_block(pdev, max_vecs); |
---|
| 1642 | +#endif // endif |
---|
| 1643 | +} |
---|
| 1644 | + |
---|
| 1645 | +/* Disable Linux Msi */ |
---|
| 1646 | +void |
---|
| 1647 | +dhdpcie_disable_msi(struct pci_dev *pdev) |
---|
| 1648 | +{ |
---|
| 1649 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) |
---|
| 1650 | + pci_free_irq_vectors(pdev); |
---|
| 1651 | +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) |
---|
| 1652 | + pci_disable_msi(pdev); |
---|
| 1653 | +#else |
---|
| 1654 | + pci_disable_msi(pdev); |
---|
| 1655 | +#endif // endif |
---|
| 1656 | + return; |
---|
| 1657 | +} |
---|
| 1658 | + |
---|
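The two wrappers above hide the kernel-version differences in the MSI API. A minimal usage sketch (hypothetical helper, mirroring the fallback that dhdpcie_request_irq() performs below):

/* Hypothetical sketch: try to allocate one MSI vector and fall back to
 * legacy INTx when allocation fails. */
static int dhd_example_pick_intr(struct pci_dev *pdev)
{
	if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
		dhdpcie_disable_msi(pdev);
		return -1; /* caller would switch to PCIE_INTX */
	}
	return 0; /* MSI is enabled */
}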
| 1659 | +/* Request Linux irq */ |
---|
443 | 1660 | int |
---|
444 | 1661 | dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info) |
---|
445 | 1662 | { |
---|
446 | 1663 | dhd_bus_t *bus = dhdpcie_info->bus; |
---|
447 | 1664 | struct pci_dev *pdev = dhdpcie_info->bus->dev; |
---|
| 1665 | + int host_irq_disabled; |
---|
448 | 1666 | |
---|
449 | | - snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), |
---|
450 | | - "dhdpcie:%s", pci_name(pdev)); |
---|
451 | | - if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, |
---|
452 | | - dhdpcie_info->pciname, bus) < 0) { |
---|
| 1667 | + if (!bus->irq_registered) { |
---|
| 1668 | + snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), |
---|
| 1669 | + "dhdpcie:%s", pci_name(pdev)); |
---|
| 1670 | + |
---|
| 1671 | + if (bus->d2h_intr_method == PCIE_MSI) { |
---|
| 1672 | + if (dhdpcie_enable_msi(pdev, 1, 1) < 0) { |
---|
| 1673 | + DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__)); |
---|
| 1674 | + dhdpcie_disable_msi(pdev); |
---|
| 1675 | + bus->d2h_intr_method = PCIE_INTX; |
---|
| 1676 | + } |
---|
| 1677 | + } |
---|
| 1678 | + |
---|
| 1679 | + if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, |
---|
| 1680 | + dhdpcie_info->pciname, bus) < 0) { |
---|
453 | 1681 | DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); |
---|
| 1682 | + if (bus->d2h_intr_method == PCIE_MSI) { |
---|
| 1683 | + dhdpcie_disable_msi(pdev); |
---|
| 1684 | + } |
---|
454 | 1685 | return -1; |
---|
| 1686 | + } |
---|
| 1687 | + else { |
---|
| 1688 | + bus->irq_registered = TRUE; |
---|
| 1689 | + } |
---|
| 1690 | + } else { |
---|
| 1691 | + DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__)); |
---|
| 1692 | + } |
---|
| 1693 | + |
---|
| 1694 | + host_irq_disabled = dhdpcie_irq_disabled(bus); |
---|
| 1695 | + if (host_irq_disabled) { |
---|
| 1696 | + DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so re-enabling it\n",
---|
| 1697 | + __FUNCTION__, host_irq_disabled)); |
---|
| 1698 | + dhdpcie_enable_irq(bus); |
---|
455 | 1699 | } |
---|
456 | 1700 | |
---|
457 | 1701 | DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname)); |
---|
458 | 1702 | |
---|
| 1703 | + return 0; /* SUCCESS */ |
---|
| 1704 | +} |
---|
| 1705 | + |
---|
| 1706 | +/** |
---|
| 1707 | + * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd |
---|
| 1708 | + */ |
---|
| 1709 | +int |
---|
| 1710 | +dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq) |
---|
| 1711 | +{ |
---|
| 1712 | + struct pci_dev *pdev = bus->dev; |
---|
| 1713 | + |
---|
| 1714 | + if (!pdev) { |
---|
| 1715 | + DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__)); |
---|
| 1716 | + return -ENODEV; |
---|
| 1717 | + } |
---|
| 1718 | + |
---|
| 1719 | + *irq = pdev->irq; |
---|
459 | 1720 | |
---|
460 | 1721 | return 0; /* SUCCESS */ |
---|
461 | 1722 | } |
---|
.. | .. |
---|
464 | 1725 | #define PRINTF_RESOURCE "0x%016llx" |
---|
465 | 1726 | #else |
---|
466 | 1727 | #define PRINTF_RESOURCE "0x%08x" |
---|
467 | | -#endif |
---|
| 1728 | +#endif // endif |
---|
| 1729 | + |
---|
| 1730 | +#ifdef EXYNOS_PCIE_MODULE_PATCH |
---|
| 1731 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 1732 | +extern struct pci_saved_state *bcm_pcie_default_state; |
---|
| 1733 | +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
| 1734 | +#endif /* EXYNOS_MODULE_PATCH */ |
---|
468 | 1735 | |
---|
469 | 1736 | /* |
---|
470 | 1737 | |
---|
.. | .. |
---|
474 | 1741 | |
---|
475 | 1742 | 1: struct pci_dev *pdev -- pci device structure |
---|
476 | 1743 | 2: pci_res -- structure containing pci configuration space values |
---|
477 | | - |
---|
478 | 1744 | |
---|
479 | 1745 | Return value: |
---|
480 | 1746 | |
---|
.. | .. |
---|
488 | 1754 | { |
---|
489 | 1755 | phys_addr_t bar0_addr, bar1_addr; |
---|
490 | 1756 | ulong bar1_size; |
---|
491 | | - struct pci_dev *pdev = NULL; |
---|
492 | | - pdev = dhdpcie_info->dev; |
---|
| 1757 | + struct pci_dev *pdev = dhdpcie_info->dev; |
---|
| 1758 | +#if defined(CONFIG_ARCH_MSM) && !defined(ENABLE_INSMOD_NO_FW_LOAD) |
---|
| 1759 | + int ret; |
---|
| 1760 | + /* enable PCIe link */ |
---|
| 1761 | + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, pdev->bus->number, |
---|
| 1762 | + pdev, NULL, MSM_PCIE_CONFIG_NO_CFG_RESTORE); |
---|
| 1763 | + if (ret) { |
---|
| 1764 | + DHD_ERROR(("%s: MSM_PCIE_RESUME failed : %d\n", __FUNCTION__, ret)); |
---|
| 1765 | + goto err; |
---|
| 1766 | + } |
---|
| 1767 | + DHD_ERROR(("PCIe:%s:enabled link\n", __FUNCTION__)); |
---|
| 1768 | +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) |
---|
| 1769 | + /* recover the config space of both RC and Endpoint */ |
---|
| 1770 | + msm_pcie_recover_config(pdev); |
---|
| 1771 | +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) */ |
---|
| 1772 | +#endif /* CONFIG_ARCH_MSM && !ENABLE_INSMOD_NO_FW_LOAD */ |
---|
| 1773 | +#ifdef EXYNOS_PCIE_MODULE_PATCH |
---|
| 1774 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 1775 | + if (bcm_pcie_default_state) { |
---|
| 1776 | + pci_load_saved_state(pdev, bcm_pcie_default_state); |
---|
| 1777 | + pci_restore_state(pdev); |
---|
| 1778 | + } |
---|
| 1779 | +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
| 1780 | +#endif /* EXYNOS_MODULE_PATCH */ |
---|
493 | 1781 | do { |
---|
494 | 1782 | if (pci_enable_device(pdev)) { |
---|
495 | 1783 | printf("%s: Cannot enable PCI device\n", __FUNCTION__); |
---|
.. | .. |
---|
510 | 1798 | } |
---|
511 | 1799 | |
---|
512 | 1800 | dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); |
---|
513 | | - dhdpcie_info->tcm_size = |
---|
| 1801 | + dhdpcie_info->bar1_size = |
---|
514 | 1802 | (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE; |
---|
515 | | - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); |
---|
| 1803 | + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size); |
---|
516 | 1804 | |
---|
517 | 1805 | if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { |
---|
518 | 1806 | DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); |
---|
519 | 1807 | break; |
---|
520 | 1808 | } |
---|
| 1809 | +#ifdef EXYNOS_PCIE_MODULE_PATCH |
---|
| 1810 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
| 1811 | + if (bcm_pcie_default_state == NULL) { |
---|
| 1812 | + pci_save_state(pdev); |
---|
| 1813 | + bcm_pcie_default_state = pci_store_saved_state(pdev); |
---|
| 1814 | + } |
---|
| 1815 | +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
| 1816 | +#endif /* EXYNOS_MODULE_PATCH */ |
---|
521 | 1817 | |
---|
522 | 1818 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
523 | | - if (!dhd_download_fw_on_driverload) { |
---|
524 | | - /* Backup PCIe configuration so as to use Wi-Fi on/off process |
---|
525 | | - * in case of built in driver |
---|
526 | | - */ |
---|
527 | | - pci_save_state(pdev); |
---|
528 | | - dhdpcie_info->default_state = pci_store_saved_state(pdev); |
---|
| 1819 | + /* Back up the PCIe configuration so that the Wi-Fi on/off process
---|
| 1820 | + * can be used in the case of a built-in driver
---|
| 1821 | + */ |
---|
| 1822 | + pci_save_state(pdev); |
---|
| 1823 | + dhdpcie_info->default_state = pci_store_saved_state(pdev); |
---|
529 | 1824 | |
---|
530 | | - if (dhdpcie_info->default_state == NULL) { |
---|
531 | | - DHD_ERROR(("%s pci_store_saved_state returns NULL\n", |
---|
532 | | - __FUNCTION__)); |
---|
533 | | - REG_UNMAP(dhdpcie_info->regs); |
---|
534 | | - REG_UNMAP(dhdpcie_info->tcm); |
---|
535 | | - pci_disable_device(pdev); |
---|
536 | | - break; |
---|
537 | | - } |
---|
538 | | - } |
---|
| 1825 | + if (dhdpcie_info->default_state == NULL) { |
---|
| 1826 | + DHD_ERROR(("%s pci_store_saved_state returns NULL\n", |
---|
| 1827 | + __FUNCTION__)); |
---|
| 1828 | + REG_UNMAP(dhdpcie_info->regs); |
---|
| 1829 | + REG_UNMAP(dhdpcie_info->tcm); |
---|
| 1830 | + pci_disable_device(pdev); |
---|
| 1831 | + break; |
---|
| 1832 | + } |
---|
539 | 1833 | #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
540 | 1834 | |
---|
541 | 1835 | DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", |
---|
.. | .. |
---|
573 | 1867 | |
---|
574 | 1868 | } |
---|
575 | 1869 | |
---|
| 1870 | +void dhdpcie_dump_resource(dhd_bus_t *bus) |
---|
| 1871 | +{ |
---|
| 1872 | + dhdpcie_info_t *pch; |
---|
| 1873 | + |
---|
| 1874 | + if (bus == NULL) { |
---|
| 1875 | + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); |
---|
| 1876 | + return; |
---|
| 1877 | + } |
---|
| 1878 | + |
---|
| 1879 | + if (bus->dev == NULL) { |
---|
| 1880 | + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); |
---|
| 1881 | + return; |
---|
| 1882 | + } |
---|
| 1883 | + |
---|
| 1884 | + pch = pci_get_drvdata(bus->dev); |
---|
| 1885 | + if (pch == NULL) { |
---|
| 1886 | + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); |
---|
| 1887 | + return; |
---|
| 1888 | + } |
---|
| 1889 | + |
---|
| 1890 | + /* BAR0 */ |
---|
| 1891 | + DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n", |
---|
| 1892 | + __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0), |
---|
| 1893 | + DONGLE_REG_MAP_SIZE)); |
---|
| 1894 | + |
---|
| 1895 | + /* BAR1 */ |
---|
| 1896 | + DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n", |
---|
| 1897 | + __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2), |
---|
| 1898 | + pch->bar1_size)); |
---|
| 1899 | +} |
---|
| 1900 | + |
---|
576 | 1901 | #ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
577 | | -#ifdef CONFIG_ARCH_MSM |
---|
578 | | -void dhdpcie_linkdown_cb(struct msm_pcie_notify *noti) |
---|
| 1902 | +#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ |
---|
| 1903 | + (defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \ |
---|
| 1904 | + defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820))) |
---|
| 1905 | +void dhdpcie_linkdown_cb(struct_pcie_notify *noti) |
---|
579 | 1906 | { |
---|
580 | 1907 | struct pci_dev *pdev = (struct pci_dev *)noti->user; |
---|
581 | 1908 | dhdpcie_info_t *pch = NULL; |
---|
.. | .. |
---|
590 | 1917 | DHD_ERROR(("%s: Event HANG send up " |
---|
591 | 1918 | "due to PCIe linkdown\n", |
---|
592 | 1919 | __FUNCTION__)); |
---|
593 | | - bus->islinkdown = 1; |
---|
| 1920 | +#ifdef CONFIG_ARCH_MSM |
---|
| 1921 | + bus->no_cfg_restore = 1; |
---|
| 1922 | +#endif /* CONFIG_ARCH_MSM */ |
---|
| 1923 | + bus->is_linkdown = 1; |
---|
594 | 1924 | DHD_OS_WAKE_LOCK(dhd); |
---|
595 | | - dhd_os_check_hang(dhd, 0, -ETIMEDOUT); |
---|
| 1925 | + dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT; |
---|
| 1926 | + dhd_os_send_hang_message(dhd); |
---|
596 | 1927 | } |
---|
597 | 1928 | } |
---|
598 | 1929 | } |
---|
599 | 1930 | } |
---|
600 | 1931 | |
---|
601 | 1932 | } |
---|
602 | | -#endif /* CONFIG_ARCH_MSM */ |
---|
| 1933 | +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && |
---|
| 1934 | + * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || \ |
---|
| 1935 | + * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820)) |
---|
| 1936 | + */ |
---|
603 | 1937 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ |
---|
604 | 1938 | |
---|
605 | 1939 | int dhdpcie_init(struct pci_dev *pdev) |
---|
.. | .. |
---|
612 | 1946 | #ifdef BCMPCIE_OOB_HOST_WAKE |
---|
613 | 1947 | dhdpcie_os_info_t *dhdpcie_osinfo = NULL; |
---|
614 | 1948 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 1949 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 1950 | + dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL; |
---|
| 1951 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
| 1952 | + int ret = 0; |
---|
615 | 1953 | |
---|
616 | 1954 | do { |
---|
617 | 1955 | /* osl attach */ |
---|
.. | .. |
---|
663 | 2001 | } |
---|
664 | 2002 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
665 | 2003 | |
---|
| 2004 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 2005 | + /* allocate private structure for using SMMU */ |
---|
| 2006 | + dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t)); |
---|
| 2007 | + if (dhdpcie_smmu_info == NULL) { |
---|
| 2008 | + DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n", |
---|
| 2009 | + __FUNCTION__)); |
---|
| 2010 | + break; |
---|
| 2011 | + } |
---|
| 2012 | + bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); |
---|
| 2013 | + dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info; |
---|
| 2014 | + |
---|
| 2015 | + /* Initialize smmu structure */ |
---|
| 2016 | + if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) { |
---|
| 2017 | + DHD_ERROR(("%s: Failed to initialize SMMU\n", |
---|
| 2018 | + __FUNCTION__)); |
---|
| 2019 | + break; |
---|
| 2020 | + } |
---|
| 2021 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
| 2022 | + |
---|
| 2023 | +#ifdef DHD_WAKE_STATUS |
---|
| 2024 | + /* Initialize pcie_lock */ |
---|
| 2025 | + spin_lock_init(&dhdpcie_info->pcie_lock); |
---|
| 2026 | +#endif /* DHD_WAKE_STATUS */ |
---|
| 2027 | + |
---|
666 | 2028 | /* Find the PCI resources, verify the */ |
---|
667 | 2029 | /* vendor and device ID, map BAR regions and irq, update in structures */ |
---|
668 | 2030 | if (dhdpcie_scan_resource(dhdpcie_info)) { |
---|
.. | .. |
---|
672 | 2034 | } |
---|
673 | 2035 | |
---|
674 | 2036 | /* Bus initialization */ |
---|
675 | | - bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev); |
---|
676 | | - if (!bus) { |
---|
| 2037 | + ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev); |
---|
| 2038 | + if (ret != BCME_OK) { |
---|
677 | 2039 | DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__)); |
---|
678 | 2040 | break; |
---|
679 | 2041 | } |
---|
680 | 2042 | |
---|
681 | 2043 | dhdpcie_info->bus = bus; |
---|
| 2044 | + bus->is_linkdown = 0; |
---|
| 2045 | + bus->no_bus_init = FALSE; |
---|
| 2046 | + bus->cto_triggered = 0; |
---|
682 | 2047 | |
---|
| 2048 | + bus->rc_dev = NULL; |
---|
| 2049 | + |
---|
| 2050 | + /* Get RC Device Handle */ |
---|
| 2051 | + if (bus->dev->bus) { |
---|
| 2052 | + /* the self member of struct pci_bus is the bridge device as seen by the parent */
---|
| 2053 | + bus->rc_dev = bus->dev->bus->self; |
---|
| 2054 | + DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__, |
---|
| 2055 | + bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev)); |
---|
| 2056 | + } else { |
---|
| 2057 | + DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__)); |
---|
| 2058 | + } |
---|
| 2059 | + |
---|
| 2060 | + /* if rc_dev is still NULL, try to get from vendor/device IDs */ |
---|
| 2061 | + if (bus->rc_dev == NULL) { |
---|
| 2062 | + bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL); |
---|
| 2063 | + DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__, |
---|
| 2064 | + PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev)); |
---|
| 2065 | + } |
---|
| 2066 | + |
---|
| 2067 | + bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus); |
---|
| 2068 | + bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus); |
---|
| 2069 | + DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", |
---|
| 2070 | + __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap)); |
---|
| 2071 | +#ifdef DHD_DISABLE_ASPM |
---|
| 2072 | + dhd_bus_aspm_enable_rc_ep(bus, FALSE); |
---|
| 2073 | +#endif /* DHD_DISABLE_ASPM */ |
---|
| 2074 | + |
---|
| 2075 | +#ifdef FORCE_TPOWERON |
---|
| 2076 | + if (dhdpcie_chip_req_forced_tpoweron(bus)) { |
---|
| 2077 | + dhd_bus_set_tpoweron(bus, tpoweron_scale); |
---|
| 2078 | + } |
---|
| 2079 | +#endif /* FORCE_TPOWERON */ |
---|
| 2080 | + |
---|
| 2081 | +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ |
---|
| 2082 | + defined(CONFIG_ARCH_APQ8084) |
---|
| 2083 | + brcm_pcie_wake.wake_irq = wlan_oob_irq; |
---|
| 2084 | + brcm_pcie_wake.data = bus; |
---|
| 2085 | +#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
---|
| 2086 | + |
---|
| 2087 | +#ifdef DONGLE_ENABLE_ISOLATION |
---|
| 2088 | + bus->dhd->dongle_isolation = TRUE; |
---|
| 2089 | +#endif /* DONGLE_ENABLE_ISOLATION */ |
---|
683 | 2090 | #ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
684 | 2091 | #ifdef CONFIG_ARCH_MSM |
---|
685 | 2092 | bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN; |
---|
.. | .. |
---|
688 | 2095 | bus->pcie_event.callback = dhdpcie_linkdown_cb; |
---|
689 | 2096 | bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY; |
---|
690 | 2097 | msm_pcie_register_event(&bus->pcie_event); |
---|
691 | | - bus->islinkdown = 0; |
---|
| 2098 | + bus->no_cfg_restore = FALSE; |
---|
692 | 2099 | #endif /* CONFIG_ARCH_MSM */ |
---|
| 2100 | +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY |
---|
| 2101 | +#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \ |
---|
| 2102 | + defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) |
---|
| 2103 | + bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN; |
---|
| 2104 | + bus->pcie_event.user = pdev; |
---|
| 2105 | + bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK; |
---|
| 2106 | + bus->pcie_event.callback = dhdpcie_linkdown_cb; |
---|
| 2107 | + exynos_pcie_register_event(&bus->pcie_event); |
---|
| 2108 | +#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || |
---|
| 2109 | + * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 |
---|
| 2110 | + */ |
---|
| 2111 | +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ |
---|
| 2112 | + bus->read_shm_fail = FALSE; |
---|
693 | 2113 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ |
---|
694 | 2114 | |
---|
695 | 2115 | if (bus->intr) { |
---|
.. | .. |
---|
735 | 2155 | |
---|
736 | 2156 | /* Attach to the OS network interface */ |
---|
737 | 2157 | DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); |
---|
738 | | - if (dhd_register_if(bus->dhd, 0, TRUE)) { |
---|
| 2158 | + if (dhd_attach_net(bus->dhd, TRUE)) { |
---|
739 | 2159 | DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__)); |
---|
740 | 2160 | break; |
---|
741 | 2161 | } |
---|
| 2162 | +#ifdef WL_VIF_SUPPORT |
---|
| 2163 | + /* Attach to the virtual interface */ |
---|
| 2164 | + DHD_TRACE(("%s(): Calling dhd_register_vif() \n", __FUNCTION__)); |
---|
| 2165 | + if (dhd_register_vif(bus->dhd) != 0) { |
---|
| 2166 | + DHD_ERROR(("%s(): ERROR.. dhd_register_vif() failed\n", __FUNCTION__)); |
---|
| 2167 | + } |
---|
| 2168 | +#endif // endif |
---|
742 | 2169 | |
---|
743 | 2170 | dhdpcie_init_succeeded = TRUE; |
---|
| 2171 | + |
---|
| 2172 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 2173 | + pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT); |
---|
| 2174 | + pm_runtime_use_autosuspend(&pdev->dev); |
---|
| 2175 | + atomic_set(&bus->dhd->block_bus, FALSE); |
---|
| 2176 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
744 | 2177 | |
---|
745 | 2178 | DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__)); |
---|
746 | 2179 | return 0; /* return SUCCESS */ |
---|
.. | .. |
---|
756 | 2189 | MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); |
---|
757 | 2190 | } |
---|
758 | 2191 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 2192 | + |
---|
| 2193 | +#ifdef USE_SMMU_ARCH_MSM |
---|
| 2194 | + if (dhdpcie_smmu_info) { |
---|
| 2195 | + MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); |
---|
| 2196 | + dhdpcie_info->smmu_cxt = NULL; |
---|
| 2197 | + } |
---|
| 2198 | +#endif /* USE_SMMU_ARCH_MSM */ |
---|
759 | 2199 | |
---|
760 | 2200 | if (dhdpcie_info) |
---|
761 | 2201 | dhdpcie_detach(dhdpcie_info); |
---|
.. | .. |
---|
779 | 2219 | DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__)); |
---|
780 | 2220 | if (bus) { |
---|
781 | 2221 | pdev = bus->dev; |
---|
782 | | - free_irq(pdev->irq, bus); |
---|
| 2222 | + if (bus->irq_registered) { |
---|
| 2223 | + free_irq(pdev->irq, bus); |
---|
| 2224 | + bus->irq_registered = FALSE; |
---|
| 2225 | + if (bus->d2h_intr_method == PCIE_MSI) { |
---|
| 2226 | + dhdpcie_disable_msi(pdev); |
---|
| 2227 | + } |
---|
| 2228 | + } else { |
---|
| 2229 | + DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); |
---|
| 2230 | + } |
---|
783 | 2231 | } |
---|
784 | 2232 | DHD_TRACE(("%s: Exit\n", __FUNCTION__)); |
---|
785 | 2233 | return; |
---|
.. | .. |
---|
803 | 2251 | disable interrupt and queue DPC if mail box interrupts are raised. |
---|
804 | 2252 | */ |
---|
805 | 2253 | |
---|
806 | | - |
---|
807 | 2254 | irqreturn_t |
---|
808 | 2255 | dhdpcie_isr(int irq, void *arg) |
---|
809 | 2256 | { |
---|
810 | 2257 | dhd_bus_t *bus = (dhd_bus_t*)arg; |
---|
811 | | - if (dhdpcie_bus_isr(bus)) |
---|
812 | | - return TRUE; |
---|
813 | | - else |
---|
814 | | - return FALSE; |
---|
| 2258 | + bus->isr_entry_time = OSL_LOCALTIME_NS(); |
---|
| 2259 | + if (!dhdpcie_bus_isr(bus)) { |
---|
| 2260 | + DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__)); |
---|
| 2261 | + } |
---|
| 2262 | + bus->isr_exit_time = OSL_LOCALTIME_NS(); |
---|
| 2263 | + return IRQ_HANDLED; |
---|
| 2264 | +} |
---|
| 2265 | + |
---|
| 2266 | +int |
---|
| 2267 | +dhdpcie_disable_irq_nosync(dhd_bus_t *bus) |
---|
| 2268 | +{ |
---|
| 2269 | + struct pci_dev *dev; |
---|
| 2270 | + if ((bus == NULL) || (bus->dev == NULL)) { |
---|
| 2271 | + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); |
---|
| 2272 | + return BCME_ERROR; |
---|
| 2273 | + } |
---|
| 2274 | + |
---|
| 2275 | + dev = bus->dev; |
---|
| 2276 | + disable_irq_nosync(dev->irq); |
---|
| 2277 | + return BCME_OK; |
---|
| 2278 | +} |
---|
| 2279 | + |
---|
| 2280 | +int |
---|
| 2281 | +dhdpcie_disable_irq(dhd_bus_t *bus) |
---|
| 2282 | +{ |
---|
| 2283 | + struct pci_dev *dev; |
---|
| 2284 | + if ((bus == NULL) || (bus->dev == NULL)) { |
---|
| 2285 | + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); |
---|
| 2286 | + return BCME_ERROR; |
---|
| 2287 | + } |
---|
| 2288 | + |
---|
| 2289 | + dev = bus->dev; |
---|
| 2290 | + disable_irq(dev->irq); |
---|
| 2291 | + return BCME_OK; |
---|
| 2292 | +} |
---|
| 2293 | + |
---|
| 2294 | +int |
---|
| 2295 | +dhdpcie_enable_irq(dhd_bus_t *bus) |
---|
| 2296 | +{ |
---|
| 2297 | + struct pci_dev *dev; |
---|
| 2298 | + if ((bus == NULL) || (bus->dev == NULL)) { |
---|
| 2299 | + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); |
---|
| 2300 | + return BCME_ERROR; |
---|
| 2301 | + } |
---|
| 2302 | + |
---|
| 2303 | + dev = bus->dev; |
---|
| 2304 | + enable_irq(dev->irq); |
---|
| 2305 | + return BCME_OK; |
---|
| 2306 | +} |
---|
| 2307 | + |
---|
| 2308 | +int |
---|
| 2309 | +dhdpcie_irq_disabled(dhd_bus_t *bus) |
---|
| 2310 | +{ |
---|
| 2311 | + struct irq_desc *desc = irq_to_desc(bus->dev->irq); |
---|
| 2312 | + /* depth will be zero, if enabled */ |
---|
| 2313 | + return desc->depth; |
---|
815 | 2314 | } |
---|
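Since disable_irq()/enable_irq() calls nest in the kernel, dhdpcie_irq_disabled() above reports the nesting depth (0 when the line is enabled). A minimal, hypothetical sketch (not part of the driver) of re-enabling the line only when it was actually left disabled, the same check dhdpcie_request_irq() makes:

/* Hypothetical sketch: restore the PCIe IRQ only if it is currently
 * disabled, relying on the depth reported by dhdpcie_irq_disabled(). */
static void dhd_example_restore_irq(dhd_bus_t *bus)
{
	if (dhdpcie_irq_disabled(bus)) {
		dhdpcie_enable_irq(bus);
	}
}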
816 | 2315 | |
---|
817 | 2316 | int |
---|
.. | .. |
---|
835 | 2334 | |
---|
836 | 2335 | #ifdef CONFIG_ARCH_MSM |
---|
837 | 2336 | #ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
838 | | - if (bus->islinkdown) { |
---|
| 2337 | + if (bus->no_cfg_restore) { |
---|
839 | 2338 | options = MSM_PCIE_CONFIG_NO_CFG_RESTORE; |
---|
840 | 2339 | } |
---|
841 | 2340 | ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, |
---|
842 | 2341 | bus->dev, NULL, options); |
---|
843 | | - if (bus->islinkdown && !ret) { |
---|
| 2342 | + if (bus->no_cfg_restore && !ret) { |
---|
| 2343 | +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) |
---|
844 | 2344 | msm_pcie_recover_config(bus->dev); |
---|
845 | | - if (bus->dhd) { |
---|
846 | | - DHD_OS_WAKE_UNLOCK(bus->dhd); |
---|
847 | | - } |
---|
| 2345 | +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) */ |
---|
| 2346 | + bus->no_cfg_restore = 0; |
---|
848 | 2347 | } |
---|
849 | 2348 | #else |
---|
850 | 2349 | ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, |
---|
.. | .. |
---|
883 | 2382 | |
---|
884 | 2383 | #ifdef CONFIG_ARCH_MSM |
---|
885 | 2384 | #ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
886 | | - if (bus->islinkdown) { |
---|
| 2385 | + if (bus->no_cfg_restore) { |
---|
887 | 2386 | options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN; |
---|
888 | 2387 | } |
---|
889 | 2388 | |
---|
.. | .. |
---|
906 | 2405 | int |
---|
907 | 2406 | dhdpcie_disable_device(dhd_bus_t *bus) |
---|
908 | 2407 | { |
---|
909 | | - int islinkdown; |
---|
910 | | - dhdpcie_info_t *pch; |
---|
911 | | - |
---|
912 | 2408 | DHD_TRACE(("%s Enter:\n", __FUNCTION__)); |
---|
913 | 2409 | |
---|
914 | 2410 | if (bus == NULL) { |
---|
.. | .. |
---|
919 | 2415 | return BCME_ERROR; |
---|
920 | 2416 | } |
---|
921 | 2417 | |
---|
922 | | - pch = pci_get_drvdata(bus->dev); |
---|
923 | | - if (pch == NULL) { |
---|
924 | | - return BCME_ERROR; |
---|
925 | | - } |
---|
926 | | -#if defined(SUPPORT_LINKDOWN_RECOVERY) && defined(CONFIG_ARCH_MSM) |
---|
927 | | - islinkdown = bus->islinkdown; |
---|
928 | | -#else |
---|
929 | | - islinkdown = 0; |
---|
930 | | -#endif /* SUPPORT_LINKDOWN_RECOVERY && CONFIG_ARCH_MSM */ |
---|
931 | | - |
---|
932 | | - /* Backup PCIe configuration so as to use Wi-Fi on/off process |
---|
933 | | - * in case of built in driver |
---|
934 | | - */ |
---|
935 | | - if (!islinkdown) { |
---|
936 | | - pci_save_state(bus->dev); |
---|
937 | | - pch->default_state = pci_store_saved_state(bus->dev); |
---|
938 | | - |
---|
939 | | - if (pch->default_state == NULL) { |
---|
940 | | - DHD_ERROR(("%s pci_store_saved_state returns NULL\n", __FUNCTION__)); |
---|
941 | | - } |
---|
942 | | - } |
---|
943 | | - |
---|
944 | | - pci_disable_device(bus->dev); |
---|
| 2418 | + if (pci_is_enabled(bus->dev)) |
---|
| 2419 | + pci_disable_device(bus->dev); |
---|
945 | 2420 | |
---|
946 | 2421 | return 0; |
---|
947 | 2422 | } |
---|
.. | .. |
---|
952 | 2427 | int ret = BCME_ERROR; |
---|
953 | 2428 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) |
---|
954 | 2429 | dhdpcie_info_t *pch; |
---|
955 | | - int islinkdown; |
---|
956 | 2430 | #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ |
---|
957 | 2431 | |
---|
958 | 2432 | DHD_TRACE(("%s Enter:\n", __FUNCTION__)); |
---|
.. | .. |
---|
971 | 2445 | return BCME_ERROR; |
---|
972 | 2446 | } |
---|
973 | 2447 | |
---|
974 | | -#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_LINKDOWN_RECOVERY) |
---|
975 | | - islinkdown = bus->islinkdown; |
---|
| 2448 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \ |
---|
| 2449 | + KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890) |
---|
| 2450 | + /* Updated to use pci_load_and_free_saved_state to be compatible
---|
| 2451 | + * with kernel versions 3.14.0 to 3.18.41.
---|
| 2452 | + */ |
---|
| 2453 | + pci_load_and_free_saved_state(bus->dev, &pch->default_state); |
---|
| 2454 | + pch->default_state = pci_store_saved_state(bus->dev); |
---|
976 | 2455 | #else |
---|
977 | | - islinkdown = 0; |
---|
978 | | -#endif /* CONFIG_ARCH_MSM && SUPPORT_LINKDOWN_RECOVERY */ |
---|
| 2456 | + pci_load_saved_state(bus->dev, pch->default_state); |
---|
| 2457 | +#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */ |
---|
979 | 2458 | |
---|
980 | | - if (!islinkdown) { |
---|
981 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) |
---|
982 | | - /* Updated with pci_load_and_free_saved_state to compatible |
---|
983 | | - * with kernel 3.14 or higher |
---|
984 | | - */ |
---|
985 | | - pci_load_and_free_saved_state(bus->dev, &pch->default_state); |
---|
986 | | -#else |
---|
987 | | - pci_load_saved_state(bus->dev, pch->default_state); |
---|
| 2459 | + /* Check if Device ID is valid */ |
---|
| 2460 | + if (bus->dev->state_saved) { |
---|
| 2461 | + uint32 vid, saved_vid; |
---|
| 2462 | + pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid); |
---|
| 2463 | + saved_vid = bus->dev->saved_config_space[PCI_CFG_VID]; |
---|
| 2464 | + if (vid != saved_vid) { |
---|
| 2465 | + DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) " |
---|
| 2466 | + "Skip the bus init\n", __FUNCTION__, vid, saved_vid)); |
---|
| 2467 | + bus->no_bus_init = TRUE; |
---|
| 2468 | + /* Check if the PCIe link is down */ |
---|
| 2469 | + if (vid == (uint32)-1) { |
---|
| 2470 | + bus->is_linkdown = 1; |
---|
| 2471 | +#ifdef SUPPORT_LINKDOWN_RECOVERY |
---|
| 2472 | +#ifdef CONFIG_ARCH_MSM |
---|
| 2473 | + bus->no_cfg_restore = TRUE; |
---|
988 | 2474 | #endif /* CONFIG_ARCH_MSM */ |
---|
989 | | - |
---|
990 | | - pci_restore_state(bus->dev); |
---|
| 2475 | +#endif /* SUPPORT_LINKDOWN_RECOVERY */ |
---|
| 2476 | + } |
---|
| 2477 | + return BCME_ERROR; |
---|
| 2478 | + } |
---|
991 | 2479 | } |
---|
| 2480 | + |
---|
| 2481 | + pci_restore_state(bus->dev); |
---|
992 | 2482 | #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ |
---|
993 | 2483 | |
---|
994 | 2484 | ret = pci_enable_device(bus->dev); |
---|
.. | .. |
---|
996 | 2486 | pci_disable_device(bus->dev); |
---|
997 | 2487 | } else { |
---|
998 | 2488 | pci_set_master(bus->dev); |
---|
999 | | -#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_LINKDOWN_RECOVERY) |
---|
1000 | | - bus->islinkdown = 0; |
---|
1001 | | -#endif /* CONFIG_ARCH_MSM && SUPPORT_LINKDOWN_RECOVERY */ |
---|
1002 | 2489 | } |
---|
1003 | 2490 | |
---|
1004 | 2491 | return ret; |
---|
.. | .. |
---|
1048 | 2535 | } |
---|
1049 | 2536 | |
---|
1050 | 2537 | bus->regs = dhdpcie_info->regs; |
---|
1051 | | - dhdpcie_info->tcm_size = |
---|
| 2538 | + dhdpcie_info->bar1_size = |
---|
1052 | 2539 | (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE; |
---|
1053 | | - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); |
---|
| 2540 | + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size); |
---|
1054 | 2541 | if (!dhdpcie_info->tcm) { |
---|
1055 | 2542 | DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); |
---|
1056 | 2543 | REG_UNMAP(dhdpcie_info->regs); |
---|
.. | .. |
---|
1141 | 2628 | } |
---|
1142 | 2629 | |
---|
1143 | 2630 | #ifdef BCMPCIE_OOB_HOST_WAKE |
---|
| 2631 | +#ifdef CONFIG_BCMDHD_GET_OOB_STATE |
---|
| 2632 | +extern int dhd_get_wlan_oob_gpio(void); |
---|
| 2633 | +#endif /* CONFIG_BCMDHD_GET_OOB_STATE */ |
---|
| 2634 | + |
---|
| 2635 | +int dhdpcie_get_oob_irq_level(void) |
---|
| 2636 | +{ |
---|
| 2637 | + int gpio_level; |
---|
| 2638 | + |
---|
| 2639 | +#ifdef CONFIG_BCMDHD_GET_OOB_STATE |
---|
| 2640 | + gpio_level = dhd_get_wlan_oob_gpio(); |
---|
| 2641 | +#else |
---|
| 2642 | + gpio_level = BCME_UNSUPPORTED; |
---|
| 2643 | +#endif /* CONFIG_BCMDHD_GET_OOB_STATE */ |
---|
| 2644 | + return gpio_level; |
---|
| 2645 | +} |
---|
| 2646 | + |
---|
| 2647 | +int dhdpcie_get_oob_irq_status(struct dhd_bus *bus) |
---|
| 2648 | +{ |
---|
| 2649 | + dhdpcie_info_t *pch; |
---|
| 2650 | + dhdpcie_os_info_t *dhdpcie_osinfo; |
---|
| 2651 | + |
---|
| 2652 | + if (bus == NULL) { |
---|
| 2653 | + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); |
---|
| 2654 | + return 0; |
---|
| 2655 | + } |
---|
| 2656 | + |
---|
| 2657 | + if (bus->dev == NULL) { |
---|
| 2658 | + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); |
---|
| 2659 | + return 0; |
---|
| 2660 | + } |
---|
| 2661 | + |
---|
| 2662 | + pch = pci_get_drvdata(bus->dev); |
---|
| 2663 | + if (pch == NULL) { |
---|
| 2664 | + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); |
---|
| 2665 | + return 0; |
---|
| 2666 | + } |
---|
| 2667 | + |
---|
| 2668 | + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; |
---|
| 2669 | + |
---|
| 2670 | + return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0; |
---|
| 2671 | +} |
---|
| 2672 | + |
---|
| 2673 | +int dhdpcie_get_oob_irq_num(struct dhd_bus *bus) |
---|
| 2674 | +{ |
---|
| 2675 | + dhdpcie_info_t *pch; |
---|
| 2676 | + dhdpcie_os_info_t *dhdpcie_osinfo; |
---|
| 2677 | + |
---|
| 2678 | + if (bus == NULL) { |
---|
| 2679 | + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); |
---|
| 2680 | + return 0; |
---|
| 2681 | + } |
---|
| 2682 | + |
---|
| 2683 | + if (bus->dev == NULL) { |
---|
| 2684 | + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); |
---|
| 2685 | + return 0; |
---|
| 2686 | + } |
---|
| 2687 | + |
---|
| 2688 | + pch = pci_get_drvdata(bus->dev); |
---|
| 2689 | + if (pch == NULL) { |
---|
| 2690 | + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); |
---|
| 2691 | + return 0; |
---|
| 2692 | + } |
---|
| 2693 | + |
---|
| 2694 | + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; |
---|
| 2695 | + |
---|
| 2696 | + return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0; |
---|
| 2697 | +} |
---|
| 2698 | + |
---|
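The three helpers above expose the OOB host-wake IRQ bookkeeping. A hypothetical debug sketch (not part of the driver) that dumps them in one line:

/* Hypothetical debug helper: print the OOB IRQ number, enable state and the
 * current GPIO level (BCME_UNSUPPORTED when the platform hook is absent). */
static void dhd_example_dump_oob(struct dhd_bus *bus)
{
	DHD_ERROR(("OOB irq %d, enabled %d, gpio level %d\n",
		dhdpcie_get_oob_irq_num(bus),
		dhdpcie_get_oob_irq_status(bus),
		dhdpcie_get_oob_irq_level()));
}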
1144 | 2699 | void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) |
---|
1145 | 2700 | { |
---|
1146 | 2701 | unsigned long flags; |
---|
.. | .. |
---|
1169 | 2724 | (dhdpcie_osinfo->oob_irq_num > 0)) { |
---|
1170 | 2725 | if (enable) { |
---|
1171 | 2726 | enable_irq(dhdpcie_osinfo->oob_irq_num); |
---|
| 2727 | + bus->oob_intr_enable_count++; |
---|
| 2728 | + bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS(); |
---|
1172 | 2729 | } else { |
---|
1173 | 2730 | disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); |
---|
| 2731 | + bus->oob_intr_disable_count++; |
---|
| 2732 | + bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS(); |
---|
1174 | 2733 | } |
---|
1175 | 2734 | dhdpcie_osinfo->oob_irq_enabled = enable; |
---|
1176 | 2735 | } |
---|
.. | .. |
---|
1180 | 2739 | static irqreturn_t wlan_oob_irq(int irq, void *data) |
---|
1181 | 2740 | { |
---|
1182 | 2741 | dhd_bus_t *bus; |
---|
| 2742 | + unsigned long flags_bus; |
---|
1183 | 2743 | DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); |
---|
1184 | 2744 | bus = (dhd_bus_t *)data; |
---|
1185 | 2745 | dhdpcie_oob_intr_set(bus, FALSE); |
---|
1186 | | - if (bus->dhd->up && bus->suspended) { |
---|
| 2746 | + bus->last_oob_irq_time = OSL_LOCALTIME_NS(); |
---|
| 2747 | + bus->oob_intr_count++; |
---|
| 2748 | +#ifdef DHD_WAKE_STATUS |
---|
| 2749 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 2750 | + /* This condition avoids counting wake-ups that come from Runtime PM */
---|
| 2751 | + if (bus->chk_pm) |
---|
| 2752 | +#endif /* DHD_PCIE_RUNTIMEPM */
---|
| 2753 | + { |
---|
| 2754 | + bcmpcie_set_get_wake(bus, 1); |
---|
| 2755 | + } |
---|
| 2756 | +#endif /* DHD_WAKE_STATUS */ |
---|
| 2757 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 2758 | + dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq); |
---|
| 2759 | +#endif /* DHD_PCIE_RUNTIMEPM */
---|
| 2760 | +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM |
---|
| 2761 | + dhd_bus_wakeup_work(bus->dhd); |
---|
| 2762 | +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ |
---|
| 2763 | + DHD_BUS_LOCK(bus->bus_lock, flags_bus); |
---|
| 2764 | + /* Hold wakelock if bus_low_power_state is |
---|
| 2765 | + * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED |
---|
| 2766 | + */ |
---|
| 2767 | + if (bus->dhd->up && bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { |
---|
1187 | 2768 | DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); |
---|
1188 | 2769 | } |
---|
| 2770 | + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); |
---|
1189 | 2771 | return IRQ_HANDLED; |
---|
1190 | 2772 | } |
---|
1191 | 2773 | |
---|
.. | .. |
---|
1233 | 2815 | err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num); |
---|
1234 | 2816 | if (!err) { |
---|
1235 | 2817 | dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; |
---|
| 2818 | + } else { |
---|
| 2819 | + /* On the Hikey platform enable_irq_wake() fails with
---|
| 2820 | + * ENXIO (No such device or address) because the irq_set_wake() callback
---|
| 2821 | + * is not registered in the kernel, so the error is ignored and BCME_OK is returned.
---|
| 2822 | + */ |
---|
| 2823 | +#ifdef BOARD_HIKEY |
---|
| 2824 | + DHD_ERROR(("%s: continue even though enable_irq_wake failed: %d\n",
---|
| 2825 | + __FUNCTION__, err)); |
---|
| 2826 | + err = BCME_OK; |
---|
| 2827 | +#endif /* BOARD_HIKEY */ |
---|
1236 | 2828 | } |
---|
1237 | 2829 | dhdpcie_osinfo->oob_irq_enabled = TRUE; |
---|
1238 | 2830 | } |
---|
.. | .. |
---|
1286 | 2878 | dhdpcie_osinfo->oob_irq_registered = FALSE; |
---|
1287 | 2879 | } |
---|
1288 | 2880 | #endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 2881 | + |
---|
| 2882 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 2883 | +bool dhd_runtimepm_state(dhd_pub_t *dhd) |
---|
| 2884 | +{ |
---|
| 2885 | + dhd_bus_t *bus; |
---|
| 2886 | + unsigned long flags; |
---|
| 2887 | + bus = dhd->bus; |
---|
| 2888 | + |
---|
| 2889 | + DHD_GENERAL_LOCK(dhd, flags); |
---|
| 2890 | + bus->idlecount++; |
---|
| 2891 | + |
---|
| 2892 | + DHD_TRACE(("%s : Enter \n", __FUNCTION__)); |
---|
| 2893 | + if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { |
---|
| 2894 | + bus->idlecount = 0; |
---|
| 2895 | + if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) { |
---|
| 2896 | + bus->bus_wake = 0; |
---|
| 2897 | + DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd); |
---|
| 2898 | + bus->runtime_resume_done = FALSE; |
---|
| 2899 | + /* stop all interface network queue. */ |
---|
| 2900 | + dhd_bus_stop_queue(bus); |
---|
| 2901 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2902 | + DHD_ERROR(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n", |
---|
| 2903 | + __FUNCTION__, bus->idletime, dhd_runtimepm_ms)); |
---|
| 2904 | + /* If RPM suspend fails, return FALSE so that it is retried */
---|
| 2905 | + if (dhdpcie_set_suspend_resume(bus, TRUE)) { |
---|
| 2906 | + DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__)); |
---|
| 2907 | + DHD_GENERAL_LOCK(dhd, flags); |
---|
| 2908 | + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd); |
---|
| 2909 | + dhd_os_busbusy_wake(bus->dhd); |
---|
| 2910 | + bus->runtime_resume_done = TRUE; |
---|
| 2911 | + /* Without the call below, the NET TX queue can get stuck */
---|
| 2912 | + dhd_bus_start_queue(bus); |
---|
| 2913 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2914 | + smp_wmb(); |
---|
| 2915 | + wake_up_interruptible(&bus->rpm_queue); |
---|
| 2916 | + return FALSE; |
---|
| 2917 | + } |
---|
| 2918 | + |
---|
| 2919 | + DHD_GENERAL_LOCK(dhd, flags); |
---|
| 2920 | + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd); |
---|
| 2921 | + DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd); |
---|
| 2922 | + /* Make sure the NET TX queue is active */
---|
| 2923 | + dhd_bus_start_queue(bus); |
---|
| 2924 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2925 | + |
---|
| 2926 | + wait_event_interruptible(bus->rpm_queue, bus->bus_wake); |
---|
| 2927 | + |
---|
| 2928 | + DHD_GENERAL_LOCK(dhd, flags); |
---|
| 2929 | + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd); |
---|
| 2930 | + DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd); |
---|
| 2931 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2932 | + |
---|
| 2933 | + dhdpcie_set_suspend_resume(bus, FALSE); |
---|
| 2934 | + |
---|
| 2935 | + DHD_GENERAL_LOCK(dhd, flags); |
---|
| 2936 | + DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd); |
---|
| 2937 | + dhd_os_busbusy_wake(bus->dhd); |
---|
| 2938 | + /* Inform the wake up context that Resume is over */ |
---|
| 2939 | + bus->runtime_resume_done = TRUE; |
---|
| 2940 | + /* Make sure the NET TX queue is active */
---|
| 2941 | + dhd_bus_start_queue(bus); |
---|
| 2942 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2943 | + |
---|
| 2944 | + smp_wmb(); |
---|
| 2945 | + wake_up_interruptible(&bus->rpm_queue); |
---|
| 2946 | + DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__)); |
---|
| 2947 | + return TRUE; |
---|
| 2948 | + } else { |
---|
| 2949 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2950 | + /* Since one of the contexts is busy (TX, IOVAR or RX)
---|
| 2951 | + * we should not suspend |
---|
| 2952 | + */ |
---|
| 2953 | + DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n", |
---|
| 2954 | + __FUNCTION__, dhd->dhd_bus_busy_state)); |
---|
| 2955 | + return FALSE; |
---|
| 2956 | + } |
---|
| 2957 | + } |
---|
| 2958 | + |
---|
| 2959 | + DHD_GENERAL_UNLOCK(dhd, flags); |
---|
| 2960 | + return FALSE; |
---|
| 2961 | +} /* dhd_runtimepm_state */ |
---|
| 2962 | + |
---|
| 2963 | +/*
---|
| 2964 | + * dhd_runtime_bus_wake
---|
| 2965 | + * TRUE - the call was handled in the runtime PM context
---|
| 2966 | + * FALSE - it is not involved in the runtime PM context
---|
| 2967 | + */
---|
| 2968 | +bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) |
---|
| 2969 | +{ |
---|
| 2970 | + unsigned long flags; |
---|
| 2971 | + bus->idlecount = 0; |
---|
| 2972 | + DHD_TRACE(("%s : enter\n", __FUNCTION__)); |
---|
| 2973 | + if (bus->dhd->up == FALSE) { |
---|
| 2974 | + DHD_INFO(("%s : dhd is not up\n", __FUNCTION__)); |
---|
| 2975 | + return FALSE; |
---|
| 2976 | + } |
---|
| 2977 | + |
---|
| 2978 | + DHD_GENERAL_LOCK(bus->dhd, flags); |
---|
| 2979 | + if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) { |
---|
| 2980 | + /* Wake up the RPM state thread if suspend is in progress or already done */
---|
| 2981 | + if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) || |
---|
| 2982 | + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) { |
---|
| 2983 | + bus->bus_wake = 1; |
---|
| 2984 | + |
---|
| 2985 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 2986 | + |
---|
| 2987 | + DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr)); |
---|
| 2988 | + smp_wmb(); |
---|
| 2989 | + wake_up_interruptible(&bus->rpm_queue); |
---|
| 2990 | + /* No need to wake up the RPM state thread */ |
---|
| 2991 | + } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) { |
---|
| 2992 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 2993 | + } |
---|
| 2994 | + |
---|
| 2995 | + /* If wait is TRUE, block here until runtime resume completes */
---|
| 2996 | + if (wait) { |
---|
| 2997 | + wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done); |
---|
| 2998 | + } else { |
---|
| 2999 | + DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__)); |
---|
| 3000 | + } |
---|
| 3001 | + /* The call was handled in the RPM context, so return TRUE */
---|
| 3002 | + return TRUE; |
---|
| 3003 | + } |
---|
| 3004 | + |
---|
| 3005 | + DHD_GENERAL_UNLOCK(bus->dhd, flags); |
---|
| 3006 | + |
---|
| 3007 | + return FALSE; |
---|
| 3008 | +} |
---|
| 3009 | + |
---|
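| | +/* dhd_pub_t wrapper around dhd_runtime_bus_wake() for callers that only hold a dhd_pub_t handle */
---|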
| 3010 | +bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr) |
---|
| 3011 | +{ |
---|
| 3012 | + dhd_bus_t *bus = dhdp->bus; |
---|
| 3013 | + return dhd_runtime_bus_wake(bus, wait, func_addr); |
---|
| 3014 | +} |
---|
| 3015 | + |
---|
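| | +/* Block runtime PM by zeroing the idle timeout so dhd_runtimepm_state() never starts a suspend */
---|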
| 3016 | +void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp) |
---|
| 3017 | +{ |
---|
| 3018 | + dhd_bus_t *bus = dhdp->bus; |
---|
| 3019 | + bus->idletime = 0; |
---|
| 3020 | +} |
---|
| 3021 | + |
---|
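| | +/* Return TRUE once the most recent runtime resume has completed */
---|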
| 3022 | +bool dhdpcie_is_resume_done(dhd_pub_t *dhdp) |
---|
| 3023 | +{ |
---|
| 3024 | + dhd_bus_t *bus = dhdp->bus; |
---|
| 3025 | + return bus->runtime_resume_done; |
---|
| 3026 | +} |
---|
| 3027 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 3028 | + |
---|
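| | +/* Map a DHD bus handle to the underlying struct device, or NULL if no PCI device is attached */
---|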
| 3029 | +struct device * dhd_bus_to_dev(dhd_bus_t *bus) |
---|
| 3030 | +{ |
---|
| 3031 | + struct pci_dev *pdev; |
---|
| 3032 | + pdev = bus->dev; |
---|
| 3033 | + |
---|
| 3034 | + if (pdev) |
---|
| 3035 | + return &pdev->dev; |
---|
| 3036 | + else |
---|
| 3037 | + return NULL; |
---|
| 3038 | +} |
---|
| 3039 | + |
---|
| 3040 | +#define KIRQ_PRINT_BUF_LEN 256 |
---|
| 3041 | + |
---|
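| | +/* Dump per-CPU kernel interrupt counters, chip name and trigger type for the given IRQ (similar to /proc/interrupts) */
---|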
| 3042 | +void |
---|
| 3043 | +dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num) |
---|
| 3044 | +{ |
---|
| 3045 | + unsigned long flags = 0; |
---|
| 3046 | + struct irq_desc *desc; |
---|
| 3047 | + int i; /* cpu iterator */ |
---|
| 3048 | + struct bcmstrbuf strbuf; |
---|
| 3049 | + char tmp_buf[KIRQ_PRINT_BUF_LEN]; |
---|
| 3050 | + |
---|
| 3051 | + desc = irq_to_desc(irq_num); |
---|
| 3052 | + if (!desc) { |
---|
| 3053 | + DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__)); |
---|
| 3054 | + return; |
---|
| 3055 | + } |
---|
| 3056 | + bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN); |
---|
| 3057 | + raw_spin_lock_irqsave(&desc->lock, flags); |
---|
| 3058 | + bcm_bprintf(&strbuf, "dhd irq %u:", irq_num); |
---|
| 3059 | + for_each_online_cpu(i) |
---|
| 3060 | + bcm_bprintf(&strbuf, "%10u ", |
---|
| 3061 | + desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0); |
---|
| 3062 | + if (desc->irq_data.chip) { |
---|
| 3063 | + if (desc->irq_data.chip->name) |
---|
| 3064 | + bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name); |
---|
| 3065 | + else |
---|
| 3066 | + bcm_bprintf(&strbuf, " %8s", "-"); |
---|
| 3067 | + } else { |
---|
| 3068 | + bcm_bprintf(&strbuf, " %8s", "None"); |
---|
| 3069 | + } |
---|
| 3070 | +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) |
---|
| 3071 | + if (desc->irq_data.domain) |
---|
| 3072 | + bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq); |
---|
| 3073 | +#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL |
---|
| 3074 | + bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); |
---|
| 3075 | +#endif // endif |
---|
| 3076 | +#endif /* LINUX_VERSION_CODE >= 3.1.0 */
---|
| 3077 | + |
---|
| 3078 | + if (desc->name) |
---|
| 3079 | + bcm_bprintf(&strbuf, "-%-8s", desc->name); |
---|
| 3080 | + |
---|
| 3081 | + DHD_ERROR(("%s\n", strbuf.origbuf)); |
---|
| 3082 | + raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
| 3083 | +} |
---|
| 3084 | + |
---|
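| | +/* Print kernel IRQ statistics for the data-path interrupt and, when BCMPCIE_OOB_HOST_WAKE is set, the OOB host-wake interrupt */
---|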
| 3085 | +void |
---|
| 3086 | +dhd_show_kirqstats(dhd_pub_t *dhd) |
---|
| 3087 | +{ |
---|
| 3088 | + unsigned int irq = -1; |
---|
| 3089 | +#ifdef BCMPCIE |
---|
| 3090 | + dhdpcie_get_pcieirq(dhd->bus, &irq); |
---|
| 3091 | +#endif /* BCMPCIE */ |
---|
| 3092 | +#ifdef BCMSDIO |
---|
| 3093 | + irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num; |
---|
| 3094 | +#endif /* BCMSDIO */ |
---|
| 3095 | + if (irq != (unsigned int)-1) {
---|
| 3096 | +#ifdef BCMPCIE |
---|
| 3097 | + DHD_ERROR(("DUMP data kernel irq stats : \n")); |
---|
| 3098 | +#endif /* BCMPCIE */ |
---|
| 3099 | +#ifdef BCMSDIO |
---|
| 3100 | + DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n")); |
---|
| 3101 | +#endif /* BCMSDIO */ |
---|
| 3102 | + dhd_print_kirqstats(dhd, irq); |
---|
| 3103 | + } |
---|
| 3104 | +#ifdef BCMPCIE_OOB_HOST_WAKE |
---|
| 3105 | + irq = dhdpcie_get_oob_irq_num(dhd->bus); |
---|
| 3106 | + if (irq) { |
---|
| 3107 | + DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n")); |
---|
| 3108 | + dhd_print_kirqstats(dhd, irq); |
---|
| 3109 | + } |
---|
| 3110 | +#endif /* BCMPCIE_OOB_HOST_WAKE */ |
---|
| 3111 | +} |
---|
| 3112 | + |
---|
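| | +/* Exported hook that dumps the dongle console buffer and protocol debug info, then captures SOC RAM to a memdump file */
---|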
| 3113 | +#ifdef DHD_FW_COREDUMP |
---|
| 3114 | +#ifdef BCMDHDX |
---|
| 3115 | +int |
---|
| 3116 | +dhdx_dongle_mem_dump(void)
---|
| 3117 | +{ |
---|
| 3118 | + if (!g_dhd_bus) { |
---|
| 3119 | + DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); |
---|
| 3120 | + return -ENODEV; |
---|
| 3121 | + } |
---|
| 3122 | + |
---|
| 3123 | + dhd_bus_dump_console_buffer(g_dhd_bus); |
---|
| 3124 | + dhd_prot_debug_info_print(g_dhd_bus->dhd); |
---|
| 3125 | + |
---|
| 3126 | + g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON; |
---|
| 3127 | + g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; |
---|
| 3128 | + |
---|
| 3129 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 3130 | + dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0)); |
---|
| 3131 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 3132 | + |
---|
| 3133 | + dhd_bus_mem_dump(g_dhd_bus->dhd); |
---|
| 3134 | + return 0; |
---|
| 3135 | +} |
---|
| 3136 | +EXPORT_SYMBOL(dhdx_dongle_mem_dump); |
---|
| 3137 | +#else |
---|
| 3138 | +int |
---|
| 3139 | +dhd_dongle_mem_dump(void) |
---|
| 3140 | +{ |
---|
| 3141 | + if (!g_dhd_bus) { |
---|
| 3142 | + DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); |
---|
| 3143 | + return -ENODEV; |
---|
| 3144 | + } |
---|
| 3145 | + |
---|
| 3146 | + dhd_bus_dump_console_buffer(g_dhd_bus); |
---|
| 3147 | + dhd_prot_debug_info_print(g_dhd_bus->dhd); |
---|
| 3148 | + |
---|
| 3149 | + g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON; |
---|
| 3150 | + g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; |
---|
| 3151 | + |
---|
| 3152 | +#ifdef DHD_PCIE_RUNTIMEPM |
---|
| 3153 | + dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0)); |
---|
| 3154 | +#endif /* DHD_PCIE_RUNTIMEPM */ |
---|
| 3155 | + |
---|
| 3156 | + dhd_bus_mem_dump(g_dhd_bus->dhd); |
---|
| 3157 | + return 0; |
---|
| 3158 | +} |
---|
| 3159 | +EXPORT_SYMBOL(dhd_dongle_mem_dump); |
---|
| 3160 | +#endif /* BCMDHDX */ |
---|
| 3161 | +#endif /* DHD_FW_COREDUMP */ |
---|
| 3162 | + |
---|
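| | +/* Report whether the DHD instance behind the global bus handle is up; exported for external callers */
---|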
| 3163 | +#ifdef BCMDHDX |
---|
| 3164 | +bool |
---|
| 3165 | +dhdx_bus_check_driver_up(void) |
---|
| 3166 | +{ |
---|
| 3167 | + dhd_bus_t *bus; |
---|
| 3168 | + dhd_pub_t *dhdp; |
---|
| 3169 | + bool isup = FALSE; |
---|
| 3170 | + |
---|
| 3171 | + bus = (dhd_bus_t *)g_dhd_bus; |
---|
| 3172 | + if (!bus) { |
---|
| 3173 | + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); |
---|
| 3174 | + return isup; |
---|
| 3175 | + } |
---|
| 3176 | + |
---|
| 3177 | + dhdp = bus->dhd; |
---|
| 3178 | + if (dhdp) { |
---|
| 3179 | + isup = dhdp->up; |
---|
| 3180 | + } |
---|
| 3181 | + |
---|
| 3182 | + return isup; |
---|
| 3183 | +} |
---|
| 3184 | +EXPORT_SYMBOL(dhdx_bus_check_driver_up); |
---|
| 3185 | +#else |
---|
| 3186 | +bool |
---|
| 3187 | +dhd_bus_check_driver_up(void) |
---|
| 3188 | +{ |
---|
| 3189 | + dhd_bus_t *bus; |
---|
| 3190 | + dhd_pub_t *dhdp; |
---|
| 3191 | + bool isup = FALSE; |
---|
| 3192 | + |
---|
| 3193 | + bus = (dhd_bus_t *)g_dhd_bus; |
---|
| 3194 | + if (!bus) { |
---|
| 3195 | + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); |
---|
| 3196 | + return isup; |
---|
| 3197 | + } |
---|
| 3198 | + |
---|
| 3199 | + dhdp = bus->dhd; |
---|
| 3200 | + if (dhdp) { |
---|
| 3201 | + isup = dhdp->up; |
---|
| 3202 | + } |
---|
| 3203 | + |
---|
| 3204 | + return isup; |
---|
| 3205 | +} |
---|
| 3206 | +EXPORT_SYMBOL(dhd_bus_check_driver_up); |
---|
| 3207 | +#endif /* BCMDHDX */ |
---|