forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_pcie.c
@@ -1,15 +1,16 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * DHD Bus Module for PCIE
  *
- * Copyright (C) 1999-2019, Broadcom Corporation
- *
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2 (the "GPL"),
  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  * following added to such license:
- *
+ *
  * As a special exception, the copyright holders of this software give you
  * permission to link this software with independent modules, and to copy and
  * distribute the resulting executable under terms of your choice, provided that
@@ -17,7 +18,7 @@
  * the license of that module. An independent module is a module which is not
  * derived from this software. The special exception does not apply to any
  * modifications of the software.
- *
+ *
  * Notwithstanding the above, under no circumstances may you combine this
  * software in any way with any other Broadcom software provided under a license
  * other than the GPL, without Broadcom's express prior written consent.
@@ -25,19 +26,22 @@
  *
  * <<Broadcom-WL-IPTag/Open:>>
  *
- * $Id: dhd_pcie.c 608315 2015-12-24 11:35:38Z $
+ * $Id: dhd_pcie.c 702835 2017-06-05 07:19:55Z $
  */
-
 
 /* include files */
 #include <typedefs.h>
 #include <bcmutils.h>
 #include <bcmdevs.h>
 #include <siutils.h>
+#include <sbpcmcia.h>
+#include <hndoobr.h>
 #include <hndsoc.h>
 #include <hndpmu.h>
+#include <etd.h>
 #include <hnd_debug.h>
 #include <sbchipc.h>
+#include <sbhndarm.h>
 #include <hnd_armtrap.h>
 #if defined(DHD_DEBUG)
 #include <hnd_cons.h>
@@ -49,6 +53,8 @@
 #include <dhd_flowring.h>
 #include <dhd_proto.h>
 #include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_daemon.h>
 #include <dhdioctl.h>
 #include <sdiovar.h>
 #include <bcmmsgbuf.h>
@@ -56,37 +62,107 @@
 #include <dhd_pcie.h>
 #include <bcmpcie.h>
 #include <bcmendian.h>
+#include <bcmstdlib_s.h>
 #ifdef DHDTCPACK_SUPPRESS
 #include <dhd_ip.h>
 #endif /* DHDTCPACK_SUPPRESS */
+#include <bcmevent.h>
+#include <trxhdr.h>
 
-#ifdef BCMEMBEDIMAGE
-#include BCMEMBEDIMAGE
-#endif /* BCMEMBEDIMAGE */
+extern uint32 hw_module_variant;
+#include <pcie_core.h>
 
-#ifdef PCIE_OOB
-#include "ftdi_sio_external.h"
-#endif /* PCIE_OOB */
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#include <linux/pm_runtime.h>
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+#include <debugger.h>
+#endif /* DEBUGGER || DHD_DSCOPE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <dhd_linux_wq.h>
+#include <dhd_linux.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+#include <dhd_linux_priv.h>
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+//#include <otpdefs.h>
+#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
 
 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
+
+#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
+#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
+
+#define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
+#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
+#define ACC_MPU_SHIFT 25
+#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
 
 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
 
+/* CTO Prevention Recovery */
+#ifdef BCMQT_HW
+#define CTO_TO_CLEAR_WAIT_MS 10000
+#define CTO_TO_CLEAR_WAIT_MAX_CNT 100
+#else
+#define CTO_TO_CLEAR_WAIT_MS 1000
+#define CTO_TO_CLEAR_WAIT_MAX_CNT 10
+#endif // endif
+
+/* Fetch address of a member in the pciedev_shared structure in dongle memory */
+#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
+	(bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
+
+/* Fetch address of a member in rings_info_ptr structure in dongle memory */
+#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
+	(bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
+
+/* Fetch address of a member in the ring_mem structure in dongle memory */
+#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
+	(bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
+
 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
 extern unsigned int system_rev;
 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
 
+/* DHD module parameter */
+extern uint32 hw_module_variant;
+
+#ifdef EWP_EDL
+extern int host_edl_support;
+#endif // endif
+
+#define D2H_HS_START_STATE (1 << D2H_START_SHIFT)
+#define D2H_HS_READY_STATE (1 << D2H_START_SHIFT | 1 << D2H_READY_SHIFT)
+
+/* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
+uint dma_ring_indices = 0;
+/* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
+bool h2d_phase = 0;
+/* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
+ * defined in dhd_linux.c
+ */
+bool force_trap_bad_h2d_phase = 0;
+
 int dhd_dongle_memsize;
 int dhd_dongle_ramsize;
+struct dhd_bus *g_dhd_bus = NULL;
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void dhd_log_dump_axi_error(uint8 *axi_err);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
-#ifdef DHD_DEBUG
 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
-#endif /* DHD_DEBUG */
 #if defined(DHD_FW_COREDUMP)
 static int dhdpcie_mem_dump(dhd_bus_t *bus);
+static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
 #endif /* DHD_FW_COREDUMP */
 
 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
@@ -95,13 +171,29 @@
 	int plen, void *arg, int len, int val_size);
 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
-	uint32 len, uint32 srcdelay, uint32 destdelay);
+	uint32 len, uint32 srcdelay, uint32 destdelay,
+	uint32 d11_lpbk, uint32 core_num, uint32 wait);
 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
+static int dhdpcie_handshake_msg_reg_write(si_t *sih, osl_t *osh, volatile void *addr,
+	uint *buffer);
+static int dhdpcie_handshake_msg_reg_read(si_t *sih, osl_t *osh, volatile void *addr,
+	uint *buffer);
+static int dhdpcie_dongle_host_handshake_spinwait(si_t *sih, osl_t *osh, volatile void *addr,
+	uint32 bitshift, uint32 us);
+static int dhdpcie_dongle_host_get_handshake_address(si_t *sih, osl_t *osh, hs_addrs_t *addr);
+static int dhdpcie_dongle_host_pre_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr);
+static int dhdpcie_dongle_host_post_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr);
+static int dhdpcie_dongle_host_chk_validation(si_t *sih, osl_t *osh, hs_addrs_t *addr);
+int dhdpcie_dongle_host_pre_wd_reset_sequence(si_t *sih, osl_t *osh);
+int dhdpcie_dongle_host_post_wd_reset_sequence(si_t *sih, osl_t *osh);
+int dhdpcie_dongle_host_pre_chipid_access_sequence(osl_t *osh, volatile void *regva);
+static int dhdpcie_dongle_host_post_varswrite(dhd_bus_t *bus, hs_addrs_t *addr);
 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
+static int dhdpcie_readshared_console(dhd_bus_t *bus);
 static int dhdpcie_readshared(dhd_bus_t *bus);
 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
@@ -110,56 +202,71 @@
 	bool dongle_isolation, bool reset_flag);
 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
-static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
-static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
+#endif /* DHD_SUPPORT_64BIT */
 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
-static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
+static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static int dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2);
 static void dhdpcie_fw_trap(dhd_bus_t *bus);
-static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
+extern void dhd_dpc_enable(dhd_pub_t *dhdp);
 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
-#ifdef CUSTOMER_HW_31_2
-#include <nvram_zae.h>
-#endif /* CUSTOMER_HW_31_2 */
 
-#ifdef BCMEMBEDIMAGE
-static int dhdpcie_download_code_array(dhd_bus_t *bus);
-#endif /* BCMEMBEDIMAGE */
+#ifdef IDLE_TX_FLOW_MGMT
+static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
+static void dhd_bus_idle_scan(dhd_bus_t *bus);
+#endif /* IDLE_TX_FLOW_MGMT */
 
+#ifdef EXYNOS_PCIE_DEBUG
+extern void exynos_pcie_register_dump(int ch_num);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
 
 #define PCI_VENDOR_ID_BROADCOM 0x14e4
+#define PCI_VENDOR_ID_CYPRESS 0x12be
 
-static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
-extern void wl_nddbg_wpp_log(const char *format, ...);
-#ifdef PCIE_OOB
-static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#define MAX_D3_ACK_TIMEOUT 100
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
 
 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
-static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
-
-#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */
-#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
-#define BIT_WL_REG_ON 6
-#define BIT_BT_REG_ON 7
-
-int gpio_handle_val = 0;
-unsigned char gpio_port = 0;
-unsigned char gpio_direction = 0;
-#define OOB_PORT "ttyUSB0"
-#endif /* PCIE_OOB */
 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
+static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
+
+static int dhdpcie_init_d11status(struct dhd_bus *bus);
+
+static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
+
+extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
+extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
+
+#ifdef DHD_HP2P
+extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
+static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
+#endif // endif
+#define NUM_PATTERNS 2
+static bool dhd_bus_tcm_test(struct dhd_bus *bus);
 
 /* IOVar table */
 enum {
 	IOV_INTR = 1,
+#ifdef DHD_BUS_MEM_ACCESS
 	IOV_MEMBYTES,
+#endif /* DHD_BUS_MEM_ACCESS */
 	IOV_MEMSIZE,
 	IOV_SET_DOWNLOAD_STATE,
 	IOV_DEVRESET,
@@ -172,6 +279,7 @@
 	IOV_SLEEP_ALLOWED,
 	IOV_PCIE_DMAXFER,
 	IOV_PCIE_SUSPEND,
+#ifdef DHD_PCIE_REG_ACCESS
 	IOV_PCIEREG,
 	IOV_PCIECFGREG,
 	IOV_PCIECOREREG,
@@ -179,6 +287,7 @@
 	IOV_PCIEASPM,
 	IOV_BAR0_SECWIN_REG,
 	IOV_SBREG,
+#endif /* DHD_PCIE_REG_ACCESS */
 	IOV_DONGLEISOLATION,
 	IOV_LTRSLEEPON_UNLOOAD,
 	IOV_METADATA_DBG,
@@ -191,76 +300,171 @@
 	IOV_FORCE_FW_TRAP,
 	IOV_DB1_FOR_MB,
 	IOV_FLOW_PRIO_MAP,
+#ifdef DHD_PCIE_RUNTIMEPM
+	IOV_IDLETIME,
+#endif /* DHD_PCIE_RUNTIMEPM */
 	IOV_RXBOUND,
 	IOV_TXBOUND,
 	IOV_HANGREPORT,
-#ifdef PCIE_OOB
-	IOV_OOB_BT_REG_ON,
-	IOV_OOB_ENABLE
-#endif /* PCIE_OOB */
+	IOV_H2D_MAILBOXDATA,
+	IOV_INFORINGS,
+	IOV_H2D_PHASE,
+	IOV_H2D_ENABLE_TRAP_BADPHASE,
+	IOV_H2D_TXPOST_MAX_ITEM,
+	IOV_TRAPDATA,
+	IOV_TRAPDATA_RAW,
+	IOV_CTO_PREVENTION,
+	IOV_PCIE_WD_RESET,
+	IOV_DUMP_DONGLE,
+	IOV_HWA_ENAB_BMAP,
+	IOV_IDMA_ENABLE,
+	IOV_IFRM_ENABLE,
+	IOV_CLEAR_RING,
+	IOV_DAR_ENABLE,
+	IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+	IOV_GDB_SERVER, /**< starts gdb server on given interface */
+#endif /* DEBUGGER || DHD_DSCOPE */
+	IOV_INB_DW_ENABLE,
+	IOV_CTO_THRESHOLD,
+	IOV_HSCBSIZE, /* get HSCB buffer size */
+#ifdef DHD_BUS_MEM_ACCESS
+	IOV_HSCBBYTES, /* copy HSCB buffer */
+#endif // endif
+	IOV_HP2P_ENABLE,
+	IOV_HP2P_PKT_THRESHOLD,
+	IOV_HP2P_TIME_THRESHOLD,
+	IOV_HP2P_PKT_EXPIRY,
+	IOV_HP2P_TXCPL_MAXITEMS,
+	IOV_HP2P_RXCPL_MAXITEMS,
+	IOV_EXTDTXS_IN_TXCPL,
+	IOV_HOSTRDY_AFTER_INIT,
+	IOV_PCIE_LAST /**< unused IOVAR */
 };
-
 
 const bcm_iovar_t dhdpcie_iovars[] = {
-	{"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
-	{"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
-	{"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
-	{"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
-	{"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
-	{"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
-	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0 },
-	{"pcie_lpbk", IOV_PCIE_LPBK, 0, IOVT_UINT32, 0 },
-	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
-	{"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 },
-	{"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 },
-	{"pciereg", IOV_PCIEREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
-	{"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
-	{"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
-	{"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) },
-	{"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
-	{"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(uint8) },
-	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) },
-	{"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 },
-#ifdef PCIE_OOB
-	{"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, IOVT_UINT32, 0 },
-	{"oob_enable", IOV_OOB_ENABLE, 0, IOVT_UINT32, 0 },
-#endif /* PCIE_OOB */
-	{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 },
-	{"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
-	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 },
-	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 },
-	{"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0},
-	{"metadata_dbg", IOV_METADATA_DBG, 0, IOVT_BOOL, 0 },
-	{"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 },
-	{"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 },
-	{"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 },
-	{"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 },
-	{"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 },
-	{"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 },
-	{"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
-	{"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
-	{"aspm", IOV_PCIEASPM, 0, IOVT_INT32, 0 },
-	{"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
-	{NULL, 0, 0, 0, 0 }
+	{"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
+#ifdef DHD_BUS_MEM_ACCESS
+	{"membytes", IOV_MEMBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int) },
+#endif /* DHD_BUS_MEM_ACCESS */
+	{"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
+	{"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
+	{"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
+	{"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
+	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
+	{"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
+	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
+	{"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
+	{"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_REG_ACCESS
+	{"pciereg", IOV_PCIEREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+	{"pciecfgreg", IOV_PCIECFGREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+	{"pciecorereg", IOV_PCIECOREREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+	{"pcieserdesreg", IOV_PCIESERDESREG, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
+	{"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+	{"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(uint8) },
+#endif /* DHD_PCIE_REG_ACCESS */
+	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
+	{"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
+	{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
+	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
+	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
+	{"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
+	{"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
+	{"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
+	{"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
+	{"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
+	{"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+	{"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
+	{"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_RUNTIMEPM
+	{"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
+#endif /* DHD_PCIE_RUNTIMEPM */
+	{"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
+	{"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_REG_ACCESS
+	{"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 },
+#endif /* DHD_PCIE_REG_ACCESS */
+	{"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
+	{"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
+	{"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
+	{"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
+	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
+		IOVT_UINT32, 0 },
+	{"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
+	{"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
+	{"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
+	{"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
+	{"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
+	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
+		MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
+	{"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
+	{"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 },
+	{"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
+	{"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
+	{"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
+	{"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+	{"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
+#endif /* DEBUGGER || DHD_DSCOPE */
+	{"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
+	{"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+	{"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_BUS_MEM_ACCESS
+	{"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+#endif // endif
+#ifdef DHD_HP2P
+	{"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 },
+	{"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+	{"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+	{"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 },
+	{"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+	{"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+#endif // endif
+	{"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
+	{"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
+	{NULL, 0, 0, 0, 0, 0 }
 };
 
-
+#ifdef BCMQT
+#define MAX_READ_TIMEOUT 200 * 1000 * 1000
+#else
 #define MAX_READ_TIMEOUT 5 * 1000 * 1000
+#endif // endif
 
 #ifndef DHD_RXBOUND
 #define DHD_RXBOUND 64
-#endif
+#endif // endif
 #ifndef DHD_TXBOUND
 #define DHD_TXBOUND 64
-#endif
+#endif // endif
+
+#define DHD_INFORING_BOUND 32
+#define DHD_BTLOGRING_BOUND 32
+
 uint dhd_rxbound = DHD_RXBOUND;
 uint dhd_txbound = DHD_TXBOUND;
 
-/* Register/Unregister functions are called by the main DHD entry
- * point (e.g. module insertion) to link with the bus driver, in
- * order to look for or await the device.
- */
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
+/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
+static struct dhd_gdb_bus_ops_s bus_ops = {
+	.read_u16 = dhdpcie_bus_rtcm16,
+	.read_u32 = dhdpcie_bus_rtcm32,
+	.write_u32 = dhdpcie_bus_wtcm32,
+};
+#endif /* DEBUGGER || DHD_DSCOPE */
 
+bool
+dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
+{
+	return bus->flr_force_fail;
+}
+
+/**
+ * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
+ * link with the bus driver, in order to look for or await the device.
+ */
 int
 dhd_bus_register(void)
 {
@@ -278,7 +482,6 @@
 	return;
 }
 
-
 /** returns a host virtual address */
 uint32 *
 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
@@ -287,29 +490,234 @@
 }
 
 void
-dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
+dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
 {
-	REG_UNMAP((void*)(uintptr)addr);
+	REG_UNMAP(addr);
 	return;
 }
 
 /**
+ * retrun H2D Doorbell registers address
+ * use DAR registers instead of enum register for corerev >= 23 (4347B0)
+ */
+static INLINE uint
+dhd_bus_db0_addr_get(struct dhd_bus *bus)
+{
+	uint addr = PCIH2D_MailBox;
+	uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
+
+	return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
+}
+
+static INLINE uint
+dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
+{
+	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
+}
+
+static INLINE uint
+dhd_bus_db1_addr_get(struct dhd_bus *bus)
+{
+	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
+}
+
+static INLINE uint
+dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
+{
+	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
+}
+
+/*
+ * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
+ */
+static INLINE void
+dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, bool enable)
+{
+	if (enable) {
+		si_corereg(bus->sih, bus->sih->buscoreidx,
+			DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
+			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
+			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
+	} else {
+		si_corereg(bus->sih, bus->sih->buscoreidx,
+			DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
+			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
+	}
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
+{
+	uint mask;
+
+	/*
+	 * If multiple de-asserts, decrement ref and return
+	 * Clear power request when only one pending
+	 * so initial request is not removed unexpectedly
+	 */
+	if (bus->pwr_req_ref > 1) {
+		bus->pwr_req_ref--;
+		return;
+	}
+
+	ASSERT(bus->pwr_req_ref == 1);
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
+		mask = SRPWR_DMN1_ARMBPSD_MASK;
+	} else {
+		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
+	}
+
+	si_srpwr_request(bus->sih, mask, 0);
+	bus->pwr_req_ref = 0;
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
+{
+	unsigned long flags = 0;
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
+{
+	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
+{
+	uint mask, val;
+
+	/* If multiple request entries, increment reference and return */
+	if (bus->pwr_req_ref > 0) {
+		bus->pwr_req_ref++;
+		return;
+	}
+
+	ASSERT(bus->pwr_req_ref == 0);
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
+		mask = SRPWR_DMN1_ARMBPSD_MASK;
+		val = SRPWR_DMN1_ARMBPSD_MASK;
+	} else {
+		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
+		val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
+	}
+
+	si_srpwr_request(bus->sih, mask, val);
+
+	bus->pwr_req_ref = 1;
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
+{
+	unsigned long flags = 0;
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	_dhd_bus_pcie_pwr_req_cmn(bus);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
+{
+	uint mask, val;
+
+	mask = SRPWR_DMN_ALL_MASK(bus->sih);
+	val = SRPWR_DMN_ALL_MASK(bus->sih);
+
+	si_srpwr_request(bus->sih, mask, val);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
+{
+	unsigned long flags = 0;
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	_dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+}
+
+static INLINE void
+_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
+{
+	uint mask;
+
+	mask = SRPWR_DMN_ALL_MASK(bus->sih);
+
+	si_srpwr_request(bus->sih, mask, 0);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
+{
+	unsigned long flags = 0;
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+}
+
+static INLINE void
+dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
+{
+	_dhd_bus_pcie_pwr_req_cmn(bus);
+}
+
+bool
+dhdpcie_chip_support_msi(dhd_bus_t *bus)
+{
+	DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
+		__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
+	if (bus->sih->buscorerev <= 14 ||
+		si_chipid(bus->sih) == BCM4375_CHIP_ID ||
+		si_chipid(bus->sih) == BCM4362_CHIP_ID ||
+		si_chipid(bus->sih) == BCM43751_CHIP_ID ||
+		si_chipid(bus->sih) == BCM4361_CHIP_ID ||
+		si_chipid(bus->sih) == CYW55560_CHIP_ID) {
+		return FALSE;
+	} else {
+		return TRUE;
+	}
+}
+
+/**
+ * Called once for each hardware (dongle) instance that this DHD manages.
+ *
  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
 *
 * 'tcm' is the *host* virtual address at which tcm is mapped.
 */
-dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
+int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
 	volatile char *regs, volatile char *tcm, void *pci_dev)
 {
-	dhd_bus_t *bus;
-
+	dhd_bus_t *bus = NULL;
+	int ret = BCME_OK;
+	/* customvar1 and customvar2 are customer configurable CIS tuples in OTP.
+	 * In dual chip (PCIE) scenario, customvar2 is used as a hint to detect
+	 * the chip variants and load the right firmware and NVRAM
+	 */
+	/* Below vars are set to 0x0 as OTPed value can not take 0x0 */
+	uint32 customvar1 = 0x0;
+	uint32 customvar2 = 0x0;
+	uint32 otp_hw_module_variant = 0x0;
 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
 
 	do {
 		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
 			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+			ret = BCME_NORESOURCE;
 			break;
 		}
 
@@ -319,11 +727,15 @@
 		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
 		bus->dev = (struct pci_dev *)pci_dev;
 
-		dll_init(&bus->const_flowring);
+		dll_init(&bus->flowring_active_list);
+#ifdef IDLE_TX_FLOW_MGMT
+		bus->active_list_last_process_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
 
 		/* Attach pcie shared structure */
 		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
 			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
+			ret = BCME_NORESOURCE;
 			break;
 		}
 
@@ -331,23 +743,96 @@
 
 		if (dhdpcie_dongle_attach(bus)) {
 			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
+			ret = BCME_NOTREADY;
 			break;
 		}
 
+		if (!hw_module_variant) {
+			/* For single wifi module */
+			goto enumerate_module;
+		}
+
+		/* read otp variable customvar and store in dhd->customvar1 and dhd->customvar2 */
+		if (dhdpcie_sromotp_customvar(bus, &customvar1, &customvar2)) {
+			DHD_ERROR(("%s: dhdpcie_sromotp_customvar failed\n", __FUNCTION__));
+			break;
+		}
+		if (!customvar2) {
+			DHD_ERROR(("%s:customvar2 is not OTPed"
+				"hw_module_variant=0x%x\n",
+				__FUNCTION__, hw_module_variant));
+			goto enumerate_module;
+		}
+		/* customvar2=0xNNMMLLKK, LL is module variant */
+		otp_hw_module_variant = (customvar2 >> 8) & 0xFF;
+		DHD_TRACE(("%s hw_module_variant=0x%x and"
+			"OTPed-module_variant=0x%x\n", __func__,
+			hw_module_variant, otp_hw_module_variant));
+		if (hw_module_variant != otp_hw_module_variant) {
+			DHD_ERROR(("%s: Not going to enumerate this module as "
+				"hw_module_variant=0x%x and "
+				"OTPed-module_variant=0x%x didn't match\n",
+				__FUNCTION__, hw_module_variant, otp_hw_module_variant));
+			break;
+		}
+		DHD_TRACE(("%s: Going to enumerate this module as "
+			"hw_module_variant=0x%x and "
+			"OTPed-module_variant=0x%x match\n",
+			__FUNCTION__, hw_module_variant, otp_hw_module_variant));
+enumerate_module:
 		/* software resources */
 		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
 			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
-
+			ret = BCME_ERROR;
 			break;
 		}
+
+		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
 		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->dhd->hostrdy_after_init = TRUE;
 		bus->db1_for_mb = TRUE;
 		bus->dhd->hang_report = TRUE;
+		bus->use_mailbox = FALSE;
+		bus->use_d0_inform = FALSE;
+		bus->intr_enabled = FALSE;
+		bus->flr_force_fail = FALSE;
+		/* By default disable HWA and enable it via iovar */
+		bus->hwa_enab_bmap = 0;
+		/* update the dma indices if set through module parameter. */
+		if (dma_ring_indices != 0) {
+			dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
+		}
+		/* update h2d phase support if set through module parameter */
+		bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
+		/* update force trap on bad phase if set through module parameter */
+		bus->dhd->force_dongletrap_on_bad_h2d_phase =
+			force_trap_bad_h2d_phase ? TRUE : FALSE;
+#ifdef IDLE_TX_FLOW_MGMT
+		bus->enable_idle_flowring_mgmt = FALSE;
+#endif /* IDLE_TX_FLOW_MGMT */
+		bus->irq_registered = FALSE;
+
+#ifdef DHD_MSI_SUPPORT
+#ifdef DHD_FORCE_MSI
+		bus->d2h_intr_method = PCIE_MSI;
+#else
+		bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
+			PCIE_MSI : PCIE_INTX;
+#endif /* DHD_FORCE_MSI */
+#else
+		bus->d2h_intr_method = PCIE_INTX;
+#endif /* DHD_MSI_SUPPORT */
+
+#ifdef DHD_HP2P
+		bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
+		bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
+#endif /* DHD_HP2P */
 
 		DHD_TRACE(("%s: EXIT SUCCESS\n",
 			__FUNCTION__));
-
-		return bus;
+		g_dhd_bus = bus;
+		*bus_ptr = bus;
+		return ret;
 	} while (0);
 
 	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
@@ -359,7 +844,19 @@
 	if (bus) {
 		MFREE(osh, bus, sizeof(dhd_bus_t));
 	}
-	return NULL;
+
+	return ret;
+}
+
+bool
+dhd_bus_skip_clm(dhd_pub_t *dhdp)
+{
+	switch (dhd_bus_chip_id(dhdp)) {
+		case BCM4369_CHIP_ID:
+			return TRUE;
+		default:
+			return FALSE;
+	}
 }
 
 uint
@@ -416,13 +913,99 @@
 	return bus->sih->chippkg;
 }
 
-/** Read and clear intstatus. This should be called with interupts disabled or inside isr */
+/** Conduct Loopback test */
+int
+dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
+{
+	dma_xfer_info_t dmaxfer_lpbk;
+	int ret = BCME_OK;
+
+#define PCIE_DMAXFER_LPBK_LENGTH 4096
+	memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
+	dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
+	dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
+	dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
+	dmaxfer_lpbk.type = type;
+	dmaxfer_lpbk.should_wait = TRUE;
+
+	ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
+		(char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
+	if (ret < 0) {
+		DHD_ERROR(("failed to start PCIe Loopback Test!!! "
+			"Type:%d Reason:%d\n", type, ret));
+		return ret;
+	}
+
+	if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
+		DHD_ERROR(("failed to check PCIe Loopback Test!!! "
+			"Type:%d Status:%d Error code:%d\n", type,
+			dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
+		ret = BCME_ERROR;
+	} else {
+		DHD_ERROR(("successful to check PCIe Loopback Test"
+			" Type:%d\n", type));
+	}
+#undef PCIE_DMAXFER_LPBK_LENGTH
+
+	return ret;
+}
+
+/* Log the lastest DPC schedule time */
+void
+dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
+{
+	dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
+}
+
+/* Check if there is DPC scheduling errors */
+bool
+dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bool sched_err;
+
+	if (bus->dpc_entry_time < bus->isr_exit_time) {
+		/* Kernel doesn't schedule the DPC after processing PCIe IRQ */
+		sched_err = TRUE;
+	} else if (bus->dpc_entry_time < bus->resched_dpc_time) {
+		/* Kernel doesn't schedule the DPC after DHD tries to reschedule
+		 * the DPC due to pending work items to be processed.
+		 */
+		sched_err = TRUE;
+	} else {
+		sched_err = FALSE;
+	}
+
+	if (sched_err) {
+		/* print out minimum timestamp info */
+		DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
+			" isr_exit_time="SEC_USEC_FMT
+			" dpc_entry_time="SEC_USEC_FMT
+			"\ndpc_exit_time="SEC_USEC_FMT
+			" dpc_sched_time="SEC_USEC_FMT
+			" resched_dpc_time="SEC_USEC_FMT"\n",
+			GET_SEC_USEC(bus->isr_entry_time),
+			GET_SEC_USEC(bus->isr_exit_time),
+			GET_SEC_USEC(bus->dpc_entry_time),
+			GET_SEC_USEC(bus->dpc_exit_time),
+			GET_SEC_USEC(bus->dpc_sched_time),
+			GET_SEC_USEC(bus->resched_dpc_time)));
+	}
+
+	return sched_err;
+}
+
+/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
 uint32
 dhdpcie_bus_intstatus(dhd_bus_t *bus)
 {
 	uint32 intstatus = 0;
 	uint32 intmask = 0;
 
+	if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+		DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
+		return intstatus;
+	}
 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
 		(bus->sih->buscorerev == 2)) {
 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
@@ -430,10 +1013,32 @@
 		intstatus &= I_MB;
 	} else {
 		/* this is a PCIE core register..not a config register... */
-		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
 
 		/* this is a PCIE core register..not a config register... */
-		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
+		/* Is device removed. intstatus & intmask read 0xffffffff */
+		if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
+			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
+			DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
+				__FUNCTION__, intstatus, intmask));
+			bus->is_linkdown = TRUE;
+			dhd_pcie_debug_info_dump(bus->dhd);
+#ifdef CUSTOMER_HW4_DEBUG
+#if defined(OEM_ANDROID)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+			bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
+			dhd_os_send_hang_message(bus->dhd);
+#endif /* OEM_ANDROID */
+#endif /* CUSTOMER_HW4_DEBUG */
+			return intstatus;
+		}
+
+		intstatus &= intmask;
 
 		/*
 		 * The fourth argument to si_corereg is the "mask" fields of the register to update
@@ -441,21 +1046,63 @@
 		 * few fields of the "mask" bit map, we should not be writing back what we read
 		 * By doing so, we might clear/ack interrupts that are not handled yet.
 		 */
-		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
+		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
 			intstatus);
-
-		intstatus &= intmask;
-
-		/* Is device removed. intstatus & intmask read 0xffffffff */
-		if (intstatus == (uint32)-1) {
-			DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__));
-			intstatus = 0;
-		}
 
 		intstatus &= bus->def_intmask;
 	}
 
 	return intstatus;
+}
+
+void
+dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus = dhd->bus;
+	int ret;
+
+	/* Disable PCIe Runtime PM to avoid D3_ACK timeout.
+	 */
+	DHD_DISABLE_RUNTIME_PM(dhd);
+
+	/* Sleep for 1 seconds so that any AXI timeout
+	 * if running on ALP clock also will be captured
+	 */
+	OSL_SLEEP(1000);
+
+	/* reset backplane and cto,
+	 * then access through pcie is recovered.
+	 */
+	ret = dhdpcie_cto_error_recovery(bus);
+	if (!ret) {
+		/* Waiting for backplane reset */
+		OSL_SLEEP(10);
+		/* Dump debug Info */
+		dhd_prot_debug_info_print(bus->dhd);
+		/* Dump console buffer */
+		dhd_bus_dump_console_buffer(bus);
+#if defined(DHD_FW_COREDUMP)
+		/* save core dump or write to a file */
+		if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+			bus->dhd->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+			bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
+			dhdpcie_mem_dump(bus);
+		}
+#endif /* DHD_FW_COREDUMP */
+	}
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+	bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	bus->is_linkdown = TRUE;
+	bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
+	/* Send HANG event */
+	dhd_os_send_hang_message(bus->dhd);
+#endif /* OEM_ANDROID */
 }
 
@@ -479,30 +1126,83 @@
 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
 	/* verify argument */
 	if (!bus) {
-		DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
+		DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
 		break;
 	}
 
 	if (bus->dhd->dongle_reset) {
+		DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
 		break;
 	}
 
 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
-		DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n",
-			__FUNCTION__));
+		DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
 		break;
+	}
+
+	/* avoid processing of interrupts until msgbuf prot is inited */
+	if (!bus->intr_enabled) {
+		DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
+		break;
+	}
+
+	if (PCIECTO_ENAB(bus)) {
+		/* read pci_intstatus */
+		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
+
+		if (intstatus & PCI_CTO_INT_MASK) {
+			DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
+				"intstat=0x%x enab=%d\n", __FUNCTION__,
+				intstatus, bus->cto_enable));
+			bus->cto_triggered = 1;
+			/*
+			 * DAR still accessible
+			 */
+			dhd_bus_dump_dar_registers(bus);
+
+			/* Disable further PCIe interrupts */
+			dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+			/* Stop Tx flow */
+			dhd_bus_stop_queue(bus);
+
+			/* Schedule CTO recovery */
+			dhd_schedule_cto_recovery(bus->dhd);
+
+			return TRUE;
+		}
+	}
+
+	if (bus->d2h_intr_method == PCIE_MSI) {
+		/* For MSI, as intstatus is cleared by firmware, no need to read */
+		goto skip_intstatus_read;
 	}
 
 	intstatus = dhdpcie_bus_intstatus(bus);
 
 	/* Check if the interrupt is ours or not */
 	if (intstatus == 0) {
+		/* in EFI since we poll for interrupt, this message will flood the logs
+		 * so disable this for EFI
+		 */
+		DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
+		bus->non_ours_irq_count++;
+		bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
 		break;
 	}
 
 	/* save the intstatus */
+	/* read interrupt status register!! Status bits will be cleared in DPC !! */
 	bus->intstatus = intstatus;
 
+	/* return error for 0xFFFFFFFF */
+	if (intstatus == (uint32)-1) {
+		DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
+			__FUNCTION__, intstatus));
+		dhdpcie_disable_irq_nosync(bus);
+		break;
+	}
+
+skip_intstatus_read:
 	/* Overall operation:
 	 * - Mask further interrupts
 	 * - Read/ack intstatus
@@ -513,9 +1213,17 @@
 	/* Count the interrupt call */
 	bus->intrcount++;
 
-	/* read interrupt status register!! Status bits will be cleared in DPC !! */
 	bus->ipend = TRUE;
-	dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
+
+	bus->isr_intr_disable_count++;
+
+	/* For Linux, Macos etc (otherthan NDIS) instead of disabling
+	 * dongle interrupt by clearing the IntMask, disable directly
+	 * interrupt from the host side, so that host will not recieve
+	 * any interrupts at all, even though dongle raises interrupts
+	 */
+	dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+
 	bus->intdis = TRUE;
 
 #if defined(PCIE_ISR_THREAD)
....@@ -538,30 +1246,321 @@
5381246 return FALSE;
5391247 }
5401248
1249
+int
1250
+dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1251
+{
1252
+ uint32 cur_state = 0;
1253
+ uint32 pm_csr = 0;
1254
+ osl_t *osh = bus->osh;
1255
+
1256
+ pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1257
+ cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1258
+
1259
+ if (cur_state == state) {
1260
+ DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1261
+ return BCME_OK;
1262
+ }
1263
+
1264
+ if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1265
+ return BCME_ERROR;
1266
+
1267
+ /* Validate the state transition
1268
+ * if already in a lower power state, return error
1269
+ */
1270
+ if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1271
+ cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1272
+ cur_state > state) {
1273
+ DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1274
+ return BCME_ERROR;
1275
+ }
1276
+
1277
+ pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1278
+ pm_csr |= state;
1279
+
1280
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1281
+
1282
+ /* need to wait for the specified mandatory pcie power transition delay time */
1283
+ if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1284
+ cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1285
+ OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1286
+ else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1287
+ cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1288
+ OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1289
+
1290
+ /* read back the power state and verify */
1291
+ pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1292
+ cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1293
+ if (cur_state != state) {
1294
+ DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1295
+ __FUNCTION__, cur_state));
1296
+ return BCME_ERROR;
1297
+ } else {
1298
+ DHD_ERROR(("%s: power transition to %u success \n",
1299
+ __FUNCTION__, cur_state));
1300
+ }
1301
+
1302
+ return BCME_OK;
1303
+}
1304
+
1305
+int
1306
+dhdpcie_config_check(dhd_bus_t *bus)
1307
+{
1308
+ uint32 i, val;
1309
+ int ret = BCME_ERROR;
1310
+
1311
+ for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1312
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1313
+ if ((val & 0xFFFF) == VENDOR_BROADCOM || (val & 0xFFFF) == VENDOR_CYPRESS) {
1314
+ ret = BCME_OK;
1315
+ break;
1316
+ }
1317
+ OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1318
+ }
1319
+
1320
+ return ret;
1321
+}
1322
+
1323
+int
1324
+dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1325
+{
1326
+ uint32 i;
1327
+ osl_t *osh = bus->osh;
1328
+
1329
+ if (BCME_OK != dhdpcie_config_check(bus)) {
1330
+ return BCME_ERROR;
1331
+ }
1332
+
1333
+ for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1334
+ OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1335
+ }
1336
+ OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1337
+
1338
+ if (restore_pmcsr)
1339
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1340
+ sizeof(uint32), bus->saved_config.pmcsr);
1341
+
1342
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1343
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1344
+ bus->saved_config.msi_addr0);
1345
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1346
+ sizeof(uint32), bus->saved_config.msi_addr1);
1347
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1348
+ sizeof(uint32), bus->saved_config.msi_data);
1349
+
1350
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1351
+ sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1352
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1353
+ sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1354
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1355
+ sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1356
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1357
+ sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1358
+
1359
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1360
+ sizeof(uint32), bus->saved_config.l1pm0);
1361
+ OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1362
+ sizeof(uint32), bus->saved_config.l1pm1);
1363
+
1364
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1365
+ bus->saved_config.bar0_win);
1366
+ dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1367
+
1368
+ return BCME_OK;
1369
+}
1370
+
1371
+int
1372
+dhdpcie_config_save(dhd_bus_t *bus)
1373
+{
1374
+ uint32 i;
1375
+ osl_t *osh = bus->osh;
1376
+
1377
+ if (BCME_OK != dhdpcie_config_check(bus)) {
1378
+ return BCME_ERROR;
1379
+ }
1380
+
1381
+ for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1382
+ bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1383
+ }
1384
+
1385
+ bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1386
+
1387
+ bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1388
+ sizeof(uint32));
1389
+ bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1390
+ sizeof(uint32));
1391
+ bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1392
+ sizeof(uint32));
1393
+ bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1394
+ sizeof(uint32));
1395
+
1396
+ bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1397
+ PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1398
+ bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1399
+ PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1400
+ bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1401
+ PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1402
+ bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1403
+ PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1404
+
1405
+ bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1406
+ sizeof(uint32));
1407
+ bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1408
+ sizeof(uint32));
1409
+
1410
+ bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1411
+ sizeof(uint32));
1412
+ bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1413
+ sizeof(uint32));
1414
+
1415
+ return BCME_OK;
1416
+}
1417
+
1418
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1419
+dhd_pub_t *link_recovery = NULL;
1420
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1421
+
1422
+static void
1423
+dhdpcie_bus_intr_init(dhd_bus_t *bus)
1424
+{
1425
+ uint buscorerev = bus->sih->buscorerev;
1426
+ bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1427
+ bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1428
+ bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1429
+ bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1430
+ if (buscorerev < 64) {
1431
+ bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1432
+ }
1433
+}
1434
+
1435
+static void
1436
+dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1437
+{
1438
+ uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
1439
+ (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1440
+ pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1441
+}
1442
+
1443
+void
1444
+dhdpcie_dongle_reset(dhd_bus_t *bus)
1445
+{
1446
+ /* if the pcie link is down, watchdog reset
1447
+ * should not be done, as it may hang
1448
+ */
1449
+ if (bus->is_linkdown) {
1450
+ return;
1451
+ }
1452
+
1453
+ /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1454
+ if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
1455
+#ifdef DHD_USE_BP_RESET
1456
+ /* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
1457
+ dhd_bus_perform_bp_reset(bus);
1458
+#else
1459
+ /* Legacy chipcommon watchdog reset */
1460
+ dhdpcie_cc_watchdog_reset(bus);
1461
+#endif /* DHD_USE_BP_RESET */
1462
+ }
1463
+}
1464
+
1465
+#ifdef CHIPS_CUSTOMER_HW6
1466
+void
1467
+dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
1468
+{
1469
+ volatile uint32 *cr4_regs;
1470
+ if (BCM4378_CHIP(bus->sih->chip)) {
1471
+ cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
1472
+ if (cr4_regs == NULL) {
1473
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
1474
+ return;
1475
+ }
1476
+ if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
1477
+ /* bus mpu is supported */
1478
+ W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
1479
+ }
1480
+ }
1481
+}
1482
+#endif /* CHIPS_CUSTOMER_HW6 */
1483
+
5411484 static bool
5421485 dhdpcie_dongle_attach(dhd_bus_t *bus)
5431486 {
544
-
5451487 osl_t *osh = bus->osh;
546
- void *regsva = (void*)bus->regs;
547
- uint16 devid = bus->cl_devid;
1488
+ volatile void *regsva = (volatile void*)bus->regs;
1489
+ uint16 devid;
5481490 uint32 val;
1491
+ uint32 reg_val = 0;
1492
+ bool is_pcie_reset = FALSE;
1493
+ uint32 secureboot;
5491494 sbpcieregs_t *sbpcieregs;
550
-
1495
+ bool dongle_isolation;
1496
+ int32 bcmerror = BCME_ERROR;
5511497 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
5521498
1499
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1500
+ link_recovery = bus->dhd;
1501
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
5531502
5541503 bus->alp_only = TRUE;
5551504 bus->sih = NULL;
5561505
557
- /* Set bar0 window to si_enum_base */
558
- dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
559
-
5601506 /* Checking PCIe bus status with reading configuration space */
5611507 val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
562
- if ((val & 0xFFFF) != VENDOR_BROADCOM) {
1508
+ if ((val & 0xFFFF) != VENDOR_BROADCOM && (val & 0xFFFF) != VENDOR_CYPRESS) {
5631509 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
5641510 goto fail;
1511
+ }
1512
+ devid = (val >> 16) & 0xFFFF;
1513
+ bus->cl_devid = devid;
1514
+
1515
+ /* Set bar0 window to si_enum_base */
1516
+ dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1517
+
1518
+ /*
1519
+ * Checking PCI_SPROM_CONTROL register for preventing invalid address access
1520
+ * due to switch address space from PCI_BUS to SI_BUS.
1521
+ */
1522
+ val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1523
+ if (val == 0xffffffff) {
1524
+ DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1525
+ goto fail;
1526
+ }
1527
+
1528
+ /* Get the secure-boot capability to make sure that these
1529
+ * functionalities are restricted to chips that have a bootloader
1530
+ */
1531
+ secureboot = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_REVID, sizeof(uint32));
1532
+
1533
+ if (isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT)) {
1534
+
1535
+ /* Set bar0 window to si_pcie_enum_base */
1536
+ dhdpcie_bus_cfg_set_bar0_win(bus, si_pcie_enum_base(devid));
1537
+ sbpcieregs = (sbpcieregs_t*)(bus->regs);
1538
+ DHD_INFO(("%s: before read reg_val:%d\n", __FUNCTION__, reg_val));
1539
+ reg_val = R_REG(osh, &sbpcieregs->u1.dar_64.d2h_msg_reg0);
1540
+ DHD_INFO(("%s: after reg_val:%d\n", __FUNCTION__, reg_val));
1541
+ /* reset only when the handshake register is in neither the start nor the ready state */
+ if ((reg_val != D2H_HS_START_STATE) && (reg_val != D2H_HS_READY_STATE)) {
1542
+ /* si_attach() will provide an SI handle and scan the backplane */
1543
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1544
+ &bus->vars, &bus->varsz))) {
1545
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1546
+ goto fail;
1547
+ }
1548
+ dhdpcie_dongle_reset(bus);
1549
+ is_pcie_reset = TRUE;
1550
+ }
1551
+
1552
+ /* Pre-ChipID-access sequence: make sure that the
1553
+ * bootloader is ready before the ChipID is accessed.
1554
+ */
1555
+ bcmerror = dhdpcie_dongle_host_pre_chipid_access_sequence(osh, regsva);
1556
+ if (bcmerror) {
1557
+ DHD_ERROR(("%s: error - pre chipid access sequence error %d\n",
1558
+ __FUNCTION__, bcmerror));
1559
+ goto fail;
1560
+ }
1561
+
1562
+ /* Set bar0 window to si_enum_base */
1563
+ dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
5651564 }
5661565
5671566 /* si_attach() will provide an SI handle and scan the backplane */
....@@ -571,6 +1570,133 @@
5711570 goto fail;
5721571 }
5731572
1573
+ /* Configure CTO Prevention functionality */
1574
+#if defined(BCMFPGA_HW)
1575
+ DHD_ERROR(("Disable CTO\n"));
1576
+ bus->cto_enable = FALSE;
1577
+#else
1578
+#if defined(BCMPCIE_CTO_PREVENTION)
1579
+ if (bus->sih->buscorerev >= 24) {
1580
+ DHD_ERROR(("Enable CTO\n"));
1581
+ bus->cto_enable = TRUE;
1582
+ } else
1583
+#endif /* BCMPCIE_CTO_PREVENTION */
1584
+ {
1585
+ DHD_ERROR(("Disable CTO\n"));
1586
+ bus->cto_enable = FALSE;
1587
+ }
1588
+#endif /* BCMFPGA_HW */
1589
+
1590
+ if (PCIECTO_ENAB(bus)) {
1591
+ dhdpcie_cto_init(bus, TRUE);
1592
+ }
1593
+
1594
+ /* Storing secureboot capability */
1595
+ bus->sih->secureboot = isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT);
1596
+
1597
+ if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1598
+ /*
1599
+ * HW JIRA - CRWLPCIEGEN2-672
1600
+ * The Producer Index feature used by F1 gets reset on an F0 FLR;
1601
+ * fixed in REV68.
1602
+ */
1603
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1604
+ dhdpcie_ssreset_dis_enum_rst(bus);
1605
+ }
1606
+
1607
+ /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
1608
+ * dhdpcie_bus_release_dongle() --> si_detach()
1609
+ * dhdpcie_dongle_attach() --> si_attach()
1610
+ */
1611
+ bus->pwr_req_ref = 0;
1612
+ }
1613
+
1614
+ if (MULTIBP_ENAB(bus->sih)) {
1615
+ dhd_bus_pcie_pwr_req_nolock(bus);
1616
+ }
1617
+
1618
+ /* Get info on the ARM and SOCRAM cores... */
1619
+ /* Should really be qualified by device id */
1620
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1621
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1622
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1623
+ (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1624
+ bus->armrev = si_corerev(bus->sih);
1625
+ bus->coreid = si_coreid(bus->sih);
1626
+ } else {
1627
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1628
+ goto fail;
1629
+ }
1630
+
1631
+ /* CA7 requires coherent bits on */
1632
+ if (bus->coreid == ARMCA7_CORE_ID) {
1633
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
1634
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
1635
+ (val | PCIE_BARCOHERENTACCEN_MASK));
1636
+ }
1637
+
1638
+ /* Olympic EFI requirement - stop the driver load if FW is already running.
1639
+ * This must be done here, before pcie_watchdog_reset, because
1640
+ * pcie_watchdog_reset puts the ARM back into the halt state
1641
+ */
1642
+ if (!dhdpcie_is_arm_halted(bus)) {
1643
+ DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
1644
+ __FUNCTION__));
1645
+ goto fail;
1646
+ }
1647
+
1648
+ BCM_REFERENCE(dongle_isolation);
1649
+
1650
+ /* For built-in drivers the PCIe CLKREQ is handled by the RC,
1651
+ * so do not issue clkreq from DHD
1652
+ */
1653
+ if (dhd_download_fw_on_driverload)
1654
+ {
1655
+ /* Enable CLKREQ# */
1656
+ dhdpcie_clkreq(bus->osh, 1, 1);
1657
+ }
1658
+
1659
+ /*
1660
+ * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
1661
+ * without checking dongle_isolation flag, but if it is called via some other path
1662
+ * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
1663
+ * be called.
1664
+ */
1665
+ if (bus->dhd == NULL) {
1666
+ /* dhd_attach not yet happened, do watchdog reset */
1667
+ dongle_isolation = FALSE;
1668
+ } else {
1669
+ dongle_isolation = bus->dhd->dongle_isolation;
1670
+ }
1671
+
1672
+#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1673
+ /*
1674
+ * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
1675
+ * This is required to avoid spurious interrupts to the Host and bring back
1676
+ * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1677
+ */
1678
+ if (dongle_isolation == FALSE && is_pcie_reset == FALSE) {
1679
+ dhdpcie_dongle_reset(bus);
1680
+ }
1681
+#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1682
+
1683
+ /* need to set the force_bt_quiesce flag here
1684
+ * before calling dhdpcie_dongle_flr_or_pwr_toggle
1685
+ */
1686
+ bus->force_bt_quiesce = TRUE;
1687
+ /*
1688
+ * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
1689
+ * So don't need BT quiesce.
1690
+ */
1691
+ if (bus->sih->buscorerev >= 66) {
1692
+ bus->force_bt_quiesce = FALSE;
1693
+ }
1694
+
1695
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
1696
+
1697
+#ifdef CHIPS_CUSTOMER_HW6
1698
+ dhdpcie_bus_mpu_disable(bus);
1699
+#endif /* CHIPS_CUSTOMER_HW6 */
5741700
5751701 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
5761702 sbpcieregs = (sbpcieregs_t*)(bus->regs);
....@@ -580,27 +1706,33 @@
5801706 val = R_REG(osh, &sbpcieregs->configdata);
5811707 W_REG(osh, &sbpcieregs->configdata, val);
5821708
583
- /* Get info on the ARM and SOCRAM cores... */
584
- /* Should really be qualified by device id */
585
- if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
586
- (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
587
- (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
588
- (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
589
- bus->armrev = si_corerev(bus->sih);
590
- } else {
591
- DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
592
- goto fail;
593
- }
594
-
5951709 if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
596
- if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
597
- DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
598
- goto fail;
1710
+ /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1711
+ * adjusted.
1712
+ */
1713
+ if (!bus->ramsize_adjusted) {
1714
+ if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1715
+ DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1716
+ goto fail;
1717
+ }
1718
+ switch ((uint16)bus->sih->chip) {
1719
+#ifdef CHIPS_CUSTOMER_HW6
1720
+ case BCM4368_CHIP_ID:
1721
+ bus->dongle_ram_base = CA7_4368_RAM_BASE;
1722
+ bus->orig_ramsize = 0x1c0000;
1723
+ break;
1724
+ CASE_BCM4367_CHIP:
1725
+ bus->dongle_ram_base = CA7_4367_RAM_BASE;
1726
+ bus->orig_ramsize = 0x1e0000;
1727
+ break;
1728
+#endif /* CHIPS_CUSTOMER_HW6 */
1729
+ default:
1730
+ /* also populate base address */
1731
+ bus->dongle_ram_base = CA7_4365_RAM_BASE;
1732
+ bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1733
+ break;
1734
+ }
5991735 }
600
- /* also populate base address */
601
- bus->dongle_ram_base = CA7_4365_RAM_BASE;
602
- /* Default reserve 1.75MB for CA7 */
603
- bus->orig_ramsize = 0x1c0000;
6041736 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
6051737 if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
6061738 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
....@@ -619,7 +1751,6 @@
6191751 bus->dongle_ram_base = CR4_4335_RAM_BASE;
6201752 break;
6211753 case BCM4358_CHIP_ID:
622
- case BCM4356_CHIP_ID:
6231754 case BCM4354_CHIP_ID:
6241755 case BCM43567_CHIP_ID:
6251756 case BCM43569_CHIP_ID:
....@@ -630,6 +1761,11 @@
6301761 case BCM4360_CHIP_ID:
6311762 bus->dongle_ram_base = CR4_4360_RAM_BASE;
6321763 break;
1764
+
1765
+ case BCM4364_CHIP_ID:
1766
+ bus->dongle_ram_base = CR4_4364_RAM_BASE;
1767
+ break;
1768
+
6331769 CASE_BCM4345_CHIP:
6341770 bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
6351771 ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
....@@ -638,9 +1774,39 @@
6381774 bus->dongle_ram_base = CR4_43602_RAM_BASE;
6391775 break;
6401776 case BCM4349_CHIP_GRPID:
641
- /* RAM base changed from 4349c0(revid=9) onwards */
1777
+ /* RAM base changed from 4349c0 (revid=9) onwards */
6421778 bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
643
- CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1779
+ CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1780
+ break;
1781
+ case BCM4347_CHIP_ID:
1782
+ case BCM4357_CHIP_ID:
1783
+ case BCM4361_CHIP_ID:
1784
+ bus->dongle_ram_base = CR4_4347_RAM_BASE;
1785
+ break;
1786
+ case BCM4362_CHIP_ID:
1787
+ bus->dongle_ram_base = CR4_4362_RAM_BASE;
1788
+ break;
1789
+ case BCM43751_CHIP_ID:
1790
+ bus->dongle_ram_base = CR4_43751_RAM_BASE;
1791
+ break;
1792
+
1793
+ case BCM4373_CHIP_ID:
1794
+ bus->dongle_ram_base = CR4_4373_RAM_BASE;
1795
+ break;
1796
+#ifdef CHIPS_CUSTOMER_HW6
1797
+ case BCM4378_CHIP_GRPID:
1798
+ bus->dongle_ram_base = CR4_4378_RAM_BASE;
1799
+ break;
1800
+ case BCM4377_CHIP_ID:
1801
+ bus->dongle_ram_base = CR4_4377_RAM_BASE;
1802
+ break;
1803
+#endif /* CHIPS_CUSTOMER_HW6 */
1804
+ case BCM4375_CHIP_ID:
1805
+ case BCM4369_CHIP_ID:
1806
+ bus->dongle_ram_base = CR4_4369_RAM_BASE;
1807
+ break;
1808
+ case CYW55560_CHIP_ID:
1809
+ bus->dongle_ram_base = CR4_55560_RAM_BASE;
6441810 break;
6451811 default:
6461812 bus->dongle_ram_base = 0;
....@@ -648,55 +1814,67 @@
6481814 __FUNCTION__, bus->dongle_ram_base));
6491815 }
6501816 }
1817
+
1818
+ /* 55560: dedicated space for TCAM patching and the TRX header at RAMBASE */
1819
+ /* TCAM patching - 2048 bytes (2K), TRX header - 32 bytes */
1820
+ if (bus->sih->chip == CYW55560_CHIP_ID) {
1821
+ bus->orig_ramsize -= (CR4_55560_TCAM_SZ + CR4_55560_TRX_HDR_SZ);
1822
+ }
1823
+
6511824 bus->ramsize = bus->orig_ramsize;
6521825 if (dhd_dongle_memsize)
6531826 dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1827
+
1828
+ if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1829
+ DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1830
+ __FUNCTION__, bus->ramsize, bus->ramsize));
1831
+ goto fail;
1832
+ }
6541833
6551834 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
6561835 bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
6571836
6581837 bus->srmemsize = si_socram_srmem_size(bus->sih);
6591838
660
-
661
- bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1839
+ dhdpcie_bus_intr_init(bus);
6621840
6631841 /* Set the poll and/or interrupt flags */
6641842 bus->intr = (bool)dhd_intr;
6651843
666
- bus->wait_for_d3_ack = 1;
667
- bus->suspended = FALSE;
1844
+ bus->idma_enabled = TRUE;
1845
+ bus->ifrm_enabled = TRUE;
1846
+ DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
6681847
669
-#ifdef PCIE_OOB
670
- gpio_handle_val = get_handle(OOB_PORT);
671
- if (gpio_handle_val < 0)
672
- {
673
- DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
674
- ASSERT(FALSE);
1848
+ if (MULTIBP_ENAB(bus->sih)) {
1849
+ dhd_bus_pcie_pwr_req_clear_nolock(bus);
1850
+
1851
+ /*
1852
+ * One time clearing of Common Power Domain since HW default is set
1853
+ * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
1854
+ * for 4378B0 (rev 68).
1855
+ * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
1856
+ */
1857
+ si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
1858
+
1859
+ /*
1860
+ * WAR to fix ARM cold boot;
1861
+ * Assert WL domain in DAR helps but not enum
1862
+ */
1863
+ if (bus->sih->buscorerev >= 68) {
1864
+ dhd_bus_pcie_pwr_req_wl_domain(bus, TRUE);
1865
+ }
6751866 }
6761867
677
- gpio_direction = 0;
678
- ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
679
-
680
- /* Note BT core is also enabled here */
681
- gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
682
- gpio_write_port(gpio_handle_val, gpio_port);
683
-
684
- gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
685
- ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
686
-
687
- bus->oob_enabled = TRUE;
688
-
689
- /* drive the Device_Wake GPIO low on startup */
690
- bus->device_wake_state = TRUE;
691
- dhd_bus_set_device_wake(bus, FALSE);
692
- dhd_bus_doorbell_timeout_reset(bus);
693
-#endif /* PCIE_OOB */
694
-
695
- DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
6961868 return 0;
6971869
6981870 fail:
6991871 if (bus->sih != NULL) {
1872
+ if (MULTIBP_ENAB(bus->sih)) {
1873
+ dhd_bus_pcie_pwr_req_clear_nolock(bus);
1874
+ }
1875
+ /* For EFI, the load still succeeds even if there is an error,
1876
+ * so si_detach should not be called here; it is called during unload
1877
+ */
7001878 si_detach(bus->sih);
7011879 bus->sih = NULL;
7021880 }
....@@ -717,36 +1895,54 @@
7171895 return 0;
7181896 }
7191897
1898
+/* Non atomic function, caller should hold appropriate lock */
7201899 void
7211900 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
7221901 {
723
- DHD_TRACE(("enable interrupts\n"));
724
- if (bus && bus->sih) {
725
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
726
- (bus->sih->buscorerev == 4)) {
727
- dhpcie_bus_unmask_interrupt(bus);
728
- } else {
729
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
730
- bus->def_intmask, bus->def_intmask);
1902
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
1903
+ if (bus) {
1904
+ if (bus->sih && !bus->is_linkdown) {
1905
+ /* Skip after receiving D3 ACK */
1906
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1907
+ return;
1908
+ }
1909
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1910
+ (bus->sih->buscorerev == 4)) {
1911
+ dhpcie_bus_unmask_interrupt(bus);
1912
+ } else {
1913
+ #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
1914
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
1915
+ bus->def_intmask, TRUE);
1916
+ #endif
1917
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1918
+ bus->def_intmask, bus->def_intmask);
1919
+ }
7311920 }
1921
+
7321922 }
1923
+
1924
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
7331925 }
7341926
1927
+/* Non atomic function, caller should hold appropriate lock */
7351928 void
7361929 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
7371930 {
738
-
7391931 DHD_TRACE(("%s Enter\n", __FUNCTION__));
740
-
741
- if (bus && bus->sih) {
1932
+ if (bus && bus->sih && !bus->is_linkdown) {
1933
+ /* Skip after receiving D3 ACK */
1934
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1935
+ return;
1936
+ }
7421937 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7431938 (bus->sih->buscorerev == 4)) {
7441939 dhpcie_bus_mask_interrupt(bus);
7451940 } else {
746
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
1941
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
7471942 bus->def_intmask, 0);
7481943 }
7491944 }
1945
+
7501946 DHD_TRACE(("%s Exit\n", __FUNCTION__));
7511947 }
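+
+/* Usage sketch: both interrupt helpers above are non-atomic, so callers
+ * bracket them with the bus spin lock, as the release path in this file
+ * does:
+ *
+ *	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+ *	dhdpcie_bus_intr_disable(bus);
+ *	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
+ */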
7521948
....@@ -757,21 +1953,33 @@
7571953 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
7581954 * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
7591955 */
760
-static void
1956
+void
7611957 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
7621958 {
7631959 unsigned long flags;
7641960 int timeleft;
7651961
766
- DHD_GENERAL_LOCK(dhdp, flags);
767
- dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
768
- DHD_GENERAL_UNLOCK(dhdp, flags);
1962
+#ifdef DHD_PCIE_RUNTIMEPM
1963
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
1964
+#endif /* DHD_PCIE_RUNTIMEPM */
1965
+
1966
+ dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1967
+ if (dhdp->dhd_watchdog_ms_backup) {
1968
+ DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1969
+ __FUNCTION__));
1970
+ dhd_os_wd_timer(dhdp, 0);
1971
+ }
1972
+ if (dhdp->busstate != DHD_BUS_DOWN) {
1973
+ DHD_GENERAL_LOCK(dhdp, flags);
1974
+ dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1975
+ DHD_GENERAL_UNLOCK(dhdp, flags);
1976
+ }
7691977
7701978 timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
771
- if (timeleft == 0) {
1979
+ if ((timeleft == 0) || (timeleft == 1)) {
7721980 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
7731981 __FUNCTION__, dhdp->dhd_bus_busy_state));
774
- BUG_ON(1);
1982
+ ASSERT(0);
7751983 }
7761984
7771985 return;
....@@ -784,19 +1992,94 @@
7841992 DHD_TRACE(("%s Enter\n", __FUNCTION__));
7851993
7861994 DHD_GENERAL_LOCK(bus->dhd, flags);
1995
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
7871996 bus->dhd->busstate = DHD_BUS_DOWN;
7881997 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7891998
7901999 dhd_os_sdlock(bus->dhd);
7912000
792
- dhdpcie_bus_intr_disable(bus);
2001
+ if (bus->sih && !bus->dhd->dongle_isolation) {
2002
+ if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev) &&
2003
+ bus->sih->chip != CYW55560_CHIP_ID) {
2004
+ dhd_bus_pcie_pwr_req_reload_war(bus);
2005
+ }
7932006
794
- if (bus->sih)
795
- pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
2007
+ /* Skipped on Android: causes an insmod-after-rmmod failure on Brix Android */
2008
+#if !defined(OEM_ANDROID) && !defined(ANDROID)
2009
+ /* HW4347-909 */
2010
+ if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) {
2011
+ /* Set PCIE TRefUp time to 100us for 4347 */
2012
+ pcie_set_trefup_time_100us(bus->sih);
2013
+ }
2014
+
2015
+ /* Disable fast LPO from 4347 onwards.
2016
+ * For 4378/4387, do not disable fast LPO because it is always enabled;
2017
+ * disabling it causes insmod/rmmod reload failure.
2018
+ */
2019
+ if ((PMUREV(bus->sih->pmurev) > 31) &&
2020
+ (bus->sih->buscorerev != 66) &&
2021
+ (bus->sih->buscorerev != 68) &&
2022
+ (bus->sih->buscorerev != 69) &&
2023
+ (bus->sih->buscorerev != 70)) {
2024
+ si_pmu_fast_lpo_disable(bus->sih);
2025
+ }
2026
+#endif /* !OEM_ANDROID && !ANDROID */
2027
+
2028
+ /* if the pcie link is down, watchdog reset
2029
+ * should not be done, as it may hang
2030
+ */
2031
+
2032
+ if (!bus->is_linkdown) {
2033
+#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
2034
+ /* For EFI, depending on the BT-over-PCIe mode,
2035
+ * we either power-toggle or do an F0 FLR
2036
+ * from dhdpcie_bus_release_dongle, so there is no need
2037
+ * to do a dongle reset from here.
2038
+ */
2039
+ dhdpcie_dongle_reset(bus);
2040
+#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
2041
+ }
2042
+
2043
+ bus->dhd->is_pcie_watchdog_reset = TRUE;
2044
+ }
7962045
7972046 dhd_os_sdunlock(bus->dhd);
7982047
7992048 DHD_TRACE(("%s Exit\n", __FUNCTION__));
2049
+}
2050
+
2051
+void
2052
+dhd_init_bus_lock(dhd_bus_t *bus)
2053
+{
2054
+ if (!bus->bus_lock) {
2055
+ bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
2056
+ }
2057
+}
2058
+
2059
+void
2060
+dhd_deinit_bus_lock(dhd_bus_t *bus)
2061
+{
2062
+ if (bus->bus_lock) {
2063
+ dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
2064
+ bus->bus_lock = NULL;
2065
+ }
2066
+}
2067
+
2068
+void
2069
+dhd_init_backplane_access_lock(dhd_bus_t *bus)
2070
+{
2071
+ if (!bus->backplane_access_lock) {
2072
+ bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
2073
+ }
2074
+}
2075
+
2076
+void
2077
+dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
2078
+{
2079
+ if (bus->backplane_access_lock) {
2080
+ dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
2081
+ bus->backplane_access_lock = NULL;
2082
+ }
8002083 }
8012084
8022085 /** Detach and free everything */
....@@ -804,7 +2087,11 @@
8042087 dhdpcie_bus_release(dhd_bus_t *bus)
8052088 {
8062089 bool dongle_isolation = FALSE;
2090
+#ifdef BCMQT
2091
+ uint buscorerev = 0;
2092
+#endif /* BCMQT */
8072093 osl_t *osh = NULL;
2094
+ unsigned long flags_bus;
8082095
8092096 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8102097
....@@ -814,27 +2101,52 @@
8142101 ASSERT(osh);
8152102
8162103 if (bus->dhd) {
2104
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
2105
+ debugger_close();
2106
+#endif /* DEBUGGER || DHD_DSCOPE */
8172107 dhdpcie_advertise_bus_cleanup(bus->dhd);
8182108 dongle_isolation = bus->dhd->dongle_isolation;
2109
+ bus->dhd->is_pcie_watchdog_reset = FALSE;
8192110 dhdpcie_bus_remove_prep(bus);
8202111
8212112 if (bus->intr) {
2113
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8222114 dhdpcie_bus_intr_disable(bus);
2115
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8232116 dhdpcie_free_irq(bus);
8242117 }
825
- dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
2118
+ dhd_deinit_bus_lock(bus);
2119
+ dhd_deinit_backplane_access_lock(bus);
2120
+#ifdef BCMQT
2121
+ if (IDMA_ACTIVE(bus->dhd)) {
2122
+ /**
2123
+ * On FPGA, force the "IDMA Control Register" back to its default
2124
+ * value 0x0 on the exit path. Otherwise host-dongle sync for IDMA fails
2125
+ * during the next IDMA initialization (without a system reboot).
2126
+ */
2127
+ buscorerev = bus->sih->buscorerev;
2128
+ si_corereg(bus->sih, bus->sih->buscoreidx,
2129
+ IDMAControl(buscorerev), ~0, 0);
2130
+ }
2131
+#endif /* BCMQT */
2132
+ /**
2133
+ * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed
2134
+ * to access dongle registers.
2135
+ * dhd_detach communicates with the dongle to delete flowrings etc., so
2136
+ * dhdpcie_bus_release_dongle must be called only after dhd_detach.
2137
+ */
8262138 dhd_detach(bus->dhd);
2139
+ dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
8272140 dhd_free(bus->dhd);
8282141 bus->dhd = NULL;
8292142 }
830
-
8312143 /* unmap the regs and tcm here!! */
8322144 if (bus->regs) {
833
- dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
2145
+ dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
8342146 bus->regs = NULL;
8352147 }
8362148 if (bus->tcm) {
837
- dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
2149
+ dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
8382150 bus->tcm = NULL;
8392151 }
8402152
....@@ -845,21 +2157,18 @@
8452157 bus->pcie_sh = NULL;
8462158 }
8472159
848
-#ifdef DHD_DEBUG
849
-
850
- if (bus->console.buf != NULL)
2160
+ if (bus->console.buf != NULL) {
8512161 MFREE(osh, bus->console.buf, bus->console.bufsize);
852
-#endif
853
-
2162
+ }
8542163
8552164 /* Finally free bus info */
8562165 MFREE(osh, bus, sizeof(dhd_bus_t));
8572166
2167
+ g_dhd_bus = NULL;
8582168 }
8592169
8602170 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
8612171 } /* dhdpcie_bus_release */
862
-
8632172
8642173 void
8652174 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
....@@ -872,10 +2181,19 @@
8722181 return;
8732182 }
8742183
2184
+ if (bus->is_linkdown) {
2185
+ DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
2186
+ return;
2187
+ }
2188
+
8752189 if (bus->sih) {
8762190
877
- if (!dongle_isolation)
878
- pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
2191
+ if (!dongle_isolation &&
2192
+ (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
2193
+ dhdpcie_dongle_reset(bus);
2194
+ }
2195
+
2196
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
8792197
8802198 if (bus->ltrsleep_on_unload) {
8812199 si_corereg(bus->sih, bus->sih->buscoreidx,
....@@ -883,7 +2201,21 @@
8832201 }
8842202
8852203 if (bus->sih->buscorerev == 13)
886
- pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
2204
+ pcie_serdes_iddqdisable(bus->osh, bus->sih,
2205
+ (sbpcieregs_t *) bus->regs);
2206
+
2207
+ /* For inbuilt drivers pcie clk req will be done by RC,
2208
+ * so do not do clkreq from dhd
2209
+ */
2210
+ if (dhd_download_fw_on_driverload)
2211
+ {
2212
+ /* Disable CLKREQ# */
2213
+ dhdpcie_clkreq(bus->osh, 1, 0);
2214
+ }
2215
+
2216
+#ifdef PCIE_SUSPEND_DURING_DETACH
2217
+ dhdpcie_bus_clock_stop(bus);
2218
+#endif /* PCIE_SUSPEND_DURING_DETACH */
8872219
8882220 if (bus->sih != NULL) {
8892221 si_detach(bus->sih);
....@@ -950,8 +2282,7 @@
9502282 /** Stop bus module: clear pending frames, disable data flow */
9512283 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
9522284 {
953
- uint32 status;
954
- unsigned long flags;
2285
+ unsigned long flags, flags_bus;
9552286
9562287 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9572288
....@@ -963,17 +2294,36 @@
9632294 goto done;
9642295 }
9652296
2297
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
2298
+
9662299 DHD_GENERAL_LOCK(bus->dhd, flags);
2300
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
9672301 bus->dhd->busstate = DHD_BUS_DOWN;
9682302 DHD_GENERAL_UNLOCK(bus->dhd, flags);
9692303
2304
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2305
+ atomic_set(&bus->dhd->block_bus, TRUE);
2306
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2307
+
2308
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9702309 dhdpcie_bus_intr_disable(bus);
971
- status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
972
- dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2310
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2311
+
2312
+ if (!bus->is_linkdown) {
2313
+ uint32 status;
2314
+ status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
2315
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2316
+ }
9732317
9742318 if (!dhd_download_fw_on_driverload) {
9752319 dhd_dpc_kill(bus->dhd);
9762320 }
2321
+
2322
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2323
+ pm_runtime_disable(dhd_bus_to_dev(bus));
2324
+ pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2325
+ pm_runtime_enable(dhd_bus_to_dev(bus));
2326
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9772327
9782328 /* Clear rx control and wake any waiters */
9792329 dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
....@@ -983,52 +2333,614 @@
9832333 return;
9842334 }
9852335
986
-/** Watchdog timer function */
2336
+/**
2337
+ * Watchdog timer function.
2338
+ * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
2339
+ */
9872340 bool dhd_bus_watchdog(dhd_pub_t *dhd)
9882341 {
9892342 unsigned long flags;
990
-#ifdef DHD_DEBUG
991
- dhd_bus_t *bus;
992
- bus = dhd->bus;
2343
+ dhd_bus_t *bus = dhd->bus;
9932344
9942345 DHD_GENERAL_LOCK(dhd, flags);
995
- if (dhd->busstate == DHD_BUS_DOWN ||
996
- dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
2346
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
2347
+ DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
9972348 DHD_GENERAL_UNLOCK(dhd, flags);
9982349 return FALSE;
9992350 }
1000
- dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD;
2351
+ DHD_BUS_BUSY_SET_IN_WD(dhd);
10012352 DHD_GENERAL_UNLOCK(dhd, flags);
10022353
1003
-
2354
+#ifdef DHD_PCIE_RUNTIMEPM
2355
+ dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
2356
+#endif /* DHD_PCIE_RUNTIMEPM */
10042357
10052358 /* Poll for console output periodically */
1006
- if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
2359
+ if (dhd->busstate == DHD_BUS_DATA &&
2360
+ dhd->dhd_console_ms != 0 &&
2361
+ bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
10072362 bus->console.count += dhd_watchdog_ms;
1008
- if (bus->console.count >= dhd_console_ms) {
1009
- bus->console.count -= dhd_console_ms;
2363
+ if (bus->console.count >= dhd->dhd_console_ms) {
2364
+ bus->console.count -= dhd->dhd_console_ms;
2365
+
2366
+ if (MULTIBP_ENAB(bus->sih)) {
2367
+ dhd_bus_pcie_pwr_req(bus);
2368
+ }
2369
+
10102370 /* Make sure backplane clock is on */
1011
- if (dhdpcie_bus_readconsole(bus) < 0)
1012
- dhd_console_ms = 0; /* On error, stop trying */
2371
+ if (dhdpcie_bus_readconsole(bus) < 0) {
2372
+ dhd->dhd_console_ms = 0; /* On error, stop trying */
2373
+ }
2374
+
2375
+ if (MULTIBP_ENAB(bus->sih)) {
2376
+ dhd_bus_pcie_pwr_req_clear(bus);
2377
+ }
10132378 }
10142379 }
1015
-#endif /* DHD_DEBUG */
1016
-
1017
-#ifdef PCIE_OOB
1018
- /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */
1019
- if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) &&
1020
- dhd_timeout_expired(&bus->doorbell_timer)) {
1021
- dhd_bus_set_device_wake(bus, FALSE);
1022
- }
1023
-#endif /* PCIE_OOB */
10242380
10252381 DHD_GENERAL_LOCK(dhd, flags);
1026
- dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD;
2382
+ DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
2383
+ dhd_os_busbusy_wake(dhd);
10272384 DHD_GENERAL_UNLOCK(dhd, flags);
2385
+
10282386 return TRUE;
10292387 } /* dhd_bus_watchdog */
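+
+/* Worked example (illustrative values): console.count advances by
+ * dhd_watchdog_ms per tick, so with dhd_watchdog_ms = 10 and
+ * dhd_console_ms = 250 the dongle console is polled on every 25th
+ * watchdog tick; a failed read sets dhd_console_ms to 0 and stops
+ * the polling.
+ */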
10302388
1031
-/* Download firmware image and nvram image */
2389
+#if defined(SUPPORT_MULTIPLE_REVISION)
2390
+static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2391
+{
2392
+ uint32 chiprev;
2393
+#if defined(SUPPORT_MULTIPLE_CHIPS)
2394
+ char chipver_tag[20] = "_4358";
2395
+#else
2396
+ char chipver_tag[10] = {0, };
2397
+#endif /* SUPPORT_MULTIPLE_CHIPS */
2398
+
2399
+ chiprev = dhd_bus_chiprev(bus);
2400
+ if (chiprev == 0) {
2401
+ DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2402
+ strcat(chipver_tag, "_a0");
2403
+ } else if (chiprev == 1) {
2404
+ DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2405
+#if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2406
+ strcat(chipver_tag, "_a1");
2407
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2408
+ } else if (chiprev == 3) {
2409
+ DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2410
+#if defined(SUPPORT_MULTIPLE_CHIPS)
2411
+ strcat(chipver_tag, "_a3");
2412
+#endif /* SUPPORT_MULTIPLE_CHIPS */
2413
+ } else {
2414
+ DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2415
+ }
2416
+
2417
+ strcat(fw_path, chipver_tag);
2418
+
2419
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2420
+ if (chiprev == 1 || chiprev == 3) {
2421
+ int ret = dhd_check_module_b85a();
2422
+ if ((chiprev == 1) && (ret < 0)) {
2423
+ memset(chipver_tag, 0x00, sizeof(chipver_tag));
2424
+ strcat(chipver_tag, "_b85");
2425
+ strcat(chipver_tag, "_a1");
2426
+ }
2427
+ }
2428
+
2429
+ DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2430
+#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2431
+
2432
+#if defined(SUPPORT_MULTIPLE_BOARD_REV)
2433
+ if (system_rev >= 10) {
2434
+ DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
2435
+ strcat(chipver_tag, "_r10");
2436
+ }
2437
+#endif /* SUPPORT_MULTIPLE_BOARD_REV */
2438
+ strcat(nv_path, chipver_tag);
2439
+
2440
+ return 0;
2441
+}
2442
+
2443
+static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2444
+{
2445
+ uint32 chip_ver;
2446
+ char chipver_tag[10] = {0, };
2447
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2448
+ defined(SUPPORT_BCM4359_MIXED_MODULES)
2449
+ int module_type = -1;
2450
+#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2451
+
2452
+ chip_ver = bus->sih->chiprev;
2453
+ if (chip_ver == 4) {
2454
+ DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2455
+ strncat(chipver_tag, "_b0", strlen("_b0"));
2456
+ } else if (chip_ver == 5) {
2457
+ DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2458
+ strncat(chipver_tag, "_b1", strlen("_b1"));
2459
+ } else if (chip_ver == 9) {
2460
+ DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2461
+ strncat(chipver_tag, "_c0", strlen("_c0"));
2462
+ } else {
2463
+ DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2464
+ return -1;
2465
+ }
2466
+
2467
+#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2468
+ defined(SUPPORT_BCM4359_MIXED_MODULES)
2469
+ module_type = dhd_check_module_b90();
2470
+
2471
+ switch (module_type) {
2472
+ case BCM4359_MODULE_TYPE_B90B:
2473
+ strcat(fw_path, chipver_tag);
2474
+ break;
2475
+ case BCM4359_MODULE_TYPE_B90S:
2476
+ default:
2477
+ /*
2478
+ * If the .cid.info file does not exist,
2479
+ * force-load the B90S FW for the initial MFG boot-up.
2480
+ */
2481
+ if (chip_ver == 5) {
2482
+ strncat(fw_path, "_b90s", strlen("_b90s"));
2483
+ }
2484
+ strcat(fw_path, chipver_tag);
2485
+ strcat(nv_path, chipver_tag);
2486
+ break;
2487
+ }
2488
+#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2489
+ strcat(fw_path, chipver_tag);
2490
+ strcat(nv_path, chipver_tag);
2491
+#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2492
+
2493
+ return 0;
2494
+}
2495
+
2496
+#if defined(USE_CID_CHECK)
2497
+
2498
+#define MAX_EXTENSION 20
2499
+#define MODULE_BCM4361_INDEX 3
2500
+#define CHIP_REV_A0 1
2501
+#define CHIP_REV_A1 2
2502
+#define CHIP_REV_B0 3
2503
+#define CHIP_REV_B1 4
2504
+#define CHIP_REV_B2 5
2505
+#define CHIP_REV_C0 6
2506
+#define BOARD_TYPE_EPA 0x080f
2507
+#define BOARD_TYPE_IPA 0x0827
2508
+#define BOARD_TYPE_IPA_OLD 0x081a
2509
+#define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
2510
+#define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
2511
+#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
2512
+#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
2513
+#define MAX_VID_LEN 8
2514
+#define CIS_TUPLE_HDR_LEN 2
2515
+#if defined(BCM4361_CHIP)
2516
+#define CIS_TUPLE_START_ADDRESS 0x18011110
2517
+#define CIS_TUPLE_END_ADDRESS 0x18011167
2518
+#elif defined(BCM4375_CHIP)
2519
+#define CIS_TUPLE_START_ADDRESS 0x18011120
2520
+#define CIS_TUPLE_END_ADDRESS 0x18011177
2521
+#endif /* defined(BCM4361_CHIP) */
2522
+#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
2523
+ + 1) / sizeof(uint32))
2524
+#define CIS_TUPLE_TAG_START 0x80
2525
+#define CIS_TUPLE_TAG_VENDOR 0x81
2526
+#define CIS_TUPLE_TAG_BOARDTYPE 0x1b
2527
+#define CIS_TUPLE_TAG_LENGTH 1
2528
+#define NVRAM_FEM_MURATA "_murata"
2529
+#define CID_FEM_MURATA "_mur_"
2530
+
2531
+typedef struct cis_tuple_format {
2532
+ uint8 id;
2533
+ uint8 len; /* total length of tag and data */
2534
+ uint8 tag;
2535
+ uint8 data[1];
2536
+} cis_tuple_format_t;
2537
+
2538
+typedef struct {
2539
+ char cid_ext[MAX_EXTENSION];
2540
+ char nvram_ext[MAX_EXTENSION];
2541
+ char fw_ext[MAX_EXTENSION];
2542
+} naming_info_t;
2543
+
2544
+naming_info_t bcm4361_naming_table[] = {
2545
+ { {""}, {""}, {""} },
2546
+ { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2547
+ { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2548
+ { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2549
+ { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2550
+ { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2551
+ { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2552
+ { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2553
+ { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2554
+ { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2555
+ { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2556
+ { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2557
+ { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2558
+ { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2559
+ { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2560
+ { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2561
+ { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2562
+ { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2563
+ { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2564
+ { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2565
+ { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2566
+ { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2567
+ { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2568
+ { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
2569
+ { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2570
+ { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2571
+ { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2572
+ { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2573
+ { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2574
+ { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2575
+ { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2576
+ { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2577
+ { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2578
+ { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2579
+ { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2580
+};
2581
+
2582
+#define MODULE_BCM4375_INDEX 3
2583
+
2584
+naming_info_t bcm4375_naming_table[] = {
2585
+ { {""}, {""}, {""} },
2586
+ { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
2587
+ { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
2588
+ { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
2589
+ { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
2590
+ { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
2591
+ { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
2592
+ { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
2593
+ { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
2594
+ { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
2595
+ { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
2596
+ { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
2597
+ { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
2598
+ { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
2599
+ { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
2600
+ { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
2601
+ { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
2602
+ { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
2603
+ { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
2604
+ { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
2605
+};
2606
+
2607
+static naming_info_t *
2608
+dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2609
+{
2610
+ int index_found = 0, i = 0;
2611
+
2612
+ if (module_type && strlen(module_type) > 0) {
2613
+ for (i = 1; i < table_size; i++) {
2614
+ if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2615
+ index_found = i;
2616
+ break;
2617
+ }
2618
+ }
2619
+ }
2620
+
2621
+ DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2622
+
2623
+ return &table[index_found];
2624
+}
2625
+
2626
+static naming_info_t *
2627
+dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2628
+ char *cid_info)
2629
+{
2630
+ int index_found = 0, i = 0;
2631
+ char *ptr;
2632
+
2633
+ /* truncate extension */
2634
+ for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2635
+ ptr = bcmstrstr(ptr, "_");
2636
+ if (ptr) {
2637
+ ptr++;
2638
+ }
2639
+ }
2640
+
2641
+ for (i = 1; i < table_size && ptr; i++) {
2642
+ if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2643
+ index_found = i;
2644
+ break;
2645
+ }
2646
+ }
2647
+
2648
+ DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2649
+
2650
+ return &table[index_found];
2651
+}
2652
+
2653
+static int
2654
+dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
2655
+ unsigned char *vid, int *vid_length)
2656
+{
2657
+ int boardtype_backplane_addr[] = {
2658
+ 0x18010324, /* OTP Control 1 */
2659
+ 0x18012618, /* PMU min resource mask */
2660
+ };
2661
+ int boardtype_backplane_data[] = {
2662
+ 0x00fa0000,
2663
+ 0x0e4fffff /* Keep on ARMHTAVAIL */
2664
+ };
2665
+ int int_val = 0, i = 0;
2666
+ cis_tuple_format_t *tuple;
2667
+ int totlen, len;
2668
+ uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2669
+
2670
+ for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2671
+ /* Write new OTP and PMU configuration */
2672
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2673
+ &boardtype_backplane_data[i], FALSE) != BCME_OK) {
2674
+ DHD_ERROR(("invalid size/addr combination\n"));
2675
+ return BCME_ERROR;
2676
+ }
2677
+
2678
+ if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2679
+ &int_val, TRUE) != BCME_OK) {
2680
+ DHD_ERROR(("invalid size/addr combination\n"));
2681
+ return BCME_ERROR;
2682
+ }
2683
+
2684
+ DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2685
+ __FUNCTION__, boardtype_backplane_addr[i], int_val));
2686
+ }
2687
+
2688
+ /* read tuple raw data */
2689
+ for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2690
+ if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2691
+ sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
2692
+ break;
2693
+ }
2694
+ }
2695
+
2696
+ totlen = i * sizeof(uint32);
2697
+ tuple = (cis_tuple_format_t *)raw_data;
2698
+
2699
+ /* check the first tuple has tag 'start' */
2700
+ if (tuple->id != CIS_TUPLE_TAG_START) {
2701
+ return BCME_ERROR;
2702
+ }
2703
+
2704
+ *vid_length = *boardtype = 0;
2705
+
2706
+ /* find tagged parameter */
2707
+ while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2708
+ (*vid_length == 0 || *boardtype == 0)) {
2709
+ len = tuple->len;
2710
+
2711
+ if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2712
+ (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2713
+ /* found VID */
2714
+ memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2715
+ *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2716
+ prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2717
+ }
2718
+ else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2719
+ (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2720
+ /* found boardtype */
2721
+ *boardtype = (int)tuple->data[0];
2722
+ prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2723
+ }
2724
+
2725
+ tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2726
+ totlen -= (len + CIS_TUPLE_HDR_LEN);
2727
+ }
2728
+
2729
+ if (*vid_length <= 0 || *boardtype <= 0) {
2730
+ DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2731
+ *vid_length, *boardtype));
2732
+ return BCME_ERROR;
2733
+ }
2734
+
2735
+ return BCME_OK;
2736
+
2737
+}
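+
+/* Worked example (hypothetical OTP contents): per cis_tuple_format_t,
+ * 'len' counts the tag byte plus data, so the raw tuple stream
+ *
+ *	80 07 81 30 31 32 33 34 35	id=START, len=7, tag=VENDOR
+ *	80 03 1b 27 08			id=START, len=3, tag=BOARDTYPE
+ *
+ * yields *vid_length = 6 (len - CIS_TUPLE_TAG_LENGTH) and
+ * *boardtype = 0x27 (only the first data byte is used).
+ */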
2738
+
2739
+static naming_info_t *
2740
+dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2741
+ dhd_bus_t *bus, bool *is_murata_fem)
2742
+{
2743
+ int board_type = 0, chip_rev = 0, vid_length = 0;
2744
+ unsigned char vid[MAX_VID_LEN];
2745
+ naming_info_t *info = &table[0];
2746
+ char *cid_info = NULL;
2747
+
2748
+ if (!bus || !bus->sih) {
2749
+ DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2750
+ return NULL;
2751
+ }
2752
+ chip_rev = bus->sih->chiprev;
2753
+
2754
+ if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
2755
+ != BCME_OK) {
2756
+ DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2757
+ return NULL;
2758
+ }
2759
+
2760
+ DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2761
+
2762
+#if defined(BCM4361_CHIP)
2763
+ /* A0 chipset has exception only */
2764
+ if (chip_rev == CHIP_REV_A0) {
2765
+ if (board_type == BOARD_TYPE_EPA) {
2766
+ info = dhd_find_naming_info(table, table_size,
2767
+ DEFAULT_CIDINFO_FOR_EPA);
2768
+ } else if ((board_type == BOARD_TYPE_IPA) ||
2769
+ (board_type == BOARD_TYPE_IPA_OLD)) {
2770
+ info = dhd_find_naming_info(table, table_size,
2771
+ DEFAULT_CIDINFO_FOR_IPA);
2772
+ }
2773
+ } else {
2774
+ cid_info = dhd_get_cid_info(vid, vid_length);
2775
+ if (cid_info) {
2776
+ info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2777
+ if (strstr(cid_info, CID_FEM_MURATA)) {
2778
+ *is_murata_fem = TRUE;
2779
+ }
2780
+ }
2781
+ }
2782
+#else
2783
+ cid_info = dhd_get_cid_info(vid, vid_length);
2784
+ if (cid_info) {
2785
+ info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2786
+ if (strstr(cid_info, CID_FEM_MURATA)) {
2787
+ *is_murata_fem = TRUE;
2788
+ }
2789
+ }
2790
+#endif /* BCM4361_CHIP */
2791
+
2792
+ return info;
2793
+}
2794
+#endif /* USE_CID_CHECK */
2795
+
2796
+static int
2797
+concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2798
+{
2799
+ int ret = BCME_OK;
2800
+#if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2801
+ char module_type[MAX_VNAME_LEN];
2802
+ naming_info_t *info = NULL;
2803
+ bool is_murata_fem = FALSE;
2804
+
2805
+ memset(module_type, 0, sizeof(module_type));
2806
+
2807
+ if (dhd_check_module_bcm(module_type,
2808
+ MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2809
+ info = dhd_find_naming_info(bcm4361_naming_table,
2810
+ ARRAYSIZE(bcm4361_naming_table), module_type);
2811
+ } else {
2812
+ /* in case the .cid.info file does not exist */
2813
+ info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2814
+ ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2815
+ }
2816
+
2817
+ if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
2818
+ is_murata_fem = FALSE;
2819
+ }
2820
+
2821
+ if (info) {
2822
+ if (is_murata_fem) {
2823
+ strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2824
+ }
2825
+ strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2826
+ strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2827
+ } else {
2828
+ DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2829
+ ret = BCME_ERROR;
2830
+ }
2831
+#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2832
+ char chipver_tag[10] = {0, };
2833
+
2834
+ strcat(fw_path, chipver_tag);
2835
+ strcat(nv_path, chipver_tag);
2836
+#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2837
+
2838
+ return ret;
2839
+}
2840
+
2841
+static int
2842
+concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
2843
+{
2844
+ int ret = BCME_OK;
2845
+#if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2846
+ char module_type[MAX_VNAME_LEN];
2847
+ naming_info_t *info = NULL;
2848
+ bool is_murata_fem = FALSE;
2849
+
2850
+ memset(module_type, 0, sizeof(module_type));
2851
+
2852
+ if (dhd_check_module_bcm(module_type,
2853
+ MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
2854
+ info = dhd_find_naming_info(bcm4375_naming_table,
2855
+ ARRAYSIZE(bcm4375_naming_table), module_type);
2856
+ } else {
2857
+ /* in case the .cid.info file does not exist */
2858
+ info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
2859
+ ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
2860
+ }
2861
+
2862
+ if (info) {
2863
+ strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2864
+ strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2865
+ } else {
2866
+ DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2867
+ ret = BCME_ERROR;
2868
+ }
2869
+#else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2870
+ char chipver_tag[10] = {0, };
2871
+
2872
+ strcat(fw_path, chipver_tag);
2873
+ strcat(nv_path, chipver_tag);
2874
+#endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2875
+
2876
+ return ret;
2877
+}
2878
+
2879
+int
2880
+concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2881
+{
2882
+ int res = 0;
2883
+
2884
+ if (!bus || !bus->sih) {
2885
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2886
+ return -1;
2887
+ }
2888
+
2889
+ if (!fw_path || !nv_path) {
2890
+ DHD_ERROR(("fw_path or nv_path is null.\n"));
2891
+ return res;
2892
+ }
2893
+
2894
+ switch (si_chipid(bus->sih)) {
2895
+
2896
+ case BCM43569_CHIP_ID:
2897
+ case BCM4358_CHIP_ID:
2898
+ res = concate_revision_bcm4358(bus, fw_path, nv_path);
2899
+ break;
2900
+ case BCM4355_CHIP_ID:
2901
+ case BCM4359_CHIP_ID:
2902
+ res = concate_revision_bcm4359(bus, fw_path, nv_path);
2903
+ break;
2904
+ case BCM4361_CHIP_ID:
2905
+ case BCM4347_CHIP_ID:
2906
+ res = concate_revision_bcm4361(bus, fw_path, nv_path);
2907
+ break;
2908
+ case BCM4375_CHIP_ID:
2909
+ res = concate_revision_bcm4375(bus, fw_path, nv_path);
2910
+ break;
2911
+ default:
2912
+ DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2913
+ return res;
2914
+ }
2915
+
2916
+ return res;
2917
+}
2918
+#endif /* SUPPORT_MULTIPLE_REVISION */
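+
+/* Illustrative example (hypothetical paths): the revision tag is appended
+ * in place at the end of the supplied strings, so for a 4359 C0 chip
+ *
+ *	fw_path "fw_bcmdhd.bin" -> "fw_bcmdhd.bin_c0"
+ *	nv_path "nvram.txt"     -> "nvram.txt_c0"
+ *
+ * and callers must pass buffers with enough headroom for the tag.
+ */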
2919
+
2920
+uint16
2921
+dhd_get_chipid(dhd_pub_t *dhd)
2922
+{
2923
+ dhd_bus_t *bus = dhd->bus;
2924
+
2925
+ if (bus && bus->sih)
2926
+ return (uint16)si_chipid(bus->sih);
2927
+ else
2928
+ return 0;
2929
+}
2930
+
2931
+/**
2932
+ * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2933
+ *
2934
+ * BCM_REQUEST_FW specific :
2935
+ * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
2936
+ * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
2937
+ *
2938
+ * BCMEMBEDIMAGE specific:
2939
+ * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2940
+ * file will be used instead.
2941
+ *
2942
+ * @return BCME_OK on success
2943
+ */
10322944 int
10332945 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
10342946 char *pfw_path, char *pnv_path)
....@@ -1038,43 +2950,93 @@
10382950 bus->fw_path = pfw_path;
10392951 bus->nv_path = pnv_path;
10402952
1041
-#ifndef CUSTOMER_HW_31_2
2953
+#if defined(SUPPORT_MULTIPLE_REVISION)
2954
+ if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2955
+ DHD_ERROR(("%s: fail to concatnate revison \n",
2956
+ __FUNCTION__));
2957
+ return BCME_BADARG;
2958
+ }
2959
+#endif /* SUPPORT_MULTIPLE_REVISION */
2960
+
2961
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
2962
+ dhd_set_blob_support(bus->dhd, bus->fw_path);
2963
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
2964
+
10422965 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
10432966 __FUNCTION__, bus->fw_path, bus->nv_path));
1044
-#endif
2967
+ dhdpcie_dump_resource(bus);
10452968
10462969 ret = dhdpcie_download_firmware(bus, osh);
10472970
10482971 return ret;
10492972 }
10502973
1051
-/* Define alternate fw/nvram paths used in Android */
1052
-#define CONFIG_ANDROID_BCMDHD_FW_PATH "/vendor/firmware/broadcom/dhd/firmware/fw.bin.trx"
1053
-#define CONFIG_ANDROID_BCMDHD_NVRAM_PATH "/vendor/firmware/broadcom/dhd/nvrams/nvm.txt"
1054
-
2974
+/**
2975
+ * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2976
+ *
2977
+ * BCM_REQUEST_FW specific :
2978
+ * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
2979
+ * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
2980
+ *
2981
+ * BCMEMBEDIMAGE specific:
2982
+ * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2983
+ * file will be used instead.
2984
+ *
2985
+ * @return BCME_OK on success
2986
+ */
10552987 static int
10562988 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
10572989 {
10582990 int ret = 0;
10592991 #if defined(BCM_REQUEST_FW)
2992
+ uint chipid = bus->sih->chip;
2993
+ uint revid = bus->sih->chiprev;
10602994 char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
10612995 char nv_path[64]; /* path to nvram vars file */
10622996 bus->fw_path = fw_path;
10632997 bus->nv_path = nv_path;
2998
+ switch (chipid) {
2999
+ case BCM43570_CHIP_ID:
3000
+ bcmstrncat(fw_path, "43570", 5);
3001
+ switch (revid) {
3002
+ case 0:
3003
+ bcmstrncat(fw_path, "a0", 2);
3004
+ break;
3005
+ case 2:
3006
+ bcmstrncat(fw_path, "a2", 2);
3007
+ break;
3008
+ default:
3009
+ DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
3010
+ revid));
3011
+ break;
3012
+ }
3013
+ break;
3014
+ default:
3015
+ DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
3016
+ chipid));
3017
+ return 0;
3018
+ }
10643019 /* load board specific nvram file */
1065
- snprintf(bus->nv_path, sizeof(nv_path), "%s", CONFIG_ANDROID_BCMDHD_NVRAM_PATH);
3020
+ snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
10663021 /* load firmware */
1067
- snprintf(bus->fw_path, sizeof(fw_path), "%s", CONFIG_ANDROID_BCMDHD_FW_PATH);
3022
+ snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
10683023 #endif /* BCM_REQUEST_FW */
10693024
10703025 DHD_OS_WAKE_LOCK(bus->dhd);
1071
-
10723026 ret = _dhdpcie_download_firmware(bus);
10733027
10743028 DHD_OS_WAKE_UNLOCK(bus->dhd);
10753029 return ret;
1076
-}
3030
+} /* dhdpcie_download_firmware */
10773031
3032
+#define DHD_MEMORY_SET_PATTERN 0xAA
3033
+
3034
+/**
3035
+ * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
3036
+ * is updated with the event logging partitions within that file as well.
3037
+ *
3038
+ * @param pfw_path Path to .bin or .bea file
3039
+ */
10783040 static int
10793041 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
10803042 {
....@@ -1083,29 +3045,55 @@
10833045 int len = 0;
10843046 bool store_reset;
10853047 char *imgbuf = NULL;
1086
- uint8 *memblock = NULL, *memptr;
3048
+ uint8 *memblock = NULL, *memptr = NULL;
3049
+ int offset_end = bus->ramsize;
3050
+ uint32 file_size = 0, read_len = 0;
3051
+ struct trx_header *trx_hdr;
3052
+ bool trx_chk = TRUE;
10873053
3054
+#if defined(DHD_FW_MEM_CORRUPTION)
3055
+ if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
3056
+ dhd_tcm_test_enable = TRUE;
3057
+ } else {
3058
+ dhd_tcm_test_enable = FALSE;
3059
+ }
3060
+#endif /* DHD_FW_MEM_CORRUPTION */
3061
+ DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
3062
+ /* TCM check */
3063
+ if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
3064
+ DHD_ERROR(("dhd_bus_tcm_test failed\n"));
3065
+ bcmerror = BCME_ERROR;
3066
+ goto err;
3067
+ }
10883068 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
10893069
10903070 /* Should succeed in opening image if it is actually given through registry
10913071 * entry or in module param.
10923072 */
1093
- imgbuf = dhd_os_open_image(pfw_path);
1094
- if (imgbuf == NULL)
3073
+ imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
3074
+ if (imgbuf == NULL) {
10953075 goto err;
3076
+ }
3077
+
3078
+ file_size = dhd_os_get_image_size(imgbuf);
3079
+ if (!file_size) {
3080
+ DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
3081
+ goto err;
3082
+ }
10963083
10973084 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
10983085 if (memblock == NULL) {
10993086 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
3087
+ bcmerror = BCME_NOMEM;
11003088 goto err;
11013089 }
1102
- if ((uint32)(uintptr)memblock % DHD_SDALIGN)
3090
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
11033091 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
3092
+ }
11043093
11053094 /* check if CR4/CA7 */
11063095 store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
11073096 si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
1108
-
11093097 /* Download image with MEMBLOCK size */
11103098 while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
11113099 if (len < 0) {
....@@ -1113,13 +3101,32 @@
11133101 bcmerror = BCME_ERROR;
11143102 goto err;
11153103 }
3104
+ read_len += len;
3105
+ if (read_len > file_size) {
3106
+ DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
3107
+ " file_size=%u truncating len to %d \n", __FUNCTION__,
3108
+ len, read_len, file_size, (len - (read_len - file_size))));
3109
+ len -= (read_len - file_size);
3110
+ }
3111
+
11163112 /* if address is 0, store the reset instruction to be written in 0 */
11173113 if (store_reset) {
11183114 ASSERT(offset == 0);
11193115 bus->resetinstr = *(((uint32*)memptr));
11203116 /* Add start of RAM address to the address given by user */
11213117 offset += bus->dongle_ram_base;
3118
+ offset_end += offset;
11223119 store_reset = FALSE;
3120
+ }
3121
+
3122
+ /* Check for trx file */
3123
+ if (trx_chk && (len >= sizeof(struct trx_header))) {
3124
+ trx_chk = FALSE;
3125
+ trx_hdr = (struct trx_header *)memptr;
3126
+ if (trx_hdr->magic == TRX_MAGIC) {
3127
+ /* CYW55560, we need to write TRX header at RAMSTART */
3128
+ offset -= sizeof(struct trx_header);
3129
+ }
11233130 }
11243131
11253132 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
....@@ -1129,17 +3136,33 @@
11293136 goto err;
11303137 }
11313138 offset += MEMBLOCK;
3139
+
3140
+ if (offset >= offset_end) {
3141
+ DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
3142
+ __FUNCTION__, offset, offset_end));
3143
+ bcmerror = BCME_ERROR;
3144
+ goto err;
3145
+ }
3146
+
3147
+ if (read_len >= file_size) {
3148
+ break;
3149
+ }
3150
+ }
3151
+err:
3152
+ if (memblock) {
3153
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
11323154 }
11333155
1134
-err:
1135
- if (memblock)
1136
- MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1137
-
1138
- if (imgbuf)
1139
- dhd_os_close_image(imgbuf);
3156
+ if (imgbuf) {
3157
+ dhd_os_close_image1(bus->dhd, imgbuf);
3158
+ }
11403159
11413160 return bcmerror;
11423161 } /* dhdpcie_download_code_file */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define MIN_NVRAMVARS_SIZE 128
+#endif /* CUSTOMER_HW4_DEBUG */

 static int
 dhdpcie_download_nvram(struct dhd_bus *bus)
@@ -1154,25 +3177,18 @@
     bool local_alloc = FALSE;
     pnv_path = bus->nv_path;

-
-#ifdef CUSTOMER_HW_31_2
-    bus->nvram_params = NVRAMARRAY;
-    bus->nvram_params_len = strlen(bus->nvram_params);
-    DHD_TRACE(("%s:--> nvram len %d\n", __FUNCTION__, bus->nvram_params_len));
-#endif
-#ifndef CUSTOMER_HW_31_2
     nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));

     /* First try UEFI */
     len = MAX_NVRAMBUF_SIZE;
-    dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len);
+    dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);

     /* If UEFI empty, then read from file system */
-    if ((len == 0) || (memblock[0] == '\0')) {
+    if ((len <= 0) || (memblock == NULL)) {

         if (nvram_file_exists) {
             len = MAX_NVRAMBUF_SIZE;
-            dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len);
+            dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
             if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
                 goto err;
             }
@@ -1185,48 +3201,27 @@
         nvram_uefi_exists = TRUE;
     }

-#else
-    if (bus->nvram_params_len) {
-        memblock = MALLOCZ(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
-        if (memblock == NULL) {
-            DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
-                __FUNCTION__, MAX_NVRAMBUF_SIZE));
-            goto err;
-        }
+    DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));

-        local_alloc = TRUE;
-        /* nvram is string with null terminated. cannot use strlen */
-        len = bus->nvram_params_len;
-        ASSERT(len <= MAX_NVRAMBUF_SIZE);
-        memcpy(memblock, bus->nvram_params, len);
-        nvram_file_exists = TRUE;
-    }
-#endif /* !CUSTOMER_HW_31_2 */
-
-    if (len > 0 && len <= MAX_NVRAMBUF_SIZE) {
+    if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
         bufp = (char *) memblock;

-#ifdef CACHE_FW_IMAGES
-        if (bus->processed_nvram_params_len) {
-            len = bus->processed_nvram_params_len;
-        }
-
-        if (!bus->processed_nvram_params_len) {
-            bufp[len] = 0;
-            if (nvram_uefi_exists || nvram_file_exists) {
-                len = process_nvram_vars(bufp, len);
-                bus->processed_nvram_params_len = len;
-            }
-        } else
-#else
         {
             bufp[len] = 0;
             if (nvram_uefi_exists || nvram_file_exists) {
                 len = process_nvram_vars(bufp, len);
             }
         }
-#endif /* CACHE_FW_IMAGES */

+        DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
+#ifdef CUSTOMER_HW4_DEBUG
+        if (len < MIN_NVRAMVARS_SIZE) {
+            DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
+                __FUNCTION__));
+            bcmerror = BCME_ERROR;
+            goto err;
+        }
+#endif /* CUSTOMER_HW4_DEBUG */

         if (len % 4) {
             len += 4 - (len % 4);
@@ -1241,7 +3236,6 @@
         }
     }

-
 err:
     if (memblock) {
         if (local_alloc) {
@@ -1254,123 +3248,6 @@
     return bcmerror;
 }
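For reference, the added length handling above boils down to: NUL-terminate the buffer, let process_nvram_vars() compress the text, then round the result up to a 4-byte multiple before download. A hedged sketch of that fix-up (finalize_nvram_len is a hypothetical helper, not a DHD function, and the parser call is elided):

/* Editor's sketch, not driver code. */
static int finalize_nvram_len(char *buf, int len)
{
    buf[len] = '\0';              /* the variable parser expects a terminated string */
    /* len = process_nvram_vars(buf, len);  -- driver-side step, omitted here */
    if (len % 4)
        len += 4 - (len % 4);     /* dongle writes happen in 32-bit words */
    return len;
}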

-
-#ifdef BCMEMBEDIMAGE
-int
-dhdpcie_download_code_array(struct dhd_bus *bus)
-{
-    int bcmerror = -1;
-    int offset = 0;
-    unsigned char *p_dlarray = NULL;
-    unsigned int dlarray_size = 0;
-    unsigned int downloded_len, remaining_len, len;
-    char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
-    uint8 *memblock = NULL, *memptr;
-
-    downloded_len = 0;
-    remaining_len = 0;
-    len = 0;
-
-    p_dlarray = dlarray;
-    dlarray_size = sizeof(dlarray);
-    p_dlimagename = dlimagename;
-    p_dlimagever = dlimagever;
-    p_dlimagedate = dlimagedate;
-
-    if ((p_dlarray == 0) || (dlarray_size == 0) || (dlarray_size > bus->ramsize) ||
-        (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
-        goto err;
-
-    memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
-    if (memblock == NULL) {
-        DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
-        goto err;
-    }
-    if ((uint32)(uintptr)memblock % DHD_SDALIGN)
-        memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
-
-    while (downloded_len < dlarray_size) {
-        remaining_len = dlarray_size - downloded_len;
-        if (remaining_len >= MEMBLOCK)
-            len = MEMBLOCK;
-        else
-            len = remaining_len;
-
-        memcpy(memptr, (p_dlarray + downloded_len), len);
-        /* check if CR4/CA7 */
-        if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
-            si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
-            /* if address is 0, store the reset instruction to be written in 0 */
-            if (offset == 0) {
-                bus->resetinstr = *(((uint32*)memptr));
-                /* Add start of RAM address to the address given by user */
-                offset += bus->dongle_ram_base;
-            }
-        }
-        bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
-        downloded_len += len;
-        if (bcmerror) {
-            DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
-                __FUNCTION__, bcmerror, MEMBLOCK, offset));
-            goto err;
-        }
-        offset += MEMBLOCK;
-    }
-
-#ifdef DHD_DEBUG
-    /* Upload and compare the downloaded code */
-    {
-        unsigned char *ularray = NULL;
-        unsigned int uploded_len;
-        uploded_len = 0;
-        bcmerror = -1;
-        ularray = MALLOC(bus->dhd->osh, dlarray_size);
-        if (ularray == NULL)
-            goto upload_err;
-        /* Upload image to verify downloaded contents. */
-        offset = bus->dongle_ram_base;
-        memset(ularray, 0xaa, dlarray_size);
-        while (uploded_len < dlarray_size) {
-            remaining_len = dlarray_size - uploded_len;
-            if (remaining_len >= MEMBLOCK)
-                len = MEMBLOCK;
-            else
-                len = remaining_len;
-            bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
-                (uint8 *)(ularray + uploded_len), len);
-            if (bcmerror) {
-                DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
-                    __FUNCTION__, bcmerror, MEMBLOCK, offset));
-                goto upload_err;
-            }
-
-            uploded_len += len;
-            offset += MEMBLOCK;
-        }
-
-        if (memcmp(p_dlarray, ularray, dlarray_size)) {
-            DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
-                __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
-            goto upload_err;
-
-        } else
-            DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
-                __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
-upload_err:
-        if (ularray)
-            MFREE(bus->dhd->osh, ularray, dlarray_size);
-    }
-#endif /* DHD_DEBUG */
-err:
-
-    if (memblock)
-        MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
-
-    return bcmerror;
-} /* dhdpcie_download_code_array */
-#endif /* BCMEMBEDIMAGE */
-
-
 static int
 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
 {
@@ -1382,7 +3259,8 @@

     /* External image takes precedence if specified */
     if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
-        imgbuf = dhd_os_open_image(bus->fw_path);
+        // opens and seeks to correct file offset:
+        imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
         if (imgbuf == NULL) {
             DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
             goto err;
@@ -1399,7 +3277,7 @@

 err:
     if (imgbuf)
-        dhd_os_close_image(imgbuf);
+        dhd_os_close_image1(bus->dhd, imgbuf);

     return bcmerror;
 }
@@ -1418,7 +3296,17 @@
     uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
     hnd_ramsize_ptr_t ramsize_info;

-    DHD_ERROR(("%s: Enter\n", __FUNCTION__));
+    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+    /* Return if the dongle RAMSIZE adjustment was already done. */
+    if (bus->ramsize_adjusted) {
+        return;
+    }
+
+    /* success or failure, we don't want to be here
+     * more than once.
+     */
+    bus->ramsize_adjusted = TRUE;

     /* Do not handle this if the user-restricted dongle RAM size is enabled */
     if (dhd_dongle_memsize) {
@@ -1427,21 +3315,19 @@
         return;
     }

-#ifndef BCMEMBEDIMAGE
     /* Out immediately if no image to download */
     if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
         DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
         return;
     }
-#endif /* !BCMEMBEDIMAGE */

     /* Get maximum RAMSIZE info search length */
     for (i = 0; ; i++) {
         if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
             break;

-        if (search_len < ramsize_ptr_ptr[i])
-            search_len = ramsize_ptr_ptr[i];
+        if (search_len < (int)ramsize_ptr_ptr[i])
+            search_len = (int)ramsize_ptr_ptr[i];
     }

     if (!search_len)
@@ -1456,27 +3342,8 @@
     }

     /* External image takes precedence if specified */
-    if (dhdpcie_ramsize_read_image(bus, memptr, search_len) != BCME_OK) {
-#ifdef BCMEMBEDIMAGE
-        unsigned char *p_dlarray = NULL;
-        unsigned int dlarray_size = 0;
-        char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
-
-        p_dlarray = dlarray;
-        dlarray_size = sizeof(dlarray);
-        p_dlimagename = dlimagename;
-        p_dlimagever = dlimagever;
-        p_dlimagedate = dlimagedate;
-
-        if ((p_dlarray == 0) || (dlarray_size == 0) || (p_dlimagename == 0) ||
-            (p_dlimagever == 0) || (p_dlimagedate == 0))
-            goto err;
-
-        ramsizeptr = p_dlarray;
-        ramsizelen = dlarray_size;
-#else
+    if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
         goto err;
-#endif /* BCMEMBEDIMAGE */
     }
     else {
         ramsizeptr = memptr;
@@ -1510,8 +3377,16 @@
     MFREE(bus->dhd->osh, memptr, search_len);

     return;
-} /* _dhdpcie_download_firmware */
+} /* dhdpcie_ramsize_adj */
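The search-length loop above walks a terminator-ended offset table and keeps the maximum, so a single buffer read can cover every candidate location of the RAMSIZE info. A small editor's sketch of the same idea with hypothetical names (OFFSET_END stands in for RAMSIZE_PTR_PTR_END):

/* Editor's sketch, not driver code. */
#include <stdint.h>

#define OFFSET_END 0xffffffffu   /* stand-in list terminator */

static int max_search_len(const uint32_t *offsets)
{
    int search_len = 0;
    int i;

    for (i = 0; offsets[i] != OFFSET_END; i++) {
        if (search_len < (int)offsets[i])
            search_len = (int)offsets[i];   /* buffer must cover the deepest candidate */
    }
    return search_len;
}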

+/**
+ * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
+ *
+ * BCMEMBEDIMAGE specific:
+ * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
+ * file will be used instead.
+ *
+ */
 static int
 _dhdpcie_download_firmware(struct dhd_bus *bus)
 {
@@ -1522,12 +3397,8 @@

     /* Out immediately if no image to download */
     if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
-#ifdef BCMEMBEDIMAGE
-        embed = TRUE;
-#else
         DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
         return 0;
-#endif
     }
     /* Adjust ram size */
     dhdpcie_ramsize_adj(bus);
@@ -1541,32 +3412,18 @@
     /* External image takes precedence if specified */
     if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
         if (dhdpcie_download_code_file(bus, bus->fw_path)) {
-            DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
-#ifdef BCMEMBEDIMAGE
-            embed = TRUE;
-#else
+            DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
+                __LINE__));
             goto err;
-#endif
         } else {
             embed = FALSE;
             dlok = TRUE;
         }
     }

-#ifdef BCMEMBEDIMAGE
-    if (embed) {
-        if (dhdpcie_download_code_array(bus)) {
-            DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
-            goto err;
-        } else {
-            dlok = TRUE;
-        }
-    }
-#else
     BCM_REFERENCE(embed);
-#endif
     if (!dlok) {
-        DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+        DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
         goto err;
     }

@@ -1574,10 +3431,9 @@
     /* If a valid nvram_array is specified as above, it can be passed down to dongle */
     /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */

-
     /* External nvram takes precedence if specified */
     if (dhdpcie_download_nvram(bus)) {
-        DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+        DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
         goto err;
     }

@@ -1593,9 +3449,6 @@
     return bcmerror;
 } /* _dhdpcie_download_firmware */

-#define CONSOLE_LINE_MAX 192
-
-#if (defined(DHD_DEBUG) && !defined(CUSTOMER_HW_31_2))
 static int
 dhdpcie_bus_readconsole(dhd_bus_t *bus)
 {
@@ -1603,6 +3456,11 @@
     uint8 line[CONSOLE_LINE_MAX], ch;
     uint32 n, idx, addr;
     int rv;
+    uint readlen = 0;
+    uint i = 0;
+
+    if (!DHD_FWLOG_ON())
+        return 0;

     /* Don't do anything until FWREADY updates console address */
     if (bus->console_addr == 0)
@@ -1610,6 +3468,20 @@

     /* Read console log struct */
     addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+    /* Check if the console log struct address has changed,
+     * and save a local copy of the address.
+     */
+    if (c->log_addr != addr) {
+        /* Reset last index pointer */
+        c->last = 0;
+        /* Re-allocate memory if console address changes */
+        if (c->buf) {
+            MFREE(bus->dhd->osh, c->buf, c->bufsize);
+            c->buf = NULL;
+        }
+        /* Save new console address */
+        c->log_addr = addr;
+    }

     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
         return rv;
@@ -1619,6 +3491,7 @@
     c->bufsize = ltoh32(c->log.buf_size);
     if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
         return BCME_NOMEM;
+    DHD_ERROR(("conlog: bufsize=0x%x\n", c->bufsize));
     }
     idx = ltoh32(c->log.idx);

@@ -1630,25 +3503,49 @@
     if (idx == c->last)
         return BCME_OK;

-    /* Read the console buffer */
-    addr = ltoh32(c->log.buf);
-    if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
-        return rv;
+    DHD_ERROR(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
+        idx, c->last));

-    while (c->last != idx) {
-        for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
-            if (c->last == idx) {
-                /* This would output a partial line. Instead, back up
-                 * the buffer pointer and output this line next time around.
-                 */
-                if (c->last >= n)
-                    c->last -= n;
-                else
-                    c->last = c->bufsize - n;
-                goto break2;
-            }
-            ch = c->buf[c->last];
-            c->last = (c->last + 1) % c->bufsize;
+    /* Read the console buffer data to a local buffer.
+     * Read only the portion of the buffer that is needed, but
+     * take care to handle wrap-around.
+     */
+    addr = ltoh32(c->log.buf);
+
+    /* wrap around case - write ptr < read ptr */
+    if (idx < c->last) {
+        /* from read ptr to end of buffer */
+        readlen = c->bufsize - c->last;
+        if ((rv = dhdpcie_bus_membytes(bus, FALSE,
+            addr + c->last, c->buf, readlen)) < 0) {
+            DHD_ERROR(("conlog: read error[1] ! \n"));
+            return rv;
+        }
+        /* from beginning of buffer to write ptr */
+        if ((rv = dhdpcie_bus_membytes(bus, FALSE,
+            addr, c->buf + readlen,
+            idx)) < 0) {
+            DHD_ERROR(("conlog: read error[2] ! \n"));
+            return rv;
+        }
+        readlen += idx;
+    } else {
+        /* non-wraparound case, write ptr > read ptr */
+        readlen = (uint)idx - c->last;
+        if ((rv = dhdpcie_bus_membytes(bus, FALSE,
+            addr + c->last, c->buf, readlen)) < 0) {
+            DHD_ERROR(("conlog: read error[3] ! \n"));
+            return rv;
+        }
+    }
+    /* update read ptr */
+    c->last = idx;
+
+    /* now output the read data from the local buffer to the host console */
+    while (i < readlen) {
+        for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
+            ch = c->buf[i];
+            ++i;
             if (ch == '\n')
                 break;
             line[n] = ch;
@@ -1658,32 +3555,107 @@
             if (line[n - 1] == '\r')
                 n--;
             line[n] = 0;
-            printf("CONSOLE: %s\n", line);
-
+            DHD_FWLOG(("CONSOLE: %s\n", line));
         }
     }
-break2:

     return BCME_OK;
-} /* dhdpcie_bus_readconsole */
-#endif /* DHD_DEBUG */

+} /* dhdpcie_bus_readconsole */
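The rewritten read path above treats the dongle console as a classic ring buffer: when the write index has wrapped behind the host's read index, the data is fetched in two spans (tail of the buffer first, then the head). A self-contained editor's sketch of that scheme, with read_fn standing in for dhdpcie_bus_membytes() and all names illustrative:

/* Editor's sketch, not driver code. */
#include <stdint.h>

typedef int (*read_fn_t)(uint32_t src, uint8_t *dst, uint32_t len);

static int ring_read(read_fn_t rd, uint32_t base, uint32_t bufsize,
                     uint32_t last, uint32_t idx, uint8_t *out, uint32_t *outlen)
{
    int rv;

    if (idx < last) {                 /* writer wrapped: copy tail, then head */
        uint32_t tail = bufsize - last;
        if ((rv = rd(base + last, out, tail)) < 0)
            return rv;
        if ((rv = rd(base, out + tail, idx)) < 0)
            return rv;
        *outlen = tail + idx;
    } else {                          /* plain case: one contiguous span */
        if ((rv = rd(base + last, out, idx - last)) < 0)
            return rv;
        *outlen = idx - last;
    }
    return 0;                         /* caller advances last = idx */
}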
+
+void
+dhd_bus_dump_console_buffer(dhd_bus_t *bus)
+{
+    uint32 n, i;
+    uint32 addr;
+    char *console_buffer = NULL;
+    uint32 console_ptr, console_size, console_index;
+    uint8 line[CONSOLE_LINE_MAX], ch;
+    int rv;
+
+    DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
+
+    if (bus->is_linkdown) {
+        DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
+        return;
+    }
+
+    addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
+    if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+        (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
+        goto exit;
+    }
+
+    addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+    if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+        (uint8 *)&console_size, sizeof(console_size))) < 0) {
+        goto exit;
+    }
+
+    addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
+    if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+        (uint8 *)&console_index, sizeof(console_index))) < 0) {
+        goto exit;
+    }
+
+    console_ptr = ltoh32(console_ptr);
+    console_size = ltoh32(console_size);
+    console_index = ltoh32(console_index);
+
+    if (console_size > CONSOLE_BUFFER_MAX ||
+        !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
+        goto exit;
+    }
+
+    if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
+        (uint8 *)console_buffer, console_size)) < 0) {
+        goto exit;
+    }
+
+    for (i = 0, n = 0; i < console_size; i += n + 1) {
+        for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+            ch = console_buffer[(console_index + i + n) % console_size];
+            if (ch == '\n')
+                break;
+            line[n] = ch;
+        }
+
+        if (n > 0) {
+            if (line[n - 1] == '\r')
+                n--;
+            line[n] = 0;
+            /* Don't use DHD_ERROR macro since we print
+             * a lot of information quickly. The macro
+             * will truncate a lot of the printfs
+             */
+            DHD_FWLOG(("CONSOLE: %s\n", line));
+        }
+    }
+
+exit:
+    if (console_buffer)
+        MFREE(bus->dhd->osh, console_buffer, console_size);
+    return;
+}
+
+/**
+ * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
+ *
+ * @return BCME_OK on success
+ */
 static int
 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
 {
     int bcmerror = 0;
     uint msize = 512;
     char *mbuffer = NULL;
-    char *console_buffer = NULL;
     uint maxstrlen = 256;
     char *str = NULL;
-    trap_t tr;
-    pciedev_shared_t *pciedev_shared = bus->pcie_sh;
+    pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
     struct bcmstrbuf strbuf;
-    uint32 console_ptr, console_size, console_index;
-    uint8 line[CONSOLE_LINE_MAX], ch;
-    uint32 n, i, addr;
-    int rv;
+    unsigned long flags;
+    bool dongle_trap_occured = FALSE;

     DHD_TRACE(("%s: Enter\n", __FUNCTION__));

@@ -1702,26 +3674,32 @@
     if (mbuffer == NULL) {
         DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
         bcmerror = BCME_NOMEM;
-        goto done;
+        goto done2;
     }
     }

     if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
         DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
         bcmerror = BCME_NOMEM;
-        goto done;
+        goto done2;
     }
+    DHD_GENERAL_LOCK(bus->dhd, flags);
+    DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
+    DHD_GENERAL_UNLOCK(bus->dhd, flags);

+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req(bus);
+    }
     if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
-        goto done;
+        goto done1;
     }

     bcm_binit(&strbuf, data, size);

     bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
-        pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);
+        local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);

-    if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+    if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
         /* NOTE: Misspelled assert is intentional - DO NOT FIX.
          * (Avoids conflict with real asserts for programmatic parsing of output.)
          */
@@ -1744,7 +3722,7 @@
         if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
             bus->pcie_sh->assert_exp_addr,
             (uint8 *)str, maxstrlen)) < 0) {
-            goto done;
+            goto done1;
         }

         str[maxstrlen - 1] = '\0';
@@ -1756,7 +3734,7 @@
         if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
             bus->pcie_sh->assert_file_addr,
             (uint8 *)str, maxstrlen)) < 0) {
-            goto done;
+            goto done1;
         }

         str[maxstrlen - 1] = '\0';
@@ -1767,109 +3745,68 @@
     }

     if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
-        bus->dhd->dongle_trap_occured = TRUE;
+        trap_t *tr = &bus->dhd->last_trap_info;
+        dongle_trap_occured = TRUE;
         if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
-            bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) {
-            goto done;
+            bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
+            bus->dhd->dongle_trap_occured = TRUE;
+            goto done1;
         }
-
-        bcm_bprintf(&strbuf,
-            "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
-            " lp 0x%x, rpc 0x%x"
-            "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
-            "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
-            ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
-            ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
-            ltoh32(bus->pcie_sh->trap_addr),
-            ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
-            ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
-
-        addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
-        if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
-            (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
-            goto printbuf;
-        }
-
-        addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
-        if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
-            (uint8 *)&console_size, sizeof(console_size))) < 0) {
-            goto printbuf;
-        }
-
-        addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
-        if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
-            (uint8 *)&console_index, sizeof(console_index))) < 0) {
-            goto printbuf;
-        }
-
-        console_ptr = ltoh32(console_ptr);
-        console_size = ltoh32(console_size);
-        console_index = ltoh32(console_index);
-
-        if (console_size > CONSOLE_BUFFER_MAX ||
-            !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
-            goto printbuf;
-        }
-
-        if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
-            (uint8 *)console_buffer, console_size)) < 0) {
-            goto printbuf;
-        }
-
-        for (i = 0, n = 0; i < console_size; i += n + 1) {
-            for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
-                ch = console_buffer[(console_index + i + n) % console_size];
-                if (ch == '\n')
-                    break;
-                line[n] = ch;
-            }
-
-            if (n > 0) {
-                if (line[n - 1] == '\r')
-                    n--;
-                line[n] = 0;
-                /* Don't use DHD_ERROR macro since we print
-                 * a lot of information quickly. The macro
-                 * will truncate a lot of the printfs
-                 */
-
-                printf("CONSOLE: %s\n", line);
-            }
-        }
+        dhd_bus_dump_trap_info(bus, &strbuf);
     }
     }

-printbuf:
     if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
-        printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
+        DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));

-        /* wake up IOCTL wait event */
-        dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+        dhd_bus_dump_console_buffer(bus);
+        dhd_prot_debug_info_print(bus->dhd);

 #if defined(DHD_FW_COREDUMP)
         /* save core dump or write to a file */
         if (bus->dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+            bus->dhd->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
             bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
             dhdpcie_mem_dump(bus);
         }
 #endif /* DHD_FW_COREDUMP */

+        /* set the trap-occurred flag only after all the memdump,
+         * logdump and sssr dump collection have been scheduled
+         */
+        if (dongle_trap_occured) {
+            bus->dhd->dongle_trap_occured = TRUE;
+        }
+
+        /* wake up IOCTL wait event */
+        dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+        copy_hang_info_trap(bus->dhd);
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
+        dhd_schedule_reset(bus->dhd);

     }

-done:
+done1:
+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req_clear(bus);
+    }
+
+    DHD_GENERAL_LOCK(bus->dhd, flags);
+    DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
+    dhd_os_busbusy_wake(bus->dhd);
+    DHD_GENERAL_UNLOCK(bus->dhd, flags);
done2:
     if (mbuffer)
         MFREE(bus->dhd->osh, mbuffer, msize);
     if (str)
         MFREE(bus->dhd->osh, str, maxstrlen);

-    if (console_buffer)
-        MFREE(bus->dhd->osh, console_buffer, console_size);
-
     return bcmerror;
 } /* dhdpcie_checkdied */
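checkdied() now brackets its work between DHD_BUS_BUSY_SET_IN_CHECKDIED and the matching clear/wake, so concurrent bus teardown can wait until the post-mortem is finished. A minimal editor's sketch of that bracketing pattern (the struct, stub lock functions and flag name are illustrative, not the DHD macros):

/* Editor's sketch, not driver code. */
#define BUSY_IN_CHECKDIED 0x1

struct ctx { unsigned busy; };

static void lock(struct ctx *c)         { (void)c; /* take the general lock */ }
static void unlock(struct ctx *c)       { (void)c; /* drop the general lock */ }
static void wake_waiters(struct ctx *c) { (void)c; /* wake busy-state waiters */ }

static void with_busy_flag(struct ctx *c, void (*work)(struct ctx *))
{
    lock(c);
    c->busy |= BUSY_IN_CHECKDIED;   /* advertise: don't tear the bus down now */
    unlock(c);

    work(c);                        /* read shared area, dump trap info, ... */

    lock(c);
    c->busy &= ~BUSY_IN_CHECKDIED;
    wake_waiters(c);
    unlock(c);
}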
-

 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
@@ -1885,6 +3822,17 @@
     }

     start = bus->dongle_ram_base;
+    read_size = 4;
+    /* check for dead bus */
+    {
+        uint test_word = 0;
+        ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
+        /* if read error or bus timeout */
+        if (ret || (test_word == 0xFFFFFFFF)) {
+            return;
+        }
+    }
+
     /* Get full mem size */
     size = bus->ramsize;
     /* Read mem content */
@@ -1905,35 +3853,49 @@
     return;
 }

-
 #if defined(DHD_FW_COREDUMP)
 static int
-dhdpcie_mem_dump(dhd_bus_t *bus)
+dhdpcie_get_mem_dump(dhd_bus_t *bus)
 {
-    int ret = 0;
-    int size; /* Full mem size */
-    int start = bus->dongle_ram_base; /* Start address */
+    int ret = BCME_OK;
+    int size = 0;
+    int start = 0;
     int read_size = 0; /* Read size of each iteration */
-    uint8 *buf = NULL, *databuf = NULL;
+    uint8 *p_buf = NULL, *databuf = NULL;
+
+    if (!bus) {
+        DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    if (!bus->dhd) {
+        DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    size = bus->ramsize; /* Full mem size */
+    start = bus->dongle_ram_base; /* Start address */

     /* Get full mem size */
-    size = bus->ramsize;
-    buf = dhd_get_fwdump_buf(bus->dhd, size);
-    if (!buf) {
-        DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+    p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+    if (!p_buf) {
+        DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+            __FUNCTION__, size));
         return BCME_ERROR;
     }

     /* Read mem content */
-    DHD_TRACE_HW4(("Dump dongle memory"));
-    databuf = buf;
-    while (size)
-    {
+    DHD_TRACE_HW4(("Dump dongle memory\n"));
+    databuf = p_buf;
+    while (size > 0) {
         read_size = MIN(MEMBLOCK, size);
-        if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
-        {
+        ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
+        if (ret) {
             DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
-            return BCME_ERROR;
+#ifdef DHD_DEBUG_UART
+            bus->dhd->memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+            break;
         }
         DHD_TRACE(("."));

@@ -1943,35 +3905,114 @@
         databuf += read_size;
     }

-    dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+    return ret;
+}
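dhdpcie_get_mem_dump() reads SoC RAM in MEMBLOCK-sized slices and, unlike the old code, keeps whatever was read before a failing slice instead of bailing out. The loop shape, reduced to an editor's sketch (rd stands in for dhdpcie_bus_membytes(); 2048 is a stand-in block size):

/* Editor's sketch, not driver code. */
static int dump_ram(int (*rd)(int addr, unsigned char *dst, int len),
                    int start, int size, unsigned char *buf)
{
    int ret = 0;

    while (size > 0) {
        int chunk = size < 2048 ? size : 2048;   /* MIN(MEMBLOCK, size) */
        if ((ret = rd(start, buf, chunk)) != 0)
            break;                               /* keep the partial dump, report err */
        start += chunk;
        size  -= chunk;
        buf   += chunk;
    }
    return ret;
}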

+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+    dhd_pub_t *dhdp;
+    int ret;
+
+#ifdef EXYNOS_PCIE_DEBUG
+    exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+    if (bus->is_linkdown) {
+        DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
+        /* panic only for DUMP_MEMFILE_BUGON */
+        ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
+        return BCME_ERROR;
+    }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+    dhdp = bus->dhd;
+    if (!dhdp) {
+        DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+        DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+    if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
+        return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+    ret = dhdpcie_get_mem_dump(bus);
+    if (ret) {
+        DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+            __FUNCTION__, ret));
+        return ret;
+    }
+
+    dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
     /* buf, actually soc_ram free handled in dhd_{free,clear} */

+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+    pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+    pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
     return ret;
+}
+
+int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
+{
+    if (!dhdp) {
+        DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    return dhdpcie_get_mem_dump(dhdp->bus);
 }

 int
 dhd_bus_mem_dump(dhd_pub_t *dhdp)
 {
     dhd_bus_t *bus = dhdp->bus;
+    int ret = BCME_ERROR;

-    if (bus->suspended) {
-        DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__));
-        return 0;
+    if (dhdp->busstate == DHD_BUS_DOWN) {
+        DHD_ERROR(("%s bus is down\n", __FUNCTION__));
+        return BCME_ERROR;
     }

-    return dhdpcie_mem_dump(bus);
+    /* Try to resume if already suspended or suspend in progress */
+#ifdef DHD_PCIE_RUNTIMEPM
+    dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+    /* Skip if still in suspended or suspend in progress */
+    if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
+        DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+            __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+        return BCME_ERROR;
+    }
+
+    DHD_OS_WAKE_LOCK(dhdp);
+    ret = dhdpcie_mem_dump(bus);
+    DHD_OS_WAKE_UNLOCK(dhdp);
+    return ret;
 }
-#endif /* DHD_FW_COREDUMP */
+#endif /* DHD_FW_COREDUMP */

 int
 dhd_socram_dump(dhd_bus_t *bus)
 {
 #if defined(DHD_FW_COREDUMP)
-    return (dhdpcie_mem_dump(bus));
+    DHD_OS_WAKE_LOCK(bus->dhd);
+    dhd_bus_mem_dump(bus->dhd);
+    DHD_OS_WAKE_UNLOCK(bus->dhd);
+    return 0;
 #else
     return -1;
-#endif
+#endif // endif
 }

 /**
@@ -1985,6 +4026,14 @@
     int detect_endian_flag = 0x01;
     bool little_endian;

+    if (write && bus->is_linkdown) {
+        DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req(bus);
+    }
     /* Detect endianness. */
     little_endian = *(char *)&detect_endian_flag;

@@ -1994,18 +4043,25 @@
     */

     /* Determine initial transfer parameters */
+#ifdef DHD_SUPPORT_64BIT
     dsize = sizeof(uint64);
+#else /* !DHD_SUPPORT_64BIT */
+    dsize = sizeof(uint32);
+#endif /* DHD_SUPPORT_64BIT */

     /* Do the transfer(s) */
     if (write) {
         while (size) {
-            if (size >= sizeof(uint64) && little_endian &&
-#ifdef CONFIG_64BIT
-                !(address % 8) &&
-#endif /* CONFIG_64BIT */
-                1) {
+#ifdef DHD_SUPPORT_64BIT
+            if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
                 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
-            } else {
+            }
+#else /* !DHD_SUPPORT_64BIT */
+            if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
+                dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
+            }
+#endif /* DHD_SUPPORT_64BIT */
+            else {
                 dsize = sizeof(uint8);
                 dhdpcie_bus_wtcm8(bus, address, *data);
             }
@@ -2018,13 +4074,18 @@
         }
     } else {
         while (size) {
-            if (size >= sizeof(uint64) && little_endian &&
-#ifdef CONFIG_64BIT
-                !(address % 8) &&
-#endif /* CONFIG_64BIT */
-                1) {
+#ifdef DHD_SUPPORT_64BIT
+            if (size >= sizeof(uint64) && little_endian && !(address % 8))
+            {
                 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
-            } else {
+            }
+#else /* !DHD_SUPPORT_64BIT */
+            if (size >= sizeof(uint32) && little_endian && !(address % 4))
+            {
+                *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
+            }
+#endif /* DHD_SUPPORT_64BIT */
+            else {
                 dsize = sizeof(uint8);
                 *data = dhdpcie_bus_rtcm8(bus, address);
             }
@@ -2035,6 +4096,9 @@
             address += dsize;
         }
     }
+    }
+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req_clear(bus);
     }
     return BCME_OK;
 } /* dhdpcie_bus_membytes */
20504114 int ret = BCME_OK;
20514115 #ifdef DHD_LOSSLESS_ROAMING
20524116 dhd_pub_t *dhdp = bus->dhd;
2053
-#endif
4117
+#endif // endif
20544118 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
20554119
20564120 /* ASSERT on flow_id */
2057
- if (flow_id >= bus->max_sub_queues) {
4121
+ if (flow_id >= bus->max_submission_rings) {
20584122 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
2059
- flow_id, bus->max_sub_queues));
4123
+ flow_id, bus->max_submission_rings));
20604124 return 0;
20614125 }
20624126
20634127 flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
4128
+
4129
+ if (flow_ring_node->prot_info == NULL) {
4130
+ DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
4131
+ return BCME_NOTREADY;
4132
+ }
20644133
20654134 #ifdef DHD_LOSSLESS_ROAMING
20664135 if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
....@@ -2113,19 +4182,17 @@
21134182 eh = (struct ether_header *) pktdata;
21144183 if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
21154184 uint8 prio = (uint8)PKTPRIO(txp);
2116
-
21174185 /* Restore to original priority for 802.1X packet */
21184186 if (prio == PRIO_8021D_NC) {
2119
- PKTSETPRIO(txp, PRIO_8021D_BE);
4187
+ PKTSETPRIO(txp, dhdp->prio_8021x);
21204188 }
21214189 }
21224190 #endif /* DHD_LOSSLESS_ROAMING */
2123
-
21244191 /* Attempt to transfer packet over flow ring */
21254192 ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
21264193 if (ret != BCME_OK) { /* may not have resources in flow ring */
21274194 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
2128
- dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
4195
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
21294196 /* reinsert at head */
21304197 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
21314198 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
....@@ -2135,8 +4202,13 @@
21354202 }
21364203 }
21374204
2138
- dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2139
-
4205
+#ifdef DHD_HP2P
4206
+ if (!flow_ring_node->hp2p_ring) {
4207
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4208
+ }
4209
+#else
4210
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4211
+#endif // endif
21404212 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
21414213 }
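The loop above drains the flow queue into the message ring and, when dhd_prot_txdata() reports that the ring has no resources, reinserts the packet at the head so ordering survives a full ring. The dequeue/post/reinsert skeleton in isolation (an editor's sketch; all names are stand-ins, and the list-based queue is purely illustrative):

/* Editor's sketch, not driver code. */
#include <stddef.h>

struct pkt   { struct pkt *next; };
struct queue { struct pkt *head; };

static struct pkt *q_dequeue(struct queue *q)
{
    struct pkt *p = q->head;
    if (p)
        q->head = p->next;
    return p;
}

static void q_reinsert_head(struct queue *q, struct pkt *p)
{
    p->next = q->head;
    q->head = p;
}

static int ring_post(struct pkt *p) { (void)p; return 0; /* 0 = accepted */ }

static void service_queue(struct queue *q)
{
    struct pkt *p;

    while ((p = q_dequeue(q)) != NULL) {
        if (ring_post(p) != 0) {
            /* no ring space: put the packet back at the head so ordering
             * is preserved, and stop until the dongle drains the ring */
            q_reinsert_head(q, p);
            break;
        }
    }
}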
21424214
....@@ -2148,6 +4220,9 @@
21484220 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
21494221 {
21504222 uint16 flowid;
4223
+#ifdef IDLE_TX_FLOW_MGMT
4224
+ uint8 node_status;
4225
+#endif /* IDLE_TX_FLOW_MGMT */
21514226 flow_queue_t *queue;
21524227 flow_ring_node_t *flow_ring_node;
21534228 unsigned long flags;
....@@ -2164,34 +4239,77 @@
21644239 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
21654240
21664241 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
2167
- __FUNCTION__, flowid, flow_ring_node->status,
2168
- flow_ring_node->active));
4242
+ __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
21694243
21704244 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
21714245 if ((flowid >= bus->dhd->num_flow_rings) ||
4246
+#ifdef IDLE_TX_FLOW_MGMT
4247
+ (!flow_ring_node->active))
4248
+#else
21724249 (!flow_ring_node->active) ||
21734250 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
2174
- (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) {
4251
+ (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
4252
+#endif /* IDLE_TX_FLOW_MGMT */
4253
+ {
21754254 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
21764255 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
21774256 __FUNCTION__, flowid, flow_ring_node->status,
21784257 flow_ring_node->active));
21794258 ret = BCME_ERROR;
2180
- goto toss;
4259
+ goto toss;
21814260 }
4261
+
4262
+#ifdef IDLE_TX_FLOW_MGMT
4263
+ node_status = flow_ring_node->status;
4264
+
4265
+ /* handle diffrent status states here!! */
4266
+ switch (node_status)
4267
+ {
4268
+ case FLOW_RING_STATUS_OPEN:
4269
+
4270
+ if (bus->enable_idle_flowring_mgmt) {
4271
+ /* Move the node to the head of active list */
4272
+ dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
4273
+ }
4274
+ break;
4275
+
4276
+ case FLOW_RING_STATUS_SUSPENDED:
4277
+ DHD_INFO(("Need to Initiate TX Flow resume\n"));
4278
+ /* Issue resume_ring request */
4279
+ dhd_bus_flow_ring_resume_request(bus,
4280
+ flow_ring_node);
4281
+ break;
4282
+
4283
+ case FLOW_RING_STATUS_CREATE_PENDING:
4284
+ case FLOW_RING_STATUS_RESUME_PENDING:
4285
+ /* Dont do anything here!! */
4286
+ DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4287
+ node_status));
4288
+ break;
4289
+
4290
+ case FLOW_RING_STATUS_DELETE_PENDING:
4291
+ default:
4292
+ DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
4293
+ flowid, node_status));
4294
+ /* error here!! */
4295
+ ret = BCME_ERROR;
4296
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4297
+ goto toss;
4298
+ }
4299
+ /* Now queue the packet */
4300
+#endif /* IDLE_TX_FLOW_MGMT */
21824301
21834302 queue = &flow_ring_node->queue; /* queue associated with flow ring */
21844303
2185
- if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
4304
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
21864305 txp_pend = txp;
2187
- }
21884306
21894307 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
21904308
21914309 if (flow_ring_node->status) {
21924310 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
2193
- __FUNCTION__, flowid, flow_ring_node->status,
2194
- flow_ring_node->active));
4311
+ __FUNCTION__, flowid, flow_ring_node->status,
4312
+ flow_ring_node->active));
21954313 if (txp_pend) {
21964314 txp = txp_pend;
21974315 goto toss;
....@@ -2217,30 +4335,35 @@
22174335
22184336 toss:
22194337 DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
2220
-#ifdef CUSTOMER_HW_31_2
2221
- dhd_txcomplete(bus->dhd->osh, txp, TRUE);
2222
-#else
22234338 PKTCFREE(bus->dhd->osh, txp, TRUE);
2224
-#endif
22254339 return ret;
22264340 } /* dhd_bus_txdata */
-

 void
 dhd_bus_stop_queue(struct dhd_bus *bus)
 {
     dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
-    bus->bus_flowctrl = TRUE;
 }

 void
 dhd_bus_start_queue(struct dhd_bus *bus)
 {
+    /*
+     * The Tx queue was stopped either because of a resource shortage or
+     * because the bus is not in a state to be turned back on.
+     *
+     * Note that we restart the network interface only when we have
+     * enough resources; whoever frees the resources must first change
+     * the flag indicating that all of them are available.
+     */
+    if (dhd_prot_check_tx_resource(bus->dhd)) {
+        DHD_ERROR(("%s: Interface NOT started, previously stopped "
+            "due to resource shortage\n", __FUNCTION__));
+        return;
+    }
     dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
-    bus->bus_flowctrl = TRUE;
 }

-#if defined(DHD_DEBUG)
 /* Device console input function */
 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
 {
@@ -2253,7 +4376,6 @@

     /* Don't allow input if dongle is in reset */
     if (bus->dhd->dongle_reset) {
-        dhd_os_sdunlock(bus->dhd);
         return BCME_NOTREADY;
     }
@@ -2279,7 +4401,6 @@
 done:
     return rv;
 } /* dhd_bus_console_in */
-#endif /* defined(DHD_DEBUG) */

 /**
  * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
@@ -2291,46 +4412,80 @@
....@@ -2291,46 +4412,80 @@
22914412 dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
22924413 }
22934414
4415
+void
4416
+dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
4417
+{
4418
+ dhdpcie_os_setbar1win(bus, addr);
4419
+}
4420
+
22944421 /** 'offset' is a backplane address */
22954422 void
22964423 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
22974424 {
2298
- *(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
4425
+ if (bus->is_linkdown) {
4426
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4427
+ return;
4428
+ } else {
4429
+ dhdpcie_os_wtcm8(bus, offset, data);
4430
+ }
22994431 }
23004432
23014433 uint8
23024434 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
23034435 {
23044436 volatile uint8 data;
2305
-
2306
- data = *(volatile uint8 *)(bus->tcm + offset);
2307
-
4437
+ if (bus->is_linkdown) {
4438
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4439
+ data = (uint8)-1;
4440
+ } else {
4441
+ data = dhdpcie_os_rtcm8(bus, offset);
4442
+ }
23084443 return data;
23094444 }
23104445
23114446 void
23124447 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
23134448 {
2314
- *(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
4449
+ if (bus->is_linkdown) {
4450
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4451
+ return;
4452
+ } else {
4453
+ dhdpcie_os_wtcm32(bus, offset, data);
4454
+ }
23154455 }
23164456 void
23174457 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
23184458 {
2319
- *(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
4459
+ if (bus->is_linkdown) {
4460
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4461
+ return;
4462
+ } else {
4463
+ dhdpcie_os_wtcm16(bus, offset, data);
4464
+ }
23204465 }
4466
+#ifdef DHD_SUPPORT_64BIT
23214467 void
23224468 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
23234469 {
2324
- *(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
4470
+ if (bus->is_linkdown) {
4471
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4472
+ return;
4473
+ } else {
4474
+ dhdpcie_os_wtcm64(bus, offset, data);
4475
+ }
23254476 }
4477
+#endif /* DHD_SUPPORT_64BIT */
23264478
23274479 uint16
23284480 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
23294481 {
23304482 volatile uint16 data;
2331
-
2332
- data = *(volatile uint16 *)(bus->tcm + offset);
2333
-
4483
+ if (bus->is_linkdown) {
4484
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4485
+ data = (uint16)-1;
4486
+ } else {
4487
+ data = dhdpcie_os_rtcm16(bus, offset);
4488
+ }
23344489 return data;
23354490 }
23364491
....@@ -2338,138 +4493,177 @@
23384493 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
23394494 {
23404495 volatile uint32 data;
2341
-
2342
- data = *(volatile uint32 *)(bus->tcm + offset);
2343
-
4496
+ if (bus->is_linkdown) {
4497
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4498
+ data = (uint32)-1;
4499
+ } else {
4500
+ data = dhdpcie_os_rtcm32(bus, offset);
4501
+ }
23444502 return data;
23454503 }
23464504
4505
+#ifdef DHD_SUPPORT_64BIT
23474506 uint64
23484507 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
23494508 {
23504509 volatile uint64 data;
2351
-
2352
- data = *(volatile uint64 *)(bus->tcm + offset);
2353
-
4510
+ if (bus->is_linkdown) {
4511
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4512
+ data = (uint64)-1;
4513
+ } else {
4514
+ data = dhdpcie_os_rtcm64(bus, offset);
4515
+ }
23544516 return data;
23554517 }
4518
+#endif /* DHD_SUPPORT_64BIT */
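All of the TCM accessors above now share one guard: when bus->is_linkdown is set, reads return all-ones (which is what a dead PCIe link would return anyway) and writes are silently dropped. An editor's sketch of the pattern outside the driver:

/* Editor's sketch, not driver code. */
#include <stdint.h>

static int link_is_down = 0;   /* stand-in for bus->is_linkdown */

static uint32_t guarded_read32(volatile uint32_t *reg)
{
    if (link_is_down)
        return (uint32_t)-1;   /* mimic what a dead PCIe link returns */
    return *reg;
}

static void guarded_write32(volatile uint32_t *reg, uint32_t v)
{
    if (link_is_down)
        return;                /* dropping the write avoids a bus error */
    *reg = v;
}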

 /** A snippet of dongle memory is shared between host and dongle */
 void
 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
 {
     uint64 long_data;
-    ulong tcm_offset;
+    ulong addr; /* dongle address */

     DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));

+    if (bus->is_linkdown) {
+        DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+        return;
+    }
+
+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req(bus);
+    }
     switch (type) {
         case D2H_DMA_SCRATCH_BUF:
-        {
-            pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
             long_data = HTOL64(*(uint64 *)data);
-            tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
-            dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
-            prhex(__FUNCTION__, data, len);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;
-        }

-        case D2H_DMA_SCRATCH_BUF_LEN:
-        {
-            pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
-            tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
-            dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
-            prhex(__FUNCTION__, data, len);
+        case D2H_DMA_SCRATCH_BUF_LEN :
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
+            dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;
-        }

         case H2D_DMA_INDX_WR_BUF:
-        {
-            pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
-
             long_data = HTOL64(*(uint64 *)data);
-            tcm_offset = (ulong)shmem->rings_info_ptr;
-            tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
-            dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
-            prhex(__FUNCTION__, data, len);
+            addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;
-        }

         case H2D_DMA_INDX_RD_BUF:
-        {
-            pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
             long_data = HTOL64(*(uint64 *)data);
-            tcm_offset = (ulong)shmem->rings_info_ptr;
-            tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
-            dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
-            prhex(__FUNCTION__, data, len);
+            addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;
-        }

         case D2H_DMA_INDX_WR_BUF:
-        {
-            pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
             long_data = HTOL64(*(uint64 *)data);
-            tcm_offset = (ulong)shmem->rings_info_ptr;
-            tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
-            dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
-            prhex(__FUNCTION__, data, len);
+            addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;
-        }

         case D2H_DMA_INDX_RD_BUF:
-        {
-            pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
             long_data = HTOL64(*(uint64 *)data);
-            tcm_offset = (ulong)shmem->rings_info_ptr;
-            tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
-            dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
-            prhex(__FUNCTION__, data, len);
-            break;
-        }
-
-        case RING_ITEM_LEN:
-            tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
-            tcm_offset += OFFSETOF(ring_mem_t, len_items);
-            dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+            addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;

-        case RING_MAX_ITEMS:
-            tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
-            tcm_offset += OFFSETOF(ring_mem_t, max_item);
-            dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
-            break;
-
-        case RING_BUF_ADDR:
+        case H2D_IFRM_INDX_WR_BUF:
             long_data = HTOL64(*(uint64 *)data);
-            tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
-            tcm_offset += OFFSETOF(ring_mem_t, base_addr);
-            dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
-            prhex(__FUNCTION__, data, len);
+            addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
             break;

-        case RING_WR_UPD:
-            tcm_offset = bus->ring_sh[ringid].ring_state_w;
-            dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+        case RING_ITEM_LEN :
+            addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
+            dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
             break;

-        case RING_RD_UPD:
-            tcm_offset = bus->ring_sh[ringid].ring_state_r;
-            dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+        case RING_MAX_ITEMS :
+            addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
+            dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+            break;
+
+        case RING_BUF_ADDR :
+            long_data = HTOL64(*(uint64 *)data);
+            addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
+            if (dhd_msg_level & DHD_INFO_VAL) {
+                prhex(__FUNCTION__, data, len);
+            }
+            break;
+
+        case RING_WR_UPD :
+            addr = bus->ring_sh[ringid].ring_state_w;
+            dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+            break;
+
+        case RING_RD_UPD :
+            addr = bus->ring_sh[ringid].ring_state_r;
+            dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
             break;

         case D2H_MB_DATA:
-            dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
-                (uint32) HTOL32(*(uint32 *)data));
+            addr = bus->d2h_mb_data_ptr_addr;
+            dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
             break;

         case H2D_MB_DATA:
-            dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
-                (uint32) HTOL32(*(uint32 *)data));
+            addr = bus->h2d_mb_data_ptr_addr;
+            dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+            break;
+
+        case HOST_API_VERSION:
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
+            dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+            break;
+
+        case DNGL_TO_HOST_TRAP_ADDR:
+            long_data = HTOL64(*(uint64 *)data);
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
+            dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
+            DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
+            break;
+
+        case HOST_SCB_ADDR:
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
+#ifdef DHD_SUPPORT_64BIT
+            dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
+#else /* !DHD_SUPPORT_64BIT */
+            dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
+#endif /* DHD_SUPPORT_64BIT */
+            DHD_INFO(("Wrote host_scb_addr:0x%x\n",
+                (uint32) HTOL32(*(uint32 *)data)));
             break;

         default:
             break;
+    }
+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req_clear(bus);
     }
 } /* dhd_bus_cmn_writeshared */
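Every value written above passes through an HTOL* conversion because the dongle's shared structures are little-endian, and the new DHD_*_MEMBER_ADDR-style macros replace the old pointer arithmetic with base-plus-offsetof addressing. A hedged editor's sketch of that addressing (the layout struct is illustrative, not the real pciedev_shared_t):

/* Editor's sketch, not driver code. */
#include <stdint.h>
#include <stddef.h>

struct shared_area_layout {        /* illustrative layout only */
    uint32_t flags;
    uint32_t host_dma_scratch_buffer_len;
};

static uint32_t member_addr(uint32_t shared_base, size_t member_offset)
{
    /* dongle address of a field = base of shared area + field offset */
    return shared_base + (uint32_t)member_offset;
}

/* usage sketch:
 *   member_addr(base, offsetof(struct shared_area_layout,
 *                              host_dma_scratch_buffer_len))
 */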

@@ -2477,39 +4671,52 @@
 void
 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
 {
-    ulong tcm_offset;
+    ulong addr; /* dongle address */

+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req(bus);
+    }
     switch (type) {
-        case RING_WR_UPD:
-            tcm_offset = bus->ring_sh[ringid].ring_state_w;
-            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+        case RING_WR_UPD :
+            addr = bus->ring_sh[ringid].ring_state_w;
+            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
             break;
-        case RING_RD_UPD:
-            tcm_offset = bus->ring_sh[ringid].ring_state_r;
-            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+
+        case RING_RD_UPD :
+            addr = bus->ring_sh[ringid].ring_state_r;
+            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
             break;
-        case TOTAL_LFRAG_PACKET_CNT:
-        {
-            pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
-            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
-                (ulong) &sh->total_lfrag_pkt_cnt));
+
+        case TOTAL_LFRAG_PACKET_CNT :
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
+            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
             break;
-        }
+
         case H2D_MB_DATA:
-            *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
+            addr = bus->h2d_mb_data_ptr_addr;
+            *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
             break;
+
         case D2H_MB_DATA:
-            *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
+            addr = bus->d2h_mb_data_ptr_addr;
+            *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
             break;
-        case MAX_HOST_RXBUFS:
-        {
-            pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
-            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
-                (ulong) &sh->max_host_rxbufs));
+
+        case MAX_HOST_RXBUFS :
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
+            *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
             break;
-        }
+
+        case HOST_SCB_ADDR:
+            addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
+            *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+            break;
+
         default :
            break;
+    }
+    if (MULTIBP_ENAB(bus->sih)) {
+        dhd_bus_pcie_pwr_req_clear(bus);
     }
 }
....@@ -2523,13 +4730,18 @@
25234730 {
25244731 }
25254732
4733
+/**
4734
+ * @param params input buffer, NULL for 'set' operation.
4735
+ * @param plen length of 'params' buffer, 0 for 'set' operation.
4736
+ * @param arg output buffer
4737
+ */
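+/* Usage sketch (hypothetical caller, for illustration only): a 'get' passes no
+ * qualifiers and receives the result in 'arg'; a 'set' carries the value in
+ * 'arg' with params == NULL and plen == 0, e.g.
+ *
+ *   uint32 val;
+ *   dhd_bus_iovar_op(dhdp, "memsize", NULL, 0, &val, sizeof(val), FALSE);
+ */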
25264738 int
25274739 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
25284740 void *params, int plen, void *arg, int len, bool set)
25294741 {
25304742 dhd_bus_t *bus = dhdp->bus;
25314743 const bcm_iovar_t *vi = NULL;
2532
- int bcmerror = 0;
4744
+ int bcmerror = BCME_UNSUPPORTED;
25334745 int val_size;
25344746 uint32 actionid;
25354747
....@@ -2537,12 +4749,18 @@
25374749
25384750 ASSERT(name);
25394751 ASSERT(len >= 0);
4752
+ if (!name || len < 0)
4753
+ return BCME_BADARG;
25404754
25414755 /* Get MUST have return space */
25424756 ASSERT(set || (arg && len));
4757
+ if (!(set || (arg && len)))
4758
+ return BCME_BADARG;
25434759
25444760 /* Set does NOT take qualifiers */
25454761 ASSERT(!set || (!params && !plen));
4762
+ if (!(!set || (!params && !plen)))
4763
+ return BCME_BADARG;
25464764
25474765 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
25484766 name, (set ? "set" : "get"), len, plen));
....@@ -2552,6 +4770,13 @@
25524770 goto exit;
25534771 }
25544772
4773
+ if (MULTIBP_ENAB(bus->sih)) {
4774
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4775
+ DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
4776
+ } else {
4777
+ dhd_bus_pcie_pwr_req(bus);
4778
+ }
4779
+ }
25554780
25564781 /* set up 'params' pointer in case this is a set command so that
25574782 * the convenience int and bool code can be common to set and get
....@@ -2573,6 +4798,22 @@
25734798 bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
25744799
25754800 exit:
4801
+ /* In DEVRESET_QUIESCE/DEVRESET_ON,
4802
+ * this includes a dongle re-attach, which re-initializes the pwr_req_ref count to 0 and
4803
+ * causes a pwr_req_ref count mismatch (and a hang) in the pwr req clear function.
4804
+ * In this case, bypass pwr req clear.
4805
+ */
4806
+ if (bcmerror == BCME_DNGL_DEVRESET) {
4807
+ bcmerror = BCME_OK;
4808
+ } else {
4809
+ if (MULTIBP_ENAB(bus->sih)) {
4810
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4811
+ DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
4812
+ } else {
4813
+ dhd_bus_pcie_pwr_req_clear(bus);
4814
+ }
4815
+ }
4816
+ }
25764817 return bcmerror;
25774818 } /* dhd_bus_iovar_op */
25784819
....@@ -2812,17 +5053,17 @@
28125053
28135054 sh = bus->pcie_sh;
28145055
2815
- DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
5056
+ DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
28165057
2817
- if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */
5058
+ if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
28185059
2819
- dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
5060
+ dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
28205061 (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
28215062
28225063 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
28235064 "count<%u> status<%u> wrap<%u>\n"
28245065 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
2825
- (int)sh->buzzz,
5066
+ (int)sh->buzz_dbg_ptr,
28265067 (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
28275068 buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
28285069 buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
....@@ -2873,7 +5114,7 @@
28735114
28745115 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
28755116 ((sih)->buscoretype == PCIE2_CORE_ID))
2876
-
5117
+#ifdef DHD_PCIE_REG_ACCESS
28775118 static bool
28785119 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
28795120 {
....@@ -2905,7 +5146,275 @@
29055146
29065147 return TRUE;
29075148 }
5149
+#endif /* DHD_PCIE_REG_ACCESS */
29085150
5151
+#define PCIE_FLR_CAPAB_BIT 28
5152
+#define PCIE_FUNCTION_LEVEL_RESET_BIT 15
5153
+
5154
+/* Change delays only for QT HW; FPGA and silicon use the same delay */
5155
+#ifdef BCMQT_HW
5156
+#define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
5157
+#define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
5158
+#else
5159
+#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
5160
+#define DHD_SSRESET_STATUS_RETRY_DELAY 40u
5161
+#endif // endif
5162
+/*
5163
+ * Increase SSReset de-assert time to 8ms.
5164
+ * since de-assertion takes longer during re-scan on 4378B0.
5165
+ */
5166
+#define DHD_SSRESET_STATUS_RETRIES 200u
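+/* Resulting poll budget on silicon/FPGA: DHD_SSRESET_STATUS_RETRIES *
+ * DHD_SSRESET_STATUS_RETRY_DELAY = 200 * 40us = 8ms (proportionally longer
+ * with the QT HW delay above).
+ */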
5167
+
5168
+static void
5169
+dhdpcie_enum_reg_init(dhd_bus_t *bus)
5170
+{
5171
+ /* initialize Function control register (clear bit 4) to HW init value */
5172
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5173
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
5174
+ PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
5175
+
5176
+ /* clear IntMask */
5177
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5178
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
5179
+ /* clear IntStatus */
5180
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5181
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
5182
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5183
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
5184
+
5185
+ /* clear MSIVector */
5186
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5187
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
5188
+ /* clear MSIIntMask */
5189
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5190
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
5191
+ /* clear MSIIntStatus */
5192
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5193
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
5194
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5195
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
5196
+
5197
+ /* clear PowerIntMask */
5198
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5199
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
5200
+ /* clear PowerIntStatus */
5201
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5202
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
5203
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5204
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
5205
+
5206
+ /* clear MailboxIntMask */
5207
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5208
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
5209
+ /* clear MailboxInt */
5210
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5211
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
5212
+ si_corereg(bus->sih, bus->sih->buscoreidx,
5213
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
5214
+}
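+/* Note on the nested si_corereg() idiom above: the inner call with (mask 0,
+ * val 0) is a plain read; writing that value back with mask ~0 clears the
+ * bits that were set, assuming these status registers are write-1-to-clear.
+ */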
5215
+
5216
+int
5217
+dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
5218
+{
5219
+ uint flr_capab;
5220
+ uint val;
5221
+ int retry = 0;
5222
+
5223
+ DHD_ERROR(("******** Perform FLR ********\n"));
5224
+
5225
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
5226
+ if (bus->pcie_mailbox_mask != 0) {
5227
+ dhdpcie_bus_intr_disable(bus);
5228
+ }
5229
+ /* initialize F0 enum registers before FLR for rev66/67 */
5230
+ dhdpcie_enum_reg_init(bus);
5231
+ }
5232
+
5233
+ /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
5234
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
5235
+ flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
5236
+ DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
5237
+ PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
5238
+ if (!flr_capab) {
5239
+ DHD_ERROR(("Chip does not support FLR\n"));
5240
+ return BCME_UNSUPPORTED;
5241
+ }
5242
+ /* WAR: Disable FLR reset For H2 chip to perform legacy reset */
5243
+ else if ((bus->sih->chip == CYW55560_CHIP_ID) || (bus->sih->chip == BCM4375_CHIP_ID)) {
5244
+ DHD_INFO(("H2/4375 CHIP return unsupported\n"));
5245
+ return BCME_UNSUPPORTED;
5246
+ }
5247
+
5248
+ /* Save pcie config space */
5249
+ DHD_INFO(("Save Pcie Config Space\n"));
5250
+ DHD_PCIE_CONFIG_SAVE(bus);
5251
+
5252
+ /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
5253
+ DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5254
+ PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5255
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5256
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5257
+ val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5258
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5259
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5260
+
5261
+ /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
5262
+ DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
5263
+ OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
5264
+
5265
+ if (force_fail) {
5266
+ DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5267
+ PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5268
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5269
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5270
+ val));
5271
+ val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
5272
+ DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5273
+ val));
5274
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5275
+
5276
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5277
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5278
+ val));
5279
+ }
5280
+
5281
+ /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5282
+ DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5283
+ PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5284
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5285
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5286
+ val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5287
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5288
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5289
+
5290
+ /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5291
+ DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
5292
+ "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5293
+ do {
5294
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5295
+ DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5296
+ PCIE_CFG_SUBSYSTEM_CONTROL, val));
5297
+ val = val & (1 << PCIE_SSRESET_STATUS_BIT);
5298
+ OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
5299
+ } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
5300
+
5301
+ if (val) {
5302
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5303
+ PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
5304
+ /* User has to fire the IOVAR again, if force_fail is needed */
5305
+ if (force_fail) {
5306
+ bus->flr_force_fail = FALSE;
5307
+ DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
5308
+ }
5309
+ return BCME_DONGLE_DOWN;
5310
+ }
5311
+
5312
+ /* Restore pcie config space */
5313
+ DHD_INFO(("Restore Pcie Config Space\n"));
5314
+ DHD_PCIE_CONFIG_RESTORE(bus);
5315
+
5316
+ DHD_ERROR(("******** FLR Succedeed ********\n"));
5317
+
5318
+ return BCME_OK;
5319
+}
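+/* FLR sequence as implemented above, in short:
+ *   1. save the PCIe config space
+ *   2. set PCIE_CFG_DEVICE_CONTROL bit 15 to trigger FLR and wait
+ *      DHD_FUNCTION_LEVEL_RESET_DELAY
+ *   3. clear bit 15, then poll PCIE_CFG_SUBSYSTEM_CONTROL bit 13 (SSReset
+ *      status) until it clears or the retry budget runs out
+ *   4. restore the PCIe config space
+ */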
5320
+
5321
+#ifdef DHD_USE_BP_RESET
5322
+#define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
5323
+
5324
+#define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
5325
+#define DHD_BP_RESET_STATUS_RETRIES 50u
5326
+
5327
+#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
5328
+#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
5329
+int
5330
+dhd_bus_perform_bp_reset(struct dhd_bus *bus)
5331
+{
5332
+ uint val;
5333
+ int retry = 0;
5334
+ uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
5335
+ int ret = BCME_OK;
5336
+ bool cond;
5337
+
5338
+ DHD_ERROR(("******** Perform BP reset ********\n"));
5339
+
5340
+ /* Disable ASPM */
5341
+ DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5342
+ PCIECFGREG_LINK_STATUS_CTRL));
5343
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5344
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5345
+ val = val & (~PCIE_ASPM_ENAB);
5346
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5347
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5348
+
5349
+ /* wait for delay usec */
5350
+ DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5351
+ OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5352
+
5353
+ /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5354
+ DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5355
+ PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5356
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5357
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5358
+ val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5359
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5360
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
5361
+
5362
+ /* Wait till the backplane reset is asserted, i.e.
5363
+ * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
5364
+ * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
5365
+ * otherwise the DAR register will return its previous, stale value
5366
+ */
5367
+ DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5368
+ "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5369
+ PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5370
+ do {
5371
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5372
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5373
+ cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5374
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5375
+ } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5376
+
5377
+ if (cond) {
5378
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5379
+ PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
5380
+ ret = BCME_ERROR;
5381
+ goto aspm_enab;
5382
+ }
5383
+
5384
+ /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5385
+ DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5386
+ "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5387
+ PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
5388
+ do {
5389
+ val = si_corereg(bus->sih, bus->sih->buscoreidx,
5390
+ dar_clk_ctrl_status_reg, 0, 0);
5391
+ DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5392
+ dar_clk_ctrl_status_reg, val));
5393
+ cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
5394
+ OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5395
+ } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5396
+
5397
+ if (cond) {
5398
+ DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5399
+ dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
5400
+ ret = BCME_ERROR;
5401
+ }
5402
+
5403
+aspm_enab:
5404
+ /* Enable ASPM */
5405
+ DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5406
+ PCIECFGREG_LINK_STATUS_CTRL));
5407
+ val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5408
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5409
+ val = val | (PCIE_ASPM_L1_ENAB);
5410
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5411
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5412
+
5413
+ DHD_ERROR(("******** BP reset Succedeed ********\n"));
5414
+
5415
+ return ret;
5416
+}
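+/* BP reset sequence as implemented above, in short:
+ *   1. disable ASPM and wait DHD_BP_RESET_ASPM_DISABLE_DELAY
+ *   2. set SPROM_CTRL bit 10 to request the backplane reset
+ *   3. poll SPROM_CTRL bit 10, then DAR clock_ctrl_status bit 21, until clear
+ *   4. re-enable ASPM L1
+ */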
5417
+#endif /* DHD_USE_BP_RESET */
29095418
29105419 int
29115420 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
....@@ -2913,165 +5422,230 @@
29135422 dhd_bus_t *bus = dhdp->bus;
29145423 int bcmerror = 0;
29155424 unsigned long flags;
5425
+ unsigned long flags_bus;
29165426 #ifdef CONFIG_ARCH_MSM
29175427 int retry = POWERUP_MAX_RETRY;
29185428 #endif /* CONFIG_ARCH_MSM */
29195429
2920
- if (dhd_download_fw_on_driverload) {
2921
- bcmerror = dhd_bus_start(dhdp);
2922
- } else {
2923
- if (flag == TRUE) { /* Turn off WLAN */
2924
- /* Removing Power */
2925
- DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5430
+ if (flag == TRUE) { /* Turn off WLAN */
5431
+ /* Removing Power */
5432
+ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5433
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
5434
+ bus->dhd->up = FALSE;
29265435
2927
- bus->dhd->up = FALSE;
5436
+ /* wait for other contexts to finish -- if required, a call
5437
+ * to OSL_DELAY for 1s can be added to give other contexts
5438
+ * a chance to finish
5439
+ */
5440
+ dhdpcie_advertise_bus_cleanup(bus->dhd);
29285441
2929
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
2930
- dhdpcie_advertise_bus_cleanup(bus->dhd);
2931
- if (bus->intr) {
2932
- dhdpcie_bus_intr_disable(bus);
2933
- dhdpcie_free_irq(bus);
2934
- }
5442
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
5443
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5444
+ atomic_set(&bus->dhd->block_bus, TRUE);
5445
+ dhd_flush_rx_tx_wq(bus->dhd);
5446
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5447
+
29355448 #ifdef BCMPCIE_OOB_HOST_WAKE
2936
- /* Clean up any pending host wake IRQ */
2937
- dhd_bus_oob_intr_set(bus->dhd, FALSE);
2938
- dhd_bus_oob_intr_unregister(bus->dhd);
5449
+ /* Clean up any pending host wake IRQ */
5450
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
5451
+ dhd_bus_oob_intr_unregister(bus->dhd);
29395452 #endif /* BCMPCIE_OOB_HOST_WAKE */
2940
- dhd_os_wd_timer(dhdp, 0);
2941
- dhd_bus_stop(bus, TRUE);
2942
- dhd_prot_reset(dhdp);
2943
- dhd_clear(dhdp);
2944
- dhd_bus_release_dongle(bus);
2945
- dhdpcie_bus_free_resource(bus);
2946
- bcmerror = dhdpcie_bus_disable_device(bus);
2947
- if (bcmerror) {
2948
- DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
2949
- __FUNCTION__, bcmerror));
2950
- goto done;
2951
- }
2952
-#ifdef CONFIG_ARCH_MSM
2953
- bcmerror = dhdpcie_bus_clock_stop(bus);
2954
- if (bcmerror) {
2955
- DHD_ERROR(("%s: host clock stop failed: %d\n",
2956
- __FUNCTION__, bcmerror));
2957
- goto done;
2958
- }
2959
-#endif /* CONFIG_ARCH_MSM */
2960
- DHD_GENERAL_LOCK(bus->dhd, flags);
2961
- bus->dhd->busstate = DHD_BUS_DOWN;
2962
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
2963
- } else {
2964
- if (bus->intr) {
2965
- dhdpcie_bus_intr_disable(bus);
2966
- dhdpcie_free_irq(bus);
2967
- }
2968
-#ifdef BCMPCIE_OOB_HOST_WAKE
2969
- /* Clean up any pending host wake IRQ */
2970
- dhd_bus_oob_intr_set(bus->dhd, FALSE);
2971
- dhd_bus_oob_intr_unregister(bus->dhd);
2972
-#endif /* BCMPCIE_OOB_HOST_WAKE */
2973
- dhd_prot_reset(dhdp);
2974
- dhd_clear(dhdp);
2975
- dhd_bus_release_dongle(bus);
2976
- dhdpcie_bus_free_resource(bus);
2977
- bcmerror = dhdpcie_bus_disable_device(bus);
2978
- if (bcmerror) {
2979
- DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
2980
- __FUNCTION__, bcmerror));
2981
- goto done;
2982
- }
2983
-
2984
-#ifdef CONFIG_ARCH_MSM
2985
- bcmerror = dhdpcie_bus_clock_stop(bus);
2986
- if (bcmerror) {
2987
- DHD_ERROR(("%s: host clock stop failed: %d\n",
2988
- __FUNCTION__, bcmerror));
2989
- goto done;
2990
- }
2991
-#endif /* CONFIG_ARCH_MSM */
5453
+ dhd_os_wd_timer(dhdp, 0);
5454
+ dhd_bus_stop(bus, TRUE);
5455
+ if (bus->intr) {
5456
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5457
+ dhdpcie_bus_intr_disable(bus);
5458
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5459
+ dhdpcie_free_irq(bus);
29925460 }
2993
-
2994
- bus->dhd->dongle_reset = TRUE;
2995
- DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
2996
-
2997
- } else { /* Turn on WLAN */
2998
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
2999
- /* Powering On */
3000
- DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5461
+ dhd_deinit_bus_lock(bus);
5462
+ dhd_deinit_backplane_access_lock(bus);
5463
+ dhd_bus_release_dongle(bus);
5464
+ dhdpcie_bus_free_resource(bus);
5465
+ bcmerror = dhdpcie_bus_disable_device(bus);
5466
+ if (bcmerror) {
5467
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5468
+ __FUNCTION__, bcmerror));
5469
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5470
+ atomic_set(&bus->dhd->block_bus, FALSE);
5471
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5472
+ }
5473
+ /* Clean up protocol data after Bus Master Enable bit clear
5474
+ * so that host can safely unmap DMA and remove the allocated buffers
5475
+ * from the PKTID MAP. On some Application Processors the
5476
+ * System MMU triggers a kernel panic when it detects an
5477
+ * access to DMA-unmapped memory from a device behind the
5478
+ * System MMU. Such a panic is possible here because the
5479
+ * dongle may still access DMA-unmapped memory after
5480
+ * calling the dhd_prot_reset().
5481
+ * For this reason, the dhd_prot_reset() and dhd_clear() functions
5482
+ * should be located after the dhdpcie_bus_disable_device().
5483
+ */
5484
+ dhd_prot_reset(dhdp);
5485
+ dhd_clear(dhdp);
30015486 #ifdef CONFIG_ARCH_MSM
3002
- while (--retry) {
3003
- bcmerror = dhdpcie_bus_clock_start(bus);
3004
- if (!bcmerror) {
3005
- DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
3006
- __FUNCTION__));
3007
- break;
3008
- } else {
3009
- OSL_SLEEP(10);
3010
- }
3011
- }
3012
-
3013
- if (bcmerror && !retry) {
3014
- DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
3015
- __FUNCTION__, bcmerror));
3016
- goto done;
3017
- }
3018
-#endif /* CONFIG_ARCH_MSM */
3019
- bcmerror = dhdpcie_bus_enable_device(bus);
3020
- if (bcmerror) {
3021
- DHD_ERROR(("%s: host configuration restore failed: %d\n",
3022
- __FUNCTION__, bcmerror));
3023
- goto done;
3024
- }
3025
-
3026
- bcmerror = dhdpcie_bus_alloc_resource(bus);
3027
- if (bcmerror) {
3028
- DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
3029
- __FUNCTION__, bcmerror));
3030
- goto done;
3031
- }
3032
-
3033
- bcmerror = dhdpcie_bus_dongle_attach(bus);
3034
- if (bcmerror) {
3035
- DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
3036
- __FUNCTION__, bcmerror));
3037
- goto done;
3038
- }
3039
-
3040
- bcmerror = dhd_bus_request_irq(bus);
3041
- if (bcmerror) {
3042
- DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
3043
- __FUNCTION__, bcmerror));
3044
- goto done;
3045
- }
3046
-
3047
- bus->dhd->dongle_reset = FALSE;
3048
-
3049
- bcmerror = dhd_bus_start(dhdp);
3050
- if (bcmerror) {
3051
- DHD_ERROR(("%s: dhd_bus_start: %d\n",
3052
- __FUNCTION__, bcmerror));
3053
- goto done;
3054
- }
3055
-
3056
- bus->dhd->up = TRUE;
3057
- DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
3058
- } else {
3059
- DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
5487
+ bcmerror = dhdpcie_bus_clock_stop(bus);
5488
+ if (bcmerror) {
5489
+ DHD_ERROR(("%s: host clock stop failed: %d\n",
5490
+ __FUNCTION__, bcmerror));
5491
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5492
+ atomic_set(&bus->dhd->block_bus, FALSE);
5493
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
30605494 goto done;
30615495 }
5496
+#endif /* CONFIG_ARCH_MSM */
5497
+ DHD_GENERAL_LOCK(bus->dhd, flags);
5498
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5499
+ bus->dhd->busstate = DHD_BUS_DOWN;
5500
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
5501
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5502
+ atomic_set(&bus->dhd->block_bus, FALSE);
5503
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5504
+ } else {
5505
+ if (bus->intr) {
5506
+ dhdpcie_free_irq(bus);
5507
+ }
5508
+#ifdef BCMPCIE_OOB_HOST_WAKE
5509
+ /* Clean up any pending host wake IRQ */
5510
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
5511
+ dhd_bus_oob_intr_unregister(bus->dhd);
5512
+#endif /* BCMPCIE_OOB_HOST_WAKE */
5513
+ dhd_dpc_kill(bus->dhd);
5514
+ if (!bus->no_bus_init) {
5515
+ dhd_bus_release_dongle(bus);
5516
+ dhdpcie_bus_free_resource(bus);
5517
+ bcmerror = dhdpcie_bus_disable_device(bus);
5518
+ if (bcmerror) {
5519
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5520
+ __FUNCTION__, bcmerror));
5521
+ }
5522
+
5523
+ /* Clean up protocol data after Bus Master Enable bit clear
5524
+ * so that host can safely unmap DMA and remove the allocated
5525
+ * buffers from the PKTID MAP. On some Application Processors
5526
+ * the System MMU triggers a kernel panic when it detects an
5527
+ * access to DMA-unmapped memory from a device behind the
5528
+ * System MMU.
5529
+ * Such a panic is possible here because the dongle may still
5530
+ * access DMA-unmapped memory after calling
5531
+ * the dhd_prot_reset().
5532
+ * For this reason, the dhd_prot_reset() and dhd_clear() functions
5533
+ * should be located after the dhdpcie_bus_disable_device().
5534
+ */
5535
+ dhd_prot_reset(dhdp);
5536
+ dhd_clear(dhdp);
5537
+ } else {
5538
+ bus->no_bus_init = FALSE;
5539
+ }
5540
+#ifdef CONFIG_ARCH_MSM
5541
+ bcmerror = dhdpcie_bus_clock_stop(bus);
5542
+ if (bcmerror) {
5543
+ DHD_ERROR(("%s: host clock stop failed: %d\n",
5544
+ __FUNCTION__, bcmerror));
5545
+ goto done;
5546
+ }
5547
+#endif /* CONFIG_ARCH_MSM */
5548
+ }
5549
+
5550
+ bus->dhd->dongle_reset = TRUE;
5551
+ DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
5552
+
5553
+ } else { /* Turn on WLAN */
5554
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
5555
+ /* Powering On */
5556
+ DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5557
+#ifdef CONFIG_ARCH_MSM
5558
+ while (--retry) {
5559
+ bcmerror = dhdpcie_bus_clock_start(bus);
5560
+ if (!bcmerror) {
5561
+ DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
5562
+ __FUNCTION__));
5563
+ break;
5564
+ } else {
5565
+ OSL_SLEEP(10);
5566
+ }
5567
+ }
5568
+
5569
+ if (bcmerror && !retry) {
5570
+ DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5571
+ __FUNCTION__, bcmerror));
5572
+ goto done;
5573
+ }
5574
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5575
+ dhd_bus_aspm_enable_rc_ep(bus, FALSE);
5576
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5577
+#endif /* CONFIG_ARCH_MSM */
5578
+ bus->is_linkdown = 0;
5579
+ bus->cto_triggered = 0;
5580
+#ifdef SUPPORT_LINKDOWN_RECOVERY
5581
+ bus->read_shm_fail = FALSE;
5582
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
5583
+ bcmerror = dhdpcie_bus_enable_device(bus);
5584
+ if (bcmerror) {
5585
+ DHD_ERROR(("%s: host configuration restore failed: %d\n",
5586
+ __FUNCTION__, bcmerror));
5587
+ goto done;
5588
+ }
5589
+
5590
+ bcmerror = dhdpcie_bus_alloc_resource(bus);
5591
+ if (bcmerror) {
5592
+ DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5593
+ __FUNCTION__, bcmerror));
5594
+ goto done;
5595
+ }
5596
+
5597
+ bcmerror = dhdpcie_bus_dongle_attach(bus);
5598
+ if (bcmerror) {
5599
+ DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
5600
+ __FUNCTION__, bcmerror));
5601
+ goto done;
5602
+ }
5603
+
5604
+ bcmerror = dhd_bus_request_irq(bus);
5605
+ if (bcmerror) {
5606
+ DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
5607
+ __FUNCTION__, bcmerror));
5608
+ goto done;
5609
+ }
5610
+
5611
+ bus->dhd->dongle_reset = FALSE;
5612
+
5613
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
5614
+ dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
5615
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
5616
+
5617
+ bcmerror = dhd_bus_start(dhdp);
5618
+ if (bcmerror) {
5619
+ DHD_ERROR(("%s: dhd_bus_start: %d\n",
5620
+ __FUNCTION__, bcmerror));
5621
+ goto done;
5622
+ }
5623
+
5624
+ bus->dhd->up = TRUE;
5625
+ /* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
5626
+ if (bus->dhd->dhd_watchdog_ms_backup) {
5627
+ DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
5628
+ __FUNCTION__));
5629
+ dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
5630
+ }
5631
+ DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
5632
+ } else {
5633
+ DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
5634
+ goto done;
30625635 }
30635636 }
30645637
30655638 done:
30665639 if (bcmerror) {
30675640 DHD_GENERAL_LOCK(bus->dhd, flags);
5641
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
30685642 bus->dhd->busstate = DHD_BUS_DOWN;
30695643 DHD_GENERAL_UNLOCK(bus->dhd, flags);
30705644 }
3071
-
30725645 return bcmerror;
30735646 }
30745647
5648
+#ifdef DHD_PCIE_REG_ACCESS
30755649 static int
30765650 pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
30775651 bool slave_bypass)
....@@ -3116,7 +5690,77 @@
31165690 }
31175691 return -1;
31185692 }
5693
+#endif /* DHD_PCIE_REG_ACCESS */
31195694
5695
+/* si_backplane_access() manages a shared resource - BAR0 mapping, hence its
5696
+ * calls shall be serialized. This wrapper function provides such serialization
5697
+ * and shall be used everywhere instead of calling si_backplane_access() directly.
5698
+ *
5699
+ * The Linux DHD driver calls si_backplane_access() from three contexts: tasklet
5700
+ * (which may call dhdpcie_sssr_dump()), iovar
5701
+ * ("sbreg", "membytes", etc.) and procfs (used by the GDB proxy). To avoid race
5702
+ * conditions, calls of si_backplane_access() shall be serialized. The presence of a
5703
+ * tasklet context implies that serialization shall be based on a spinlock. Hence the
5704
+ * Linux implementation of dhd_pcie_backplane_access_[un]lock() is
5705
+ * spinlock-based.
5706
+ *
5707
+ * Other platforms may add their own implementations of
5708
+ * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not
5709
+ * needed, the implementation might be empty)
5710
+ */
5711
+static uint
5712
+serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
5713
+{
5714
+ uint ret;
5715
+ unsigned long flags;
5716
+ DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
5717
+ ret = si_backplane_access(bus->sih, addr, size, val, read);
5718
+ DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
5719
+ return ret;
5720
+}
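+/* Usage sketch (illustrative only): callers pass the same arguments they would
+ * hand to si_backplane_access(), e.g.
+ *
+ *   uint val;
+ *   if (serialized_backplane_access(bus, addr, sizeof(val), &val, TRUE) != BCME_OK)
+ *       return BCME_ERROR;
+ */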
5721
+
5722
+static int
5723
+dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
5724
+{
5725
+ int h2d_support, d2h_support;
5726
+
5727
+ d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
5728
+ h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
5729
+ return (d2h_support | (h2d_support << 1));
5730
+
5731
+}
5732
+int
5733
+dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
5734
+{
5735
+ int bcmerror = 0;
5736
+ /* Can change it only during initialization/FW download */
5737
+ if (dhd->busstate == DHD_BUS_DOWN) {
5738
+ if ((int_val > 3) || (int_val < 0)) {
5739
+ DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5740
+ bcmerror = BCME_BADARG;
5741
+ } else {
5742
+ dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
5743
+ dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
5744
+ dhd->dma_ring_upd_overwrite = TRUE;
5745
+ }
5746
+ } else {
5747
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5748
+ __FUNCTION__));
5749
+ bcmerror = BCME_NOTDOWN;
5750
+ }
5751
+
5752
+ return bcmerror;
5753
+
5754
+}
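+/* Encoding used by the two functions above: bit 0 enables DMA'd D2H ring
+ * indices, bit 1 enables DMA'd H2D ring indices; e.g. a value of 3 enables
+ * both directions. The setter only succeeds while the bus is down (before
+ * FW download).
+ */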
5755
+/**
5756
+ * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5757
+ *
5758
+ * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
5759
+ * @param params input buffer
5760
+ * @param plen length in [bytes] of input buffer 'params'
5761
+ * @param arg output buffer
5762
+ * @param len length in [bytes] of output buffer 'arg'
5763
+ */
31205764 static int
31215765 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
31225766 void *params, int plen, void *arg, int len, int val_size)
....@@ -3153,11 +5797,10 @@
31535797
31545798 switch (actionid) {
31555799
3156
-
31575800 case IOV_SVAL(IOV_VARS):
31585801 bcmerror = dhdpcie_downloadvars(bus, arg, len);
31595802 break;
3160
-
5803
+#ifdef DHD_PCIE_REG_ACCESS
31615804 case IOV_SVAL(IOV_PCIEREG):
31625805 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
31635806 int_val);
....@@ -3186,7 +5829,8 @@
31865829 addr = sdreg.offset;
31875830 size = sdreg.func;
31885831
3189
- if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) {
5832
+ if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
5833
+ {
31905834 DHD_ERROR(("Invalid size/addr combination \n"));
31915835 bcmerror = BCME_ERROR;
31925836 break;
....@@ -3204,7 +5848,8 @@
32045848
32055849 addr = sdreg.offset;
32065850 size = sdreg.func;
3207
- if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) {
5851
+ if (serialized_backplane_access(bus, addr, size,
5852
+ (uint *)(&sdreg.value), FALSE) != BCME_OK) {
32085853 DHD_ERROR(("Invalid size/addr combination \n"));
32095854 bcmerror = BCME_ERROR;
32105855 }
....@@ -3218,10 +5863,11 @@
32185863
32195864 bcopy(params, &sdreg, sizeof(sdreg));
32205865
3221
- addr = sdreg.offset | SI_ENUM_BASE;
5866
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
32225867 size = sdreg.func;
32235868
3224
- if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) {
5869
+ if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
5870
+ {
32255871 DHD_ERROR(("Invalid size/addr combination \n"));
32265872 bcmerror = BCME_ERROR;
32275873 break;
....@@ -3237,9 +5883,10 @@
32375883
32385884 bcopy(params, &sdreg, sizeof(sdreg));
32395885
3240
- addr = sdreg.offset | SI_ENUM_BASE;
5886
+ addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
32415887 size = sdreg.func;
3242
- if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) {
5888
+ if (serialized_backplane_access(bus, addr, size,
5889
+ (uint *)(&sdreg.value), FALSE) != BCME_OK) {
32435890 DHD_ERROR(("Invalid size/addr combination \n"));
32445891 bcmerror = BCME_ERROR;
32455892 }
....@@ -3270,7 +5917,7 @@
32705917 bcmerror = BCME_ERROR;
32715918 break;
32725919 }
3273
- if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) {
5920
+ if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
32745921 DHD_ERROR(("pcie2_mdioop failed.\n"));
32755922 bcmerror = BCME_ERROR;
32765923 }
....@@ -3288,14 +5935,45 @@
32885935 int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
32895936 bcopy(&int_val, arg, sizeof(int_val));
32905937 break;
3291
-
5938
+#endif /* DHD_PCIE_REG_ACCESS */
32925939 case IOV_SVAL(IOV_PCIE_LPBK):
32935940 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
32945941 break;
32955942
3296
- case IOV_SVAL(IOV_PCIE_DMAXFER):
3297
- bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
5943
+ case IOV_SVAL(IOV_PCIE_DMAXFER): {
5944
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
5945
+
5946
+ if (!dmaxfer)
5947
+ return BCME_BADARG;
5948
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
5949
+ return BCME_VERSION;
5950
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5951
+ return BCME_BADLEN;
5952
+ }
5953
+
5954
+ bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
5955
+ dmaxfer->src_delay, dmaxfer->dest_delay,
5956
+ dmaxfer->type, dmaxfer->core_num,
5957
+ dmaxfer->should_wait);
5958
+
5959
+ if (dmaxfer->should_wait && bcmerror >= 0) {
5960
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5961
+ }
32985962 break;
5963
+ }
5964
+
5965
+ case IOV_GVAL(IOV_PCIE_DMAXFER): {
5966
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
5967
+ if (!dmaxfer)
5968
+ return BCME_BADARG;
5969
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
5970
+ return BCME_VERSION;
5971
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5972
+ return BCME_BADLEN;
5973
+ }
5974
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5975
+ break;
5976
+ }
32995977
33005978 case IOV_GVAL(IOV_PCIE_SUSPEND):
33015979 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
....@@ -3303,13 +5981,55 @@
33035981 break;
33045982
33055983 case IOV_SVAL(IOV_PCIE_SUSPEND):
3306
- dhdpcie_bus_suspend(bus, bool_val);
5984
+ if (bool_val) { /* Suspend */
5985
+ int ret;
5986
+ unsigned long flags;
5987
+
5988
+ /*
5989
+ * If some other context is busy, wait until they are done,
5990
+ * before starting suspend
5991
+ */
5992
+ ret = dhd_os_busbusy_wait_condition(bus->dhd,
5993
+ &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
5994
+ if (ret == 0) {
5995
+ DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
5996
+ __FUNCTION__, bus->dhd->dhd_bus_busy_state));
5997
+ return BCME_BUSY;
5998
+ }
5999
+
6000
+ DHD_GENERAL_LOCK(bus->dhd, flags);
6001
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
6002
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
6003
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6004
+ dhdpcie_bus_suspend(bus, TRUE, TRUE);
6005
+#else
6006
+ dhdpcie_bus_suspend(bus, TRUE);
6007
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6008
+
6009
+ DHD_GENERAL_LOCK(bus->dhd, flags);
6010
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
6011
+ dhd_os_busbusy_wake(bus->dhd);
6012
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
6013
+ } else { /* Resume */
6014
+ unsigned long flags;
6015
+ DHD_GENERAL_LOCK(bus->dhd, flags);
6016
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
6017
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
6018
+
6019
+ dhdpcie_bus_suspend(bus, FALSE);
6020
+
6021
+ DHD_GENERAL_LOCK(bus->dhd, flags);
6022
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
6023
+ dhd_os_busbusy_wake(bus->dhd);
6024
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
6025
+ }
33076026 break;
33086027
33096028 case IOV_GVAL(IOV_MEMSIZE):
33106029 int_val = (int32)bus->ramsize;
33116030 bcopy(&int_val, arg, val_size);
33126031 break;
6032
+#ifdef DHD_BUS_MEM_ACCESS
33136033 case IOV_SVAL(IOV_MEMBYTES):
33146034 case IOV_GVAL(IOV_MEMBYTES):
33156035 {
....@@ -3396,6 +6116,71 @@
33966116
33976117 break;
33986118 }
6119
+#endif /* DHD_BUS_MEM_ACCESS */
6120
+
6121
+ /* Debug related. Dumps core registers or one of the dongle memories */
6122
+ case IOV_GVAL(IOV_DUMP_DONGLE):
6123
+ {
6124
+ dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
6125
+ dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
6126
+ uint32 *p = ddo->val;
6127
+ const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
6128
+
6129
+ if (plen < sizeof(ddi) || len < sizeof(ddo)) {
6130
+ bcmerror = BCME_BADARG;
6131
+ break;
6132
+ }
6133
+
6134
+ switch (ddi.type) {
6135
+ case DUMP_DONGLE_COREREG:
6136
+ ddo->n_bytes = 0;
6137
+
6138
+ if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
6139
+ break; // beyond last core: core enumeration ended
6140
+ }
6141
+
6142
+ ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
6143
+ ddo->address += ddi.offset; // BP address at which this dump starts
6144
+
6145
+ ddo->id = si_coreid(bus->sih);
6146
+ ddo->rev = si_corerev(bus->sih);
6147
+
6148
+ while (ddi.offset < max_offset &&
6149
+ sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
6150
+ *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
6151
+ ddi.offset += sizeof(uint32);
6152
+ ddo->n_bytes += sizeof(uint32);
6153
+ }
6154
+ break;
6155
+ default:
6156
+ // TODO: implement d11 SHM/TPL dumping
6157
+ bcmerror = BCME_BADARG;
6158
+ break;
6159
+ }
6160
+ break;
6161
+ }
6162
+
6163
+ /* Debug related. Returns a string with dongle capabilities */
6164
+ case IOV_GVAL(IOV_DNGL_CAPS):
6165
+ {
6166
+ strncpy(arg, bus->dhd->fw_capabilities,
6167
+ MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
6168
+ ((char*)arg)[len - 1] = '\0';
6169
+ break;
6170
+ }
6171
+
6172
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
6173
+ case IOV_SVAL(IOV_GDB_SERVER):
6174
+ /* debugger_*() functions may sleep, so cannot hold spinlock */
6175
+ DHD_PERIM_UNLOCK(bus->dhd);
6176
+ if (int_val > 0) {
6177
+ debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
6178
+ } else {
6179
+ debugger_close();
6180
+ }
6181
+ DHD_PERIM_LOCK(bus->dhd);
6182
+ break;
6183
+#endif /* DEBUGGER || DHD_DSCOPE */
33996184
34006185 #ifdef BCM_BUZZZ
34016186 /* Dump dongle side buzzz trace to console */
....@@ -3467,29 +6252,13 @@
34676252 break;
34686253 }
34696254 case IOV_GVAL(IOV_DMA_RINGINDICES):
3470
- { int h2d_support, d2h_support;
3471
-
3472
- d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
3473
- h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
3474
- int_val = d2h_support | (h2d_support << 1);
6255
+ {
6256
+ int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
34756257 bcopy(&int_val, arg, sizeof(int_val));
34766258 break;
34776259 }
34786260 case IOV_SVAL(IOV_DMA_RINGINDICES):
3479
- /* Can change it only during initialization/FW download */
3480
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
3481
- if ((int_val > 3) || (int_val < 0)) {
3482
- DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
3483
- bcmerror = BCME_BADARG;
3484
- } else {
3485
- bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
3486
- bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
3487
- }
3488
- } else {
3489
- DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
3490
- __FUNCTION__));
3491
- bcmerror = BCME_NOTDOWN;
3492
- }
6261
+ bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
34936262 break;
34946263
34956264 case IOV_GVAL(IOV_METADATA_DBG):
....@@ -3551,7 +6320,23 @@
35516320 break;
35526321
35536322 case IOV_SVAL(IOV_DEVRESET):
3554
- dhd_bus_devreset(bus->dhd, (uint8)bool_val);
6323
+ switch (int_val) {
6324
+ case DHD_BUS_DEVRESET_ON:
6325
+ bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6326
+ break;
6327
+ case DHD_BUS_DEVRESET_OFF:
6328
+ bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6329
+ break;
6330
+ case DHD_BUS_DEVRESET_FLR:
6331
+ bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
6332
+ break;
6333
+ case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
6334
+ bus->flr_force_fail = TRUE;
6335
+ break;
6336
+ default:
6337
+ DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
6338
+ break;
6339
+ }
35556340 break;
35566341 case IOV_SVAL(IOV_FORCE_FW_TRAP):
35576342 if (bus->dhd->busstate == DHD_BUS_DATA)
....@@ -3571,6 +6356,30 @@
35716356 bcopy(&int_val, arg, val_size);
35726357 break;
35736358
6359
+#ifdef DHD_PCIE_RUNTIMEPM
6360
+ case IOV_GVAL(IOV_IDLETIME):
6361
+ if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
6362
+ int_val = bus->idletime;
6363
+ } else {
6364
+ int_val = 0;
6365
+ }
6366
+ bcopy(&int_val, arg, val_size);
6367
+ break;
6368
+
6369
+ case IOV_SVAL(IOV_IDLETIME):
6370
+ if (int_val < 0) {
6371
+ bcmerror = BCME_BADARG;
6372
+ } else {
6373
+ bus->idletime = int_val;
6374
+ if (bus->idletime) {
6375
+ DHD_ENABLE_RUNTIME_PM(bus->dhd);
6376
+ } else {
6377
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
6378
+ }
6379
+ }
6380
+ break;
6381
+#endif /* DHD_PCIE_RUNTIMEPM */
6382
+
35746383 case IOV_GVAL(IOV_TXBOUND):
35756384 int_val = (int32)dhd_txbound;
35766385 bcopy(&int_val, arg, val_size);
....@@ -3578,6 +6387,65 @@
35786387
35796388 case IOV_SVAL(IOV_TXBOUND):
35806389 dhd_txbound = (uint)int_val;
6390
+ break;
6391
+
6392
+ case IOV_SVAL(IOV_H2D_MAILBOXDATA):
6393
+ dhdpcie_send_mb_data(bus, (uint)int_val);
6394
+ break;
6395
+
6396
+ case IOV_SVAL(IOV_INFORINGS):
6397
+ dhd_prot_init_info_rings(bus->dhd);
6398
+ break;
6399
+
6400
+ case IOV_SVAL(IOV_H2D_PHASE):
6401
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6402
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6403
+ __FUNCTION__));
6404
+ bcmerror = BCME_NOTDOWN;
6405
+ break;
6406
+ }
6407
+ if (int_val)
6408
+ bus->dhd->h2d_phase_supported = TRUE;
6409
+ else
6410
+ bus->dhd->h2d_phase_supported = FALSE;
6411
+ break;
6412
+
6413
+ case IOV_GVAL(IOV_H2D_PHASE):
6414
+ int_val = (int32) bus->dhd->h2d_phase_supported;
6415
+ bcopy(&int_val, arg, val_size);
6416
+ break;
6417
+
6418
+ case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6419
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6420
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6421
+ __FUNCTION__));
6422
+ bcmerror = BCME_NOTDOWN;
6423
+ break;
6424
+ }
6425
+ if (int_val)
6426
+ bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
6427
+ else
6428
+ bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
6429
+ break;
6430
+
6431
+ case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6432
+ int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
6433
+ bcopy(&int_val, arg, val_size);
6434
+ break;
6435
+
6436
+ case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
6437
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6438
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6439
+ __FUNCTION__));
6440
+ bcmerror = BCME_NOTDOWN;
6441
+ break;
6442
+ }
6443
+ dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
6444
+ break;
6445
+
6446
+ case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
6447
+ int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
6448
+ bcopy(&int_val, arg, val_size);
35816449 break;
35826450
35836451 case IOV_GVAL(IOV_RXBOUND):
....@@ -3589,14 +6457,30 @@
35896457 dhd_rxbound = (uint)int_val;
35906458 break;
35916459
6460
+ case IOV_GVAL(IOV_TRAPDATA):
6461
+ {
6462
+ struct bcmstrbuf dump_b;
6463
+ bcm_binit(&dump_b, arg, len);
6464
+ bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
6465
+ break;
6466
+ }
6467
+
6468
+ case IOV_GVAL(IOV_TRAPDATA_RAW):
6469
+ {
6470
+ struct bcmstrbuf dump_b;
6471
+ bcm_binit(&dump_b, arg, len);
6472
+ bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
6473
+ break;
6474
+ }
6475
+#ifdef DHD_PCIE_REG_ACCESS
35926476 case IOV_GVAL(IOV_PCIEASPM): {
35936477 uint8 clkreq = 0;
35946478 uint32 aspm = 0;
35956479
35966480 /* this command is to hide the details, but match the lcreg
35976481 #define PCIE_CLKREQ_ENAB 0x100
3598
- #define PCIE_ASPM_L1_ENAB 2
3599
- #define PCIE_ASPM_L0s_ENAB 1
6482
+ #define PCIE_ASPM_L1_ENAB 2
6483
+ #define PCIE_ASPM_L0s_ENAB 1
36006484 */
36016485
36026486 clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0);
....@@ -3617,7 +6501,7 @@
36176501 dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8));
36186502 break;
36196503 }
3620
-
6504
+#endif /* DHD_PCIE_REG_ACCESS */
36216505 case IOV_SVAL(IOV_HANGREPORT):
36226506 bus->dhd->hang_report = bool_val;
36236507 DHD_ERROR(("%s: Set hang_report as %d\n",
....@@ -3626,6 +6510,179 @@
36266510
36276511 case IOV_GVAL(IOV_HANGREPORT):
36286512 int_val = (int32)bus->dhd->hang_report;
6513
+ bcopy(&int_val, arg, val_size);
6514
+ break;
6515
+
6516
+ case IOV_SVAL(IOV_CTO_PREVENTION):
6517
+ bcmerror = dhdpcie_cto_init(bus, bool_val);
6518
+ break;
6519
+
6520
+ case IOV_GVAL(IOV_CTO_PREVENTION):
6521
+ if (bus->sih->buscorerev < 19) {
6522
+ bcmerror = BCME_UNSUPPORTED;
6523
+ break;
6524
+ }
6525
+ int_val = (int32)bus->cto_enable;
6526
+ bcopy(&int_val, arg, val_size);
6527
+ break;
6528
+
6529
+ case IOV_SVAL(IOV_CTO_THRESHOLD):
6530
+ {
6531
+ if (bus->sih->buscorerev < 19) {
6532
+ bcmerror = BCME_UNSUPPORTED;
6533
+ break;
6534
+ }
6535
+ bus->cto_threshold = (uint32)int_val;
6536
+ }
6537
+ break;
6538
+
6539
+ case IOV_GVAL(IOV_CTO_THRESHOLD):
6540
+ if (bus->sih->buscorerev < 19) {
6541
+ bcmerror = BCME_UNSUPPORTED;
6542
+ break;
6543
+ }
6544
+ if (bus->cto_threshold)
6545
+ int_val = (int32)bus->cto_threshold;
6546
+ else
6547
+ int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
6548
+
6549
+ bcopy(&int_val, arg, val_size);
6550
+ break;
6551
+
6552
+ case IOV_SVAL(IOV_PCIE_WD_RESET):
6553
+ if (bool_val) {
6554
+ /* Legacy chipcommon watchdog reset */
6555
+ dhdpcie_cc_watchdog_reset(bus);
6556
+ }
6557
+ break;
6558
+
6559
+ case IOV_GVAL(IOV_HWA_ENAB_BMAP):
6560
+ int_val = bus->hwa_enab_bmap;
6561
+ bcopy(&int_val, arg, val_size);
6562
+ break;
6563
+ case IOV_SVAL(IOV_HWA_ENAB_BMAP):
6564
+ bus->hwa_enab_bmap = (uint8)int_val;
6565
+ break;
6566
+ case IOV_GVAL(IOV_IDMA_ENABLE):
6567
+ int_val = bus->idma_enabled;
6568
+ bcopy(&int_val, arg, val_size);
6569
+ break;
6570
+ case IOV_SVAL(IOV_IDMA_ENABLE):
6571
+ bus->idma_enabled = (bool)int_val;
6572
+ break;
6573
+ case IOV_GVAL(IOV_IFRM_ENABLE):
6574
+ int_val = bus->ifrm_enabled;
6575
+ bcopy(&int_val, arg, val_size);
6576
+ break;
6577
+ case IOV_SVAL(IOV_IFRM_ENABLE):
6578
+ bus->ifrm_enabled = (bool)int_val;
6579
+ break;
6580
+ case IOV_GVAL(IOV_CLEAR_RING):
6581
+ bcopy(&int_val, arg, val_size);
6582
+ dhd_flow_rings_flush(bus->dhd, 0);
6583
+ break;
6584
+ case IOV_GVAL(IOV_DAR_ENABLE):
6585
+ int_val = bus->dar_enabled;
6586
+ bcopy(&int_val, arg, val_size);
6587
+ break;
6588
+ case IOV_SVAL(IOV_DAR_ENABLE):
6589
+ bus->dar_enabled = (bool)int_val;
6590
+ break;
6591
+ case IOV_GVAL(IOV_HSCBSIZE):
6592
+ bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
6593
+ break;
6594
+#ifdef DHD_BUS_MEM_ACCESS
6595
+ case IOV_GVAL(IOV_HSCBBYTES):
6596
+ bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
6597
+ break;
6598
+#endif // endif
6599
+
6600
+#ifdef DHD_HP2P
6601
+ case IOV_SVAL(IOV_HP2P_ENABLE):
6602
+ dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
6603
+ break;
6604
+
6605
+ case IOV_GVAL(IOV_HP2P_ENABLE):
6606
+ int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
6607
+ bcopy(&int_val, arg, val_size);
6608
+ break;
6609
+
6610
+ case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
6611
+ dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
6612
+ break;
6613
+
6614
+ case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
6615
+ int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
6616
+ bcopy(&int_val, arg, val_size);
6617
+ break;
6618
+
6619
+ case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
6620
+ dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
6621
+ break;
6622
+
6623
+ case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
6624
+ int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
6625
+ bcopy(&int_val, arg, val_size);
6626
+ break;
6627
+
6628
+ case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
6629
+ dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
6630
+ break;
6631
+
6632
+ case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
6633
+ int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
6634
+ bcopy(&int_val, arg, val_size);
6635
+ break;
6636
+ case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
6637
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6638
+ return BCME_NOTDOWN;
6639
+ }
6640
+ dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
6641
+ break;
6642
+
6643
+ case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
6644
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
6645
+ bcopy(&int_val, arg, val_size);
6646
+ break;
6647
+ case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
6648
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6649
+ return BCME_NOTDOWN;
6650
+ }
6651
+ dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
6652
+ break;
6653
+
6654
+ case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
6655
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
6656
+ bcopy(&int_val, arg, val_size);
6657
+ break;
6658
+#endif /* DHD_HP2P */
6659
+ case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
6660
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6661
+ return BCME_NOTDOWN;
6662
+ }
6663
+ if (int_val)
6664
+ bus->dhd->extdtxs_in_txcpl = TRUE;
6665
+ else
6666
+ bus->dhd->extdtxs_in_txcpl = FALSE;
6667
+ break;
6668
+
6669
+ case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
6670
+ int_val = bus->dhd->extdtxs_in_txcpl;
6671
+ bcopy(&int_val, arg, val_size);
6672
+ break;
6673
+
6674
+ case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
6675
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
6676
+ return BCME_NOTDOWN;
6677
+ }
6678
+ if (int_val)
6679
+ bus->dhd->hostrdy_after_init = TRUE;
6680
+ else
6681
+ bus->dhd->hostrdy_after_init = FALSE;
6682
+ break;
6683
+
6684
+ case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
6685
+ int_val = bus->dhd->hostrdy_after_init;
36296686 bcopy(&int_val, arg, val_size);
36306687 break;
36316688
....@@ -3658,12 +6715,107 @@
36586715 return 0;
36596716 }
36606717
6718
+void
6719
+dhd_bus_dump_dar_registers(struct dhd_bus *bus)
6720
+{
6721
+ uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
6722
+ dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
6723
+ uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
6724
+ dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
6725
+
6726
+ if (bus->is_linkdown && !bus->cto_triggered) {
6727
+ DHD_ERROR(("%s: link is down\n", __FUNCTION__));
6728
+ return;
6729
+ }
6730
+
6731
+ dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
6732
+ dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
6733
+ dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
6734
+ dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
6735
+ dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
6736
+ dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
6737
+
6738
+ if (bus->sih->buscorerev < 24) {
6739
+ DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
6740
+ __FUNCTION__, bus->sih->buscorerev));
6741
+ return;
6742
+ }
6743
+
6744
+ dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
6745
+ dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
6746
+ dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
6747
+ dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
6748
+ dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
6749
+ dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
6750
+
6751
+ DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
6752
+ __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
6753
+ dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
6754
+
6755
+ DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
6756
+ __FUNCTION__, dar_errlog_reg, dar_errlog_val,
6757
+ dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
6758
+}
6759
+
6760
+/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6761
+void
6762
+dhd_bus_hostready(struct dhd_bus *bus)
6763
+{
6764
+ if (!bus->dhd->d2h_hostrdy_supported) {
6765
+ return;
6766
+ }
6767
+
6768
+ if (bus->is_linkdown) {
6769
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
6770
+ return;
6771
+ }
6772
+
6773
+ DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
6774
+ dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
6775
+
6776
+ if (DAR_PWRREQ(bus)) {
6777
+ dhd_bus_pcie_pwr_req(bus);
6778
+ }
6779
+
6780
+ dhd_bus_dump_dar_registers(bus);
6781
+
6782
+ si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
6783
+ bus->hostready_count ++;
6784
+ DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
6785
+}
6786
+
6787
+/* Clear INTSTATUS */
6788
+void
6789
+dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
6790
+{
6791
+ uint32 intstatus = 0;
6792
+ if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
6793
+ (bus->sih->buscorerev == 2)) {
6794
+ intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
6795
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
6796
+ } else {
6797
+ /* this is a PCIE core register..not a config register... */
6798
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
6799
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
6800
+ intstatus);
6801
+ }
6802
+}
6803
+
36616804 int
6805
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6806
+dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
6807
+#else
36626808 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
6809
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
36636810 {
36646811 int timeleft;
3665
- unsigned long flags;
36666812 int rc = 0;
6813
+ unsigned long flags, flags_bus;
6814
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6815
+ int d3_read_retry = 0;
6816
+ uint32 d2h_mb_data = 0;
6817
+ uint32 zero = 0;
6818
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
36676819
36686820 if (bus->dhd == NULL) {
36696821 DHD_ERROR(("bus not inited\n"));
....@@ -3673,9 +6825,14 @@
36736825 DHD_ERROR(("prot is not inited\n"));
36746826 return BCME_ERROR;
36756827 }
6828
+
6829
+ if (dhd_query_bus_erros(bus->dhd)) {
6830
+ return BCME_ERROR;
6831
+ }
6832
+
36766833 DHD_GENERAL_LOCK(bus->dhd, flags);
3677
- if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
3678
- DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
6834
+ if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
6835
+ DHD_ERROR(("not in a readystate\n"));
36796836 DHD_GENERAL_UNLOCK(bus->dhd, flags);
36806837 return BCME_ERROR;
36816838 }
....@@ -3685,55 +6842,142 @@
36856842 return -EIO;
36866843 }
36876844
3688
- if (bus->suspended == state) { /* Set to same state */
6845
+ /* Check whether we are already in the requested state.
6846
+ * state=TRUE means Suspend
6847
+ * state=FALSE means Resume
6848
+ */
6849
+ if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
36896850 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
6851
+ return BCME_OK;
6852
+ } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
6853
+ DHD_ERROR(("Bus is already in RESUME state.\n"));
36906854 return BCME_OK;
36916855 }
36926856
36936857 if (state) {
6858
+#ifdef OEM_ANDROID
36946859 int idle_retry = 0;
36956860 int active;
6861
+#endif /* OEM_ANDROID */
6862
+
6863
+ if (bus->is_linkdown) {
6864
+ DHD_ERROR(("%s: PCIe link was down, state=%d\n",
6865
+ __FUNCTION__, state));
6866
+ return BCME_ERROR;
6867
+ }
36966868
36976869 /* Suspend */
36986870 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
3699
- bus->wait_for_d3_ack = 0;
3700
- bus->suspended = TRUE;
6871
+
6872
+ bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
6873
+ if (bus->dhd->dhd_watchdog_ms_backup) {
6874
+ DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
6875
+ __FUNCTION__));
6876
+ dhd_os_wd_timer(bus->dhd, 0);
6877
+ }
6878
+
37016879 DHD_GENERAL_LOCK(bus->dhd, flags);
3702
- bus->dhd->busstate = DHD_BUS_SUSPEND;
3703
- if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) {
6880
+ if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
37046881 DHD_ERROR(("Tx Request is not ended\n"));
37056882 bus->dhd->busstate = DHD_BUS_DATA;
37066883 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3707
- bus->suspended = FALSE;
37086884 return -EBUSY;
37096885 }
3710
- bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND;
3711
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
3712
- DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3713
- dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
3714
- dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
3715
- timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
3716
- dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
3717
- DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
37186886
6887
+ bus->last_suspend_start_time = OSL_LOCALTIME_NS();
6888
+
6889
+ /* stop all interface network queue. */
6890
+ dhd_bus_stop_queue(bus);
6891
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
6892
+
6893
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6894
+ if (byint) {
6895
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6896
+ /* Clear wait_for_d3_ack before sending D3_INFORM */
6897
+ bus->wait_for_d3_ack = 0;
6898
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6899
+
6900
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6901
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6902
+ } else {
6903
+ /* Clear wait_for_d3_ack before sending D3_INFORM */
6904
+ bus->wait_for_d3_ack = 0;
6905
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
6906
+ while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
6907
+ dhdpcie_handle_mb_data(bus);
6908
+ usleep_range(1000, 1500);
6909
+ d3_read_retry++;
6910
+ }
6911
+ }
6912
+#else
6913
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6914
+ /* Clear wait_for_d3_ack before sending D3_INFORM */
6915
+ bus->wait_for_d3_ack = 0;
6916
+ /*
6917
+ * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
6918
+ * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
6919
+ * inside atomic context, so that no more DBs will be
6920
+ * rung after sending D3_INFORM
6921
+ */
6922
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6923
+
6924
+ /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
6925
+
6926
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6927
+
6928
+#ifdef DHD_RECOVER_TIMEOUT
6929
+ if (bus->wait_for_d3_ack == 0) {
6930
+ /* If wait_for_d3_ack was not updated because D2H MB was not received */
6931
+ uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6932
+ bus->pcie_mailbox_int, 0, 0);
6933
+ int host_irq_disabled = dhdpcie_irq_disabled(bus);
6934
+ if ((intstatus) && (intstatus != (uint32)-1) &&
6935
+ (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
6936
+ DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
6937
+ " host_irq_disabled=%d\n",
6938
+ __FUNCTION__, intstatus, host_irq_disabled));
6939
+ dhd_pcie_intr_count_dump(bus->dhd);
6940
+ dhd_print_tasklet_status(bus->dhd);
6941
+ if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
6942
+ !bus->use_mailbox) {
6943
+ dhd_prot_process_ctrlbuf(bus->dhd);
6944
+ } else {
6945
+ dhdpcie_handle_mb_data(bus);
6946
+ }
6947
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6948
+ /* Clear Interrupts */
6949
+ dhdpcie_bus_clear_intstatus(bus);
6950
+ }
6951
+ } /* bus->wait_for_d3_ack was 0 */
6952
+#endif /* DHD_RECOVER_TIMEOUT */
6953
+
6954
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6955
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
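/*
 * D3 entry handshake implemented above, condensed (sketch):
 *
 *	host                                 dongle
 *	----                                 ------
 *	stop queues, wait_for_d3_ack = 0
 *	H2D_HOST_D3_INFORM  -------------->  drop into D3 (low power)
 *	dhd_os_d3ack_wait() <--------------  D2H D3_ACK (mailbox interrupt)
 *
 * With DHD_RECOVER_TIMEOUT, an expired wait gets one retry: if the mailbox
 * interrupt status is live and no bus error is pending, the mailbox (or the
 * control ring, for shared rev >= 6 without mailbox) is drained inline and
 * the wait is re-armed once before the timeout is declared.
 */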
6956
+
6957
+#ifdef OEM_ANDROID
37196958 /* To allow threads that got pre-empted to complete.
37206959 */
37216960 while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
37226961 (idle_retry < MAX_WKLK_IDLE_CHECK)) {
3723
- msleep(1);
6962
+ OSL_SLEEP(1);
37246963 idle_retry++;
37256964 }
6965
+#endif /* OEM_ANDROID */
37266966
37276967 if (bus->wait_for_d3_ack) {
37286968 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
3729
-#if defined(BCMPCIE_OOB_HOST_WAKE)
3730
- dhdpcie_oob_intr_set(bus, TRUE);
3731
-#endif /* BCMPCIE_OOB_HOST_WAKE */
3732
-
37336969 /* Got D3 Ack. Suspend the bus */
6970
+#ifdef OEM_ANDROID
37346971 if (active) {
37356972 DHD_ERROR(("%s(): Suspend failed because of wakelock, "
37366973 "restoring Dongle to D0\n", __FUNCTION__));
6974
+
6975
+ if (bus->dhd->dhd_watchdog_ms_backup) {
6976
+ DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
6977
+ __FUNCTION__));
6978
+ dhd_os_wd_timer(bus->dhd,
6979
+ bus->dhd->dhd_watchdog_ms_backup);
6980
+ }
37376981
37386982 /*
37396983 * Dongle still thinks that it has to be in D3 state until
....@@ -3745,81 +6989,289 @@
37456989 * would be sent as a MB interrupt to bring it out of D3 Ack
37466990 * state to D0 state. So we have to send both this message.
37476991 */
3748
- DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3749
- dhdpcie_send_mb_data(bus,
3750
- (H2D_HOST_D0_INFORM_IN_USE|H2D_HOST_D0_INFORM));
3751
- DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
37526992
3753
- bus->suspended = FALSE;
6993
+ /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
6994
+ bus->wait_for_d3_ack = 0;
6995
+
6996
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6997
+ bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6998
+ /* Enable back the intmask which was cleared in DPC
6999
+ * after getting D3_ACK.
7000
+ */
7001
+ bus->resume_intr_enable_count++;
7002
+
7003
+ /* For Linux, macOS etc. (other than NDIS) enable back the dongle
7004
+ * interrupts using intmask and host interrupts
7005
+ * which were disabled in the dhdpcie_bus_isr()->
7006
+ * dhd_bus_handle_d3_ack().
7007
+ */
7008
+ /* Enable back interrupt using Intmask!! */
7009
+ dhdpcie_bus_intr_enable(bus);
7010
+ /* Enable back interrupt from Host side!! */
7011
+ dhdpcie_enable_irq(bus);
7012
+
7013
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7014
+
7015
+ if (bus->use_d0_inform) {
7016
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7017
+ dhdpcie_send_mb_data(bus,
7018
+ (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
7019
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7020
+ }
7021
+ /* ring doorbell 1 (hostready) */
7022
+ dhd_bus_hostready(bus);
7023
+
37547024 DHD_GENERAL_LOCK(bus->dhd, flags);
37557025 bus->dhd->busstate = DHD_BUS_DATA;
7026
+ /* resume all interface network queue. */
7027
+ dhd_bus_start_queue(bus);
37567028 DHD_GENERAL_UNLOCK(bus->dhd, flags);
37577029 rc = BCME_ERROR;
37587030 } else {
3759
- DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3760
- dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
3761
- DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3762
- dhdpcie_bus_intr_disable(bus);
7031
+ /* Actual Suspend after no wakelock */
7032
+#endif /* OEM_ANDROID */
7033
+ /* At this time bus->bus_low_power_state will be
7034
+ * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
7035
+ * in dhd_bus_handle_d3_ack()
7036
+ */
7037
+ if (bus->use_d0_inform &&
7038
+ (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
7039
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7040
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
7041
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7042
+ }
7043
+
7044
+#if defined(BCMPCIE_OOB_HOST_WAKE)
7045
+ if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
7046
+ DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
7047
+ } else {
7048
+ dhdpcie_oob_intr_set(bus, TRUE);
7049
+ }
7050
+#endif /* BCMPCIE_OOB_HOST_WAKE */
7051
+
7052
+ DHD_GENERAL_LOCK(bus->dhd, flags);
7053
+ /* The host cannot process interrupts now, so disable them.
7054
+ * No need to disable the dongle INTR using intmask, as we are
7055
+ * already calling disabling INTRs from DPC context after
7056
+ * getting D3_ACK in dhd_bus_handle_d3_ack.
7057
+ * Code may not look symmetric between Suspend and
7058
+ * Resume paths but this is done to close down the timing window
7059
+ * between DPC and suspend context and bus->bus_low_power_state
7060
+ * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
7061
+ */
7062
+ bus->dhd->d3ackcnt_timeout = 0;
7063
+ bus->dhd->busstate = DHD_BUS_SUSPEND;
7064
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
7065
+ dhdpcie_dump_resource(bus);
7066
+ /* Handle Host Suspend */
37637067 rc = dhdpcie_pci_suspend_resume(bus, state);
3764
- dhd_bus_set_device_wake(bus, FALSE);
7068
+ if (!rc) {
7069
+ bus->last_suspend_end_time = OSL_LOCALTIME_NS();
7070
+ }
7071
+#ifdef OEM_ANDROID
37657072 }
3766
- bus->dhd->d3ackcnt_timeout = 0;
3767
- } else if (timeleft == 0) {
7073
+#endif /* OEM_ANDROID */
7074
+ } else if (timeleft == 0) { /* D3 ACK Timeout */
7075
+#ifdef DHD_FW_COREDUMP
7076
+ uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
7077
+#endif /* DHD_FW_COREDUMP */
7078
+
7079
+ /* check if the D3 ACK timeout due to scheduling issue */
7080
+ bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
7081
+ bus->isr_entry_time > bus->last_d3_inform_time &&
7082
+ dhd_bus_query_dpc_sched_errors(bus->dhd);
7083
+ bus->dhd->d3ack_timeout_occured = TRUE;
7084
+ /* If the D3 Ack has timeout */
37687085 bus->dhd->d3ackcnt_timeout++;
3769
- DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
3770
- __FUNCTION__, bus->dhd->d3ackcnt_timeout));
3771
- bus->suspended = FALSE;
7086
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
7087
+ __FUNCTION__, bus->dhd->is_sched_error ?
7088
+ " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
7089
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7090
+ if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
7091
+ /* change g_assert_type to trigger Kernel panic */
7092
+ g_assert_type = 2;
7093
+ /* use ASSERT() to trigger panic */
7094
+ ASSERT(0);
7095
+ }
7096
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7097
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7098
+ bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7099
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
37727100 DHD_GENERAL_LOCK(bus->dhd, flags);
37737101 bus->dhd->busstate = DHD_BUS_DATA;
7102
+ /* resume all interface network queue. */
7103
+ dhd_bus_start_queue(bus);
37747104 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3775
- if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) {
3776
- DHD_ERROR(("%s: Event HANG send up "
3777
- "due to PCIe linkdown\n", __FUNCTION__));
7105
+ if (!bus->dhd->dongle_trap_occured &&
7106
+ !bus->is_linkdown &&
7107
+ !bus->cto_triggered) {
7108
+ uint32 intstatus = 0;
7109
+
7110
+ /* Check if PCIe bus status is valid */
7111
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7112
+ bus->pcie_mailbox_int, 0, 0);
7113
+ if (intstatus == (uint32)-1) {
7114
+ /* Invalidate PCIe bus status */
7115
+ bus->is_linkdown = 1;
7116
+ }
7117
+
7118
+ dhd_bus_dump_console_buffer(bus);
7119
+ dhd_prot_debug_info_print(bus->dhd);
7120
+#ifdef DHD_FW_COREDUMP
7121
+ if (cur_memdump_mode) {
7122
+ /* write core dump to file */
7123
+ bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
7124
+ dhdpcie_mem_dump(bus);
7125
+ }
7126
+#endif /* DHD_FW_COREDUMP */
7127
+
7128
+#ifdef OEM_ANDROID
7129
+ DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
7130
+ __FUNCTION__));
37787131 #ifdef SUPPORT_LINKDOWN_RECOVERY
37797132 #ifdef CONFIG_ARCH_MSM
3780
- bus->islinkdown = 1;
7133
+ bus->no_cfg_restore = 1;
37817134 #endif /* CONFIG_ARCH_MSM */
37827135 #endif /* SUPPORT_LINKDOWN_RECOVERY */
37837136 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
7137
+#endif /* OEM_ANDROID */
37847138 }
7139
+#if defined(DHD_ERPOM)
7140
+ dhd_schedule_reset(bus->dhd);
7141
+#endif // endif
37857142 rc = -ETIMEDOUT;
3786
-
37877143 }
3788
- bus->wait_for_d3_ack = 1;
3789
- DHD_GENERAL_LOCK(bus->dhd, flags);
3790
- bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND;
3791
- dhd_os_busbusy_wake(bus->dhd);
3792
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
37937144 } else {
37947145 /* Resume */
7146
+ DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
7147
+ bus->last_resume_start_time = OSL_LOCALTIME_NS();
7148
+
7149
+ /**
7150
+ * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
7151
+ * si_backplane_access(function to read/write backplane)
7152
+ * updates the window(PCIE2_BAR0_CORE2_WIN) only if
7153
+ * window being accessed is different from the window
7154
+ * pointed to by second_bar0win.
7155
+ * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
7156
+ * invalidating second_bar0win after resume updates
7157
+ * PCIE2_BAR0_CORE2_WIN with right window.
7158
+ */
7159
+ si_invalidate_second_bar0win(bus->sih);
7160
+#if defined(OEM_ANDROID)
37957161 #if defined(BCMPCIE_OOB_HOST_WAKE)
37967162 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
37977163 #endif /* BCMPCIE_OOB_HOST_WAKE */
3798
- DHD_GENERAL_LOCK(bus->dhd, flags);
3799
- bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME;
3800
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
7164
+#endif /* linux && OEM_ANDROID */
38017165 rc = dhdpcie_pci_suspend_resume(bus, state);
3802
- if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
3803
- DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
3804
- dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
3805
- DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
3806
- dhd_bus_set_device_wake(bus, TRUE);
7166
+ dhdpcie_dump_resource(bus);
7167
+
7168
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7169
+ /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
7170
+ bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7171
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7172
+
7173
+ if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
7174
+ if (bus->use_d0_inform) {
7175
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7176
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
7177
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7178
+ }
7179
+ /* ring doorbell 1 (hostready) */
7180
+ dhd_bus_hostready(bus);
38077181 }
3808
- bus->suspended = FALSE;
38097182 DHD_GENERAL_LOCK(bus->dhd, flags);
38107183 bus->dhd->busstate = DHD_BUS_DATA;
3811
- bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME;
3812
- dhd_os_busbusy_wake(bus->dhd);
7184
+#ifdef DHD_PCIE_RUNTIMEPM
7185
+ if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
7186
+ bus->bus_wake = 1;
7187
+ OSL_SMP_WMB();
7188
+ wake_up_interruptible(&bus->rpm_queue);
7189
+ }
7190
+#endif /* DHD_PCIE_RUNTIMEPM */
7191
+ /* resume all interface network queue. */
7192
+ dhd_bus_start_queue(bus);
7193
+
7194
+ /* TODO: for NDIS also we need to use enable_irq in future */
7195
+ bus->resume_intr_enable_count++;
7196
+
7197
+ /* For Linux, macOS etc. (other than NDIS) enable back the dongle interrupts
7198
+ * using intmask and host interrupts
7199
+ * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
7200
+ */
7201
+ dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
7202
+ dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
7203
+
38137204 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3814
- dhdpcie_bus_intr_enable(bus);
7205
+
7206
+ if (bus->dhd->dhd_watchdog_ms_backup) {
7207
+ DHD_ERROR(("%s: Enabling wdtick after resume\n",
7208
+ __FUNCTION__));
7209
+ dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
7210
+ }
7211
+
7212
+ bus->last_resume_end_time = OSL_LOCALTIME_NS();
7213
+ /* Update TCM rd index for EDL ring */
7214
+ DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
38157215 }
38167216 return rc;
38177217 }
38187218
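/*
 * Net effect of dhdpcie_bus_suspend(), per path (summary sketch):
 *
 *	suspend ok:      DHD_BUS_DATA -> DHD_BUS_SUSPEND; queues stopped,
 *	                 watchdog off, D3_ACK seen, PCI layer suspended
 *	wakelock active: dongle pulled back to D0 (D0_INFORM + hostready),
 *	                 queues restarted, rc = BCME_ERROR
 *	D3_ACK timeout:  busstate forced back to DHD_BUS_DATA, optional
 *	                 coredump and HANG event, rc = -ETIMEDOUT
 *	resume:          PCI layer resumed, BAR0 window invalidated, queues
 *	                 restarted, interrupts re-enabled, watchdog restored
 */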
7219
+uint32
7220
+dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
7221
+{
7222
+ ASSERT(bus && bus->sih);
7223
+ if (enable) {
7224
+ si_corereg(bus->sih, bus->sih->buscoreidx,
7225
+ OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
7226
+ } else {
7227
+ si_corereg(bus->sih, bus->sih->buscoreidx,
7228
+ OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
7229
+ }
7230
+ return 0;
7231
+}
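/*
 * The (mask, val) pair above follows the si_corereg() read-modify-write
 * convention used throughout this file: mask selects the bits to update,
 * val supplies their new values, and mask == 0 turns the call into a plain
 * read. Assuming that convention, the three idioms look like:
 *
 *	cur = si_corereg(sih, idx, REG, 0, 0);                   <- read only
 *	si_corereg(sih, idx, REG, CCS_FORCEALP, CCS_FORCEALP);   <- set bit
 *	si_corereg(sih, idx, REG, CCS_FORCEALP, 0);              <- clear bit
 */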
7232
+
7233
+/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
7234
+uint32
7235
+dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
7236
+{
7237
+ uint reg_val;
7238
+
7239
+ ASSERT(bus && bus->sih);
7240
+
7241
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
7242
+ 0x1004);
7243
+ reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
7244
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
7245
+ reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
7246
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
7247
+ reg_val);
7248
+
7249
+ return 0;
7250
+}
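/*
 * Worked example of the bit splice above (illustrative values only): with
 * reg_val = 0xaabb0011 and l1_entry_time = 0x30,
 *
 *	reg_val & ~(0x7f << 16)   -> 0xaa800011   <- bits [22:16] cleared
 *	(0x30 & 0x7f) << 16       -> 0x00300000
 *	ORed result               -> 0xaab00011
 *
 * The access goes through the indirect configaddr/configdata window, so the
 * read-modify-write is not atomic with respect to other users of that
 * window.
 */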
7251
+
7252
+static uint32
7253
+dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
7254
+{
7255
+ uint16 chipid = si_chipid(bus->sih);
7256
+ if ((chipid == BCM4375_CHIP_ID ||
7257
+ chipid == BCM4362_CHIP_ID ||
7258
+ chipid == BCM43751_CHIP_ID ||
7259
+ chipid == BCM4377_CHIP_ID) &&
7260
+ (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
7261
+ len += 8;
7262
+ }
7263
+ DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
7264
+ return len;
7265
+}
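/*
 * Illustration of the WAR above: on the listed chips a requested loopback
 * length of, say, 128 bytes becomes 136, except in the two M2M loopback
 * modes, which keep the caller's length; the pad is a fixed 8 bytes applied
 * before the length reaches dhdmsgbuf_dmaxfer_req().
 */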
7266
+
38197267 /** Transfers bytes from host to dongle and to host again using DMA */
38207268 static int
3821
-dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
7269
+dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
7270
+ uint32 len, uint32 srcdelay, uint32 destdelay,
7271
+ uint32 d11_lpbk, uint32 core_num, uint32 wait)
38227272 {
7273
+ int ret = 0;
7274
+
38237275 if (bus->dhd == NULL) {
38247276 DHD_ERROR(("bus not inited\n"));
38257277 return BCME_ERROR;
....@@ -3837,31 +7289,73 @@
38377289 DHD_ERROR(("len is too small or too large\n"));
38387290 return BCME_ERROR;
38397291 }
3840
- return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
7292
+
7293
+ len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
7294
+
7295
+ bus->dmaxfer_complete = FALSE;
7296
+ ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
7297
+ d11_lpbk, core_num);
7298
+ if (ret != BCME_OK || !wait) {
7299
+ DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
7300
+ ret, wait));
7301
+ } else {
7302
+ ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
7303
+ if (ret < 0)
7304
+ ret = BCME_NOTREADY;
7305
+ }
7306
+
7307
+ return ret;
7308
+
38417309 }
38427310
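/*
 * Calling convention for the loopback helper above (sketch): wait == 0
 * fires the transfer request and returns immediately with the request
 * status, while wait != 0 parks the caller in dhd_os_dmaxfer_wait() until
 * the completion handler flips bus->dmaxfer_complete, mapping a negative
 * wait result onto BCME_NOTREADY.
 */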
7311
+bool
7312
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
7313
+{
7314
+ return MULTIBP_CAP(bus->sih);
7315
+}
38437316
7317
+#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
7318
+#define PCIE_REV_FOR_4378B0 68
38447319
38457320 static int
38467321 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
38477322 {
38487323 int bcmerror = 0;
3849
- uint32 *cr4_regs;
7324
+ volatile uint32 *cr4_regs;
7325
+ bool do_flr;
7326
+ hs_addrs_t bl_hs_addrs = {NULL, NULL};
7327
+
7328
+ if (bus->sih->chip == CYW55560_CHIP_ID) {
7329
+ /* Host bootloader handshake TCM/REGS addresses init */
7330
+ bcmerror = dhdpcie_dongle_host_get_handshake_address(bus->sih, bus->osh,
7331
+ &bl_hs_addrs);
7332
+ if (bcmerror) {
7333
+ DHD_ERROR(("%s: REGS/TCM addresses not initialized\n", __FUNCTION__));
7334
+ goto fail;
7335
+ }
7336
+ }
38507337
38517338 if (!bus->sih) {
38527339 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
38537340 return BCME_ERROR;
38547341 }
7342
+
7343
+ do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
7344
+ (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
7345
+
7346
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7347
+ dhd_bus_pcie_pwr_req(bus);
7348
+ }
7349
+
38557350 /* To enter download state, disable ARM and reset SOCRAM.
38567351 * To exit download state, simply reset ARM (default is RAM boot).
38577352 */
38587353 if (enter) {
3859
- /* To handle failures due to unmanaged perst scenarios,
3860
- * introducing back plane reset before FW download.
3861
- */
3862
- pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
7354
+#ifndef BCMQT /* for performance reasons, skip the FLR for QT */
7355
+#endif /* !BCMQT */
7356
+
38637357 /* Make sure BAR1 maps to backplane address 0 */
3864
- dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
7358
+ dhdpcie_setbar1win(bus, 0x00000000);
38657359 bus->alp_only = TRUE;
38667360
38677361 /* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
....@@ -3915,22 +7409,63 @@
39157409 * [done at else] Populate the reset vector
39167410 * [done at else] Remove ARM halt
39177411 */
3918
- /* Halt ARM & remove reset */
3919
- si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
3920
- if (BCM43602_CHIP(bus->sih->chip)) {
3921
- W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
3922
- W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
3923
- W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
3924
- W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
7412
+
7413
+ if (bus->sih->chip == CYW55560_CHIP_ID) {
7414
+
7415
+ /* Skip ARM halt and reset in case of 55560 */
7416
+
7417
+ /* Bootloader host pre handshake function */
7418
+ if ((bcmerror = dhdpcie_dongle_host_pre_handshake(bus->sih,
7419
+ bus->osh, &bl_hs_addrs))) {
7420
+ DHD_ERROR(("%s: error %d dongle host pre handshake\n",
7421
+ __FUNCTION__, bcmerror));
7422
+ goto fail;
7423
+ }
7424
+ DHD_ERROR(("%s: dongle host pre handshake successful, dl FW\n",
7425
+ __FUNCTION__));
7426
+
7427
+ /* Read PCIE shared structure here */
7428
+ /* This is necessary for console buffer initialization */
7429
+ if ((bcmerror = dhdpcie_readshared_console(bus)) < 0) {
7430
+ DHD_ERROR(("%s: Shared region not initialized\n",
7431
+ __FUNCTION__));
7432
+ }
7433
+
7434
+ /* Console buffer read - First pass */
7435
+ if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7436
+ DHD_ERROR(("%s: First pass console buffer read failed\n",
7437
+ __FUNCTION__));
7438
+ }
7439
+ } else {
7440
+ /* Halt ARM & remove reset */
7441
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7442
+ if (BCM43602_CHIP(bus->sih->chip)) {
7443
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX,
7444
+ 5);
7445
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA,
7446
+ 0);
7447
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX,
7448
+ 7);
7449
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA,
7450
+ 0);
7451
+ }
7452
+ /* reset last 4 bytes of RAM address. to be used for shared area */
7453
+ dhdpcie_init_shared_addr(bus);
39257454 }
3926
- /* reset last 4 bytes of RAM address. to be used for shared area */
3927
- dhdpcie_init_shared_addr(bus);
39287455 }
39297456 } else {
39307457 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
39317458 /* write vars */
39327459 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
39337460 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7461
+ goto fail;
7462
+ }
7463
+ /* write random numbers to sysmem for the purpose of
7464
+ * randomizing heap address space.
7465
+ */
7466
+ if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7467
+ DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7468
+ __FUNCTION__));
39347469 goto fail;
39357470 }
39367471 /* switch back to arm core again */
....@@ -3966,7 +7501,6 @@
39667501 goto fail;
39677502 }
39687503
3969
-
39707504 if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
39717505 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
39727506 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
....@@ -3986,9 +7520,51 @@
39867520 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
39877521 }
39887522
7523
+ if (bus->sih->chip == CYW55560_CHIP_ID) {
7524
+ /* Console buffer read - Second pass */
7525
+ if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7526
+ DHD_ERROR(("%s: Second pass console buffer read failed\n",
7527
+ __FUNCTION__));
7528
+ }
7529
+
7530
+ /* FW and NVRAM download done notification to bootloader */
7531
+ if ((bcmerror = dhdpcie_dongle_host_post_handshake(bus->sih,
7532
+ bus->osh, &bl_hs_addrs))) {
7533
+ DHD_ERROR(("%s: error %d dongle host post handshake\n",
7534
+ __FUNCTION__, bcmerror));
7535
+ goto fail;
7536
+ }
7537
+ DHD_ERROR(("%s: FW download successful\n", __FUNCTION__));
7538
+
7539
+ /*
7540
+ * Check signature validation function
7541
+ * D2H_VALDN_DONE bit will be set in the following cases:
7542
+ * 1. Open mode: when a signature is not sent
7543
+ * 2. Secure mode: when a valid signature is sent
7544
+ * Write vars and nvram download only if the D2H_VALDN_DONE
7545
+ * bit has been set
7546
+ */
7547
+
7548
+ if ((bcmerror = dhdpcie_dongle_host_chk_validation(bus->sih,
7549
+ bus->osh, &bl_hs_addrs))) {
7550
+ DHD_ERROR(("%s: error %d dongle host validation\n",
7551
+ __FUNCTION__, bcmerror));
7552
+ goto fail;
7553
+ }
7554
+ }
7555
+
39897556 /* write vars */
39907557 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
39917558 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7559
+ goto fail;
7560
+ }
7561
+
7562
+ /* write a random number to TCM for the purpose of
7563
+ * randomizing heap address space.
7564
+ */
7565
+ if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7566
+ DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7567
+ __FUNCTION__));
39927568 goto fail;
39937569 }
39947570
....@@ -4000,30 +7576,48 @@
40007576 }
40017577
40027578 /* write address 0 with reset instruction */
4003
- bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
4004
- (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7579
+ if (bus->sih->chip != CYW55560_CHIP_ID) {
7580
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7581
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
40057582
4006
- if (bcmerror == BCME_OK) {
4007
- uint32 tmp;
7583
+ if (bcmerror == BCME_OK) {
7584
+ uint32 tmp;
40087585
4009
- bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
4010
- (uint8 *)&tmp, sizeof(tmp));
7586
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
7587
+ (uint8 *)&tmp, sizeof(tmp));
40117588
4012
- if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
4013
- DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
4014
- __FUNCTION__, bus->resetinstr));
4015
- DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
4016
- __FUNCTION__, tmp));
4017
- bcmerror = BCME_ERROR;
4018
- goto fail;
7589
+ if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
7590
+ DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
7591
+ __FUNCTION__, bus->resetinstr));
7592
+ DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
7593
+ __FUNCTION__, tmp));
7594
+ bcmerror = BCME_ERROR;
7595
+ goto fail;
7596
+ }
40197597 }
40207598 }
40217599
40227600 /* now remove reset and halt and continue to run CR4 */
40237601 }
40247602
4025
- si_core_reset(bus->sih, 0, 0);
7603
+ if (bus->sih->chip == CYW55560_CHIP_ID) {
7604
+ /* Console buffer read - Final pass */
7605
+ if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7606
+ DHD_ERROR(("%s: Final pass console buffer read failed\n",
7607
+ __FUNCTION__));
7608
+ }
40267609
7610
+ /* Set write_vars done bit to let BL jump to mainline FW */
7611
+ if ((bcmerror = dhdpcie_dongle_host_post_varswrite(bus, &bl_hs_addrs))) {
7612
+ DHD_ERROR(("%s: error %d dongle_host_post_varswrite\n",
7613
+ __FUNCTION__, bcmerror));
7614
+ goto fail;
7615
+ }
7616
+ DHD_ERROR(("%s VARS done bit set, BL can jump to mainline FW\n",
7617
+ __FUNCTION__));
7618
+ } else {
7619
+ si_core_reset(bus->sih, 0, 0);
7620
+ }
40277621 /* Allow HT Clock now that the ARM is running. */
40287622 bus->alp_only = FALSE;
40297623
....@@ -4031,11 +7625,336 @@
40317625 }
40327626
40337627 fail:
7628
+
7629
+ if (bcmerror) {
7630
+ if (bus->sih->chip == CYW55560_CHIP_ID) {
7631
+ /* Read the shared structure to determine console address */
7632
+ if (dhdpcie_readshared_console(bus) < 0) {
7633
+ DHD_ERROR(("%s: Shared region not initialized\n",
7634
+ __FUNCTION__));
7635
+ } else {
7636
+ /* Console buffer read */
7637
+ if (dhdpcie_bus_readconsole(bus) < 0) {
7638
+ DHD_ERROR(("%s: Failure case console buffer read failed\n",
7639
+ __FUNCTION__));
7640
+ }
7641
+ }
7642
+ }
7643
+ }
7644
+
40347645 /* Always return to PCIE core */
40357646 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
40367647
7648
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7649
+ dhd_bus_pcie_pwr_req_clear(bus);
7650
+ }
7651
+
40377652 return bcmerror;
40387653 } /* dhdpcie_bus_download_state */
7654
+
7655
+static int
7656
+dhdpcie_dongle_host_get_handshake_address(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7657
+{
7658
+ int bcmerror = BCME_OK;
7659
+
7660
+#ifndef HS_IN_TCM
7661
+ sbpcieregs_t *pcieregs;
7662
+
7663
+ pcieregs = si_setcore(sih, PCIE2_CORE_ID, 0);
7664
+ if (!pcieregs) {
7665
+ return BCME_ERROR;
7666
+ }
7667
+ addr->d2h = &pcieregs->u1.dar_64.d2h_msg_reg0;
7668
+ addr->h2d = &pcieregs->u1.dar_64.h2d_msg_reg0;
7669
+#else /* HS_IN_TCM */
7670
+ addr->d2h = (void *)HS_IN_TCM;
7671
+ addr->h2d = (volatile uint32 *)addr->d2h + 1;
7672
+#endif /* HS_IN_TCM */
7673
+
7674
+ return bcmerror;
7675
+} /* dhdpcie_dongle_host_get_handshake_address */
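/*
 * Two layouts sit behind hs_addrs_t as resolved above: by default the
 * handshake words are the DAR d2h/h2d message registers in PCIe core
 * register space; a build that defines HS_IN_TCM instead overlays two
 * consecutive 32-bit words at that TCM address:
 *
 *	hs_addrs_t hs;
 *	hs.d2h = (void *)HS_IN_TCM;               <- word 0: dongle -> host
 *	hs.h2d = (volatile uint32 *)hs.d2h + 1;   <- word 1: host -> dongle
 */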
7676
+
7677
+static int
7678
+dhdpcie_handshake_msg_reg_write(si_t *sih, osl_t *osh, volatile void *addr, uint *buffer)
7679
+{
7680
+ int bcmerror = BCME_OK;
7681
+
7682
+#ifndef HS_IN_TCM
7683
+ si_setcore(sih, PCIE2_CORE_ID, 0);
7684
+ W_REG(osh, (volatile uint32 *)addr, *buffer);
7685
+#else
7686
+ bcmerror = si_backplane_access(sih, addr, 4, buffer, FALSE);
7687
+#endif // endif
7688
+ return bcmerror;
7689
+} /* dhdpcie_handshake_msg_reg_write */
7690
+
7691
+static int
7692
+dhdpcie_handshake_msg_reg_read(si_t *sih, osl_t *osh, volatile void *addr, uint *buffer)
7693
+{
7694
+ int bcmerror = BCME_OK;
7695
+
7696
+#ifndef HS_IN_TCM
7697
+ si_setcore(sih, PCIE2_CORE_ID, 0);
7698
+ *buffer = R_REG(osh, (volatile uint32 *)addr);
7699
+#else
7700
+ bcmerror = si_backplane_access(sih, addr, 4, buffer, TRUE);
7701
+#endif // endif
7702
+ return bcmerror;
7703
+} /* dhdpcie_handshake_msg_reg_read */
7704
+
7705
+static int
7706
+dhdpcie_dongle_host_handshake_spinwait(si_t *sih, osl_t *osh, volatile void *addr, uint32 bitshift,
7707
+ uint32 us)
7708
+{
7709
+ uint32 countdown_;
7710
+ uint32 read_addr = 0;
7711
+ int bcmerror = BCME_OK;
7712
+
7713
+ for (countdown_ = (us) + (HS_POLL_PERIOD_US - 1U); countdown_ >= HS_POLL_PERIOD_US;
7714
+ countdown_ -= HS_POLL_PERIOD_US) {
7715
+
7716
+ bcmerror = dhdpcie_handshake_msg_reg_read(sih, osh, addr, &read_addr);
7717
+ if (bcmerror) {
7718
+ bcmerror = BCME_ERROR;
7719
+ break;
7720
+ }
7721
+
7722
+ if (isset(&read_addr, bitshift)) {
7723
+ bcmerror = BCME_OK;
7724
+ break;
7725
+ }
7726
+
7727
+ OSL_DELAY(HS_POLL_PERIOD_US);
7728
+ }
7729
+
7730
+ if (countdown_ < HS_POLL_PERIOD_US) { /* budget exhausted without seeing the bit */
7731
+ bcmerror = BCME_NOTREADY;
7732
+ }
7733
+
7734
+ return bcmerror;
7735
+} /* dhdpcie_dongle_host_handshake_spinwait */
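/*
 * The helper above is the bounded-poll idiom: sample the handshake word
 * every HS_POLL_PERIOD_US until the requested bit is set or the total
 * budget `us` drains, reporting BCME_NOTREADY on exhaustion. Equivalent
 * shape in plain C (standalone sketch, not driver API):
 *
 *	for (left = total_us; left >= period_us; left -= period_us) {
 *		if (read_word() & (1u << bit))
 *			return 0;        <- bit observed in time
 *		delay_us(period_us);
 *	}
 *	return -1;                       <- budget exhausted
 *
 * The post-loop check must use a strict '<' so that a bit observed on the
 * very last poll interval is not misreported as a timeout.
 */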
7736
+
7737
+static int
7738
+dhdpcie_dongle_host_pre_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7739
+{
7740
+ int bcmerror = BCME_OK;
7741
+ int h2d_reg = 0x00000000;
7742
+
7743
+ /* Host initialization for dongle to host handshake */
7744
+ bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7745
+ if (bcmerror) {
7746
+ goto err;
7747
+ }
7748
+
7749
+ bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h, D2H_READY_SHIFT,
7750
+ D2H_READY_TIMEOUT_US);
7751
+ if (!bcmerror) {
7752
+
7753
+ /* Set H2D_DL_START indication to dongle that Host shall start FW download */
7754
+ h2d_reg = 0;
7755
+ setbit(&h2d_reg, H2D_DL_START_SHIFT);
7756
+ bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7757
+ if (bcmerror) {
7758
+ goto err;
7759
+ }
7760
+ }
7761
+
7762
+err:
7763
+ return bcmerror;
7764
+} /* dhdpcie_dongle_host_pre_handshake */
7765
+
7766
+static int
7767
+dhdpcie_dongle_host_post_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7768
+{
7769
+ int bcmerror = BCME_OK;
7770
+ int h2d_reg = 0x00000000;
7771
+
7772
+ /* Reset download start */
7773
+ clrbit(&h2d_reg, H2D_DL_START_SHIFT);
7774
+
7775
+ /* download done */
7776
+ setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
7777
+ bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7778
+ if (bcmerror) {
7779
+ goto err;
7780
+ }
7781
+
7782
+ bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h,
7783
+ D2H_TRX_HDR_PARSE_DONE_SHIFT, D2H_TRX_HDR_PARSE_DONE_TIMEOUT_US);
7784
+
7785
+ if (bcmerror) {
7786
+ /* Host notification to bootloader to get reset on error */
7787
+ dhdpcie_handshake_msg_reg_read(sih, osh, addr->h2d, &h2d_reg);
7788
+ setbit(&h2d_reg, H2D_BL_RESET_ON_ERROR_SHIFT);
7789
+ dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7790
+ }
7791
+
7792
+err:
7793
+ return bcmerror;
7794
+} /* dhdpcie_dongle_host_post_handshake */
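/*
 * Error-path note for the exchange above: if the TRX-header-parse bit never
 * arrives, the host raises H2D_BL_RESET_ON_ERROR_SHIFT so the bootloader
 * can reset itself instead of being left mid-download; the original timeout
 * error is deliberately preserved as the return value.
 */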
7795
+
7796
+static int
7797
+dhdpcie_dongle_host_chk_validation(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7798
+{
7799
+ int bcmerror = BCME_OK;
7800
+ uint d2h_reg = 0x00000000;
7801
+ uint h2d_reg = 0x00000000;
7802
+
7803
+ bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h, D2H_VALDN_DONE_SHIFT,
7804
+ D2H_VALDN_DONE_TIMEOUT_US);
7805
+ if (!bcmerror) {
7806
+
7807
+ bcmerror = dhdpcie_handshake_msg_reg_read(sih, osh, addr->d2h, &d2h_reg);
7808
+ if (!bcmerror) {
7809
+
7810
+ if (isset(&d2h_reg, D2H_VALDN_RESULT_SHIFT)) {
7811
+ DHD_ERROR(("%s: TRX img validation check successful\n",
7812
+ __FUNCTION__));
7813
+ } else {
7814
+ DHD_ERROR(("%s: TRX img validation check failed\n", __FUNCTION__));
7815
+ bcmerror = BCME_ERROR;
7816
+ }
7817
+ }
7818
+ }
7819
+
7820
+ if (bcmerror) {
7821
+ /* Host notification to bootloader to get reset on error
7822
+ * To avoid the race condition between host and dongle
7823
+ */
7824
+ dhdpcie_handshake_msg_reg_read(sih, osh, addr->h2d, &h2d_reg);
7825
+ setbit(&h2d_reg, H2D_BL_RESET_ON_ERROR_SHIFT);
7826
+ dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7827
+ }
7828
+
7829
+ return bcmerror;
7830
+} /* dhdpcie_dongle_host_chk_validation */
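/*
 * Two distinct bits drive the check above: D2H_VALDN_DONE says the
 * bootloader finished validating (open mode with no signature, or secure
 * mode with a valid one), while D2H_VALDN_RESULT carries the verdict.
 * Decision table implied by the code:
 *
 *	DONE=0            -> BCME_NOTREADY (spinwait timeout)
 *	DONE=1, RESULT=1  -> BCME_OK, proceed to write vars
 *	DONE=1, RESULT=0  -> BCME_ERROR, request bootloader reset
 */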
7831
+
7832
+int
7833
+dhdpcie_dongle_host_pre_wd_reset_sequence(si_t *sih, osl_t *osh)
7834
+{
7835
+ int32 bcmerror = BCME_ERROR;
7836
+ sbpcieregs_t *pcieregs = NULL;
7837
+ uint32 reg_val = 0;
7838
+
7839
+ if (sih && osh) {
7840
+
7841
+ pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
7842
+
7843
+ /* Host initialization for dongle to host handshake */
7844
+ bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh,
7845
+ &pcieregs->u1.dar_64.h2d_msg_reg0, &reg_val);
7846
+ }
7847
+
7848
+ return bcmerror;
7849
+} /* dhdpcie_dongle_host_pre_wd_reset_sequence */
7850
+
7851
+int
7852
+dhdpcie_dongle_host_post_wd_reset_sequence(si_t *sih, osl_t *osh)
7853
+{
7854
+ int32 bcmerror = BCME_ERROR;
7855
+ sbpcieregs_t *pcieregs = NULL;
7856
+ uint32 reg_val = 0;
7857
+ int32 idx = 0;
7858
+ int print_interval = D2H_READY_WD_RESET_COUNT / 10;
7859
+
7860
+ if (sih && osh) {
7861
+ pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
7862
+
7863
+ /* Host initialization for dongle to host handshake */
7864
+ bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh,
7865
+ &pcieregs->u1.dar_64.h2d_msg_reg0, &reg_val);
7866
+
7867
+ for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
7868
+
7869
+#ifdef BCMQT
7870
+ OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
7871
+#else
7872
+ OSL_DELAY(D2H_READY_WD_RESET_US);
7873
+#endif // endif
7874
+ if (!(idx % print_interval)) {
7875
+ DHD_ERROR(("Waiting %d us for D2H_READY\n",
7876
+ idx * D2H_READY_WD_RESET_US));
7877
+ }
7878
+
7879
+ dhdpcie_handshake_msg_reg_read(sih, osh, &pcieregs->u1.dar_64.d2h_msg_reg0,
7880
+ &reg_val);
7881
+ if (isset(&reg_val, D2H_READY_SHIFT)) {
7882
+ break;
7883
+ }
7884
+ }
7885
+
7886
+ if (!idx) {
7887
+ DHD_ERROR(("%s: error - Waiting for D2H_READY timeout %d\n",
7888
+ __FUNCTION__, idx));
7889
+ } else {
7890
+ bcmerror = BCME_OK;
7891
+ }
7892
+ }
7893
+
7894
+ return bcmerror;
7895
+} /* dhdpcie_dongle_host_post_wd_reset_sequence */
7896
+
7897
+/* Pre ChipId access sequence making sure that H2D HS reg is cleared and
7898
+ * the host waits for the bootloader to be ready before ChipId access.
7899
+ */
7900
+int
7901
+dhdpcie_dongle_host_pre_chipid_access_sequence(osl_t *osh, volatile void *regva)
7902
+{
7903
+ int32 bcmerror = BCME_ERROR;
7904
+ sbpcieregs_t *pcieregs = NULL;
7905
+ uint32 reg_val = 0;
7906
+ int32 idx = 0;
7907
+ int print_interval = D2H_READY_WD_RESET_COUNT / 10;
7908
+
7909
+ if (osh && regva) {
7910
+
7911
+ pcieregs = (sbpcieregs_t*)(regva);
7912
+
7913
+ /* Host init for D2H handshake */
7914
+ W_REG(osh, &pcieregs->u1.dar_64.h2d_msg_reg0, reg_val);
7915
+
7916
+ /* Host waits for bootloader to be ready before ChipId access */
7917
+ for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
7918
+
7919
+#ifdef BCMQT
7920
+ OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
7921
+#else
7922
+ OSL_DELAY(D2H_READY_WD_RESET_US);
7923
+#endif // endif
7924
+ if (!(idx % print_interval)) {
7925
+ DHD_ERROR(("Waiting %d us for D2H_READY\n",
7926
+ idx * D2H_READY_WD_RESET_US));
7927
+ }
7928
+ reg_val = R_REG(osh, &pcieregs->u1.dar_64.d2h_msg_reg0);
7929
+ if (isset(&reg_val, D2H_READY_SHIFT)) {
7930
+ break;
7931
+ }
7932
+ }
7933
+
7934
+ if (!idx) {
7935
+ DHD_ERROR(("%s: error - Waiting for D2H_READY timeout %d\n",
7936
+ __FUNCTION__, idx));
7937
+ } else {
7938
+ bcmerror = BCME_OK;
7939
+ }
7940
+ }
7941
+
7942
+ return bcmerror;
7943
+} /* dhdpcie_dongle_host_pre_chipid_access_sequence */
7944
+
7945
+static int
7946
+dhdpcie_dongle_host_post_varswrite(dhd_bus_t *bus, hs_addrs_t *addr)
7947
+{
7948
+ int bcmerror = BCME_OK;
7949
+ uint h2d_reg = 0x00000000;
7950
+
7951
+ /* Set NVRAM done bit (Download done is already set) */
7952
+ setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
7953
+ setbit(&h2d_reg, H2D_DL_NVRAM_DONE_SHIFT);
7954
+ bcmerror = dhdpcie_handshake_msg_reg_write(bus->sih, bus->osh, addr->h2d, &h2d_reg);
7955
+
7956
+ return bcmerror;
7957
+} /* dhdpcie_dongle_host_post_varswrite */
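/*
 * End-to-end CYW55560 bootloader handshake, as stitched together by
 * dhdpcie_bus_download_state() (summary of the helpers above):
 *
 *	1. get_handshake_address  resolve the d2h/h2d message words
 *	2. pre_handshake          clear h2d, wait D2H_READY, set H2D_DL_START
 *	   ... firmware image download ...
 *	3. post_handshake         set H2D_DL_DONE, wait D2H_TRX_HDR_PARSE_DONE
 *	4. chk_validation         wait D2H_VALDN_DONE, check D2H_VALDN_RESULT
 *	   ... NVRAM vars written ...
 *	5. post_varswrite         set H2D_DL_DONE | H2D_DL_NVRAM_DONE; the
 *	                          bootloader jumps to mainline firmware
 *
 * Any failure after step 2 raises H2D_BL_RESET_ON_ERROR_SHIFT to return the
 * bootloader to a known state.
 */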
40397958
40407959 static int
40417960 dhdpcie_bus_write_vars(dhd_bus_t *bus)
....@@ -4045,7 +7964,7 @@
40457964 uint32 varaddr;
40467965 uint8 *vbuffer;
40477966 uint32 varsizew;
4048
-#if (defined DHD_DEBUG && !defined(CUSTOMER_HW_31_2))
7967
+#ifdef DHD_DEBUG
40497968 uint8 *nvram_ularray;
40507969 #endif /* DHD_DEBUG */
40517970
....@@ -4067,12 +7986,14 @@
40677986 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
40687987
40697988 /* Implement read back and verify later */
4070
-#if (defined DHD_DEBUG && !defined(CUSTOMER_HW_31_2))
7989
+#ifdef DHD_DEBUG
40717990 /* Verify NVRAM bytes */
40727991 DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
40737992 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
4074
- if (!nvram_ularray)
7993
+ if (!nvram_ularray) {
7994
+ MFREE(bus->dhd->osh, vbuffer, varsize);
40757995 return BCME_NOMEM;
7996
+ }
40767997
40777998 /* Upload image to verify downloaded contents. */
40787999 memset(nvram_ularray, 0xaa, varsize);
....@@ -4162,6 +8083,43 @@
41628083 /* Copy the passed variables, which should include the terminating double-null */
41638084 bcopy(arg, bus->vars, bus->varsz);
41648085
8086
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
8087
+ if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
8088
+ char *sp = NULL;
8089
+ char *ep = NULL;
8090
+ int i;
8091
+ char tag[2][8] = {"ccode=", "regrev="};
8092
+
8093
+ /* Find ccode and regrev info */
8094
+ for (i = 0; i < 2; i++) {
8095
+ sp = strnstr(bus->vars, tag[i], bus->varsz);
8096
+ if (!sp) {
8097
+ DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
8098
+ __FUNCTION__, bus->nv_path));
8099
+ bcmerror = BCME_ERROR;
8100
+ goto err;
8101
+ }
8102
+ sp = strchr(sp, '=');
8103
+ ep = strchr(sp, '\0');
8104
+ /* We assume that the string length of both ccode and
8105
+ * regrev values does not exceed WLC_CNTRY_BUF_SZ
8106
+ */
8107
+ if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
8108
+ sp++;
8109
+ while (*sp != '\0') {
8110
+ DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
8111
+ __FUNCTION__, tag[i], *sp));
8112
+ *sp++ = '0';
8113
+ }
8114
+ } else {
8115
+ DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
8116
+ __FUNCTION__, tag[i]));
8117
+ bcmerror = BCME_ERROR;
8118
+ goto err;
8119
+ }
8120
+ }
8121
+ }
8122
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
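/*
 * Worked example of the MFG-mode scrub above (values illustrative): given
 * "ccode=US" and "regrev=12" in the flattened NVRAM image, every character
 * after '=' is overwritten with '0', leaving "ccode=00" and "regrev=00";
 * the (ep - sp) <= WLC_CNTRY_BUF_SZ check guards the rewrite.
 */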
41658123
41668124 err:
41678125 return bcmerror;
....@@ -4259,6 +8217,7 @@
42598217 uint32
42608218 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
42618219 {
8220
+
42628221 uint8 pcie_cap;
42638222 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
42648223 uint32 reg_val;
....@@ -4319,24 +8278,100 @@
43198278 return 0;
43208279 }
43218280
8281
+void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8282
+{
8283
+ dhd_bus_t *bus;
8284
+ uint64 current_time = OSL_LOCALTIME_NS();
8285
+
8286
+ if (!dhd) {
8287
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
8288
+ return;
8289
+ }
8290
+
8291
+ bus = dhd->bus;
8292
+ if (!bus) {
8293
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
8294
+ return;
8295
+ }
8296
+
8297
+ bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
8298
+ bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8299
+ "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8300
+ "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
8301
+ bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
8302
+ bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
8303
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count);
8304
+#ifdef BCMPCIE_OOB_HOST_WAKE
8305
+ bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
8306
+ " oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
8307
+ " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
8308
+ " oob_irq_enabled=%d oob_gpio_level=%d\n",
8309
+ bus->oob_intr_count, bus->oob_intr_enable_count,
8310
+ bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
8311
+ GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
8312
+ GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
8313
+ dhdpcie_get_oob_irq_level());
8314
+#endif /* BCMPCIE_OOB_HOST_WAKE */
8315
+ bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
8316
+ " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
8317
+ " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
8318
+ "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
8319
+ " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
8320
+ " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
8321
+ "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
8322
+ "last_d3_inform_time="SEC_USEC_FMT"\n",
8323
+ GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
8324
+ GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
8325
+ GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
8326
+ GET_SEC_USEC(bus->last_process_ctrlbuf_time),
8327
+ GET_SEC_USEC(bus->last_process_flowring_time),
8328
+ GET_SEC_USEC(bus->last_process_txcpl_time),
8329
+ GET_SEC_USEC(bus->last_process_rxcpl_time),
8330
+ GET_SEC_USEC(bus->last_process_infocpl_time),
8331
+ GET_SEC_USEC(bus->last_process_edl_time),
8332
+ GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
8333
+ GET_SEC_USEC(bus->last_d3_inform_time));
8334
+
8335
+ bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
8336
+ SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
8337
+ SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
8338
+ GET_SEC_USEC(bus->last_suspend_end_time),
8339
+ GET_SEC_USEC(bus->last_resume_start_time),
8340
+ GET_SEC_USEC(bus->last_resume_end_time));
8341
+
8342
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
8343
+ bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
8344
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
8345
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
8346
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
8347
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
8348
+ GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
8349
+ GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
8350
+ GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
8351
+ GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
8352
+ GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
8353
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
8354
+}
8355
+
43228356 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
43238357 {
43248358 uint32 intstatus = 0;
43258359 uint32 intmask = 0;
4326
- uint32 mbintstatus = 0;
8360
+ uint32 d2h_db0 = 0;
43278361 uint32 d2h_mb_data = 0;
43288362
4329
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4330
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
4331
- mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
8363
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8364
+ dhd->bus->pcie_mailbox_int, 0, 0);
8365
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8366
+ dhd->bus->pcie_mailbox_mask, 0, 0);
8367
+ d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
43328368 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
43338369
4334
- bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
4335
- intstatus, intmask, mbintstatus);
8370
+ bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
8371
+ intstatus, intmask, d2h_db0);
43368372 bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
43378373 d2h_mb_data, dhd->bus->def_intmask);
43388374 }
4339
-
43408375 /** Add bus dump output to a buffer */
43418376 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
43428377 {
....@@ -4344,19 +8379,63 @@
43448379 int ix = 0;
43458380 flow_ring_node_t *flow_ring_node;
43468381 flow_info_t *flow_info;
4347
- char eabuf[ETHER_ADDR_STR_LEN];
8382
+#ifdef TX_STATUS_LATENCY_STATS
8383
+ uint8 ifindex;
8384
+ if_flow_lkup_t *if_flow_lkup;
8385
+ dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
8386
+#endif /* TX_STATUS_LATENCY_STATS */
43488387
43498388 if (dhdp->busstate != DHD_BUS_DATA)
43508389 return;
43518390
8391
+#ifdef TX_STATUS_LATENCY_STATS
8392
+ memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
8393
+#endif /* TX_STATUS_LATENCY_STATS */
8394
+#ifdef DHD_WAKE_STATUS
8395
+ bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
8396
+ bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
8397
+ dhdp->bus->wake_counts.rcwake);
8398
+#ifdef DHD_WAKE_RX_STATUS
8399
+ bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n",
8400
+ dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
8401
+ dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
8402
+ bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
8403
+ dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
8404
+ dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
8405
+ bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
8406
+ dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
8407
+ dhdp->bus->wake_counts.rx_icmpv6_ns);
8408
+#endif /* DHD_WAKE_RX_STATUS */
8409
+#ifdef DHD_WAKE_EVENT_STATUS
8410
+ for (flowid = 0; flowid < WLC_E_LAST; flowid++)
8411
+ if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
8412
+ bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
8413
+ dhdp->bus->wake_counts.rc_event[flowid]);
8414
+ bcm_bprintf(strbuf, "\n");
8415
+#endif /* DHD_WAKE_EVENT_STATUS */
8416
+#endif /* DHD_WAKE_STATUS */
8417
+
43528418 dhd_prot_print_info(dhdp, strbuf);
43538419 dhd_dump_intr_registers(dhdp, strbuf);
8420
+ dhd_dump_intr_counters(dhdp, strbuf);
8421
+ bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
8422
+ dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
43548423 bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
8424
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
8425
+ bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
8426
+ dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
8427
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
43558428 bcm_bprintf(strbuf,
4356
- "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
8429
+ "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
43578430 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
4358
- "Overflows", "RD", "WR");
4359
- bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
8431
+ " Overflows", " RD", " WR");
8432
+
8433
+#ifdef TX_STATUS_LATENCY_STATS
8434
+ /* Average Tx status/Completion Latency in micro secs */
8435
+ bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
8436
+#endif /* TX_STATUS_LATENCY_STATS */
8437
+
8438
+ bcm_bprintf(strbuf, "\n");
43608439
43618440 for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
43628441 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
....@@ -4365,22 +8444,253 @@
43658444
43668445 flow_info = &flow_ring_node->flow_info;
43678446 bcm_bprintf(strbuf,
4368
- "%3d. %4d %2d %4d %17s %4d %4d %6d %10u ", ix++,
8447
+ "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
43698448 flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
4370
- bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
8449
+ MAC2STRDBG(flow_info->da),
43718450 DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
43728451 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
43738452 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
43748453 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
43758454 dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
43768455 "%4d %4d ");
4377
- bcm_bprintf(strbuf,
4378
- "%5s %6s %5s\n", "NA", "NA", "NA");
8456
+
8457
+#ifdef TX_STATUS_LATENCY_STATS
8458
+ bcm_bprintf(strbuf, "%16d %16d ",
8459
+ flow_info->num_tx_pkts,
8460
+ flow_info->num_tx_status ?
8461
+ DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
8462
+ flow_info->num_tx_status) : 0);
8463
+
8464
+ ifindex = flow_info->ifindex;
8465
+ ASSERT(ifindex < DHD_MAX_IFS);
8466
+ if (ifindex < DHD_MAX_IFS) {
8467
+ if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
8468
+ if_tx_status_latency[ifindex].cum_tx_status_latency +=
8469
+ flow_info->cum_tx_status_latency;
8470
+ } else {
8471
+ DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
8472
+ __FUNCTION__, ifindex, flowid));
8473
+ }
8474
+#endif /* TX_STATUS_LATENCY_STATS */
8475
+ bcm_bprintf(strbuf, "\n");
43798476 }
8477
+
8478
+#ifdef TX_STATUS_LATENCY_STATS
8479
+ bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
8480
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8481
+ for (ix = 0; ix < DHD_MAX_IFS; ix++) {
8482
+ if (!if_flow_lkup[ix].status) {
8483
+ continue;
8484
+ }
8485
+ bcm_bprintf(strbuf, "%2d %16d %16d\n",
8486
+ ix,
8487
+ if_tx_status_latency[ix].num_tx_status ?
8488
+ DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
8489
+ if_tx_status_latency[ix].num_tx_status): 0,
8490
+ if_tx_status_latency[ix].num_tx_status);
8491
+ }
8492
+#endif /* TX_STATUS_LATENCY_STATS */
8493
+
8494
+#ifdef DHD_HP2P
8495
+ if (dhdp->hp2p_capable) {
8496
+ bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1");
8497
+
8498
+ for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
8499
+ hp2p_info_t *hp2p_info;
8500
+ int bin;
8501
+
8502
+ hp2p_info = &dhdp->hp2p_info[flowid];
8503
+ if (hp2p_info->num_timer_start == 0)
8504
+ continue;
8505
+
8506
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
8507
+ bcm_bprintf(strbuf, "\n%s", "Bin");
8508
+
8509
+ for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
8510
+ bcm_bprintf(strbuf, "\n%2d %20d %16d", bin,
8511
+ hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
8512
+ }
8513
+
8514
+ bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0");
8515
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
8516
+ bcm_bprintf(strbuf, "\n%s", "Bin");
8517
+
8518
+ for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
8519
+ bcm_bprintf(strbuf, "\n%d %20d", bin,
8520
+ hp2p_info->rx_t0[bin]);
8521
+ }
8522
+
8523
+ bcm_bprintf(strbuf, "\n%s %16s %16s",
8524
+ "Packet limit", "Timer limit", "Timer start");
8525
+ bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
8526
+ hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
8527
+ }
8528
+
8529
+ bcm_bprintf(strbuf, "\n");
8530
+ }
8531
+#endif /* DHD_HP2P */
8532
+
43808533 bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
43818534 bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
43828535 bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
8536
+ if (dhdp->d2h_hostrdy_supported) {
8537
+ bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
8538
+ }
8539
+ bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
8540
+ dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
43838541 }
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+bool
+dhd_axi_sig_match(dhd_pub_t *dhdp)
+{
+	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
+
+	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
+		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
+		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
+		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
+	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
+	    axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
+		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
+			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
+		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+			return TRUE;
+		} else {
+			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
+				__FUNCTION__, axi_signature));
+			return FALSE;
+		}
+	} else {
+		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
+		return FALSE;
+	}
+}
+
+void
+dhd_axi_error(dhd_pub_t *dhdp)
+{
+	dhd_axi_error_dump_t *axi_err_dump;
+	uint8 *axi_err_buf = NULL;
+	uint8 *p_axi_err = NULL;
+	uint32 axi_logbuf_addr;
+	uint32 axi_tcm_addr;
+	int err, size;
+
+	OSL_DELAY(75000);
+
+	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
+	if (!axi_logbuf_addr) {
+		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
+		goto sched_axi;
+	}
+
+	axi_err_dump = dhdp->axi_err_dump;
+	if (!axi_err_dump) {
+		goto sched_axi;
+	}
+
+	if (!dhd_axi_sig_match(dhdp)) {
+		goto sched_axi;
+	}
+
+	/* Reading AXI error data for SMMU fault */
+	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
+	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
+	size = sizeof(hnd_ext_trap_axi_error_v1_t);
+	axi_err_buf = MALLOCZ(dhdp->osh, size);
+	if (axi_err_buf == NULL) {
+		DHD_ERROR(("%s: out of memory!\n", __FUNCTION__));
+		goto sched_axi;
+	}
+
+	p_axi_err = axi_err_buf;
+	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
+	if (err) {
+		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+			__FUNCTION__, err, size, axi_tcm_addr));
+		goto sched_axi;
+	}
+
+	/* Dump data to dmesg */
+	dhd_log_dump_axi_error(axi_err_buf);
+	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
+	if (err) {
+		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
+			__FUNCTION__, err));
+	}
+
+sched_axi:
+	if (axi_err_buf) {
+		MFREE(dhdp->osh, axi_err_buf, size);
+	}
+	dhd_schedule_axi_error_dump(dhdp, NULL);
+}
+
+static void
+dhd_log_dump_axi_error(uint8 *axi_err)
+{
+	dma_dentry_v1_t dma_dentry;
+	dma_fifo_v1_t dma_fifo;
+	int i = 0, j = 0;
+
+	if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
+		hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
+		DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
+		DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
+		DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
+		DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
+			__FUNCTION__, axi_err_v1->dma_fifo_valid_count));
+		DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
+			__FUNCTION__, axi_err_v1->axi_errorlog_status));
+		DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
+			__FUNCTION__, axi_err_v1->axi_errorlog_core));
+		DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
+			__FUNCTION__, axi_err_v1->axi_errorlog_hi));
+		DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
+			__FUNCTION__, axi_err_v1->axi_errorlog_lo));
+		DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
+			__FUNCTION__, axi_err_v1->axi_errorlog_id));
+
+		for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
+			dma_fifo = axi_err_v1->dma_fifo[i];
+			DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
+			DHD_ERROR(("%s: direction:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.direction));
+			DHD_ERROR(("%s: index:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.index));
+			DHD_ERROR(("%s: dpa:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.dpa));
+			DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.desc_lo));
+			DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.desc_hi));
+			DHD_ERROR(("%s: din:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.din));
+			DHD_ERROR(("%s: dout:%d : 0x%x\n",
+				__FUNCTION__, i, dma_fifo.dout));
+			for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
+				dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
+				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
+					__FUNCTION__, i, dma_dentry.ctrl1));
+				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
+					__FUNCTION__, i, dma_dentry.ctrl2));
+				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
+					__FUNCTION__, i, dma_dentry.addrlo));
+				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
+					__FUNCTION__, i, dma_dentry.addrhi));
+			}
+		}
+	} else {
+		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
+	}
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
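/* Illustrative sketch (not part of the driver): the validation pattern used
 * by dhd_axi_sig_match() above -- bounds-check a dongle-supplied TCM pointer
 * against [dongle_ram_base, dongle_ram_base + ramsize), then verify a magic
 * signature before trusting the record -- generalized. The helper name and
 * the tcm_read32 callback are hypothetical stand-ins for dhdpcie_bus_rtcm32().
 */
#if 0
static bool tcm_record_valid(uint32 addr, uint32 ram_base, uint32 ram_size,
	uint32 magic, uint32 (*tcm_read32)(uint32 addr))
{
	/* Reject pointers that fall outside the dongle RAM window */
	if (addr < ram_base || addr >= ram_base + ram_size)
		return FALSE;
	/* Trust the record only if its first word carries the magic value */
	return tcm_read32(addr) == magic;
}
#endif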
 
 /**
  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
@@ -4394,9 +8704,14 @@
 	flow_ring_node_t *flow_ring_node;
 	struct dhd_bus *bus = dhd->bus;
 
+	if (dhd_query_bus_erros(dhd)) {
+		return;
+	}
+
+	/* Hold flowring_list_lock to avoid races while traversing the list */
 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
-	for (item = dll_head_p(&bus->const_flowring);
-	    (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item));
+	for (item = dll_head_p(&bus->flowring_active_list);
+	    (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
 	    item = next) {
 		if (dhd->hang_was_sent) {
 			break;
@@ -4428,177 +8743,17 @@
 	if (bus->db1_for_mb) {
 		/* this is a pcie core register, not the config register */
 		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
-		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+		if (DAR_PWRREQ(bus)) {
+			dhd_bus_pcie_pwr_req(bus);
+		}
+		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
+			~0, 0x12345678);
 	} else {
 		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
 	}
 }
-
-static void
-dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
-{
-	if (bus->device_wake_state != val)
-	{
-		DHD_INFO(("Set Device_Wake to %d\n", val));
-#ifdef PCIE_OOB
-		if (bus->oob_enabled)
-		{
-			if (val)
-			{
-				gpio_port = gpio_port | (1 << DEVICE_WAKE);
-				gpio_write_port_non_block(gpio_handle_val, gpio_port);
-			} else {
-				gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
-				gpio_write_port_non_block(gpio_handle_val, gpio_port);
-			}
-		}
-#endif /* PCIE_OOB */
-		bus->device_wake_state = val;
-	}
-}
-
-#ifdef PCIE_OOB
-void
-dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
-{
-	DHD_INFO(("Set Device_Wake to %d\n", val));
-	if (val)
-	{
-		gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
-		gpio_write_port(gpio_handle_val, gpio_port);
-	} else {
-		gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
-		gpio_write_port(gpio_handle_val, gpio_port);
-	}
-}
-
-int
-dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
-{
-	int ret;
-	uint8 val;
-	ret = gpio_read_port(gpio_handle_val, &val);
-
-	if (ret < 0) {
-		DHD_ERROR(("gpio_read_port returns %d\n", ret));
-		return ret;
-	}
-
-	if (val & (1 << BIT_BT_REG_ON))
-	{
-		ret = 1;
-	} else {
-		ret = 0;
-	}
-
-	return ret;
-}
-
-static void
-dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
-{
-	if (dhd_doorbell_timeout)
-		dhd_timeout_start(&bus->doorbell_timer,
-			(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
-	else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND))
-		dhd_bus_set_device_wake(bus, FALSE);
-}
-#endif /* PCIE_OOB */
-
-/** mailbox doorbell ring function */
-void
-dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
-{
-	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
-	    (bus->sih->buscorerev == 4)) {
-		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
-	} else {
-		/* this is a pcie core register, not the config regsiter */
-		DHD_INFO(("writing a door bell to the device\n"));
-		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
-	}
-}
-
-void
-dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
-{
-#ifdef PCIE_OOB
-	dhd_bus_set_device_wake(bus, TRUE);
-	dhd_bus_doorbell_timeout_reset(bus);
-#endif
-	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
-}
-
-static void
-dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
-{
-	uint32 w;
-	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
-	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
-}
-
-dhd_mb_ring_t
-dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
-{
-	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
-	    (bus->sih->buscorerev == 4)) {
-		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
-			PCIMailBoxInt);
-		if (bus->pcie_mb_intr_addr) {
-			bus->pcie_mb_intr_osh = si_osh(bus->sih);
-			return dhd_bus_ringbell_oldpcie;
-		}
-	} else {
-		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
-			PCIH2D_MailBox);
-		if (bus->pcie_mb_intr_addr) {
-			bus->pcie_mb_intr_osh = si_osh(bus->sih);
-			return dhdpcie_bus_ringbell_fast;
-		}
-	}
-	return dhd_bus_ringbell;
-}
-
-bool BCMFASTPATH
-dhd_bus_dpc(struct dhd_bus *bus)
-{
-	bool resched = FALSE;	/* Flag indicating resched wanted */
-	unsigned long flags;
-
-	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
-	DHD_GENERAL_LOCK(bus->dhd, flags);
-	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
-	 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
-	 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
-	 * and if we return from here, then IOCTL response will never be handled
-	 */
-	if (bus->dhd->busstate == DHD_BUS_DOWN) {
-		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
-		bus->intstatus = 0;
-		DHD_GENERAL_UNLOCK(bus->dhd, flags);
-		return 0;
-	}
-	bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC;
-	DHD_GENERAL_UNLOCK(bus->dhd, flags);
-
-	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
-	if (!resched) {
-		bus->intstatus = 0;
-		dhdpcie_bus_intr_enable(bus);
-	}
-
-	DHD_GENERAL_LOCK(bus->dhd, flags);
-	bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC;
-	dhd_os_busbusy_wake(bus->dhd);
-	DHD_GENERAL_UNLOCK(bus->dhd, flags);
-
-	return resched;
-
-}
-
 
 /* Upon receiving a mailbox interrupt,
  * if H2D_FW_TRAP bit is set in mailbox location
@@ -4613,13 +8768,262 @@
 	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
 }
 
+/** mailbox doorbell ring function */
+void
+dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
+{
+	/* Skip after sending D3_INFORM */
+	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+			__FUNCTION__, bus->bus_low_power_state));
+		return;
+	}
+
+	/* Skip in the case of link down */
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+	    (bus->sih->buscorerev == 4)) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
+			PCIE_INTB, PCIE_INTB);
+	} else {
+		/* this is a pcie core register, not the config register */
+		DHD_INFO(("writing a door bell to the device\n"));
+		if (IDMA_ACTIVE(bus->dhd)) {
+			if (DAR_PWRREQ(bus)) {
+				dhd_bus_pcie_pwr_req(bus);
+			}
+			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
+				~0, value);
+		} else {
+			if (DAR_PWRREQ(bus)) {
+				dhd_bus_pcie_pwr_req(bus);
+			}
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
+		}
+	}
+}
+
+/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
+void
+dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
+{
+	/* this is a pcie core register, not the config register */
+	/* Skip after sending D3_INFORM */
+	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+			__FUNCTION__, bus->bus_low_power_state));
+		return;
+	}
+
+	/* Skip in the case of link down */
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_INFO(("writing a door bell 2 to the device\n"));
+	if (DAR_PWRREQ(bus)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
+		~0, value);
+}
+
+void
+dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+{
+	/* Skip after sending D3_INFORM */
+	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+			__FUNCTION__, bus->bus_low_power_state));
+		return;
+	}
+
+	/* Skip in the case of link down */
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
+	if (DAR_PWRREQ(bus)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+
+#ifdef DHD_DB0TS
+	if (bus->dhd->db0ts_capable) {
+		uint64 ts;
+
+		ts = local_clock();
+		do_div(ts, 1000);
+
+		value = htol32(ts & 0xFFFFFFFF);
+		DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
+	}
+#endif /* DHD_DB0TS */
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
+}
+
+void
+dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
+{
+	/* Skip after sending D3_INFORM */
+	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+			__FUNCTION__, bus->bus_low_power_state));
+		return;
+	}
+
+	/* Skip in the case of link down */
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
+	if (DAR_PWRREQ(bus)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
+}
 
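/* Illustrative sketch (not part of the driver): every doorbell-ring variant
 * above repeats the same two guards -- refuse to ring once D3_INFORM has been
 * sent, and refuse when the PCIe link is down -- before touching the register.
 * A hypothetical shared helper expressing that pattern:
 */
#if 0
static bool dhd_bus_may_ring_db(struct dhd_bus *bus)
{
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return FALSE;	/* suspended: the dongle may already be in D3 */
	}
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return FALSE;	/* an MMIO write now could fault on a dead link */
	}
	return TRUE;
}
#endif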
 static void
+dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
+{
+	uint32 w;
+	/* Skip after sending D3_INFORM */
+	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
+			__FUNCTION__, bus->bus_low_power_state));
+		return;
+	}
+
+	/* Skip in the case of link down */
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
+	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
+}
+
+dhd_mb_ring_t
+dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+	    (bus->sih->buscorerev == 4)) {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			bus->pcie_mailbox_int);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhd_bus_ringbell_oldpcie;
+		}
+	} else {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			dhd_bus_db0_addr_get(bus));
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhdpcie_bus_ringbell_fast;
+		}
+	}
+	return dhd_bus_ringbell;
+}
+
+dhd_mb_ring_2_t
+dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
+{
+	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+		dhd_bus_db0_addr_2_get(bus));
+	if (bus->pcie_mb_intr_2_addr) {
+		bus->pcie_mb_intr_osh = si_osh(bus->sih);
+		return dhdpcie_bus_ringbell_2_fast;
+	}
+	return dhd_bus_ringbell_2;
+}
+
+bool BCMFASTPATH
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched = FALSE;	/* Flag indicating resched wanted */
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->dpc_entry_time = OSL_LOCALTIME_NS();
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	/* Check only for DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS,
+	 * to avoid an "IOCTL Resumed On timeout" when an ioctl is waiting for a
+	 * response while rmmod runs in parallel (which sets
+	 * DHD_BUS_DOWN_IN_PROGRESS); if we returned here in that case, the
+	 * IOCTL response would never be handled.
+	 */
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		bus->dpc_return_busdown_count++;
+		return 0;
+	}
+#ifdef DHD_PCIE_RUNTIMEPM
+	bus->idlecount = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
+	if (!resched) {
+		bus->intstatus = 0;
+		bus->dpc_intr_enable_count++;
+		/* For Linux, MacOS etc. (other than NDIS), re-enable the host
+		 * interrupts that were disabled in dhdpcie_bus_isr()
+		 */
+		dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
+		bus->dpc_exit_time = OSL_LOCALTIME_NS();
+	} else {
+		bus->resched_dpc_time = OSL_LOCALTIME_NS();
+	}
+
+	bus->dpc_sched = resched;
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return resched;
+}
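/* Illustrative sketch (not part of the driver): the DPC contract above is
 * that host interrupts stay disabled from dhdpcie_bus_isr() until the DPC
 * drains its bounded batch of work, and the return value asks the caller to
 * reschedule when the tx/rx bounds were hit. A minimal caller loop, assuming
 * a hypothetical context that may simply re-invoke the DPC:
 */
#if 0
static void dpc_loop(struct dhd_bus *bus)
{
	bool resched;

	do {
		resched = dhd_bus_dpc(bus);	/* process one bounded batch */
	} while (resched);			/* bounds hit: run another pass */
	/* dhd_bus_dpc() re-enabled the IRQ on the final, non-resched pass */
}
#endif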
+
+int
 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
 {
 	uint32 cur_h2d_mb_data = 0;
 
 	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
+		DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
+			h2d_mb_data));
+		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
+		{
+			if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
+				DHD_ERROR(("failure sending the H2D Mailbox message "
+					"to firmware\n"));
+				goto fail;
+			}
+		}
+		goto done;
+	}
+
 	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
 
 	if (cur_h2d_mb_data != 0) {
@@ -4640,8 +9044,10 @@
 	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
 	dhd_bus_gen_devmb_intr(bus);
 
+done:
 	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
 		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
 		bus->d3_inform_cnt++;
 	}
 	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
@@ -4652,6 +9058,110 @@
 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
 		bus->d0_inform_cnt++;
 	}
+	return BCME_OK;
+fail:
+	return BCME_ERROR;
+}
+
+static void
+dhd_bus_handle_d3_ack(dhd_bus_t *bus)
+{
+	unsigned long flags_bus;
+	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+	bus->suspend_intr_disable_count++;
+	/* Disable dongle interrupts immediately after D3 */
+
+	/* For Linux, MacOS etc. (other than NDIS), along with disabling the
+	 * dongle interrupt by clearing the IntMask, disable the interrupt on
+	 * the host side as well. Also clear the intstatus if it is set, to
+	 * avoid unnecessary interrupts after the D3 ACK.
+	 */
+	dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
+	dhdpcie_bus_clear_intstatus(bus);
+	dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
+
+	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+		/* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
+		bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
+		DHD_ERROR(("%s: D3_ACK received\n", __FUNCTION__));
+	}
+	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
+	/* Check the D3 ACK induce flag, which is set by firing a dhd iovar to
+	 * induce a D3 Ack timeout. If the flag is set, the D3 wake is skipped,
+	 * which results in a D3 Ack timeout.
+	 */
+	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+		bus->wait_for_d3_ack = 1;
+		dhd_os_d3ack_wake(bus->dhd);
+	} else {
+		DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
+	}
+}
+
+void
+dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
+{
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+
+	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+
+	if (d2h_mb_data & D2H_DEV_FWHALT) {
+		DHD_ERROR(("FW trap has happened\n"));
+		dhdpcie_checkdied(bus, NULL, 0);
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
+#endif /* OEM_ANDROID */
+		goto exit;
+	}
+	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
+		bool ds_acked = FALSE;
+		BCM_REFERENCE(ds_acked);
+		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
+			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			goto exit;
+		}
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+		{
+			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+			DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+		}
+	}
+	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+	}
+	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
+	}
+	if (d2h_mb_data & D2H_DEV_D3_ACK) {
+		/* what should we do */
+		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
+		if (!bus->wait_for_d3_ack) {
+#if defined(DHD_HANG_SEND_UP_TEST)
+			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+			} else {
+				dhd_bus_handle_d3_ack(bus);
+			}
+#else /* DHD_HANG_SEND_UP_TEST */
+			dhd_bus_handle_d3_ack(bus);
+#endif /* DHD_HANG_SEND_UP_TEST */
+		}
+	}
+
+exit:
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+	}
 }
 
 static void
@@ -4659,16 +9169,27 @@
 {
 	uint32 d2h_mb_data = 0;
 	uint32 zero = 0;
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+
 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
-	if (!d2h_mb_data) {
-		DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
+	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
+		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
 			__FUNCTION__, d2h_mb_data));
-		return;
+		goto exit;
 	}
 
 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
 
-	DHD_INFO_HW4(("D2H_MB_DATA: 0x%08x\n", d2h_mb_data));
+	DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+	if (d2h_mb_data & D2H_DEV_FWHALT) {
+		DHD_ERROR(("FW trap has happened\n"));
+		dhdpcie_checkdied(bus, NULL, 0);
+		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
+		goto exit;
+	}
 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
 		/* what should we do */
 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
@@ -4683,15 +9204,51 @@
 		/* what should we do */
 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
 		if (!bus->wait_for_d3_ack) {
-			bus->wait_for_d3_ack = 1;
-			dhd_os_d3ack_wake(bus->dhd);
+#if defined(DHD_HANG_SEND_UP_TEST)
+			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+			} else {
+				dhd_bus_handle_d3_ack(bus);
+			}
+#else /* DHD_HANG_SEND_UP_TEST */
+			dhd_bus_handle_d3_ack(bus);
+#endif /* DHD_HANG_SEND_UP_TEST */
 		}
 	}
-	if (d2h_mb_data & D2H_DEV_FWHALT) {
-		DHD_ERROR(("FW trap has happened\n"));
-		dhdpcie_checkdied(bus, NULL, 0);
-		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
-		bus->dhd->busstate = DHD_BUS_DOWN;
+
+exit:
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+	}
+}
+
+static void
+dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
+{
+	uint32 d2h_mb_data = 0;
+	uint32 zero = 0;
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+		return;
+	}
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+
+	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+	if (!d2h_mb_data) {
+		goto exit;
+	}
+
+	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
+
+	dhd_bus_handle_mb_data(bus, d2h_mb_data);
+
+exit:
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
 	}
 }
@@ -4699,7 +9256,11 @@
 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
 {
 	bool resched = FALSE;
+	unsigned long flags_bus;
 
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
 	    (bus->sih->buscorerev == 4)) {
 		/* Msg stream interrupt */
@@ -4710,45 +9271,148 @@
 		}
 	} else {
 		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
-			dhdpcie_handle_mb_data(bus);
+			bus->api.handle_mb_data(bus);
 
-		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+		/* Do not process any rings after receiving D3_ACK */
+		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+			DHD_ERROR(("%s: D3 Ack received. "
+				"Skip processing rest of ring buffers.\n", __FUNCTION__));
+			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
 			goto exit;
 		}
+		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
 
-		if (intstatus & PCIE_MB_D2H_MB_MASK) {
+		/* Validate intstatus only for the INTX case */
+		if ((bus->d2h_intr_method == PCIE_MSI) ||
+		    ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+			if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
+				resched = dhdpci_bus_read_frames(bus);
+				pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+				pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+			}
+#else
 			resched = dhdpci_bus_read_frames(bus);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
 		}
 	}
 
 exit:
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+	}
 	return resched;
 }
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
+{
+	unsigned long time_elapsed;
+
+	/* Poll for the timeout value periodically */
+	if ((bus->dhd->busstate == DHD_BUS_DATA) &&
+	    (bus->dhd->dhd_rte_time_sync_ms != 0) &&
+	    (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
+		time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
+		/* The comparison time is in milliseconds */
+		if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
+			/*
+			 * It is fine if it has crossed the timeout value; no
+			 * need to adjust the elapsed time.
+			 */
+			bus->dhd_rte_time_sync_count += time_elapsed;
+
+			/* Schedule deferred work. The work function will send the IOVAR. */
+			dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
+		}
+	}
+}
+#endif /* DHD_H2D_LOG_TIME_SYNC */
 
47299335 dhdpci_bus_read_frames(dhd_bus_t *bus)
47309336 {
47319337 bool more = FALSE;
9338
+ unsigned long flags_bus;
9339
+
9340
+ /* First check if there a FW trap */
9341
+ if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
9342
+ (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
9343
+#ifdef DNGL_AXI_ERROR_LOGGING
9344
+ if (bus->dhd->axi_error) {
9345
+ DHD_ERROR(("AXI Error happened\n"));
9346
+ return FALSE;
9347
+ }
9348
+#endif /* DNGL_AXI_ERROR_LOGGING */
9349
+ dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
9350
+ return FALSE;
9351
+ }
47329352
47339353 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
47349354 DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9355
+
47359356 dhd_prot_process_ctrlbuf(bus->dhd);
9357
+ bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
47369358 /* Unlock to give chance for resp to be handled */
47379359 DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9360
+
9361
+ /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
9362
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9363
+ if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
9364
+ DHD_ERROR(("%s: Bus is in power save state (%d). "
9365
+ "Skip processing rest of ring buffers.\n",
9366
+ __FUNCTION__, bus->bus_low_power_state));
9367
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9368
+ return FALSE;
9369
+ }
9370
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
47389371
47399372 DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
47409373 /* update the flow ring cpls */
47419374 dhd_update_txflowrings(bus->dhd);
9375
+ bus->last_process_flowring_time = OSL_LOCALTIME_NS();
47429376
47439377 /* With heavy TX traffic, we could get a lot of TxStatus
47449378 * so add bound
47459379 */
4746
- more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
9380
+#ifdef DHD_HP2P
9381
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
9382
+#endif /* DHD_HP2P */
9383
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
9384
+ bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
47479385
47489386 /* With heavy RX traffic, this routine potentially could spend some time
47499387 * processing RX frames without RX bound
47509388 */
4751
- more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
9389
+#ifdef DHD_HP2P
9390
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
9391
+#endif /* DHD_HP2P */
9392
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
9393
+ bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
9394
+
9395
+ /* Process info ring completion messages */
9396
+#ifdef EWP_EDL
9397
+ if (!bus->dhd->dongle_edl_support)
9398
+#endif // endif
9399
+ {
9400
+ more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
9401
+ bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
9402
+ }
9403
+#ifdef EWP_EDL
9404
+ else {
9405
+ more |= dhd_prot_process_msgbuf_edl(bus->dhd);
9406
+ bus->last_process_edl_time = OSL_LOCALTIME_NS();
9407
+ }
9408
+#endif /* EWP_EDL */
9409
+
9410
+#ifdef IDLE_TX_FLOW_MGMT
9411
+ if (bus->enable_idle_flowring_mgmt) {
9412
+ /* Look for idle flow rings */
9413
+ dhd_bus_check_idle_scan(bus);
9414
+ }
9415
+#endif /* IDLE_TX_FLOW_MGMT */
47529416
47539417 /* don't talk to the dongle if fw is about to be reloaded */
47549418 if (bus->dhd->hang_was_sent) {
....@@ -4756,6 +9420,38 @@
47569420 }
47579421 DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
47589422
9423
+#ifdef SUPPORT_LINKDOWN_RECOVERY
9424
+ if (bus->read_shm_fail) {
9425
+ /* Read interrupt state once again to confirm linkdown */
9426
+ int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
9427
+ bus->pcie_mailbox_int, 0, 0);
9428
+ if (intstatus != (uint32)-1) {
9429
+ DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
9430
+#ifdef DHD_FW_COREDUMP
9431
+ if (bus->dhd->memdump_enabled) {
9432
+ DHD_OS_WAKE_LOCK(bus->dhd);
9433
+ bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
9434
+ dhd_bus_mem_dump(bus->dhd);
9435
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
9436
+ }
9437
+#endif /* DHD_FW_COREDUMP */
9438
+ } else {
9439
+ DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
9440
+#ifdef CONFIG_ARCH_MSM
9441
+ bus->no_cfg_restore = 1;
9442
+#endif /* CONFIG_ARCH_MSM */
9443
+ bus->is_linkdown = 1;
9444
+ }
9445
+
9446
+ dhd_prot_debug_info_print(bus->dhd);
9447
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
9448
+ dhd_os_send_hang_message(bus->dhd);
9449
+ more = FALSE;
9450
+ }
9451
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
9452
+#if defined(DHD_H2D_LOG_TIME_SYNC)
9453
+ dhdpci_bus_rte_log_time_sync_poll(bus);
9454
+#endif /* DHD_H2D_LOG_TIME_SYNC */
47599455 return more;
47609456 }
47619457
@@ -4795,19 +9491,79 @@
 	return TRUE;
 }
 
+static void
+dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
+{
+	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
+		firmware_api_version, host_api_version);
+	return;
+}
+
 static bool
 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
 {
+	bool retcode = FALSE;
+
 	DHD_INFO(("firmware api revision %d, host api revision %d\n",
 		firmware_api_version, host_api_version));
-	if (firmware_api_version <= host_api_version)
-		return TRUE;
-	if ((firmware_api_version == 6) && (host_api_version == 5))
-		return TRUE;
-	if ((firmware_api_version == 5) && (host_api_version == 6))
-		return TRUE;
-	return FALSE;
+
+	switch (firmware_api_version) {
+	case PCIE_SHARED_VERSION_7:
+	case PCIE_SHARED_VERSION_6:
+	case PCIE_SHARED_VERSION_5:
+		retcode = TRUE;
+		break;
+	default:
+		if (firmware_api_version <= host_api_version)
+			retcode = TRUE;
+	}
+	return retcode;
 }
+
+static int
+dhdpcie_readshared_console(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	uint32 shaddr = 0;
+	int rv;
+	pciedev_shared_t *sh = bus->pcie_sh;
+	dhd_timeout_t tmo;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	/* start a timer for 5 seconds */
+	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
+
+	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
+		/* Read last word in memory to determine address of pciedev_shared structure */
+		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+	}
+
+	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+	    (addr > shaddr)) {
+		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
+			__FUNCTION__, addr));
+		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+		return BCME_ERROR;
+	} else {
+		bus->shared_addr = (ulong)addr;
+		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
+			"before the dongle was ready\n", __FUNCTION__, addr, tmo.elapsed));
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
+		sizeof(pciedev_shared_t))) < 0) {
+		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
+		return rv;
+	}
+
+	/* Endianness */
+	sh->console_addr = ltoh32(sh->console_addr);
+	/* load bus console address */
+	bus->console_addr = sh->console_addr;
+
+	return BCME_OK;
+} /* dhdpcie_readshared_console */
 
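/* Illustrative sketch (not part of the driver): both
 * dhdpcie_readshared_console() above and dhdpcie_readshared() below rely on
 * the same boot handshake -- the firmware writes the address of its
 * pciedev_shared_t into the last word of dongle RAM, so the host polls that
 * word until it changes from 0 / the nvram checksum and points inside RAM.
 * Condensed into a hypothetical helper:
 */
#if 0
static int poll_shared_addr(dhd_bus_t *bus, uint32 *out)
{
	uint32 last_word = bus->dongle_ram_base + bus->ramsize - 4;
	uint32 addr = 0;
	dhd_timeout_t tmo;

	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);	/* ~5 second budget */
	while ((addr == 0 || addr == bus->nvram_csm) && !dhd_timeout_expired(&tmo))
		addr = LTOH32(dhdpcie_bus_rtcm32(bus, last_word));

	/* Sanity: the pointer must land inside dongle RAM */
	if (addr == 0 || addr == bus->nvram_csm ||
	    addr < bus->dongle_ram_base || addr > last_word)
		return BCME_ERROR;
	*out = addr;
	return BCME_OK;
}
#endif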
 static int
 dhdpcie_readshared(dhd_bus_t *bus)
@@ -4817,24 +9573,30 @@
 	uint32 shaddr = 0;
 	pciedev_shared_t *sh = bus->pcie_sh;
 	dhd_timeout_t tmo;
-#ifdef CUSTOMER_HW_31_2
-	int count = 0;
-#endif
+	bool idma_en = FALSE;
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
 
 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
 	/* start a timer for 5 seconds */
-#ifdef CUSTOMER_HW_31_2
-	while (((addr == 0) || (addr == bus->nvram_csm)) && (count++ < 500)) {
-#else
 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
 
 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
-#endif
 		/* Read last word in memory to determine address of pciedev_shared structure */
 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
-#ifdef CUSTOMER_HW_31_2
-		OSL_DELAY(10000);
-#endif
+	}
+
+	if (addr == (uint32)-1) {
+		DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		bus->is_linkdown = 1;
+		return BCME_ERROR;
 	}
 
 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
@@ -4842,6 +9604,17 @@
 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
 			__FUNCTION__, addr));
 		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+#ifdef DEBUG_DNGL_INIT_FAIL
+		if (addr != (uint32)-1) {	/* skip further PCIe reads if we read this addr */
+#ifdef CUSTOMER_HW4_DEBUG
+			bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+#endif /* CUSTOMER_HW4_DEBUG */
+			if (bus->dhd->memdump_enabled) {
+				bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
+				dhdpcie_mem_dump(bus);
+			}
+		}
+#endif /* DEBUG_DNGL_INIT_FAIL */
 		return BCME_ERROR;
 	} else {
 		bus->shared_addr = (ulong)addr;
@@ -4866,60 +9639,108 @@
 	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
 	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
 	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
+	sh->flags2 = ltoh32(sh->flags2);
 
-#ifdef DHD_DEBUG
 	/* load bus console address */
 	bus->console_addr = sh->console_addr;
-#endif
 
 	/* Read the dma rx offset */
 	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
 	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
 
-	DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
+	DHD_INFO(("DMA RX offset from shared area %d\n", bus->dma_rxoffset));
 
-	if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK,
-		PCIE_SHARED_VERSION)))
+	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
+	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
 	{
 		DHD_ERROR(("%s: pcie_shared version %d in dhd "
 			"is older than pciedev_shared version %d in dongle\n",
 			__FUNCTION__, PCIE_SHARED_VERSION,
-			sh->flags & PCIE_SHARED_VERSION_MASK));
+			bus->api.fw_rev));
 		return BCME_ERROR;
 	}
+	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
 
 	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
 		sizeof(uint16) : sizeof(uint32);
-	DHD_ERROR(("%s: Dongle advertizes %d size indices\n",
+	DHD_INFO(("%s: Dongle advertises %d size indices\n",
 		__FUNCTION__, bus->rw_index_sz));
+
+#ifdef IDLE_TX_FLOW_MGMT
+	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
+		DHD_ERROR(("%s: FW supports idle flow ring management!\n",
+			__FUNCTION__));
+		bus->enable_idle_flowring_mgmt = TRUE;
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+	if (IDMA_CAPABLE(bus)) {
+		if (bus->sih->buscorerev == 23) {
+		} else {
+			idma_en = TRUE;
+		}
+	}
+
+	/* TODO: this needs to be selected based on IPC instead of at compile time */
+	bus->dhd->hwa_enable = TRUE;
+
+	if (idma_en) {
+		bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
+		bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
+	}
+
+	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
+
+	bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
 
 	/* Does the FW support DMA'ing r/w indices */
 	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+		if (!bus->dhd->dma_ring_upd_overwrite) {
+			{
+				if (!IFRM_ENAB(bus->dhd)) {
+					bus->dhd->dma_h2d_ring_upd_support = TRUE;
+				}
+				bus->dhd->dma_d2h_ring_upd_support = TRUE;
+			}
+		}
 
+		if (bus->dhd->dma_d2h_ring_upd_support)
+			bus->dhd->d2h_sync_mode = 0;
 
-		DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
+		DHD_INFO(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
 			__FUNCTION__,
-			(DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
-			(DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));
-
-	} else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
-		DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
-
-#ifdef BCM_INDX_DMA
-		DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
+			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
+			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
+	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
+		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
 			__FUNCTION__));
-		return BCME_ERROR;
-#endif
-		DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
-			__FUNCTION__));
-		bus->dhd->dma_d2h_ring_upd_support = FALSE;
+		return BCME_UNSUPPORTED;
+	} else {
 		bus->dhd->dma_h2d_ring_upd_support = FALSE;
+		bus->dhd->dma_d2h_ring_upd_support = FALSE;
 	}
 
+	/* Does the firmware support fast delete ring? */
+	if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
+		DHD_INFO(("%s: Firmware supports fast delete ring\n",
+			__FUNCTION__));
+		bus->dhd->fast_delete_ring_support = TRUE;
+	} else {
+		DHD_INFO(("%s: Firmware does not support fast delete ring\n",
+			__FUNCTION__));
+		bus->dhd->fast_delete_ring_support = FALSE;
+	}
 
 	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
 	{
 		ring_info_t ring_info;
+
+		/* boundary check */
+		if (sh->rings_info_ptr > shaddr) {
+			DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
+				__FUNCTION__, sh->rings_info_ptr));
+			return BCME_ERROR;
+		}
 
 		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
 			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
@@ -4928,48 +9749,89 @@
 		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
 		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
 
-		bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);
+		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
+			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
+			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
+			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
+			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
+			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
+		} else {
+			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
+			bus->max_submission_rings = bus->max_tx_flowrings;
+			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
+			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
+			bus->use_mailbox = TRUE;
+		}
+		if (bus->max_completion_rings == 0) {
+			DHD_ERROR(("dongle completion rings are invalid %d\n",
+				bus->max_completion_rings));
+			return BCME_ERROR;
+		}
+		if (bus->max_submission_rings == 0) {
+			DHD_ERROR(("dongle submission rings are invalid %d\n",
+				bus->max_submission_rings));
+			return BCME_ERROR;
+		}
+		if (bus->max_tx_flowrings == 0) {
+			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
+			return BCME_ERROR;
+		}
 
 		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
 		 * The max_sub_queues is read from FW initialized ring_info
 		 */
-		if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
-				H2D_DMA_INDX_WR_BUF, bus->max_sub_queues);
+				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
-				D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS);
+				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
 
 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
 					"Host will use w/r indices in TCM\n",
 					__FUNCTION__));
 				bus->dhd->dma_h2d_ring_upd_support = FALSE;
+				bus->dhd->idma_enable = FALSE;
 			}
 		}
 
-		if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
+		if (bus->dhd->dma_d2h_ring_upd_support) {
 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
-				D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS);
+				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
-				H2D_DMA_INDX_RD_BUF, bus->max_sub_queues);
+				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
 
 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
 					"Host will use w/r indices in TCM\n",
 					__FUNCTION__));
 				bus->dhd->dma_d2h_ring_upd_support = FALSE;
+			}
+		}
+
+		if (IFRM_ENAB(bus->dhd)) {
+			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
+
+			if (dma_indx_wr_buf != BCME_OK) {
+				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
+					__FUNCTION__));
+				bus->dhd->ifrm_enable = FALSE;
 			}
 		}
 
 		/* read ringmem and ringstate ptrs from shared area and store in host variables */
 		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
-
-		bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+		if (dhd_msg_level & DHD_INFO_VAL) {
+			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+		}
 		DHD_INFO(("ring_info\n"));
 
 		DHD_ERROR(("%s: max H2D queues %d\n",
-			__FUNCTION__, ltoh16(ring_info.max_sub_queues)));
+			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
 
 		DHD_INFO(("mail box address\n"));
 		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
@@ -4978,10 +9840,81 @@
 			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
 	}
 
-	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
 	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
 		__FUNCTION__, bus->dhd->d2h_sync_mode));
 
+	bus->dhd->d2h_hostrdy_supported =
+		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
+
+	bus->dhd->ext_trap_data_supported =
+		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
+
+	if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
+		bus->dhd->pcie_txs_metadata_enable = 0;
+
+	bus->dhd->hscb_enable =
+		(sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
+
+#ifdef EWP_EDL
+	if (host_edl_support) {
+		bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
+		DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
+	}
+#endif /* EWP_EDL */
+
+	bus->dhd->debug_buf_dest_support =
+		(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
+	DHD_ERROR(("FW supports debug buf dest ? %s\n",
+		bus->dhd->debug_buf_dest_support ? "Y" : "N"));
+
+#ifdef DHD_HP2P
+	if (bus->dhd->hp2p_enable) {
+		bus->dhd->hp2p_ts_capable =
+			(sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
+		bus->dhd->hp2p_capable =
+			(sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
+		bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
+
+		DHD_ERROR(("FW supports HP2P ? %s\n",
+			bus->dhd->hp2p_capable ? "Y" : "N"));
+
+		if (bus->dhd->hp2p_capable) {
+			bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
+			bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
+			bus->dhd->time_thresh = HP2P_TIME_THRESH;
+			for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
+				hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
+
+				hp2p_info->hrtimer_init = FALSE;
+				hp2p_info->timer.function = &dhd_hp2p_write;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
+				tasklet_hrtimer_init(&hp2p_info->timer,
+					dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+#else
+				hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC,
+					HRTIMER_MODE_REL_SOFT);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
+			}
+		}
+	}
+#endif /* DHD_HP2P */
+
+#ifdef DHD_DB0TS
+	bus->dhd->db0ts_capable =
+		(sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
+#endif /* DHD_DB0TS */
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+
+		/*
+		 * WAR to fix ARM cold boot:
+		 * de-assert the WL domain in DAR
+		 */
+		if (bus->sih->buscorerev >= 68) {
+			dhd_bus_pcie_pwr_req_wl_domain(bus, FALSE);
+		}
+	}
 	return BCME_OK;
 } /* dhdpcie_readshared */
 
@@ -4993,6 +9926,7 @@
 	uint16 j = 0;
 	uint32 tcm_memloc;
 	uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
+	uint16 max_tx_flowrings = bus->max_tx_flowrings;
 
 	/* Ring mem ptr info */
 	/* Allocated in the order
@@ -5051,8 +9985,13 @@
 	}
 
 	/* Store txflow ring write/read pointers */
-	for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
-	    i++, j++)
+	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
+		max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
+	} else {
+		/* Account for the debug info h2d ring located after the last tx flow ring */
+		max_tx_flowrings = max_tx_flowrings + 1;
+	}
+	for (j = 0; j < max_tx_flowrings; i++, j++)
 	{
 		bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
 		bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
@@ -5065,6 +10004,13 @@
 			bus->ring_sh[i].ring_state_w,
 			bus->ring_sh[i].ring_state_r));
 	}
+	/* store wr/rd pointers for the debug info completion ring */
+	bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+	bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+	d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+	d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
+	DHD_INFO(("d2h w/r : idx %d write %x read %x\n", i,
+		bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
 }
 } /* dhd_fillup_ring_sharedptr_info */
 
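/* Illustrative sketch (not part of the driver): the write/read index blocks
 * that dhd_fillup_ring_sharedptr_info() walks are flat arrays in TCM, one
 * rw_index_sz-sized slot per ring (2 or 4 bytes depending on
 * PCIE_SHARED_2BYTE_INDICES). The address of ring i's slot is simply
 * base + i * rw_index_sz, expressed here as a hypothetical helper:
 */
#if 0
static uint32 ring_state_addr(uint32 idx_array_base, uint16 ring_idx,
	uint8 rw_index_sz)
{
	/* rw_index_sz is sizeof(uint16) or sizeof(uint32) */
	return idx_array_base + (uint32)ring_idx * rw_index_sz;
}
#endif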
@@ -5083,32 +10029,84 @@
 	if (!bus->dhd)
 		return 0;
 
+	if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
+		dhd_bus_pcie_pwr_req_clear_reload_war(bus);
+	}
+
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req(bus);
+	}
+
+	/* Configure AER registers to log the TLP header */
+	dhd_bus_aer_config(bus);
+
 	/* Make sure we're talking to the core. */
 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
 	ASSERT(bus->reg != NULL);
 
 	/* before opening up the bus for data transfer, check that the shared area is intact */
+
+	/* Do a minimal console buffer read.
+	 * This helps in getting trap messages, if any.
+	 */
+	if ((ret = dhdpcie_readshared_console(bus)) >= 0) {
+		if ((ret = dhdpcie_bus_readconsole(bus)) < 0) {
+			DHD_ERROR(("%s: Console buffer read failed\n",
+				__FUNCTION__));
+		}
+	}
+
 	ret = dhdpcie_readshared(bus);
 	if (ret < 0) {
 		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
-		return ret;
+		goto exit;
 	}
 
 	/* Make sure we're talking to the core. */
 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
 	ASSERT(bus->reg != NULL);
 
+	dhd_init_bus_lock(bus);
+
+	dhd_init_backplane_access_lock(bus);
+
 	/* Set bus state according to enable result */
 	dhdp->busstate = DHD_BUS_DATA;
+	bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
+	dhdp->dhd_bus_busy_state = 0;
+
+	/* D11 status via PCIe completion header */
+	if ((ret = dhdpcie_init_d11status(bus)) < 0) {
+		goto exit;
+	}
 
 	if (!dhd_download_fw_on_driverload)
 		dhd_dpc_enable(bus->dhd);
-
 	/* Enable the interrupt after device is up */
 	dhdpcie_bus_intr_enable(bus);
 
-	/* bcmsdh_intr_unmask(bus->sdh); */
+	bus->intr_enabled = TRUE;
 
+	/* bcmsdh_intr_unmask(bus->sdh); */
+#ifdef DHD_PCIE_RUNTIMEPM
+	bus->idlecount = 0;
+	bus->idletime = (int32)MAX_IDLE_COUNT;
+	init_waitqueue_head(&bus->rpm_queue);
+	mutex_init(&bus->pm_lock);
+#else
+	bus->idletime = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	/* Make use_d0_inform TRUE for rev 5 for backward compatibility */
+	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
+		bus->use_d0_inform = TRUE;
+	} else {
+		bus->use_d0_inform = FALSE;
+	}
+
+exit:
+	if (MULTIBP_ENAB(bus->sih)) {
+		dhd_bus_pcie_pwr_req_clear(bus);
+	}
 	return ret;
 }
 
@@ -5117,15 +10115,22 @@
 {
 	uint32 addr = 0;
 	uint32 val = 0;
+
 	addr = bus->dongle_ram_base + bus->ramsize - 4;
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
 }
 
 bool
 dhdpcie_chipmatch(uint16 vendor, uint16 device)
 {
-	if (vendor != PCI_VENDOR_ID_BROADCOM) {
+	if (vendor == PCI_VENDOR_ID_BROADCOM || vendor == PCI_VENDOR_ID_CYPRESS) {
+		DHD_ERROR(("%s: Supporting vendor %x device %x\n", __FUNCTION__,
+			vendor, device));
+	} else {
 		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
 			vendor, device));
 		return (-ENODEV);
@@ -5133,65 +10138,372 @@
 
 	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
 	    (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
-	    (device == BCM43569_CHIP_ID))
+	    (device == BCM43569_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
-	    (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
+	    (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
-	    (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
+	    (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
 		return 0;
+	}
+
+	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
+	    (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
+		return 0;
+	}
 
 	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
-	    (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
+	    (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
 		return 0;
+	}
+
+	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
+	    (device == BCM43452_D11AC5G_ID)) {
+		return 0;
+	}
 
 	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
-	    (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
+	    (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
-	    (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
+	    (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
-	    (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
+	    (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
-	    (device == BCM4358_D11AC5G_ID))
+	    (device == BCM4358_D11AC5G_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
-	    (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
+	    (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
-	    (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
+	    (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
-	    (device == BCM4359_D11AC5G_ID))
+	    (device == BCM4359_D11AC5G_ID)) {
 		return 0;
+	}
 
 	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
-	    (device == BCM43596_D11AC5G_ID))
+	    (device == BCM43596_D11AC5G_ID)) {
 		return 0;
+	}
 
+	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
+	    (device == BCM43597_D11AC5G_ID)) {
+		return 0;
+	}
+
+ if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
10216
+ (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
10217
+ return 0;
10218
+ }
10219
+
10220
+ if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
10221
+ (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
10222
+ return 0;
10223
+ }
10224
+ if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
10225
+ (device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
10226
+ return 0;
10227
+ }
10228
+ if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
10229
+ (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
10230
+ return 0;
10231
+ }
10232
+ if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
10233
+ (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
10234
+ return 0;
10235
+ }
518310236
518410237 if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
5185
- (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
10238
+ (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
518610239 return 0;
10240
+ }
518710241
518810242 if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
5189
- (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
10243
+ (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
10244
+ (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
519010245 return 0;
10246
+ }
519110247
10248
+ if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
10249
+ (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
10250
+ return 0;
10251
+ }
10252
+
10253
+ if ((device == BCM4373_D11AC_ID) || (device == BCM4373_D11AC2G_ID) ||
10254
+ (device == BCM4373_D11AC5G_ID) || (device == BCM4373_CHIP_ID)) {
10255
+ return 0;
10256
+ }
10257
+
10258
+ if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
10259
+ (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
10260
+ return 0;
10261
+ }
10262
+
10263
+#ifdef CHIPS_CUSTOMER_HW6
10264
+ if ((device == BCM4376_D11AC_ID) || (device == BCM4376_D11AC2G_ID) ||
10265
+ (device == BCM4376_D11AC5G_ID) || (device == BCM4376_CHIP_ID)) {
10266
+ return 0;
10267
+ }
10268
+ if ((device == BCM4377_M_D11AX_ID) || (device == BCM4377_D11AX_ID) ||
10269
+ (device == BCM4377_D11AX2G_ID) || (device == BCM4377_D11AX5G_ID) ||
10270
+ (device == BCM4377_CHIP_ID)) {
10271
+ return 0;
10272
+ }
10273
+ if ((device == BCM4378_D11AC_ID) || (device == BCM4378_D11AC2G_ID) ||
10274
+ (device == BCM4378_D11AC5G_ID) || (device == BCM4378_CHIP_ID)) {
10275
+ return 0;
10276
+ }
10277
+#endif /* CHIPS_CUSTOMER_HW6 */
10278
+#ifdef CHIPS_CUSTOMER_HW6
10279
+ if ((device == BCM4368_D11AC_ID) || (device == BCM4368_D11AC2G_ID) ||
10280
+ (device == BCM4368_D11AC5G_ID) || (device == BCM4368_CHIP_ID)) {
10281
+ return 0;
10282
+ }
10283
+ if ((device == BCM4367_D11AC_ID) || (device == BCM4367_D11AC2G_ID) ||
10284
+ (device == BCM4367_D11AC5G_ID) || (device == BCM4367_CHIP_ID)) {
10285
+ return 0;
10286
+ }
10287
+#endif /* CHIPS_CUSTOMER_HW6 */
10288
+
10289
+ /* CYW55560 */
10290
+ if ((device == CYW55560_WLAN_ID) || (device == CYW89570_WLAN_ID)) {
10291
+ return 0;
10292
+ }
519210293 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
519310294 return (-ENODEV);
519410295 } /* dhdpcie_chipmatch */
10296
+
10297
+/*
10298
+ * Name: dhdpcie_sromotp_customvar
10299
+ * Description:
10300
+ * Read the OTP/SPROM shadow, then parse and store the customvar tuples.
10301
+ * A shadow of OTP/SPROM exists in the ChipCommon region
10302
+ * between 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
10303
+ * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
10304
+ * can also be read from ChipCommon Registers.
10305
+ */
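+/*
+ * Tuple layout sketch, inferred from the parser below rather than an
+ * external spec: the shadow is read as a stream of [tup][tlen][data...]
+ * records terminated by 0xff (CISTPL_END); for CISTPL_BRCM_HNBU records,
+ * data[0] selects the subtype, and HNBU_CUSTOM1/HNBU_CUSTOM2 carry a
+ * 32-bit little-endian value in data[1..4].
+ */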
10306
+static int
10307
+dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2)
10308
+{
10309
+ uint16 dump_offset = 0;
10310
+ uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
10311
+ /* Table for 65nm OTP Size (in bits) */
10312
+ int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
10313
+ volatile uint16 *nvm_shadow;
10314
+ uint cur_coreid;
10315
+ uint chipc_corerev;
10316
+ chipcregs_t *chipcregs;
10317
+ uint16 *otp_dump;
10318
+ uint8 *cis;
10319
+ uint8 tup, tlen;
10320
+ int i = 0;
10321
+
10322
+ /* Save the current core */
10323
+ cur_coreid = si_coreid(bus->sih);
10324
+ /* Switch to ChipC */
10325
+ chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
10326
+ ASSERT(chipcregs != NULL);
10327
+ chipc_corerev = si_corerev(bus->sih);
10328
+ /* Check ChipcommonCore Rev */
10329
+ if (chipc_corerev < 44) {
10330
+ DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
10331
+ return BCME_UNSUPPORTED;
10332
+ }
10333
+ /* Check ChipID */
10334
+ if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
10335
+ ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
10336
+ ((uint16)bus->sih->chip != BCM4359_CHIP_ID) &&
10337
+ ((uint16)bus->sih->chip != BCM4349_CHIP_ID)) {
10338
+ DHD_ERROR(("%s: supported for chips"
10339
+ "4350/4345/4355/4364/4349/4359 only\n", __FUNCTION__));
10340
+ return BCME_UNSUPPORTED;
10341
+ }
10342
+ /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
10343
+ if (chipcregs->sromcontrol & SRC_PRESENT) {
10344
+ /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
10345
+ sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
10346
+ >> SRC_SIZE_SHIFT))) * 1024;
10347
+ DHD_TRACE(("\nSPROM Present (Size %d bits)\n", sprom_size));
10348
+ }
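+ /* Worked example of the size encoding above: field value 0 gives
+  * (1 << 0) * 1024 = 1 Kbit, 1 gives (1 << 2) * 1024 = 4 Kbits and
+  * 2 gives (1 << 4) * 1024 = 16 Kbits, matching the table comment.
+  */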
10349
+ if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
10350
+ DHD_TRACE(("\nOTP Present"));
10351
+ if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
10352
+ == OTPL_WRAP_TYPE_40NM) {
10353
+ /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
10354
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10355
+ * the latest OTP configuration.
10356
+ */
10357
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10358
+ otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10359
+ >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
10360
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
10361
+ } else {
10362
+ otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
10363
+ >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
10364
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
10365
+ }
10366
+ } else {
10367
+ /* This part is untested since newer chips have 40nm OTP */
10368
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10369
+ * the latest OTP configuration.
10370
+ */
10371
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10372
+ otp_size = otp_size_65nm[(chipcregs->otplayout &
10373
+ OTPL_ROW_SIZE_MASK) >> OTPL_ROW_SIZE_SHIFT];
10374
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
10375
+ } else {
10376
+ otp_size = otp_size_65nm[(chipcregs->capabilities &
10377
+ CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT];
10378
+ DHD_TRACE(("(Size %d bits)\n", otp_size));
10379
+ DHD_TRACE(("%s: 65nm/130nm OTP Size not tested. \n",
10380
+ __FUNCTION__));
10381
+ }
10382
+ }
10383
+ }
10384
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10385
+ * the latest OTP configuration.
10386
+ */
10387
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10388
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10389
+ ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
10390
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
10391
+ "sromcontrol = %x, otplayout = %x \n",
10392
+ __FUNCTION__, chipcregs->sromcontrol,
10393
+ chipcregs->otplayout));
10394
+ return BCME_NOTFOUND;
10395
+ }
10396
+ } else {
10397
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10398
+ ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
10399
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
10400
+ "sromcontrol = %x, capablities = %x \n",
10401
+ __FUNCTION__, chipcregs->sromcontrol,
10402
+ chipcregs->capabilities));
10403
+ return BCME_NOTFOUND;
10404
+ }
10405
+ }
10406
+ /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
10407
+ if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
10408
+ (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
10409
+ DHD_TRACE(("OTP Strap selected.\n"
10410
+ "\nOTP Shadow in ChipCommon:\n"));
10411
+ dump_size = otp_size / 16; /* 16-bit words */
10412
+ } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
10413
+ (chipcregs->sromcontrol & SRC_PRESENT)) {
10414
+ DHD_TRACE(("SPROM Strap selected\n"
10415
+ "\nSPROM Shadow in ChipCommon:\n"));
10416
+ /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
10417
+ /* dump_size in 16bit words */
10418
+ dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
10419
+ } else {
10420
+ DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
10421
+ __FUNCTION__));
10422
+ return BCME_NOTFOUND;
10423
+ }
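+ /* Note: dump_size is tracked in 16-bit words throughout, so the
+  * 8 Kbit ChipCommon window above caps it at (8 * 1024) / 16 = 512
+  * word reads of the shadow.
+  */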
10424
+ if (bus->regs == NULL) {
10425
+ DHD_ERROR(("ChipCommon Regs. not initialized\n"));
10426
+ return BCME_NOTREADY;
10427
+ } else {
10428
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10429
+ * the latest OTP configuration.
10430
+ */
10431
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10432
+ /* Chip common can read only 8kbits,
10433
+ * for ccrev >= 49 otp size is around 12 kbits so use GCI core
10434
+ */
10435
+ nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
10436
+ } else {
10437
+ /* Point to the SPROM/OTP shadow in ChipCommon */
10438
+ nvm_shadow = chipcregs->sromotp;
10439
+ }
10440
+ if (nvm_shadow == NULL) {
10441
+ DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
10442
+ return BCME_NOTFOUND;
10443
+ }
10444
+ otp_dump = kzalloc(dump_size*2, GFP_KERNEL);
10445
+ if (otp_dump == NULL) {
10446
+ DHD_ERROR(("%s: Insufficient system memory of size %d\n",
10447
+ __FUNCTION__, dump_size));
10448
+ return BCME_NOMEM;
10449
+ }
10450
+ /*
10451
+ * Read 16 bits / iteration.
10452
+ * dump_size & dump_offset in 16-bit words
10453
+ */
10454
+ while (dump_offset < dump_size) {
10455
+ *(otp_dump + dump_offset) = *(nvm_shadow + dump_offset);
10456
+ dump_offset += 0x1;
10457
+ }
10458
+ /* Read from cis tuple start address */
10459
+ cis = (uint8 *)otp_dump + CISTPL_OFFSET;
10460
+ /* parse value of customvar2 tuple */
10461
+ do {
10462
+ tup = cis[i++];
10463
+ if (tup == CISTPL_NULL || tup == CISTPL_END)
10464
+ tlen = 0;
10465
+ else
10466
+ tlen = cis[i++];
10467
+ if ((i + tlen) >= dump_size*2)
10468
+ break;
10469
+ switch (tup) {
10470
+ case CISTPL_BRCM_HNBU:
10471
+ switch (cis[i]) {
10472
+ case HNBU_CUSTOM1:
10473
+ *customvar1 = ((cis[i + 4] << 24) +
10474
+ (cis[i + 3] << 16) +
10475
+ (cis[i + 2] << 8) +
10476
+ cis[i + 1]);
10477
+ DHD_TRACE(("%s : customvar1 [%x]\n",
10478
+ __FUNCTION__, *customvar1));
10479
+ break;
10480
+ case HNBU_CUSTOM2:
10481
+ *customvar2 = ((cis[i + 4] << 24) +
10482
+ (cis[i + 3] << 16) +
10483
+ (cis[i + 2] << 8) +
10484
+ cis[i + 1]);
10485
+ DHD_TRACE(("%s : customvar2 [%x]\n",
10486
+ __FUNCTION__, *customvar2));
10487
+ break;
10488
+ default:
10489
+ break;
10490
+ }
10491
+ break;
10492
+ default:
10493
+ break;
10494
+ }
10495
+ i += tlen;
10496
+ } while (tup != 0xff);
10497
+
10498
+ if (otp_dump) {
10499
+ kfree(otp_dump);
10500
+ otp_dump = NULL;
10501
+ }
10502
+ }
10503
+ /* Switch back to the original core */
10504
+ si_setcore(bus->sih, cur_coreid, 0);
10505
+ return BCME_OK;
10506
+} /* dhdpcie_sromotp_customvar */
519510507
519610508 /**
519710509 * Name: dhdpcie_cc_nvmshadow
....@@ -5232,9 +10544,11 @@
523210544 }
523310545
523410546 /* Check ChipID */
5235
- if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip)) {
5236
- DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
5237
- __FUNCTION__));
10547
+ if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
10548
+ ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
10549
+ ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
10550
+ DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
10551
+ "4350/4345/4355/4364 only\n", __FUNCTION__));
523810552 return BCME_UNSUPPORTED;
523910553 }
524010554
....@@ -5252,28 +10566,60 @@
525210566 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
525310567 == OTPL_WRAP_TYPE_40NM) {
525410568 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
5255
- otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
10569
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10570
+ * the latest OTP configuration.
10571
+ */
10572
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10573
+ otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10574
+ >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
10575
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10576
+ } else {
10577
+ otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
525610578 >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
5257
- bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10579
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10580
+ }
525810581 } else {
525910582 /* This part is untested since newer chips have 40nm OTP */
5260
- otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
5261
- >> CC_CAP_OTPSIZE_SHIFT];
5262
- bcm_bprintf(b, "(Size %d bits)\n", otp_size);
5263
- DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
5264
- __FUNCTION__));
10583
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10584
+ * the latest OTP configuration.
10585
+ */
10586
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10587
+ otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10588
+ >> OTPL_ROW_SIZE_SHIFT];
10589
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10590
+ } else {
10591
+ otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
10592
+ >> CC_CAP_OTPSIZE_SHIFT];
10593
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10594
+ DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
10595
+ __FUNCTION__));
10596
+ }
526510597 }
526610598 }
526710599
5268
- if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
5269
- ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
5270
- DHD_ERROR(("%s: SPROM and OTP could not be found \n",
5271
- __FUNCTION__));
5272
- return BCME_NOTFOUND;
10600
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10601
+ * the latest OTP configuration.
10602
+ */
10603
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10604
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10605
+ ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
10606
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
10607
+ "sromcontrol = %x, otplayout = %x \n",
10608
+ __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
10609
+ return BCME_NOTFOUND;
10610
+ }
10611
+ } else {
10612
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10613
+ ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
10614
+ DHD_ERROR(("%s: SPROM and OTP could not be found "
10615
+ "sromcontrol = %x, capablities = %x \n",
10616
+ __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
10617
+ return BCME_NOTFOUND;
10618
+ }
527310619 }
527410620
527510621 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
5276
- if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
10622
+ if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
527710623 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
527810624
527910625 bcm_bprintf(b, "OTP Strap selected.\n"
....@@ -5300,23 +10646,38 @@
530010646 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
530110647 return BCME_NOTREADY;
530210648 } else {
5303
- bcm_bprintf(b, "\n OffSet:");
10649
+ bcm_bprintf(b, "\n OffSet:");
530410650
5305
- /* Point to the SPROM/OTP shadow in ChipCommon */
5306
- nvm_shadow = chipcregs->sromotp;
10651
+ /* Chipcommon rev51 is a variation on rev45 and does not support
10652
+ * the latest OTP configuration.
10653
+ */
10654
+ if (chipc_corerev != 51 && chipc_corerev >= 49) {
10655
+ /* Chip common can read only 8kbits,
10656
+ * for ccrev >= 49 otp size is around 12 kbits so use GCI core
10657
+ */
10658
+ nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
10659
+ } else {
10660
+ /* Point to the SPROM/OTP shadow in ChipCommon */
10661
+ nvm_shadow = chipcregs->sromotp;
10662
+ }
530710663
5308
- /*
5309
- * Read 16 bits / iteration.
5310
- * dump_size & dump_offset in 16-bit words
5311
- */
5312
- while (dump_offset < dump_size) {
5313
- if (dump_offset % 2 == 0)
5314
- /* Print the offset in the shadow space in Bytes */
5315
- bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
10664
+ if (nvm_shadow == NULL) {
10665
+ DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
10666
+ return BCME_NOTFOUND;
10667
+ }
531610668
5317
- bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
5318
- dump_offset += 0x1;
5319
- }
10669
+ /*
10670
+ * Read 16 bits / iteration.
10671
+ * dump_size & dump_offset in 16-bit words
10672
+ */
10673
+ while (dump_offset < dump_size) {
10674
+ if (dump_offset % 2 == 0)
10675
+ /* Print the offset in the shadow space in Bytes */
10676
+ bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
10677
+
10678
+ bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
10679
+ dump_offset += 0x1;
10680
+ }
532010681 }
532110682
532210683 /* Switch back to the original core */
....@@ -5342,6 +10703,13 @@
534210703 dhd_tcpack_info_tbl_clean(bus->dhd);
534310704 #endif /* DHDTCPACK_SUPPRESS */
534410705
10706
+#ifdef DHD_HP2P
10707
+ if (flow_ring_node->hp2p_ring) {
10708
+ bus->dhd->hp2p_ring_active = FALSE;
10709
+ flow_ring_node->hp2p_ring = FALSE;
10710
+ }
10711
+#endif /* DHD_HP2P */
10712
+
534510713 /* clean up BUS level info */
534610714 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
534710715
....@@ -5353,12 +10721,12 @@
535310721
535410722 /* Reinitialise flowring's queue */
535510723 dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
5356
-
535710724 flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
535810725 flow_ring_node->active = FALSE;
535910726
536010727 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
536110728
10729
+ /* Hold flowring_list_lock to ensure no race condition while accessing the List */
536210730 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
536310731 dll_delete(&flow_ring_node->list);
536410732 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
....@@ -5369,7 +10737,7 @@
536910737
537010738 /* Free the flowid back to the flowid allocator */
537110739 dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
5372
- flow_ring_node->flowid);
10740
+ flow_ring_node->flowid);
537310741 }
537410742
537510743 /**
....@@ -5399,8 +10767,26 @@
539910767
540010768 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
540110769
10770
+ /* Boundary check of the flowid */
10771
+ if (flowid >= bus->dhd->num_flow_rings) {
10772
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10773
+ flowid, bus->dhd->num_flow_rings));
10774
+ return;
10775
+ }
10776
+
540210777 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10778
+ if (!flow_ring_node) {
10779
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10780
+ return;
10781
+ }
10782
+
540310783 ASSERT(flow_ring_node->flowid == flowid);
10784
+ if (flow_ring_node->flowid != flowid) {
10785
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
10786
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
10787
+ flow_ring_node->flowid));
10788
+ return;
10789
+ }
540410790
540510791 if (status != BCME_OK) {
540610792 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
....@@ -5430,9 +10816,8 @@
543110817 * active list only after it is truly created, which is after
543210818 * receiving the create response message from the dongle.
543210818 */
5433
-
543410819 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
5435
- dll_prepend(&bus->const_flowring, &flow_ring_node->list);
10820
+ dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
543610821 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
543710822
543810823 dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
....@@ -5452,23 +10837,22 @@
545210837
545310838 flow_ring_node = (flow_ring_node_t *)arg;
545410839
5455
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
5456
- if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
5457
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
5458
- DHD_ERROR(("%s :Delete Pending Flow %d\n",
5459
- __FUNCTION__, flow_ring_node->flowid));
5460
- return BCME_ERROR;
5461
- }
5462
- flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
5463
-
5464
- queue = &flow_ring_node->queue; /* queue associated with flow ring */
5465
-
546610840 #ifdef DHDTCPACK_SUPPRESS
546710841 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
546810842 * when there is a newly coming packet from network stack.
546910843 */
547010844 dhd_tcpack_info_tbl_clean(bus->dhd);
547110845 #endif /* DHDTCPACK_SUPPRESS */
10846
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10847
+ if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
10848
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10849
+ DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
10850
+ return BCME_ERROR;
10851
+ }
10852
+ flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
10853
+
10854
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
10855
+
547210856 /* Flush all pending packets in the queue, if any */
547310857 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
547410858 PKTFREE(bus->dhd->osh, pkt, TRUE);
....@@ -5490,8 +10874,26 @@
549010874
549110875 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
549210876
10877
+ /* Boundary check of the flowid */
10878
+ if (flowid >= bus->dhd->num_flow_rings) {
10879
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10880
+ flowid, bus->dhd->num_flow_rings));
10881
+ return;
10882
+ }
10883
+
549310884 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10885
+ if (!flow_ring_node) {
10886
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10887
+ return;
10888
+ }
10889
+
549410890 ASSERT(flow_ring_node->flowid == flowid);
10891
+ if (flow_ring_node->flowid != flowid) {
10892
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
10893
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
10894
+ flow_ring_node->flowid));
10895
+ return;
10896
+ }
549510897
549610898 if (status != BCME_OK) {
549710899 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
....@@ -5505,7 +10907,6 @@
550510907
550610908 }
550710909
5508
-/** This function is not called. Obsolete ? */
550910910 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
551010911 {
551110912 void *pkt;
....@@ -5513,12 +10914,16 @@
551310914 flow_ring_node_t *flow_ring_node;
551410915 unsigned long flags;
551510916
5516
- DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
10917
+ DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
551710918
551810919 flow_ring_node = (flow_ring_node_t *)arg;
5519
- queue = &flow_ring_node->queue; /* queue associated with flow ring */
552010920
552110921 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10922
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
10923
+ /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
10924
+ * once flow ring flush response is received for this flowring node.
10925
+ */
10926
+ flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
552210927
552310928 #ifdef DHDTCPACK_SUPPRESS
552410929 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
....@@ -5538,7 +10943,6 @@
553810943 /* Send Msg to device about flow ring flush */
553910944 dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
554010945
5541
- flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
554210946 return BCME_OK;
554310947 }
554410948
....@@ -5553,8 +10957,26 @@
555310957 return;
555410958 }
555510959
10960
+ /* Boundary check of the flowid */
10961
+ if (flowid >= bus->dhd->num_flow_rings) {
10962
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10963
+ flowid, bus->dhd->num_flow_rings));
10964
+ return;
10965
+ }
10966
+
555610967 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10968
+ if (!flow_ring_node) {
10969
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10970
+ return;
10971
+ }
10972
+
555710973 ASSERT(flow_ring_node->flowid == flowid);
10974
+ if (flow_ring_node->flowid != flowid) {
10975
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
10976
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
10977
+ flow_ring_node->flowid));
10978
+ return;
10979
+ }
555810980
555910981 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
556010982 return;
....@@ -5563,7 +10985,7 @@
556310985 uint32
556410986 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
556510987 {
5566
- return bus->max_sub_queues;
10988
+ return bus->max_submission_rings;
556710989 }
556810990
556910991 /* To be symmetric with SDIO */
....@@ -5572,6 +10994,203 @@
557210994 {
557310995 return;
557410996 }
10997
+
10998
+void
10999
+dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
11000
+{
11001
+ dhdp->bus->is_linkdown = val;
11002
+}
11003
+
11004
+int
11005
+dhd_bus_get_linkdown(dhd_pub_t *dhdp)
11006
+{
11007
+ return dhdp->bus->is_linkdown;
11008
+}
11009
+
11010
+int
11011
+dhd_bus_get_cto(dhd_pub_t *dhdp)
11012
+{
11013
+ return dhdp->bus->cto_triggered;
11014
+}
11015
+
11016
+#ifdef IDLE_TX_FLOW_MGMT
11017
+/* resume request */
11018
+int
11019
+dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
11020
+{
11021
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
11022
+
11023
+ DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
11024
+
11025
+ flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
11026
+
11027
+ /* Send Msg to device about flow ring resume */
11028
+ dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
11029
+
11030
+ return BCME_OK;
11031
+}
11032
+
11033
+/* add the node back to active flowring */
11034
+void
11035
+dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
11036
+{
11037
+
11038
+ flow_ring_node_t *flow_ring_node;
11039
+
11040
+ DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
11041
+
11042
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11043
+ ASSERT(flow_ring_node->flowid == flowid);
11044
+
11045
+ if (status != BCME_OK) {
11046
+ DHD_ERROR(("%s Error Status = %d \n",
11047
+ __FUNCTION__, status));
11048
+ return;
11049
+ }
11050
+
11051
+ DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
11052
+ __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));
11053
+
11054
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
11055
+
11056
+ dhd_bus_schedule_queue(bus, flowid, FALSE);
11057
+ return;
11058
+}
11059
+
11060
+/* scan the flow rings in active list for idle time out */
11061
+void
11062
+dhd_bus_check_idle_scan(dhd_bus_t *bus)
11063
+{
11064
+ uint64 time_stamp; /* in millisec */
11065
+ uint64 diff;
11066
+
11067
+ time_stamp = OSL_SYSUPTIME();
11068
+ diff = time_stamp - bus->active_list_last_process_ts;
11069
+
11070
+ if (diff > IDLE_FLOW_LIST_TIMEOUT) {
11071
+ dhd_bus_idle_scan(bus);
11072
+ bus->active_list_last_process_ts = OSL_SYSUPTIME();
11073
+ }
11074
+
11075
+ return;
11076
+}
11077
+
11078
+/* scan the nodes in active list till it finds a non idle node */
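+/* The active list is kept most-recently-used-first (nodes are moved to
+ * the head on activity), so this walk starts from the tail: the first
+ * non-idle ring ends the scan, and idle rings are suspended in batches
+ * of up to MAX_SUSPEND_REQ per request message.
+ */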
11079
+void
11080
+dhd_bus_idle_scan(dhd_bus_t *bus)
11081
+{
11082
+ dll_t *item, *prev;
11083
+ flow_ring_node_t *flow_ring_node;
11084
+ uint64 time_stamp, diff;
11085
+ unsigned long flags;
11086
+ uint16 ringid[MAX_SUSPEND_REQ];
11087
+ uint16 count = 0;
11088
+
11089
+ time_stamp = OSL_SYSUPTIME();
11090
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11091
+
11092
+ for (item = dll_tail_p(&bus->flowring_active_list);
11093
+ !dll_end(&bus->flowring_active_list, item); item = prev) {
11094
+ prev = dll_prev_p(item);
11095
+
11096
+ flow_ring_node = dhd_constlist_to_flowring(item);
11097
+
11098
+ if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
11099
+ continue;
11100
+
11101
+ if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
11102
+ /* Takes care of deleting zombie rings */
11103
+ /* delete from the active list */
11104
+ DHD_INFO(("deleting flow id %u from active list\n",
11105
+ flow_ring_node->flowid));
11106
+ __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11107
+ continue;
11108
+ }
11109
+
11110
+ diff = time_stamp - flow_ring_node->last_active_ts;
11111
+
11112
+ if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
11113
+ DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
11114
+ /* delete from the active list */
11115
+ __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11116
+ flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
11117
+ ringid[count] = flow_ring_node->flowid;
11118
+ count++;
11119
+ if (count == MAX_SUSPEND_REQ) {
11120
+ /* create a batch message now!! */
11121
+ dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11122
+ count = 0;
11123
+ }
11124
+
11125
+ } else {
11126
+
11127
+ /* No more scanning, break from here! */
11128
+ break;
11129
+ }
11130
+ }
11131
+
11132
+ if (count) {
11133
+ dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11134
+ }
11135
+
11136
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11137
+
11138
+ return;
11139
+}
11140
+
11141
+void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11142
+{
11143
+ unsigned long flags;
11144
+ dll_t* list;
11145
+
11146
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11147
+ /* check if the node is already at head, otherwise delete it and prepend */
11148
+ list = dll_head_p(&bus->flowring_active_list);
11149
+ if (&flow_ring_node->list != list) {
11150
+ dll_delete(&flow_ring_node->list);
11151
+ dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11152
+ }
11153
+
11154
+ /* update flow ring timestamp */
11155
+ flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11156
+
11157
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11158
+
11159
+ return;
11160
+}
11161
+
11162
+void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11163
+{
11164
+ unsigned long flags;
11165
+
11166
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11167
+
11168
+ dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11169
+ /* update flow ring timestamp */
11170
+ flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11171
+
11172
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11173
+
11174
+ return;
11175
+}
11176
+void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11177
+{
11178
+ dll_delete(&flow_ring_node->list);
11179
+}
11180
+
11181
+void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11182
+{
11183
+ unsigned long flags;
11184
+
11185
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11186
+
11187
+ __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11188
+
11189
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11190
+
11191
+ return;
11192
+}
11193
+#endif /* IDLE_TX_FLOW_MGMT */
557511194
557611195 int
557711196 dhdpcie_bus_clock_start(struct dhd_bus *bus)
....@@ -5634,6 +11253,10 @@
563411253 ASSERT(osh);
563511254
563611255 if (bus->dhd) {
11256
+#if defined(DEBUGGER) || defined(DHD_DSCOPE)
11257
+ debugger_close();
11258
+#endif /* DEBUGGER || DHD_DSCOPE */
11259
+
563711260 dongle_isolation = bus->dhd->dongle_isolation;
563811261 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
563911262 }
....@@ -5641,6 +11264,182 @@
564111264
564211265 return 0;
564311266 }
11267
+
11268
+int
11269
+dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
11270
+{
11271
+ uint32 val;
11272
+ if (enable) {
11273
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
11274
+ PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
11275
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11276
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
11277
+ } else {
11278
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
11279
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11280
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
11281
+ }
11282
+ return 0;
11283
+}
11284
+
11285
+int
11286
+dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
11287
+{
11288
+ if (bus->sih->buscorerev < 19) {
11289
+ DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
11290
+ __FUNCTION__, bus->sih->buscorerev));
11291
+ return BCME_UNSUPPORTED;
11292
+ }
11293
+
11294
+ if (bus->sih->buscorerev == 19) {
11295
+ uint32 pcie_lnkst;
11296
+ si_corereg(bus->sih, bus->sih->buscoreidx,
11297
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
11298
+
11299
+ pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
11300
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
11301
+
11302
+ if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
11303
+ PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
11304
+ return BCME_UNSUPPORTED;
11305
+ }
11306
+ }
11307
+
11308
+ bus->cto_enable = enable;
11309
+
11310
+ dhdpcie_cto_cfg_init(bus, enable);
11311
+
11312
+ if (enable) {
11313
+ if (bus->cto_threshold == 0) {
11314
+ bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
11315
+ }
11316
+ si_corereg(bus->sih, bus->sih->buscoreidx,
11317
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
11318
+ ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
11319
+ PCIE_CTO_TO_THRESHHOLD_MASK) |
11320
+ ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
11321
+ PCIE_CTO_CLKCHKCNT_MASK) |
11322
+ PCIE_CTO_ENAB_MASK);
11323
+ } else {
11324
+ si_corereg(bus->sih, bus->sih->buscoreidx,
11325
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
11326
+ }
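+ /* ctoctrl is programmed in a single write: the timeout threshold
+  * field, the clock-check count field and the enable bit are OR-ed
+  * together; disabling simply clears the whole register.
+  */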
11327
+
11328
+ DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
11329
+ __FUNCTION__, bus->cto_enable));
11330
+
11331
+ return 0;
11332
+}
11333
+
11334
+static int
11335
+dhdpcie_cto_error_recovery(struct dhd_bus *bus)
11336
+{
11337
+ uint32 pci_intmask, err_status;
11338
+ uint8 i = 0;
11339
+ uint32 val;
11340
+
11341
+ pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
11342
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
11343
+
11344
+ DHD_OS_WAKE_LOCK(bus->dhd);
11345
+
11346
+ DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
11347
+
11348
+ /*
11349
+ * DAR still accessible
11350
+ */
11351
+ dhd_bus_dump_dar_registers(bus);
11352
+
11353
+ /* reset backplane */
11354
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11355
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
11356
+
11357
+ /* clear timeout error */
11358
+ while (1) {
11359
+ err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
11360
+ DAR_ERRLOG(bus->sih->buscorerev),
11361
+ 0, 0);
11362
+ if (err_status & PCIE_CTO_ERR_MASK) {
11363
+ si_corereg(bus->sih, bus->sih->buscoreidx,
11364
+ DAR_ERRLOG(bus->sih->buscorerev),
11365
+ ~0, PCIE_CTO_ERR_MASK);
11366
+ } else {
11367
+ break;
11368
+ }
11369
+ OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
11370
+ i++;
11371
+ if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
11372
+ DHD_ERROR(("cto recovery fail\n"));
11373
+
11374
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
11375
+ return BCME_ERROR;
11376
+ }
11377
+ }
11378
+
11379
+ /* clear interrupt status */
11380
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
11381
+
11382
+ /* Halt ARM & remove reset */
11383
+ /* TBD : we can add ARM Halt here in case */
11384
+
11385
+ /* reset SPROM_CFG_TO_SB_RST */
11386
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11387
+
11388
+ DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
11389
+ PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
11390
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
11391
+
11392
+ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11393
+ DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
11394
+ PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
11395
+
11396
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
11397
+
11398
+ return BCME_OK;
11399
+}
11400
+
11401
+void
11402
+dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
11403
+{
11404
+ uint32 val;
11405
+
11406
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
11407
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
11408
+ val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
11409
+}
11410
+
11411
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
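+/* Handshake as implemented below: if the dongle advertises D2H D11
+ * tx-status support in the shared flags2 word, the host sets the
+ * corresponding H2D bit and writes flags2 back to dongle memory so both
+ * sides agree on tx-status reporting before traffic starts.
+ */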
11412
+static int
11413
+dhdpcie_init_d11status(struct dhd_bus *bus)
11414
+{
11415
+ uint32 addr;
11416
+ uint32 flags2;
11417
+ int ret = 0;
11418
+
11419
+ if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
11420
+ flags2 = bus->pcie_sh->flags2;
11421
+ addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
11422
+ flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
11423
+ ret = dhdpcie_bus_membytes(bus, TRUE, addr,
11424
+ (uint8 *)&flags2, sizeof(flags2));
11425
+ if (ret < 0) {
11426
+ DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
11427
+ __FUNCTION__));
11428
+ return ret;
11429
+ }
11430
+ bus->pcie_sh->flags2 = flags2;
11431
+ bus->dhd->d11_tx_status = TRUE;
11432
+ }
11433
+ return ret;
11434
+}
11435
+
11436
+#else
11437
+static int
11438
+dhdpcie_init_d11status(struct dhd_bus *bus)
11439
+{
11440
+ return 0;
11441
+}
11442
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
564411443
564511444 #ifdef BCMPCIE_OOB_HOST_WAKE
564611445 int
....@@ -5661,3 +11460,1484 @@
566111460 dhdpcie_oob_intr_set(dhdp->bus, enable);
566211461 }
566311462 #endif /* BCMPCIE_OOB_HOST_WAKE */
11463
+
11464
+bool
11465
+dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
11466
+{
11467
+ return bus->dhd->d2h_hostrdy_supported;
11468
+}
11469
+
11470
+void
11471
+dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
11472
+{
11473
+ dhd_bus_t *bus = pub->bus;
11474
+ uint32 coreoffset = index << 12;
11475
+ uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
11476
+ uint32 value;
11477
+
11478
+ while (first_addr <= last_addr) {
11479
+ core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
11480
+ if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
11481
+ DHD_ERROR(("Invalid size/addr combination \n"));
11482
+ }
11483
+ DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
11484
+ first_addr = first_addr + 4;
11485
+ }
11486
+}
11487
+
11488
+bool
11489
+dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
11490
+{
11491
+ if (!bus->dhd)
11492
+ return FALSE;
11493
+ else if (bus->hwa_enab_bmap) {
11494
+ return bus->dhd->hwa_enable;
11495
+ } else {
11496
+ return FALSE;
11497
+ }
11498
+}
11499
+
11500
+bool
11501
+dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
11502
+{
11503
+ if (!bus->dhd)
11504
+ return FALSE;
11505
+ else if (bus->idma_enabled) {
11506
+ return bus->dhd->idma_enable;
11507
+ } else {
11508
+ return FALSE;
11509
+ }
11510
+}
11511
+
11512
+bool
11513
+dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
11514
+{
11515
+ if (!bus->dhd)
11516
+ return FALSE;
11517
+ else if (bus->ifrm_enabled) {
11518
+ return bus->dhd->ifrm_enable;
11519
+ } else {
11520
+ return FALSE;
11521
+ }
11522
+}
11523
+
11524
+bool
11525
+dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
11526
+{
11527
+ if (!bus->dhd) {
11528
+ return FALSE;
11529
+ } else if (bus->dar_enabled) {
11530
+ return bus->dhd->dar_enable;
11531
+ } else {
11532
+ return FALSE;
11533
+ }
11534
+}
11535
+
11536
+void
11537
+dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
11538
+{
11539
+ DHD_ERROR(("ENABLING DW:%d\n", dw_option));
11540
+ bus->dw_option = dw_option;
11541
+}
11542
+
11543
+void
11544
+dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
11545
+{
11546
+ trap_t *tr = &bus->dhd->last_trap_info;
11547
+ bcm_bprintf(strbuf,
11548
+ "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
11549
+ " lp 0x%x, rpc 0x%x"
11550
+ "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
11551
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
11552
+ "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
11553
+ ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
11554
+ ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
11555
+ ltoh32(bus->pcie_sh->trap_addr),
11556
+ ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
11557
+ ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
11558
+ ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
11559
+ ltoh32(tr->r11), ltoh32(tr->r12));
11560
+}
11561
+
11562
+int
11563
+dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
11564
+{
11565
+ int bcmerror = 0;
11566
+ struct dhd_bus *bus = dhdp->bus;
11567
+
11568
+ if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
11569
+ DHD_ERROR(("Invalid size/addr combination \n"));
11570
+ bcmerror = BCME_ERROR;
11571
+ }
11572
+
11573
+ return bcmerror;
11574
+}
11575
+
11576
+int
11577
+dhd_get_idletime(dhd_pub_t *dhd)
11578
+{
11579
+ return dhd->bus->idletime;
11580
+}
11581
+
11582
+static INLINE void
11583
+dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
11584
+{
11585
+ OSL_DELAY(1);
11586
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
11587
+ DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
11588
+ } else {
11589
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
11590
+ }
11591
+ return;
11592
+}
11593
+
11594
+#ifdef DHD_SSSR_DUMP
11595
+static int
11596
+dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
11597
+ uint addr_reg, uint data_reg)
11598
+{
11599
+ uint addr;
11600
+ uint val = 0;
11601
+ int i;
11602
+
11603
+ DHD_ERROR(("%s\n", __FUNCTION__));
11604
+
11605
+ if (!buf) {
11606
+ DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
11607
+ return BCME_ERROR;
11608
+ }
11609
+
11610
+ if (!fifo_size) {
11611
+ DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
11612
+ return BCME_ERROR;
11613
+ }
11614
+
11615
+ /* Set the base address offset to 0 */
11616
+ addr = addr_reg;
11617
+ val = 0;
11618
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11619
+
11620
+ addr = data_reg;
11621
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
11622
+ for (i = 0; i < fifo_size / 4; i++) {
11623
+ if (serialized_backplane_access(dhd->bus, addr,
11624
+ sizeof(uint), &val, TRUE) != BCME_OK) {
11625
+ DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
11626
+ return BCME_ERROR;
11627
+ }
11628
+ buf[i] = val;
11629
+ OSL_DELAY(1);
11630
+ }
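+ /* Note: each iteration reads the same data_reg address; the working
+  * assumption is that the hardware advances its internal fifo pointer
+  * on every read once the base address register is reset to 0 above.
+  */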
11631
+ return BCME_OK;
11632
+}
11633
+
11634
+static int
11635
+dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
11636
+ uint addr_reg)
11637
+{
11638
+ uint addr;
11639
+ uint val = 0;
11640
+ int i;
11641
+ si_t *sih = dhd->bus->sih;
11642
+
11643
+ DHD_ERROR(("%s\n", __FUNCTION__));
11644
+
11645
+ if (!buf) {
11646
+ DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
11647
+ return BCME_ERROR;
11648
+ }
11649
+
11650
+ if (!fifo_size) {
11651
+ DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
11652
+ return BCME_ERROR;
11653
+ }
11654
+
11655
+ if (addr_reg) {
11656
+
11657
+ if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
11658
+ dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
11659
+ int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
11660
+ fifo_size);
11661
+ if (err != BCME_OK) {
11662
+ DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
11663
+ __FUNCTION__));
11664
+ }
11665
+ } else {
11666
+ /* Check if vasip clk is disabled, if yes enable it */
11667
+ addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
11668
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
11669
+ if (!val) {
11670
+ val = 1;
11671
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11672
+ }
11673
+
11674
+ addr = addr_reg;
11675
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
11676
+ for (i = 0; i < fifo_size / 4; i++, addr += 4) {
11677
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
11678
+ &val, TRUE) != BCME_OK) {
11679
+ DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
11680
+ addr));
11681
+ return BCME_ERROR;
11682
+ }
11683
+ buf[i] = val;
11684
+ OSL_DELAY(1);
11685
+ }
11686
+ }
11687
+ } else {
11688
+ uint cur_coreid;
11689
+ uint chipc_corerev;
11690
+ chipcregs_t *chipcregs;
11691
+
11692
+ /* Save the current core */
11693
+ cur_coreid = si_coreid(sih);
11694
+
11695
+ /* Switch to ChipC */
11696
+ chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
11697
+
11698
+ chipc_corerev = si_corerev(sih);
11699
+
11700
+ if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
11701
+ W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
11702
+
11703
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
11704
+ for (i = 0; i < fifo_size / 4; i++) {
11705
+ buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
11706
+ OSL_DELAY(1);
11707
+ }
11708
+ }
11709
+
11710
+ /* Switch back to the original core */
11711
+ si_setcore(sih, cur_coreid, 0);
11712
+ }
11713
+
11714
+ return BCME_OK;
11715
+}
11716
+
11717
+#if defined(EWP_ETD_PRSRV_LOGS)
11718
+void
11719
+dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
11720
+ uint8 *ext_trap_data, void *event_decode_data)
11721
+{
11722
+ hnd_ext_trap_hdr_t *hdr = NULL;
11723
+ bcm_tlv_t *tlv;
11724
+ eventlog_trapdata_info_t *etd_evtlog = NULL;
11725
+ eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
11726
+ uint arr_size = 0;
11727
+ int i = 0;
11728
+ int err = 0;
11729
+ uint32 seqnum = 0;
11730
+
11731
+ if (!ext_trap_data || !event_decode_data || !dhd)
11732
+ return;
11733
+
11734
+ if (!dhd->concise_dbg_buf)
11735
+ return;
11736
+
11737
+ /* First word is original trap_data, skip */
11738
+ ext_trap_data += sizeof(uint32);
11739
+
11740
+ hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
11741
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
11742
+ if (tlv) {
11743
+ uint32 baseaddr = 0;
11744
+ uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
11745
+
11746
+ etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
11747
+ DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
11748
+ "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
11749
+ (etd_evtlog->num_elements),
11750
+ ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
11751
+ arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
11752
+ if (!arr_size) {
11753
+ DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
11754
+ return;
11755
+ }
11756
+ evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
11757
+ if (!evtlog_buf_arr) {
11758
+ DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
11759
+ return;
11760
+ }
11761
+
11762
+ /* boundary check */
11763
+ baseaddr = etd_evtlog->log_arr_addr;
11764
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
11765
+ ((baseaddr + arr_size) > endaddr)) {
11766
+ DHD_ERROR(("%s: Error reading invalid address\n",
11767
+ __FUNCTION__));
11768
+ goto err;
11769
+ }
11770
+
11771
+ /* read the eventlog_trap_buf_info_t array from dongle memory */
11772
+ err = dhdpcie_bus_membytes(dhd->bus, FALSE,
11773
+ (ulong)(etd_evtlog->log_arr_addr),
11774
+ (uint8 *)evtlog_buf_arr, arr_size);
11775
+ if (err != BCME_OK) {
11776
+ DHD_ERROR(("%s: Error reading event log array from dongle !\n",
11777
+ __FUNCTION__));
11778
+ goto err;
11779
+ }
11780
+ /* ntoh is required only for seq_num, because in the original
11781
+ * case of event logs from info ring, it is sent from dongle in that way
11782
+ * so for ETD also dongle follows same convention
11783
+ */
11784
+ seqnum = ntoh32(etd_evtlog->seq_num);
11785
+ memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
11786
+ for (i = 0; i < (etd_evtlog->num_elements); ++i) {
11787
+ /* boundary check */
11788
+ baseaddr = evtlog_buf_arr[i].buf_addr;
11789
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
11790
+ ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
11791
+ DHD_ERROR(("%s: Error reading invalid address\n",
11792
+ __FUNCTION__));
11793
+ goto err;
11794
+ }
11795
+ /* read each individual event log buf from dongle memory */
11796
+ err = dhdpcie_bus_membytes(dhd->bus, FALSE,
11797
+ ((ulong)evtlog_buf_arr[i].buf_addr),
11798
+ dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
11799
+ if (err != BCME_OK) {
11800
+ DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
11801
+ __FUNCTION__));
11802
+ goto err;
11803
+ }
11804
+ dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
11805
+ event_decode_data, (evtlog_buf_arr[i].len),
11806
+ FALSE, hton32(seqnum));
11807
+ ++seqnum;
11808
+ }
11809
+err:
11810
+ MFREE(dhd->osh, evtlog_buf_arr, arr_size);
11811
+ } else {
11812
+ DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
11813
+ }
11814
+}
11815
+#endif /* EWP_ETD_PRSRV_LOGS */
11816
+
11817
+static uint32
11818
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
11819
+{
11820
+ uint addr;
11821
+ uint val = 0;
11822
+
11823
+ DHD_ERROR(("%s\n", __FUNCTION__));
11824
+
11825
+ /* conditionally restore bits [11:8] of PowerCtrl */
11826
+ addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11827
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
11828
+ if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
11829
+ addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11830
+ dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
11831
+ }
11832
+ return BCME_OK;
11833
+}
11834
+
11835
+static uint32
11836
+dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
11837
+{
11838
+ uint addr;
11839
+ uint val = 0, reg_val = 0;
11840
+
11841
+ DHD_ERROR(("%s\n", __FUNCTION__));
11842
+
11843
+ /* conditionally clear bits [11:8] of PowerCtrl */
11844
+ addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11845
+ dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
11846
+ if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
11847
+ addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11848
+ val = 0;
11849
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11850
+ }
11851
+ return reg_val;
11852
+}
11853
+
11854
+static int
11855
+dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
11856
+{
11857
+ uint addr;
11858
+ uint val;
11859
+
11860
+ DHD_ERROR(("%s\n", __FUNCTION__));
11861
+
11862
+ /* clear chipcommon intmask */
11863
+ addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
11864
+ val = 0x0;
11865
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11866
+
11867
+ /* clear PMUIntMask0 */
11868
+ addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
11869
+ val = 0x0;
11870
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11871
+
11872
+ /* clear PMUIntMask1 */
11873
+ addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
11874
+ val = 0x0;
11875
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11876
+
11877
+ /* clear res_req_timer */
11878
+ addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
11879
+ val = 0x0;
11880
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11881
+
11882
+ /* clear macresreqtimer */
11883
+ addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
11884
+ val = 0x0;
11885
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11886
+
11887
+ /* clear macresreqtimer1 */
11888
+ addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
11889
+ val = 0x0;
11890
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11891
+
11892
+ /* clear VasipClkEn */
11893
+ if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
11894
+ addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
11895
+ val = 0x0;
11896
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
11897
+ }
11898
+
11899
+ return BCME_OK;
11900
+}
11901
+
+static void
+dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
+{
+#define TRAP_DATA_MAIN_CORE_BIT_MASK	(1 << 1)
+#define TRAP_DATA_AUX_CORE_BIT_MASK	(1 << 4)
+    uint trap_data_mask[MAX_NUM_D11CORES] =
+        {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
+    int i;
+    /* Apply only for 4375 chip */
+    if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
+        for (i = 0; i < MAX_NUM_D11CORES; i++) {
+            if (dhd->sssr_d11_outofreset[i] &&
+                (dhd->dongle_trap_data & trap_data_mask[i])) {
+                dhd->sssr_d11_outofreset[i] = TRUE;
+            } else {
+                dhd->sssr_d11_outofreset[i] = FALSE;
+            }
+            DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
+                "trap_data:0x%x-0x%x\n",
+                __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
+                dhd->dongle_trap_data, trap_data_mask[i]));
+        }
+    }
+}
+
+static int
+dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
+{
+    int i;
+    uint addr;
+    uint val = 0;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    for (i = 0; i < MAX_NUM_D11CORES; i++) {
+        /* Check if bit 0 of resetctrl is cleared */
+        addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
+        if (!addr) {
+            DHD_ERROR(("%s: skipping core[%d] as 'addr' is 0\n",
+                __FUNCTION__, i));
+            continue;
+        }
+        dhd_sbreg_op(dhd, addr, &val, TRUE);
+        if (!(val & 1)) {
+            dhd->sssr_d11_outofreset[i] = TRUE;
+        } else {
+            dhd->sssr_d11_outofreset[i] = FALSE;
+        }
+        DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
+            __FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
+    }
+    dhdpcie_update_d11_status_from_trapdata(dhd);
+
+    return BCME_OK;
+}
+
+static int
+dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
+{
+    int i;
+    uint addr;
+    uint val = 0;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    for (i = 0; i < MAX_NUM_D11CORES; i++) {
+        if (dhd->sssr_d11_outofreset[i]) {
+            /* clear request clk only if itopoobb is non zero */
+            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
+            dhd_sbreg_op(dhd, addr, &val, TRUE);
+            if (val != 0) {
+                /* clear clockcontrolstatus */
+                addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
+                val = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
+                dhd_sbreg_op(dhd, addr, &val, FALSE);
+            }
+        }
+    }
+    return BCME_OK;
+}
+
+static int
+dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
+{
+    uint addr;
+    uint val = 0;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    /* Check if bit 0 of resetctrl is cleared */
+    addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
+    dhd_sbreg_op(dhd, addr, &val, TRUE);
+    if (!(val & 1)) {
+        /* clear request clk only if itopoobb is non zero */
+        addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
+        dhd_sbreg_op(dhd, addr, &val, TRUE);
+        if (val != 0) {
+            /* clear clockcontrolstatus */
+            addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
+            val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+        }
+    }
+    return BCME_OK;
+}
+
+static int
+dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
+{
+    uint addr;
+    uint val = 0;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    /* clear request clk only if itopoobb is non zero */
+    addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
+    dhd_sbreg_op(dhd, addr, &val, TRUE);
+    if (val) {
+        /* clear clockcontrolstatus */
+        addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
+        val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
+        dhd_sbreg_op(dhd, addr, &val, FALSE);
+    }
+    return BCME_OK;
+}
+
+static int
+dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
+{
+    uint addr;
+    uint val = 0;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    /* toggle the LTR state from active to sleep */
+    addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
+    val = LTR_ACTIVE;
+    dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+    val = LTR_SLEEP;
+    dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+    return BCME_OK;
+}
+
+static int
+dhdpcie_clear_clk_req(dhd_pub_t *dhd)
+{
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    dhdpcie_arm_clear_clk_req(dhd);
+
+    dhdpcie_d11_clear_clk_req(dhd);
+
+    dhdpcie_pcie_clear_clk_req(dhd);
+
+    return BCME_OK;
+}
+
+static int
+dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
+{
+    int i;
+    uint addr;
+    uint val = 0;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    for (i = 0; i < MAX_NUM_D11CORES; i++) {
+        if (dhd->sssr_d11_outofreset[i]) {
+            /* disable core by setting bit 0 */
+            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
+            val = 1;
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+            OSL_DELAY(6000);
+
+            /* the ioctrl reset-sequence values are taken from mac_regs[0]
+             * for every core
+             */
+            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
+            val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+            val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+            /* enable core by clearing bit 0 */
+            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
+            val = 0;
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
+            val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+            val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+            val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
+            dhd_sbreg_op(dhd, addr, &val, FALSE);
+        }
+    }
+    return BCME_OK;
+}
+
+static int
+dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
+{
+    int i;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    for (i = 0; i < MAX_NUM_D11CORES; i++) {
+        if (dhd->sssr_d11_outofreset[i]) {
+            dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
+                dhd->sssr_reg_info.mac_regs[i].sr_size,
+                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
+                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
+        }
+    }
+
+    if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
+            dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
+            dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
+    } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+        dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
+        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
+            dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
+            dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
+    }
+
+    return BCME_OK;
+}
+
+static int
+dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
+{
+    int i;
+
+    DHD_ERROR(("%s\n", __FUNCTION__));
+
+    for (i = 0; i < MAX_NUM_D11CORES; i++) {
+        if (dhd->sssr_d11_outofreset[i]) {
+            dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
+                dhd->sssr_reg_info.mac_regs[i].sr_size,
+                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
+                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
+        }
+    }
+
+    if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
+            dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
+            dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
+    } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+        dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
+        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
+            dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
+            dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
+    }
+
+    return BCME_OK;
+}
+
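+/* SSSR dump sequence, as orchestrated below:
+ *  1. snapshot the D11 and digital save/restore memories while the cores are
+ *     still running ("before SR"),
+ *  2. quiesce the chip: mask chipcommon/PMU interrupts and resource request
+ *     timers, clear PowerCtrl bits [11:8], drop outstanding clock requests
+ *     and signal LTR sleep so the dongle can enter save/restore,
+ *  3. wait, then restore PowerCtrl and bring the D11 cores out of reset,
+ *  4. snapshot the same memories again ("after SR") and write both copies
+ *     out via dhd_write_sssr_dump().
+ */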
+int
+dhdpcie_sssr_dump(dhd_pub_t *dhd)
+{
+    uint32 powerctrl_val;
+
+    if (!dhd->sssr_inited) {
+        DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    if (dhd->bus->is_linkdown) {
+        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    dhdpcie_d11_check_outofreset(dhd);
+
+    DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
+    if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
+        DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    dhdpcie_clear_intmask_and_timer(dhd);
+    powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
+    dhdpcie_clear_clk_req(dhd);
+    dhdpcie_pcie_send_ltrsleep(dhd);
+
+    /* Wait for some time before Restore */
+    OSL_DELAY(6000);
+
+    dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
+    dhdpcie_bring_d11_outofreset(dhd);
+
+    DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
+    if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
+        DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+    dhd->sssr_dump_collected = TRUE;
+    dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
+
+    return BCME_OK;
+}
+
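+/* The FIS dump is triggered by poking DAR_FIS_CTRL and, after the dongle has
+ * completed the save, collected by dhd_bus_fis_dump(), which powers up all
+ * PMU resources, clears the FIS-done status and reuses the "after SR" SSSR
+ * collection path.
+ */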
+static int
+dhdpcie_fis_trigger(dhd_pub_t *dhd)
+{
+    if (!dhd->sssr_inited) {
+        DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    if (dhd->bus->is_linkdown) {
+        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    /* Trigger FIS */
+    si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+        DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
+    OSL_DELAY(100 * 1000);
+
+    return BCME_OK;
+}
+
+int
+dhd_bus_fis_trigger(dhd_pub_t *dhd)
+{
+    return dhdpcie_fis_trigger(dhd);
+}
+
+static int
+dhdpcie_fis_dump(dhd_pub_t *dhd)
+{
+    int i;
+
+    if (!dhd->sssr_inited) {
+        DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    if (dhd->bus->is_linkdown) {
+        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    /* bring up all pmu resources */
+    PMU_REG(dhd->bus->sih, min_res_mask, ~0,
+        PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
+    OSL_DELAY(10 * 1000);
+
+    for (i = 0; i < MAX_NUM_D11CORES; i++) {
+        dhd->sssr_d11_outofreset[i] = TRUE;
+    }
+
+    dhdpcie_bring_d11_outofreset(dhd);
+    OSL_DELAY(6000);
+
+    /* clear FIS Done */
+    PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
+
+    dhdpcie_d11_check_outofreset(dhd);
+
+    DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
+    if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
+        DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
+        return BCME_ERROR;
+    }
+
+    dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
+
+    return BCME_OK;
+}
+
+int
+dhd_bus_fis_dump(dhd_pub_t *dhd)
+{
+    return dhdpcie_fis_dump(dhd);
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_bus_get_wakecount(dhd_pub_t *dhd)
+{
+    return &dhd->bus->wake_counts;
+}
+
+int
+dhd_bus_get_bus_wake(dhd_pub_t *dhd)
+{
+    return bcmpcie_set_get_wake(dhd->bus, 0);
+}
+#endif /* DHD_WAKE_STATUS */
+
+/* Writes random number(s) to the TCM. FW upon initialization reads this
+ * location to fetch the random number, and uses it to randomize the heap
+ * address space layout.
+ */
+static int
+dhdpcie_wrt_rnd(struct dhd_bus *bus)
+{
+    bcm_rand_metadata_t rnd_data;
+    uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
+    uint32 count = BCM_ENTROPY_HOST_NBYTES;
+    int ret = 0;
+    uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
+        ((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
+
+    memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
+    rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
+    rnd_data.count = htol32(count);
+    /* write the metadata about the random number */
+    dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
+    /* step back by the number of random bytes to be written */
+    addr -= count;
+
+#ifdef DHD_RND_DEBUG
+    bus->dhd->rnd_buf = NULL;
+    /* get random contents from file */
+    ret = dhd_get_rnd_info(bus->dhd);
+    if (bus->dhd->rnd_buf) {
+        /* write file contents to TCM */
+        DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
+        dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+
+        /* Dump random content to out file */
+        dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+
+        /* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
+        MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
+        bus->dhd->rnd_buf = NULL;
+        return BCME_OK;
+    }
+#endif /* DHD_RND_DEBUG */
+
+    /* Now get & write the random number(s) */
+    ret = dhd_get_random_bytes(rand_buf, count);
+    if (ret != BCME_OK) {
+        return ret;
+    }
+    dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
+
+#ifdef DHD_RND_DEBUG
+    /* Dump random content to out file */
+    dhd_dump_rnd_info(bus->dhd, rand_buf, count);
+#endif /* DHD_RND_DEBUG */
+
+    return BCME_OK;
+}
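+
+/* Resulting top-of-RAM layout, a sketch derived from the address arithmetic
+ * above (ascending addresses):
+ *
+ *   addr - count : 'count' bytes of host-supplied entropy
+ *   addr         : bcm_rand_metadata_t { signature, count }
+ *   addr + sizeof(rnd_data) up to
+ *   ram_base + ramsize - BCM_NVRAM_OFFSET_TCM :
+ *                  nvram image (compressed size from bus->nvram_csm)
+ */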
+
+void
+dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
+{
+    struct dhd_bus *bus = dhd->bus;
+    uint64 current_time;
+
+    DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
+    DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
+        bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
+    DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
+        bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
+#ifdef BCMPCIE_OOB_HOST_WAKE
+    DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
+        bus->oob_intr_count, bus->oob_intr_enable_count,
+        bus->oob_intr_disable_count));
+    DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
+        dhdpcie_get_oob_irq_num(bus),
+        GET_SEC_USEC(bus->last_oob_irq_time)));
+    DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
+        " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->last_oob_irq_enable_time),
+        GET_SEC_USEC(bus->last_oob_irq_disable_time)));
+    DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
+        dhdpcie_get_oob_irq_status(bus),
+        dhdpcie_get_oob_irq_level()));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+    DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+        bus->dpc_return_busdown_count, bus->non_ours_irq_count));
+
+    current_time = OSL_LOCALTIME_NS();
+    DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(current_time)));
+    DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
+        " isr_exit_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->isr_entry_time),
+        GET_SEC_USEC(bus->isr_exit_time)));
+    DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
+        " last_non_ours_irq_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->dpc_sched_time),
+        GET_SEC_USEC(bus->last_non_ours_irq_time)));
+    DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
+        " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->dpc_entry_time),
+        GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
+    DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
+        " last_process_txcpl_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->last_process_flowring_time),
+        GET_SEC_USEC(bus->last_process_txcpl_time)));
+    DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
+        " last_process_infocpl_time="SEC_USEC_FMT
+        " last_process_edl_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->last_process_rxcpl_time),
+        GET_SEC_USEC(bus->last_process_infocpl_time),
+        GET_SEC_USEC(bus->last_process_edl_time)));
+    DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
+        " resched_dpc_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->dpc_exit_time),
+        GET_SEC_USEC(bus->resched_dpc_time)));
+    DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->last_d3_inform_time)));
+
+    DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
+        " last_suspend_end_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->last_suspend_start_time),
+        GET_SEC_USEC(bus->last_suspend_end_time)));
+    DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
+        " last_resume_end_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(bus->last_resume_start_time),
+        GET_SEC_USEC(bus->last_resume_end_time)));
+
+#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
+    DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
+        " logtrace_thread_sem_down_time="SEC_USEC_FMT
+        "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+        " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+        "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
+        GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
+        GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
+        GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
+        GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
+        GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
+#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+}
+
+void
+dhd_bus_intr_count_dump(dhd_pub_t *dhd)
+{
+    dhd_pcie_intr_count_dump(dhd);
+}
+
+int
+dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
+{
+    uint32 save_idx, val;
+    si_t *sih = dhd->bus->sih;
+    uint32 oob_base, oob_base1;
+    uint32 wrapper_dump_list[] = {
+        AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
+        AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
+        AI_RESETSTATUS, AI_RESETCTRL,
+        AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
+        AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
+    };
+    uint8 i;
+    hndoobr_reg_t *reg;
+    cr4regs_t *cr4regs;
+    ca7regs_t *ca7regs;
+
+    save_idx = si_coreidx(sih);
+
+    DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
+
+    if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
+        for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
+            val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+            DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+        }
+    }
+
+    if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
+        DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
+        for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
+            val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+            DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+        }
+        DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
+        DHD_ERROR(("reg:0x%x val:0x%x\n",
+            (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
+        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
+    }
+
+    if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
+        DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
+        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
+        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
+        DHD_ERROR(("reg:0x%x val:0x%x\n",
+            (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
+        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
+        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
+        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
+        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
+        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
+    }
+
+    DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
+
+    oob_base = si_oobr_baseaddr(sih, FALSE);
+    oob_base1 = si_oobr_baseaddr(sih, TRUE);
+    if (oob_base) {
+        dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
+        dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
+        dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
+        dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
+    } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
+        /* print the address of each intstatus element, not the core base */
+        val = R_REG(dhd->osh, &reg->intstatus[0]);
+        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[0], val));
+        val = R_REG(dhd->osh, &reg->intstatus[1]);
+        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[1], val));
+        val = R_REG(dhd->osh, &reg->intstatus[2]);
+        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[2], val));
+        val = R_REG(dhd->osh, &reg->intstatus[3]);
+        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[3], val));
+    }
+
+    if (oob_base1) {
+        DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
+
+        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
+        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
+        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
+        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
+    }
+
+    si_setcoreidx(dhd->bus->sih, save_idx);
+
+    return 0;
+}
+
+int
+dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
+{
+    if (dhd->bus->is_linkdown) {
+        DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
+            "due to PCIe link down ------- \r\n"));
+        return 0;
+    }
+
+    DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
+
+    /* HostToDev */
+    DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
+    DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
+    DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
+
+    DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
+    DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
+    DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
+
+    /* DevToHost */
+    DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
+    DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
+    DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
+
+    DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
+    DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
+    DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
+        si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
+
+    return 0;
+}
+
+bool
+dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
+{
+    uint32 intstatus = 0;
+    uint32 intmask = 0;
+    uint32 d2h_db0 = 0;
+    uint32 d2h_mb_data = 0;
+
+    DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+    intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+        dhd->bus->pcie_mailbox_int, 0, 0);
+    if (intstatus == (uint32)-1) {
+        DHD_ERROR(("intstatus=0x%x \n", intstatus));
+        return FALSE;
+    }
+
+    intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+        dhd->bus->pcie_mailbox_mask, 0, 0);
+    if (intmask == (uint32)-1) {
+        DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
+        return FALSE;
+    }
+
+    d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+        PCID2H_MailBox, 0, 0);
+    if (d2h_db0 == (uint32)-1) {
+        DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+            intstatus, intmask, d2h_db0));
+        return FALSE;
+    }
+
+    DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+        intstatus, intmask, d2h_db0));
+    dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+    DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
+        dhd->bus->def_intmask));
+
+    return TRUE;
+}
+
+void
+dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
+{
+    DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
+    DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
+        dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+        PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+    DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
+        dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+        PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
+        dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+        PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
+        dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+        PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
+        dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+        PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+}
+
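+/* Top-level PCIe debug dump: logs the bus low-power state and host IRQ
+ * status, the interrupt counters above, EP resources, RC config-space
+ * capabilities and EP config space, and finally the PCIe core and DMA
+ * registers; config/core register reads are skipped while the link is down.
+ */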
+int
+dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+{
+    int host_irq_disabled;
+
+    DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
+    host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+    DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
+    dhd_print_tasklet_status(dhd);
+    dhd_pcie_intr_count_dump(dhd);
+
+    DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
+    dhdpcie_dump_resource(dhd->bus);
+
+    dhd_pcie_dump_rc_conf_space_cap(dhd);
+
+    DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
+        dhd_debug_get_rc_linkcap(dhd->bus)));
+
+    if (dhd->bus->is_linkdown && !dhd->bus->cto_triggered) {
+        DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
+            "link may be DOWN\n"));
+        return 0;
+    }
+
+    DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
+    DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
+        "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+        PCIECFGREG_STATUS_CMD,
+        dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
+        PCIECFGREG_BASEADDR0,
+        dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
+        PCIECFGREG_BASEADDR1,
+        dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
+        PCIE_CFG_PMCSR,
+        dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
+    DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
+        "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
+        dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
+        sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
+        dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
+        sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
+        dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
+        sizeof(uint32))));
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+    DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
+        dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+        PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+    DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
+        "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
+        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
+        PCI_TLP_HDR_LOG2,
+        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
+        PCI_TLP_HDR_LOG3,
+        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
+        PCI_TLP_HDR_LOG4,
+        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
+    if (dhd->bus->sih->buscorerev >= 24) {
+        DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
+            "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
+            dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
+            sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
+            dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
+            sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
+            dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
+            sizeof(uint32))));
+        dhd_bus_dump_dar_registers(dhd->bus);
+    }
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+
+    if (dhd->bus->is_linkdown) {
+        DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
+        return 0;
+    }
+
+    DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
+
+    DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
+        "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
+        dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
+        PCIECFGREG_PHY_DBG_CLKREQ1,
+        dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
+        PCIECFGREG_PHY_DBG_CLKREQ2,
+        dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
+        PCIECFGREG_PHY_DBG_CLKREQ3,
+        dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
+
+#ifdef EXTENDED_PCIE_DEBUG_DUMP
+    if (dhd->bus->sih->buscorerev >= 24) {
+        DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
+            "ltssm_hist_2(0x%x)=0x%x "
+            "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
+            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
+            PCIECFGREG_PHY_LTSSM_HIST_1,
+            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
+            PCIECFGREG_PHY_LTSSM_HIST_2,
+            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
+            PCIECFGREG_PHY_LTSSM_HIST_3,
+            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
+
+        DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
+            PCIECFGREG_TREFUP,
+            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
+            PCIECFGREG_TREFUP_EXT,
+            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
+        DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
+            "Function_Intstatus(0x%x)=0x%x "
+            "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
+            "Power_Intmask(0x%x)=0x%x\n",
+            PCIE_CORE_REG_ERRLOG,
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                PCIE_CORE_REG_ERRLOG, 0, 0),
+            PCIE_CORE_REG_ERR_ADDR,
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                PCIE_CORE_REG_ERR_ADDR, 0, 0),
+            PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+            PCIFunctionIntmask(dhd->bus->sih->buscorerev),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
+            PCIPowerIntstatus(dhd->bus->sih->buscorerev),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+            PCIPowerIntmask(dhd->bus->sih->buscorerev),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
+        DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
+            "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
+            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
+            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
+            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
+            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
+        DHD_ERROR(("err_code(0x%x)=0x%x\n",
+            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
+            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+                OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
+
+        dhd_pcie_dump_wrapper_regs(dhd);
+    }
+#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+
+    dhd_pcie_dma_info_dump(dhd);
+
+    return 0;
+}
+
+bool
+dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
+{
+    return bus->force_bt_quiesce;
+}
+
+#ifdef DHD_HP2P
+uint16
+dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
+{
+    if (tx)
+        return bus->hp2p_txcpl_max_items;
+    else
+        return bus->hp2p_rxcpl_max_items;
+}
+
+static uint16
+dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
+{
+    if (tx)
+        bus->hp2p_txcpl_max_items = val;
+    else
+        bus->hp2p_rxcpl_max_items = val;
+    return val;
+}
+#endif /* DHD_HP2P */
+
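+/* Host<->dongle TCM sanity test: for each pattern in init_val[], the whole
+ * dongle RAM is written over the bus in MEMBLOCK-sized chunks, read back and
+ * compared; any mismatch is hex-dumped and the test fails.
+ */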
+static bool
+dhd_bus_tcm_test(struct dhd_bus *bus)
+{
+    int ret = 0;
+    int size;           /* Full mem size */
+    int start;          /* Start address */
+    int read_size = 0;  /* Read size of each iteration */
+    int num = 0;
+    uint8 *read_buf, *write_buf;
+    uint8 init_val[NUM_PATTERNS] = {
+        0xFFu, /* 11111111 */
+        0x00u, /* 00000000 */
+    };
+
+    if (!bus) {
+        DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
+        return FALSE;
+    }
+
+    read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+    if (!read_buf) {
+        DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
+        return FALSE;
+    }
+
+    write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+    if (!write_buf) {
+        MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+        DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
+        return FALSE;
+    }
+
+    DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
+    DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
+
+    while (num < NUM_PATTERNS) {
+        start = bus->dongle_ram_base;
+        /* Get full mem size */
+        size = bus->ramsize;
+
+        memset(write_buf, init_val[num], MEMBLOCK);
+        while (size > 0) {
+            read_size = MIN(MEMBLOCK, size);
+            memset(read_buf, 0, read_size);
+
+            /* Write */
+            if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
+                DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
+                MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+                MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+                return FALSE;
+            }
+
+            /* Read */
+            if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
+                DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+                MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+                MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+                return FALSE;
+            }
+
+            /* Compare */
+            if (memcmp(read_buf, write_buf, read_size)) {
+                DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
+                    __FUNCTION__, start, num));
+                prhex("Readbuf", read_buf, read_size);
+                prhex("Writebuf", write_buf, read_size);
+                MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+                MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+                return FALSE;
+            }
+
+            /* Decrement size and increment start address */
+            size -= read_size;
+            start += read_size;
+        }
+        num++;
+    }
+
+    MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+    MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+
+    DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
+    return TRUE;
+}