forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_pcie.h
@@ -1,15 +1,16 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Linux DHD Bus Module for PCIE
  *
- * Copyright (C) 1999-2019, Broadcom Corporation
- *
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2 (the "GPL"),
  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  * following added to such license:
- *
+ *
  * As a special exception, the copyright holders of this software give you
  * permission to link this software with independent modules, and to copy and
  * distribute the resulting executable under terms of your choice, provided that
@@ -17,7 +18,7 @@
  * the license of that module. An independent module is a module which is not
  * derived from this software. The special exception does not apply to any
  * modifications of the software.
- *
+ *
  * Notwithstanding the above, under no circumstances may you combine this
  * software in any way with any other Broadcom software provided under a license
  * other than the GPL, without Broadcom's express prior written consent.
@@ -25,9 +26,8 @@
  *
  * <<Broadcom-WL-IPTag/Open:>>
  *
- * $Id: dhd_pcie.h 606080 2015-12-14 09:31:57Z $
+ * $Id: dhd_pcie.h 698652 2017-05-10 10:39:24Z $
  */
-

 #ifndef dhd_pcie_h
 #define dhd_pcie_h
@@ -36,15 +36,44 @@
 #include <hnd_cons.h>
 #ifdef SUPPORT_LINKDOWN_RECOVERY
 #ifdef CONFIG_ARCH_MSM
-#if defined(CONFIG_ARCH_MSM8994) || defined(CONFIG_ARCH_MSM8996)
+#ifdef CONFIG_PCI_MSM
 #include <linux/msm_pcie.h>
 #else
 #include <mach/msm_pcie.h>
-#endif /* CONFIG_ARCH_MSM8994 */
+#endif /* CONFIG_PCI_MSM */
 #endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
+	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
+#include <linux/exynos-pci-noti.h>
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
 #endif /* SUPPORT_LINKDOWN_RECOVERY */

+#ifdef DHD_PCIE_RUNTIMEPM
+#include <linux/mutex.h>
+#include <linux/wait.h>
+
+#define DEFAULT_DHD_RUNTIME_MS 100
+#ifndef CUSTOM_DHD_RUNTIME_MS
+#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
+#endif /* CUSTOM_DHD_RUNTIME_MS */
+
+#ifndef MAX_IDLE_COUNT
+#define MAX_IDLE_COUNT 16
+#endif /* MAX_IDLE_COUNT */
+
+#ifndef MAX_RESUME_WAIT
+#define MAX_RESUME_WAIT 100
+#endif /* MAX_RESUME_WAIT */
+#endif /* DHD_PCIE_RUNTIMEPM */
+
 /* defines */
+#define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7

 #define PCMSGBUF_HDRLEN 0
 #define DONGLE_REG_MAP_SIZE (32 * 1024)
@@ -57,22 +86,74 @@
 #define REMAP_ENAB(bus) ((bus)->remap)
 #define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))

-/*
- * Router with 4366 can have 128 stations and 16 BSS,
- * hence (128 stations x 4 access categories for ucast) + 16 bc/mc flowrings
- */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#define struct_pcie_notify struct msm_pcie_notify
+#define struct_pcie_register_event struct msm_pcie_register_event
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
+	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
+#define struct_pcie_notify struct exynos_pcie_notify
+#define struct_pcie_register_event struct exynos_pcie_register_event
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
+ */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
 #define MAX_DHD_TX_FLOWS 320

 /* user defined data structures */
 /* Device console log buffer state */
-#define CONSOLE_LINE_MAX 192
+#define CONSOLE_LINE_MAX 192u
 #define CONSOLE_BUFFER_MAX (8 * 1024)

-#ifndef MAX_CNTL_D3ACK_TIMEOUT
-#define MAX_CNTL_D3ACK_TIMEOUT 2
-#endif /* MAX_CNTL_D3ACK_TIMEOUT */
+#ifdef IDLE_TX_FLOW_MGMT
+#define IDLE_FLOW_LIST_TIMEOUT 5000
+#define IDLE_FLOW_RING_TIMEOUT 5000
+#endif /* IDLE_TX_FLOW_MGMT */

-#ifdef DHD_DEBUG
+/* HWA enabled and inited */
+#define HWA_ACTIVE(dhd) (((dhd)->hwa_enable) && ((dhd)->hwa_inited))
+
+/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */
+#define IDMA_ENAB(dhd) ((dhd)->idma_enable)
+#define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited))
+
+#define IDMA_CAPABLE(bus) (((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))
+
+/* IFRM (Implicit Flow Ring Manager enable and inited */
+#define IFRM_ENAB(dhd) ((dhd)->ifrm_enable)
+#define IFRM_ACTIVE(dhd) (((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
+
+/* DAR registers use for h2d doorbell */
+#define DAR_ENAB(dhd) ((dhd)->dar_enable)
+#define DAR_ACTIVE(dhd) (((dhd)->dar_enable) && ((dhd)->dar_inited))
+
+/* DAR WAR for revs < 64 */
+#define DAR_PWRREQ(bus) (((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))
+
+/* PCIE CTO Prevention and Recovery */
+#define PCIECTO_ENAB(bus) ((bus)->cto_enable)
+
+/* Implicit DMA index usage :
+ * Index 0 for h2d write index transfer
+ * Index 1 for d2h read index transfer
+ */
+#define IDMA_IDX0 0
+#define IDMA_IDX1 1
+#define IDMA_IDX2 2
+#define IDMA_IDX3 3
+#define DMA_TYPE_SHIFT 4
+#define DMA_TYPE_IDMA 1
+
+#define DHDPCIE_CONFIG_HDR_SIZE 16
+#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
+#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
+#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
+#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
+#define DHDPCIE_PM_D2_DELAY 200 /* 200us */

 typedef struct dhd_console {
 	uint count; /* Poll interval msec counter */
@@ -82,17 +163,88 @@
 	uint8 *buf; /* Log buffer (host copy) */
 	uint last; /* Last buffer read index */
 } dhd_console_t;
-#endif /* DHD_DEBUG */
+
 typedef struct ring_sh_info {
 	uint32 ring_mem_addr;
 	uint32 ring_state_w;
 	uint32 ring_state_r;
 } ring_sh_info_t;

+#define DEVICE_WAKE_NONE 0
+#define DEVICE_WAKE_OOB 1
+#define DEVICE_WAKE_INB 2
+
+#define INBAND_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_INB)
+#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB)
+#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE)
+
+#define PCIE_RELOAD_WAR_ENAB(buscorerev) \
+	((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || (buscorerev == 70))
+
+/*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * Producer Index Feature which is used by F1 gets reset on F0 FLR
+ * fixed in REV68
+ */
+#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
+	((buscorerev == 66) || (buscorerev == 67))
+
+struct dhd_bus;
+
+struct dhd_pcie_rev {
+	uint8 fw_rev;
+	void (*handle_mb_data)(struct dhd_bus *);
+};
+
+typedef struct dhdpcie_config_save
+{
+	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];
+	/* pmcsr save */
+	uint32 pmcsr;
+	/* express save */
+	uint32 exp_dev_ctrl_stat;
+	uint32 exp_link_ctrl_stat;
+	uint32 exp_dev_ctrl_stat2;
+	uint32 exp_link_ctrl_stat2;
+	/* msi save */
+	uint32 msi_cap;
+	uint32 msi_addr0;
+	uint32 msi_addr1;
+	uint32 msi_data;
+	/* l1pm save */
+	uint32 l1pm0;
+	uint32 l1pm1;
+	/* ltr save */
+	uint32 ltr;
+	/* aer save */
+	uint32 aer_caps_ctrl; /* 0x18 */
+	uint32 aer_severity; /* 0x0C */
+	uint32 aer_umask; /* 0x08 */
+	uint32 aer_cmask; /* 0x14 */
+	uint32 aer_root_cmd; /* 0x2c */
+	/* BAR0 and BAR1 windows */
+	uint32 bar0_win;
+	uint32 bar1_win;
+} dhdpcie_config_save_t;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_low_power_state {
+	DHD_BUS_NO_LOW_POWER_STATE, /* Not in low power state */
+	DHD_BUS_D3_INFORM_SENT, /* D3 INFORM sent */
+	DHD_BUS_D3_ACK_RECIEVED, /* D3 ACK recieved */
+};
+
+/** Instantiated once for each hardware (dongle) instance that this DHD manages */
 typedef struct dhd_bus {
-	dhd_pub_t *dhd;
+	dhd_pub_t *dhd; /**< pointer to per hardware (dongle) unique instance */
+	struct pci_dev *rc_dev; /* pci RC device handle */
 	struct pci_dev *dev; /* pci device handle */
-	dll_t const_flowring; /* constructed list of tx flowring queues */
+
+	dll_t flowring_active_list; /* constructed list of tx flowring queues */
+#ifdef IDLE_TX_FLOW_MGMT
+	uint64 active_list_last_process_ts;
+	/* stores the timestamp of active list processing */
+#endif /* IDLE_TX_FLOW_MGMT */

 	si_t *sih; /* Handle for SI calls */
 	char *vars; /* Variables (from CIS and/or other) */
@@ -101,9 +253,14 @@
 	sbpcieregs_t *reg; /* Registers for PCIE core */

 	uint armrev; /* CPU core revision */
+	uint coreid; /* CPU core id */
 	uint ramrev; /* SOCRAM core revision */
 	uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
 	uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+	bool ramsize_adjusted; /* flag to note adjustment, so that
+	 * adjustment routine and file io
+	 * are avoided on D3 cold -> D0
+	 */
 	uint32 srmemsize; /* Size of SRMEM */

 	uint32 bus; /* gSPI or SDIO bus */
@@ -114,14 +271,6 @@
 	uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
 	char *fw_path; /* module_param: path to firmware image */
 	char *nv_path; /* module_param: path to nvram vars file */
-#ifdef CACHE_FW_IMAGES
-	int processed_nvram_params_len; /* Modified len of NVRAM info */
-#endif
-
-#if defined(CUSTOMER_HW_31_2)
-	char *nvram_params; /* user specified nvram params. */
-	int nvram_params_len;
-#endif

 	struct pktq txq; /* Queue length used for flow-control */

@@ -131,10 +280,8 @@
 	uint intrcount; /* Count of device interrupt callbacks */
 	uint lastintrs; /* Count as of last watchdog timer */

-#ifdef DHD_DEBUG
 	dhd_console_t console; /* Console output polling support */
 	uint console_addr; /* Console address from shared struct */
-#endif /* DHD_DEBUG */

 	bool alp_only; /* Don't use HT clock (ALP only) */

@@ -147,7 +294,6 @@

 	ulong shared_addr;
 	pciedev_shared_t *pcie_sh;
-	bool bus_flowctrl;
 	uint32 dma_rxoffset;
 	volatile char *regs; /* pci device memory va */
 	volatile char *tcm; /* pci device memory va */
@@ -156,12 +302,16 @@
 	uint16 pollrate;
 	uint16 polltick;

-	uint32 *pcie_mb_intr_addr;
+	volatile uint32 *pcie_mb_intr_addr;
+	volatile uint32 *pcie_mb_intr_2_addr;
 	void *pcie_mb_intr_osh;
 	bool sleep_allowed;

+	wake_counts_t wake_counts;
+
 	/* version 3 shared struct related info start */
 	ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
+
 	uint8 h2d_ring_count;
 	uint8 d2h_ring_count;
 	uint32 ringmem_ptr;
@@ -174,29 +324,133 @@
 	/* version 3 shared struct related info end */

 	uint32 def_intmask;
+	uint32 d2h_mb_mask;
+	uint32 pcie_mailbox_mask;
+	uint32 pcie_mailbox_int;
 	bool ltrsleep_on_unload;
 	uint wait_for_d3_ack;
-	uint32 max_sub_queues;
+	uint16 max_tx_flowrings;
+	uint16 max_submission_rings;
+	uint16 max_completion_rings;
+	uint16 max_cmn_rings;
 	uint32 rw_index_sz;
 	bool db1_for_mb;
-	bool suspended;

 	dhd_timeout_t doorbell_timer;
 	bool device_wake_state;
-#ifdef PCIE_OOB
-	bool oob_enabled;
-#endif /* PCIE_OOB */
+	bool irq_registered;
+	bool d2h_intr_method;
 #ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+	defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
+	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820))
 #ifdef CONFIG_ARCH_MSM
-	struct msm_pcie_register_event pcie_event;
-	uint8 islinkdown;
+	uint8 no_cfg_restore;
 #endif /* CONFIG_ARCH_MSM */
+	struct_pcie_register_event pcie_event;
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
+ * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
+ * CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ))
+ */
+	bool read_shm_fail;
 #endif /* SUPPORT_LINKDOWN_RECOVERY */
+	int32 idletime; /* Control for activity timeout */
+#ifdef DHD_PCIE_RUNTIMEPM
+	int32 idlecount; /* Activity timeout counter */
+	int32 bus_wake; /* For wake up the bus */
+	bool runtime_resume_done; /* For check runtime suspend end */
+	struct mutex pm_lock; /* Synchronize for system PM & runtime PM */
+	wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */
+#endif /* DHD_PCIE_RUNTIMEPM */
 	uint32 d3_inform_cnt;
 	uint32 d0_inform_cnt;
 	uint32 d0_inform_in_use_cnt;
 	uint8 force_suspend;
+	uint8 is_linkdown;
+	uint8 no_bus_init;
+#ifdef IDLE_TX_FLOW_MGMT
+	bool enable_idle_flowring_mgmt;
+#endif /* IDLE_TX_FLOW_MGMT */
+	struct dhd_pcie_rev api;
+	bool use_mailbox;
+	bool use_d0_inform;
+	void *bus_lock;
+	void *backplane_access_lock;
+	enum dhd_bus_low_power_state bus_low_power_state;
+	uint32 hostready_count; /* Number of hostready issued */
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+	bool oob_presuspend;
+#endif // endif
+	dhdpcie_config_save_t saved_config;
+	ulong resume_intr_enable_count;
+	ulong dpc_intr_enable_count;
+	ulong isr_intr_disable_count;
+	ulong suspend_intr_disable_count;
+	ulong dpc_return_busdown_count;
+	ulong non_ours_irq_count;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	ulong oob_intr_count;
+	ulong oob_intr_enable_count;
+	ulong oob_intr_disable_count;
+	uint64 last_oob_irq_time;
+	uint64 last_oob_irq_enable_time;
+	uint64 last_oob_irq_disable_time;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+	uint64 isr_entry_time;
+	uint64 isr_exit_time;
+	uint64 dpc_sched_time;
+	uint64 dpc_entry_time;
+	uint64 dpc_exit_time;
+	uint64 resched_dpc_time;
+	uint64 last_d3_inform_time;
+	uint64 last_process_ctrlbuf_time;
+	uint64 last_process_flowring_time;
+	uint64 last_process_txcpl_time;
+	uint64 last_process_rxcpl_time;
+	uint64 last_process_infocpl_time;
+	uint64 last_process_edl_time;
+	uint64 last_suspend_start_time;
+	uint64 last_suspend_end_time;
+	uint64 last_resume_start_time;
+	uint64 last_resume_end_time;
+	uint64 last_non_ours_irq_time;
+	uint8 hwa_enab_bmap;
+	bool idma_enabled;
+	bool ifrm_enabled;
+	bool dar_enabled;
+	uint32 dmaxfer_complete;
+	uint8 dw_option;
+#ifdef DHD_PCIE_RUNTIMEPM
+	bool chk_pm; /* To avoid counting of wake up from Runtime PM */
+#endif /* DHD_PCIE_RUNTIMEPM */
+	bool _dar_war;
+	uint8 dma_chan;
+	bool cto_enable; /* enable PCIE CTO Prevention and recovery */
+	uint32 cto_threshold; /* PCIE CTO timeout threshold */
+	bool cto_triggered; /* CTO is triggered */
+	int pwr_req_ref;
+	bool flr_force_fail; /* user intends to simulate flr force fail */
+	bool intr_enabled; /* ready to receive interrupts from dongle */
+	bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+	ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */
+#endif /* DHD_H2D_LOG_TIME_SYNC */
+	bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
+	bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */
+	uint16 hp2p_txcpl_max_items;
+	uint16 hp2p_rxcpl_max_items;
+	/* PCIE coherent status */
+	uint32 coherent_state;
 } dhd_bus_t;
+
+#ifdef DHD_MSI_SUPPORT
+extern uint enable_msi;
+#endif /* DHD_MSI_SUPPORT */
+
+enum {
+	PCIE_INTX = 0,
+	PCIE_MSI = 1
+};

 /* function declarations */

@@ -205,55 +459,157 @@
 extern void dhdpcie_bus_unregister(void);
 extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);

-extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh,
+extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
 	volatile char *regs, volatile char *tcm, void *pci_dev);
 extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
 extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
 extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
 extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus);
 extern void dhdpcie_bus_release(struct dhd_bus *bus);
 extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
 extern void dhdpcie_free_irq(dhd_bus_t *bus);
 extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
+extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint);
+#else
 extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
 extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
+extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
+extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
 extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
-#ifndef BCMPCIE_OOB_HOST_WAKE
 extern void dhdpcie_pme_active(osl_t *osh, bool enable);
-#endif /* !BCMPCIE_OOB_HOST_WAKE */
 extern bool dhdpcie_pme_cap(osl_t *osh);
 extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val);
+extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask);
 extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val);
+extern int dhdpcie_disable_irq(dhd_bus_t *bus);
+extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
+extern int dhdpcie_enable_irq(dhd_bus_t *bus);
+
+extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
+
+extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
+extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+	bool is_write, uint32 writeval);
+extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+	bool is_write, uint32 writeval);
+extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
 extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
 extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
 extern int dhdpcie_disable_device(dhd_bus_t *bus);
-extern int dhdpcie_enable_device(dhd_bus_t *bus);
 extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
 extern void dhdpcie_free_resource(dhd_bus_t *bus);
+extern void dhdpcie_dump_resource(dhd_bus_t *bus);
 extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
+void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
+#endif // endif
+
+extern int dhdpcie_enable_device(dhd_bus_t *bus);
+
 #ifdef BCMPCIE_OOB_HOST_WAKE
 extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
 extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
 extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
+extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_level(void);
 #endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef PCIE_OOB
-extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val);
-extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus);
-#endif /* PCIE_OOB */

-#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+#if defined(CONFIG_ARCH_EXYNOS)
+#define SAMSUNG_PCIE_VENDOR_ID 0x144d
 #if defined(CONFIG_MACH_UNIVERSAL5433)
 #define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
 #define SAMSUNG_PCIE_CH_NUM
-#elif defined(CONFIG_MACH_UNIVERSAL7420)
+#elif defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
 #define SAMSUNG_PCIE_DEVICE_ID 0xa575
 #define SAMSUNG_PCIE_CH_NUM 1
 #elif defined(CONFIG_SOC_EXYNOS8890)
 #define SAMSUNG_PCIE_DEVICE_ID 0xa544
 #define SAMSUNG_PCIE_CH_NUM 0
+#elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+	defined(CONFIG_SOC_EXYNOS9820)
+#define SAMSUNG_PCIE_DEVICE_ID 0xecec
+#define SAMSUNG_PCIE_CH_NUM 0
 #else
 #error "Not supported platform"
-#endif
+#endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */
+#endif /* CONFIG_ARCH_EXYNOS */
+
+#if defined(CONFIG_ARCH_MSM)
+#define MSM_PCIE_VENDOR_ID 0x17cb
+#if defined(CONFIG_ARCH_APQ8084)
+#define MSM_PCIE_DEVICE_ID 0x0101
+#elif defined(CONFIG_ARCH_MSM8994)
+#define MSM_PCIE_DEVICE_ID 0x0300
+#elif defined(CONFIG_ARCH_MSM8996)
+#define MSM_PCIE_DEVICE_ID 0x0104
+#elif defined(CONFIG_ARCH_MSM8998)
+#define MSM_PCIE_DEVICE_ID 0x0105
+#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
+#define MSM_PCIE_DEVICE_ID 0x0106
+#elif defined(USE_CUSTOM_MSM_PCIE)
+#define MSM_PCIE_DEVICE_ID MSM_PCIE_CUSTOM_DEVICE_ID
+#else
+#error "Not supported platform"
+#endif // endif
+#endif /* CONFIG_ARCH_MSM */
+
+#if defined(CONFIG_X86)
+#define X86_PCIE_VENDOR_ID 0x8086
+#define X86_PCIE_DEVICE_ID 0x9c1a
+#endif /* CONFIG_X86 */
+
+#if defined(CONFIG_ARCH_TEGRA)
+#define TEGRA_PCIE_VENDOR_ID 0x14e4
+#define TEGRA_PCIE_DEVICE_ID 0x4347
+#endif /* CONFIG_ARCH_TEGRA */
+
+#if defined(BOARD_HIKEY)
+#define HIKEY_PCIE_VENDOR_ID 0x19e5
+#define HIKEY_PCIE_DEVICE_ID 0x3660
+#endif /* BOARD_HIKEY */
+
+#define DUMMY_PCIE_VENDOR_ID 0xffff
+#define DUMMY_PCIE_DEVICE_ID 0xffff
+
+#if defined(CONFIG_ARCH_EXYNOS)
+#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID
+#elif defined(CONFIG_ARCH_MSM)
+#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
+#elif defined(CONFIG_X86)
+#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
+#elif defined(CONFIG_ARCH_TEGRA)
+#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
+#elif defined(BOARD_HIKEY)
+#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
+#else
+/* Use dummy vendor and device IDs */
+#define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
+#endif /* CONFIG_ARCH_EXYNOS */
+
+#define DHD_REGULAR_RING 0
+#define DHD_HP2P_RING 1
+
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
 #ifdef CONFIG_MACH_UNIVERSAL5433
 extern int exynos_pcie_pm_suspend(void);
 extern int exynos_pcie_pm_resume(void);
@@ -263,5 +619,79 @@
 #endif /* CONFIG_MACH_UNIVERSAL5433 */
 #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */

+#ifdef CONFIG_ARCH_TEGRA
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
+extern int tegra_pcie_pm_suspend(void);
+extern int tegra_pcie_pm_resume(void);
+#endif // endif
+#endif /* CONFIG_ARCH_TEGRA */
+
 extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
+#ifdef IDLE_TX_FLOW_MGMT
+extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
+extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
+extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
+extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus);
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
+#endif /* DHD_WAKE_STATUS */
+extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
+extern void dhd_bus_hostready(struct dhd_bus *bus);
+extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
+extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
+
+static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
+static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
+static INLINE void
+dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
+{ return; }
+
+int dhdpcie_config_check(dhd_bus_t *bus);
+int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
+int dhdpcie_config_save(dhd_bus_t *bus);
+int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
+
+extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
+
+static INLINE uint32
+dhd_pcie_config_read(osl_t *osh, uint offset, uint size)
+{
+	OSL_DELAY(100);
+	return OSL_PCI_READ_CONFIG(osh, offset, size);
+}
+
+static INLINE uint32
+dhd_pcie_corereg_read(si_t *sih, uint val)
+{
+	OSL_DELAY(100);
+	si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val);
+	return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+}
+
+extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
+	char *clm_path, char *txcap_path);
+
+extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
+extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
+extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
+#ifdef DHD_HP2P
+extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
+#endif // endif
+
 #endif /* dhd_pcie_h */
267697 #endif /* dhd_pcie_h */