forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_msgbuf.c
....@@ -1,17 +1,18 @@
1
-/* SPDX-License-Identifier: GPL-2.0 */
21 /**
32 * @file definition of host message ring functionality
43 * Provides type definitions and function prototypes used to link the
54 * DHD OS, bus, and protocol modules.
65 *
7
- * Copyright (C) 1999-2019, Broadcom Corporation
8
- *
6
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
7
+ *
8
+ * Copyright (C) 1999-2017, Broadcom Corporation
9
+ *
910 * Unless you and Broadcom execute a separate written software license
1011 * agreement governing use of this software, this software is licensed to you
1112 * under the terms of the GNU General Public License version 2 (the "GPL"),
1213 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
1314 * following added to such license:
14
- *
15
+ *
1516 * As a special exception, the copyright holders of this software give you
1617 * permission to link this software with independent modules, and to copy and
1718 * distribute the resulting executable under terms of your choice, provided that
....@@ -19,7 +20,7 @@
1920 * the license of that module. An independent module is a module which is not
2021 * derived from this software. The special exception does not apply to any
2122 * modifications of the software.
22
- *
23
+ *
2324 * Notwithstanding the above, under no circumstances may you combine this
2425 * software in any way with any other Broadcom software provided under a license
2526 * other than the GPL, without Broadcom's express prior written consent.
....@@ -27,9 +28,8 @@
2728 *
2829 * <<Broadcom-WL-IPTag/Open:>>
2930 *
30
- * $Id: dhd_msgbuf.c 608659 2015-12-29 01:18:33Z $
31
+ * $Id: dhd_msgbuf.c 701962 2017-05-30 06:13:15Z $
3132 */
32
-
3333
3434 #include <typedefs.h>
3535 #include <osl.h>
....@@ -37,20 +37,17 @@
3737 #include <bcmutils.h>
3838 #include <bcmmsgbuf.h>
3939 #include <bcmendian.h>
40
+#include <bcmstdlib_s.h>
4041
4142 #include <dngl_stats.h>
4243 #include <dhd.h>
4344 #include <dhd_proto.h>
4445
45
-#ifdef BCMDBUS
46
-#include <dbus.h>
47
-#else
4846 #include <dhd_bus.h>
49
-#endif /* BCMDBUS */
5047
5148 #include <dhd_dbg.h>
5249 #include <siutils.h>
53
-
50
+#include <dhd_debug.h>
5451
5552 #include <dhd_flowring.h>
5653
....@@ -61,11 +58,27 @@
6158 #if defined(DHD_LB)
6259 #include <linux/cpu.h>
6360 #include <bcm_ring.h>
64
-#define DHD_LB_WORKQ_SZ (8192)
61
+#define DHD_LB_WORKQ_SZ (8192)
6562 #define DHD_LB_WORKQ_SYNC (16)
6663 #define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
6764 #endif /* DHD_LB */
6865
66
+#include <etd.h>
67
+#include <hnd_debug.h>
68
+#include <bcmtlv.h>
69
+#include <hnd_armtrap.h>
70
+#include <dnglevent.h>
71
+
72
+#ifdef DHD_PKT_LOGGING
73
+#include <dhd_pktlog.h>
74
+#include <dhd_linux_pktdump.h>
75
+#endif /* DHD_PKT_LOGGING */
76
+#ifdef DHD_EWPR_VER2
77
+#include <dhd_bitpack.h>
78
+#endif /* DHD_EWPR_VER2 */
79
+
80
+extern char dhd_version[];
81
+extern char fw_version[];
6982
7083 /**
7184 * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
....@@ -95,11 +108,13 @@
95108
96109 #define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */
97110 #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
98
-#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
99111
100112 /* flags for ioctl pending status */
101113 #define MSGBUF_IOCTL_ACK_PENDING (1<<0)
102114 #define MSGBUF_IOCTL_RESP_PENDING (1<<1)
115
+
116
+#define DHD_IOCTL_REQ_PKTBUFSZ 2048
117
+#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
103118
104119 #define DMA_ALIGN_LEN 4
105120
....@@ -108,16 +123,19 @@
108123
109124 #ifdef BCM_HOST_BUF
110125 #ifndef DMA_HOST_BUFFER_LEN
111
-#define DMA_HOST_BUFFER_LEN 0x80000
112
-#endif
126
+#define DMA_HOST_BUFFER_LEN 0x200000
127
+#endif // endif
113128 #endif /* BCM_HOST_BUF */
114129
115130 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
116131
117
-#define DHD_FLOWRING_MAX_EVENTBUF_POST 8
132
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
133
+#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
118134 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
135
+#define DHD_H2D_INFORING_MAX_BUF_POST 32
136
+#define DHD_MAX_TSBUF_POST 8
119137
120
-#define DHD_PROT_FUNCS 37
138
+#define DHD_PROT_FUNCS 43
121139
122140 /* Length of buffer in host for bus throughput measurement */
123141 #define DHD_BUS_TPUT_BUF_LEN 2048
....@@ -128,7 +146,12 @@
128146 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
129147
130148 #define RING_NAME_MAX_LENGTH 24
149
+#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
150
+/* Giving room before ioctl_trans_id rolls over. */
151
+#define BUFFER_BEFORE_ROLLOVER 300
131152
153
+/* 512K memory + 32K registers */
154
+#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
132155
133156 struct msgbuf_ring; /* ring context for common and flow rings */
134157
....@@ -152,10 +175,19 @@
152175 *
153176 * Dongle advertizes host side sync mechanism requirements.
154177 */
155
-#define PCIE_D2H_SYNC
156178
157
-#if defined(PCIE_D2H_SYNC)
158
-#define PCIE_D2H_SYNC_WAIT_TRIES 512
179
+#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
180
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
181
+#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
182
+
183
+#define HWA_DB_TYPE_RXPOST (0x0050)
184
+#define HWA_DB_TYPE_TXCPLT (0x0060)
185
+#define HWA_DB_TYPE_RXCPLT (0x0170)
186
+#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
187
+
188
+#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
189
+#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
190
+#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
159191
160192 /**
161193 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
....@@ -165,8 +197,18 @@
165197 */
166198 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
167199 volatile cmn_msg_hdr_t *msg, int msglen);
168
-#endif /* PCIE_D2H_SYNC */
169200
201
+/**
202
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
203
+ * For EDL messages.
204
+ *
205
+ * On success: return cmn_msg_hdr_t::msg_type
206
+ * On failure: return 0 (invalid msg_type)
207
+ */
208
+#ifdef EWP_EDL
209
+typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
210
+ volatile cmn_msg_hdr_t *msg);
211
+#endif /* EWP_EDL */
170212
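For orientation, these sync callbacks are bound once in dhd_prot_d2h_sync_init() further below and are then consumed by the message-processing path roughly as in this sketch (an illustration only, not a verbatim quote of the dispatch code; the local names are assumed):

    /* wait until the work item has fully landed in host memory, then dispatch on its type */
    uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring,
            (volatile cmn_msg_hdr_t *)msg, ring->item_len);
    if (msg_type == MSG_TYPE_INVALID) {
        /* a livelock was declared for this work item; it is skipped */
    }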
171213 /*
172214 * +----------------------------------------------------------------------------
....@@ -220,8 +262,9 @@
220262 #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
221263
222264 /* Determine whether a ringid belongs to a TxPost flowring */
223
-#define DHD_IS_FLOWRING(ringid) \
224
- ((ringid) >= BCMPCIE_COMMON_MSGRINGS)
265
+#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
266
+ ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
267
+ (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
225268
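A brief worked example of the new two-argument check (illustrative numbers only; BCMPCIE_COMMON_MSGRINGS is assumed to be 5, i.e. 2 H2D plus 3 D2H common rings, per bcmmsgbuf.h):

    uint16 max_flow_rings = 40;           /* hypothetical dongle advertisement */
    DHD_IS_FLOWRING(4, max_flow_rings);   /* FALSE: ringid 4 is still a common ring */
    DHD_IS_FLOWRING(5, max_flow_rings);   /* TRUE:  first TxPost flowring */
    DHD_IS_FLOWRING(45, max_flow_rings);  /* FALSE: past the last flowring (5 + 40) */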
226269 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
227270 #define DHD_FLOWID_TO_RINGID(flowid) \
....@@ -236,19 +279,28 @@
236279 * any array of H2D rings.
237280 */
238281 #define DHD_H2D_RING_OFFSET(ringid) \
239
- ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
282
+ (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
283
+
284
+/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
285
+ * This may be used for IFRM.
286
+ */
287
+#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
288
+ ((ringid) - BCMPCIE_COMMON_MSGRINGS)
240289
241290 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
242291 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
243292 * any array of D2H rings.
293
+ * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
294
+ * max_h2d_rings: total number of h2d rings
244295 */
245
-#define DHD_D2H_RING_OFFSET(ringid) \
246
- ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
296
+#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
297
+ ((ringid) > (max_h2d_rings) ? \
298
+ ((ringid) - max_h2d_rings) : \
299
+ ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
247300
248301 /* Convert a D2H DMA Indices Offset to a RingId */
249302 #define DHD_D2H_RINGID(offset) \
250303 ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
251
-
252304
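The offset macros above can be illustrated the same way (again with hypothetical values; BCMPCIE_H2D_COMMON_MSGRINGS is assumed to be 2, and max_h2d_rings = 43 is only an example):

    DHD_H2D_RING_OFFSET(1);        /* common H2D ring: offset is the ringid itself, 1 */
    DHD_H2D_RING_OFFSET(7);        /* TxPost flowring: DHD_RINGID_TO_FLOWID(7) */
    DHD_D2H_RING_OFFSET(3, 43);    /* common D2H ring (tx complete): 3 - 2 = 1 */
    DHD_D2H_RING_OFFSET(45, 43);   /* D2H ring placed after all 43 H2D rings: 45 - 43 = 2 */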
253305 #define DHD_DMAH_NULL ((void*)NULL)
254306
....@@ -263,7 +315,65 @@
263315 #define DHD_DMA_PAD (L1_CACHE_BYTES)
264316 #else
265317 #define DHD_DMA_PAD (128)
266
-#endif
318
+#endif // endif
319
+
320
+/*
321
+ * +----------------------------------------------------------------------------
322
+ * Flowring Pool
323
+ *
324
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
325
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
326
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
327
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
328
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
329
+ *
330
+ * Each DMA-able buffer may be allocated independently, or may be carved out
331
+ * of a single large contiguous region that is registered with the protocol
332
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
333
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
334
+ *
335
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
336
+ * of h2d rings is not yet known.
337
+ *
338
+ * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
339
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
340
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
341
+ * See: dhd_prot_flowrings_pool_attach()
342
+ *
343
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
344
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
345
+ * pool on flowring deletion.
346
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
347
+ *
348
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
349
+ * are detached (returned back to the carved region or freed), and the pool of
350
+ * msgbuf_ring and any objects allocated against it are freed.
351
+ * See: dhd_prot_flowrings_pool_detach()
352
+ *
353
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
354
+ * state as-if upon an attach. All DMA-able buffers are retained.
355
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
356
+ * pool attach will notice that the pool persists and continue to use it. This
357
+ * will avoid the case of a fragmented DMA-able region.
358
+ *
359
+ * +----------------------------------------------------------------------------
360
+ */
361
+
362
+/* Conversion of a flowid to a flowring pool index */
363
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
364
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
365
+
366
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
367
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
368
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
369
+ DHD_FLOWRINGS_POOL_OFFSET(flowid)
370
+
371
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
372
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
373
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
374
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
375
+ (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
376
+ (ring)++, (flowid)++)
267377
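As a usage sketch of the pool macros above (the real logic lives in dhd_prot_flowrings_pool_fetch()/dhd_prot_flowrings_pool_release(); the helper below is hypothetical):

    static void example_reset_all_flowrings(dhd_prot_t *prot, uint16 h2d_rings_total)
    {
        msgbuf_ring_t *ring;
        uint16 flowid;
        uint16 total_flowrings = h2d_rings_total - BCMPCIE_H2D_COMMON_MSGRINGS;

        FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) {
            ring->inited = FALSE;   /* return each preallocated ring to its post-attach state */
        }
    }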
268378 /* Used in loopback tests */
269379 typedef struct dhd_dmaxfer {
....@@ -273,6 +383,10 @@
273383 uint32 destdelay;
274384 uint32 len;
275385 bool in_progress;
386
+ uint64 start_usec;
387
+ uint64 time_taken;
388
+ uint32 d11_lpbk;
389
+ int status;
276390 } dhd_dmaxfer_t;
277391
278392 /**
....@@ -290,6 +404,7 @@
290404 bool inited;
291405 uint16 idx; /* ring id */
292406 uint16 rd; /* read index */
407
+ uint16 curr_rd; /* read index for debug */
293408 uint16 wr; /* write index */
294409 uint16 max_items; /* maximum number of items in ring */
295410 uint16 item_len; /* length of each item in the ring */
....@@ -301,7 +416,17 @@
301416 /* # of messages on ring not yet announced to dongle */
302417 uint16 pend_items_count;
303418 #endif /* TXP_FLUSH_NITEMS */
419
+
420
+ uint8 ring_type;
421
+ uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
422
+ uint8 n_completion_ids;
423
+ bool create_pending;
424
+ uint16 create_req_id;
425
+ uint8 current_phase;
426
+ uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
304427 uchar name[RING_NAME_MAX_LENGTH];
428
+ uint32 ring_mem_allocated;
429
+ void *ring_lock;
305430 } msgbuf_ring_t;
306431
307432 #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
....@@ -309,21 +434,29 @@
309434 ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
310435 (((ring)->max_items - 1) * (ring)->item_len))
311436
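The two continuation lines above complete DHD_RING_END_VA(); its opening '#define DHD_RING_END_VA(ring) \' line (old 308 / new 433) falls just outside this hunk. Individual work items follow the same arithmetic, illustrated here for reference:

    /* address of item i, with 0 <= i < ring->max_items; DHD_RING_END_VA() is thus
     * the address of the last item, not one past the end of the ring buffer.
     */
    uint8 *item = (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)i * (ring)->item_len);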
312
-
437
+/* This can be overwritten by module parameter defined in dhd_linux.c
438
+ * or by dhd iovar h2d_max_txpost.
439
+ */
440
+int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
313441
314442 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
315443 typedef struct dhd_prot {
316444 osl_t *osh; /* OSL handle */
445
+ uint16 rxbufpost_sz;
317446 uint16 rxbufpost;
318447 uint16 max_rxbufpost;
319448 uint16 max_eventbufpost;
320449 uint16 max_ioctlrespbufpost;
450
+ uint16 max_tsbufpost;
451
+ uint16 max_infobufpost;
452
+ uint16 infobufpost;
321453 uint16 cur_event_bufs_posted;
322454 uint16 cur_ioctlresp_bufs_posted;
455
+ uint16 cur_ts_bufs_posted;
323456
324457 /* Flow control mechanism based on active transmits pending */
325
- uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
326
- uint16 max_tx_count;
458
+ osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
459
+ uint16 h2d_max_txpost;
327460 uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
328461
329462 /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
....@@ -332,6 +465,9 @@
332465 msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
333466 msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
334467 msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
468
+ msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
469
+ msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
470
+ msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
335471
336472 msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
337473 dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
....@@ -340,6 +476,7 @@
340476 uint32 rx_dataoffset;
341477
342478 dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
479
+ dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
343480
344481 /* ioctl related resources */
345482 uint8 ioctl_state;
....@@ -358,37 +495,52 @@
358495 dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
359496 dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
360497 dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
498
+ dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
361499
362500 dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
363501
364502 dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
365503 uint32 flowring_num;
366504
367
-#if defined(PCIE_D2H_SYNC)
368505 d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
506
+#ifdef EWP_EDL
507
+ d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
508
+#endif /* EWP_EDL */
369509 ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
370510 ulong d2h_sync_wait_tot; /* total wait loops */
371
-#endif /* PCIE_D2H_SYNC */
372511
373512 dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
374513
375514 uint16 ioctl_seq_no;
376515 uint16 data_seq_no;
377516 uint16 ioctl_trans_id;
378
- void *pktid_map_handle; /* a pktid maps to a packet and its metadata */
517
+ void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
518
+ void *pktid_rx_map; /* pktid map for rx path */
519
+ void *pktid_tx_map; /* pktid map for tx path */
379520 bool metadata_dbg;
380521 void *pktid_map_handle_ioctl;
522
+#ifdef DHD_MAP_PKTID_LOGGING
523
+ void *pktid_dma_map; /* pktid map for DMA MAP */
524
+ void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
525
+#endif /* DHD_MAP_PKTID_LOGGING */
526
+ uint32 pktid_depleted_cnt; /* pktid depleted count */
527
+ /* netif tx queue stop count */
528
+ uint8 pktid_txq_stop_cnt;
529
+ /* netif tx queue start count */
530
+ uint8 pktid_txq_start_cnt;
531
+ uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
532
+ uint64 ioctl_ack_time; /* timestamp for ioctl ack */
533
+ uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
381534
382535 /* Applications/utilities can read tx and rx metadata using IOVARs */
383536 uint16 rx_metadata_offset;
384537 uint16 tx_metadata_offset;
385538
386
-
387539 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
388540 /* Host's soft doorbell configuration */
389541 bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
390542 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
391
-#if defined(DHD_LB)
543
+
392544 /* Work Queues to be used by the producer and the consumer, and threshold
393545 * when the WRITE index must be synced to consumer's workq
394546 */
....@@ -400,17 +552,50 @@
400552 uint32 rx_compl_prod_sync ____cacheline_aligned;
401553 bcm_workq_t rx_compl_prod, rx_compl_cons;
402554 #endif /* DHD_LB_RXC */
403
-#endif /* DHD_LB */
555
+
556
+ dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
557
+
558
+ uint32 host_ipc_version; /* Host supported IPC rev */
559
+ uint32 device_ipc_version; /* FW supported IPC rev */
560
+ uint32 active_ipc_version; /* Host advertised IPC rev */
561
+ dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
562
+ bool hostts_req_buf_inuse;
563
+ bool rx_ts_log_enabled;
564
+ bool tx_ts_log_enabled;
565
+ bool no_retry;
566
+ bool no_aggr;
567
+ bool fixed_rate;
568
+ dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
569
+#ifdef DHD_HP2P
570
+ msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
571
+ msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
572
+#endif /* DHD_HP2P */
573
+ bool no_tx_resource;
404574 } dhd_prot_t;
405575
576
+#ifdef DHD_EWPR_VER2
577
+#define HANG_INFO_BASE64_BUFFER_SIZE 640
578
+#endif // endif
579
+
580
+#ifdef DHD_DUMP_PCIE_RINGS
581
+static
582
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
583
+ const void *user_buf, unsigned long *file_posn);
584
+#ifdef EWP_EDL
585
+static
586
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
587
+ unsigned long *file_posn);
588
+#endif /* EWP_EDL */
589
+#endif /* DHD_DUMP_PCIE_RINGS */
590
+
591
+extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
592
+extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
406593 /* Convert a dmaaddr_t to a base_addr with htol operations */
407594 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
408595
409596 /* APIs for managing a DMA-able buffer */
410597 static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
411
-static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
412598 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
413
-static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
414599
415600 /* msgbuf ring management */
416601 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
....@@ -418,6 +603,7 @@
418603 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
419604 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
420605 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
606
+static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
421607
422608 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
423609 static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
....@@ -444,13 +630,12 @@
444630 void *p, uint16 len);
445631 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
446632
447
-/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
448633 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
449634 dhd_dma_buf_t *dma_buf, uint32 bufsz);
450635
451636 /* Set/Get a RD or WR index in the array of indices */
452637 /* See also: dhd_prot_dma_indx_init() */
453
-static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
638
+void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
454639 uint16 ringid);
455640 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
456641
....@@ -469,11 +654,12 @@
469654 void *buf, int ifidx);
470655
471656 /* Post buffers for Rx, control ioctl response and events */
472
-static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
657
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
473658 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
474659 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
475660 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
476661 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
662
+static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
477663
478664 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
479665
....@@ -487,7 +673,6 @@
487673 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
488674 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
489675 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
490
-static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
491676 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
492677
493678 /* Loopback test with dongle */
....@@ -500,11 +685,38 @@
500685 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
501686 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
502687 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
688
+static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
689
+static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
690
+
691
+/* Monitor Mode */
692
+#ifdef WL_MONITOR
693
+extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
694
+extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
695
+#endif /* WL_MONITOR */
503696
504697 /* Configure a soft doorbell per D2H ring */
505698 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
506
-static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
699
+static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
700
+static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
701
+static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
702
+static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
703
+static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
704
+static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
705
+#ifdef DHD_HP2P
706
+static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
707
+#endif /* DHD_HP2P */
708
+#ifdef EWP_EDL
709
+static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
710
+#endif // endif
711
+static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
712
+static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
507713
714
+#ifdef DHD_HP2P
715
+static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
716
+static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
717
+static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
718
+static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
719
+#endif // endif
508720 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
509721
510722 /** callback functions for messages generated by the dongle */
....@@ -529,32 +741,38 @@
529741 NULL,
530742 dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
531743 NULL,
532
- dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
744
+ NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
533745 NULL,
534746 dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
535747 NULL, /* MSG_TYPE_FLOW_RING_RESUME */
536
- NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
748
+ dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
537749 NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
538
- NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
750
+ dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
539751 NULL, /* MSG_TYPE_INFO_BUF_POST */
540
- NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
752
+ dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
541753 NULL, /* MSG_TYPE_H2D_RING_CREATE */
542754 NULL, /* MSG_TYPE_D2H_RING_CREATE */
543
- NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
544
- NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
755
+ dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
756
+ dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
545757 NULL, /* MSG_TYPE_H2D_RING_CONFIG */
546758 NULL, /* MSG_TYPE_D2H_RING_CONFIG */
547759 NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
548
- dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
760
+ dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
549761 NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
550
- NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
762
+ dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
763
+ NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
764
+ NULL, /* MSG_TYPE_HOSTTIMSTAMP */
765
+ dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
766
+ dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
767
+ NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
768
+ dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
551769 };
552
-
553770
554771 #ifdef DHD_RX_CHAINING
555772
556773 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
557
- (dhd_rx_pkt_chainable((dhd), (ifidx)) && \
774
+ (dhd_wet_chainable(dhd) && \
775
+ dhd_rx_pkt_chainable((dhd), (ifidx)) && \
558776 !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
559777 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
560778 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
....@@ -571,10 +789,49 @@
571789
572790 #endif /* DHD_RX_CHAINING */
573791
792
+#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
793
+
574794 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
575795
576
-#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
796
+bool
797
+dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
798
+{
799
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
800
+ uint16 rd, wr;
801
+ bool ret;
577802
803
+ if (dhd->dma_d2h_ring_upd_support) {
804
+ wr = flow_ring->wr;
805
+ } else {
806
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
807
+ }
808
+ if (dhd->dma_h2d_ring_upd_support) {
809
+ rd = flow_ring->rd;
810
+ } else {
811
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
812
+ }
813
+ ret = (wr == rd) ? TRUE : FALSE;
814
+ return ret;
815
+}
816
+
817
+void
818
+dhd_prot_dump_ring_ptrs(void *prot_info)
819
+{
820
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
821
+ DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
822
+ ring->curr_rd, ring->rd, ring->wr));
823
+}
824
+
825
+uint16
826
+dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
827
+{
828
+ return (uint16)h2d_max_txpost;
829
+}
830
+void
831
+dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
832
+{
833
+ h2d_max_txpost = max_txpost;
834
+}
578835 /**
579836 * D2H DMA to completion callback handlers. Based on the mode advertised by the
580837 * dongle through the PCIE shared region, the appropriate callback will be
....@@ -583,8 +840,8 @@
583840 * does not require host participation, then a noop callback handler will be
584841 * bound that simply returns the msg_type.
585842 */
586
-static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
587
- uint32 tries, uchar *msg, int msglen);
843
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
844
+ uint32 tries, volatile uchar *msg, int msglen);
588845 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
589846 volatile cmn_msg_hdr_t *msg, int msglen);
590847 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
....@@ -592,6 +849,10 @@
592849 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
593850 volatile cmn_msg_hdr_t *msg, int msglen);
594851 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
852
+static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
853
+ uint16 ring_type, uint32 id);
854
+static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
855
+ uint8 type, uint32 id);
595856
596857 /**
597858 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
....@@ -603,22 +864,60 @@
603864 *
604865 */
605866 static void
606
-dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
607
- uchar *msg, int msglen)
867
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
868
+ volatile uchar *msg, int msglen)
608869 {
609
- uint32 seqnum = ring->seqnum;
870
+ uint32 ring_seqnum = ring->seqnum;
610871
611
- DHD_ERROR(("LIVELOCK DHD<%p> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>"
612
- "dma_buf va<%p> msg<%p>\n",
613
- dhd, seqnum, seqnum% D2H_EPOCH_MODULO, tries,
872
+ if (dhd_query_bus_erros(dhd)) {
873
+ return;
874
+ }
875
+
876
+ DHD_ERROR((
877
+ "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
878
+ " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
879
+ dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
614880 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
615
- ring->dma_buf.va, msg));
616
- prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
881
+ ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
617882
618
-#if defined(SUPPORT_LINKDOWN_RECOVERY) && defined(CONFIG_ARCH_MSM)
619
- dhd->bus->islinkdown = 1;
620
- dhd_os_check_hang(dhd, 0, -ETIMEDOUT);
621
-#endif /* SUPPORT_LINKDOWN_RECOVERY && CONFIG_ARCH_MSM */
883
+ dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
884
+
885
+ /* Try to resume if already suspended or suspend in progress */
886
+#ifdef DHD_PCIE_RUNTIMEPM
887
+ dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
888
+#endif /* DHD_PCIE_RUNTIMEPM */
889
+
890
+ /* Skip if still in suspended or suspend in progress */
891
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
892
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
893
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
894
+ goto exit;
895
+ }
896
+
897
+ dhd_bus_dump_console_buffer(dhd->bus);
898
+ dhd_prot_debug_info_print(dhd);
899
+
900
+#ifdef DHD_FW_COREDUMP
901
+ if (dhd->memdump_enabled) {
902
+ /* collect core dump */
903
+ dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
904
+ dhd_bus_mem_dump(dhd);
905
+ }
906
+#endif /* DHD_FW_COREDUMP */
907
+
908
+exit:
909
+ dhd_schedule_reset(dhd);
910
+
911
+#ifdef OEM_ANDROID
912
+#ifdef SUPPORT_LINKDOWN_RECOVERY
913
+#ifdef CONFIG_ARCH_MSM
914
+ dhd->bus->no_cfg_restore = 1;
915
+#endif /* CONFIG_ARCH_MSM */
916
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
917
+ dhd_os_send_hang_message(dhd);
918
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
919
+#endif /* OEM_ANDROID */
920
+ dhd->livelock_occured = TRUE;
622921 }
623922
624923 /**
....@@ -632,27 +931,63 @@
632931 uint32 tries;
633932 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
634933 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
635
- volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
934
+ volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
636935 dhd_prot_t *prot = dhd->prot;
936
+ uint32 msg_seqnum;
937
+ uint32 step = 0;
938
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
939
+ uint32 total_tries = 0;
637940
638941 ASSERT(msglen == ring->item_len);
639942
640
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
641
- uint32 msg_seqnum = *marker;
642
- if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
643
- ring->seqnum++; /* next expected sequence number */
644
- goto dma_completed;
645
- }
943
+ BCM_REFERENCE(delay);
944
+ /*
945
+ * For retries we have to make some sort of stepper algorithm.
946
+ * We see that every time when the Dongle comes out of the D3
947
+ * Cold state, the first D2H mem2mem DMA takes more time to
948
+ * complete, leading to livelock issues.
949
+ *
950
+ * Case 1 - Apart from Host CPU some other bus master is
951
+ * accessing the DDR port, probably page close to the ring
952
+ * so, PCIE does not get a chance to update the memory.
953
+ * Solution - Increase the number of tries.
954
+ *
955
+ * Case 2 - The 50usec delay given by the Host CPU is not
956
+ * sufficient for the PCIe RC to start its work.
957
+ * In this case the breathing time of 50usec given by
958
+ * the Host CPU is not sufficient.
959
+ * Solution: Increase the delay in a stepper fashion.
960
+ * This is done to ensure that there are no
961
+ * unwanted extra delay introduced in normal conditions.
962
+ */
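Worked bound, assuming the constants defined earlier in this file (PCIE_D2H_SYNC_WAIT_TRIES = 512, PCIE_D2H_SYNC_NUM_OF_STEPS = 5, PCIE_D2H_SYNC_DELAY = 100 usec):

    /* max OSL_DELAY() wait = 512 tries * 100 us * (1 + 2 + 3 + 4 + 5 steps) = 768 ms
     * before dhd_prot_d2h_sync_livelock() is finally invoked; OSL_CACHE_INV() and
     * OSL_CPU_RELAX() overhead come on top of that.
     */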
963
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
964
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
965
+ msg_seqnum = *marker;
966
+ if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
967
+ ring->seqnum++; /* next expected sequence number */
968
+ /* Check for LIVELOCK induce flag, which is set by firing
969
+ * dhd iovar to induce LIVELOCK error. If flag is set,
970
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
971
+ */
972
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
973
+ goto dma_completed;
974
+ }
975
+ }
646976
647
- if (tries > prot->d2h_sync_wait_max)
648
- prot->d2h_sync_wait_max = tries;
977
+ total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
649978
650
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
651
- OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
979
+ if (total_tries > prot->d2h_sync_wait_max)
980
+ prot->d2h_sync_wait_max = total_tries;
652981
653
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
982
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
983
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
984
+ OSL_DELAY(delay * step); /* Add stepper delay */
654985
655
- dhd_prot_d2h_sync_livelock(dhd, ring, tries, (uchar *)msg, msglen);
986
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
987
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
988
+
989
+ dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
990
+ (volatile uchar *) msg, msglen);
656991
657992 ring->seqnum++; /* skip this message ... leak of a pktid */
658993 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
....@@ -677,27 +1012,70 @@
6771012 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
6781013 uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
6791014 dhd_prot_t *prot = dhd->prot;
1015
+ uint32 step = 0;
1016
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
1017
+ uint32 total_tries = 0;
6801018
6811019 ASSERT(msglen == ring->item_len);
6821020
683
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
684
- prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
685
- if (prot_checksum == 0U) { /* checksum is OK */
1021
+ BCM_REFERENCE(delay);
1022
+ /*
1023
+ * For retries we have to make some sort of stepper algorithm.
1024
+ * We see that every time when the Dongle comes out of the D3
1025
+ * Cold state, the first D2H mem2mem DMA takes more time to
1026
+ * complete, leading to livelock issues.
1027
+ *
1028
+ * Case 1 - Apart from Host CPU some other bus master is
1029
+ * accessing the DDR port, probably page close to the ring
1030
+ * so, PCIE does not get a chance to update the memory.
1031
+ * Solution - Increase the number of tries.
1032
+ *
1033
+ * Case 2 - The 50usec delay given by the Host CPU is not
1034
+ * sufficient for the PCIe RC to start its work.
1035
+ * In this case the breathing time of 50usec given by
1036
+ * the Host CPU is not sufficient.
1037
+ * Solution: Increase the delay in a stepper fashion.
1038
+ * This is done to ensure that there are no
1039
+ * unwanted extra delay introduced in normal conditions.
1040
+ */
1041
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1042
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1043
+ /* First verify if the seqnumber has been updated,
1044
+ * if yes, then only check xorcsum.
1045
+ * Once seqnum and xorcsum is proper that means
1046
+ * complete message has arrived.
1047
+ */
6861048 if (msg->epoch == ring_seqnum) {
687
- ring->seqnum++; /* next expected sequence number */
688
- goto dma_completed;
1049
+ prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
1050
+ num_words);
1051
+ if (prot_checksum == 0U) { /* checksum is OK */
1052
+ ring->seqnum++; /* next expected sequence number */
1053
+ /* Check for LIVELOCK induce flag, which is set by firing
1054
+ * dhd iovar to induce LIVELOCK error. If flag is set,
1055
+ * MSG_TYPE_INVALID is returned, which results in a
1056
+ * LIVELOCK error.
1057
+ */
1058
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1059
+ goto dma_completed;
1060
+ }
1061
+ }
6891062 }
690
- }
6911063
692
- if (tries > prot->d2h_sync_wait_max)
693
- prot->d2h_sync_wait_max = tries;
1064
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
6941065
695
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
696
- OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
1066
+ if (total_tries > prot->d2h_sync_wait_max)
1067
+ prot->d2h_sync_wait_max = total_tries;
6971068
698
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
1069
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1070
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
1071
+ OSL_DELAY(delay * step); /* Add stepper delay */
6991072
700
- dhd_prot_d2h_sync_livelock(dhd, ring, tries, (uchar *)msg, msglen);
1073
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
1074
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1075
+
1076
+ DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1077
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1078
+ (volatile uchar *) msg, msglen);
7011079
7021080 ring->seqnum++; /* skip this message ... leak of a pktid */
7031081 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
....@@ -717,7 +1095,197 @@
7171095 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
7181096 volatile cmn_msg_hdr_t *msg, int msglen)
7191097 {
720
- return msg->msg_type;
1098
+ /* Check for LIVELOCK induce flag, which is set by firing
1099
+ * dhd iovar to induce LIVELOCK error. If flag is set,
1100
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1101
+ */
1102
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1103
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1104
+ return MSG_TYPE_INVALID;
1105
+ } else {
1106
+ return msg->msg_type;
1107
+ }
1108
+}
1109
+
1110
+#ifdef EWP_EDL
1111
+/**
1112
+ * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1113
+ * header values at both the beginning and end of the payload.
1114
+ * The cmn_msg_hdr_t is placed at the start and end of the payload
1115
+ * in each work item in the EDL ring.
1116
+ * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
1117
+ * and the length of the payload in the 'request_id' field.
1118
+ * Structure of each work item in the EDL ring:
1119
+ * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1120
+ * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
1121
+ * too costly on the dongle side and might take up too many ARM cycles,
1122
+ * hence the xorcsum sync method is not being used for EDL ring.
1123
+ */
1124
+static int
1125
+BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1126
+ volatile cmn_msg_hdr_t *msg)
1127
+{
1128
+ uint32 tries;
1129
+ int msglen = 0, len = 0;
1130
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1131
+ dhd_prot_t *prot = dhd->prot;
1132
+ uint32 step = 0;
1133
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
1134
+ uint32 total_tries = 0;
1135
+ volatile cmn_msg_hdr_t *trailer = NULL;
1136
+ volatile uint8 *buf = NULL;
1137
+ bool valid_msg = FALSE;
1138
+
1139
+ BCM_REFERENCE(delay);
1140
+ /*
1141
+ * For retries we have to make some sort of stepper algorithm.
1142
+ * We see that every time when the Dongle comes out of the D3
1143
+ * Cold state, the first D2H mem2mem DMA takes more time to
1144
+ * complete, leading to livelock issues.
1145
+ *
1146
+ * Case 1 - Apart from Host CPU some other bus master is
1147
+ * accessing the DDR port, probably page close to the ring
1148
+ * so, PCIE does not get a change to update the memory.
1149
+ * Solution - Increase the number of tries.
1150
+ *
1151
+ * Case 2 - The 50usec delay given by the Host CPU is not
1152
+ * sufficient for the PCIe RC to start its work.
1153
+ * In this case the breathing time of 50usec given by
1154
+ * the Host CPU is not sufficient.
1155
+ * Solution: Increase the delay in a stepper fashion.
1156
+ * This is done to ensure that there are no
1157
+ * unwanted extra delay introdcued in normal conditions.
1158
+ */
1159
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1160
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1161
+ /* First verify if the seqnumber has been updated,
1162
+ * if yes, only then validate the header and trailer.
1163
+ * Once seqnum, header and trailer have been validated, it means
1164
+ * that the complete message has arrived.
1165
+ */
1166
+ valid_msg = FALSE;
1167
+ if (msg->epoch == ring_seqnum &&
1168
+ msg->msg_type == MSG_TYPE_INFO_PYLD &&
1169
+ msg->request_id > 0 &&
1170
+ msg->request_id <= ring->item_len) {
1171
+ /* proceed to check trailer only if header is valid */
1172
+ buf = (volatile uint8 *)msg;
1173
+ msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
1174
+ buf += msglen;
1175
+ if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
1176
+ trailer = (volatile cmn_msg_hdr_t *)buf;
1177
+ valid_msg = (trailer->epoch == ring_seqnum) &&
1178
+ (trailer->msg_type == msg->msg_type) &&
1179
+ (trailer->request_id == msg->request_id);
1180
+ if (!valid_msg) {
1181
+ DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
1182
+ " expected, seqnum=%u; reqid=%u. Retrying... \n",
1183
+ __FUNCTION__, trailer->epoch, trailer->request_id,
1184
+ msg->epoch, msg->request_id));
1185
+ }
1186
+ } else {
1187
+ DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
1188
+ __FUNCTION__, msg->request_id));
1189
+ }
1190
+
1191
+ if (valid_msg) {
1192
+ /* data is OK */
1193
+ ring->seqnum++; /* next expected sequence number */
1194
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1195
+ goto dma_completed;
1196
+ }
1197
+ }
1198
+ } else {
1199
+ DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
1200
+ " msg_type=0x%x, request_id=%u."
1201
+ " Retrying...\n",
1202
+ __FUNCTION__, ring_seqnum, msg->epoch,
1203
+ msg->msg_type, msg->request_id));
1204
+ }
1205
+
1206
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1207
+
1208
+ if (total_tries > prot->d2h_sync_wait_max)
1209
+ prot->d2h_sync_wait_max = total_tries;
1210
+
1211
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1212
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
1213
+ OSL_DELAY(delay * step); /* Add stepper delay */
1214
+
1215
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
1216
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1217
+
1218
+ DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
1219
+ DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
1220
+ " msgtype=0x%x; expected-msgtype=0x%x"
1221
+ " length=%u; expected-max-length=%u", __FUNCTION__,
1222
+ msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
1223
+ msg->request_id, ring->item_len));
1224
+ dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
1225
+ if (trailer && msglen > 0 &&
1226
+ (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
1227
+ DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
1228
+ " msgtype=0x%x; expected-msgtype=0x%x"
1229
+ " length=%u; expected-length=%u", __FUNCTION__,
1230
+ trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
1231
+ trailer->request_id, msg->request_id));
1232
+ dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
1233
+ sizeof(*trailer), DHD_ERROR_VAL);
1234
+ }
1235
+
1236
+ if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
1237
+ len = msglen + sizeof(cmn_msg_hdr_t);
1238
+ else
1239
+ len = ring->item_len;
1240
+
1241
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1242
+ (volatile uchar *) msg, len);
1243
+
1244
+ ring->seqnum++; /* skip this message */
1245
+ return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
1246
+
1247
+dma_completed:
1248
+ DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
1249
+ msg->epoch, msg->request_id));
1250
+
1251
+ prot->d2h_sync_wait_tot += tries;
1252
+ return BCME_OK;
1253
+}
1254
+
1255
+/**
1256
+ * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete and the host
1257
+ * need not try to sync. This noop sync handler will be bound when the dongle
1258
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1259
+ */
1260
+static int BCMFASTPATH
1261
+dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1262
+ volatile cmn_msg_hdr_t *msg)
1263
+{
1264
+ /* Check for LIVELOCK induce flag, which is set by firing
1265
+ * dhd iovar to induce LIVELOCK error. If flag is set,
1266
+ * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1267
+ */
1268
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1269
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1270
+ return BCME_ERROR;
1271
+ } else {
1272
+ if (msg->msg_type == MSG_TYPE_INFO_PYLD)
1273
+ return BCME_OK;
1274
+ else
1275
+ return msg->msg_type;
1276
+ }
1277
+}
1278
+#endif /* EWP_EDL */
1279
+
1280
+INLINE void
1281
+dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
1282
+{
1283
+ /* To synchronize with the previous memory operations call wmb() */
1284
+ OSL_SMP_WMB();
1285
+ dhd->prot->ioctl_received = reason;
1286
+ /* Call another wmb() to make sure before waking up the other event value gets updated */
1287
+ OSL_SMP_WMB();
1288
+ dhd_os_ioctl_resp_wake(dhd);
7211289 }
7221290
7231291 /**
....@@ -732,29 +1300,43 @@
7321300 prot->d2h_sync_wait_tot = 0UL;
7331301
7341302 prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1303
+ prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1304
+
7351305 prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1306
+ prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1307
+
7361308 prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1309
+ prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1310
+
1311
+ if (HWA_ACTIVE(dhd)) {
1312
+ prot->d2hring_tx_cpln.hwa_db_type =
1313
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
1314
+ prot->d2hring_rx_cpln.hwa_db_type =
1315
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
1316
+ DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
1317
+ __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
1318
+ prot->d2hring_rx_cpln.hwa_db_type));
1319
+ }
7371320
7381321 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
7391322 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1323
+#ifdef EWP_EDL
1324
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1325
+#endif /* EWP_EDL */
1326
+ DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
7401327 } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
7411328 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1329
+#ifdef EWP_EDL
1330
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1331
+#endif /* EWP_EDL */
1332
+ DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
7421333 } else {
7431334 prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1335
+#ifdef EWP_EDL
1336
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1337
+#endif /* EWP_EDL */
1338
+ DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
7441339 }
745
-}
746
-
747
-#endif /* PCIE_D2H_SYNC */
748
-
749
-INLINE void
750
-dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
751
-{
752
- /* To synchronize with the previous memory operations call wmb() */
753
- OSL_SMP_WMB();
754
- dhd->prot->ioctl_received = reason;
755
- /* Call another wmb() to make sure before waking up the other event value gets updated */
756
- OSL_SMP_WMB();
757
- dhd_os_ioctl_resp_wake(dhd);
7581340 }
7591341
7601342 /**
....@@ -765,11 +1347,21 @@
7651347 {
7661348 dhd_prot_t *prot = dhd->prot;
7671349 prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1350
+
1351
+ if (HWA_ACTIVE(dhd)) {
1352
+ prot->h2dring_rxp_subn.hwa_db_type =
1353
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
1354
+ DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
1355
+ __FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
1356
+ }
1357
+
1358
+ prot->h2dring_rxp_subn.current_phase = 0;
1359
+
7681360 prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1361
+ prot->h2dring_ctrl_subn.current_phase = 0;
7691362 }
7701363
7711364 /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
772
-
7731365
7741366 /*
7751367 * +---------------------------------------------------------------------------+
....@@ -786,27 +1378,25 @@
7861378 base_addr->high_addr = htol32(PHYSADDRHI(pa));
7871379 }
7881380
789
-
7901381 /**
7911382 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
7921383 */
7931384 static int
7941385 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
7951386 {
796
- uint32 base, end; /* dongle uses 32bit ptr arithmetic */
797
-
1387
+ uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
7981388 ASSERT(dma_buf);
799
- base = PHYSADDRLO(dma_buf->pa);
800
- ASSERT(base);
801
- ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
1389
+ pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1390
+ ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1391
+ ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
8021392 ASSERT(dma_buf->len != 0);
8031393
8041394 /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
805
- end = (base + dma_buf->len); /* end address */
1395
+ end = (pa_lowaddr + dma_buf->len); /* end address */
8061396
807
- if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */
1397
+ if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
8081398 DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
809
- __FUNCTION__, base, dma_buf->len));
1399
+ __FUNCTION__, pa_lowaddr, dma_buf->len));
8101400 return BCME_ERROR;
8111401 }
8121402
....@@ -818,22 +1408,22 @@
8181408 * returns BCME_OK=0 on success
8191409 * returns non-zero negative error value on failure.
8201410 */
821
-static int
1411
+int
8221412 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
8231413 {
8241414 uint32 dma_pad = 0;
8251415 osl_t *osh = dhd->osh;
826
- int dma_align = DMA_ALIGN_LEN;
827
-
1416
+ uint16 dma_align = DMA_ALIGN_LEN;
1417
+ uint32 rem = 0;
8281418
8291419 ASSERT(dma_buf != NULL);
8301420 ASSERT(dma_buf->va == NULL);
8311421 ASSERT(dma_buf->len == 0);
8321422
833
- /* Pad the buffer length by one extra cacheline size.
834
- * Required for D2H direction.
835
- */
836
- dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
1423
+ /* Pad the buffer length to align to cacheline size. */
1424
+ rem = (buf_len % DHD_DMA_PAD);
1425
+ dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
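A concrete instance of this padding computation (illustrative numbers; DHD_DMA_PAD is L1_CACHE_BYTES on Linux and 128 otherwise): buf_len = 1000 with DHD_DMA_PAD = 128 gives rem = 104 and dma_pad = 24, so 1024 bytes are requested and the allocation ends on a cacheline boundary; a buf_len that is already a multiple of 128 adds no padding. The old code above padded by a full DHD_DMA_PAD whenever buf_len was unaligned (1000 + 128 = 1128, which is not a multiple of 128).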
1426
+
8371427 dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
8381428 dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
8391429
....@@ -861,9 +1451,8 @@
8611451 static void
8621452 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
8631453 {
864
- if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
1454
+ if ((dma_buf == NULL) || (dma_buf->va == NULL))
8651455 return;
866
- }
8671456
8681457 (void)dhd_dma_buf_audit(dhd, dma_buf);
8691458
....@@ -876,16 +1465,15 @@
8761465 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
8771466 * dhd_dma_buf_alloc().
8781467 */
879
-static void
1468
+void
8801469 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
8811470 {
8821471 osl_t *osh = dhd->osh;
8831472
8841473 ASSERT(dma_buf);
8851474
886
- if (dma_buf->va == NULL) {
1475
+ if (dma_buf->va == NULL)
8871476 return; /* Allow for free invocation, when alloc failed */
888
- }
8891477
8901478 /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
8911479 (void)dhd_dma_buf_audit(dhd, dma_buf);
....@@ -922,6 +1510,133 @@
9221510
9231511 /*
9241512 * +---------------------------------------------------------------------------+
1513
+ * DHD_MAP_PKTID_LOGGING
1514
+ * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
1515
+ * debugging in customer platform.
1516
+ * +---------------------------------------------------------------------------+
1517
+ */
1518
+
1519
+#ifdef DHD_MAP_PKTID_LOGGING
1520
+typedef struct dhd_pktid_log_item {
1521
+ dmaaddr_t pa; /* DMA bus address */
1522
+ uint64 ts_nsec; /* Timestamp: nsec */
1523
+ uint32 size; /* DMA map/unmap size */
1524
+ uint32 pktid; /* Packet ID */
1525
+ uint8 pkttype; /* Packet Type */
1526
+ uint8 rsvd[7]; /* Reserved for future use */
1527
+} dhd_pktid_log_item_t;
1528
+
1529
+typedef struct dhd_pktid_log {
1530
+ uint32 items; /* number of total items */
1531
+ uint32 index; /* index of pktid_log_item */
1532
+ dhd_pktid_log_item_t map[0]; /* metadata storage */
1533
+} dhd_pktid_log_t;
1534
+
1535
+typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1536
+
1537
+#define MAX_PKTID_LOG (2048)
1538
+#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
1539
+#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
1540
+ ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1541
+
1542
+#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
1543
+#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
1544
+#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
1545
+ dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1546
+#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
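A minimal sketch of the intended lifecycle of these wrappers (hypothetical call sites; the real ones are in the attach, DMA map/unmap and dump paths of this file):

    /* attach: one log each for map and unmap events */
    prot->pktid_dma_map   = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
    prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);

    /* on every DMA map (and, with the other handle, unmap) of a packet buffer */
    DHD_PKTID_LOG(dhd, prot->pktid_dma_map, pa, pktid, len, PKTTYPE_DATA_TX);

    /* on an SMMU fault or a debug dump, and before freeing at detach */
    DHD_PKTID_LOG_DUMP(dhd);
    DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
    DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);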
1547
+
1548
+static dhd_pktid_log_handle_t *
1549
+dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1550
+{
1551
+ dhd_pktid_log_t *log;
1552
+ uint32 log_size;
1553
+
1554
+ log_size = DHD_PKTID_LOG_SZ(num_items);
1555
+ log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1556
+ if (log == NULL) {
1557
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
1558
+ __FUNCTION__, log_size));
1559
+ return (dhd_pktid_log_handle_t *)NULL;
1560
+ }
1561
+
1562
+ log->items = num_items;
1563
+ log->index = 0;
1564
+
1565
+ return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1566
+}
1567
+
1568
+static void
1569
+dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1570
+{
1571
+ dhd_pktid_log_t *log;
1572
+ uint32 log_size;
1573
+
1574
+ if (handle == NULL) {
1575
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1576
+ return;
1577
+ }
1578
+
1579
+ log = (dhd_pktid_log_t *)handle;
1580
+ log_size = DHD_PKTID_LOG_SZ(log->items);
1581
+ MFREE(dhd->osh, handle, log_size);
1582
+}
1583
+
1584
+static void
1585
+dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1586
+ uint32 pktid, uint32 len, uint8 pkttype)
1587
+{
1588
+ dhd_pktid_log_t *log;
1589
+ uint32 idx;
1590
+
1591
+ if (handle == NULL) {
1592
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1593
+ return;
1594
+ }
1595
+
1596
+ log = (dhd_pktid_log_t *)handle;
1597
+ idx = log->index;
1598
+ log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1599
+ log->map[idx].pa = pa;
1600
+ log->map[idx].pktid = pktid;
1601
+ log->map[idx].size = len;
1602
+ log->map[idx].pkttype = pkttype;
1603
+ log->index = (idx + 1) % (log->items); /* update index */
1604
+}
1605
+
1606
+void
1607
+dhd_pktid_logging_dump(dhd_pub_t *dhd)
1608
+{
1609
+ dhd_prot_t *prot = dhd->prot;
1610
+ dhd_pktid_log_t *map_log, *unmap_log;
1611
+ uint64 ts_sec, ts_usec;
1612
+
1613
+ if (prot == NULL) {
1614
+ DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1615
+ return;
1616
+ }
1617
+
1618
+ map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1619
+ unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1620
+ OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1621
+ if (map_log && unmap_log) {
1622
+ DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1623
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
1624
+ map_log->index, unmap_log->index,
1625
+ (unsigned long)ts_sec, (unsigned long)ts_usec));
1626
+ DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1627
+ "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1628
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
1629
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1630
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
1631
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1632
+ }
1633
+}
1634
+#endif /* DHD_MAP_PKTID_LOGGING */
1635
+
1636
+/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1637
+
1638
+/*
1639
+ * +---------------------------------------------------------------------------+
9251640 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
9261641 * Main purpose is to save memory on the dongle, has other purposes as well.
9271642 * The packet id map, also includes storage for some packet parameters that
....@@ -931,14 +1646,15 @@
9311646 * +---------------------------------------------------------------------------+
9321647 */
9331648 #define DHD_PCIE_PKTID
934
-#define MAX_PKTID_ITEMS (3072 * 2) /* Maximum number of pktids supported */
1649
+#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
1650
+#define MAX_RX_PKTID (1024)
1651
+#define MAX_TX_PKTID (3072 * 12)
9351652
9361653 /* On Router, the pktptr serves as a pktid. */
9371654
938
-
9391655 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
9401656 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
941
-#endif
1657
+#endif // endif
9421658
9431659 /* Enum for marking the buffer color based on usage */
9441660 typedef enum dhd_pkttype {
....@@ -946,47 +1662,68 @@
9461662 PKTTYPE_DATA_RX,
9471663 PKTTYPE_IOCTL_RX,
9481664 PKTTYPE_EVENT_RX,
1665
+ PKTTYPE_INFO_RX,
9491666 /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
950
- PKTTYPE_NO_CHECK
1667
+ PKTTYPE_NO_CHECK,
1668
+ PKTTYPE_TSBUF_RX
9511669 } dhd_pkttype_t;
9521670
953
-#define DHD_PKTID_INVALID (0U)
954
-#define DHD_IOCTL_REQ_PKTID (0xFFFE)
955
-#define DHD_FAKE_PKTID (0xFACE)
1671
+#define DHD_PKTID_MIN_AVAIL_COUNT 512U
1672
+#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1673
+#define DHD_PKTID_INVALID (0U)
1674
+#define DHD_IOCTL_REQ_PKTID (0xFFFE)
1675
+#define DHD_FAKE_PKTID (0xFACE)
1676
+#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
1677
+#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
1678
+#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
1679
+#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
1680
+#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
1681
+#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
1682
+#ifdef DHD_HP2P
1683
+#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
1684
+#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
1685
+#endif /* DHD_HP2P */
9561686
957
-#define DHD_PKTID_FREE_LOCKER (FALSE)
958
-#define DHD_PKTID_RSV_LOCKER (TRUE)
1687
+#define IS_FLOWRING(ring) \
1688
+ ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
9591689
9601690 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
9611691
9621692 /* Construct a packet id mapping table, returning an opaque map handle */
963
-static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
1693
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
9641694
9651695 /* Destroy a packet id mapping table, freeing all packets active in the table */
9661696 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
9671697
968
-#define PKTID_MAP_HANDLE (0)
969
-#define PKTID_MAP_HANDLE_IOCTL (1)
970
-
971
-#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
1698
+#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1699
+#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
9721700 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
1701
+#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
1702
+
1703
+#ifdef MACOSX_DHD
1704
+#undef DHD_PCIE_PKTID
1705
+#define DHD_PCIE_PKTID 1
1706
+#endif /* MACOSX_DHD */
9731707
9741708 #if defined(DHD_PCIE_PKTID)
975
-
1709
+#if defined(MACOSX_DHD)
1710
+#define IOCTLRESP_USE_CONSTMEM
1711
+static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1712
+static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1713
+#endif // endif
9761714
9771715 /* Determine number of pktids that are available */
9781716 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
9791717
9801718 /* Allocate a unique pktid against which a pkt and some metadata is saved */
9811719 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
982
- void *pkt);
1720
+ void *pkt, dhd_pkttype_t pkttype);
9831721 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
9841722 void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
9851723 void *dmah, void *secdma, dhd_pkttype_t pkttype);
9861724 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
9871725 void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
9881726 void *dmah, void *secdma, dhd_pkttype_t pkttype);
989
-
9901727 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
9911728 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
9921729 uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
....@@ -1001,11 +1738,6 @@
10011738 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
10021739 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
10031740 */
1004
-#ifndef DHD_PKTID_AUDIT_ENABLED
1005
-#define DHD_PKTID_AUDIT_ENABLED 1
1006
-#endif /* DHD_PKTID_AUDIT_ENABLED */
1007
-
1008
-
10091741 #if defined(DHD_PKTID_AUDIT_ENABLED)
10101742 #define USE_DHD_PKTID_AUDIT_LOCK 1
10111743 /* Audit the pktidmap allocator */
....@@ -1023,6 +1755,13 @@
10231755 #define DHD_TEST_IS_ALLOC 3
10241756 #define DHD_TEST_IS_FREE 4
10251757
1758
+typedef enum dhd_pktid_map_type {
1759
+ DHD_PKTID_MAP_TYPE_CTRL = 1,
1760
+ DHD_PKTID_MAP_TYPE_TX,
1761
+ DHD_PKTID_MAP_TYPE_RX,
1762
+ DHD_PKTID_MAP_TYPE_UNKNOWN
1763
+} dhd_pktid_map_type_t;
1764
+
10261765 #ifdef USE_DHD_PKTID_AUDIT_LOCK
10271766 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
10281767 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
....@@ -1037,12 +1776,12 @@
10371776
10381777 #endif /* DHD_PKTID_AUDIT_ENABLED */
10391778
1040
-/* #define USE_DHD_PKTID_LOCK 1 */
1779
+#define USE_DHD_PKTID_LOCK 1
10411780
10421781 #ifdef USE_DHD_PKTID_LOCK
10431782 #define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
10441783 #define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1045
-#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
1784
+#define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
10461785 #define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
10471786 #else
10481787 #define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
....@@ -1059,37 +1798,26 @@
10591798 } while (0)
10601799 #endif /* !USE_DHD_PKTID_LOCK */
10611800
1062
-/* Packet metadata saved in packet id mapper */
1063
-
1064
-/* The Locker can be 3 states
1065
- * LOCKER_IS_FREE - Locker is free and can be allocated
1066
- * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the
1067
- * locker (buffer address, len, phy addr etc) are populated
1068
- * with valid values
1069
- * LOCKER_IS_RSVD - The locker is reserved for future use, but the values
1070
- * in the locker are not valid. Especially pkt should be
1071
- * NULL in this state. When the user wants to re-use the
1072
- * locker dhd_pktid_map_free can be called with a flag
1073
- * to reserve the pktid for future use, which will clear
1074
- * the contents of the locker. When the user calls
1075
- * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY
1076
- */
10771801 typedef enum dhd_locker_state {
10781802 LOCKER_IS_FREE,
10791803 LOCKER_IS_BUSY,
10801804 LOCKER_IS_RSVD
10811805 } dhd_locker_state_t;
10821806
1807
+/* Packet metadata saved in packet id mapper */
1808
+
10831809 typedef struct dhd_pktid_item {
10841810 dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
1085
- uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
1086
- dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1087
- uint16 len; /* length of mapped packet's buffer */
1088
- void *pkt; /* opaque native pointer to a packet */
1089
- dmaaddr_t pa; /* physical address of mapped packet's buffer */
1090
- void *dmah; /* handle to OS specific DMA map */
1091
- void *secdma;
1811
+ uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
1812
+ dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1813
+ uint16 len; /* length of mapped packet's buffer */
1814
+ void *pkt; /* opaque native pointer to a packet */
1815
+ dmaaddr_t pa; /* physical address of mapped packet's buffer */
1816
+ void *dmah; /* handle to OS specific DMA map */
1817
+ void *secdma;
10921818 } dhd_pktid_item_t;
1819
+
1820
+typedef uint32 dhd_pktid_key_t;
10931821
10941822 typedef struct dhd_pktid_map {
10951823 uint32 items; /* total items in map */
....@@ -1099,11 +1827,10 @@
10991827 void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
11001828
11011829 #if defined(DHD_PKTID_AUDIT_ENABLED)
1102
- void *pktid_audit_lock;
1830
+ void *pktid_audit_lock;
11031831 struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
11041832 #endif /* DHD_PKTID_AUDIT_ENABLED */
1105
-
1106
- uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
1833
+ dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
11071834 dhd_pktid_item_t lockers[0]; /* metadata storage */
11081835 } dhd_pktid_map_t;
11091836
....@@ -1116,65 +1843,78 @@
11161843 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
11171844 */
11181845
1846
+#define DHD_PKTID_FREE_LOCKER (FALSE)
1847
+#define DHD_PKTID_RSV_LOCKER (TRUE)
1848
+
11191849 #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
11201850 #define DHD_PKIDMAP_ITEMS(items) (items)
11211851 #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
1122
- (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1852
+ (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1853
+#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
11231854
1124
-#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map))
1855
+#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
11251856
11261857 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1127
-#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt))
1128
-
1858
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
1859
+ dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
11291860 /* Reuse a previously reserved locker to save packet params */
11301861 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
11311862 dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1132
- (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1133
- (dhd_pkttype_t)(pkttype))
1134
-
1863
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1864
+ (dhd_pkttype_t)(pkttype))
11351865 /* Convert a packet to a pktid, and save packet params in locker */
11361866 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
11371867 dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1138
- (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1139
- (dhd_pkttype_t)(pkttype))
1868
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1869
+ (dhd_pkttype_t)(pkttype))
11401870
11411871 /* Convert pktid to a packet, and free the locker */
11421872 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
11431873 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1144
- (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1145
- (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1874
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1875
+ (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
11461876
11471877 /* Convert the pktid to a packet, empty locker, but keep it reserved */
11481878 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
11491879 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1150
- (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1151
- (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1880
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1881
+ (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
11521882
11531883 #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
11541884
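As a reading aid for the macros above, here is a minimal standalone sketch of the <numbered_key, locker> bookkeeping they wrap: free keys live on a small stack, reserving pops a key and parks the caller's pointer in that key's locker, and freeing returns the pointer and pushes the key back. This is an editorial illustration only; it drops the locking, DMA metadata, pkttype checks and the reserved key #0 handling of the real dhd_pktid_map, and the push-back on free is inferred from the pop visible in dhd_pktid_map_reserve() rather than quoted from the driver.

#include <stdint.h>
#include <stdlib.h>

#define EX_INVALID_KEY 0u                 /* key 0 reserved, like DHD_PKTID_INVALID */

typedef struct ex_map {
	uint32_t items;                   /* total number of usable keys            */
	uint32_t avail;                   /* keys[1..avail] are currently free      */
	uint32_t *keys;                   /* stack of free keys                     */
	void **lockers;                   /* lockers[key] holds the saved pointer   */
} ex_map_t;

static ex_map_t *ex_map_init(uint32_t items)
{
	ex_map_t *map = calloc(1, sizeof(*map));
	uint32_t k;

	if (map == NULL)
		return NULL;
	map->keys = calloc(items + 1, sizeof(*map->keys));
	map->lockers = calloc(items + 1, sizeof(*map->lockers));
	if (map->keys == NULL || map->lockers == NULL) {
		free(map->keys);
		free(map->lockers);
		free(map);
		return NULL;
	}
	map->items = map->avail = items;
	for (k = 1; k <= items; k++)
		map->keys[k] = k;         /* populate with unique keys              */
	return map;
}

static uint32_t ex_map_reserve(ex_map_t *map, void *pkt)
{
	uint32_t nkey;

	if (map->avail == 0)
		return EX_INVALID_KEY;    /* depleted pool                          */
	nkey = map->keys[map->avail--];   /* fetch a free key, pop stack            */
	map->lockers[nkey] = pkt;
	return nkey;
}

static void *ex_map_free(ex_map_t *map, uint32_t nkey)
{
	void *pkt = map->lockers[nkey];

	map->lockers[nkey] = NULL;
	map->keys[++map->avail] = nkey;   /* push the key back for reuse            */
	return pkt;
}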
11551885 #if defined(DHD_PKTID_AUDIT_ENABLED)
11561886
1157
-static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1158
- const int test_for, const char *errmsg);
1887
+static int
1888
+dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
1889
+{
1890
+ dhd_prot_t *prot = dhd->prot;
1891
+ int pktid_map_type;
11591892
1160
-/* Call back into OS layer to take the dongle dump and panic */
1161
-#ifdef DHD_DEBUG_PAGEALLOC
1162
-extern void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp);
1163
-#endif /* DHD_DEBUG_PAGEALLOC */
1893
+ if (pktid_map == prot->pktid_ctrl_map) {
1894
+ pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
1895
+ } else if (pktid_map == prot->pktid_tx_map) {
1896
+ pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
1897
+ } else if (pktid_map == prot->pktid_rx_map) {
1898
+ pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
1899
+ } else {
1900
+ pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
1901
+ }
1902
+
1903
+ return pktid_map_type;
1904
+}
11641905
11651906 /**
1166
-* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1907
+* __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
11671908 */
11681909 static int
1169
-dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1910
+__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
11701911 const int test_for, const char *errmsg)
11711912 {
11721913 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1173
-
1174
- const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
11751914 struct bcm_mwbmap *handle;
11761915 uint32 flags;
11771916 bool ignore_audit;
1917
+ int error = BCME_OK;
11781918
11791919 if (pktid_map == (dhd_pktid_map_t *)NULL) {
11801920 DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
....@@ -1186,21 +1926,19 @@
11861926 handle = pktid_map->pktid_audit;
11871927 if (handle == (struct bcm_mwbmap *)NULL) {
11881928 DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1189
- DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1190
- return BCME_OK;
1929
+ goto out;
11911930 }
11921931
11931932 /* Exclude special pktids from audit */
11941933 ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
11951934 if (ignore_audit) {
1196
- DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1197
- return BCME_OK;
1935
+ goto out;
11981936 }
11991937
1200
- if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
1938
+ if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
12011939 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1202
- /* lock is released in "error" */
1203
- goto error;
1940
+ error = BCME_ERROR;
1941
+ goto out;
12041942 }
12051943
12061944 /* Perform audit */
....@@ -1209,25 +1947,27 @@
12091947 if (!bcm_mwbmap_isfree(handle, pktid)) {
12101948 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
12111949 errmsg, pktid));
1212
- goto error;
1950
+ error = BCME_ERROR;
1951
+ } else {
1952
+ bcm_mwbmap_force(handle, pktid);
12131953 }
1214
- bcm_mwbmap_force(handle, pktid);
12151954 break;
12161955
12171956 case DHD_DUPLICATE_FREE:
12181957 if (bcm_mwbmap_isfree(handle, pktid)) {
12191958 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
12201959 errmsg, pktid));
1221
- goto error;
1960
+ error = BCME_ERROR;
1961
+ } else {
1962
+ bcm_mwbmap_free(handle, pktid);
12221963 }
1223
- bcm_mwbmap_free(handle, pktid);
12241964 break;
12251965
12261966 case DHD_TEST_IS_ALLOC:
12271967 if (bcm_mwbmap_isfree(handle, pktid)) {
12281968 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
12291969 errmsg, pktid));
1230
- goto error;
1970
+ error = BCME_ERROR;
12311971 }
12321972 break;
12331973
....@@ -1235,42 +1975,74 @@
12351975 if (!bcm_mwbmap_isfree(handle, pktid)) {
12361976 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
12371977 errmsg, pktid));
1238
- goto error;
1978
+ error = BCME_ERROR;
12391979 }
12401980 break;
12411981
12421982 default:
1243
- goto error;
1983
+ DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
1984
+ error = BCME_ERROR;
1985
+ break;
12441986 }
12451987
1988
+out:
12461989 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1247
- return BCME_OK;
12481990
1249
-error:
1991
+ if (error != BCME_OK) {
1992
+ dhd->pktid_audit_failed = TRUE;
1993
+ }
12501994
1251
- DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1252
- /* May insert any trap mechanism here ! */
1253
-#ifdef DHD_DEBUG_PAGEALLOC
1254
- dhd_pktid_audit_fail_cb(dhd);
1255
-#else
1256
- ASSERT(0);
1257
-#endif /* DHD_DEBUG_PAGEALLOC */
1258
- return BCME_ERROR;
1995
+ return error;
1996
+}
1997
+
1998
+static int
1999
+dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2000
+ const int test_for, const char *errmsg)
2001
+{
2002
+ int ret = BCME_OK;
2003
+ ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
2004
+ if (ret == BCME_ERROR) {
2005
+ DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2006
+ __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
2007
+ dhd_pktid_error_handler(dhd);
2008
+ }
2009
+
2010
+ return ret;
12592011 }
12602012
12612013 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
12622014 dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
12632015
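For context on what the audit macro above checks, the following standalone sketch shows the idea with a plain bit array standing in for the driver's multi-word bitmap (bcm_mwbmap): auditing an allocation fails if the pktid's bit is already set, and auditing a free fails if it is already clear. This is illustrative only; the special pktids, locking and error handling of __dhd_pktid_audit() are omitted, and callers are assumed to pass pktids below the hypothetical EX_MAX_PKTID bound.

#include <stdint.h>
#include <stdbool.h>

#define EX_MAX_PKTID 1024u

static uint64_t ex_inuse[(EX_MAX_PKTID + 63) / 64];   /* bit set => pktid allocated */

static bool ex_is_inuse(uint32_t pktid)
{
	return (ex_inuse[pktid / 64] >> (pktid % 64)) & 1u;
}

/* Audit an allocation: allocating an id that is already in use is a duplicate. */
static int ex_audit_alloc(uint32_t pktid)
{
	if (ex_is_inuse(pktid))
		return -1;                            /* DHD_DUPLICATE_ALLOC case */
	ex_inuse[pktid / 64] |= 1ull << (pktid % 64);
	return 0;
}

/* Audit a free: freeing an id that is not in use is a duplicate free. */
static int ex_audit_free(uint32_t pktid)
{
	if (!ex_is_inuse(pktid))
		return -1;                            /* DHD_DUPLICATE_FREE case  */
	ex_inuse[pktid / 64] &= ~(1ull << (pktid % 64));
	return 0;
}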
2016
+static int
2017
+dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2018
+ const int test_for, void *msg, uint32 msg_len, const char *func)
2019
+{
2020
+ int ret = BCME_OK;
2021
+
2022
+ if (dhd_query_bus_erros(dhdp)) {
2023
+ return BCME_ERROR;
2024
+ }
2025
+
2026
+ ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2027
+ if (ret == BCME_ERROR) {
2028
+ DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2029
+ __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2030
+ prhex(func, (uchar *)msg, msg_len);
2031
+ dhd_pktid_error_handler(dhdp);
2032
+ }
2033
+ return ret;
2034
+}
2035
+#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2036
+ dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2037
+ (pktid), (test_for), msg, msg_len, __FUNCTION__)
2038
+
12642039 #endif /* DHD_PKTID_AUDIT_ENABLED */
1265
-
1266
-/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */
1267
-
12682040
12692041 /**
12702042 * +---------------------------------------------------------------------------+
12712043 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
12722044 *
1273
- * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
2045
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
12742046 *
12752047 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
12762048 * packet id is returned. This unique packet id may be used to retrieve the
....@@ -1289,40 +2061,31 @@
12892061 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
12902062
12912063 static dhd_pktid_map_handle_t *
1292
-dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
2064
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
12932065 {
1294
- void *osh;
2066
+ void* osh;
12952067 uint32 nkey;
12962068 dhd_pktid_map_t *map;
12972069 uint32 dhd_pktid_map_sz;
12982070 uint32 map_items;
1299
-#ifdef DHD_USE_STATIC_PKTIDMAP
1300
- uint32 section;
1301
-#endif /* DHD_USE_STATIC_PKTIDMAP */
2071
+ uint32 map_keys_sz;
13022072 osh = dhd->osh;
13032073
1304
- ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
13052074 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
13062075
1307
-#ifdef DHD_USE_STATIC_PKTIDMAP
1308
- if (index == PKTID_MAP_HANDLE) {
1309
- section = DHD_PREALLOC_PKTID_MAP;
1310
- } else {
1311
- section = DHD_PREALLOC_PKTID_MAP_IOCTL;
1312
- }
1313
-
1314
- map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
1315
-#else
1316
- map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
1317
-#endif /* DHD_USE_STATIC_PKTIDMAP */
1318
-
2076
+ map = (dhd_pktid_map_t *)VMALLOCZ(osh, dhd_pktid_map_sz);
13192077 if (map == NULL) {
13202078 DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
13212079 __FUNCTION__, __LINE__, dhd_pktid_map_sz));
1322
- goto error;
2080
+ return (dhd_pktid_map_handle_t *)NULL;
13232081 }
13242082
1325
- bzero(map, dhd_pktid_map_sz);
2083
+ map->items = num_items;
2084
+ map->avail = num_items;
2085
+
2086
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
2087
+
2088
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
13262089
13272090 /* Initialize the lock that protects this structure */
13282091 map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
....@@ -1331,24 +2094,24 @@
13312094 goto error;
13322095 }
13332096
1334
- map->items = num_items;
1335
- map->avail = num_items;
1336
-
1337
- map_items = DHD_PKIDMAP_ITEMS(map->items);
1338
-
1339
-#if defined(DHD_PKTID_AUDIT_ENABLED)
1340
- /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
1341
- map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
1342
- if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
1343
- DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2097
+ map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2098
+ if (map->keys == NULL) {
2099
+ DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2100
+ __FUNCTION__, __LINE__, map_keys_sz));
13442101 goto error;
1345
- } else {
1346
- DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
1347
- __FUNCTION__, __LINE__, map_items + 1));
13482102 }
13492103
1350
- map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
1351
-
2104
+#if defined(DHD_PKTID_AUDIT_ENABLED)
2105
+ /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2106
+ map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2107
+ if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2108
+ DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2109
+ goto error;
2110
+ } else {
2111
+ DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2112
+ __FUNCTION__, __LINE__, map_items + 1));
2113
+ }
2114
+ map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
13522115 #endif /* DHD_PKTID_AUDIT_ENABLED */
13532116
13542117 for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
....@@ -1358,8 +2121,8 @@
13582121 map->lockers[nkey].len = 0;
13592122 }
13602123
1361
- /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
1362
- map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
2124
+ /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2125
+ map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
13632126 map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
13642127 map->lockers[DHD_PKTID_INVALID].len = 0;
13652128
....@@ -1371,9 +2134,7 @@
13712134 return (dhd_pktid_map_handle_t *)map; /* opaque handle */
13722135
13732136 error:
1374
-
13752137 if (map) {
1376
-
13772138 #if defined(DHD_PKTID_AUDIT_ENABLED)
13782139 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
13792140 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
....@@ -1383,12 +2144,16 @@
13832144 }
13842145 #endif /* DHD_PKTID_AUDIT_ENABLED */
13852146
1386
- if (map->pktid_lock)
2147
+ if (map->keys) {
2148
+ MFREE(osh, map->keys, map_keys_sz);
2149
+ }
2150
+
2151
+ if (map->pktid_lock) {
13872152 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2153
+ }
13882154
1389
- MFREE(osh, map, dhd_pktid_map_sz);
2155
+ VMFREE(osh, map, dhd_pktid_map_sz);
13902156 }
1391
-
13922157 return (dhd_pktid_map_handle_t *)NULL;
13932158 }
13942159
....@@ -1397,159 +2162,180 @@
13972162 * Freeing implies: unmapping the buffers and freeing the native packet
13982163 * This could have been a callback registered with the pktid mapper.
13992164 */
1400
-
14012165 static void
1402
-dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2166
+dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
14032167 {
14042168 void *osh;
14052169 uint32 nkey;
14062170 dhd_pktid_map_t *map;
1407
- uint32 dhd_pktid_map_sz;
14082171 dhd_pktid_item_t *locker;
14092172 uint32 map_items;
1410
- uint32 flags;
1411
-
1412
- if (handle == NULL) {
1413
- return;
1414
- }
2173
+ unsigned long flags;
2174
+ bool data_tx = FALSE;
14152175
14162176 map = (dhd_pktid_map_t *)handle;
1417
- flags = DHD_PKTID_LOCK(map->pktid_lock);
2177
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
14182178 osh = dhd->osh;
14192179
1420
- dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1421
-
1422
- nkey = 1; /* skip reserved KEY #0, and start from 1 */
1423
- locker = &map->lockers[nkey];
1424
-
14252180 map_items = DHD_PKIDMAP_ITEMS(map->items);
2181
+ /* skip reserved KEY #0, and start from 1 */
14262182
1427
- for (; nkey <= map_items; nkey++, locker++) {
1428
-
1429
- if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
1430
-
1431
- locker->state = LOCKER_IS_FREE; /* force open the locker */
1432
-
1433
-#if defined(DHD_PKTID_AUDIT_ENABLED)
1434
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1435
-#endif /* DHD_PKTID_AUDIT_ENABLED */
1436
-
1437
- { /* This could be a callback registered with dhd_pktid_map */
1438
- DMA_UNMAP(osh, locker->pa, locker->len,
1439
- locker->dir, 0, DHD_DMAH_NULL);
1440
- dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
1441
- locker->pkttype, TRUE);
2183
+ for (nkey = 1; nkey <= map_items; nkey++) {
2184
+ if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2185
+ locker = &map->lockers[nkey];
2186
+ locker->state = LOCKER_IS_FREE;
2187
+ data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2188
+ if (data_tx) {
2189
+ OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
14422190 }
2191
+
2192
+#ifdef DHD_PKTID_AUDIT_RING
2193
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2194
+#endif /* DHD_PKTID_AUDIT_RING */
2195
+#ifdef DHD_MAP_PKTID_LOGGING
2196
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2197
+ locker->pa, nkey, locker->len,
2198
+ locker->pkttype);
2199
+#endif /* DHD_MAP_PKTID_LOGGING */
2200
+
2201
+ {
2202
+ if (SECURE_DMA_ENAB(dhd->osh))
2203
+ SECURE_DMA_UNMAP(osh, locker->pa,
2204
+ locker->len, locker->dir, 0,
2205
+ locker->dmah, locker->secdma, 0);
2206
+ else
2207
+ DMA_UNMAP(osh, locker->pa, locker->len,
2208
+ locker->dir, 0, locker->dmah);
2209
+ }
2210
+ dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2211
+ locker->pkttype, data_tx);
14432212 }
1444
-#if defined(DHD_PKTID_AUDIT_ENABLED)
14452213 else {
2214
+#ifdef DHD_PKTID_AUDIT_RING
14462215 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2216
+#endif /* DHD_PKTID_AUDIT_RING */
14472217 }
1448
-#endif /* DHD_PKTID_AUDIT_ENABLED */
1449
-
1450
- locker->pkt = NULL; /* clear saved pkt */
1451
- locker->len = 0;
2218
+ map->keys[nkey] = nkey; /* populate with unique keys */
14522219 }
14532220
1454
-#if defined(DHD_PKTID_AUDIT_ENABLED)
1455
- if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1456
- bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1457
- map->pktid_audit = (struct bcm_mwbmap *)NULL;
1458
- if (map->pktid_audit_lock) {
1459
- DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1460
- }
1461
- }
1462
-#endif /* DHD_PKTID_AUDIT_ENABLED */
1463
-
2221
+ map->avail = map_items;
2222
+ memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
14642223 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1465
- DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1466
-
1467
-#ifdef DHD_USE_STATIC_PKTIDMAP
1468
- DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
1469
-#else
1470
- MFREE(osh, handle, dhd_pktid_map_sz);
1471
-#endif /* DHD_USE_STATIC_PKTIDMAP */
14722224 }
14732225
14742226 #ifdef IOCTLRESP_USE_CONSTMEM
14752227 /** Called in detach scenario. Releasing IOCTL buffers. */
14762228 static void
1477
-dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2229
+dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
14782230 {
14792231 uint32 nkey;
14802232 dhd_pktid_map_t *map;
1481
- uint32 dhd_pktid_map_sz;
14822233 dhd_pktid_item_t *locker;
14832234 uint32 map_items;
1484
- uint32 flags;
1485
- osl_t *osh = dhd->osh;
1486
-
1487
- if (handle == NULL) {
1488
- return;
1489
- }
2235
+ unsigned long flags;
14902236
14912237 map = (dhd_pktid_map_t *)handle;
1492
- flags = DHD_PKTID_LOCK(map->pktid_lock);
1493
-
1494
- dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1495
-
1496
- nkey = 1; /* skip reserved KEY #0, and start from 1 */
1497
- locker = &map->lockers[nkey];
2238
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
14982239
14992240 map_items = DHD_PKIDMAP_ITEMS(map->items);
2241
+ /* skip reserved KEY #0, and start from 1 */
2242
+ for (nkey = 1; nkey <= map_items; nkey++) {
2243
+ if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2244
+ dhd_dma_buf_t retbuf;
15002245
1501
- for (; nkey <= map_items; nkey++, locker++) {
1502
-
1503
- if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
1504
-
1505
- locker->state = LOCKER_IS_FREE; /* force open the locker */
1506
-
1507
-#if defined(DHD_PKTID_AUDIT_ENABLED)
2246
+#ifdef DHD_PKTID_AUDIT_RING
15082247 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1509
-#endif /* DHD_PKTID_AUDIT_ENABLED */
2248
+#endif /* DHD_PKTID_AUDIT_RING */
15102249
1511
- {
1512
- dhd_dma_buf_t retbuf;
1513
- retbuf.va = locker->pkt;
1514
- retbuf.len = locker->len;
1515
- retbuf.pa = locker->pa;
1516
- retbuf.dmah = locker->dmah;
1517
- retbuf.secdma = locker->secdma;
2250
+ locker = &map->lockers[nkey];
2251
+ retbuf.va = locker->pkt;
2252
+ retbuf.len = locker->len;
2253
+ retbuf.pa = locker->pa;
2254
+ retbuf.dmah = locker->dmah;
2255
+ retbuf.secdma = locker->secdma;
15182256
1519
- /* This could be a callback registered with dhd_pktid_map */
1520
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1521
- free_ioctl_return_buffer(dhd, &retbuf);
1522
- flags = DHD_PKTID_LOCK(map->pktid_lock);
1523
- }
2257
+ free_ioctl_return_buffer(dhd, &retbuf);
15242258 }
1525
-#if defined(DHD_PKTID_AUDIT_ENABLED)
15262259 else {
2260
+#ifdef DHD_PKTID_AUDIT_RING
15272261 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2262
+#endif /* DHD_PKTID_AUDIT_RING */
15282263 }
1529
-#endif /* DHD_PKTID_AUDIT_ENABLED */
1530
-
1531
- locker->pkt = NULL; /* clear saved pkt */
1532
- locker->len = 0;
2264
+ map->keys[nkey] = nkey; /* populate with unique keys */
15332265 }
2266
+
2267
+ map->avail = map_items;
2268
+ memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2269
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2270
+}
2271
+#endif /* IOCTLRESP_USE_CONSTMEM */
2272
+
2273
+/**
2274
+ * Free the pktid map.
2275
+ */
2276
+static void
2277
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2278
+{
2279
+ dhd_pktid_map_t *map;
2280
+ uint32 dhd_pktid_map_sz;
2281
+ uint32 map_keys_sz;
2282
+
2283
+ if (handle == NULL)
2284
+ return;
2285
+
2286
+ /* Free any pending packets */
2287
+ dhd_pktid_map_reset(dhd, handle);
2288
+
2289
+ map = (dhd_pktid_map_t *)handle;
2290
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2291
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2292
+
2293
+ DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
15342294
15352295 #if defined(DHD_PKTID_AUDIT_ENABLED)
15362296 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1537
- bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2297
+ bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
15382298 map->pktid_audit = (struct bcm_mwbmap *)NULL;
15392299 if (map->pktid_audit_lock) {
1540
- DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2300
+ DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2301
+ }
2302
+ }
2303
+#endif /* DHD_PKTID_AUDIT_ENABLED */
2304
+ MFREE(dhd->osh, map->keys, map_keys_sz);
2305
+ VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2306
+}
2307
+#ifdef IOCTLRESP_USE_CONSTMEM
2308
+static void
2309
+dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2310
+{
2311
+ dhd_pktid_map_t *map;
2312
+ uint32 dhd_pktid_map_sz;
2313
+ uint32 map_keys_sz;
2314
+
2315
+ if (handle == NULL)
2316
+ return;
2317
+
2318
+ /* Free any pending packets */
2319
+ dhd_pktid_map_reset_ioctl(dhd, handle);
2320
+
2321
+ map = (dhd_pktid_map_t *)handle;
2322
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2323
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2324
+
2325
+ DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2326
+
2327
+#if defined(DHD_PKTID_AUDIT_ENABLED)
2328
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2329
+ bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2330
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
2331
+ if (map->pktid_audit_lock) {
2332
+ DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
15412333 }
15422334 }
15432335 #endif /* DHD_PKTID_AUDIT_ENABLED */
15442336
1545
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1546
- DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1547
-
1548
-#ifdef DHD_USE_STATIC_PKTIDMAP
1549
- DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
1550
-#else
1551
- MFREE(osh, handle, dhd_pktid_map_sz);
1552
-#endif /* DHD_USE_STATIC_PKTIDMAP */
2337
+ MFREE(dhd->osh, map->keys, map_keys_sz);
2338
+ VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
15532339 }
15542340 #endif /* IOCTLRESP_USE_CONSTMEM */
15552341
....@@ -1558,13 +2344,13 @@
15582344 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
15592345 {
15602346 dhd_pktid_map_t *map;
1561
- uint32 flags;
15622347 uint32 avail;
2348
+ unsigned long flags;
15632349
15642350 ASSERT(handle != NULL);
15652351 map = (dhd_pktid_map_t *)handle;
15662352
1567
- flags = DHD_PKTID_LOCK(map->pktid_lock);
2353
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
15682354 avail = map->avail;
15692355 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
15702356
....@@ -1572,88 +2358,97 @@
15722358 }
15732359
15742360 /**
1575
- * Allocate locker, save pkt contents, and return the locker's numbered key.
1576
- * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility.
1577
- * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
1578
- * implying a depleted pool of pktids.
2361
+ * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2362
+ * yet populated. Invoke the pktid save api to populate the packet parameters
2363
+ * into the locker. This function is not reentrant, and is the caller's
2364
+ * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
2365
+ * a failure case, implying a depleted pool of pktids.
15792366 */
1580
-
15812367 static INLINE uint32
1582
-__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
2368
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2369
+ void *pkt, dhd_pkttype_t pkttype)
15832370 {
15842371 uint32 nkey;
15852372 dhd_pktid_map_t *map;
15862373 dhd_pktid_item_t *locker;
2374
+ unsigned long flags;
15872375
15882376 ASSERT(handle != NULL);
15892377 map = (dhd_pktid_map_t *)handle;
15902378
1591
- if (map->avail <= 0) { /* no more pktids to allocate */
2379
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
2380
+
2381
+ if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
15922382 map->failures++;
15932383 DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2384
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
15942385 return DHD_PKTID_INVALID; /* failed alloc request */
15952386 }
15962387
15972388 ASSERT(map->avail <= map->items);
15982389 nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2390
+
2391
+ if ((map->avail > map->items) || (nkey > map->items)) {
2392
+ map->failures++;
2393
+ DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2394
+ " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2395
+ __FUNCTION__, __LINE__, map->avail, nkey,
2396
+ pkttype));
2397
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2398
+ return DHD_PKTID_INVALID; /* failed alloc request */
2399
+ }
2400
+
15992401 locker = &map->lockers[nkey]; /* save packet metadata in locker */
16002402 map->avail--;
16012403 locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
16022404 locker->len = 0;
16032405 locker->state = LOCKER_IS_BUSY; /* reserve this locker */
16042406
1605
-#if defined(DHD_PKTID_AUDIT_MAP)
1606
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
1607
-#endif /* DHD_PKTID_AUDIT_MAP */
2407
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
16082408
16092409 ASSERT(nkey != DHD_PKTID_INVALID);
2410
+
16102411 return nkey; /* return locker's numbered key */
16112412 }
16122413
1613
-
1614
-/**
1615
- * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
1616
- * yet populated. Invoke the pktid save api to populate the packet parameters
1617
- * into the locker.
1618
- * Wrapper that takes the required lock when called directly.
2414
+/*
2415
+ * dhd_pktid_map_save - Save a packet's parameters into a locker
2416
+ * corresponding to a previously reserved unique numbered key.
16192417 */
1620
-static INLINE uint32
1621
-dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
1622
-{
1623
- dhd_pktid_map_t *map;
1624
- uint32 flags;
1625
- uint32 ret;
1626
-
1627
- ASSERT(handle != NULL);
1628
- map = (dhd_pktid_map_t *)handle;
1629
- flags = DHD_PKTID_LOCK(map->pktid_lock);
1630
- ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
1631
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1632
-
1633
- return ret;
1634
-}
1635
-
16362418 static INLINE void
1637
-__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2419
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
16382420 uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
16392421 dhd_pkttype_t pkttype)
16402422 {
16412423 dhd_pktid_map_t *map;
16422424 dhd_pktid_item_t *locker;
2425
+ unsigned long flags;
16432426
16442427 ASSERT(handle != NULL);
16452428 map = (dhd_pktid_map_t *)handle;
16462429
1647
- ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
2430
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
2431
+
2432
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2433
+ DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2434
+ __FUNCTION__, __LINE__, nkey, pkttype));
2435
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2436
+#ifdef DHD_FW_COREDUMP
2437
+ if (dhd->memdump_enabled) {
2438
+ /* collect core dump */
2439
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2440
+ dhd_bus_mem_dump(dhd);
2441
+ }
2442
+#else
2443
+ ASSERT(0);
2444
+#endif /* DHD_FW_COREDUMP */
2445
+ return;
2446
+ }
16482447
16492448 locker = &map->lockers[nkey];
16502449
16512450 ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
16522451 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
1653
-
1654
-#if defined(DHD_PKTID_AUDIT_MAP)
1655
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
1656
-#endif /* DHD_PKTID_AUDIT_MAP */
16572452
16582453 /* store contents in locker */
16592454 locker->dir = dir;
....@@ -1664,26 +2459,9 @@
16642459 locker->pkttype = pkttype;
16652460 locker->pkt = pkt;
16662461 locker->state = LOCKER_IS_BUSY; /* make this locker busy */
1667
-}
1668
-
1669
-/**
1670
- * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
1671
- * to a previously reserved unique numbered key.
1672
- * Wrapper that takes the required lock when called directly.
1673
- */
1674
-static INLINE void
1675
-dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1676
- uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1677
- dhd_pkttype_t pkttype)
1678
-{
1679
- dhd_pktid_map_t *map;
1680
- uint32 flags;
1681
-
1682
- ASSERT(handle != NULL);
1683
- map = (dhd_pktid_map_t *)handle;
1684
- flags = DHD_PKTID_LOCK(map->pktid_lock);
1685
- __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
1686
- dir, dmah, secdma, pkttype);
2462
+#ifdef DHD_MAP_PKTID_LOGGING
2463
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2464
+#endif /* DHD_MAP_PKTID_LOGGING */
16872465 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
16882466 }
16892467
....@@ -1697,30 +2475,12 @@
16972475 dhd_pkttype_t pkttype)
16982476 {
16992477 uint32 nkey;
1700
- uint32 flags;
1701
- dhd_pktid_map_t *map;
17022478
1703
- ASSERT(handle != NULL);
1704
- map = (dhd_pktid_map_t *)handle;
1705
-
1706
- flags = DHD_PKTID_LOCK(map->pktid_lock);
1707
-
1708
- nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
2479
+ nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
17092480 if (nkey != DHD_PKTID_INVALID) {
1710
- __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2481
+ dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
17112482 len, dir, dmah, secdma, pkttype);
1712
-#if defined(DHD_PKTID_AUDIT_MAP)
1713
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
1714
-#endif /* DHD_PKTID_AUDIT_MAP */
17152483 }
1716
-
1717
-#ifdef CUSTOMER_HW_31_2
1718
- /* Need to do the flush at buffer allocation time */
1719
- DHD_TRACE(("%s: flush buffer 0x%x len %d\n", __FUNCTION__,
1720
- PKTDATA(dhd->osh, pkt), PKTLEN(dhd->osh, pkt)));
1721
- OSL_CACHE_FLUSH(PKTDATA(dhd->osh, pkt), PKTLEN(dhd->osh, pkt));
1722
-#endif
1723
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
17242484
17252485 return nkey;
17262486 }
....@@ -1733,21 +2493,36 @@
17332493 */
17342494 static void * BCMFASTPATH
17352495 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
1736
- dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
1737
- dhd_pkttype_t pkttype, bool rsv_locker)
2496
+ dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2497
+ bool rsv_locker)
17382498 {
17392499 dhd_pktid_map_t *map;
17402500 dhd_pktid_item_t *locker;
17412501 void * pkt;
1742
- uint32 flags;
2502
+ unsigned long long locker_addr;
2503
+ unsigned long flags;
17432504
17442505 ASSERT(handle != NULL);
17452506
17462507 map = (dhd_pktid_map_t *)handle;
17472508
1748
- flags = DHD_PKTID_LOCK(map->pktid_lock);
2509
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
17492510
1750
- ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
2511
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2512
+ DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2513
+ __FUNCTION__, __LINE__, nkey, pkttype));
2514
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2515
+#ifdef DHD_FW_COREDUMP
2516
+ if (dhd->memdump_enabled) {
2517
+ /* collect core dump */
2518
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2519
+ dhd_bus_mem_dump(dhd);
2520
+ }
2521
+#else
2522
+ ASSERT(0);
2523
+#endif /* DHD_FW_COREDUMP */
2524
+ return NULL;
2525
+ }
17512526
17522527 locker = &map->lockers[nkey];
17532528
....@@ -1755,12 +2530,20 @@
17552530 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
17562531 #endif /* DHD_PKTID_AUDIT_MAP */
17572532
1758
- if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
1759
- DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
1760
- __FUNCTION__, __LINE__, nkey));
1761
- ASSERT(locker->state != LOCKER_IS_FREE);
1762
-
2533
+ /* Debug check for cloned numbered key */
2534
+ if (locker->state == LOCKER_IS_FREE) {
2535
+ DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2536
+ __FUNCTION__, __LINE__, nkey));
17632537 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2538
+#ifdef DHD_FW_COREDUMP
2539
+ if (dhd->memdump_enabled) {
2540
+ /* collect core dump */
2541
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2542
+ dhd_bus_mem_dump(dhd);
2543
+ }
2544
+#else
2545
+ ASSERT(0);
2546
+#endif /* DHD_FW_COREDUMP */
17642547 return NULL;
17652548 }
17662549
....@@ -1770,12 +2553,27 @@
17702553 */
17712554 if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
17722555
1773
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1774
-
17752556 DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
17762557 __FUNCTION__, __LINE__, nkey));
1777
- ASSERT(locker->pkttype == pkttype);
1778
-
2558
+#ifdef BCMDMA64OSL
2559
+ PHYSADDRTOULONG(locker->pa, locker_addr);
2560
+#else
2561
+ locker_addr = PHYSADDRLO(locker->pa);
2562
+#endif /* BCMDMA64OSL */
2563
+ DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
2564
+ "pkttype <%d> locker->pa <0x%llx> \n",
2565
+ __FUNCTION__, __LINE__, locker->state, locker->pkttype,
2566
+ pkttype, locker_addr));
2567
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2568
+#ifdef DHD_FW_COREDUMP
2569
+ if (dhd->memdump_enabled) {
2570
+ /* collect core dump */
2571
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2572
+ dhd_bus_mem_dump(dhd);
2573
+ }
2574
+#else
2575
+ ASSERT(0);
2576
+#endif /* DHD_FW_COREDUMP */
17792577 return NULL;
17802578 }
17812579
....@@ -1791,6 +2589,10 @@
17912589 #if defined(DHD_PKTID_AUDIT_MAP)
17922590 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
17932591 #endif /* DHD_PKTID_AUDIT_MAP */
2592
+#ifdef DHD_MAP_PKTID_LOGGING
2593
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
2594
+ (uint32)locker->len, pkttype);
2595
+#endif /* DHD_MAP_PKTID_LOGGING */
17942596
17952597 *pa = locker->pa; /* return contents of locker */
17962598 *len = (uint32)locker->len;
....@@ -1801,17 +2603,12 @@
18012603 locker->pkt = NULL; /* Clear pkt */
18022604 locker->len = 0;
18032605
1804
-#ifdef CUSTOMER_HW_31_2
1805
- /* need to do to ensure all packet are flushed */
1806
- OSL_CACHE_INV(PKTDATA(dhd->osh, pkt), PKTLEN(dhd->osh, pkt));
1807
-#endif
1808
-
18092606 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2607
+
18102608 return pkt;
18112609 }
18122610
18132611 #else /* ! DHD_PCIE_PKTID */
1814
-
18152612
18162613 typedef struct pktlist {
18172614 PKT_LIST *tx_pkt_list; /* list for tx packets */
....@@ -1831,7 +2628,6 @@
18312628 #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
18322629 #define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
18332630
1834
-
18352631 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
18362632 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
18372633 dhd_pkttype_t pkttype);
....@@ -1840,7 +2636,7 @@
18402636 dhd_pkttype_t pkttype);
18412637
18422638 static dhd_pktid_map_handle_t *
1843
-dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
2639
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
18442640 {
18452641 osl_t *osh = dhd->osh;
18462642 pktlists_t *handle = NULL;
....@@ -1897,15 +2693,9 @@
18972693 }
18982694
18992695 static void
1900
-dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2696
+dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
19012697 {
19022698 osl_t *osh = dhd->osh;
1903
- pktlists_t *handle = (pktlists_t *) map;
1904
-
1905
- ASSERT(handle != NULL);
1906
- if (handle == (pktlists_t *)NULL) {
1907
- return;
1908
- }
19092699
19102700 if (handle->ctrl_pkt_list) {
19112701 PKTLIST_FINI(handle->ctrl_pkt_list);
....@@ -1921,6 +2711,20 @@
19212711 PKTLIST_FINI(handle->tx_pkt_list);
19222712 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
19232713 }
2714
+}
2715
+
2716
+static void
2717
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2718
+{
2719
+ osl_t *osh = dhd->osh;
2720
+ pktlists_t *handle = (pktlists_t *) map;
2721
+
2722
+ ASSERT(handle != NULL);
2723
+ if (handle == (pktlists_t *)NULL) {
2724
+ return;
2725
+ }
2726
+
2727
+ dhd_pktid_map_reset(dhd, handle);
19242728
19252729 if (handle) {
19262730 MFREE(osh, handle, sizeof(pktlists_t));
....@@ -1978,7 +2782,7 @@
19782782 return pktptr32;
19792783 }
19802784
1981
-#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
2785
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
19822786
19832787 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
19842788 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
....@@ -2004,7 +2808,6 @@
20042808 #endif /* ! DHD_PCIE_PKTID */
20052809
20062810 /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
2007
-
20082811
20092812 /**
20102813 * The PCIE FD protocol layer is constructed in two phases:
....@@ -2033,6 +2836,11 @@
20332836 osl_t *osh = dhd->osh;
20342837 dhd_prot_t *prot;
20352838
2839
+ /* The firmware will DMA extended trap data to the host, so allocate a
2840
+ * buffer large enough for the maximum extended trap data.
2841
+ */
2842
+ uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
2843
+
20362844 /* Allocate prot structure */
20372845 if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
20382846 sizeof(dhd_prot_t)))) {
....@@ -2047,6 +2855,12 @@
20472855 /* DMAing ring completes supported? FALSE by default */
20482856 dhd->dma_d2h_ring_upd_support = FALSE;
20492857 dhd->dma_h2d_ring_upd_support = FALSE;
2858
+ dhd->dma_ring_upd_overwrite = FALSE;
2859
+
2860
+ dhd->hwa_inited = 0;
2861
+ dhd->idma_inited = 0;
2862
+ dhd->ifrm_inited = 0;
2863
+ dhd->dar_inited = 0;
20502864
20512865 /* Common Ring Allocations */
20522866
....@@ -2112,6 +2926,12 @@
21122926 goto fail;
21132927 }
21142928
2929
+ /* Host TS request buffer: one buffer for now */
2930
+ if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2931
+ goto fail;
2932
+ }
2933
+ prot->hostts_req_buf_inuse = FALSE;
2934
+
21152935 /* Scratch buffer for dma rx offset */
21162936 #ifdef BCM_HOST_BUF
21172937 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
....@@ -2120,6 +2940,7 @@
21202940 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
21212941
21222942 #endif /* BCM_HOST_BUF */
2943
+
21232944 goto fail;
21242945 }
21252946
....@@ -2130,50 +2951,245 @@
21302951
21312952 #ifdef DHD_RX_CHAINING
21322953 dhd_rxchain_reset(&prot->rxchain);
2133
-#endif
2954
+#endif // endif
21342955
2135
-#if defined(DHD_LB)
2956
+ prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2957
+ if (prot->pktid_ctrl_map == NULL) {
2958
+ goto fail;
2959
+ }
2960
+
2961
+ prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2962
+ if (prot->pktid_rx_map == NULL)
2963
+ goto fail;
2964
+
2965
+ prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2966
+ if (prot->pktid_tx_map == NULL)
2967
+ goto fail;
2968
+
2969
+#ifdef IOCTLRESP_USE_CONSTMEM
2970
+ prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2971
+ DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2972
+ if (prot->pktid_map_handle_ioctl == NULL) {
2973
+ goto fail;
2974
+ }
2975
+#endif /* IOCTLRESP_USE_CONSTMEM */
2976
+
2977
+#ifdef DHD_MAP_PKTID_LOGGING
2978
+ prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2979
+ if (prot->pktid_dma_map == NULL) {
2980
+ DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
2981
+ __FUNCTION__));
2982
+ }
2983
+
2984
+ prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2985
+ if (prot->pktid_dma_unmap == NULL) {
2986
+ DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
2987
+ __FUNCTION__));
2988
+ }
2989
+#endif /* DHD_MAP_PKTID_LOGGING */
21362990
21372991 /* Initialize the work queues to be used by the Load Balancing logic */
21382992 #if defined(DHD_LB_TXC)
21392993 {
21402994 void *buffer;
21412995 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2996
+ if (buffer == NULL) {
2997
+ DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
2998
+ goto fail;
2999
+ }
21423000 bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
21433001 buffer, DHD_LB_WORKQ_SZ);
21443002 prot->tx_compl_prod_sync = 0;
21453003 DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
21463004 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2147
- }
3005
+ }
21483006 #endif /* DHD_LB_TXC */
21493007
21503008 #if defined(DHD_LB_RXC)
2151
- {
3009
+ {
21523010 void *buffer;
2153
- buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
3011
+ buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
3012
+ if (buffer == NULL) {
3013
+ DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
3014
+ goto fail;
3015
+ }
21543016 bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
21553017 buffer, DHD_LB_WORKQ_SZ);
21563018 prot->rx_compl_prod_sync = 0;
21573019 DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
21583020 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2159
- }
3021
+ }
21603022 #endif /* DHD_LB_RXC */
21613023
2162
-#endif /* DHD_LB */
3024
+ /* Initialize trap buffer */
3025
+ if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3026
+ DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
3027
+ goto fail;
3028
+ }
21633029
21643030 return BCME_OK;
21653031
21663032 fail:
21673033
2168
-#ifndef CONFIG_DHD_USE_STATIC_BUF
2169
- if (prot != NULL) {
3034
+ if (prot) {
3035
+ /* Free up all allocated memories */
21703036 dhd_prot_detach(dhd);
21713037 }
2172
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
21733038
21743039 return BCME_NOMEM;
21753040 } /* dhd_prot_attach */
21763041
3042
+static int
3043
+dhd_alloc_host_scbs(dhd_pub_t *dhd)
3044
+{
3045
+ int ret = BCME_OK;
3046
+ sh_addr_t base_addr;
3047
+ dhd_prot_t *prot = dhd->prot;
3048
+ uint32 host_scb_size = 0;
3049
+
3050
+ if (dhd->hscb_enable) {
3051
+ /* read number of bytes to allocate from F/W */
3052
+ dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3053
+ if (host_scb_size) {
3054
+ /* alloc array of host scbs */
3055
+ ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3056
+ /* write host scb address to F/W */
3057
+ if (ret == BCME_OK) {
3058
+ dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3059
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3060
+ HOST_SCB_ADDR, 0);
3061
+ } else {
3062
+ DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
3063
+ }
3064
+ } else {
3065
+ DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
3066
+ }
3067
+ } else {
3068
+ DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
3069
+ }
3070
+
3071
+ return ret;
3072
+}
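For reference, the allocation above follows a recurring pattern in this file: the dongle advertises a size through the shared area, the host allocates a DMA-able buffer of that size, and the buffer's address is written back so firmware can use it. A minimal stand-alone C sketch of that round trip follows; the helper names and the 256-byte size are invented stand-ins, not driver APIs.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the driver's DMA buffer and shared-area accessors. */
struct dma_buf_sketch { void *va; uint64_t pa; uint32_t len; };

static uint32_t shared_read_scb_size(void) { return 256; }   /* pretend firmware asked for 256 bytes */
static void shared_write_scb_addr(uint64_t pa)
{
    printf("SCB addr 0x%llx handed to firmware\n", (unsigned long long)pa);
}

static int alloc_host_scb_sketch(struct dma_buf_sketch *buf)
{
    uint32_t size = shared_read_scb_size();     /* 1. read requested size from the shared area */
    if (size == 0)
        return 0;                               /* firmware does not use host SCBs */

    buf->va = calloc(1, size);                  /* 2. allocate (the driver uses DMA-coherent memory) */
    if (buf->va == NULL)
        return -1;
    buf->len = size;
    buf->pa = (uint64_t)(uintptr_t)buf->va;     /* the driver hands back the bus address, not the VA */

    shared_write_scb_addr(buf->pa);             /* 3. write the address back for firmware */
    return 0;
}

int main(void)
{
    struct dma_buf_sketch b;
    return alloc_host_scb_sketch(&b);
}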
3073
+
3074
+void
3075
+dhd_set_host_cap(dhd_pub_t *dhd)
3076
+{
3077
+ uint32 data = 0;
3078
+ dhd_prot_t *prot = dhd->prot;
3079
+
3080
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3081
+ if (dhd->h2d_phase_supported) {
3082
+ data |= HOSTCAP_H2D_VALID_PHASE;
3083
+ if (dhd->force_dongletrap_on_bad_h2d_phase)
3084
+ data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3085
+ }
3086
+ if (prot->host_ipc_version > prot->device_ipc_version)
3087
+ prot->active_ipc_version = prot->device_ipc_version;
3088
+ else
3089
+ prot->active_ipc_version = prot->host_ipc_version;
3090
+
3091
+ data |= prot->active_ipc_version;
3092
+
3093
+ if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3094
+ DHD_INFO(("Advertise Hostready Capability\n"));
3095
+ data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3096
+ }
3097
+ {
3098
+ /* Disable DS altogether */
3099
+ data |= HOSTCAP_DS_NO_OOB_DW;
3100
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3101
+ }
3102
+
3103
+ /* Indicate support for extended trap data */
3104
+ data |= HOSTCAP_EXTENDED_TRAP_DATA;
3105
+
3106
+ /* Indicate support for TX status metadata */
3107
+ if (dhd->pcie_txs_metadata_enable != 0)
3108
+ data |= HOSTCAP_TXSTATUS_METADATA;
3109
+
3110
+ /* Enable fast delete ring in firmware if supported */
3111
+ if (dhd->fast_delete_ring_support) {
3112
+ data |= HOSTCAP_FAST_DELETE_RING;
3113
+ }
3114
+
3115
+ if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
3116
+ DHD_ERROR(("HWA inited\n"));
3117
+ /* TODO: Is hostcap needed? */
3118
+ dhd->hwa_inited = TRUE;
3119
+ }
3120
+
3121
+ if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3122
+ DHD_ERROR(("IDMA inited\n"));
3123
+ data |= HOSTCAP_H2D_IDMA;
3124
+ dhd->idma_inited = TRUE;
3125
+ }
3126
+
3127
+ if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3128
+ DHD_ERROR(("IFRM Inited\n"));
3129
+ data |= HOSTCAP_H2D_IFRM;
3130
+ dhd->ifrm_inited = TRUE;
3131
+ dhd->dma_h2d_ring_upd_support = FALSE;
3132
+ dhd_prot_dma_indx_free(dhd);
3133
+ }
3134
+
3135
+ if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3136
+ DHD_ERROR(("DAR doorbell Use\n"));
3137
+ data |= HOSTCAP_H2D_DAR;
3138
+ dhd->dar_inited = TRUE;
3139
+ }
3140
+
3141
+ data |= HOSTCAP_UR_FW_NO_TRAP;
3142
+
3143
+ if (dhd->hscb_enable) {
3144
+ data |= HOSTCAP_HSCB;
3145
+ }
3146
+
3147
+#ifdef EWP_EDL
3148
+ if (dhd->dongle_edl_support) {
3149
+ data |= HOSTCAP_EDL_RING;
3150
+ DHD_ERROR(("Enable EDL host cap\n"));
3151
+ } else {
3152
+ DHD_ERROR(("DO NOT SET EDL host cap\n"));
3153
+ }
3154
+#endif /* EWP_EDL */
3155
+
3156
+#ifdef DHD_HP2P
3157
+ if (dhd->hp2p_capable) {
3158
+ data |= HOSTCAP_PKT_TIMESTAMP;
3159
+ data |= HOSTCAP_PKT_HP2P;
3160
+ DHD_ERROR(("Enable HP2P in host cap\n"));
3161
+ } else {
3162
+ DHD_ERROR(("HP2P not enabled in host cap\n"));
3163
+ }
3164
+#endif // endif
3165
+
3166
+#ifdef DHD_DB0TS
3167
+ if (dhd->db0ts_capable) {
3168
+ data |= HOSTCAP_DB0_TIMESTAMP;
3169
+ DHD_ERROR(("Enable DB0 TS in host cap\n"));
3170
+ } else {
3171
+ DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3172
+ }
3173
+#endif /* DHD_DB0TS */
3174
+ if (dhd->extdtxs_in_txcpl) {
3175
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3176
+ data |= HOSTCAP_PKT_TXSTATUS;
3177
+ }
3178
+ else {
3179
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3180
+ }
3181
+
3182
+ DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3183
+ __FUNCTION__,
3184
+ prot->active_ipc_version, prot->host_ipc_version,
3185
+ prot->device_ipc_version));
3186
+
3187
+ dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3188
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3189
+ sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3190
+ }
3191
+
3192
+}
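The capability word written above is just a bitmask: the negotiated IPC version (the lower of the host and device versions) occupies the low bits, and feature flags are ORed on top. A small hedged sketch of that negotiation, using made-up flag values rather than the real HOSTCAP_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only; the real HOSTCAP_* constants live in the Broadcom headers. */
#define SKETCH_HOSTCAP_H2D_VALID_PHASE  0x100u
#define SKETCH_HOSTCAP_EXTENDED_TRAP    0x200u

static uint32_t build_host_cap(uint32_t host_ipc_ver, uint32_t device_ipc_ver, int phase_supported)
{
    /* the active IPC version is whichever side is older */
    uint32_t active = (host_ipc_ver > device_ipc_ver) ? device_ipc_ver : host_ipc_ver;
    uint32_t data = active;                          /* version sits in the low bits of the word */

    if (phase_supported)
        data |= SKETCH_HOSTCAP_H2D_VALID_PHASE;      /* feature flags are then ORed on top */
    data |= SKETCH_HOSTCAP_EXTENDED_TRAP;
    return data;
}

int main(void)
{
    printf("hostcap word = 0x%x\n", build_host_cap(7, 6, 1)); /* -> 0x306: version 6 plus two flags */
    return 0;
}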
21773193
21783194 /**
21793195 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
....@@ -2187,43 +3203,24 @@
21873203 {
21883204 sh_addr_t base_addr;
21893205 dhd_prot_t *prot = dhd->prot;
3206
+ int ret = 0;
3207
+ uint32 idmacontrol;
3208
+ uint32 waitcount = 0;
21903209
2191
- /* PKTID handle INIT */
2192
- if (prot->pktid_map_handle != NULL) {
2193
- DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
2194
- ASSERT(0);
2195
- return BCME_ERROR;
2196
- }
3210
+#ifdef WL_MONITOR
3211
+ dhd->monitor_enable = FALSE;
3212
+#endif /* WL_MONITOR */
21973213
2198
-#ifdef IOCTLRESP_USE_CONSTMEM
2199
- if (prot->pktid_map_handle_ioctl != NULL) {
2200
- DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
2201
- ASSERT(0);
2202
- return BCME_ERROR;
2203
- }
2204
-#endif /* IOCTLRESP_USE_CONSTMEM */
3214
+ /**
3215
+ * A user defined value can be assigned to global variable h2d_max_txpost via
3216
+ * 1. DHD IOVAR h2d_max_txpost, before firmware download
3217
+ * 2. module parameter h2d_max_txpost
3218
+ * prot->h2d_max_txpost is assigned with H2DRING_TXPOST_MAX_ITEM,
3219
+ * if the user has not provided a value via either of the above methods.
3220
+ */
3221
+ prot->h2d_max_txpost = (uint16)h2d_max_txpost;
22053222
2206
- prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
2207
- if (prot->pktid_map_handle == NULL) {
2208
- DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
2209
- ASSERT(0);
2210
- return BCME_NOMEM;
2211
- }
2212
-
2213
-#ifdef IOCTLRESP_USE_CONSTMEM
2214
- prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2215
- DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
2216
- if (prot->pktid_map_handle_ioctl == NULL) {
2217
- DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
2218
- ASSERT(0);
2219
- return BCME_NOMEM;
2220
- }
2221
-#endif /* IOCTLRESP_USE_CONSTMEM */
2222
-
2223
- /* Max pkts in ring */
2224
- prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
2225
-
2226
- DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
3223
+ DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
22273224
22283225 /* Read max rx packets supported by dongle */
22293226 dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
....@@ -2232,21 +3229,24 @@
22323229 /* using the latest shared structure template */
22333230 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
22343231 }
2235
- DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3232
+ DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
22363233
22373234 /* Initialize. bzero() would blow away the dma pointers. */
22383235 prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
22393236 prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3237
+ prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3238
+ prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
22403239
22413240 prot->cur_ioctlresp_bufs_posted = 0;
2242
- prot->active_tx_count = 0;
3241
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
22433242 prot->data_seq_no = 0;
22443243 prot->ioctl_seq_no = 0;
22453244 prot->rxbufpost = 0;
22463245 prot->cur_event_bufs_posted = 0;
22473246 prot->ioctl_state = 0;
22483247 prot->curr_ioctl_cmd = 0;
2249
- prot->ioctl_received = IOCTL_WAIT;
3248
+ prot->cur_ts_bufs_posted = 0;
3249
+ prot->infobufpost = 0;
22503250
22513251 prot->dmaxfer.srcmem.va = NULL;
22523252 prot->dmaxfer.dstmem.va = NULL;
....@@ -2257,23 +3257,50 @@
22573257 prot->tx_metadata_offset = 0;
22583258 prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
22593259
2260
- prot->ioctl_trans_id = 0;
3260
+ /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3261
+ prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3262
+ prot->ioctl_state = 0;
3263
+ prot->ioctl_status = 0;
3264
+ prot->ioctl_resplen = 0;
3265
+ prot->ioctl_received = IOCTL_WAIT;
3266
+
3267
+ /* Initialize Common MsgBuf Rings */
3268
+
3269
+ prot->device_ipc_version = dhd->bus->api.fw_rev;
3270
+ prot->host_ipc_version = PCIE_SHARED_VERSION;
3271
+ prot->no_tx_resource = FALSE;
3272
+
3273
+ /* Init the host API version */
3274
+ dhd_set_host_cap(dhd);
3275
+
3276
+ /* alloc and configure scb host address for dongle */
3277
+ if ((ret = dhd_alloc_host_scbs(dhd))) {
3278
+ return ret;
3279
+ }
22613280
22623281 /* Register the interrupt function upfront */
22633282 /* remove corerev checks in data path */
3283
+ /* do this after host/fw negotiation for DAR */
22643284 prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
3285
+ prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
22653286
2266
- /* Initialize Common MsgBuf Rings */
3287
+ dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
22673288
22683289 dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
22693290 dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
22703291 dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
3292
+
3293
+ /* Make it compatible with pre-rev7 Firmware */
3294
+ if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
3295
+ prot->d2hring_tx_cpln.item_len =
3296
+ D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
3297
+ prot->d2hring_rx_cpln.item_len =
3298
+ D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
3299
+ }
22713300 dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
22723301 dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
22733302
2274
-#if defined(PCIE_D2H_SYNC)
22753303 dhd_prot_d2h_sync_init(dhd);
2276
-#endif /* PCIE_D2H_SYNC */
22773304
22783305 dhd_prot_h2d_sync_init(dhd);
22793306
....@@ -2287,7 +3314,7 @@
22873314 /* If supported by the host, indicate the memory block
22883315 * for completion writes / submission reads to shared space
22893316 */
2290
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
3317
+ if (dhd->dma_d2h_ring_upd_support) {
22913318 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
22923319 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
22933320 D2H_DMA_INDX_WR_BUF, 0);
....@@ -2296,7 +3323,7 @@
22963323 H2D_DMA_INDX_RD_BUF, 0);
22973324 }
22983325
2299
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
3326
+ if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
23003327 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
23013328 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
23023329 H2D_DMA_INDX_WR_BUF, 0);
....@@ -2304,6 +3331,9 @@
23043331 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
23053332 D2H_DMA_INDX_RD_BUF, 0);
23063333 }
3334
+ /* Signal to the dongle that common ring init is complete */
3335
+ if (dhd->hostrdy_after_init)
3336
+ dhd_bus_hostready(dhd->bus);
23073337
23083338 /*
23093339 * If the DMA-able buffers for flowring needs to come from a specific
....@@ -2317,26 +3347,123 @@
23173347 return BCME_ERROR;
23183348 }
23193349
3350
+ /* If IFRM is enabled, wait for FW to setup the DMA channel */
3351
+ if (IFRM_ENAB(dhd)) {
3352
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
3353
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3354
+ H2D_IFRM_INDX_WR_BUF, 0);
3355
+ }
3356
+
3357
+ /* If IDMA is enabled and inited, wait for FW to set up the IDMA descriptors
3358
+ * Waiting just before configuring doorbell
3359
+ */
3360
+#ifdef BCMQT
3361
+#define IDMA_ENABLE_WAIT 100
3362
+#else
3363
+#define IDMA_ENABLE_WAIT 10
3364
+#endif // endif
3365
+ if (IDMA_ACTIVE(dhd)) {
3366
+ /* wait for idma_en bit in IDMAcontrol register to be set */
3367
+ /* Loop till idma_en is not set */
3368
+ uint buscorerev = dhd->bus->sih->buscorerev;
3369
+ idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3370
+ IDMAControl(buscorerev), 0, 0);
3371
+ while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
3372
+ (waitcount++ < IDMA_ENABLE_WAIT)) {
3373
+
3374
+ DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
3375
+ waitcount, idmacontrol));
3376
+#ifdef BCMQT
3377
+ OSL_DELAY(200000); /* 200msec for BCMQT */
3378
+#else
3379
+ OSL_DELAY(1000); /* 1ms as its onetime only */
3380
+#endif // endif
3381
+ idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3382
+ IDMAControl(buscorerev), 0, 0);
3383
+ }
3384
+
3385
+ if (waitcount < IDMA_ENABLE_WAIT) {
3386
+ DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
3387
+ } else {
3388
+ DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
3389
+ waitcount, idmacontrol));
3390
+ return BCME_ERROR;
3391
+ }
3392
+ }
3393
+
23203394 /* Host should configure soft doorbells if needed ... here */
23213395
23223396 /* Post to dongle host configured soft doorbells */
23233397 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
23243398
2325
- /* Post buffers for packet reception and ioctl/event responses */
2326
- dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
23273399 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
23283400 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
23293401
3402
+ prot->no_retry = FALSE;
3403
+ prot->no_aggr = FALSE;
3404
+ prot->fixed_rate = FALSE;
3405
+
3406
+ /*
3407
+ * Note that any communication with the Dongle should be added
3408
+ * below this point. Any other host data structure initialization that
3409
+ * needs to be done before the DPC starts executing should be done
3410
+ * before this point.
3411
+ * Because once we start sending H2D requests to Dongle, the Dongle
3412
+ * responds immediately. So the DPC context to handle this
3413
+ * D2H response could preempt the context in which dhd_prot_init is running.
3414
+ * We want to ensure that all the Host part of dhd_prot_init is
3415
+ * done before that.
3416
+ */
3417
+
3418
+ /* See if info rings could be created, info rings should be created
3419
+ * only if dongle does not support EDL
3420
+ */
3421
+#ifdef EWP_EDL
3422
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
3423
+#else
3424
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
3425
+#endif /* EWP_EDL */
3426
+ {
3427
+ if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
3428
+ /* For now log and proceed, further clean up action maybe necessary
3429
+ * when we have more clarity.
3430
+ */
3431
+ DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
3432
+ __FUNCTION__, ret));
3433
+ }
3434
+ }
3435
+
3436
+#ifdef EWP_EDL
3437
+ /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
3438
+ if (dhd->dongle_edl_support) {
3439
+ if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
3440
+ DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
3441
+ __FUNCTION__, ret));
3442
+ }
3443
+ }
3444
+#endif /* EWP_EDL */
3445
+
3446
+#ifdef DHD_HP2P
3447
+ /* create HPP txcmpl/rxcmpl rings */
3448
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
3449
+ if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
3450
+ /* For now log and proceed, further clean up action maybe necessary
3451
+ * when we have more clarity.
3452
+ */
3453
+ DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
3454
+ __FUNCTION__, ret));
3455
+ }
3456
+ }
3457
+#endif /* DHD_HP2P */
3458
+
23303459 return BCME_OK;
23313460 } /* dhd_prot_init */
2332
-
23333461
23343462 /**
23353463 * dhd_prot_detach - PCIE FD protocol layer destructor.
23363464 * Unlink, frees allocated protocol memory (including dhd_prot)
23373465 */
2338
-void
2339
-dhd_prot_detach(dhd_pub_t *dhd)
3466
+void dhd_prot_detach(dhd_pub_t *dhd)
23403467 {
23413468 dhd_prot_t *prot = dhd->prot;
23423469
....@@ -2346,15 +3473,20 @@
23463473 /* free up all DMA-able buffers allocated during prot attach/init */
23473474
23483475 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
2349
- dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
3476
+ dhd_dma_buf_free(dhd, &prot->retbuf);
23503477 dhd_dma_buf_free(dhd, &prot->ioctbuf);
23513478 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
3479
+ dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
3480
+ dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
3481
+ dhd_dma_buf_free(dhd, &prot->host_scb_buf);
23523482
23533483 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
23543484 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
23553485 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
23563486 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
23573487 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
3488
+
3489
+ dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
23583490
23593491 /* Common MsgBuf Rings */
23603492 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
....@@ -2366,41 +3498,61 @@
23663498 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
23673499 dhd_prot_flowrings_pool_detach(dhd);
23683500
2369
- if (dhd->prot->pktid_map_handle) {
2370
- DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
2371
- }
3501
+ /* detach info rings */
3502
+ dhd_prot_detach_info_rings(dhd);
23723503
2373
-#ifndef CONFIG_DHD_USE_STATIC_BUF
2374
- MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
2375
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
3504
+#ifdef EWP_EDL
3505
+ dhd_prot_detach_edl_rings(dhd);
3506
+#endif // endif
3507
+#ifdef DHD_HP2P
3508
+ /* detach HPP rings */
3509
+ dhd_prot_detach_hp2p_rings(dhd);
3510
+#endif /* DHD_HP2P */
23763511
2377
-#if defined(DHD_LB)
3512
+ /* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
3513
+ * handler and PKT memory is allocated using alloc_ioctl_return_buffer(). Otherwise
3514
+ * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
3515
+ * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
3516
+ * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
3517
+ * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTFREE.
3518
+ * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
3519
+ * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
3520
+ */
3521
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
3522
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
3523
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
3524
+#ifdef IOCTLRESP_USE_CONSTMEM
3525
+ DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3526
+#endif // endif
3527
+#ifdef DHD_MAP_PKTID_LOGGING
3528
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
3529
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
3530
+#endif /* DHD_MAP_PKTID_LOGGING */
3531
+
23783532 #if defined(DHD_LB_TXC)
2379
- if (prot->tx_compl_prod.buffer) {
3533
+ if (prot->tx_compl_prod.buffer)
23803534 MFREE(dhd->osh, prot->tx_compl_prod.buffer,
2381
- sizeof(void*) * DHD_LB_WORKQ_SZ);
2382
- }
3535
+ sizeof(void*) * DHD_LB_WORKQ_SZ);
23833536 #endif /* DHD_LB_TXC */
23843537 #if defined(DHD_LB_RXC)
2385
- if (prot->rx_compl_prod.buffer) {
3538
+ if (prot->rx_compl_prod.buffer)
23863539 MFREE(dhd->osh, prot->rx_compl_prod.buffer,
2387
- sizeof(void*) * DHD_LB_WORKQ_SZ);
2388
- }
3540
+ sizeof(void*) * DHD_LB_WORKQ_SZ);
23893541 #endif /* DHD_LB_RXC */
2390
-#endif /* DHD_LB */
3542
+
3543
+ DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
23913544
23923545 dhd->prot = NULL;
23933546 }
23943547 } /* dhd_prot_detach */
23953548
2396
-
23973549 /**
2398
- * dhd_prot_reset - Reset the protocol layer without freeing any objects. This
2399
- * may be invoked to soft reboot the dongle, without having to detach and attach
2400
- * the entire protocol layer.
3550
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects.
3551
+ * This may be invoked to soft reboot the dongle, without having to
3552
+ * detach and attach the entire protocol layer.
24013553 *
2402
- * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
2403
- * a dhd_prot_attach() phase.
3554
+ * After dhd_prot_reset(), dhd_prot_init() may be invoked
3555
+ * without going through a dhd_prot_attach() phase.
24043556 */
24053557 void
24063558 dhd_prot_reset(dhd_pub_t *dhd)
....@@ -2415,20 +3567,43 @@
24153567
24163568 dhd_prot_flowrings_pool_reset(dhd);
24173569
3570
+ /* Reset Common MsgBuf Rings */
24183571 dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
24193572 dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
24203573 dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
24213574 dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
24223575 dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
24233576
3577
+ /* Reset info rings */
3578
+ if (prot->h2dring_info_subn) {
3579
+ dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
3580
+ }
3581
+
3582
+ if (prot->d2hring_info_cpln) {
3583
+ dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
3584
+ }
3585
+#ifdef EWP_EDL
3586
+ if (prot->d2hring_edl) {
3587
+ dhd_prot_ring_reset(dhd, prot->d2hring_edl);
3588
+ }
3589
+#endif /* EWP_EDL */
3590
+
3591
+ /* Reset all DMA-able buffers allocated during prot attach */
3592
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
24243593 dhd_dma_buf_reset(dhd, &prot->retbuf);
24253594 dhd_dma_buf_reset(dhd, &prot->ioctbuf);
2426
- dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
3595
+ dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
3596
+ dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
3597
+ dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
3598
+ dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
3599
+
3600
+ dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3601
+
3602
+ /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
24273603 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
24283604 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
24293605 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
24303606 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
2431
-
24323607
24333608 prot->rx_metadata_offset = 0;
24343609 prot->tx_metadata_offset = 0;
....@@ -2437,13 +3612,14 @@
24373612 prot->cur_event_bufs_posted = 0;
24383613 prot->cur_ioctlresp_bufs_posted = 0;
24393614
2440
- prot->active_tx_count = 0;
3615
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
24413616 prot->data_seq_no = 0;
24423617 prot->ioctl_seq_no = 0;
24433618 prot->ioctl_state = 0;
24443619 prot->curr_ioctl_cmd = 0;
24453620 prot->ioctl_received = IOCTL_WAIT;
2446
- prot->ioctl_trans_id = 0;
3621
+ /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3622
+ prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
24473623
24483624 /* dhd_flow_rings_init is located at dhd_bus_start,
24493625 * so when stopping bus, flowrings shall be deleted
....@@ -2452,123 +3628,84 @@
24523628 dhd_flow_rings_deinit(dhd);
24533629 }
24543630
2455
- if (prot->pktid_map_handle) {
2456
- DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
2457
- prot->pktid_map_handle = NULL;
3631
+#ifdef DHD_HP2P
3632
+ if (prot->d2hring_hp2p_txcpl) {
3633
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
24583634 }
3635
+ if (prot->d2hring_hp2p_rxcpl) {
3636
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
3637
+ }
3638
+#endif /* DHD_HP2P */
24593639
3640
+ /* Reset PKTID map */
3641
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
3642
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
3643
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
24603644 #ifdef IOCTLRESP_USE_CONSTMEM
2461
- if (prot->pktid_map_handle_ioctl) {
2462
- DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2463
- prot->pktid_map_handle_ioctl = NULL;
2464
- }
3645
+ DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
24653646 #endif /* IOCTLRESP_USE_CONSTMEM */
3647
+#ifdef DMAMAP_STATS
3648
+ dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
3649
+ dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
3650
+#ifndef IOCTLRESP_USE_CONSTMEM
3651
+ dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
3652
+#endif /* IOCTLRESP_USE_CONSTMEM */
3653
+ dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
3654
+ dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
3655
+ dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
3656
+#endif /* DMAMAP_STATS */
24663657 } /* dhd_prot_reset */
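As the function comment above notes, reset leaves every allocation in place so that init can run again without a new attach. A toy sketch of the intended call ordering, using stub functions that only stand in for the real entry points:

#include <stdio.h>

/* Stubs that stand in for the real entry points; only the call order matters here. */
static void attach_stub(void) { puts("dhd_prot_attach: allocate prot + DMA buffers"); }
static void init_stub(void)   { puts("dhd_prot_init:   negotiate caps, init rings, post buffers"); }
static void reset_stub(void)  { puts("dhd_prot_reset:  clear rings and pktid maps, keep memory"); }
static void detach_stub(void) { puts("dhd_prot_detach: free everything"); }

int main(void)
{
    attach_stub();   /* once, when the driver loads */
    init_stub();     /* after firmware download */

    reset_stub();    /* soft reboot of the dongle ...          */
    init_stub();     /* ... then re-init, with no fresh attach */

    detach_stub();   /* driver unload */
    return 0;
}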
24673658
3659
+#if defined(DHD_LB_RXP)
3660
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
3661
+#else /* !DHD_LB_RXP */
3662
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
3663
+#endif /* !DHD_LB_RXP */
24683664
2469
-void
2470
-dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
2471
-{
2472
- dhd_prot_t *prot = dhd->prot;
2473
- prot->rx_dataoffset = rx_offset;
2474
-}
3665
+#if defined(DHD_LB_RXC)
3666
+#define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
3667
+#else /* !DHD_LB_RXC */
3668
+#define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0)
3669
+#endif /* !DHD_LB_RXC */
24753670
2476
-/**
2477
- * Initialize protocol: sync w/dongle state.
2478
- * Sets dongle media info (iswl, drv_version, mac address).
2479
- */
2480
-int
2481
-dhd_sync_with_dongle(dhd_pub_t *dhd)
2482
-{
2483
- int ret = 0;
2484
- wlc_rev_info_t revinfo;
2485
-
2486
-
2487
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2488
-
2489
- dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
2490
-
2491
-
2492
-
2493
-#ifdef DHD_FW_COREDUMP
2494
- /* For Android Builds check memdump capability */
2495
- /* Check the memdump capability */
2496
- dhd_get_memdump_info(dhd);
2497
-#endif /* DHD_FW_COREDUMP */
2498
-
2499
- /* Get the device rev info */
2500
- memset(&revinfo, 0, sizeof(revinfo));
2501
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
2502
- if (ret < 0) {
2503
- DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
2504
- goto done;
2505
- }
2506
- DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
2507
- revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
2508
-
2509
- dhd_process_cid_mac(dhd, TRUE);
2510
-
2511
- ret = dhd_preinit_ioctls(dhd);
2512
- if (!ret) {
2513
- dhd_process_cid_mac(dhd, FALSE);
2514
- }
2515
-
2516
- /* Always assumes wl for now */
2517
- dhd->iswl = TRUE;
2518
-done:
2519
- return ret;
2520
-} /* dhd_sync_with_dongle */
3671
+#if defined(DHD_LB_TXC)
3672
+#define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
3673
+#else /* !DHD_LB_TXC */
3674
+#define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
3675
+#endif /* !DHD_LB_TXC */
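These DHD_LB_DISPATCH_* wrappers use a common compile-time dispatch idiom: when the feature is configured the macro expands to the real call, otherwise to a do-nothing statement, so callers never need their own #ifdefs. A minimal illustration with an invented SKETCH_FEATURE switch:

#include <stdio.h>

/* The macro expands to a real call when the feature is configured and to a
 * do-nothing statement otherwise; SKETCH_FEATURE is a made-up config switch.
 */
#ifdef SKETCH_FEATURE
static void sketch_do_work(int x) { printf("deferred work %d\n", x); }
#define SKETCH_DISPATCH(x) sketch_do_work(x)
#else
#define SKETCH_DISPATCH(x) do { /* noop */ } while (0)
#endif

int main(void)
{
    SKETCH_DISPATCH(42);  /* compiles either way; zero cost when the feature is off */
    return 0;
}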
25213676
25223677 #if defined(DHD_LB)
2523
-
25243678 /* DHD load balancing: deferral of work to another online CPU */
2525
-
25263679 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
25273680 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
25283681 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
25293682 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
2530
-
25313683 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
25323684
3685
+#if defined(DHD_LB_RXP)
25333686 /**
2534
- * dhd_lb_dispatch - load balance by dispatch work to other CPU cores
2535
- * Note: rx_compl_tasklet is dispatched explicitly.
3687
+ * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
3688
+ * to other CPU cores
25363689 */
25373690 static INLINE void
2538
-dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
3691
+dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
25393692 {
2540
- switch (ring_idx) {
3693
+ dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
3694
+}
3695
+#endif /* DHD_LB_RXP */
25413696
25423697 #if defined(DHD_LB_TXC)
2543
- case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
2544
- bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
2545
- dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
2546
- break;
2547
-#endif /* DHD_LB_TXC */
2548
-
2549
- case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
2550
- {
2551
-#if defined(DHD_LB_RXC)
2552
- dhd_prot_t *prot = dhdp->prot;
2553
- /* Schedule the takslet only if we have to */
2554
- if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
2555
- /* flush WR index */
2556
- bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
2557
- dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
2558
- }
2559
-#endif /* DHD_LB_RXC */
2560
-#if defined(DHD_LB_RXP)
2561
- dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
2562
-#endif /* DHD_LB_RXP */
2563
- break;
2564
- }
2565
- default:
2566
- break;
2567
- }
3698
+/**
3699
+ * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
3700
+ * to other CPU cores
3701
+ */
3702
+static INLINE void
3703
+dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
3704
+{
3705
+ bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
3706
+ dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
25683707 }
25693708
2570
-
2571
-#if defined(DHD_LB_TXC)
25723709 /**
25733710 * DHD load balanced tx completion tasklet handler, that will perform the
25743711 * freeing of packets on the selected CPU. Packet pointers are delivered to
....@@ -2585,6 +3722,10 @@
25853722 dhd_prot_t *prot = dhd->prot;
25863723 bcm_workq_t *workq = &prot->tx_compl_cons;
25873724 uint32 count = 0;
3725
+
3726
+ int curr_cpu;
3727
+ curr_cpu = get_cpu();
3728
+ put_cpu();
25883729
25893730 DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
25903731
....@@ -2606,11 +3747,13 @@
26063747 pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
26073748 pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
26083749
2609
- DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, pkt, 0);
2610
-
3750
+ DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
26113751 #if defined(BCMPCIE)
26123752 dhd_txcomplete(dhd, pkt, true);
2613
-#endif
3753
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
3754
+ dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
3755
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
3756
+#endif // endif
26143757
26153758 PKTFREE(dhd->osh, pkt, TRUE);
26163759 count++;
....@@ -2623,6 +3766,23 @@
26233766 #endif /* DHD_LB_TXC */
26243767
26253768 #if defined(DHD_LB_RXC)
3769
+
3770
+/**
3771
+ * dhd_lb_dispatch_rx_compl - load balance by dispatching Rx completion work
3772
+ * to other CPU cores
3773
+ */
3774
+static INLINE void
3775
+dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
3776
+{
3777
+ dhd_prot_t *prot = dhdp->prot;
3778
+ /* Schedule the tasklet only if we have to */
3779
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
3780
+ /* flush WR index */
3781
+ bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
3782
+ dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
3783
+ }
3784
+}
3785
+
26263786 void
26273787 dhd_lb_rx_compl_handler(unsigned long data)
26283788 {
....@@ -2635,8 +3795,486 @@
26353795 bcm_workq_cons_sync(workq);
26363796 }
26373797 #endif /* DHD_LB_RXC */
2638
-
26393798 #endif /* DHD_LB */
3799
+
3800
+void
3801
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
3802
+{
3803
+ dhd_prot_t *prot = dhd->prot;
3804
+ prot->rx_dataoffset = rx_offset;
3805
+}
3806
+
3807
+static int
3808
+dhd_check_create_info_rings(dhd_pub_t *dhd)
3809
+{
3810
+ dhd_prot_t *prot = dhd->prot;
3811
+ int ret = BCME_ERROR;
3812
+ uint16 ringid;
3813
+
3814
+ {
3815
+ /* dongle may increase max_submission_rings so keep
3816
+ * ringid at end of dynamic rings
3817
+ */
3818
+ ringid = dhd->bus->max_tx_flowrings +
3819
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
3820
+ BCMPCIE_H2D_COMMON_MSGRINGS;
3821
+ }
3822
+
3823
+ if (prot->d2hring_info_cpln) {
3824
+ /* for d2hring re-entry case, clear inited flag */
3825
+ prot->d2hring_info_cpln->inited = FALSE;
3826
+ }
3827
+
3828
+ if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3829
+ return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
3830
+ }
3831
+
3832
+ if (prot->h2dring_info_subn == NULL) {
3833
+ prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3834
+
3835
+ if (prot->h2dring_info_subn == NULL) {
3836
+ DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3837
+ __FUNCTION__));
3838
+ return BCME_NOMEM;
3839
+ }
3840
+
3841
+ DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
3842
+ ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
3843
+ H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
3844
+ ringid);
3845
+ if (ret != BCME_OK) {
3846
+ DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
3847
+ __FUNCTION__));
3848
+ goto err;
3849
+ }
3850
+ }
3851
+
3852
+ if (prot->d2hring_info_cpln == NULL) {
3853
+ prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3854
+
3855
+ if (prot->d2hring_info_cpln == NULL) {
3856
+ DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3857
+ __FUNCTION__));
3858
+ return BCME_NOMEM;
3859
+ }
3860
+
3861
+ /* create the debug info completion ring next to debug info submit ring
3862
+ * ringid = id next to debug info submit ring
3863
+ */
3864
+ ringid = ringid + 1;
3865
+
3866
+ DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3867
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3868
+ D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
3869
+ ringid);
3870
+ if (ret != BCME_OK) {
3871
+ DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3872
+ __FUNCTION__));
3873
+ dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3874
+ goto err;
3875
+ }
3876
+ }
3877
+
3878
+ return ret;
3879
+err:
3880
+ MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3881
+ prot->h2dring_info_subn = NULL;
3882
+
3883
+ if (prot->d2hring_info_cpln) {
3884
+ MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3885
+ prot->d2hring_info_cpln = NULL;
3886
+ }
3887
+ return ret;
3888
+} /* dhd_check_create_info_rings */
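The ring id computed above is simply the first index past all dynamic submission rings; the expression collapses to max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS, and the completion ring takes the next id. A toy calculation with made-up counts (the dongle reports the real values at attach time):

#include <stdio.h>

int main(void)
{
    /* Made-up counts; the dongle reports the real values through the shared structure. */
    unsigned max_tx_flowrings     = 40;
    unsigned max_submission_rings = 41;  /* flowrings plus extra dynamic submit rings */
    unsigned h2d_common_rings     = 2;   /* stands in for BCMPCIE_H2D_COMMON_MSGRINGS */

    unsigned info_subn_ringid = max_tx_flowrings +
        (max_submission_rings - max_tx_flowrings) + h2d_common_rings;  /* collapses to 41 + 2 = 43 */
    unsigned info_cpln_ringid = info_subn_ringid + 1;                  /* completion ring sits next to it */

    printf("info submit ring id %u, info completion ring id %u\n",
           info_subn_ringid, info_cpln_ringid);
    return 0;
}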
3889
+
3890
+int
3891
+dhd_prot_init_info_rings(dhd_pub_t *dhd)
3892
+{
3893
+ dhd_prot_t *prot = dhd->prot;
3894
+ int ret = BCME_OK;
3895
+
3896
+ if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3897
+ DHD_ERROR(("%s: info rings aren't created! \n",
3898
+ __FUNCTION__));
3899
+ return ret;
3900
+ }
3901
+
3902
+ if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3903
+ DHD_INFO(("Info completion ring was created!\n"));
3904
+ return ret;
3905
+ }
3906
+
3907
+ DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3908
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
3909
+ BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
3910
+ if (ret != BCME_OK)
3911
+ return ret;
3912
+
3913
+ prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
3914
+ prot->h2dring_info_subn->current_phase = 0;
3915
+ prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3916
+ prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3917
+
3918
+ DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3919
+ prot->h2dring_info_subn->n_completion_ids = 1;
3920
+ prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3921
+
3922
+ ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
3923
+ BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
3924
+
3925
+ /* Note that there is no way to delete a d2h or h2d ring once it has been created,
3926
+ * so we cannot clean up if one ring was created while the other failed
3927
+ */
3928
+ return ret;
3929
+} /* dhd_prot_init_info_rings */
3930
+
3931
+static void
3932
+dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3933
+{
3934
+ if (dhd->prot->h2dring_info_subn) {
3935
+ dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3936
+ MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3937
+ dhd->prot->h2dring_info_subn = NULL;
3938
+ }
3939
+ if (dhd->prot->d2hring_info_cpln) {
3940
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3941
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3942
+ dhd->prot->d2hring_info_cpln = NULL;
3943
+ }
3944
+}
3945
+
3946
+#ifdef DHD_HP2P
3947
+static int
3948
+dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
3949
+{
3950
+ dhd_prot_t *prot = dhd->prot;
3951
+ int ret = BCME_ERROR;
3952
+ uint16 ringid;
3953
+
3954
+ /* Last 2 dynamic ring indices are used by hp2p rings */
3955
+ ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
3956
+
3957
+ if (prot->d2hring_hp2p_txcpl == NULL) {
3958
+ prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3959
+
3960
+ if (prot->d2hring_hp2p_txcpl == NULL) {
3961
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
3962
+ __FUNCTION__));
3963
+ return BCME_NOMEM;
3964
+ }
3965
+
3966
+ DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
3967
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
3968
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
3969
+ ringid);
3970
+ if (ret != BCME_OK) {
3971
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
3972
+ __FUNCTION__));
3973
+ goto err2;
3974
+ }
3975
+ } else {
3976
+ /* for re-entry case, clear inited flag */
3977
+ prot->d2hring_hp2p_txcpl->inited = FALSE;
3978
+ }
3979
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
3980
+ prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3981
+
3982
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
3983
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
3984
+ __FUNCTION__));
3985
+ return BCME_NOMEM;
3986
+ }
3987
+
3988
+ /* create the hp2p rx completion ring next to hp2p tx compl ring
3989
+ * ringid = id next to hp2p tx compl ring
3990
+ */
3991
+ ringid = ringid + 1;
3992
+
3993
+ DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
3994
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
3995
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
3996
+ ringid);
3997
+ if (ret != BCME_OK) {
3998
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
3999
+ __FUNCTION__));
4000
+ goto err1;
4001
+ }
4002
+ } else {
4003
+ /* for re-entry case, clear inited flag */
4004
+ prot->d2hring_hp2p_rxcpl->inited = FALSE;
4005
+ }
4006
+
4007
+ return ret;
4008
+err1:
4009
+ MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4010
+ prot->d2hring_hp2p_rxcpl = NULL;
4011
+
4012
+err2:
4013
+ MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4014
+ prot->d2hring_hp2p_txcpl = NULL;
4015
+ return ret;
4016
+} /* dhd_check_create_hp2p_rings */
4017
+
4018
+int
4019
+dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4020
+{
4021
+ dhd_prot_t *prot = dhd->prot;
4022
+ int ret = BCME_OK;
4023
+
4024
+ dhd->hp2p_ring_active = FALSE;
4025
+
4026
+ if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4027
+ DHD_ERROR(("%s: hp2p rings aren't created! \n",
4028
+ __FUNCTION__));
4029
+ return ret;
4030
+ }
4031
+
4032
+ if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4033
+ DHD_INFO(("hp2p tx completion ring was created!\n"));
4034
+ return ret;
4035
+ }
4036
+
4037
+ DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4038
+ prot->d2hring_hp2p_txcpl->idx));
4039
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4040
+ BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4041
+ if (ret != BCME_OK)
4042
+ return ret;
4043
+
4044
+ prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4045
+ prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4046
+
4047
+ if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4048
+ DHD_INFO(("hp2p rx completion ring was created!\n"));
4049
+ return ret;
4050
+ }
4051
+
4052
+ DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4053
+ prot->d2hring_hp2p_rxcpl->idx));
4054
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4055
+ BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4056
+ if (ret != BCME_OK)
4057
+ return ret;
4058
+
4059
+ prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4060
+ prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4061
+
4062
+ /* Note that there is no way to delete a d2h or h2d ring once it has been created,
4063
+ * so we cannot clean up if one ring was created while the other failed
4064
+ */
4065
+ return BCME_OK;
4066
+} /* dhd_prot_init_hp2p_rings */
4067
+
4068
+static void
4069
+dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4070
+{
4071
+ if (dhd->prot->d2hring_hp2p_txcpl) {
4072
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4073
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4074
+ dhd->prot->d2hring_hp2p_txcpl = NULL;
4075
+ }
4076
+ if (dhd->prot->d2hring_hp2p_rxcpl) {
4077
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4078
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4079
+ dhd->prot->d2hring_hp2p_rxcpl = NULL;
4080
+ }
4081
+}
4082
+#endif /* DHD_HP2P */
4083
+
4084
+#ifdef EWP_EDL
4085
+static int
4086
+dhd_check_create_edl_rings(dhd_pub_t *dhd)
4087
+{
4088
+ dhd_prot_t *prot = dhd->prot;
4089
+ int ret = BCME_ERROR;
4090
+ uint16 ringid;
4091
+
4092
+ {
4093
+ /* dongle may increase max_submission_rings so keep
4094
+ * ringid at end of dynamic rings (re-use info ring cpl ring id)
4095
+ */
4096
+ ringid = dhd->bus->max_tx_flowrings +
4097
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4098
+ BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4099
+ }
4100
+
4101
+ if (prot->d2hring_edl) {
4102
+ prot->d2hring_edl->inited = FALSE;
4103
+ return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4104
+ }
4105
+
4106
+ if (prot->d2hring_edl == NULL) {
4107
+ prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4108
+
4109
+ if (prot->d2hring_edl == NULL) {
4110
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4111
+ __FUNCTION__));
4112
+ return BCME_NOMEM;
4113
+ }
4114
+
4115
+ DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4116
+ ringid));
4117
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4118
+ D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4119
+ ringid);
4120
+ if (ret != BCME_OK) {
4121
+ DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4122
+ __FUNCTION__));
4123
+ goto err;
4124
+ }
4125
+ }
4126
+
4127
+ return ret;
4128
+err:
4129
+ MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4130
+ prot->d2hring_edl = NULL;
4131
+
4132
+ return ret;
4133
+} /* dhd_check_create_edl_rings */
4134
+
4135
+int
4136
+dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4137
+{
4138
+ dhd_prot_t *prot = dhd->prot;
4139
+ int ret = BCME_ERROR;
4140
+
4141
+ if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4142
+ DHD_ERROR(("%s: EDL rings aren't created! \n",
4143
+ __FUNCTION__));
4144
+ return ret;
4145
+ }
4146
+
4147
+ if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4148
+ DHD_INFO(("EDL completion ring was created!\n"));
4149
+ return ret;
4150
+ }
4151
+
4152
+ DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4153
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4154
+ BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4155
+ if (ret != BCME_OK)
4156
+ return ret;
4157
+
4158
+ prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
4159
+ prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4160
+
4161
+ return BCME_OK;
4162
+} /* dhd_prot_init_edl_rings */
4163
+
4164
+static void
4165
+dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
4166
+{
4167
+ if (dhd->prot->d2hring_edl) {
4168
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
4169
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
4170
+ dhd->prot->d2hring_edl = NULL;
4171
+ }
4172
+}
4173
+#endif /* EWP_EDL */
4174
+
4175
+/**
4176
+ * Initialize protocol: sync w/dongle state.
4177
+ * Sets dongle media info (iswl, drv_version, mac address).
4178
+ */
4179
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
4180
+{
4181
+ int ret = 0;
4182
+ wlc_rev_info_t revinfo;
4183
+ char buf[128];
4184
+ dhd_prot_t *prot = dhd->prot;
4185
+
4186
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4187
+
4188
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
4189
+
4190
+ /* Post ts buffer after shim layer is attached */
4191
+ ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
4192
+
4193
+#ifndef OEM_ANDROID
4194
+ /* Get the device MAC address */
4195
+ memset(buf, 0, sizeof(buf));
4196
+ strncpy(buf, "cur_etheraddr", sizeof(buf) - 1);
4197
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4198
+ if (ret < 0) {
4199
+ DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
4200
+ goto done;
4201
+ }
4202
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
4203
+ if (dhd_msg_level & DHD_INFO_VAL) {
4204
+ bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
4205
+ }
4206
+#endif /* OEM_ANDROID */
4207
+
4208
+#ifdef DHD_FW_COREDUMP
4209
+ /* Check the memdump capability */
4210
+ dhd_get_memdump_info(dhd);
4211
+#endif /* DHD_FW_COREDUMP */
4212
+#ifdef BCMASSERT_LOG
4213
+ dhd_get_assert_info(dhd);
4214
+#endif /* BCMASSERT_LOG */
4215
+
4216
+ /* Get the device rev info */
4217
+ memset(&revinfo, 0, sizeof(revinfo));
4218
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
4219
+ if (ret < 0) {
4220
+ DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
4221
+ goto done;
4222
+ }
4223
+ DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
4224
+ revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
4225
+
4226
+ /* Get the RxBuf post size */
4227
+ memset(buf, 0, sizeof(buf));
4228
+ bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
4229
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4230
+ if (ret < 0) {
4231
+ DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
4232
+ __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4233
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4234
+ } else {
4235
+ memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
4236
+ if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
4237
+ DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
4238
+ __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4239
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4240
+ } else {
4241
+ DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
4242
+ }
4243
+ }
4244
+
4245
+ /* Post buffers for packet reception */
4246
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4247
+
4248
+ DHD_SSSR_DUMP_INIT(dhd);
4249
+
4250
+ dhd_process_cid_mac(dhd, TRUE);
4251
+ ret = dhd_preinit_ioctls(dhd);
4252
+ dhd_process_cid_mac(dhd, FALSE);
4253
+
4254
+#if defined(DHD_H2D_LOG_TIME_SYNC)
4255
+#ifdef DHD_HP2P
4256
+ if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
4257
+ if (dhd->hp2p_enable) {
4258
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
4259
+ } else {
4260
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4261
+ }
4262
+#else
4263
+ if (FW_SUPPORTED(dhd, h2dlogts)) {
4264
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4265
+#endif // endif
4266
+ dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
4267
+ /* This is during initialization. */
4268
+ dhd_h2d_log_time_sync(dhd);
4269
+ } else {
4270
+ dhd->dhd_rte_time_sync_ms = 0;
4271
+ }
4272
+#endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
4273
+ /* Always assumes wl for now */
4274
+ dhd->iswl = TRUE;
4275
+done:
4276
+ return ret;
4277
+} /* dhd_sync_with_dongle */
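The rxbufpost_sz handling above boils down to a query-with-fallback: if the iovar fails or the firmware reports an out-of-range size, the driver keeps its compile-time default. A small hedged sketch of that decision, with invented limits in place of DHD_FLOWRING_RX_BUFPOST_PKTSZ and its _MAX counterpart:

#include <stdint.h>
#include <stdio.h>

/* Illustrative limits; the driver's real ones are DHD_FLOWRING_RX_BUFPOST_PKTSZ and _MAX. */
#define SKETCH_RXBUF_DEFAULT 2048u
#define SKETCH_RXBUF_MAX     4096u

static uint16_t pick_rxbufpost_sz(int iovar_ret, uint16_t fw_value)
{
    if (iovar_ret < 0)
        return SKETCH_RXBUF_DEFAULT;   /* firmware does not implement the iovar */
    if (fw_value > SKETCH_RXBUF_MAX)
        return SKETCH_RXBUF_DEFAULT;   /* reject an implausible size */
    return fw_value;                   /* otherwise trust the firmware value */
}

int main(void)
{
    printf("%u %u %u\n",
           pick_rxbufpost_sz(-1, 0),    /* iovar failed        -> 2048 */
           pick_rxbufpost_sz(0, 8192),  /* value out of range  -> 2048 */
           pick_rxbufpost_sz(0, 2500)); /* sane firmware value -> 2500 */
    return 0;
}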
26404278
26414279 #define DHD_DBG_SHOW_METADATA 0
26424280
....@@ -2736,7 +4374,9 @@
27364374 {
27374375 if (pkt) {
27384376 if (pkttype == PKTTYPE_IOCTL_RX ||
2739
- pkttype == PKTTYPE_EVENT_RX) {
4377
+ pkttype == PKTTYPE_EVENT_RX ||
4378
+ pkttype == PKTTYPE_INFO_RX ||
4379
+ pkttype == PKTTYPE_TSBUF_RX) {
27404380 #ifdef DHD_USE_STATIC_CTRLBUF
27414381 PKTFREE_STATIC(dhd->osh, pkt, send);
27424382 #else
....@@ -2748,6 +4388,11 @@
27484388 }
27494389 }
27504390
4391
+/**
4392
+ * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
4393
+ * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
4394
+ * to ensure thread safety, so no need to hold any locks for this function
4395
+ */
27514396 static INLINE void * BCMFASTPATH
27524397 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
27534398 {
....@@ -2759,25 +4404,45 @@
27594404
27604405 #ifdef DHD_PCIE_PKTID
27614406 if (free_pktid) {
2762
- PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
4407
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
27634408 pktid, pa, len, dmah, secdma, pkttype);
27644409 } else {
2765
- PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle,
4410
+ PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
27664411 pktid, pa, len, dmah, secdma, pkttype);
27674412 }
27684413 #else
2769
- PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa,
4414
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
27704415 len, dmah, secdma, pkttype);
27714416 #endif /* DHD_PCIE_PKTID */
2772
-
27734417 if (PKTBUF) {
27744418 {
2775
- if (SECURE_DMA_ENAB(dhd->osh)) {
4419
+ if (SECURE_DMA_ENAB(dhd->osh))
27764420 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
27774421 secdma, 0);
2778
- } else {
2779
- DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, PKTBUF, dmah);
4422
+ else
4423
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4424
+#ifdef DMAMAP_STATS
4425
+ switch (pkttype) {
4426
+#ifndef IOCTLRESP_USE_CONSTMEM
4427
+ case PKTTYPE_IOCTL_RX:
4428
+ dhd->dma_stats.ioctl_rx--;
4429
+ dhd->dma_stats.ioctl_rx_sz -= len;
4430
+ break;
4431
+#endif /* IOCTLRESP_USE_CONSTMEM */
4432
+ case PKTTYPE_EVENT_RX:
4433
+ dhd->dma_stats.event_rx--;
4434
+ dhd->dma_stats.event_rx_sz -= len;
4435
+ break;
4436
+ case PKTTYPE_INFO_RX:
4437
+ dhd->dma_stats.info_rx--;
4438
+ dhd->dma_stats.info_rx_sz -= len;
4439
+ break;
4440
+ case PKTTYPE_TSBUF_RX:
4441
+ dhd->dma_stats.tsbuf_rx--;
4442
+ dhd->dma_stats.tsbuf_rx_sz -= len;
4443
+ break;
27804444 }
4445
+#endif /* DMAMAP_STATS */
27814446 }
27824447 }
27834448
....@@ -2794,7 +4459,7 @@
27944459
27954460 return;
27964461 }
2797
-#endif /* IOCTLRESP_USE_CONSTMEM */
4462
+#endif // endif
27984463
27994464 static void BCMFASTPATH
28004465 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
....@@ -2839,91 +4504,98 @@
28394504 static int BCMFASTPATH
28404505 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
28414506 {
2842
- void *p;
2843
- uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4507
+ void *p, **pktbuf;
28444508 uint8 *rxbuf_post_tmp;
28454509 host_rxbuf_post_t *rxbuf_post;
28464510 void *msg_start;
2847
- dmaaddr_t pa;
2848
- uint32 pktlen;
2849
- uint8 i = 0;
2850
- uint16 alloced = 0;
4511
+ dmaaddr_t pa, *pktbuf_pa;
4512
+ uint32 *pktlen;
4513
+ uint16 i = 0, alloced = 0;
28514514 unsigned long flags;
28524515 uint32 pktid;
28534516 dhd_prot_t *prot = dhd->prot;
28544517 msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
4518
+ void *lcl_buf;
4519
+ uint16 lcl_buf_size;
4520
+ uint16 pktsz = prot->rxbufpost_sz;
28554521
2856
- DHD_GENERAL_LOCK(dhd, flags);
2857
-
2858
- /* Claim space for exactly 'count' no of messages, for mitigation purpose */
2859
- msg_start = (void *)
2860
- dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
2861
-
2862
- DHD_GENERAL_UNLOCK(dhd, flags);
2863
-
2864
- if (msg_start == NULL) {
2865
- DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
2866
- return -1;
4522
+ /* allocate a local buffer to store pkt buffer va, pa and length */
4523
+ lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
4524
+ RX_BUF_BURST;
4525
+ lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
4526
+ if (!lcl_buf) {
4527
+ DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
4528
+ return 0;
28674529 }
2868
- /* if msg_start != NULL, we should have alloced space for atleast 1 item */
2869
- ASSERT(alloced > 0);
4530
+ pktbuf = lcl_buf;
4531
+ pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
4532
+ pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
28704533
2871
- rxbuf_post_tmp = (uint8*)msg_start;
2872
-
2873
- /* loop through each allocated message in the rxbuf post msgbuf_ring */
2874
- for (i = 0; i < alloced; i++) {
2875
- rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
2876
- /* Create a rx buffer */
4534
+ for (i = 0; i < count; i++) {
28774535 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
28784536 DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
28794537 dhd->rx_pktgetfail++;
28804538 break;
28814539 }
28824540
2883
- pktlen = PKTLEN(dhd->osh, p);
4541
+ pktlen[i] = PKTLEN(dhd->osh, p);
28844542 if (SECURE_DMA_ENAB(dhd->osh)) {
2885
- DHD_GENERAL_LOCK(dhd, flags);
2886
- pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4543
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
28874544 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
2888
- DHD_GENERAL_UNLOCK(dhd, flags);
28894545 }
28904546 #ifndef BCM_SECURE_DMA
2891
- else {
2892
- pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
2893
- }
2894
-#endif /* BCM_SECURE_DMA */
4547
+ else
4548
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
4549
+#endif /* #ifndef BCM_SECURE_DMA */
28954550
28964551 if (PHYSADDRISZERO(pa)) {
2897
- if (SECURE_DMA_ENAB(dhd->osh)) {
2898
- DHD_GENERAL_LOCK(dhd, flags);
2899
- SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
2900
- ring->dma_buf.secdma, 0);
2901
- DHD_GENERAL_UNLOCK(dhd, flags);
2902
- } else {
2903
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
2904
- }
2905
-
29064552 PKTFREE(dhd->osh, p, FALSE);
29074553 DHD_ERROR(("Invalid phyaddr 0\n"));
29084554 ASSERT(0);
29094555 break;
29104556 }
4557
+#ifdef DMAMAP_STATS
4558
+ dhd->dma_stats.rxdata++;
4559
+ dhd->dma_stats.rxdata_sz += pktlen[i];
4560
+#endif /* DMAMAP_STATS */
29114561
29124562 PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
2913
- pktlen = PKTLEN(dhd->osh, p);
4563
+ pktlen[i] = PKTLEN(dhd->osh, p);
4564
+ pktbuf[i] = p;
4565
+ pktbuf_pa[i] = pa;
4566
+ }
29144567
2915
- /* Common msg header */
2916
- rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
2917
- rxbuf_post->cmn_hdr.if_id = 0;
2918
- rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
2919
- ring->seqnum++;
4568
+ /* only post what we have */
4569
+ count = i;
4570
+
4571
+ /* grab the ring lock to allocate pktid and post on ring */
4572
+ DHD_RING_LOCK(ring->ring_lock, flags);
4573
+
4574
+ /* Claim space for exactly 'count' no of messages, for mitigation purpose */
4575
+ msg_start = (void *)
4576
+ dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
4577
+ if (msg_start == NULL) {
4578
+ DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4579
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
4580
+ goto cleanup;
4581
+ }
4582
+ /* if msg_start != NULL, we should have alloced space for atleast 1 item */
4583
+ ASSERT(alloced > 0);
4584
+
4585
+ rxbuf_post_tmp = (uint8*)msg_start;
4586
+
4587
+ for (i = 0; i < alloced; i++) {
4588
+ rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
4589
+ p = pktbuf[i];
4590
+ pa = pktbuf_pa[i];
29204591
29214592 #if defined(DHD_LB_RXC)
29224593 if (use_rsv_pktid == TRUE) {
29234594 bcm_workq_t *workq = &prot->rx_compl_cons;
29244595 int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4596
+
29254597 if (elem_ix == BCM_RING_EMPTY) {
2926
- DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
4598
+ DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
29274599 pktid = DHD_PKTID_INVALID;
29284600 goto alloc_pkt_id;
29294601 } else {
....@@ -2931,49 +4603,36 @@
29314603 pktid = *elem;
29324604 }
29334605
4606
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4607
+
29344608 /* Now populate the previous locker with valid information */
29354609 if (pktid != DHD_PKTID_INVALID) {
2936
- rxbuf_post->cmn_hdr.request_id = htol32(pktid);
2937
- DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
2938
- pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
4610
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
4611
+ p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
29394612 PKTTYPE_DATA_RX);
29404613 }
29414614 } else
2942
-#endif /* DHD_LB_RXC */
4615
+#endif /* ! DHD_LB_RXC */
29434616 {
29444617 #if defined(DHD_LB_RXC)
29454618 alloc_pkt_id:
2946
-#endif
4619
+#endif /* DHD_LB_RXC */
4620
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
4621
+ pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
29474622 #if defined(DHD_PCIE_PKTID)
2948
- /* get the lock before calling DHD_NATIVE_TO_PKTID */
2949
- DHD_GENERAL_LOCK(dhd, flags);
2950
-#endif
2951
- pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
2952
- pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
2953
-
2954
-#if defined(DHD_PCIE_PKTID)
2955
- /* free lock */
2956
- DHD_GENERAL_UNLOCK(dhd, flags);
2957
-
29584623 if (pktid == DHD_PKTID_INVALID) {
2959
-
2960
- if (SECURE_DMA_ENAB(dhd->osh)) {
2961
- DHD_GENERAL_LOCK(dhd, flags);
2962
- SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
2963
- ring->dma_buf.secdma, 0);
2964
- DHD_GENERAL_UNLOCK(dhd, flags);
2965
- } else {
2966
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
2967
- }
2968
-
2969
- PKTFREE(dhd->osh, p, FALSE);
2970
- DHD_ERROR(("Pktid pool depleted.\n"));
29714624 break;
29724625 }
29734626 #endif /* DHD_PCIE_PKTID */
29744627 }
29754628
2976
- rxbuf_post->data_buf_len = htol16((uint16)pktlen);
4629
+ /* Common msg header */
4630
+ rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
4631
+ rxbuf_post->cmn_hdr.if_id = 0;
4632
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4633
+ rxbuf_post->cmn_hdr.flags = ring->current_phase;
4634
+ ring->seqnum++;
4635
+ rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
29774636 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
29784637 rxbuf_post->data_buf_addr.low_addr =
29794638 htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
....@@ -2988,33 +4647,235 @@
29884647 rxbuf_post->metadata_buf_addr.low_addr = 0;
29894648 }
29904649
2991
-#if defined(DHD_PKTID_AUDIT_RING)
2992
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC);
4650
+#ifdef DHD_PKTID_AUDIT_RING
4651
+ DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
29934652 #endif /* DHD_PKTID_AUDIT_RING */
29944653
29954654 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
29964655
29974656 /* Move rxbuf_post_tmp to next item */
29984657 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
4658
+
4659
+#ifdef DHD_LBUF_AUDIT
4660
+ PKTAUDIT(dhd->osh, p);
4661
+#endif // endif
29994662 }
30004663
30014664 if (i < alloced) {
3002
- if (ring->wr < (alloced - i)) {
4665
+ if (ring->wr < (alloced - i))
30034666 ring->wr = ring->max_items - (alloced - i);
3004
- } else {
4667
+ else
30054668 ring->wr -= (alloced - i);
4669
+
4670
+ if (ring->wr == 0) {
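+ /* write index wrapped; toggle the phase bit carried in cmn_hdr.flags so the dongle can tell fresh work items from stale ones */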
4671
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
4672
+ ring->current_phase = ring->current_phase ?
4673
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
30064674 }
30074675
30084676 alloced = i;
30094677 }
30104678
3011
- /* Update ring's WR index and ring doorbell to dongle */
4679
+ /* update ring's WR index and ring doorbell to dongle */
30124680 if (alloced > 0) {
30134681 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
30144682 }
30154683
4684
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
4685
+
4686
+cleanup:
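+ /* unmap and free any packets that were DMA-mapped above but never made it onto the ring */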
4687
+ for (i = alloced; i < count; i++) {
4688
+ p = pktbuf[i];
4689
+ pa = pktbuf_pa[i];
4690
+
4691
+ if (SECURE_DMA_ENAB(dhd->osh))
4692
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
4693
+ DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
4694
+ else
4695
+ DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
4696
+ PKTFREE(dhd->osh, p, FALSE);
4697
+ }
4698
+
4699
+ MFREE(dhd->osh, lcl_buf, lcl_buf_size);
4700
+
30164701 return alloced;
3017
-} /* dhd_prot_rxbuf_post */
4702
+} /* dhd_prot_rxbufpost */
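+/* dhd_prot_infobufpost: post host buffers to the H2D info submission ring; the dongle returns them on the info completion ring */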
4703
+
4704
+static int
4705
+dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
4706
+{
4707
+ unsigned long flags;
4708
+ uint32 pktid;
4709
+ dhd_prot_t *prot = dhd->prot;
4710
+ uint16 alloced = 0;
4711
+ uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
4712
+ uint32 pktlen;
4713
+ info_buf_post_msg_t *infobuf_post;
4714
+ uint8 *infobuf_post_tmp;
4715
+ void *p;
4716
+ void* msg_start;
4717
+ uint8 i = 0;
4718
+ dmaaddr_t pa;
4719
+ int16 count = 0;
4720
+
4721
+ if (ring == NULL)
4722
+ return 0;
4723
+
4724
+ if (ring->inited != TRUE)
4725
+ return 0;
4726
+ if (ring == dhd->prot->h2dring_info_subn) {
4727
+ if (prot->max_infobufpost == 0)
4728
+ return 0;
4729
+
4730
+ count = prot->max_infobufpost - prot->infobufpost;
4731
+ }
4732
+ else {
4733
+ DHD_ERROR(("Unknown ring\n"));
4734
+ return 0;
4735
+ }
4736
+
4737
+ if (count <= 0) {
4738
+ DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
4739
+ __FUNCTION__));
4740
+ return 0;
4741
+ }
4742
+
4743
+ /* grab the ring lock to allocate pktid and post on ring */
4744
+ DHD_RING_LOCK(ring->ring_lock, flags);
4745
+
4746
+ /* Claim space for exactly 'count' messages, for mitigation purposes */
4747
+ msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
4748
+
4749
+ if (msg_start == NULL) {
4750
+ DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4751
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
4752
+ return -1;
4753
+ }
4754
+
4755
+ /* if msg_start != NULL, we should have alloced space for at least 1 item */
4756
+ ASSERT(alloced > 0);
4757
+
4758
+ infobuf_post_tmp = (uint8*) msg_start;
4759
+
4760
+ /* loop through each allocated message in the host ring */
4761
+ for (i = 0; i < alloced; i++) {
4762
+ infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
4763
+ /* Create a rx buffer */
4764
+#ifdef DHD_USE_STATIC_CTRLBUF
4765
+ p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4766
+#else
4767
+ p = PKTGET(dhd->osh, pktsz, FALSE);
4768
+#endif /* DHD_USE_STATIC_CTRLBUF */
4769
+ if (p == NULL) {
4770
+ DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
4771
+ dhd->rx_pktgetfail++;
4772
+ break;
4773
+ }
4774
+ pktlen = PKTLEN(dhd->osh, p);
4775
+ if (SECURE_DMA_ENAB(dhd->osh)) {
4776
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4777
+ DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4778
+ }
4779
+#ifndef BCM_SECURE_DMA
4780
+ else
4781
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4782
+#endif /* #ifndef BCM_SECURE_DMA */
4783
+ if (PHYSADDRISZERO(pa)) {
4784
+ if (SECURE_DMA_ENAB(dhd->osh)) {
4785
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4786
+ ring->dma_buf.secdma, 0);
4787
+ }
4788
+ else
4789
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4790
+#ifdef DHD_USE_STATIC_CTRLBUF
4791
+ PKTFREE_STATIC(dhd->osh, p, FALSE);
4792
+#else
4793
+ PKTFREE(dhd->osh, p, FALSE);
4794
+#endif /* DHD_USE_STATIC_CTRLBUF */
4795
+ DHD_ERROR(("Invalid phyaddr 0\n"));
4796
+ ASSERT(0);
4797
+ break;
4798
+ }
4799
+#ifdef DMAMAP_STATS
4800
+ dhd->dma_stats.info_rx++;
4801
+ dhd->dma_stats.info_rx_sz += pktlen;
4802
+#endif /* DMAMAP_STATS */
4803
+ pktlen = PKTLEN(dhd->osh, p);
4804
+
4805
+ /* Common msg header */
4806
+ infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
4807
+ infobuf_post->cmn_hdr.if_id = 0;
4808
+ infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4809
+ infobuf_post->cmn_hdr.flags = ring->current_phase;
4810
+ ring->seqnum++;
4811
+
4812
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
4813
+ pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
4814
+
4815
+#if defined(DHD_PCIE_PKTID)
4816
+ if (pktid == DHD_PKTID_INVALID) {
4817
+ if (SECURE_DMA_ENAB(dhd->osh)) {
4818
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
4819
+ ring->dma_buf.secdma, 0);
4820
+ } else
4821
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
4822
+
4823
+#ifdef DHD_USE_STATIC_CTRLBUF
4824
+ PKTFREE_STATIC(dhd->osh, p, FALSE);
4825
+#else
4826
+ PKTFREE(dhd->osh, p, FALSE);
4827
+#endif /* DHD_USE_STATIC_CTRLBUF */
4828
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4829
+ break;
4830
+ }
4831
+#endif /* DHD_PCIE_PKTID */
4832
+
4833
+ infobuf_post->host_buf_len = htol16((uint16)pktlen);
4834
+ infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4835
+ infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4836
+
4837
+#ifdef DHD_PKTID_AUDIT_RING
4838
+ DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
4839
+#endif /* DHD_PKTID_AUDIT_RING */
4840
+
4841
+ DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
4842
+ infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
4843
+ infobuf_post->host_buf_addr.high_addr));
4844
+
4845
+ infobuf_post->cmn_hdr.request_id = htol32(pktid);
4846
+ /* Move rxbuf_post_tmp to next item */
4847
+ infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
4848
+#ifdef DHD_LBUF_AUDIT
4849
+ PKTAUDIT(dhd->osh, p);
4850
+#endif // endif
4851
+ }
4852
+
4853
+ if (i < alloced) {
4854
+ if (ring->wr < (alloced - i))
4855
+ ring->wr = ring->max_items - (alloced - i);
4856
+ else
4857
+ ring->wr -= (alloced - i);
4858
+
4859
+ alloced = i;
4860
+ if (alloced && ring->wr == 0) {
4861
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
4862
+ ring->current_phase = ring->current_phase ?
4863
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4864
+ }
4865
+ }
4866
+
4867
+ /* Update the write pointer in TCM & ring bell */
4868
+ if (alloced > 0) {
4869
+ if (ring == dhd->prot->h2dring_info_subn) {
4870
+ prot->infobufpost += alloced;
4871
+ }
4872
+ dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4873
+ }
4874
+
4875
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
4876
+
4877
+ return alloced;
4878
+} /* dhd_prot_infobufpost */
30184879
30194880 #ifdef IOCTLRESP_USE_CONSTMEM
30204881 static int
....@@ -3041,10 +4902,6 @@
30414902 dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
30424903 retbuf->len = IOCT_RETBUF_SIZE;
30434904 retbuf->_alloced = retbuf->len + dma_pad;
3044
- /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle.
3045
- * Need to reassign before free to pass the check in dhd_dma_buf_audit().
3046
- */
3047
- retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL);
30484905 }
30494906
30504907 dhd_dma_buf_free(dhd, retbuf);
....@@ -3053,7 +4910,7 @@
30534910 #endif /* IOCTLRESP_USE_CONSTMEM */
30544911
30554912 static int
3056
-dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
4913
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
30574914 {
30584915 void *p;
30594916 uint16 pktsz;
....@@ -3068,16 +4925,33 @@
30684925 uint32 pktid;
30694926 void *map_handle;
30704927 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4928
+ bool non_ioctl_resp_buf = 0;
4929
+ dhd_pkttype_t buf_type;
30714930
30724931 if (dhd->busstate == DHD_BUS_DOWN) {
30734932 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
30744933 return -1;
30754934 }
3076
-
30774935 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
30784936
3079
- if (event_buf) {
3080
- /* Allocate packet for event buffer post */
4937
+ if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
4938
+ buf_type = PKTTYPE_IOCTL_RX;
4939
+ else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
4940
+ buf_type = PKTTYPE_EVENT_RX;
4941
+ else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
4942
+ buf_type = PKTTYPE_TSBUF_RX;
4943
+ else {
4944
+ DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
4945
+ return -1;
4946
+ }
4947
+
4948
+ if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
4949
+ non_ioctl_resp_buf = TRUE;
4950
+ else
4951
+ non_ioctl_resp_buf = FALSE;
4952
+
4953
+ if (non_ioctl_resp_buf) {
4954
+ /* Allocate packet for non-ioctl-response buffer post */
30814955 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
30824956 } else {
30834957 /* Allocate packet for ctrl/ioctl buffer post */
....@@ -3085,7 +4959,7 @@
30854959 }
30864960
30874961 #ifdef IOCTLRESP_USE_CONSTMEM
3088
- if (!event_buf) {
4962
+ if (!non_ioctl_resp_buf) {
30894963 if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
30904964 DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
30914965 return -1;
....@@ -3105,7 +4979,7 @@
31054979 #endif /* DHD_USE_STATIC_CTRLBUF */
31064980 if (p == NULL) {
31074981 DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
3108
- __FUNCTION__, __LINE__, event_buf ?
4982
+ __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
31094983 "EVENT" : "IOCTL RESP"));
31104984 dhd->rx_pktgetfail++;
31114985 return -1;
....@@ -3114,10 +4988,8 @@
31144988 pktlen = PKTLEN(dhd->osh, p);
31154989
31164990 if (SECURE_DMA_ENAB(dhd->osh)) {
3117
- DHD_GENERAL_LOCK(dhd, flags);
31184991 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
31194992 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3120
- DHD_GENERAL_UNLOCK(dhd, flags);
31214993 }
31224994 #ifndef BCM_SECURE_DMA
31234995 else
....@@ -3129,53 +5001,70 @@
31295001 ASSERT(0);
31305002 goto free_pkt_return;
31315003 }
5004
+
5005
+#ifdef DMAMAP_STATS
5006
+ switch (buf_type) {
5007
+#ifndef IOCTLRESP_USE_CONSTMEM
5008
+ case PKTTYPE_IOCTL_RX:
5009
+ dhd->dma_stats.ioctl_rx++;
5010
+ dhd->dma_stats.ioctl_rx_sz += pktlen;
5011
+ break;
5012
+#endif /* !IOCTLRESP_USE_CONSTMEM */
5013
+ case PKTTYPE_EVENT_RX:
5014
+ dhd->dma_stats.event_rx++;
5015
+ dhd->dma_stats.event_rx_sz += pktlen;
5016
+ break;
5017
+ case PKTTYPE_TSBUF_RX:
5018
+ dhd->dma_stats.tsbuf_rx++;
5019
+ dhd->dma_stats.tsbuf_rx_sz += pktlen;
5020
+ break;
5021
+ default:
5022
+ break;
5023
+ }
5024
+#endif /* DMAMAP_STATS */
5025
+
31325026 }
31335027
3134
- DHD_GENERAL_LOCK(dhd, flags);
5028
+ /* grab the ring lock to allocate pktid and post on ring */
5029
+ DHD_RING_LOCK(ring->ring_lock, flags);
31355030
31365031 rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
31375032 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
31385033
31395034 if (rxbuf_post == NULL) {
3140
- DHD_GENERAL_UNLOCK(dhd, flags);
5035
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
31415036 DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
31425037 __FUNCTION__, __LINE__));
31435038
31445039 #ifdef IOCTLRESP_USE_CONSTMEM
3145
- if (event_buf)
5040
+ if (non_ioctl_resp_buf)
31465041 #endif /* IOCTLRESP_USE_CONSTMEM */
31475042 {
31485043 if (SECURE_DMA_ENAB(dhd->osh)) {
3149
- DHD_GENERAL_LOCK(dhd, flags);
31505044 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
31515045 ring->dma_buf.secdma, 0);
3152
- DHD_GENERAL_UNLOCK(dhd, flags);
31535046 } else {
3154
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
5047
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
31555048 }
31565049 }
31575050 goto free_pkt_return;
31585051 }
31595052
31605053 /* CMN msg header */
3161
- if (event_buf) {
3162
- rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
3163
- } else {
3164
- rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
3165
- }
5054
+ rxbuf_post->cmn_hdr.msg_type = msg_type;
31665055
31675056 #ifdef IOCTLRESP_USE_CONSTMEM
3168
- if (!event_buf) {
5057
+ if (!non_ioctl_resp_buf) {
31695058 map_handle = dhd->prot->pktid_map_handle_ioctl;
3170
- pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen,
3171
- DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX);
5059
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5060
+ ring->dma_buf.secdma, buf_type);
31725061 } else
31735062 #endif /* IOCTLRESP_USE_CONSTMEM */
31745063 {
3175
- map_handle = dhd->prot->pktid_map_handle;
3176
- pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5064
+ map_handle = dhd->prot->pktid_ctrl_map;
5065
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
31775066 p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
3178
- event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX);
5067
+ buf_type);
31795068 }
31805069
31815070 if (pktid == DHD_PKTID_INVALID) {
....@@ -3183,47 +5072,52 @@
31835072 ring->wr = ring->max_items - 1;
31845073 } else {
31855074 ring->wr--;
5075
+ if (ring->wr == 0) {
5076
+ ring->current_phase = ring->current_phase ? 0 :
5077
+ BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5078
+ }
31865079 }
3187
- DHD_GENERAL_UNLOCK(dhd, flags);
3188
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
5080
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5081
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5082
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
31895083 goto free_pkt_return;
31905084 }
31915085
3192
-#if defined(DHD_PKTID_AUDIT_RING)
5086
+#ifdef DHD_PKTID_AUDIT_RING
31935087 DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
31945088 #endif /* DHD_PKTID_AUDIT_RING */
31955089
31965090 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
31975091 rxbuf_post->cmn_hdr.if_id = 0;
3198
- rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5092
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
31995093 ring->seqnum++;
5094
+ rxbuf_post->cmn_hdr.flags = ring->current_phase;
32005095
32015096 #if defined(DHD_PCIE_PKTID)
32025097 if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
32035098 if (ring->wr == 0) {
32045099 ring->wr = ring->max_items - 1;
32055100 } else {
3206
- ring->wr--;
5101
+ if (ring->wr == 0) {
5102
+ ring->current_phase = ring->current_phase ? 0 :
5103
+ BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5104
+ }
32075105 }
3208
- DHD_GENERAL_UNLOCK(dhd, flags);
5106
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
32095107 #ifdef IOCTLRESP_USE_CONSTMEM
3210
- if (event_buf)
5108
+ if (non_ioctl_resp_buf)
32115109 #endif /* IOCTLRESP_USE_CONSTMEM */
32125110 {
32135111 if (SECURE_DMA_ENAB(dhd->osh)) {
3214
- DHD_GENERAL_LOCK(dhd, flags);
32155112 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
32165113 ring->dma_buf.secdma, 0);
3217
- DHD_GENERAL_UNLOCK(dhd, flags);
3218
- } else {
3219
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
3220
- }
5114
+ } else
5115
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
32215116 }
32225117 goto free_pkt_return;
32235118 }
32245119 #endif /* DHD_PCIE_PKTID */
32255120
3226
- rxbuf_post->cmn_hdr.flags = 0;
32275121 #ifndef IOCTLRESP_USE_CONSTMEM
32285122 rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
32295123 #else
....@@ -3232,34 +5126,39 @@
32325126 rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
32335127 rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
32345128
5129
+#ifdef DHD_LBUF_AUDIT
5130
+ if (non_ioctl_resp_buf)
5131
+ PKTAUDIT(dhd->osh, p);
5132
+#endif // endif
5133
+
32355134 /* update ring's WR index and ring doorbell to dongle */
32365135 dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
3237
- DHD_GENERAL_UNLOCK(dhd, flags);
5136
+
5137
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
32385138
32395139 return 1;
32405140
32415141 free_pkt_return:
5142
+ if (!non_ioctl_resp_buf) {
32425143 #ifdef IOCTLRESP_USE_CONSTMEM
3243
- if (!event_buf) {
32445144 free_ioctl_return_buffer(dhd, &retbuf);
3245
- } else
5145
+#else
5146
+ dhd_prot_packet_free(dhd, p, buf_type, FALSE);
32465147 #endif /* IOCTLRESP_USE_CONSTMEM */
3247
- {
3248
- dhd_prot_packet_free(dhd, p,
3249
- event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX,
3250
- FALSE);
5148
+ } else {
5149
+ dhd_prot_packet_free(dhd, p, buf_type, FALSE);
32515150 }
32525151
32535152 return -1;
32545153 } /* dhd_prot_rxbufpost_ctrl */
32555154
32565155 static uint16
3257
-dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
5156
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
32585157 {
32595158 uint32 i = 0;
32605159 int32 ret_val;
32615160
3262
- DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
5161
+ DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
32635162
32645163 if (dhd->busstate == DHD_BUS_DOWN) {
32655164 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
....@@ -3267,13 +5166,12 @@
32675166 }
32685167
32695168 while (i < max_to_post) {
3270
- ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
3271
- if (ret_val < 0) {
5169
+ ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
5170
+ if (ret_val < 0)
32725171 break;
3273
- }
32745172 i++;
32755173 }
3276
- DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
5174
+ DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
32775175 return (uint16)i;
32785176 }
32795177
....@@ -3291,7 +5189,7 @@
32915189 return;
32925190 }
32935191 prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
3294
- FALSE, max_to_post);
5192
+ MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
32955193 }
32965194
32975195 static void
....@@ -3302,34 +5200,58 @@
33025200
33035201 max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
33045202 if (max_to_post <= 0) {
3305
- DHD_INFO(("%s: Cannot post more than max event buffers\n",
5203
+ DHD_ERROR(("%s: Cannot post more than max event buffers\n",
33065204 __FUNCTION__));
33075205 return;
33085206 }
33095207 prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
3310
- TRUE, max_to_post);
5208
+ MSG_TYPE_EVENT_BUF_POST, max_to_post);
33115209 }
33125210
3313
-/** called when DHD needs to check for 'receive complete' messages from the dongle */
3314
-bool BCMFASTPATH
3315
-dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
5211
+static int
5212
+dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
33165213 {
5214
+ return 0;
5215
+}
5216
+
5217
+bool BCMFASTPATH
5218
+dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
5219
+{
5220
+ dhd_prot_t *prot = dhd->prot;
33175221 bool more = TRUE;
33185222 uint n = 0;
3319
- msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln;
5223
+ msgbuf_ring_t *ring = prot->d2hring_info_cpln;
5224
+ unsigned long flags;
5225
+
5226
+ if (ring == NULL)
5227
+ return FALSE;
5228
+ if (ring->inited != TRUE)
5229
+ return FALSE;
33205230
33215231 /* Process all the messages - DTOH direction */
33225232 while (!dhd_is_device_removed(dhd)) {
33235233 uint8 *msg_addr;
33245234 uint32 msg_len;
33255235
5236
+ if (dhd_query_bus_erros(dhd)) {
5237
+ more = FALSE;
5238
+ break;
5239
+ }
5240
+
33265241 if (dhd->hang_was_sent) {
33275242 more = FALSE;
33285243 break;
33295244 }
33305245
3331
- /* Get the address of the next message to be read from ring */
5246
+ if (dhd->smmu_fault_occurred) {
5247
+ more = FALSE;
5248
+ break;
5249
+ }
5250
+
5251
+ DHD_RING_LOCK(ring->ring_lock, flags);
5252
+ /* Get the message from ring */
33325253 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5254
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
33335255 if (msg_addr == NULL) {
33345256 more = FALSE;
33355257 break;
....@@ -3339,8 +5261,8 @@
33395261 OSL_PREFETCH(msg_addr);
33405262
33415263 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
3342
- DHD_ERROR(("%s: process %s msg addr %p len %d\n",
3343
- __FUNCTION__, ring->name, msg_addr, msg_len));
5264
+ DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
5265
+ __FUNCTION__, msg_len));
33445266 }
33455267
33465268 /* Update read pointer */
....@@ -3356,6 +5278,536 @@
33565278 return more;
33575279 }
33585280
5281
+#ifdef EWP_EDL
5282
+bool
5283
+dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
5284
+{
5285
+ dhd_prot_t *prot = dhd->prot;
5286
+ msgbuf_ring_t *ring = prot->d2hring_edl;
5287
+ unsigned long flags = 0;
5288
+ uint32 items = 0;
5289
+ uint16 rd = 0;
5290
+ uint16 depth = 0;
5291
+
5292
+ if (ring == NULL)
5293
+ return FALSE;
5294
+ if (ring->inited != TRUE)
5295
+ return FALSE;
5296
+ if (ring->item_len == 0) {
5297
+ DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
5298
+ __FUNCTION__, ring->idx, ring->item_len));
5299
+ return FALSE;
5300
+ }
5301
+
5302
+ if (dhd_query_bus_erros(dhd)) {
5303
+ return FALSE;
5304
+ }
5305
+
5306
+ if (dhd->hang_was_sent) {
5307
+ return FALSE;
5308
+ }
5309
+
5310
+ /* in this DPC context just check if wr index has moved
5311
+ * and schedule deferred context to actually process the
5312
+ * work items.
5313
+ */
5314
+ /* update the write index */
5315
+ DHD_RING_LOCK(ring->ring_lock, flags);
5316
+ if (dhd->dma_d2h_ring_upd_support) {
5317
+ /* DMAing write/read indices supported */
5318
+ ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
5319
+ } else {
5320
+ dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
5321
+ }
5322
+ rd = ring->rd;
5323
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5324
+
5325
+ depth = ring->max_items;
5326
+ /* check for avail space, in number of ring items */
5327
+ items = READ_AVAIL_SPACE(ring->wr, rd, depth);
5328
+ if (items == 0) {
5329
+ /* no work items in edl ring */
5330
+ return FALSE;
5331
+ }
5332
+ if (items > ring->max_items) {
5333
+ DHD_ERROR(("\r\n======================= \r\n"));
5334
+ DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
5335
+ __FUNCTION__, ring, ring->name, ring->max_items, items));
5336
+ DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
5337
+ ring->wr, ring->rd, depth));
5338
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
5339
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
5340
+ DHD_ERROR(("\r\n======================= \r\n"));
5341
+#ifdef SUPPORT_LINKDOWN_RECOVERY
5342
+ if (ring->wr >= ring->max_items) {
5343
+ dhd->bus->read_shm_fail = TRUE;
5344
+ }
5345
+#else
5346
+#ifdef DHD_FW_COREDUMP
5347
+ if (dhd->memdump_enabled) {
5348
+ /* collect core dump */
5349
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
5350
+ dhd_bus_mem_dump(dhd);
5351
+
5352
+ }
5353
+#endif /* DHD_FW_COREDUMP */
5354
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
5355
+ dhd_schedule_reset(dhd);
5356
+
5357
+ return FALSE;
5358
+ }
5359
+
5360
+ if (items > D2HRING_EDL_WATERMARK) {
5361
+ DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
5362
+ " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
5363
+ ring->rd, ring->wr, depth));
5364
+ }
5365
+
5366
+ dhd_schedule_logtrace(dhd->info);
5367
+
5368
+ return FALSE;
5369
+}
5370
+
5371
+/* This is called either from work queue context of 'event_log_dispatcher_work' or
5372
+* from the kthread context of dhd_logtrace_thread
5373
+*/
5374
+int
5375
+dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
5376
+{
5377
+ dhd_prot_t *prot = NULL;
5378
+ msgbuf_ring_t *ring = NULL;
5379
+ int err = 0;
5380
+ unsigned long flags = 0;
5381
+ cmn_msg_hdr_t *msg = NULL;
5382
+ uint8 *msg_addr = NULL;
5383
+ uint32 max_items_to_process = 0, n = 0;
5384
+ uint32 num_items = 0, new_items = 0;
5385
+ uint16 depth = 0;
5386
+ volatile uint16 wr = 0;
5387
+
5388
+ if (!dhd || !dhd->prot)
5389
+ return 0;
5390
+
5391
+ prot = dhd->prot;
5392
+ ring = prot->d2hring_edl;
5393
+ if (!ring || !evt_decode_data) {
5394
+ return 0;
5395
+ }
5396
+
5397
+ if (dhd->hang_was_sent) {
5398
+ return FALSE;
5399
+ }
5400
+
5401
+ DHD_RING_LOCK(ring->ring_lock, flags);
5402
+ ring->curr_rd = ring->rd;
5403
+ wr = ring->wr;
5404
+ depth = ring->max_items;
5405
+ /* check for avail space, in number of ring items
5406
+ * Note, that this will only give the # of items
5407
+ * from rd to wr if wr>=rd, or from rd to ring end
5408
+ * if wr < rd. So in the latter case strictly speaking
5409
+ * not all the items are read. But this is OK, because
5410
+ * these will be processed in the next doorbell as rd
5411
+ * would have wrapped around. Processing in the next
5412
+ * doorbell is acceptable since EDL only contains debug data
5413
+ */
5414
+ num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5415
+
5416
+ if (num_items == 0) {
5417
+ /* no work items in edl ring */
5418
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5419
+ return 0;
5420
+ }
5421
+
5422
+ DHD_INFO(("%s: EDL work items [%u] available \n",
5423
+ __FUNCTION__, num_items));
5424
+
5425
+ /* if space is available, calculate address to be read */
5426
+ msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
5427
+
5428
+ max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
5429
+
5430
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5431
+
5432
+ /* Prefetch data to populate the cache */
5433
+ OSL_PREFETCH(msg_addr);
5434
+
5435
+ n = max_items_to_process;
5436
+ while (n > 0) {
5437
+ msg = (cmn_msg_hdr_t *)msg_addr;
5438
+ /* wait for DMA of work item to complete */
5439
+ if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
5440
+ DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
5441
+ "ring; err = %d\n", __FUNCTION__, err));
5442
+ }
5443
+
5444
+ /*
5445
+ * Update the curr_rd to the current index in the ring, from where
5446
+ * the work item is fetched. This way if the fetched work item
5447
+ * fails in LIVELOCK, we can print the exact read index in the ring
5448
+ * that points to the corrupted work item.
5449
+ */
5450
+ if ((ring->curr_rd + 1) >= ring->max_items) {
5451
+ ring->curr_rd = 0;
5452
+ } else {
5453
+ ring->curr_rd += 1;
5454
+ }
5455
+
5456
+ if (err != BCME_OK) {
5457
+ return 0;
5458
+ }
5459
+
5460
+ /* process the edl work item, i.e, the event log */
5461
+ err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
5462
+
5463
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
5464
+ OSL_SLEEP(0);
5465
+
5466
+ /* Prefetch data to populate the cache */
5467
+ OSL_PREFETCH(msg_addr + ring->item_len);
5468
+
5469
+ msg_addr += ring->item_len;
5470
+ --n;
5471
+ }
5472
+
5473
+ DHD_RING_LOCK(ring->ring_lock, flags);
5474
+ /* update host ring read pointer */
5475
+ if ((ring->rd + max_items_to_process) >= ring->max_items)
5476
+ ring->rd = 0;
5477
+ else
5478
+ ring->rd += max_items_to_process;
5479
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5480
+
5481
+ /* Now after processing max_items_to_process update dongle rd index.
5482
+ * The TCM rd index is updated only if bus is not
5483
+ * in D3. Else, the rd index is updated from resume
5484
+ * context in - 'dhdpcie_bus_suspend'
5485
+ */
5486
+ DHD_GENERAL_LOCK(dhd, flags);
5487
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
5488
+ DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5489
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
5490
+ DHD_GENERAL_UNLOCK(dhd, flags);
5491
+ } else {
5492
+ DHD_GENERAL_UNLOCK(dhd, flags);
5493
+ DHD_EDL_RING_TCM_RD_UPDATE(dhd);
5494
+ }
5495
+
5496
+ /* if num_items > bound, then anyway we will reschedule and
5497
+ * this function runs again, so that if in between the DPC has
5498
+ * updated the wr index, then the updated wr is read. But if
5499
+ * num_items <= bound, and if DPC executes and updates the wr index
5500
+ * when the above while loop is running, then the updated 'wr' index
5501
+ * needs to be re-read from here, If we don't do so, then till
5502
+ * the next time this function is scheduled
5503
+ * the event logs will not be processed.
5504
+ */
5505
+ if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
5506
+ /* read the updated wr index if reqd. and update num_items */
5507
+ DHD_RING_LOCK(ring->ring_lock, flags);
5508
+ if (wr != (volatile uint16)ring->wr) {
5509
+ wr = (volatile uint16)ring->wr;
5510
+ new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5511
+ DHD_INFO(("%s: new items [%u] avail in edl\n",
5512
+ __FUNCTION__, new_items));
5513
+ num_items += new_items;
5514
+ }
5515
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5516
+ }
5517
+
5518
+ /* if # of items processed is less than num_items, need to re-schedule
5519
+ * the deferred ctx
5520
+ */
5521
+ if (max_items_to_process < num_items) {
5522
+ DHD_INFO(("%s: EDL bound hit / new items found, "
5523
+ "items processed=%u; remaining=%u, "
5524
+ "resched deferred ctx...\n",
5525
+ __FUNCTION__, max_items_to_process,
5526
+ num_items - max_items_to_process));
5527
+ return (num_items - max_items_to_process);
5528
+ }
5529
+
5530
+ return 0;
5531
+
5532
+}
5533
+
5534
+void
5535
+dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
5536
+{
5537
+ dhd_prot_t *prot = NULL;
5538
+ unsigned long flags = 0;
5539
+ msgbuf_ring_t *ring = NULL;
5540
+
5541
+ if (!dhd)
5542
+ return;
5543
+
5544
+ prot = dhd->prot;
5545
+ if (!prot || !prot->d2hring_edl)
5546
+ return;
5547
+
5548
+ ring = prot->d2hring_edl;
5549
+ DHD_RING_LOCK(ring->ring_lock, flags);
5550
+ dhd_prot_upd_read_idx(dhd, ring);
5551
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5552
+}
5553
+#endif /* EWP_EDL */
5554
+
5555
+/* called when DHD needs to check for 'receive complete' messages from the dongle */
5556
+bool BCMFASTPATH
5557
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5558
+{
5559
+ bool more = FALSE;
5560
+ uint n = 0;
5561
+ dhd_prot_t *prot = dhd->prot;
5562
+ msgbuf_ring_t *ring;
5563
+ uint16 item_len;
5564
+ host_rxbuf_cmpl_t *msg = NULL;
5565
+ uint8 *msg_addr;
5566
+ uint32 msg_len;
5567
+ uint16 pkt_cnt, pkt_cnt_newidx;
5568
+ unsigned long flags;
5569
+ dmaaddr_t pa;
5570
+ uint32 len;
5571
+ void *dmah;
5572
+ void *secdma;
5573
+ int ifidx = 0, if_newidx = 0;
5574
+ void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
5575
+ uint32 pktid;
5576
+ int i;
5577
+ uint8 sync;
5578
+ ts_timestamp_t *ts;
5579
+
5580
+ BCM_REFERENCE(ts);
5581
+#ifdef DHD_HP2P
5582
+ if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
5583
+ ring = prot->d2hring_hp2p_rxcpl;
5584
+ else
5585
+#endif /* DHD_HP2P */
5586
+ ring = &prot->d2hring_rx_cpln;
5587
+ item_len = ring->item_len;
5588
+ while (1) {
5589
+ if (dhd_is_device_removed(dhd))
5590
+ break;
5591
+
5592
+ if (dhd_query_bus_erros(dhd))
5593
+ break;
5594
+
5595
+ if (dhd->hang_was_sent)
5596
+ break;
5597
+
5598
+ if (dhd->smmu_fault_occurred) {
5599
+ break;
5600
+ }
5601
+
5602
+ pkt_cnt = 0;
5603
+ pktqhead = pkt_newidx = NULL;
5604
+ pkt_cnt_newidx = 0;
5605
+
5606
+ DHD_RING_LOCK(ring->ring_lock, flags);
5607
+
5608
+ /* Get the address of the next message to be read from ring */
5609
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5610
+ if (msg_addr == NULL) {
5611
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5612
+ break;
5613
+ }
5614
+
5615
+ while (msg_len > 0) {
5616
+ msg = (host_rxbuf_cmpl_t *)msg_addr;
5617
+
5618
+ /* Wait until DMA completes, then fetch msg_type */
5619
+ sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
5620
+ /*
5621
+ * Update the curr_rd to the current index in the ring, from where
5622
+ * the work item is fetched. This way if the fetched work item
5623
+ * fails in LIVELOCK, we can print the exact read index in the ring
5624
+ * that points to the corrupted work item.
5625
+ */
5626
+ if ((ring->curr_rd + 1) >= ring->max_items) {
5627
+ ring->curr_rd = 0;
5628
+ } else {
5629
+ ring->curr_rd += 1;
5630
+ }
5631
+
5632
+ if (!sync) {
5633
+ msg_len -= item_len;
5634
+ msg_addr += item_len;
5635
+ continue;
5636
+ }
5637
+
5638
+ pktid = ltoh32(msg->cmn_hdr.request_id);
5639
+
5640
+#ifdef DHD_PKTID_AUDIT_RING
5641
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
5642
+ DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
5643
+#endif /* DHD_PKTID_AUDIT_RING */
5644
+
5645
+ pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
5646
+ len, dmah, secdma, PKTTYPE_DATA_RX);
5647
+ if (!pkt) {
5648
+ msg_len -= item_len;
5649
+ msg_addr += item_len;
5650
+ continue;
5651
+ }
5652
+
5653
+ if (SECURE_DMA_ENAB(dhd->osh))
5654
+ SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
5655
+ dmah, secdma, 0);
5656
+ else
5657
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5658
+
5659
+#ifdef DMAMAP_STATS
5660
+ dhd->dma_stats.rxdata--;
5661
+ dhd->dma_stats.rxdata_sz -= len;
5662
+#endif /* DMAMAP_STATS */
5663
+ DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
5664
+ "pktdata %p, metalen %d\n",
5665
+ ltoh32(msg->cmn_hdr.request_id),
5666
+ ltoh16(msg->data_offset),
5667
+ ltoh16(msg->data_len), msg->cmn_hdr.if_id,
5668
+ msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
5669
+ ltoh16(msg->metadata_len)));
5670
+
5671
+ pkt_cnt++;
5672
+ msg_len -= item_len;
5673
+ msg_addr += item_len;
5674
+
5675
+#if DHD_DBG_SHOW_METADATA
5676
+ if (prot->metadata_dbg && prot->rx_metadata_offset &&
5677
+ msg->metadata_len) {
5678
+ uchar *ptr;
5679
+ ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
5680
+ /* header followed by data */
5681
+ bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
5682
+ dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
5683
+ }
5684
+#endif /* DHD_DBG_SHOW_METADATA */
5685
+
5686
+ /* data_offset from buf start */
5687
+ if (ltoh16(msg->data_offset)) {
5688
+ /* data offset given from dongle after split rx */
5689
+ PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
5690
+ }
5691
+ else if (prot->rx_dataoffset) {
5692
+ /* DMA RX offset updated through shared area */
5693
+ PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
5694
+ }
5695
+ /* Actual length of the packet */
5696
+ PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
5697
+
5698
+#if defined(WL_MONITOR)
5699
+ if (dhd_monitor_enabled(dhd, ifidx)) {
5700
+ if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
5701
+ dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
5702
+ continue;
5703
+ } else {
5704
+ DHD_ERROR(("Received non 802.11 packet, "
5705
+ "when monitor mode is enabled\n"));
5706
+ }
5707
+ }
5708
+#endif /* WL_MONITOR */
5709
+
5710
+ if (msg->flags & BCMPCIE_PKT_FLAGS_NO_FORWARD) {
5711
+ DHD_PKT_FLAGS_SET_NO_FWD(pkt);
5712
+ }
5713
+
5714
+ if (!pktqhead) {
5715
+ pktqhead = prevpkt = pkt;
5716
+ ifidx = msg->cmn_hdr.if_id;
5717
+ } else {
5718
+ if (ifidx != msg->cmn_hdr.if_id) {
5719
+ pkt_newidx = pkt;
5720
+ if_newidx = msg->cmn_hdr.if_id;
5721
+ pkt_cnt--;
5722
+ pkt_cnt_newidx = 1;
5723
+ break;
5724
+ } else {
5725
+ PKTSETNEXT(dhd->osh, prevpkt, pkt);
5726
+ prevpkt = pkt;
5727
+ }
5728
+ }
5729
+
5730
+#ifdef DHD_HP2P
5731
+ if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
5732
+#ifdef DHD_HP2P_DEBUG
5733
+ bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
5734
+#endif /* DHD_HP2P_DEBUG */
5735
+ dhd_update_hp2p_rxstats(dhd, msg);
5736
+ }
5737
+#endif /* DHD_HP2P */
5738
+
5739
+#ifdef DHD_LBUF_AUDIT
5740
+ PKTAUDIT(dhd->osh, pkt);
5741
+#endif // endif
5742
+ }
5743
+
5744
+ /* roll back read pointer for unprocessed message */
5745
+ if (msg_len > 0) {
5746
+ if (ring->rd < msg_len / item_len)
5747
+ ring->rd = ring->max_items - msg_len / item_len;
5748
+ else
5749
+ ring->rd -= msg_len / item_len;
5750
+ }
5751
+
5752
+ /* Update read pointer */
5753
+ dhd_prot_upd_read_idx(dhd, ring);
5754
+
5755
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5756
+
5757
+ pkt = pktqhead;
5758
+ for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
5759
+ nextpkt = PKTNEXT(dhd->osh, pkt);
5760
+ PKTSETNEXT(dhd->osh, pkt, NULL);
5761
+#ifdef DHD_LB_RXP
5762
+ dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
5763
+#elif defined(DHD_RX_CHAINING)
5764
+ dhd_rxchain_frame(dhd, pkt, ifidx);
5765
+#else
5766
+ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5767
+#endif /* DHD_LB_RXP */
5768
+ }
5769
+
5770
+ if (pkt_newidx) {
5771
+#ifdef DHD_LB_RXP
5772
+ dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
5773
+#elif defined(DHD_RX_CHAINING)
5774
+ dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
5775
+#else
5776
+ dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
5777
+#endif /* DHD_LB_RXP */
5778
+ }
5779
+
5780
+ pkt_cnt += pkt_cnt_newidx;
5781
+
5782
+ /* Post another set of rxbufs to the device */
5783
+ dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
5784
+
5785
+#ifdef DHD_RX_CHAINING
5786
+ dhd_rxchain_commit(dhd);
5787
+#endif // endif
5788
+
5789
+ /* After batch processing, check RX bound */
5790
+ n += pkt_cnt;
5791
+ if (n >= bound) {
5792
+ more = TRUE;
5793
+ break;
5794
+ }
5795
+ }
5796
+
5797
+ /* Call lb_dispatch only if packets are queued */
5798
+ if (n &&
5799
+#ifdef WL_MONITOR
5800
+ !(dhd_monitor_enabled(dhd, ifidx)) &&
5801
+#endif /* WL_MONITOR */
5802
+ TRUE) {
5803
+ DHD_LB_DISPATCH_RX_COMPL(dhd);
5804
+ DHD_LB_DISPATCH_RX_PROCESS(dhd);
5805
+ }
5806
+
5807
+ return more;
5808
+
5809
+}
5810
+
33595811 /**
33605812 * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
33615813 */
....@@ -3364,8 +5816,12 @@
33645816 {
33655817 msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
33665818
5819
+ if (ring == NULL) {
5820
+ DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
5821
+ return;
5822
+ }
33675823 /* Update read pointer */
3368
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
5824
+ if (dhd->dma_d2h_ring_upd_support) {
33695825 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
33705826 }
33715827
....@@ -3378,24 +5834,45 @@
33785834
33795835 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
33805836 bool BCMFASTPATH
3381
-dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
5837
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
33825838 {
33835839 bool more = TRUE;
33845840 uint n = 0;
3385
- msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
5841
+ msgbuf_ring_t *ring;
5842
+ unsigned long flags;
5843
+
5844
+#ifdef DHD_HP2P
5845
+ if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
5846
+ ring = dhd->prot->d2hring_hp2p_txcpl;
5847
+ else
5848
+#endif /* DHD_HP2P */
5849
+ ring = &dhd->prot->d2hring_tx_cpln;
33865850
33875851 /* Process all the messages - DTOH direction */
33885852 while (!dhd_is_device_removed(dhd)) {
33895853 uint8 *msg_addr;
33905854 uint32 msg_len;
33915855
5856
+ if (dhd_query_bus_erros(dhd)) {
5857
+ more = FALSE;
5858
+ break;
5859
+ }
5860
+
33925861 if (dhd->hang_was_sent) {
33935862 more = FALSE;
33945863 break;
33955864 }
33965865
5866
+ if (dhd->smmu_fault_occurred) {
5867
+ more = FALSE;
5868
+ break;
5869
+ }
5870
+
5871
+ DHD_RING_LOCK(ring->ring_lock, flags);
33975872 /* Get the address of the next message to be read from ring */
33985873 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5874
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5875
+
33995876 if (msg_addr == NULL) {
34005877 more = FALSE;
34015878 break;
....@@ -3419,7 +5896,44 @@
34195896 }
34205897 }
34215898
5899
+ DHD_LB_DISPATCH_TX_COMPL(dhd);
5900
+
34225901 return more;
5902
+}
5903
+
5904
+int BCMFASTPATH
5905
+dhd_prot_process_trapbuf(dhd_pub_t *dhd)
5906
+{
5907
+ uint32 data;
5908
+ dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
5909
+
5910
+ /* Interrupts can come in before this struct
5911
+ * has been initialized.
5912
+ */
5913
+ if (trap_addr->va == NULL) {
5914
+ DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
5915
+ return 0;
5916
+ }
5917
+
5918
+ OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
5919
+ data = *(uint32 *)(trap_addr->va);
5920
+
5921
+ if (data & D2H_DEV_FWHALT) {
5922
+ DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
5923
+
5924
+ if (data & D2H_DEV_EXT_TRAP_DATA)
5925
+ {
5926
+ if (dhd->extended_trap_data) {
5927
+ OSL_CACHE_INV((void *)trap_addr->va,
5928
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5929
+ memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
5930
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5931
+ }
5932
+ DHD_ERROR(("Extended trap data available\n"));
5933
+ }
5934
+ return data;
5935
+ }
5936
+ return 0;
34235937 }
34245938
34255939 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
....@@ -3428,18 +5942,30 @@
34285942 {
34295943 dhd_prot_t *prot = dhd->prot;
34305944 msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
5945
+ unsigned long flags;
34315946
34325947 /* Process all the messages - DTOH direction */
34335948 while (!dhd_is_device_removed(dhd)) {
34345949 uint8 *msg_addr;
34355950 uint32 msg_len;
34365951
5952
+ if (dhd_query_bus_erros(dhd)) {
5953
+ break;
5954
+ }
5955
+
34375956 if (dhd->hang_was_sent) {
34385957 break;
34395958 }
34405959
5960
+ if (dhd->smmu_fault_occurred) {
5961
+ break;
5962
+ }
5963
+
5964
+ DHD_RING_LOCK(ring->ring_lock, flags);
34415965 /* Get the address of the next message to be read from ring */
34425966 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5967
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
5968
+
34435969 if (msg_addr == NULL) {
34445970 break;
34455971 }
....@@ -3466,7 +5992,7 @@
34665992 static int BCMFASTPATH
34675993 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
34685994 {
3469
- int buf_len = len;
5995
+ uint32 buf_len = len;
34705996 uint16 item_len;
34715997 uint8 msg_type;
34725998 cmn_msg_hdr_t *msg = NULL;
....@@ -3475,7 +6001,7 @@
34756001 ASSERT(ring);
34766002 item_len = ring->item_len;
34776003 if (item_len == 0) {
3478
- DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n",
6004
+ DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
34796005 __FUNCTION__, ring->idx, item_len, buf_len));
34806006 return BCME_ERROR;
34816007 }
....@@ -3486,14 +6012,27 @@
34866012 goto done;
34876013 }
34886014
6015
+ if (dhd->smmu_fault_occurred) {
6016
+ ret = BCME_ERROR;
6017
+ goto done;
6018
+ }
6019
+
34896020 msg = (cmn_msg_hdr_t *)buf;
34906021
3491
-#if defined(PCIE_D2H_SYNC)
34926022 /* Wait until DMA completes, then fetch msg_type */
34936023 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
3494
-#else
3495
- msg_type = msg->msg_type;
3496
-#endif /* !PCIE_D2H_SYNC */
6024
+
6025
+ /*
6026
+ * Update the curr_rd to the current index in the ring, from where
6027
+ * the work item is fetched. This way if the fetched work item
6028
+ * fails in LIVELOCK, we can print the exact read index in the ring
6029
+ * that points to the corrupted work item.
6030
+ */
6031
+ if ((ring->curr_rd + 1) >= ring->max_items) {
6032
+ ring->curr_rd = 0;
6033
+ } else {
6034
+ ring->curr_rd += 1;
6035
+ }
34976036
34986037 /* Prefetch data to populate the cache */
34996038 OSL_PREFETCH(buf + item_len);
....@@ -3508,12 +6047,24 @@
35086047
35096048 ASSERT(msg_type < DHD_PROT_FUNCS);
35106049 if (msg_type >= DHD_PROT_FUNCS) {
3511
- DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n",
6050
+ DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
35126051 __FUNCTION__, msg_type, item_len, buf_len));
35136052 ret = BCME_ERROR;
35146053 goto done;
35156054 }
35166055
6056
+ if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
6057
+ if (ring == dhd->prot->d2hring_info_cpln) {
6058
+ if (!dhd->prot->infobufpost) {
6059
+ DHD_ERROR(("infobuf posted are zero,"
6060
+ "but there is a completion\n"));
6061
+ goto done;
6062
+ }
6063
+ dhd->prot->infobufpost--;
6064
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
6065
+ dhd_prot_process_infobuf_complete(dhd, buf);
6066
+ }
6067
+ } else
35176068 if (table_lookup[msg_type]) {
35186069 table_lookup[msg_type](dhd, buf);
35196070 }
....@@ -3530,10 +6081,8 @@
35306081
35316082 #ifdef DHD_RX_CHAINING
35326083 dhd_rxchain_commit(dhd);
3533
-#endif
3534
-#if defined(DHD_LB)
3535
- dhd_lb_dispatch(dhd, ring->idx);
3536
-#endif
6084
+#endif // endif
6085
+
35376086 return ret;
35386087 } /* dhd_prot_process_msgtype */
35396088
....@@ -3547,10 +6096,76 @@
35476096 static void
35486097 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
35496098 {
3550
- pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg;
6099
+ pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
6100
+ uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
6101
+ uint16 status = ltoh16(ring_status->compl_hdr.status);
6102
+ uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
6103
+
35516104 DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
3552
- ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
3553
- ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
6105
+ request_id, status, ring_id, ltoh16(ring_status->write_idx)));
6106
+
6107
+ if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
6108
+ return;
6109
+ if (status == BCMPCIE_BAD_PHASE) {
6110
+ /* bad phase reported by the dongle */
6111
+ DHD_ERROR(("Bad phase\n"));
6112
+ }
6113
+ if (status != BCMPCIE_BADOPTION)
6114
+ return;
6115
+
6116
+ if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
6117
+ if (dhd->prot->h2dring_info_subn != NULL) {
6118
+ if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
6119
+ DHD_ERROR(("H2D ring create failed for info ring\n"));
6120
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
6121
+ }
6122
+ else
6123
+ DHD_ERROR(("ring create ID for a ring, create not pending\n"));
6124
+ } else {
6125
+ DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
6126
+ }
6127
+ }
6128
+ else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
6129
+ if (dhd->prot->d2hring_info_cpln != NULL) {
6130
+ if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
6131
+ DHD_ERROR(("D2H ring create failed for info ring\n"));
6132
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
6133
+ }
6134
+ else
6135
+ DHD_ERROR(("ring create ID for info ring, create not pending\n"));
6136
+ } else {
6137
+ DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
6138
+ }
6139
+ }
6140
+#ifdef DHD_HP2P
6141
+ else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
6142
+ if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
6143
+ if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
6144
+ DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
6145
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
6146
+ }
6147
+ else
6148
+ DHD_ERROR(("ring create ID for a ring, create not pending\n"));
6149
+ } else {
6150
+ DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
6151
+ }
6152
+ }
6153
+ else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
6154
+ if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
6155
+ if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
6156
+ DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
6157
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
6158
+ }
6159
+ else
6160
+ DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
6161
+ } else {
6162
+ DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
6163
+ }
6164
+ }
6165
+#endif /* DHD_HP2P */
6166
+ else {
6167
+ DHD_ERROR(("don;t know how to pair with original request\n"));
6168
+ }
35546169 /* How do we track this to pair it with ??? */
35556170 return;
35566171 }
....@@ -3575,19 +6190,26 @@
35756190 static void
35766191 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
35776192 {
3578
- uint32 pktid;
35796193 ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
35806194 unsigned long flags;
3581
-
3582
- pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
6195
+#if defined(DHD_PKTID_AUDIT_RING)
6196
+ uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
6197
+#endif // endif
35836198
35846199 #if defined(DHD_PKTID_AUDIT_RING)
3585
- /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */
6200
+ /* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
35866201 if (pktid != DHD_IOCTL_REQ_PKTID) {
3587
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3588
- DHD_TEST_IS_ALLOC);
6202
+#ifndef IOCTLRESP_USE_CONSTMEM
6203
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6204
+ DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6205
+#else
6206
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
6207
+ DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6208
+#endif /* !IOCTLRESP_USE_CONSTMEM */
35896209 }
3590
-#endif /* DHD_PKTID_AUDIT_RING */
6210
+#endif // endif
6211
+
6212
+ dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
35916213
35926214 DHD_GENERAL_LOCK(dhd, flags);
35936215 if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
....@@ -3596,6 +6218,8 @@
35966218 } else {
35976219 DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
35986220 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6221
+ prhex("dhd_prot_ioctack_process:",
6222
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
35996223 }
36006224 DHD_GENERAL_UNLOCK(dhd, flags);
36016225
....@@ -3618,28 +6242,46 @@
36186242 unsigned long flags;
36196243 dhd_dma_buf_t retbuf;
36206244
6245
+ /* Check for ioctl timeout induce flag, which is set by firing
6246
+ * dhd iovar to induce IOCTL timeout. If flag is set,
6247
+ * return from here, which results in to IOCTL timeout.
6248
+ */
6249
+ if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
6250
+ DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
6251
+ return;
6252
+ }
6253
+
36216254 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
36226255
36236256 pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
36246257
36256258 #if defined(DHD_PKTID_AUDIT_RING)
36266259 #ifndef IOCTLRESP_USE_CONSTMEM
3627
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, DHD_DUPLICATE_FREE);
6260
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
6261
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
36286262 #else
3629
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id, DHD_DUPLICATE_FREE);
6263
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
6264
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
36306265 #endif /* !IOCTLRESP_USE_CONSTMEM */
3631
-#endif /* DHD_PKTID_AUDIT_RING */
6266
+#endif // endif
36326267
36336268 DHD_GENERAL_LOCK(dhd, flags);
36346269 if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
36356270 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
36366271 DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
36376272 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
3638
- /* reset ioctl state */
3639
- prot->ioctl_state = 0;
6273
+ prhex("dhd_prot_ioctcmplt_process:",
6274
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
36406275 DHD_GENERAL_UNLOCK(dhd, flags);
36416276 return;
36426277 }
6278
+
6279
+ dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
6280
+
6281
+ /* Clear Response pending bit */
6282
+ prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
6283
+ DHD_GENERAL_UNLOCK(dhd, flags);
6284
+
36436285 #ifndef IOCTLRESP_USE_CONSTMEM
36446286 pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
36456287 #else
....@@ -3647,21 +6289,34 @@
36476289 pkt = retbuf.va;
36486290 #endif /* !IOCTLRESP_USE_CONSTMEM */
36496291 if (!pkt) {
3650
- prot->ioctl_state = 0;
3651
- DHD_GENERAL_UNLOCK(dhd, flags);
36526292 DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
6293
+ prhex("dhd_prot_ioctcmplt_process:",
6294
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
36536295 return;
36546296 }
3655
- DHD_GENERAL_UNLOCK(dhd, flags);
36566297
36576298 prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
36586299 prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
36596300 xt_id = ltoh16(ioct_resp->trans_id);
3660
- if (xt_id != prot->ioctl_trans_id) {
6301
+
6302
+ if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
6303
+ DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
6304
+ __FUNCTION__, xt_id, prot->ioctl_trans_id,
6305
+ prot->curr_ioctl_cmd, ioct_resp->cmd));
6306
+ dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
6307
+ dhd_prot_debug_info_print(dhd);
6308
+#ifdef DHD_FW_COREDUMP
6309
+ if (dhd->memdump_enabled) {
6310
+ /* collect core dump */
6311
+ dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
6312
+ dhd_bus_mem_dump(dhd);
6313
+ }
6314
+#else
36616315 ASSERT(0);
6316
+#endif /* DHD_FW_COREDUMP */
6317
+ dhd_schedule_reset(dhd);
36626318 goto exit;
36636319 }
3664
-
36656320 DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
36666321 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
36676322
....@@ -3683,6 +6338,31 @@
36836338 #else
36846339 free_ioctl_return_buffer(dhd, &retbuf);
36856340 #endif /* !IOCTLRESP_USE_CONSTMEM */
6341
+
6342
+ /* Post another ioctl buf to the device */
6343
+ if (prot->cur_ioctlresp_bufs_posted > 0) {
6344
+ prot->cur_ioctlresp_bufs_posted--;
6345
+ }
6346
+
6347
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
6348
+}
6349
+
6350
+int
6351
+dhd_prot_check_tx_resource(dhd_pub_t *dhd)
6352
+{
6353
+ return dhd->prot->no_tx_resource;
6354
+}
6355
+
6356
+void
6357
+dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
6358
+{
6359
+ dhd->prot->pktid_txq_stop_cnt++;
6360
+}
6361
+
6362
+void
6363
+dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
6364
+{
6365
+ dhd->prot->pktid_txq_start_cnt++;
36866366 }
36876367
36886368 /** called on MSG_TYPE_TX_STATUS message received from dongle */
....@@ -3693,45 +6373,94 @@
36936373 host_txbuf_cmpl_t * txstatus;
36946374 unsigned long flags;
36956375 uint32 pktid;
3696
- void *pkt = NULL;
3697
- ulong pa;
6376
+ void *pkt;
6377
+ dmaaddr_t pa;
36986378 uint32 len;
36996379 void *dmah;
37006380 void *secdma;
6381
+ bool pkt_fate;
6382
+ msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
6383
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
6384
+ flow_info_t *flow_info;
6385
+ uint64 tx_status_latency;
6386
+#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
6387
+#if defined(TX_STATUS_LATENCY_STATS)
6388
+ flow_ring_node_t *flow_ring_node;
6389
+ uint16 flowid;
6390
+#endif // endif
6391
+ ts_timestamp_t *ts;
6392
+
6393
+ BCM_REFERENCE(ts);
6394
+ txstatus = (host_txbuf_cmpl_t *)msg;
6395
+#if defined(TX_STATUS_LATENCY_STATS)
6396
+ flowid = txstatus->compl_hdr.flow_ring_id;
6397
+ flow_ring_node = DHD_FLOW_RING(dhd, flowid);
6398
+#endif // endif
37016399
37026400 /* locks required to protect circular buffer accesses */
3703
- DHD_GENERAL_LOCK(dhd, flags);
3704
-
3705
- txstatus = (host_txbuf_cmpl_t *)msg;
6401
+ DHD_RING_LOCK(ring->ring_lock, flags);
37066402 pktid = ltoh32(txstatus->cmn_hdr.request_id);
6403
+ pkt_fate = TRUE;
37076404
37086405 #if defined(DHD_PKTID_AUDIT_RING)
3709
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3710
- DHD_DUPLICATE_FREE);
3711
-#endif /* DHD_PKTID_AUDIT_RING */
6406
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
6407
+ DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
6408
+#endif // endif
37126409
37136410 DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
3714
- if (prot->active_tx_count) {
3715
- prot->active_tx_count--;
3716
-
3717
- /* Release the Lock when no more tx packets are pending */
3718
- if (prot->active_tx_count == 0)
3719
- DHD_TXFL_WAKE_UNLOCK(dhd);
3720
-
3721
- } else {
6411
+ if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
37226412 DHD_ERROR(("Extra packets are freed\n"));
37236413 }
3724
-
37256414 ASSERT(pktid != 0);
37266415
6416
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6417
+ pa, len, dmah, secdma, PKTTYPE_DATA_TX);
6418
+ if (!pkt) {
6419
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
6420
+ DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
6421
+ prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
6422
+#ifdef DHD_FW_COREDUMP
6423
+ if (dhd->memdump_enabled) {
6424
+ /* collect core dump */
6425
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
6426
+ dhd_bus_mem_dump(dhd);
6427
+ }
6428
+#else
6429
+ ASSERT(0);
6430
+#endif /* DHD_FW_COREDUMP */
6431
+ return;
6432
+ }
6433
+
6434
+ if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
6435
+ dhd->prot->no_tx_resource = FALSE;
6436
+ dhd_bus_start_queue(dhd->bus);
6437
+ }
6438
+
6439
+ if (SECURE_DMA_ENAB(dhd->osh)) {
6440
+ int offset = 0;
6441
+ BCM_REFERENCE(offset);
6442
+
6443
+ if (dhd->prot->tx_metadata_offset)
6444
+ offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
6445
+ SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
6446
+ (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
6447
+ secdma, offset);
6448
+ } else {
6449
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6450
+ }
6451
+
6452
+#ifdef TX_STATUS_LATENCY_STATS
6453
+ /* update the tx status latency for flowid */
6454
+ flow_info = &flow_ring_node->flow_info;
6455
+ tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
6456
+ flow_info->cum_tx_status_latency += tx_status_latency;
6457
+ flow_info->num_tx_status++;
6458
+#endif /* TX_STATUS_LATENCY_STATS */
37276459 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
37286460 {
37296461 int elem_ix;
37306462 void **elem;
37316463 bcm_workq_t *workq;
3732
-
3733
- pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
3734
- pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
37356464
37366465 workq = &prot->tx_compl_prod;
37376466 /*
....@@ -3763,66 +6492,72 @@
37636492 }
37646493
37656494 DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
3766
- __FUNCTION__, pkt, prot->tx_compl_prod_sync));
6495
+ __FUNCTION__, pkt, prot->tx_compl_prod_sync));
37676496
3768
- DHD_GENERAL_UNLOCK(dhd, flags);
6497
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
37696498 return;
3770
- }
6499
+ }
37716500
37726501 workq_ring_full:
37736502
37746503 #endif /* !DHD_LB_TXC */
37756504
3776
- /*
3777
- * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is
3778
- * defined but the tx_compl queue is full.
3779
- */
3780
- if (pkt == NULL) {
3781
- pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
3782
- pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
6505
+#ifdef DMAMAP_STATS
6506
+ dhd->dma_stats.txdata--;
6507
+ dhd->dma_stats.txdata_sz -= len;
6508
+#endif /* DMAMAP_STATS */
6509
+ pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
6510
+ ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
6511
+#ifdef DHD_PKT_LOGGING
6512
+ if (dhd->d11_tx_status) {
6513
+ uint16 status = ltoh16(txstatus->compl_hdr.status) &
6514
+ WLFC_CTL_PKTFLAG_MASK;
6515
+ uint32 pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
6516
+ DHD_PKTLOG_TXS(dhd, pkt, pktid, status);
6517
+ dhd_dump_pkt(dhd, ltoh32(txstatus->cmn_hdr.if_id),
6518
+ (uint8 *)PKTDATA(dhd->osh, pkt), len, TRUE,
6519
+ &pkthash, &status);
37836520 }
6521
+#endif /* DHD_PKT_LOGGING */
37846522
3785
- if (pkt) {
3786
- if (SECURE_DMA_ENAB(dhd->osh)) {
3787
- int offset = 0;
3788
- BCM_REFERENCE(offset);
3789
-
3790
- if (dhd->prot->tx_metadata_offset)
3791
- offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
3792
- SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
3793
- (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
3794
- secdma, offset);
3795
- } else {
3796
- DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, pkt, dmah);
3797
- }
37986523 #if defined(BCMPCIE)
3799
- dhd_txcomplete(dhd, pkt, true);
3800
-#endif
6524
+ dhd_txcomplete(dhd, pkt, pkt_fate);
6525
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
6526
+ dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
6527
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6528
+#endif // endif
38016529
38026530 #if DHD_DBG_SHOW_METADATA
3803
- if (dhd->prot->metadata_dbg &&
3804
- dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
3805
- uchar *ptr;
3806
- /* The Ethernet header of TX frame was copied and removed.
3807
- * Here, move the data pointer forward by Ethernet header size.
3808
- */
3809
- PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
3810
- ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
3811
- bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
3812
- dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
3813
- }
3814
-#endif /* DHD_DBG_SHOW_METADATA */
3815
-#ifndef CUSTOMER_HW_31_2
3816
- DHD_GENERAL_UNLOCK(dhd, flags);
3817
- PKTFREE(dhd->osh, pkt, TRUE);
3818
- DHD_GENERAL_LOCK(dhd, flags);
3819
-#endif /* CUSTOMER_HW_31_2 */
3820
- DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
3821
- txstatus->tx_status);
6531
+ if (dhd->prot->metadata_dbg &&
6532
+ dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
6533
+ uchar *ptr;
6534
+ /* The Ethernet header of TX frame was copied and removed.
6535
+ * Here, move the data pointer forward by Ethernet header size.
6536
+ */
6537
+ PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
6538
+ ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
6539
+ bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
6540
+ dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
38226541 }
6542
+#endif /* DHD_DBG_SHOW_METADATA */
38236543
3824
- DHD_GENERAL_UNLOCK(dhd, flags);
6544
+#ifdef DHD_HP2P
6545
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6546
+#ifdef DHD_HP2P_DEBUG
6547
+ bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
6548
+#endif /* DHD_HP2P_DEBUG */
6549
+ dhd_update_hp2p_txstats(dhd, txstatus);
6550
+ }
6551
+#endif /* DHD_HP2P */
38256552
6553
+#ifdef DHD_LBUF_AUDIT
6554
+ PKTAUDIT(dhd->osh, pkt);
6555
+#endif // endif
6556
+
6557
+ DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
6558
+ txstatus->tx_status);
6559
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
6560
+ PKTFREE(dhd->osh, pkt, TRUE);
38266561 return;
38276562 } /* dhd_prot_txstatus_process */
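/*
 * Illustrative sketch (not from the original patch): under
 * TX_STATUS_LATENCY_STATS the post path stamps each packet with a queue time
 * (DHD_PKT_SET_QTIME) and the completion path above accumulates (now - qtime)
 * per flow ring. The struct below is a simplified stand-in for the driver's
 * flow_info_t fields, showing how an average latency could be derived.
 */
#include <stdint.h>

struct flow_latency {
	uint64_t cum_tx_status_latency;  /* sum of per-packet latencies, usec */
	uint64_t num_tx_status;          /* completions accounted so far      */
};

/* Record one completion given the post and completion timestamps in usec. */
static void flow_latency_record(struct flow_latency *fl,
                                uint64_t post_us, uint64_t complete_us)
{
	fl->cum_tx_status_latency += complete_us - post_us;
	fl->num_tx_status++;
}

/* Average TX status latency in usec, or 0 if nothing has completed yet. */
static uint64_t flow_latency_avg_us(const struct flow_latency *fl)
{
	return fl->num_tx_status ? fl->cum_tx_status_latency / fl->num_tx_status : 0;
}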
38286563
....@@ -3835,7 +6570,6 @@
38356570 uint16 buflen;
38366571 int ifidx = 0;
38376572 void* pkt;
3838
- unsigned long flags;
38396573 dhd_prot_t *prot = dhd->prot;
38406574
38416575 /* Event complete header */
....@@ -3843,134 +6577,91 @@
38436577 bufid = ltoh32(evnt->cmn_hdr.request_id);
38446578
38456579 #if defined(DHD_PKTID_AUDIT_RING)
3846
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid,
3847
- DHD_DUPLICATE_FREE);
3848
-#endif /* DHD_PKTID_AUDIT_RING */
6580
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
6581
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6582
+#endif // endif
38496583
38506584 buflen = ltoh16(evnt->event_data_len);
38516585
38526586 ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
38536587
38546588 /* Post another rxbuf to the device */
3855
- if (prot->cur_event_bufs_posted) {
6589
+ if (prot->cur_event_bufs_posted)
38566590 prot->cur_event_bufs_posted--;
3857
- }
38586591 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
38596592
3860
- /* locks required to protect pktid_map */
3861
- DHD_GENERAL_LOCK(dhd, flags);
38626593 pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
3863
- DHD_GENERAL_UNLOCK(dhd, flags);
38646594
38656595 if (!pkt) {
6596
+ DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
38666597 return;
38676598 }
38686599
38696600 /* DMA RX offset updated through shared area */
3870
- if (dhd->prot->rx_dataoffset) {
6601
+ if (dhd->prot->rx_dataoffset)
38716602 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
3872
- }
38736603
38746604 PKTSETLEN(dhd->osh, pkt, buflen);
3875
-
6605
+#ifdef DHD_LBUF_AUDIT
6606
+ PKTAUDIT(dhd->osh, pkt);
6607
+#endif // endif
38766608 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
38776609 }
38786610
3879
-extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
3880
-extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, void *pkt, int ifidx);
3881
-
3882
-/** called on MSG_TYPE_RX_CMPLT message received from dongle */
6611
+/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
38836612 static void BCMFASTPATH
3884
-dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg)
6613
+dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
38856614 {
3886
- host_rxbuf_cmpl_t *rxcmplt_h;
3887
- uint16 data_offset; /* offset at which data starts */
3888
- void *pkt;
3889
- unsigned long flags;
3890
- uint ifidx;
6615
+ info_buf_resp_t *resp;
38916616 uint32 pktid;
3892
-#if defined(DHD_LB_RXC)
3893
- const bool free_pktid = FALSE;
3894
-#else
3895
- const bool free_pktid = TRUE;
3896
-#endif /* DHD_LB_RXC */
6617
+ uint16 buflen;
6618
+ void * pkt;
38976619
3898
- /* RXCMPLT HDR */
3899
- rxcmplt_h = (host_rxbuf_cmpl_t *)msg;
6620
+ resp = (info_buf_resp_t *)buf;
6621
+ pktid = ltoh32(resp->cmn_hdr.request_id);
6622
+ buflen = ltoh16(resp->info_data_len);
39006623
3901
- /* offset from which data starts is populated in rxstatus0 */
3902
- data_offset = ltoh16(rxcmplt_h->data_offset);
3903
-
3904
- pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id);
3905
-
3906
-#if defined(DHD_PKTID_AUDIT_RING)
3907
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
3908
- DHD_DUPLICATE_FREE);
6624
+#ifdef DHD_PKTID_AUDIT_RING
6625
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6626
+ DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
39096627 #endif /* DHD_PKTID_AUDIT_RING */
39106628
3911
- DHD_GENERAL_LOCK(dhd, flags);
3912
- pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid);
3913
- DHD_GENERAL_UNLOCK(dhd, flags);
6629
+ DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
6630
+ pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
6631
+ dhd->prot->rx_dataoffset));
39146632
3915
- if (!pkt) {
3916
- return;
3917
- }
3918
-
3919
- /* Post another set of rxbufs to the device */
3920
- dhd_prot_return_rxbuf(dhd, pktid, 1);
3921
-
3922
- DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
3923
- ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
3924
- rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
3925
- ltoh16(rxcmplt_h->metadata_len)));
3926
-#if DHD_DBG_SHOW_METADATA
3927
- if (dhd->prot->metadata_dbg &&
3928
- dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
3929
- uchar *ptr;
3930
- ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
3931
- /* header followed by data */
3932
- bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
3933
- dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
3934
- }
3935
-#endif /* DHD_DBG_SHOW_METADATA */
3936
-
3937
- if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
3938
- DHD_INFO(("D11 frame rxed \n"));
3939
- }
3940
-
3941
- /* data_offset from buf start */
3942
- if (data_offset) {
3943
- /* data offset given from dongle after split rx */
3944
- PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
3945
- } else {
3946
- /* DMA RX offset updated through shared area */
3947
- if (dhd->prot->rx_dataoffset) {
3948
- PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6633
+ if (dhd->debug_buf_dest_support) {
6634
+ if (resp->dest < DEBUG_BUF_DEST_MAX) {
6635
+ dhd->debug_buf_dest_stat[resp->dest]++;
39496636 }
39506637 }
3951
- /* Actual length of the packet */
3952
- PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
39536638
3954
- ifidx = rxcmplt_h->cmn_hdr.if_id;
3955
-
3956
- if (dhd_monitor_enabled(dhd, ifidx) &&
3957
- (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
3958
- dhd_rx_mon_pkt(dhd, pkt, ifidx);
6639
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
6640
+ if (!pkt)
39596641 return;
3960
- }
39616642
3962
-#if defined(DHD_LB_RXP)
3963
- dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
3964
-#else /* ! DHD_LB_RXP */
3965
-#ifdef DHD_RX_CHAINING
3966
- /* Chain the packets */
3967
- dhd_rxchain_frame(dhd, pkt, ifidx);
3968
-#else /* ! DHD_RX_CHAINING */
3969
- /* offset from which data starts is populated in rxstatus0 */
3970
- dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
3971
-#endif /* ! DHD_RX_CHAINING */
3972
-#endif /* ! DHD_LB_RXP */
3973
-} /* dhd_prot_rxcmplt_process */
6643
+ /* DMA RX offset updated through shared area */
6644
+ if (dhd->prot->rx_dataoffset)
6645
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6646
+
6647
+ PKTSETLEN(dhd->osh, pkt, buflen);
6648
+
6649
+#ifdef DHD_LBUF_AUDIT
6650
+ PKTAUDIT(dhd->osh, pkt);
6651
+#endif // endif
6652
+
6653
+ /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
6654
+ * special ifidx of -1. This is just internal to dhd to get the data to
6655
+ * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process).
6656
+ */
6657
+ dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
6658
+}
6659
+
6660
+/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
6661
+static void BCMFASTPATH
6662
+dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
6663
+{
6664
+}
39746665
39756666 /** Stop protocol: sync w/dongle state. */
39766667 void dhd_prot_stop(dhd_pub_t *dhd)
....@@ -3995,6 +6686,7 @@
39956686 return 0;
39966687 }
39976688
6689
+#define MAX_MTU_SZ (1600u)
39986690
39996691 #define PKTBUF pktbuf
40006692
....@@ -4019,26 +6711,41 @@
40196711 msgbuf_ring_t *ring;
40206712 flow_ring_table_t *flow_ring_table;
40216713 flow_ring_node_t *flow_ring_node;
6714
+#ifdef DHD_PKT_LOGGING
6715
+ uint32 pkthash;
6716
+#endif /* DHD_PKT_LOGGING */
40226717
40236718 if (dhd->flow_ring_table == NULL) {
6719
+ DHD_ERROR(("dhd flow_ring_table is NULL\n"));
40246720 return BCME_NORESOURCE;
40256721 }
6722
+#ifdef DHD_PCIE_PKTID
6723
+ if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
6724
+ if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
6725
+ dhd_bus_stop_queue(dhd->bus);
6726
+ dhd->prot->no_tx_resource = TRUE;
6727
+ }
6728
+ dhd->prot->pktid_depleted_cnt++;
6729
+ goto err_no_res;
6730
+ } else {
6731
+ dhd->prot->pktid_depleted_cnt = 0;
6732
+ }
6733
+#endif /* DHD_PCIE_PKTID */
40266734
40276735 flowid = DHD_PKT_GET_FLOWID(PKTBUF);
4028
-
40296736 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
40306737 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
40316738
40326739 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
40336740
4034
-
4035
- DHD_GENERAL_LOCK(dhd, flags);
6741
+ DHD_RING_LOCK(ring->ring_lock, flags);
40366742
40376743 /* Create a unique 32-bit packet id */
4038
- pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF);
6744
+ pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
6745
+ PKTBUF, PKTTYPE_DATA_TX);
40396746 #if defined(DHD_PCIE_PKTID)
40406747 if (pktid == DHD_PKTID_INVALID) {
4041
- DHD_ERROR(("Pktid pool depleted.\n"));
6748
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
40426749 /*
40436750 * If we return error here, the caller would queue the packet
40446751 * again. So we'll just free the skb allocated in DMA Zone.
....@@ -4053,21 +6760,22 @@
40536760 txdesc = (host_txbuf_post_t *)
40546761 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
40556762 if (txdesc == NULL) {
4056
-#if defined(DHD_PCIE_PKTID)
4057
- void *dmah;
4058
- void *secdma;
4059
- /* Free up the PKTID. physaddr and pktlen will be garbage. */
4060
- DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid,
4061
- pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
4062
-#endif /* DHD_PCIE_PKTID */
40636763 DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
4064
- __FUNCTION__, __LINE__, prot->active_tx_count));
4065
- goto err_no_res_pktfree;
6764
+ __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
6765
+ goto err_free_pktid;
40666766 }
40676767
40686768 /* Extract the data pointer and length information */
40696769 pktdata = PKTDATA(dhd->osh, PKTBUF);
40706770 pktlen = PKTLEN(dhd->osh, PKTBUF);
6771
+
6772
+ DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
6773
+#ifdef DHD_PKT_LOGGING
6774
+ DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
6775
+ /* Dump TX packet */
6776
+ pkthash = __dhd_dbg_pkt_hash((uintptr_t)PKTBUF, pktid);
6777
+ dhd_dump_pkt(dhd, ifidx, pktdata, pktlen, TRUE, &pkthash, NULL);
6778
+#endif /* DHD_PKT_LOGGING */
40716779
40726780 /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
40736781 bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
....@@ -4081,9 +6789,8 @@
40816789 int offset = 0;
40826790 BCM_REFERENCE(offset);
40836791
4084
- if (prot->tx_metadata_offset) {
6792
+ if (prot->tx_metadata_offset)
40856793 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
4086
- }
40876794
40886795 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
40896796 DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
....@@ -4093,31 +6800,36 @@
40936800 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
40946801 #endif /* #ifndef BCM_SECURE_DMA */
40956802
4096
- if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) {
4097
- DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
6803
+ if (PHYSADDRISZERO(pa)) {
6804
+ DHD_ERROR(("%s: Something really bad, unless 0 is "
6805
+ "a valid phyaddr for pa\n", __FUNCTION__));
40986806 ASSERT(0);
6807
+ goto err_rollback_idx;
40996808 }
41006809
6810
+#ifdef DMAMAP_STATS
6811
+ dhd->dma_stats.txdata++;
6812
+ dhd->dma_stats.txdata_sz += pktlen;
6813
+#endif /* DMAMAP_STATS */
41016814 /* No need to lock. Save the rest of the packet's metadata */
4102
- DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
6815
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
41036816 pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
41046817
41056818 #ifdef TXP_FLUSH_NITEMS
4106
- if (ring->pend_items_count == 0) {
6819
+ if (ring->pend_items_count == 0)
41076820 ring->start_addr = (void *)txdesc;
4108
- }
41096821 ring->pend_items_count++;
4110
-#endif
6822
+#endif // endif
41116823
41126824 /* Form the Tx descriptor message buffer */
41136825
41146826 /* Common message hdr */
41156827 txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
41166828 txdesc->cmn_hdr.if_id = ifidx;
6829
+ txdesc->cmn_hdr.flags = ring->current_phase;
41176830
41186831 txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
41196832 prio = (uint8)PKTPRIO(PKTBUF);
4120
-
41216833
41226834 txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
41236835 txdesc->seg_cnt = 1;
....@@ -4131,10 +6843,9 @@
41316843
41326844 /* Handle Tx metadata */
41336845 headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
4134
- if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
6846
+ if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
41356847 DHD_ERROR(("No headroom for Metadata tx %d %d\n",
41366848 prot->tx_metadata_offset, headroom));
4137
- }
41386849
41396850 if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
41406851 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
....@@ -4154,8 +6865,32 @@
41546865 #endif /* #ifndef BCM_SECURE_DMA */
41556866
41566867 if (PHYSADDRISZERO(meta_pa)) {
4157
- DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
6868
+ /* Unmap the data pointer to a DMA-able address */
6869
+ if (SECURE_DMA_ENAB(dhd->osh)) {
6870
+ int offset = 0;
6871
+ BCM_REFERENCE(offset);
6872
+
6873
+ if (prot->tx_metadata_offset) {
6874
+ offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6875
+ }
6876
+
6877
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
6878
+ DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
6879
+ }
6880
+#ifndef BCM_SECURE_DMA
6881
+ else {
6882
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
6883
+ }
6884
+#endif /* #ifndef BCM_SECURE_DMA */
6885
+#ifdef TXP_FLUSH_NITEMS
6886
+ /* update pend_items_count */
6887
+ ring->pend_items_count--;
6888
+#endif /* TXP_FLUSH_NITEMS */
6889
+
6890
+ DHD_ERROR(("%s: Something really bad, unless 0 is "
6891
+ "a valid phyaddr for meta_pa\n", __FUNCTION__));
41586892 ASSERT(0);
6893
+ goto err_rollback_idx;
41596894 }
41606895
41616896 /* Adjust the data pointer back to original value */
....@@ -4165,14 +6900,21 @@
41656900 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
41666901 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
41676902 } else {
4168
- txdesc->metadata_buf_len = htol16(0);
4169
- txdesc->metadata_buf_addr.high_addr = 0;
4170
- txdesc->metadata_buf_addr.low_addr = 0;
6903
+#ifdef DHD_HP2P
6904
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6905
+ dhd_update_hp2p_txdesc(dhd, txdesc);
6906
+ } else
6907
+#endif /* DHD_HP2P */
6908
+ if (1)
6909
+ {
6910
+ txdesc->metadata_buf_len = htol16(0);
6911
+ txdesc->metadata_buf_addr.high_addr = 0;
6912
+ txdesc->metadata_buf_addr.low_addr = 0;
6913
+ }
41716914 }
41726915
4173
-#if defined(DHD_PKTID_AUDIT_RING)
4174
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
4175
- DHD_DUPLICATE_ALLOC);
6916
+#ifdef DHD_PKTID_AUDIT_RING
6917
+ DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
41766918 #endif /* DHD_PKTID_AUDIT_RING */
41776919
41786920 txdesc->cmn_hdr.request_id = htol32(pktid);
....@@ -4180,57 +6922,101 @@
41806922 DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
41816923 txdesc->cmn_hdr.request_id));
41826924
6925
+#ifdef DHD_LBUF_AUDIT
6926
+ PKTAUDIT(dhd->osh, PKTBUF);
6927
+#endif // endif
6928
+
6929
+ if (pktlen > MAX_MTU_SZ) {
6930
+ DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
6931
+ __FUNCTION__, pktlen, MAX_MTU_SZ));
6932
+ dhd_prhex("txringitem", (volatile uchar*)txdesc,
6933
+ sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
6934
+ }
6935
+
41836936 /* Update the write pointer in TCM & ring bell */
4184
-#ifdef TXP_FLUSH_NITEMS
6937
+#if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
6938
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6939
+ dhd_calc_hp2p_burst(dhd, ring, flowid);
6940
+ } else {
6941
+ if ((ring->pend_items_count == prot->txp_threshold) ||
6942
+ ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6943
+ dhd_prot_txdata_write_flush(dhd, flowid);
6944
+ }
6945
+ }
6946
+#elif defined(TXP_FLUSH_NITEMS)
41856947 /* Flush if we have either hit the txp_threshold or if this msg is */
41866948 /* occupying the last slot in the flow_ring - before wrap around. */
41876949 if ((ring->pend_items_count == prot->txp_threshold) ||
41886950 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
4189
- dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
6951
+ dhd_prot_txdata_write_flush(dhd, flowid);
41906952 }
41916953 #else
41926954 /* update ring's WR index and ring doorbell to dongle */
41936955 dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
4194
-#endif
6956
+#endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
41956957
4196
- prot->active_tx_count++;
6958
+#if defined(TX_STATUS_LATENCY_STATS)
6959
+ /* set the time when pkt is queued to flowring */
6960
+ DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
6961
+#endif // endif
41976962
6963
+ OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
41986964 /*
41996965	 * Take a wake lock, do not sleep if we have at least one packet
42006966 * to finish.
42016967 */
4202
- if (prot->active_tx_count == 1)
4203
- DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
6968
+ DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
42046969
4205
- DHD_GENERAL_UNLOCK(dhd, flags);
6970
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
42066971
6972
+#ifdef TX_STATUS_LATENCY_STATS
6973
+ flow_ring_node->flow_info.num_tx_pkts++;
6974
+#endif /* TX_STATUS_LATENCY_STATS */
42076975 return BCME_OK;
42086976
6977
+err_rollback_idx:
6978
+ /* roll back write pointer for unprocessed message */
6979
+ if (ring->wr == 0) {
6980
+ ring->wr = ring->max_items - 1;
6981
+ } else {
6982
+ ring->wr--;
6983
+ if (ring->wr == 0) {
6984
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
6985
+ ring->current_phase = ring->current_phase ?
6986
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6987
+ }
6988
+ }
6989
+
6990
+err_free_pktid:
6991
+#if defined(DHD_PCIE_PKTID)
6992
+ {
6993
+ void *dmah;
6994
+ void *secdma;
6995
+ /* Free up the PKTID. physaddr and pktlen will be garbage. */
6996
+ DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6997
+ pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
6998
+ }
6999
+
42097000 err_no_res_pktfree:
7001
+#endif /* DHD_PCIE_PKTID */
42107002
4211
-
4212
-
4213
- DHD_GENERAL_UNLOCK(dhd, flags);
7003
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
7004
+err_no_res:
42147005 return BCME_NORESOURCE;
42157006 } /* dhd_prot_txdata */
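/*
 * Illustrative sketch (not from the original patch): the err_rollback_idx path
 * above backs the ring write index out of a slot that was reserved but never
 * handed to the dongle, flipping the phase bit when the index wraps back past
 * slot 0. PHASE_BIT and the ring_idx struct are stand-ins for the driver's
 * BCMPCIE_CMNHDR_PHASE_BIT_INIT and ring state.
 */
#include <stdint.h>

#define PHASE_BIT 0x80  /* assumed value, for illustration only */

struct ring_idx {
	uint16_t wr;         /* next slot to be written     */
	uint16_t max_items;  /* number of slots in the ring */
	uint8_t  phase;      /* current phase value, or 0   */
};

/* Undo one reserved-but-unsubmitted slot, keeping the phase consistent. */
static void ring_rollback_one(struct ring_idx *r)
{
	if (r->wr == 0) {
		r->wr = r->max_items - 1;
	} else {
		r->wr--;
		if (r->wr == 0)
			r->phase = r->phase ? 0 : PHASE_BIT;
	}
}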
42167007
4217
-/* called with a lock */
7008
+/* called with a ring_lock */
42187009 /** optimization to write "n" tx items at a time to ring */
42197010 void BCMFASTPATH
4220
-dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
7011
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
42217012 {
42227013 #ifdef TXP_FLUSH_NITEMS
4223
- unsigned long flags = 0;
42247014 flow_ring_table_t *flow_ring_table;
42257015 flow_ring_node_t *flow_ring_node;
42267016 msgbuf_ring_t *ring;
42277017
42287018 if (dhd->flow_ring_table == NULL) {
42297019 return;
4230
- }
4231
-
4232
- if (!in_lock) {
4233
- DHD_GENERAL_LOCK(dhd, flags);
42347020 }
42357021
42367022 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
....@@ -4243,10 +7029,6 @@
42437029 ring->pend_items_count);
42447030 ring->pend_items_count = 0;
42457031 ring->start_addr = NULL;
4246
- }
4247
-
4248
- if (!in_lock) {
4249
- DHD_GENERAL_UNLOCK(dhd, flags);
42507032 }
42517033 #endif /* TXP_FLUSH_NITEMS */
42527034 }
....@@ -4295,19 +7077,18 @@
42957077
42967078 #endif /* DHD_LB_RXC */
42977079
4298
-
42997080 if (prot->rxbufpost >= rxcnt) {
4300
- prot->rxbufpost -= rxcnt;
7081
+ prot->rxbufpost -= (uint16)rxcnt;
43017082 } else {
43027083 /* ASSERT(0); */
43037084 prot->rxbufpost = 0;
43047085 }
43057086
43067087 #if !defined(DHD_LB_RXC)
4307
- if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
7088
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
43087089 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4309
- }
43107090 #endif /* !DHD_LB_RXC */
7091
+ return;
43117092 }
43127093
43137094 /* called before an ioctl is sent to the dongle */
....@@ -4315,9 +7096,9 @@
43157096 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
43167097 {
43177098 dhd_prot_t *prot = dhd->prot;
7099
+ int slen = 0;
43187100
43197101 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
4320
- int slen = 0;
43217102 pcie_bus_tput_params_t *tput_params;
43227103
43237104 slen = strlen("pcie_bus_tput") + 1;
....@@ -4326,8 +7107,33 @@
43267107 sizeof(tput_params->host_buf_addr));
43277108 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
43287109 }
7110
+
43297111 }
43307112
7113
+/* called after an ioctl returns from dongle */
7114
+static void
7115
+dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
7116
+ int ifidx, int ret, int len)
7117
+{
7118
+
7119
+ if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
7120
+ /* Intercept the wme_dp ioctl here */
7121
+ if (!strcmp(buf, "wme_dp")) {
7122
+ int slen, val = 0;
7123
+
7124
+ slen = strlen("wme_dp") + 1;
7125
+ if (len >= (int)(slen + sizeof(int)))
7126
+ bcopy(((char *)buf + slen), &val, sizeof(int));
7127
+ dhd->wme_dp = (uint8) ltoh32(val);
7128
+ }
7129
+
7130
+ }
7131
+
7132
+}
7133
+
7134
+#ifdef DHD_PM_CONTROL_FROM_FILE
7135
+extern bool g_pm_control;
7136
+#endif /* DHD_PM_CONTROL_FROM_FILE */
43317137
43327138 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
43337139 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
....@@ -4335,8 +7141,20 @@
43357141 int ret = -1;
43367142 uint8 action;
43377143
7144
+ if (dhd->bus->is_linkdown) {
7145
+ DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7146
+ goto done;
7147
+ }
7148
+
7149
+ if (dhd_query_bus_erros(dhd)) {
7150
+ DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
7151
+ goto done;
7152
+ }
7153
+
43387154 if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
4339
- DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7155
+ DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
7156
+ " bus state: %d, sent hang: %d\n", __FUNCTION__,
7157
+ dhd->busstate, dhd->hang_was_sent));
43407158 goto done;
43417159 }
43427160
....@@ -4347,12 +7165,21 @@
43477165
43487166 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
43497167
7168
+ if (ioc->cmd == WLC_SET_PM) {
7169
+#ifdef DHD_PM_CONTROL_FROM_FILE
7170
+ if (g_pm_control == TRUE) {
7171
+ DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
7172
+ __FUNCTION__, buf ? *(char *)buf : 0));
7173
+ goto done;
7174
+ }
7175
+#endif /* DHD_PM_CONTROL_FROM_FILE */
7176
+ DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
7177
+ }
43507178
43517179 ASSERT(len <= WLC_IOCTL_MAXLEN);
43527180
4353
- if (len > WLC_IOCTL_MAXLEN) {
7181
+ if (len > WLC_IOCTL_MAXLEN)
43547182 goto done;
4355
- }
43567183
43577184 action = ioc->set;
43587185
....@@ -4362,32 +7189,19 @@
43627189 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
43637190 } else {
43647191 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
4365
- if (ret > 0) {
7192
+ if (ret > 0)
43667193 ioc->used = ret;
4367
- }
43687194 }
43697195
43707196 /* Too many programs assume ioctl() returns 0 on success */
43717197 if (ret >= 0) {
43727198 ret = 0;
43737199 } else {
4374
- DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
7200
+ DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
43757201 dhd->dongle_error = ret;
43767202 }
43777203
4378
- if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
4379
- /* Intercept the wme_dp ioctl here */
4380
- if (!strcmp(buf, "wme_dp")) {
4381
- int slen, val = 0;
4382
-
4383
- slen = strlen("wme_dp") + 1;
4384
- if (len >= (int)(slen + sizeof(int))) {
4385
- bcopy(((char *)buf + slen), &val, sizeof(int));
4386
- }
4387
- dhd->wme_dp = (uint8) ltoh32(val);
4388
- }
4389
-
4390
- }
7204
+ dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
43917205
43927206 done:
43937207 return ret;
....@@ -4412,13 +7226,13 @@
44127226 msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
44137227 msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
44147228
4415
- DHD_GENERAL_LOCK(dhd, flags);
7229
+ DHD_RING_LOCK(ring->ring_lock, flags);
44167230
44177231 ioct_rqst = (ioct_reqst_hdr_t *)
44187232 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
44197233
44207234 if (ioct_rqst == NULL) {
4421
- DHD_GENERAL_UNLOCK(dhd, flags);
7235
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
44227236 return 0;
44237237 }
44247238
....@@ -4438,12 +7252,14 @@
44387252
44397253 ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
44407254 ioct_rqst->msg.if_id = 0;
7255
+ ioct_rqst->msg.flags = ring->current_phase;
44417256
44427257 bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
44437258
44447259 /* update ring's WR index and ring doorbell to dongle */
44457260 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
4446
- DHD_GENERAL_UNLOCK(dhd, flags);
7261
+
7262
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
44477263
44487264 return 0;
44497265 }
....@@ -4451,22 +7267,64 @@
44517267 /** test / loopback */
44527268 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
44537269 {
4454
- if (dmaxfer == NULL) {
7270
+ if (dmaxfer == NULL)
44557271 return;
4456
- }
44577272
44587273 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
44597274 dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
44607275 }
44617276
44627277 /** test / loopback */
7278
+int
7279
+dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
7280
+{
7281
+ dhd_prot_t *prot = dhdp->prot;
7282
+ dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
7283
+ dmaxref_mem_map_t *dmap = NULL;
7284
+
7285
+ dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
7286
+ if (!dmap) {
7287
+ DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
7288
+ goto mem_alloc_fail;
7289
+ }
7290
+ dmap->srcmem = &(dmaxfer->srcmem);
7291
+ dmap->dstmem = &(dmaxfer->dstmem);
7292
+
7293
+ DMAXFER_FREE(dhdp, dmap);
7294
+ return BCME_OK;
7295
+
7296
+mem_alloc_fail:
7297
+ if (dmap) {
7298
+ MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
7299
+ dmap = NULL;
7300
+ }
7301
+ return BCME_NOMEM;
7302
+} /* dhd_prepare_schedule_dmaxfer_free */
7303
+
7304
+/** test / loopback */
7305
+void
7306
+dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
7307
+{
7308
+
7309
+ dhd_dma_buf_free(dhdp, dmmap->srcmem);
7310
+ dhd_dma_buf_free(dhdp, dmmap->dstmem);
7311
+
7312
+ MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
7313
+
7314
+ dhdp->bus->dmaxfer_complete = TRUE;
7315
+ dhd_os_dmaxfer_wake(dhdp);
7316
+
7317
+ dmmap = NULL;
7318
+
7319
+} /* dmaxfer_free_prev_dmaaddr */
7320
+
7321
+/** test / loopback */
44637322 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
44647323 uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
44657324 {
4466
- uint i;
4467
- if (!dmaxfer) {
7325
+ uint i = 0, j = 0;
7326
+ if (!dmaxfer)
44687327 return BCME_ERROR;
4469
- }
44707328
44717329 /* First free up existing buffers */
44727330 dmaxfer_free_dmaaddr(dhd, dmaxfer);
....@@ -4482,10 +7340,24 @@
44827340
44837341 dmaxfer->len = len;
44847342
4485
- /* Populate source with a pattern */
4486
- for (i = 0; i < dmaxfer->len; i++) {
4487
- ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
7343
+ /* Populate source with a pattern like below
7344
+ * 0x00000000
7345
+ * 0x01010101
7346
+ * 0x02020202
7347
+ * 0x03030303
7348
+ * 0x04040404
7349
+ * 0x05050505
7350
+ * ...
7351
+ * 0xFFFFFFFF
7352
+ */
7353
+ while (i < dmaxfer->len) {
7354
+ ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
7355
+ i++;
7356
+ if (i % 4 == 0) {
7357
+ j++;
7358
+ }
44887359 }
7360
+
44897361 OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
44907362
44917363 dmaxfer->srcdelay = srcdelay;
....@@ -4498,21 +7370,81 @@
44987370 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
44997371 {
45007372 dhd_prot_t *prot = dhd->prot;
7373
+ uint64 end_usec;
7374
+ pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
7375
+ int buf_free_scheduled;
45017376
7377
+ BCM_REFERENCE(cmplt);
7378
+ end_usec = OSL_SYSUPTIME_US();
7379
+
7380
+ DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
7381
+ prot->dmaxfer.status = cmplt->compl_hdr.status;
45027382 OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
45037383 if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
45047384 if (memcmp(prot->dmaxfer.srcmem.va,
4505
- prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
4506
- bcm_print_bytes("XFER SRC: ",
7385
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
7386
+ cmplt->compl_hdr.status != BCME_OK) {
7387
+ DHD_ERROR(("DMA loopback failed\n"));
7388
+			/* It has been observed that the completion
7389
+			 * header status is sometimes OK even though the memcmp fails,
7390
+			 * so always explicitly set the dmaxfer status
7391
+			 * as an error when this happens.
7392
+ */
7393
+ prot->dmaxfer.status = BCME_ERROR;
7394
+ prhex("XFER SRC: ",
45077395 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
4508
- bcm_print_bytes("XFER DST: ",
7396
+ prhex("XFER DST: ",
45097397 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
4510
- } else {
4511
- DHD_INFO(("DMA successful\n"));
7398
+ }
7399
+ else {
7400
+ switch (prot->dmaxfer.d11_lpbk) {
7401
+ case M2M_DMA_LPBK: {
7402
+ DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
7403
+ } break;
7404
+ case D11_LPBK: {
7405
+ DHD_ERROR(("DMA successful with d11 loopback\n"));
7406
+ } break;
7407
+ case BMC_LPBK: {
7408
+ DHD_ERROR(("DMA successful with bmc loopback\n"));
7409
+ } break;
7410
+ case M2M_NON_DMA_LPBK: {
7411
+ DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
7412
+ } break;
7413
+ case D11_HOST_MEM_LPBK: {
7414
+ DHD_ERROR(("DMA successful d11 host mem loopback\n"));
7415
+ } break;
7416
+ case BMC_HOST_MEM_LPBK: {
7417
+ DHD_ERROR(("DMA successful bmc host mem loopback\n"));
7418
+ } break;
7419
+ default: {
7420
+ DHD_ERROR(("Invalid loopback option\n"));
7421
+ } break;
7422
+ }
7423
+
7424
+ if (DHD_LPBKDTDUMP_ON()) {
7425
+ /* debug info print of the Tx and Rx buffers */
7426
+ dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
7427
+ prot->dmaxfer.len, DHD_INFO_VAL);
7428
+ dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
7429
+ prot->dmaxfer.len, DHD_INFO_VAL);
7430
+ }
45127431 }
45137432 }
4514
- dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
7433
+
7434
+ buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
7435
+ end_usec -= prot->dmaxfer.start_usec;
7436
+ if (end_usec) {
7437
+ prot->dmaxfer.time_taken = end_usec;
7438
+ DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
7439
+ prot->dmaxfer.len, (unsigned long)end_usec,
7440
+ (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
7441
+ }
45157442 dhd->prot->dmaxfer.in_progress = FALSE;
7443
+
7444
+ if (buf_free_scheduled != BCME_OK) {
7445
+ dhd->bus->dmaxfer_complete = TRUE;
7446
+ dhd_os_dmaxfer_wake(dhd);
7447
+ }
45167448 }
45177449
45187450 /** Test functionality.
....@@ -4521,7 +7453,8 @@
45217453 * by a spinlock.
45227454 */
45237455 int
4524
-dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
7456
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
7457
+ uint d11_lpbk, uint core_num)
45257458 {
45267459 unsigned long flags;
45277460 int ret = BCME_OK;
....@@ -4533,17 +7466,24 @@
45337466
45347467 if (prot->dmaxfer.in_progress) {
45357468 DHD_ERROR(("DMA is in progress...\n"));
4536
- return ret;
7469
+ return BCME_ERROR;
45377470 }
7471
+
7472
+ if (d11_lpbk >= MAX_LPBK) {
7473
+ DHD_ERROR(("loopback mode should be either"
7474
+ " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
7475
+ return BCME_ERROR;
7476
+ }
7477
+
7478
+ DHD_RING_LOCK(ring->ring_lock, flags);
45387479
45397480 prot->dmaxfer.in_progress = TRUE;
45407481 if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
45417482 &prot->dmaxfer)) != BCME_OK) {
45427483 prot->dmaxfer.in_progress = FALSE;
7484
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
45437485 return ret;
45447486 }
4545
-
4546
- DHD_GENERAL_LOCK(dhd, flags);
45477487
45487488 dmap = (pcie_dma_xfer_params_t *)
45497489 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
....@@ -4551,7 +7491,7 @@
45517491 if (dmap == NULL) {
45527492 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
45537493 prot->dmaxfer.in_progress = FALSE;
4554
- DHD_GENERAL_UNLOCK(dhd, flags);
7494
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
45557495 return BCME_NOMEM;
45567496 }
45577497
....@@ -4559,6 +7499,7 @@
45597499 dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
45607500 dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
45617501 dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7502
+ dmap->cmn_hdr.flags = ring->current_phase;
45627503 ring->seqnum++;
45637504
45647505 dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
....@@ -4568,49 +7509,170 @@
45687509 dmap->xfer_len = htol32(prot->dmaxfer.len);
45697510 dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
45707511 dmap->destdelay = htol32(prot->dmaxfer.destdelay);
7512
+ prot->dmaxfer.d11_lpbk = d11_lpbk;
7513
+ dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
7514
+ << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
7515
+ ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
7516
+ << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
7517
+ prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
45717518
45727519 /* update ring's WR index and ring doorbell to dongle */
45737520 dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
4574
- DHD_GENERAL_UNLOCK(dhd, flags);
45757521
4576
- DHD_ERROR(("DMA Started...\n"));
7522
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
7523
+
7524
+ DHD_ERROR(("DMA loopback Started...\n"));
45777525
45787526 return BCME_OK;
45797527 } /* dhdmsgbuf_dmaxfer_req */
7528
+
7529
+int
7530
+dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
7531
+{
7532
+ dhd_prot_t *prot = dhd->prot;
7533
+
7534
+ if (prot->dmaxfer.in_progress)
7535
+ result->status = DMA_XFER_IN_PROGRESS;
7536
+ else if (prot->dmaxfer.status == 0)
7537
+ result->status = DMA_XFER_SUCCESS;
7538
+ else
7539
+ result->status = DMA_XFER_FAILED;
7540
+
7541
+ result->type = prot->dmaxfer.d11_lpbk;
7542
+ result->error_code = prot->dmaxfer.status;
7543
+ result->num_bytes = prot->dmaxfer.len;
7544
+ result->time_taken = prot->dmaxfer.time_taken;
7545
+ if (prot->dmaxfer.time_taken) {
7546
+ /* throughput in kBps */
7547
+ result->tput =
7548
+ (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
7549
+ (uint32)prot->dmaxfer.time_taken;
7550
+ }
7551
+
7552
+ return BCME_OK;
7553
+}
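/*
 * Illustrative sketch (not from the original patch): the reported loopback
 * throughput is kilobytes per second computed in integer math as
 * bytes * (1000*1000/1024) / usec. Example: 65536 bytes completed in 4096 usec
 * gives 65536 * 976 / 4096 = 15616 kBps (the exact value is 15625 kBps; the
 * small error comes from truncating 1000000/1024 to 976). A standalone helper
 * with the same arithmetic; like the driver's expression, it can overflow
 * 32 bits for transfers above roughly 4 MB.
 */
#include <stdint.h>

static uint32_t lpbk_tput_kbps(uint32_t bytes, uint32_t usec)
{
	return usec ? (bytes * (1000 * 1000 / 1024)) / usec : 0;
}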
45807554
45817555 /** Called in the process of submitting an ioctl to the dongle */
45827556 static int
45837557 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
45847558 {
45857559 int ret = 0;
7560
+ uint copylen = 0;
45867561
45877562 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
45887563
4589
- /* Respond "bcmerror" and "bcmerrorstr" with local cache */
7564
+ if (dhd->bus->is_linkdown) {
7565
+ DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7566
+ __FUNCTION__));
7567
+ return -EIO;
7568
+ }
7569
+
7570
+ if (dhd->busstate == DHD_BUS_DOWN) {
7571
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7572
+ return -EIO;
7573
+ }
7574
+
7575
+ /* don't talk to the dongle if fw is about to be reloaded */
7576
+ if (dhd->hang_was_sent) {
7577
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7578
+ __FUNCTION__));
7579
+ return -EIO;
7580
+ }
7581
+
45907582 if (cmd == WLC_GET_VAR && buf)
45917583 {
4592
- if (!strcmp((char *)buf, "bcmerrorstr"))
4593
- {
4594
- strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
7584
+ if (!len || !*(uint8 *)buf) {
7585
+ DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
7586
+ ret = BCME_BADARG;
45957587 goto done;
45967588 }
4597
- else if (!strcmp((char *)buf, "bcmerror"))
4598
- {
4599
- *(int *)buf = dhd->dongle_error;
7589
+
7590
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
7591
+ copylen = MIN(len, BCME_STRLEN);
7592
+
7593
+ if ((len >= strlen("bcmerrorstr")) &&
7594
+ (!strcmp((char *)buf, "bcmerrorstr"))) {
7595
+ strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
7596
+ *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
7597
+ goto done;
7598
+ } else if ((len >= strlen("bcmerror")) &&
7599
+ !strcmp((char *)buf, "bcmerror")) {
7600
+			*(uint32 *)buf = dhd->dongle_error;
46007601 goto done;
46017602 }
46027603 }
46037604
7605
+ DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
7606
+ action, ifidx, cmd, len));
7607
+
46047608 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
46057609
4606
- DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
4607
- action, ifidx, cmd, len));
7610
+ if (ret < 0) {
7611
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7612
+ goto done;
7613
+ }
46087614
46097615 /* wait for IOCTL completion message from dongle and get first fragment */
46107616 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
46117617
46127618 done:
46137619 return ret;
7620
+}
7621
+
7622
+void
7623
+dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
7624
+{
7625
+ uint32 intstatus;
7626
+ dhd_prot_t *prot = dhd->prot;
7627
+ dhd->rxcnt_timeout++;
7628
+ dhd->rx_ctlerrs++;
7629
+ dhd->iovar_timeout_occured = TRUE;
7630
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
7631
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
7632
+ dhd->is_sched_error ? " due to scheduling problem" : "",
7633
+ dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
7634
+ prot->ioctl_state, dhd->busstate, prot->ioctl_received));
7635
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7636
+ if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
7637
+ /* change g_assert_type to trigger Kernel panic */
7638
+ g_assert_type = 2;
7639
+ /* use ASSERT() to trigger panic */
7640
+ ASSERT(0);
7641
+ }
7642
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7643
+
7644
+ if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
7645
+ prot->curr_ioctl_cmd == WLC_GET_VAR) {
7646
+ char iovbuf[32];
7647
+ int i;
7648
+ int dump_size = 128;
7649
+ uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
7650
+ memset(iovbuf, 0, sizeof(iovbuf));
7651
+ strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
7652
+ iovbuf[sizeof(iovbuf) - 1] = '\0';
7653
+ DHD_ERROR(("Current IOVAR (%s): %s\n",
7654
+ prot->curr_ioctl_cmd == WLC_SET_VAR ?
7655
+ "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
7656
+ DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
7657
+ for (i = 0; i < dump_size; i++) {
7658
+ DHD_ERROR(("%02X ", ioctl_buf[i]));
7659
+ if ((i % 32) == 31) {
7660
+ DHD_ERROR(("\n"));
7661
+ }
7662
+ }
7663
+ DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
7664
+ }
7665
+
7666
+ /* Check the PCIe link status by reading intstatus register */
7667
+ intstatus = si_corereg(dhd->bus->sih,
7668
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7669
+ if (intstatus == (uint32)-1) {
7670
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
7671
+ dhd->bus->is_linkdown = TRUE;
7672
+ }
7673
+
7674
+ dhd_bus_dump_console_buffer(dhd->bus);
7675
+ dhd_prot_debug_info_print(dhd);
46147676 }
46157677
46167678 /**
....@@ -4627,53 +7689,61 @@
46277689
46287690 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
46297691
4630
- if (dhd->dongle_reset) {
7692
+ if (dhd_query_bus_erros(dhd)) {
46317693 ret = -EIO;
46327694 goto out;
46337695 }
46347696
4635
- if (prot->cur_ioctlresp_bufs_posted) {
4636
- prot->cur_ioctlresp_bufs_posted--;
7697
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7698
+
7699
+ if (prot->ioctl_received == 0) {
7700
+ uint32 intstatus = si_corereg(dhd->bus->sih,
7701
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7702
+		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
7703
+ if ((intstatus) && (intstatus != (uint32)-1) &&
7704
+ (timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
7705
+ DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
7706
+ " host_irq_disabled=%d\n",
7707
+				__FUNCTION__, intstatus, host_irq_disabled));
7708
+ dhd_pcie_intr_count_dump(dhd);
7709
+ dhd_print_tasklet_status(dhd);
7710
+ dhd_prot_process_ctrlbuf(dhd);
7711
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7712
+ /* Clear Interrupts */
7713
+ dhdpcie_bus_clear_intstatus(dhd->bus);
7714
+ }
46377715 }
46387716
4639
- dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
7717
+ if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
7718
+ /* check if resumed on time out related to scheduling issue */
7719
+ dhd->is_sched_error = FALSE;
7720
+ if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
7721
+ dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
7722
+ }
46407723
4641
- timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
4642
- if (timeleft == 0) {
4643
- dhd->rxcnt_timeout++;
4644
- dhd->rx_ctlerrs++;
4645
- DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
4646
- "trans_id %d state %d busstate=%d ioctl_received=%d\n",
4647
- __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
4648
- prot->ioctl_trans_id, prot->ioctl_state & ~MSGBUF_IOCTL_RESP_PENDING,
4649
- dhd->busstate, prot->ioctl_received));
7724
+ dhd_msgbuf_iovar_timeout_dump(dhd);
46507725
4651
- dhd_prot_debug_info_print(dhd);
4652
-
4653
-#if defined(DHD_FW_COREDUMP)
4654
- /* Collect socram dump for CUSTOMER_HW4 OR Brix Android */
4655
- /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
4656
- if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
7726
+#ifdef DHD_FW_COREDUMP
7727
+ /* Collect socram dump */
7728
+ if (dhd->memdump_enabled) {
46577729 /* collect core dump */
46587730 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
46597731 dhd_bus_mem_dump(dhd);
46607732 }
4661
-#endif /* DHD_FW_COREDUMP && OEM_ANDROID */
4662
- if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
7733
+#endif /* DHD_FW_COREDUMP */
7734
+
46637735 #ifdef SUPPORT_LINKDOWN_RECOVERY
46647736 #ifdef CONFIG_ARCH_MSM
4665
- dhd->bus->islinkdown = 1;
7737
+ dhd->bus->no_cfg_restore = 1;
46667738 #endif /* CONFIG_ARCH_MSM */
46677739 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4668
- DHD_ERROR(("%s: timeout > MAX_CNTL_TX_TIMEOUT\n", __FUNCTION__));
4669
- }
46707740 ret = -ETIMEDOUT;
46717741 goto out;
46727742 } else {
46737743 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
46747744 DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
46757745 __FUNCTION__, prot->ioctl_received));
4676
- ret = -ECONNABORTED;
7746
+ ret = -EINVAL;
46777747 goto out;
46787748 }
46797749 dhd->rxcnt_timeout = 0;
....@@ -4682,28 +7752,16 @@
46827752 __FUNCTION__, prot->ioctl_resplen));
46837753 }
46847754
4685
- if (dhd->dongle_trap_occured) {
4686
-#ifdef SUPPORT_LINKDOWN_RECOVERY
4687
-#ifdef CONFIG_ARCH_MSM
4688
- dhd->bus->islinkdown = 1;
4689
-#endif /* CONFIG_ARCH_MSM */
4690
-#endif /* SUPPORT_LINKDOWN_RECOVERY */
4691
- DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
4692
- ret = -EREMOTEIO;
4693
- goto out;
4694
- }
4695
-
4696
- if (dhd->prot->ioctl_resplen > len) {
7755
+ if (dhd->prot->ioctl_resplen > len)
46977756 dhd->prot->ioctl_resplen = (uint16)len;
4698
- }
4699
- if (buf) {
7757
+ if (buf)
47007758 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
4701
- }
47027759
47037760 ret = (int)(dhd->prot->ioctl_status);
7761
+
47047762 out:
47057763 DHD_GENERAL_LOCK(dhd, flags);
4706
- dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
7764
+ dhd->prot->ioctl_state = 0;
47077765 dhd->prot->ioctl_resplen = 0;
47087766 dhd->prot->ioctl_received = IOCTL_WAIT;
47097767 dhd->prot->curr_ioctl_cmd = 0;
....@@ -4719,6 +7777,12 @@
47197777
47207778 DHD_TRACE(("%s: Enter \n", __FUNCTION__));
47217779
7780
+ if (dhd->bus->is_linkdown) {
7781
+ DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7782
+ __FUNCTION__));
7783
+ return -EIO;
7784
+ }
7785
+
47227786 if (dhd->busstate == DHD_BUS_DOWN) {
47237787 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
47247788 return -EIO;
....@@ -4731,14 +7795,20 @@
47317795 return -EIO;
47327796 }
47337797
4734
- /* Fill up msgbuf for ioctl req */
4735
- ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
4736
-
47377798 DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
47387799 action, ifidx, cmd, len));
47397800
7801
+ /* Fill up msgbuf for ioctl req */
7802
+ ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7803
+
7804
+ if (ret < 0) {
7805
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7806
+ goto done;
7807
+ }
7808
+
47407809 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
47417810
7811
+done:
47427812 return ret;
47437813 }
47447814
....@@ -4750,16 +7820,161 @@
47507820
47517821 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
47527822 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
4753
- void *params, int plen, void *arg, int len, bool set)
7823
+ void *params, int plen, void *arg, int len, bool set)
47547824 {
47557825 return BCME_UNSUPPORTED;
47567826 }
7827
+
7828
+#ifdef DHD_DUMP_PCIE_RINGS
7829
+int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
7830
+ unsigned long *file_posn, bool file_write)
7831
+{
7832
+ dhd_prot_t *prot;
7833
+ msgbuf_ring_t *ring;
7834
+ int ret = 0;
7835
+ uint16 h2d_flowrings_total;
7836
+ uint16 flowid;
7837
+
7838
+ if (!(dhd) || !(dhd->prot)) {
7839
+ goto exit;
7840
+ }
7841
+ prot = dhd->prot;
7842
+
7843
+ /* Below is the same ring dump sequence followed in parser as well. */
7844
+ ring = &prot->h2dring_ctrl_subn;
7845
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7846
+ goto exit;
7847
+
7848
+ ring = &prot->h2dring_rxp_subn;
7849
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7850
+ goto exit;
7851
+
7852
+ ring = &prot->d2hring_ctrl_cpln;
7853
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7854
+ goto exit;
7855
+
7856
+ ring = &prot->d2hring_tx_cpln;
7857
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7858
+ goto exit;
7859
+
7860
+ ring = &prot->d2hring_rx_cpln;
7861
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7862
+ goto exit;
7863
+
7864
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7865
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7866
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7867
+ goto exit;
7868
+ }
7869
+ }
7870
+
7871
+#ifdef EWP_EDL
7872
+ if (dhd->dongle_edl_support) {
7873
+ ring = prot->d2hring_edl;
7874
+ if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
7875
+ goto exit;
7876
+ }
7877
+ else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
7878
+#else
7879
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
7880
+#endif /* EWP_EDL */
7881
+ {
7882
+ ring = prot->h2dring_info_subn;
7883
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7884
+ goto exit;
7885
+
7886
+ ring = prot->d2hring_info_cpln;
7887
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7888
+ goto exit;
7889
+ }
7890
+
7891
+exit:
7892
+ return ret;
7893
+}
7894
+
7895
+/* Write to file */
7896
+static
7897
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
7898
+ const void *user_buf, unsigned long *file_posn)
7899
+{
7900
+ int ret = 0;
7901
+
7902
+ if (ring == NULL) {
7903
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7904
+ __FUNCTION__));
7905
+ return BCME_ERROR;
7906
+ }
7907
+ if (file) {
7908
+ ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
7909
+ ((unsigned long)(ring->max_items) * (ring->item_len)));
7910
+ if (ret < 0) {
7911
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7912
+ ret = BCME_ERROR;
7913
+ }
7914
+ } else if (user_buf) {
7915
+ ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
7916
+ ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
7917
+ }
7918
+ return ret;
7919
+}
7920
+#endif /* DHD_DUMP_PCIE_RINGS */
7921
+
7922
+#ifdef EWP_EDL
7923
+/* Write to file */
7924
+static
7925
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
7926
+ unsigned long *file_posn)
7927
+{
7928
+ int ret = 0, nitems = 0;
7929
+ char *buf = NULL, *ptr = NULL;
7930
+ uint8 *msg_addr = NULL;
7931
+ uint16 rd = 0;
7932
+
7933
+ if (ring == NULL) {
7934
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7935
+ __FUNCTION__));
7936
+ ret = BCME_ERROR;
7937
+ goto done;
7938
+ }
7939
+
7940
+ buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7941
+ if (buf == NULL) {
7942
+ DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
7943
+ ret = BCME_ERROR;
7944
+ goto done;
7945
+ }
7946
+ ptr = buf;
7947
+
7948
+ for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
7949
+ msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
7950
+ memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
7951
+ ptr += D2HRING_EDL_HDR_SIZE;
7952
+ }
7953
+ if (file) {
7954
+ ret = dhd_os_write_file_posn(file, file_posn, buf,
7955
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
7956
+ if (ret < 0) {
7957
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7958
+ goto done;
7959
+ }
7960
+ }
7961
+ else {
7962
+ ret = dhd_export_debug_data(buf, NULL, user_buf,
7963
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
7964
+ }
7965
+
7966
+done:
7967
+ if (buf) {
7968
+ MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7969
+ }
7970
+ return ret;
7971
+}
7972
+#endif /* EWP_EDL */
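dhd_edl_ring_hdr_write() above deliberately captures only the first D2HRING_EDL_HDR_SIZE bytes of each of the D2HRING_EDL_MAX_ITEM items, concatenated back to back. A hedged sketch of how a post-processing tool might walk such a header-only dump; the two constants are placeholders, not the real values (which are defined outside this file):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EX_EDL_MAX_ITEM  256u   /* placeholder for D2HRING_EDL_MAX_ITEM */
#define EX_EDL_HDR_SIZE  16u    /* placeholder for D2HRING_EDL_HDR_SIZE */

/* Walk a header-only EDL dump: item i's header starts at i * EX_EDL_HDR_SIZE. */
static void walk_edl_hdr_dump(const uint8_t *dump)
{
	unsigned i;
	for (i = 0; i < EX_EDL_MAX_ITEM; i++) {
		const uint8_t *hdr = dump + (size_t)i * EX_EDL_HDR_SIZE;
		/* A real parser would decode the EDL event header here. */
		printf("item %u: first byte 0x%02x\n", i, hdr[0]);
	}
}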
47577973
47587974 /** Add prot dump output to a buffer */
47597975 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
47607976 {
47617977
4762
-#if defined(PCIE_D2H_SYNC)
47637978 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
47647979 bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
47657980 else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
....@@ -4768,12 +7983,16 @@
47687983 bcm_bprintf(b, "\nd2h_sync: NONE:");
47697984 bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
47707985 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
4771
-#endif /* PCIE_D2H_SYNC */
47727986
47737987 bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
4774
- DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support),
4775
- DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support),
7988
+ dhd->dma_h2d_ring_upd_support,
7989
+ dhd->dma_d2h_ring_upd_support,
47767990 dhd->prot->rw_index_sz);
7991
+ bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
7992
+ h2d_max_txpost, dhd->prot->h2d_max_txpost);
7993
+ bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
7994
+ bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
7995
+ bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
47777996 }
47787997
47797998 /* Update local copy of dongle statistics */
....@@ -4800,13 +8019,13 @@
48008019 dhd_prot_t *prot = dhd->prot;
48018020 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
48028021
4803
- DHD_GENERAL_LOCK(dhd, flags);
8022
+ DHD_RING_LOCK(ring->ring_lock, flags);
48048023
48058024 hevent = (hostevent_hdr_t *)
48068025 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
48078026
48088027 if (hevent == NULL) {
4809
- DHD_GENERAL_UNLOCK(dhd, flags);
8028
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
48108029 return -1;
48118030 }
48128031
....@@ -4815,6 +8034,7 @@
48158034 ring->seqnum++;
48168035 hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
48178036 hevent->msg.if_id = 0;
8037
+ hevent->msg.flags = ring->current_phase;
48188038
48198039 /* Event payload */
48208040 hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
....@@ -4823,7 +8043,8 @@
48238043 * from the msgbuf, we can directly call the write_complete
48248044 */
48258045 dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
4826
- DHD_GENERAL_UNLOCK(dhd, flags);
8046
+
8047
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
48278048
48288049 return 0;
48298050 }
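The function above is the simplest instance of the submission pattern this file uses everywhere: take the ring lock, reserve space with dhd_prot_alloc_ring_space(), fill the common message header (type, interface, phase flag, epoch derived from the ring sequence number), then dhd_prot_ring_write_complete() and unlock. A hedged, self-contained model of just the header bookkeeping; the type names and the modulo value are stand-ins, not the driver's:

#include <stdint.h>

#define EX_EPOCH_MODULO 253u            /* stand-in for H2D_EPOCH_MODULO */

struct ex_ring_state {
	uint32_t seqnum;                /* source of the per-message epoch */
	uint8_t  current_phase;         /* toggled when the write pointer wraps */
};

struct ex_cmn_hdr {
	uint8_t msg_type;
	uint8_t if_id;
	uint8_t flags;
	uint8_t epoch;
};

/* Fill the common header the way the submit paths above do. */
static void ex_fill_cmn_hdr(struct ex_ring_state *ring, struct ex_cmn_hdr *hdr,
	uint8_t msg_type, uint8_t if_id)
{
	hdr->msg_type = msg_type;
	hdr->if_id = if_id;
	hdr->flags = ring->current_phase;        /* phase bit travels in flags */
	hdr->epoch = ring->seqnum % EX_EPOCH_MODULO;
	ring->seqnum++;                          /* one epoch tick per message */
}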
....@@ -4842,11 +8063,22 @@
48428063 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
48438064
48448065 if (ret_buf == NULL) {
8066
+ /* HWA TODO, need to get RD pointer from different array
8067
+ * which HWA will directly write into host memory
8068
+ */
48458069 /* if alloc failed , invalidate cached read ptr */
4846
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
8070
+ if (dhd->dma_d2h_ring_upd_support) {
48478071 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
48488072 } else {
48498073 dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
8074
+#ifdef SUPPORT_LINKDOWN_RECOVERY
8075
+ /* Check if ring->rd is valid */
8076
+ if (ring->rd >= ring->max_items) {
8077
+ DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
8078
+ dhd->bus->read_shm_fail = TRUE;
8079
+ return NULL;
8080
+ }
8081
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
48508082 }
48518083
48528084 /* Try allocating once more */
....@@ -4856,6 +8088,11 @@
48568088 DHD_INFO(("%s: Ring space not available \n", ring->name));
48578089 return NULL;
48588090 }
8091
+ }
8092
+
8093
+ if (ret_buf == HOST_RING_BASE(ring)) {
8094
+ DHD_INFO(("%s: setting the phase now\n", ring->name));
8095
+ ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
48598096 }
48608097
48618098 /* Return alloced space */
....@@ -4879,20 +8116,24 @@
48798116 uint16 alloced = 0;
48808117 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
48818118
8119
+ if (dhd_query_bus_erros(dhd)) {
8120
+ return -EIO;
8121
+ }
8122
+
48828123 rqstlen = len;
48838124 resplen = len;
48848125
48858126 /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
48868127 /* 8K allocation of dongle buffer fails */
48878128 /* dhd doesn't give separate input & output buf lens */
4888
- /* so making the assumption that input length can never be more than 1.5k */
4889
- rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
8129
+ /* so making the assumption that input length can never be more than 2k */
8130
+ rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
48908131
4891
- DHD_GENERAL_LOCK(dhd, flags);
8132
+ DHD_RING_LOCK(ring->ring_lock, flags);
48928133
48938134 if (prot->ioctl_state) {
48948135 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
4895
- DHD_GENERAL_UNLOCK(dhd, flags);
8136
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
48968137 return BCME_BUSY;
48978138 } else {
48988139 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
....@@ -4906,14 +8147,14 @@
49068147 prot->ioctl_state = 0;
49078148 prot->curr_ioctl_cmd = 0;
49088149 prot->ioctl_received = IOCTL_WAIT;
4909
- DHD_GENERAL_UNLOCK(dhd, flags);
8150
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
49108151 return -1;
49118152 }
49128153
49138154 /* Common msg buf hdr */
49148155 ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
49158156 ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
4916
- ioct_rqst->cmn_hdr.flags = 0;
8157
+ ioct_rqst->cmn_hdr.flags = ring->current_phase;
49178158 ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
49188159 ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
49198160 ring->seqnum++;
....@@ -4931,15 +8172,15 @@
49318172 /* copy ioct payload */
49328173 ioct_buf = (void *) prot->ioctbuf.va;
49338174
4934
- if (buf) {
8175
+ prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
8176
+
8177
+ if (buf)
49358178 memcpy(ioct_buf, buf, len);
4936
- }
49378179
49388180 OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
49398181
4940
- if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
8182
+ if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
49418183 DHD_ERROR(("host ioct address unaligned !!!!! \n"));
4942
- }
49438184
49448185 DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
49458186 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
....@@ -4947,11 +8188,11 @@
49478188
49488189 /* update ring's WR index and ring doorbell to dongle */
49498190 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
4950
- DHD_GENERAL_UNLOCK(dhd, flags);
8191
+
8192
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
49518193
49528194 return 0;
49538195 } /* dhd_fillup_ioct_reqst */
4954
-
49558196
49568197 /**
49578198 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
....@@ -4971,6 +8212,8 @@
49718212 int dma_buf_alloced = BCME_NOMEM;
49728213 uint32 dma_buf_len = max_items * item_len;
49738214 dhd_prot_t *prot = dhd->prot;
8215
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8216
+ dhd_dma_buf_t *dma_buf = NULL;
49748217
49758218 ASSERT(ring);
49768219 ASSERT(name);
....@@ -4986,13 +8229,13 @@
49868229 ring->item_len = item_len;
49878230
49888231 /* A contiguous space may be reserved for all flowrings */
4989
- if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) {
8232
+ if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
49908233 /* Carve out from the contiguous DMA-able flowring buffer */
49918234 uint16 flowid;
49928235 uint32 base_offset;
49938236
4994
- dhd_dma_buf_t *dma_buf = &ring->dma_buf;
49958237 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
8238
+ dma_buf = &ring->dma_buf;
49968239
49978240 flowid = DHD_RINGID_TO_FLOWID(ringid);
49988241 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
....@@ -5012,10 +8255,24 @@
50128255
50138256 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
50148257 } else {
5015
- /* Allocate a dhd_dma_buf */
5016
- dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
5017
- if (dma_buf_alloced != BCME_OK) {
5018
- return BCME_NOMEM;
8258
+#ifdef EWP_EDL
8259
+ if (ring == dhd->prot->d2hring_edl) {
8260
+ /* For EDL ring, memory is alloced during attach,
8261
+ * so just need to copy the dma_buf to the ring's dma_buf
8262
+ */
8263
+ memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
8264
+ dma_buf = &ring->dma_buf;
8265
+ if (dma_buf->va == NULL) {
8266
+ return BCME_NOMEM;
8267
+ }
8268
+ } else
8269
+#endif /* EWP_EDL */
8270
+ {
8271
+ /* Allocate a dhd_dma_buf */
8272
+ dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
8273
+ if (dma_buf_alloced != BCME_OK) {
8274
+ return BCME_NOMEM;
8275
+ }
50198276 }
50208277 }
50218278
....@@ -5030,6 +8287,8 @@
50308287 }
50318288 }
50328289 #endif /* BCM_SECURE_DMA */
8290
+
8291
+ ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
50338292
50348293 DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
50358294 "ring start %p buf phys addr %x:%x \n",
....@@ -5050,7 +8309,6 @@
50508309
50518310 } /* dhd_prot_ring_attach */
50528311
5053
-
50548312 /**
50558313 * dhd_prot_ring_init - Post the common ring information to dongle.
50568314 *
....@@ -5065,6 +8323,12 @@
50658323 {
50668324 ring->wr = 0;
50678325 ring->rd = 0;
8326
+ ring->curr_rd = 0;
8327
+ /* Reset hwa_db_type for all rings,
8328
+ * for data path rings, it will be assigned separately post init
8329
+ * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
8330
+ */
8331
+ ring->hwa_db_type = 0;
50688332
50698333 /* CAUTION: ring::base_addr already in Little Endian */
50708334 dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
....@@ -5084,7 +8348,6 @@
50848348
50858349 } /* dhd_prot_ring_init */
50868350
5087
-
50888351 /**
50898352 * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
50908353 * Reset WR and RD indices to 0.
....@@ -5097,8 +8360,10 @@
50978360 dhd_dma_buf_reset(dhd, &ring->dma_buf);
50988361
50998362 ring->rd = ring->wr = 0;
8363
+ ring->curr_rd = 0;
8364
+ ring->inited = FALSE;
8365
+ ring->create_pending = FALSE;
51008366 }
5101
-
51028367
51038368 /**
51048369 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
....@@ -5108,6 +8373,7 @@
51088373 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
51098374 {
51108375 dhd_prot_t *prot = dhd->prot;
8376
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
51118377 ASSERT(ring);
51128378
51138379 ring->inited = FALSE;
....@@ -5126,76 +8392,26 @@
51268392 /* If the DMA-able buffer was carved out of a pre-reserved contiguous
51278393 * memory, then simply stop using it.
51288394 */
5129
- if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) {
8395
+ if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
51308396 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
51318397 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
51328398 } else {
51338399 dhd_dma_buf_free(dhd, &ring->dma_buf);
51348400 }
51358401
8402
+ dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
8403
+
51368404 } /* dhd_prot_ring_detach */
51378405
5138
-
5139
-/*
5140
- * +----------------------------------------------------------------------------
5141
- * Flowring Pool
5142
- *
5143
- * Unlike common rings, which are attached very early on (dhd_prot_attach),
5144
- * flowrings are dynamically instantiated. Moreover, flowrings may require a
5145
- * larger DMA-able buffer. To avoid issues with fragmented cache coherent
5146
- * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
5147
- * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
5148
- *
5149
- * Each DMA-able buffer may be allocated independently, or may be carved out
5150
- * of a single large contiguous region that is registered with the protocol
5151
- * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
5152
- * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
5153
- *
5154
- * No flowring pool action is performed in dhd_prot_attach(), as the number
5155
- * of h2d rings is not yet known.
5156
- *
5157
- * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
5158
- * determine the number of flowrings required, and a pool of msgbuf_rings are
5159
- * allocated and a DMA-able buffer (carved or allocated) is attached.
5160
- * See: dhd_prot_flowrings_pool_attach()
5161
- *
5162
- * A flowring msgbuf_ring object may be fetched from this pool during flowring
5163
- * creation, using the flowid. Likewise, flowrings may be freed back into the
5164
- * pool on flowring deletion.
5165
- * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
5166
- *
5167
- * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
5168
- * are detached (returned back to the carved region or freed), and the pool of
5169
- * msgbuf_ring and any objects allocated against it are freed.
5170
- * See: dhd_prot_flowrings_pool_detach()
5171
- *
5172
- * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
5173
- * state as-if upon an attach. All DMA-able buffers are retained.
5174
- * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
5175
- * pool attach will notice that the pool persists and continue to use it. This
5176
- * will avoid the case of a fragmented DMA-able region.
5177
- *
5178
- * +----------------------------------------------------------------------------
5179
- */
5180
-
51818406 /* Fetch number of H2D flowrings given the total number of h2d rings */
5182
-#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \
5183
- ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS)
5184
-
5185
-/* Conversion of a flowid to a flowring pool index */
5186
-#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
5187
- ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
5188
-
5189
-/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
5190
-#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
5191
- (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid)
5192
-
5193
-/* Traverse each flowring in the flowring pool, assigning ring and flowid */
5194
-#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \
5195
- for ((flowid) = DHD_FLOWRING_START_FLOWID, \
5196
- (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
5197
- (flowid) < (prot)->h2d_rings_total; \
5198
- (flowid)++, (ring)++)
8407
+uint16
8408
+dhd_get_max_flow_rings(dhd_pub_t *dhd)
8409
+{
8410
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
8411
+ return dhd->bus->max_tx_flowrings;
8412
+ else
8413
+ return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
8414
+}
51998415
52008416 /**
52018417 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
....@@ -5223,9 +8439,8 @@
52238439 dhd_prot_t *prot = dhd->prot;
52248440 char ring_name[RING_NAME_MAX_LENGTH];
52258441
5226
- if (prot->h2d_flowrings_pool != NULL) {
8442
+ if (prot->h2d_flowrings_pool != NULL)
52278443 return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
5228
- }
52298444
52308445 ASSERT(prot->h2d_rings_total == 0);
52318446
....@@ -5239,7 +8454,7 @@
52398454 }
52408455
52418456 /* Subtract number of H2D common rings, to determine number of flowrings */
5242
- h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
8457
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
52438458
52448459 DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
52458460
....@@ -5254,14 +8469,18 @@
52548469 }
52558470
52568471 /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
5257
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
8472
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
52588473 snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
5259
- ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
52608474 if (dhd_prot_ring_attach(dhd, ring, ring_name,
5261
- H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
8475
+ prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
52628476 DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
52638477 goto attach_fail;
52648478 }
8479
+ /*
8480
+	 * TODO - Currently flowring HWA is disabled; it can be enabled like below:
8481
+ * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
8482
+ */
8483
+ ring->hwa_db_type = 0;
52658484 }
52668485
52678486 return BCME_OK;
....@@ -5274,7 +8493,6 @@
52748493 return BCME_NOMEM;
52758494
52768495 } /* dhd_prot_flowrings_pool_attach */
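The pool above holds one msgbuf_ring_t per flowring; the first BCMPCIE_H2D_COMMON_MSGRINGS ring ids are the common (non-flow) rings, so a flowring's pool slot — and its carve-out inside the contiguous flowrings DMA buffer — is its flowid minus that constant, matching the base_offset computation in dhd_prot_ring_attach(). A self-contained sketch of that mapping, with a placeholder value for the constant:

#include <stddef.h>

#define EX_H2D_COMMON_MSGRINGS 2u   /* placeholder for BCMPCIE_H2D_COMMON_MSGRINGS */

/* Pool slot of a flowring: flowids below the constant are common rings. */
static unsigned flowring_pool_slot(unsigned flowid)
{
	return flowid - EX_H2D_COMMON_MSGRINGS;
}

/* Byte offset of this flowring's carve-out in the contiguous DMA region,
 * mirroring base_offset = slot * dma_buf_len in dhd_prot_ring_attach(). */
static size_t flowring_carveout_offset(unsigned flowid, size_t dma_buf_len)
{
	return (size_t)flowring_pool_slot(flowid) * dma_buf_len;
}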
5277
-
52788496
52798497 /**
52808498 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
....@@ -5294,7 +8512,7 @@
52948512 static void
52958513 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
52968514 {
5297
- uint16 flowid;
8515
+ uint16 flowid, h2d_flowrings_total;
52988516 msgbuf_ring_t *ring;
52998517 dhd_prot_t *prot = dhd->prot;
53008518
....@@ -5302,16 +8520,15 @@
53028520 ASSERT(prot->h2d_rings_total == 0);
53038521 return;
53048522 }
5305
-
8523
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
53068524 /* Reset each flowring in the flowring pool */
5307
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
8525
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
53088526 dhd_prot_ring_reset(dhd, ring);
53098527 ring->inited = FALSE;
53108528 }
53118529
53128530 /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
53138531 }
5314
-
53158532
53168533 /**
53178534 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
....@@ -5324,7 +8541,7 @@
53248541 {
53258542 int flowid;
53268543 msgbuf_ring_t *ring;
5327
- int h2d_flowrings_total; /* exclude H2D common rings */
8544
+ uint16 h2d_flowrings_total; /* exclude H2D common rings */
53288545 dhd_prot_t *prot = dhd->prot;
53298546
53308547 if (prot->h2d_flowrings_pool == NULL) {
....@@ -5332,12 +8549,11 @@
53328549 return;
53338550 }
53348551
8552
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
53358553 /* Detach the DMA-able buffer for each flowring in the flowring pool */
5336
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
8554
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
53378555 dhd_prot_ring_detach(dhd, ring);
53388556 }
5339
-
5340
- h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
53418557
53428558 MFREE(prot->osh, prot->h2d_flowrings_pool,
53438559 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
....@@ -5346,7 +8562,6 @@
53468562 prot->h2d_rings_total = 0;
53478563
53488564 } /* dhd_prot_flowrings_pool_detach */
5349
-
53508565
53518566 /**
53528567 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
....@@ -5374,11 +8589,15 @@
53748589
53758590 ring->wr = 0;
53768591 ring->rd = 0;
8592
+ ring->curr_rd = 0;
53778593 ring->inited = TRUE;
5378
-
8594
+ /**
8595
+ * Every time a flowring starts dynamically, initialize current_phase with 0
8596
+ * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
8597
+ */
8598
+ ring->current_phase = 0;
53798599 return ring;
53808600 }
5381
-
53828601
53838602 /**
53848603 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
....@@ -5404,8 +8623,9 @@
54048623 ring->wr = 0;
54058624 ring->rd = 0;
54068625 ring->inited = FALSE;
5407
-}
54088626
8627
+ ring->curr_rd = 0;
8628
+}
54098629
54108630 /* Assumes only one index is updated at a time */
54118631 /* If exactly_nitems is true, this function will allocate space for nitems or fail */
....@@ -5435,11 +8655,11 @@
54358655 ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
54368656
54378657 /* Update write index */
5438
- if ((ring->wr + *alloced) == ring->max_items) {
8658
+ if ((ring->wr + *alloced) == ring->max_items)
54398659 ring->wr = 0;
5440
- } else if ((ring->wr + *alloced) < ring->max_items) {
8660
+ else if ((ring->wr + *alloced) < ring->max_items)
54418661 ring->wr += *alloced;
5442
- } else {
8662
+ else {
54438663 /* Should never hit this */
54448664 ASSERT(0);
54458665 return NULL;
....@@ -5448,35 +8668,97 @@
54488668 return ret_ptr;
54498669 } /* dhd_prot_get_ring_space */
54508670
5451
-
54528671 /**
54538672 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
54548673 * new messages in a H2D ring. The messages are flushed from cache prior to
54558674 * posting the new WR index. The new WR index will be updated in the DMA index
54568675 * array or directly in the dongle's ring state memory.
54578676 * A PCIE doorbell will be generated to wake up the dongle.
8677
+ * This is a non-atomic function, make sure the callers
8678
+ * always hold appropriate locks.
54588679 */
54598680 static void BCMFASTPATH
5460
-dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8681
+__dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
54618682 uint16 nitems)
54628683 {
54638684 dhd_prot_t *prot = dhd->prot;
8685
+ uint32 db_index;
8686
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8687
+ uint corerev;
54648688
54658689 /* cache flush */
54668690 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
54678691
5468
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
5469
- dhd_prot_dma_indx_set(dhd, ring->wr,
5470
- H2D_DMA_INDX_WR_UPD, ring->idx);
8692
+ /* For HWA, update db_index and ring mb2 DB and return */
8693
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8694
+ db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
8695
+ DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
8696
+ __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
8697
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8698
+ return;
8699
+ }
8700
+
8701
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8702
+ dhd_prot_dma_indx_set(dhd, ring->wr,
8703
+ H2D_DMA_INDX_WR_UPD, ring->idx);
8704
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
8705
+ dhd_prot_dma_indx_set(dhd, ring->wr,
8706
+ H2D_IFRM_INDX_WR_UPD, ring->idx);
54718707 } else {
5472
- dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
5473
- sizeof(uint16), RING_WR_UPD, ring->idx);
8708
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8709
+ sizeof(uint16), RING_WR_UPD, ring->idx);
54748710 }
54758711
54768712 /* raise h2d interrupt */
5477
- prot->mb_ring_fn(dhd->bus, ring->wr);
8713
+ if (IDMA_ACTIVE(dhd) ||
8714
+ (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
8715
+ db_index = IDMA_IDX0;
8716
+		/* This API is called in the wl down path; in that case sih has already been freed */
8717
+ if (dhd->bus->sih) {
8718
+ corerev = dhd->bus->sih->buscorerev;
8719
+			/* We need to explicitly configure the type of DMA for core rev >= 24 */
8720
+ if (corerev >= 24) {
8721
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8722
+ }
8723
+ }
8724
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8725
+ } else {
8726
+ prot->mb_ring_fn(dhd->bus, ring->wr);
8727
+ }
54788728 }
54798729
8730
+static void BCMFASTPATH
8731
+dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8732
+ uint16 nitems)
8733
+{
8734
+ unsigned long flags_bus;
8735
+ DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8736
+ __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8737
+ DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8738
+}
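When IDMA or IFRM is active, the doorbell payload handed to mb_2_ring_fn() above is not the write index itself but a small index selector, with an explicit DMA-type field OR-ed in for bus core revisions 24 and newer. A sketch of that composition; the numeric values below are placeholders and only the bit-OR structure mirrors the code:

#include <stdint.h>

#define EX_IDMA_IDX0       0u    /* placeholder for IDMA_IDX0 */
#define EX_DMA_TYPE_IDMA   1u    /* placeholder for DMA_TYPE_IDMA */
#define EX_DMA_TYPE_SHIFT  28u   /* placeholder for DMA_TYPE_SHIFT */

/* Compose the doorbell word for the IDMA case shown above. */
static uint32_t ex_idma_doorbell(unsigned buscorerev)
{
	uint32_t db_index = EX_IDMA_IDX0;

	/* Core revs >= 24 need the DMA type spelled out explicitly. */
	if (buscorerev >= 24)
		db_index |= (EX_DMA_TYPE_IDMA << EX_DMA_TYPE_SHIFT);
	return db_index;
}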
8739
+
8740
+/**
8741
+ * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
8742
+ * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
8743
+ * to indicate D3_INFORM sent in the same BUS_LOCK.
8744
+ */
8745
+static void BCMFASTPATH
8746
+dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
8747
+ uint16 nitems, uint32 mb_data)
8748
+{
8749
+ unsigned long flags_bus;
8750
+
8751
+ DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8752
+
8753
+ __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8754
+
8755
+ /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
8756
+ if (mb_data == H2D_HOST_D3_INFORM) {
8757
+ dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
8758
+ }
8759
+
8760
+ DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8761
+}
54808762
54818763 /**
54828764 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
....@@ -5486,32 +8768,205 @@
54868768 static void
54878769 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
54888770 {
8771
+ dhd_prot_t *prot = dhd->prot;
8772
+ uint32 db_index;
8773
+ uint corerev;
8774
+
8775
+ /* For HWA, update db_index and ring mb2 DB and return */
8776
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8777
+ db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
8778
+ DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
8779
+ __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
8780
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8781
+ return;
8782
+ }
8783
+
54898784 /* update read index */
54908785 /* If dma'ing h2d indices supported
54918786 * update the r -indices in the
54928787 * host memory o/w in TCM
54938788 */
5494
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
8789
+ if (IDMA_ACTIVE(dhd)) {
54958790 dhd_prot_dma_indx_set(dhd, ring->rd,
54968791 D2H_DMA_INDX_RD_UPD, ring->idx);
8792
+ db_index = IDMA_IDX1;
8793
+ if (dhd->bus->sih) {
8794
+ corerev = dhd->bus->sih->buscorerev;
8795
+			/* We need to explicitly configure the type of DMA for core rev >= 24 */
8796
+ if (corerev >= 24) {
8797
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8798
+ }
8799
+ }
8800
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8801
+ } else if (dhd->dma_h2d_ring_upd_support) {
8802
+ dhd_prot_dma_indx_set(dhd, ring->rd,
8803
+ D2H_DMA_INDX_RD_UPD, ring->idx);
54978804 } else {
54988805 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
54998806 sizeof(uint16), RING_RD_UPD, ring->idx);
55008807 }
55018808 }
55028809
8810
+static int
8811
+dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
8812
+ uint16 ring_type, uint32 req_id)
8813
+{
8814
+ unsigned long flags;
8815
+ d2h_ring_create_req_t *d2h_ring;
8816
+ uint16 alloced = 0;
8817
+ int ret = BCME_OK;
8818
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8819
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8820
+
8821
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8822
+
8823
+ DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
8824
+
8825
+ if (ring_to_create == NULL) {
8826
+ DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8827
+ ret = BCME_ERROR;
8828
+ goto err;
8829
+ }
8830
+
8831
+ /* Request for ring buffer space */
8832
+ d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
8833
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8834
+ &alloced, FALSE);
8835
+
8836
+ if (d2h_ring == NULL) {
8837
+ DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
8838
+ __FUNCTION__));
8839
+ ret = BCME_NOMEM;
8840
+ goto err;
8841
+ }
8842
+ ring_to_create->create_req_id = (uint16)req_id;
8843
+ ring_to_create->create_pending = TRUE;
8844
+
8845
+ /* Common msg buf hdr */
8846
+ d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
8847
+ d2h_ring->msg.if_id = 0;
8848
+ d2h_ring->msg.flags = ctrl_ring->current_phase;
8849
+ d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8850
+ d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
8851
+ DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
8852
+ ring_to_create->idx, max_h2d_rings));
8853
+
8854
+ d2h_ring->ring_type = ring_type;
8855
+ d2h_ring->max_items = htol16(ring_to_create->max_items);
8856
+ d2h_ring->len_item = htol16(ring_to_create->item_len);
8857
+ d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8858
+ d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8859
+
8860
+ d2h_ring->flags = 0;
8861
+ d2h_ring->msg.epoch =
8862
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8863
+ ctrl_ring->seqnum++;
8864
+#ifdef EWP_EDL
8865
+ if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
8866
+ DHD_ERROR(("%s: sending d2h EDL ring create: "
8867
+ "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
8868
+ __FUNCTION__, ltoh16(d2h_ring->max_items),
8869
+ ltoh16(d2h_ring->len_item),
8870
+ ltoh16(d2h_ring->ring_id),
8871
+ d2h_ring->ring_ptr.low_addr,
8872
+ d2h_ring->ring_ptr.high_addr));
8873
+ }
8874
+#endif /* EWP_EDL */
8875
+
8876
+ /* Update the flow_ring's WRITE index */
8877
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
8878
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8879
+
8880
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8881
+
8882
+ return ret;
8883
+err:
8884
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8885
+
8886
+ return ret;
8887
+}
8888
+
8889
+static int
8890
+dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
8891
+{
8892
+ unsigned long flags;
8893
+ h2d_ring_create_req_t *h2d_ring;
8894
+ uint16 alloced = 0;
8895
+ uint8 i = 0;
8896
+ int ret = BCME_OK;
8897
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8898
+
8899
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8900
+
8901
+ DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
8902
+
8903
+ if (ring_to_create == NULL) {
8904
+ DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8905
+ ret = BCME_ERROR;
8906
+ goto err;
8907
+ }
8908
+
8909
+ /* Request for ring buffer space */
8910
+ h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
8911
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8912
+ &alloced, FALSE);
8913
+
8914
+ if (h2d_ring == NULL) {
8915
+ DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
8916
+ __FUNCTION__));
8917
+ ret = BCME_NOMEM;
8918
+ goto err;
8919
+ }
8920
+ ring_to_create->create_req_id = (uint16)id;
8921
+ ring_to_create->create_pending = TRUE;
8922
+
8923
+ /* Common msg buf hdr */
8924
+ h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
8925
+ h2d_ring->msg.if_id = 0;
8926
+ h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8927
+ h2d_ring->msg.flags = ctrl_ring->current_phase;
8928
+ h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
8929
+ h2d_ring->ring_type = ring_type;
8930
+ h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
8931
+ h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
8932
+ h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
8933
+ h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8934
+ h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8935
+
8936
+ for (i = 0; i < ring_to_create->n_completion_ids; i++) {
8937
+ h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
8938
+ }
8939
+
8940
+ h2d_ring->flags = 0;
8941
+ h2d_ring->msg.epoch =
8942
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8943
+ ctrl_ring->seqnum++;
8944
+
8945
+ /* Update the flow_ring's WRITE index */
8946
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
8947
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8948
+
8949
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8950
+
8951
+ return ret;
8952
+err:
8953
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8954
+
8955
+ return ret;
8956
+}
55038957
55048958 /**
55058959 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
55068960 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
55078961 * See dhd_prot_dma_indx_init()
55088962 */
5509
-static void
8963
+void
55108964 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
55118965 {
55128966 uint8 *ptr;
55138967 uint16 offset;
55148968 dhd_prot_t *prot = dhd->prot;
8969
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
55158970
55168971 switch (type) {
55178972 case H2D_DMA_INDX_WR_UPD:
....@@ -5521,7 +8976,12 @@
55218976
55228977 case D2H_DMA_INDX_RD_UPD:
55238978 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
5524
- offset = DHD_D2H_RING_OFFSET(ringid);
8979
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
8980
+ break;
8981
+
8982
+ case H2D_IFRM_INDX_WR_UPD:
8983
+ ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
8984
+ offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
55258985 break;
55268986
55278987 default:
....@@ -5542,7 +9002,6 @@
55429002
55439003 } /* dhd_prot_dma_indx_set */
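dhd_prot_dma_indx_set() and dhd_prot_dma_indx_get() address one slot per ring inside a host-resident index array: the slot number ('offset') comes from the ring id, and every slot is rw_index_sz bytes wide, holding a 16-bit ring offset. A self-contained sketch of that addressing, with illustrative names:

#include <stdint.h>
#include <string.h>

/* Store a 16-bit ring index into slot 'offset' of an index array whose
 * slots are rw_index_sz bytes wide (wider slots stay zero beyond 16 bits). */
static void ex_indx_array_set(uint8_t *array_va, uint16_t offset,
	uint16_t rw_index_sz, uint16_t new_index)
{
	memcpy(array_va + (size_t)offset * rw_index_sz, &new_index,
		sizeof(new_index));
}

/* Fetch the 16-bit ring index back from the same slot. */
static uint16_t ex_indx_array_get(const uint8_t *array_va, uint16_t offset,
	uint16_t rw_index_sz)
{
	uint16_t val;

	memcpy(&val, array_va + (size_t)offset * rw_index_sz, sizeof(val));
	return val;
}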
55449004
5545
-
55469005 /**
55479006 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
55489007 * array.
....@@ -5556,6 +9015,7 @@
55569015 uint16 data;
55579016 uint16 offset;
55589017 dhd_prot_t *prot = dhd->prot;
9018
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
55599019
55609020 switch (type) {
55619021 case H2D_DMA_INDX_WR_UPD:
....@@ -5570,12 +9030,12 @@
55709030
55719031 case D2H_DMA_INDX_WR_UPD:
55729032 ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
5573
- offset = DHD_D2H_RING_OFFSET(ringid);
9033
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
55749034 break;
55759035
55769036 case D2H_DMA_INDX_RD_UPD:
55779037 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
5578
- offset = DHD_D2H_RING_OFFSET(ringid);
9038
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
55799039 break;
55809040
55819041 default:
....@@ -5642,37 +9102,41 @@
56429102 switch (type) {
56439103 case H2D_DMA_INDX_WR_BUF:
56449104 dma_buf = &prot->h2d_dma_indx_wr_buf;
5645
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9105
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
56469106 goto ret_no_mem;
5647
- }
56489107 DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
56499108 dma_buf->len, rw_index_sz, length));
56509109 break;
56519110
56529111 case H2D_DMA_INDX_RD_BUF:
56539112 dma_buf = &prot->h2d_dma_indx_rd_buf;
5654
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9113
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
56559114 goto ret_no_mem;
5656
- }
56579115 DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
56589116 dma_buf->len, rw_index_sz, length));
56599117 break;
56609118
56619119 case D2H_DMA_INDX_WR_BUF:
56629120 dma_buf = &prot->d2h_dma_indx_wr_buf;
5663
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9121
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
56649122 goto ret_no_mem;
5665
- }
56669123 DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
56679124 dma_buf->len, rw_index_sz, length));
56689125 break;
56699126
56709127 case D2H_DMA_INDX_RD_BUF:
56719128 dma_buf = &prot->d2h_dma_indx_rd_buf;
5672
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9129
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
56739130 goto ret_no_mem;
5674
- }
56759131 DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
9132
+ dma_buf->len, rw_index_sz, length));
9133
+ break;
9134
+
9135
+ case H2D_IFRM_INDX_WR_BUF:
9136
+ dma_buf = &prot->h2d_ifrm_indx_wr_buf;
9137
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9138
+ goto ret_no_mem;
9139
+ DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
56769140 dma_buf->len, rw_index_sz, length));
56779141 break;
56789142
....@@ -5689,7 +9153,6 @@
56899153 return BCME_NOMEM;
56909154
56919155 } /* dhd_prot_dma_indx_init */
5692
-
56939156
56949157 /**
56959158 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
....@@ -5709,8 +9172,15 @@
57099172 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
57109173 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
57119174
9175
+ /* Remember the read index in a variable.
9176
+	 * This is because ring->rd gets updated at the end of this function,
9177
+	 * so the exact read index a message was consumed from would otherwise
9178
+	 * not be available for logging.
9179
+ */
9180
+ ring->curr_rd = ring->rd;
9181
+
57129182 /* update write pointer */
5713
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
9183
+ if (dhd->dma_d2h_ring_upd_support) {
57149184 /* DMAing write/read indices supported */
57159185 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
57169186 ring->wr = d2h_wr;
....@@ -5724,11 +9194,8 @@
57249194
57259195 /* check for avail space, in number of ring items */
57269196 items = READ_AVAIL_SPACE(wr, rd, depth);
5727
- if (items == 0) {
9197
+ if (items == 0)
57289198 return NULL;
5729
- }
5730
-
5731
- ASSERT(items < ring->max_items);
57329199
57339200 /*
57349201 * Note that there are builds where Assert translates to just printk
....@@ -5736,16 +9203,32 @@
57369203 * dhd_prot_process_msgtype can get into an big loop if this
57379204 * happens.
57389205 */
5739
- if (items >= ring->max_items) {
9206
+ if (items > ring->max_items) {
57409207 DHD_ERROR(("\r\n======================= \r\n"));
57419208 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
57429209 __FUNCTION__, ring, ring->name, ring->max_items, items));
57439210 DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
5744
- DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n",
5745
- dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack));
9211
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
9212
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
57469213 DHD_ERROR(("\r\n======================= \r\n"));
9214
+#ifdef SUPPORT_LINKDOWN_RECOVERY
9215
+ if (wr >= ring->max_items) {
9216
+ dhd->bus->read_shm_fail = TRUE;
9217
+ }
9218
+#else
9219
+#ifdef DHD_FW_COREDUMP
9220
+ if (dhd->memdump_enabled) {
9221
+ /* collect core dump */
9222
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
9223
+ dhd_bus_mem_dump(dhd);
9224
+
9225
+ }
9226
+#endif /* DHD_FW_COREDUMP */
9227
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
57479228
57489229 *available_len = 0;
9230
+ dhd_schedule_reset(dhd);
9231
+
57499232 return NULL;
57509233 }
57519234
....@@ -5753,27 +9236,83 @@
57539236 read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
57549237
57559238 /* update read pointer */
5756
- if ((ring->rd + items) >= ring->max_items) {
9239
+ if ((ring->rd + items) >= ring->max_items)
57579240 ring->rd = 0;
5758
- } else {
9241
+ else
57599242 ring->rd += items;
5760
- }
57619243
57629244 ASSERT(ring->rd < ring->max_items);
57639245
57649246 /* convert items to bytes : available_len must be 32bits */
57659247 *available_len = (uint32)(items * ring->item_len);
57669248
5767
-#ifndef CUSTOMER_HW_31_2
5768
- /* cannot use this since the dma ring is allocated as uncached,
5769
- * this will cause an assertation
5770
- */
57719249 OSL_CACHE_INV(read_addr, *available_len);
5772
-#endif
9250
+
57739251 /* return read address */
57749252 return read_addr;
57759253
57769254 } /* dhd_prot_get_read_addr */
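The items count above comes from READ_AVAIL_SPACE(wr, rd, depth), whose definition is not part of this file. A plausible contiguous-span form — returning only the items up to the wrap point, so one call never reads across the end of the buffer — is sketched below; treat it as an assumption about the macro, not its actual definition:

#include <stdint.h>

/* Items a consumer at 'rd' may read before reaching the producer at 'wr'
 * or the end of a ring of 'depth' items, whichever comes first (the wrap
 * is handled by the caller resetting rd to 0, as dhd_prot_get_read_addr()
 * does above). */
static uint16_t ex_read_avail_space(uint16_t wr, uint16_t rd, uint16_t depth)
{
	return (wr >= rd) ? (uint16_t)(wr - rd) : (uint16_t)(depth - rd);
}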
9255
+
9256
+/**
9257
+ * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
9258
+ * make sure the callers always hold appropriate locks.
9259
+ */
9260
+int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
9261
+{
9262
+ h2d_mailbox_data_t *h2d_mb_data;
9263
+ uint16 alloced = 0;
9264
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
9265
+ unsigned long flags;
9266
+ int num_post = 1;
9267
+ int i;
9268
+
9269
+ DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
9270
+ __FUNCTION__, mb_data));
9271
+ if (!ctrl_ring->inited) {
9272
+ DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
9273
+ return BCME_ERROR;
9274
+ }
9275
+
9276
+ for (i = 0; i < num_post; i ++) {
9277
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9278
+ /* Request for ring buffer space */
9279
+ h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
9280
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
9281
+ &alloced, FALSE);
9282
+
9283
+ if (h2d_mb_data == NULL) {
9284
+ DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
9285
+ __FUNCTION__));
9286
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9287
+ return BCME_NOMEM;
9288
+ }
9289
+
9290
+ memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
9291
+ /* Common msg buf hdr */
9292
+ h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
9293
+ h2d_mb_data->msg.flags = ctrl_ring->current_phase;
9294
+
9295
+ h2d_mb_data->msg.epoch =
9296
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9297
+ ctrl_ring->seqnum++;
9298
+
9299
+		/* Update the mailbox data */
9300
+		h2d_mb_data->mail_box_data = htol32(mb_data);
9304
+
9305
+ DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
9306
+
9307
+ /* upd wrt ptr and raise interrupt */
9308
+ dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
9309
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
9310
+
9311
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9312
+
9313
+ }
9314
+ return 0;
9315
+}
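A hedged usage sketch for the helper above: a suspend path could hand H2D_HOST_D3_INFORM to the dongle through it, relying on dhd_prot_ring_write_complete_mbdata() to mark the bus low-power state within the same bus-lock region. The wrapper below is illustrative only and assumes the driver's own types and macros from the surrounding headers:

/* Illustrative wrapper, not part of the driver: assumes dhd_pub_t, BCME_OK,
 * DHD_ERROR and H2D_HOST_D3_INFORM from the surrounding driver headers. */
static int example_send_d3_inform(dhd_pub_t *dhd)
{
	int err = dhd_prot_h2d_mbdata_send_ctrlmsg(dhd, H2D_HOST_D3_INFORM);

	if (err != BCME_OK) {
		DHD_ERROR(("%s: D3_INFORM submit failed: %d\n", __FUNCTION__, err));
	}
	return err;
}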
57779316
57789317 /** Creates a flow ring and informs dongle of this event */
57799318 int
....@@ -5785,6 +9324,7 @@
57859324 unsigned long flags;
57869325 uint16 alloced = 0;
57879326 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9327
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
57889328
57899329 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
57909330 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
....@@ -5794,7 +9334,7 @@
57949334 return BCME_NOMEM;
57959335 }
57969336
5797
- DHD_GENERAL_LOCK(dhd, flags);
9337
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
57989338
57999339 /* Request for ctrl_ring buffer space */
58009340 flow_create_rqst = (tx_flowring_create_request_t *)
....@@ -5804,7 +9344,7 @@
58049344 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
58059345 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
58069346 __FUNCTION__, flow_ring_node->flowid));
5807
- DHD_GENERAL_UNLOCK(dhd, flags);
9347
+		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
58089348 return BCME_NOMEM;
58099349 }
58109350
....@@ -5814,6 +9354,7 @@
58149354 flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
58159355 flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
58169356 flow_create_rqst->msg.request_id = htol32(0); /* TBD */
9357
+ flow_create_rqst->msg.flags = ctrl_ring->current_phase;
58179358
58189359 flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
58199360 ctrl_ring->seqnum++;
....@@ -5826,17 +9367,47 @@
58269367 /* CAUTION: ring::base_addr already in Little Endian */
58279368 flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
58289369 flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
5829
- flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
9370
+ flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
58309371 flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
9372
+ flow_create_rqst->if_flags = 0;
9373
+
9374
+#ifdef DHD_HP2P
9375
+ /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */
9376
+ /* and traffic is not multicast */
9377
+ /* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */
9378
+ /* Allow only one HP2P Flow active at a time */
9379
+ if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
9380
+ flow_ring_node->flow_info.tid == HP2P_PRIO &&
9381
+ (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
9382
+ !ETHER_ISMULTI(flow_create_rqst->da)) {
9383
+ flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
9384
+ flow_ring_node->hp2p_ring = TRUE;
9385
+ dhd->hp2p_ring_active = TRUE;
9386
+
9387
+ DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
9388
+ __FUNCTION__, flow_ring_node->flow_info.tid,
9389
+ flow_ring_node->flowid));
9390
+ }
9391
+#endif /* DHD_HP2P */
9392
+
9393
+ /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
9394
+ * currently it is not used for priority. so uses solely for ifrm mask
9395
+ */
9396
+ if (IFRM_ACTIVE(dhd))
9397
+ flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
9398
+
58319399 DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
58329400 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
58339401 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
58349402 flow_ring_node->flow_info.ifindex));
58359403
58369404 /* Update the flow_ring's WRITE index */
5837
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
9405
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
58389406 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
5839
- H2D_DMA_INDX_WR_UPD, flow_ring->idx);
9407
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
9408
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
9409
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9410
+ H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
58409411 } else {
58419412 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
58429413 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
....@@ -5845,7 +9416,7 @@
58459416 /* update control subn ring's WR index and ring doorbell to dongle */
58469417 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
58479418
5848
- DHD_GENERAL_UNLOCK(dhd, flags);
9419
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
58499420
58509421 return BCME_OK;
58519422 } /* dhd_prot_flow_ring_create */
....@@ -5865,6 +9436,143 @@
58659436 ltoh16(flow_create_resp->cmplt.status));
58669437 }
58679438
9439
+static void
9440
+dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
9441
+{
9442
+ h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
9443
+ DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9444
+ ltoh16(resp->cmplt.status),
9445
+ ltoh16(resp->cmplt.ring_id),
9446
+ ltoh32(resp->cmn_hdr.request_id)));
9447
+ if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
9448
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
9449
+ DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
9450
+ return;
9451
+ }
9452
+ if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
9453
+ !dhd->prot->h2dring_info_subn->create_pending) {
9454
+ DHD_ERROR(("info ring create status for not pending submit ring\n"));
9455
+ }
9456
+
9457
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9458
+ DHD_ERROR(("info/btlog ring create failed with status %d\n",
9459
+ ltoh16(resp->cmplt.status)));
9460
+ return;
9461
+ }
9462
+ if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
9463
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
9464
+ dhd->prot->h2dring_info_subn->inited = TRUE;
9465
+ DHD_ERROR(("info buffer post after ring create\n"));
9466
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
9467
+ }
9468
+}
9469
+
9470
+static void
9471
+dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
9472
+{
9473
+ d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
9474
+ DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9475
+ ltoh16(resp->cmplt.status),
9476
+ ltoh16(resp->cmplt.ring_id),
9477
+ ltoh32(resp->cmn_hdr.request_id)));
9478
+ if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
9479
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
9480
+#ifdef DHD_HP2P
9481
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
9482
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
9483
+#endif /* DHD_HP2P */
9484
+ TRUE) {
9485
+ DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
9486
+ return;
9487
+ }
9488
+ if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
9489
+#ifdef EWP_EDL
9490
+ if (!dhd->dongle_edl_support)
9491
+#endif // endif
9492
+ {
9493
+ if (!dhd->prot->d2hring_info_cpln->create_pending) {
9494
+ DHD_ERROR(("info ring create status for not pending cpl ring\n"));
9495
+ return;
9496
+ }
9497
+
9498
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9499
+ DHD_ERROR(("info cpl ring create failed with status %d\n",
9500
+ ltoh16(resp->cmplt.status)));
9501
+ return;
9502
+ }
9503
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
9504
+ dhd->prot->d2hring_info_cpln->inited = TRUE;
9505
+ }
9506
+#ifdef EWP_EDL
9507
+ else {
9508
+ if (!dhd->prot->d2hring_edl->create_pending) {
9509
+ DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
9510
+ return;
9511
+ }
9512
+
9513
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9514
+ DHD_ERROR(("edl cpl ring create failed with status %d\n",
9515
+ ltoh16(resp->cmplt.status)));
9516
+ return;
9517
+ }
9518
+ dhd->prot->d2hring_edl->create_pending = FALSE;
9519
+ dhd->prot->d2hring_edl->inited = TRUE;
9520
+ }
9521
+#endif /* EWP_EDL */
9522
+ }
9523
+
9524
+#ifdef DHD_HP2P
9525
+ if (dhd->prot->d2hring_hp2p_txcpl &&
9526
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
9527
+ if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
9528
+ DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
9529
+ return;
9530
+ }
9531
+
9532
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9533
+ DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
9534
+ ltoh16(resp->cmplt.status)));
9535
+ return;
9536
+ }
9537
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
9538
+ dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
9539
+ }
9540
+ if (dhd->prot->d2hring_hp2p_rxcpl &&
9541
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
9542
+ if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
9543
+ DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
9544
+ return;
9545
+ }
9546
+
9547
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9548
+ DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
9549
+ ltoh16(resp->cmplt.status)));
9550
+ return;
9551
+ }
9552
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
9553
+ dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
9554
+ }
9555
+#endif /* DHD_HP2P */
9556
+}
9557
+
9558
+static void
9559
+dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
9560
+{
9561
+ d2h_mailbox_data_t *d2h_data;
9562
+
9563
+ d2h_data = (d2h_mailbox_data_t *)buf;
9564
+ DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
9565
+ d2h_data->d2h_mailbox_data));
9566
+ dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
9567
+}
9568
+
9569
+static void
9570
+dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
9571
+{
9572
+ DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
9573
+
9574
+}
9575
+
58689576 /** called on e.g. flow ring delete */
58699577 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
58709578 {
....@@ -5876,37 +9584,89 @@
58769584 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
58779585 struct bcmstrbuf *strbuf, const char * fmt)
58789586 {
5879
- const char *default_fmt = "RD %d WR %d\n";
9587
+ const char *default_fmt =
9588
+ "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
9589
+ "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
58809590 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
58819591 uint16 rd, wr;
9592
+ uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
58829593
58839594 if (fmt == NULL) {
58849595 fmt = default_fmt;
58859596 }
9597
+
9598
+ if (dhd->bus->is_linkdown) {
9599
+ DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
9600
+ return;
9601
+ }
9602
+
58869603 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
58879604 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
5888
- bcm_bprintf(strbuf, fmt, rd, wr);
9605
+ bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
9606
+ ltoh32(flow_ring->base_addr.high_addr),
9607
+ ltoh32(flow_ring->base_addr.low_addr),
9608
+ flow_ring->item_len, flow_ring->max_items,
9609
+ dma_buf_len);
58899610 }
58909611
58919612 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
58929613 {
58939614 dhd_prot_t *prot = dhd->prot;
5894
- bcm_bprintf(strbuf, "CtrlPost: ");
5895
- dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL);
5896
- bcm_bprintf(strbuf, "CtrlCpl: ");
5897
- dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL);
9615
+ bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
9616
+ dhd->prot->device_ipc_version,
9617
+ dhd->prot->host_ipc_version,
9618
+ dhd->prot->active_ipc_version);
58989619
5899
- bcm_bprintf(strbuf, "RxPost: ");
5900
- bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost);
5901
- dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL);
5902
- bcm_bprintf(strbuf, "RxCpl: ");
5903
- dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL);
9620
+ bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
9621
+ dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
9622
+ bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
9623
+ dhd->prot->max_infobufpost, dhd->prot->infobufpost);
9624
+ bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
9625
+ dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
9626
+ bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
9627
+ dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
9628
+ bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
9629
+ dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
59049630
5905
- bcm_bprintf(strbuf, "TxCpl: ");
5906
- dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL);
5907
- bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n",
5908
- dhd->prot->active_tx_count,
5909
- DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle));
9631
+ bcm_bprintf(strbuf,
9632
+ "%14s %5s %5s %17s %17s %14s %14s %10s\n",
9633
+ "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
9634
+ "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
9635
+ bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
9636
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
9637
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9638
+ bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
9639
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
9640
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9641
+ bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost);
9642
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
9643
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9644
+ bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
9645
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
9646
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9647
+ bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
9648
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
9649
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9650
+ if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
9651
+ bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
9652
+ dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
9653
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9654
+ bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
9655
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
9656
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9657
+ }
9658
+ if (dhd->prot->d2hring_edl != NULL) {
9659
+ bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
9660
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
9661
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9662
+ }
9663
+
9664
+ bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
9665
+ OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
9666
+ DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
9667
+ DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
9668
+ DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
9669
+
59109670 }
59119671
59129672 int
....@@ -5918,14 +9678,14 @@
59189678 uint16 alloced = 0;
59199679 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
59209680
5921
- DHD_GENERAL_LOCK(dhd, flags);
9681
+ DHD_RING_LOCK(ring->ring_lock, flags);
59229682
59239683 /* Request for ring buffer space */
59249684 flow_delete_rqst = (tx_flowring_delete_request_t *)
59259685 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
59269686
59279687 if (flow_delete_rqst == NULL) {
5928
- DHD_GENERAL_UNLOCK(dhd, flags);
9688
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
59299689 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
59309690 return BCME_NOMEM;
59319691 }
....@@ -5934,6 +9694,7 @@
59349694 flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
59359695 flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
59369696 flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
9697
+ flow_delete_rqst->msg.flags = ring->current_phase;
59379698
59389699 flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
59399700 ring->seqnum++;
....@@ -5949,9 +9710,39 @@
59499710
59509711 /* update ring's WR index and ring doorbell to dongle */
59519712 dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
5952
- DHD_GENERAL_UNLOCK(dhd, flags);
9713
+
9714
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
59539715
59549716 return BCME_OK;
9717
+}
9718
+
9719
+static void BCMFASTPATH
9720
+dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
9721
+{
9722
+ flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
9723
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9724
+ host_txbuf_cmpl_t txstatus;
9725
+ host_txbuf_post_t *txdesc;
9726
+ uint16 wr_idx;
9727
+
9728
+ DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
9729
+ __FUNCTION__, flowid, rd_idx, ring->wr));
9730
+
9731
+ memset(&txstatus, 0, sizeof(txstatus));
9732
+ txstatus.compl_hdr.flow_ring_id = flowid;
9733
+ txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
9734
+ wr_idx = ring->wr;
9735
+
9736
+ while (wr_idx != rd_idx) {
9737
+ if (wr_idx)
9738
+ wr_idx--;
9739
+ else
9740
+ wr_idx = ring->max_items - 1;
9741
+ txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
9742
+ (wr_idx * ring->item_len));
9743
+ txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
9744
+ dhd_prot_txstatus_process(dhd, &txstatus);
9745
+ }
59559746 }
59569747
59579748 static void
....@@ -5962,8 +9753,49 @@
59629753 DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
59639754 flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
59649755
9756
+ if (dhd->fast_delete_ring_support) {
9757
+ dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
9758
+ flow_delete_resp->read_idx);
9759
+ }
59659760 dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
59669761 flow_delete_resp->cmplt.status);
9762
+}
9763
+
9764
+static void
9765
+dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
9766
+{
9767
+#ifdef IDLE_TX_FLOW_MGMT
9768
+ tx_idle_flowring_resume_response_t *flow_resume_resp =
9769
+ (tx_idle_flowring_resume_response_t *)msg;
9770
+
9771
+ DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
9772
+ flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
9773
+
9774
+ dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
9775
+ flow_resume_resp->cmplt.status);
9776
+#endif /* IDLE_TX_FLOW_MGMT */
9777
+}
9778
+
9779
+static void
9780
+dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
9781
+{
9782
+#ifdef IDLE_TX_FLOW_MGMT
9783
+ int16 status;
9784
+ tx_idle_flowring_suspend_response_t *flow_suspend_resp =
9785
+ (tx_idle_flowring_suspend_response_t *)msg;
9786
+ status = flow_suspend_resp->cmplt.status;
9787
+
9788
+ DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
9789
+ __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
9790
+ status));
9791
+
9792
+ if (status != BCME_OK) {
9793
+
9794
+ DHD_ERROR(("%s Error in Suspending Flow rings!!"
9795
+ "Dongle will still be polling idle rings!!Status = %d \n",
9796
+ __FUNCTION__, status));
9797
+ }
9798
+#endif /* IDLE_TX_FLOW_MGMT */
59679799 }
59689800
59699801 int
....@@ -5975,13 +9807,13 @@
59759807 uint16 alloced = 0;
59769808 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
59779809
5978
- DHD_GENERAL_LOCK(dhd, flags);
9810
+ DHD_RING_LOCK(ring->ring_lock, flags);
59799811
59809812 /* Request for ring buffer space */
59819813 flow_flush_rqst = (tx_flowring_flush_request_t *)
59829814 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
59839815 if (flow_flush_rqst == NULL) {
5984
- DHD_GENERAL_UNLOCK(dhd, flags);
9816
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
59859817 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
59869818 return BCME_NOMEM;
59879819 }
....@@ -5990,7 +9822,7 @@
59909822 flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
59919823 flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
59929824 flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
5993
-
9825
+ flow_flush_rqst->msg.flags = ring->current_phase;
59949826 flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
59959827 ring->seqnum++;
59969828
....@@ -6001,7 +9833,8 @@
60019833
60029834 /* update ring's WR index and ring doorbell to dongle */
60039835 dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
6004
- DHD_GENERAL_UNLOCK(dhd, flags);
9836
+
9837
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
60059838
60069839 return BCME_OK;
60079840 } /* dhd_prot_flow_ring_flush */
....@@ -6039,13 +9872,13 @@
60399872 const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
60409873
60419874 /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
6042
- DHD_GENERAL_LOCK(dhd, flags);
9875
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
60439876 msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
60449877
60459878 if (msg_start == NULL) {
60469879 DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
60479880 __FUNCTION__, d2h_rings));
6048
- DHD_GENERAL_UNLOCK(dhd, flags);
9881
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
60499882 return;
60509883 }
60519884
....@@ -6091,17 +9924,501 @@
60919924
60929925 /* update control subn ring's WR index and ring doorbell to dongle */
60939926 dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
6094
- DHD_GENERAL_UNLOCK(dhd, flags);
9927
+
9928
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9929
+
60959930 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
60969931 }
60979932
60989933 static void
6099
-dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg)
9934
+dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
61009935 {
61019936 DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
61029937 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
61039938 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
61049939 }
9940
+
9941
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9942
+void
9943
+copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
9944
+{
9945
+ uint32 *ext_data = dhd->extended_trap_data;
9946
+ hnd_ext_trap_hdr_t *hdr;
9947
+ const bcm_tlv_t *tlv;
9948
+
9949
+ if (ext_data == NULL) {
9950
+ return;
9951
+ }
9952
+ /* First word is original trap_data */
9953
+ ext_data++;
9954
+
9955
+ /* Followed by the extended trap data header */
9956
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
9957
+
9958
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
9959
+ if (tlv) {
9960
+ memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
9961
+ }
9962
+}
9963
+#define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
9964
+
9965
+typedef struct {
9966
+ char name[HANG_INFO_TRAP_T_NAME_MAX];
9967
+ uint32 offset;
9968
+} hang_info_trap_t;
9969
+
9970
+#ifdef DHD_EWPR_VER2
9971
+static hang_info_trap_t hang_info_trap_tbl[] = {
9972
+ {"reason", 0},
9973
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
9974
+ {"stype", 0},
9975
+ TRAP_T_NAME_OFFSET(type),
9976
+ TRAP_T_NAME_OFFSET(epc),
9977
+ {"resrvd", 0},
9978
+ {"resrvd", 0},
9979
+ {"resrvd", 0},
9980
+ {"resrvd", 0},
9981
+ {"", 0}
9982
+};
9983
+#else
9984
+static hang_info_trap_t hang_info_trap_tbl[] = {
9985
+ {"reason", 0},
9986
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
9987
+ {"stype", 0},
9988
+ TRAP_T_NAME_OFFSET(type),
9989
+ TRAP_T_NAME_OFFSET(epc),
9990
+ TRAP_T_NAME_OFFSET(cpsr),
9991
+ TRAP_T_NAME_OFFSET(spsr),
9992
+ TRAP_T_NAME_OFFSET(r0),
9993
+ TRAP_T_NAME_OFFSET(r1),
9994
+ TRAP_T_NAME_OFFSET(r2),
9995
+ TRAP_T_NAME_OFFSET(r3),
9996
+ TRAP_T_NAME_OFFSET(r4),
9997
+ TRAP_T_NAME_OFFSET(r5),
9998
+ TRAP_T_NAME_OFFSET(r6),
9999
+ TRAP_T_NAME_OFFSET(r7),
10000
+ TRAP_T_NAME_OFFSET(r8),
10001
+ TRAP_T_NAME_OFFSET(r9),
10002
+ TRAP_T_NAME_OFFSET(r10),
10003
+ TRAP_T_NAME_OFFSET(r11),
10004
+ TRAP_T_NAME_OFFSET(r12),
10005
+ TRAP_T_NAME_OFFSET(r13),
10006
+ TRAP_T_NAME_OFFSET(r14),
10007
+ TRAP_T_NAME_OFFSET(pc),
10008
+ {"", 0}
10009
+};
10010
+#endif /* DHD_EWPR_VER2 */
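The tables above pair each register name with its byte offset inside trap_t (via OFFSETOF), so the dump loops below can walk a raw trap_t as bytes and print every field generically. A self-contained illustration of the same name/offset-table pattern, using the standard offsetof and hypothetical struct and field names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_trap { uint32_t type, epc, cpsr; };
struct field_desc { const char *name; size_t offset; };

#define FIELD(s, f) { #f, offsetof(struct s, f) }

static const struct field_desc demo_tbl[] = {
	FIELD(demo_trap, type), FIELD(demo_trap, epc), FIELD(demo_trap, cpsr),
};

int main(void)
{
	struct demo_trap tr = { 3, 0x1234, 0x1f };
	const uint8_t *base = (const uint8_t *)&tr;
	size_t i;

	/* walk the table and print each named field from its byte offset */
	for (i = 0; i < sizeof(demo_tbl) / sizeof(demo_tbl[0]); i++)
		printf("%s=%08x\n", demo_tbl[i].name,
		       (unsigned)*(const uint32_t *)(base + demo_tbl[i].offset));
	return 0;
}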
10011
+
10012
+#define TAG_TRAP_IS_STATE(tag) \
10013
+ ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
10014
+ (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
10015
+ (tag == TAG_TRAP_CODE))
10016
+
10017
+static void
10018
+copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
10019
+ int *bytes_written, int *cnt, char *cookie)
10020
+{
10021
+ uint8 *ptr;
10022
+ int remain_len;
10023
+ int i;
10024
+
10025
+ ptr = (uint8 *)src;
10026
+
10027
+ memset(dest, 0, len);
10028
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10029
+
10030
+ /* hang reason, hang info ver */
10031
+ for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
10032
+ i++, (*cnt)++) {
10033
+ if (field_name) {
10034
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10035
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10036
+ hang_info_trap_tbl[i].name, HANG_KEY_DEL);
10037
+ }
10038
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10039
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
10040
+ hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
10041
+
10042
+ }
10043
+
10044
+ if (*cnt < HANG_FIELD_CNT_MAX) {
10045
+ if (field_name) {
10046
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10047
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10048
+ "cookie", HANG_KEY_DEL);
10049
+ }
10050
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10051
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
10052
+ cookie, HANG_KEY_DEL);
10053
+ (*cnt)++;
10054
+ }
10055
+
10056
+ if (*cnt < HANG_FIELD_CNT_MAX) {
10057
+ if (field_name) {
10058
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10059
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10060
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
10061
+ HANG_KEY_DEL);
10062
+ }
10063
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10064
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
10065
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
10066
+ HANG_KEY_DEL);
10067
+ (*cnt)++;
10068
+ }
10069
+
10070
+ if (*cnt < HANG_FIELD_CNT_MAX) {
10071
+ if (field_name) {
10072
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10073
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10074
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
10075
+ HANG_KEY_DEL);
10076
+ }
10077
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10078
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
10079
+ *(uint32 *)
10080
+ (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
10081
+ HANG_KEY_DEL);
10082
+ (*cnt)++;
10083
+ }
10084
+#ifdef DHD_EWPR_VER2
10085
+ /* put 0 for HG03 ~ HG06 (reserved for future use) */
10086
+ for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
10087
+ i++, (*cnt)++) {
10088
+ if (field_name) {
10089
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10090
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10091
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
10092
+ HANG_KEY_DEL);
10093
+ }
10094
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10095
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
10096
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
10097
+ HANG_KEY_DEL);
10098
+ }
10099
+#endif /* DHD_EWPR_VER2 */
10100
+}
10101
+#ifndef DHD_EWPR_VER2
10102
+static void
10103
+copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
10104
+ int *bytes_written, int *cnt, char *cookie)
10105
+{
10106
+ uint8 *ptr;
10107
+ int remain_len;
10108
+ int i;
10109
+
10110
+ ptr = (uint8 *)src;
10111
+
10112
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10113
+
10114
+ for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
10115
+ (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
10116
+ i++, (*cnt)++) {
10117
+ if (field_name) {
10118
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10119
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
10120
+ HANG_RAW_DEL, hang_info_trap_tbl[i].name);
10121
+ }
10122
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10123
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10124
+ HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
10125
+ }
10126
+}
10127
+
10128
+/* Ignore compiler warnings due to -Werror=cast-qual */
10129
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
10130
+#pragma GCC diagnostic push
10131
+#pragma GCC diagnostic ignored "-Wcast-qual"
10132
+#endif // endif
10133
+
10134
+static void
10135
+copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10136
+{
10137
+ int remain_len;
10138
+ int i = 0;
10139
+ const uint32 *stack;
10140
+ uint32 *ext_data = dhd->extended_trap_data;
10141
+ hnd_ext_trap_hdr_t *hdr;
10142
+ const bcm_tlv_t *tlv;
10143
+ int remain_stack_cnt = 0;
10144
+ uint32 dummy_data = 0;
10145
+ int bigdata_key_stack_cnt = 0;
10146
+
10147
+ if (ext_data == NULL) {
10148
+ return;
10149
+ }
10150
+ /* First word is original trap_data */
10151
+ ext_data++;
10152
+
10153
+ /* Followed by the extended trap data header */
10154
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
10155
+
10156
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10157
+
10158
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10159
+
10160
+ if (tlv) {
10161
+ stack = (const uint32 *)tlv->data;
10162
+
10163
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
10164
+ "%08x", *(uint32 *)(stack++));
10165
+ (*cnt)++;
10166
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
10167
+ return;
10168
+ }
10169
+ for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
10170
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10171
+ /* Raw data for bigdata uses '_' and key data for bigdata uses a space */
10172
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
10173
+ "%c%08x",
10174
+ i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
10175
+ *(uint32 *)(stack++));
10176
+
10177
+ (*cnt)++;
10178
+ if ((*cnt >= HANG_FIELD_CNT_MAX) ||
10179
+ (i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
10180
+ return;
10181
+ }
10182
+ }
10183
+ }
10184
+
10185
+ remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
10186
+
10187
+ for (i = 0; i < remain_stack_cnt; i++) {
10188
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10189
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10190
+ HANG_RAW_DEL, dummy_data);
10191
+ (*cnt)++;
10192
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
10193
+ return;
10194
+ }
10195
+ }
10196
+
10197
+}
10198
+
10199
+static void
10200
+copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10201
+{
10202
+ int remain_len;
10203
+ int i;
10204
+ const uint32 *data;
10205
+ uint32 *ext_data = dhd->extended_trap_data;
10206
+ hnd_ext_trap_hdr_t *hdr;
10207
+ const bcm_tlv_t *tlv;
10208
+ int remain_trap_data = 0;
10209
+ uint8 buf_u8[sizeof(uint32)] = { 0, };
10210
+ const uint8 *p_u8;
10211
+
10212
+ if (ext_data == NULL) {
10213
+ return;
10214
+ }
10215
+ /* First word is original trap_data */
10216
+ ext_data++;
10217
+
10218
+ /* Followed by the extended trap data header */
10219
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
10220
+
10221
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
10222
+ if (tlv) {
10223
+ /* header length includes the tlv header */
10224
+ remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
10225
+ }
10226
+
10227
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10228
+ if (tlv) {
10229
+ /* header length includes the tlv header */
10230
+ remain_trap_data -= (tlv->len + sizeof(uint16));
10231
+ }
10232
+
10233
+ data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
10234
+
10235
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10236
+
10237
+ for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
10238
+ i++, (*cnt)++) {
10239
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10240
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10241
+ HANG_RAW_DEL, *(uint32 *)(data++));
10242
+ }
10243
+
10244
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
10245
+ return;
10246
+ }
10247
+
10248
+ remain_trap_data -= (sizeof(uint32) * i);
10249
+
10250
+ if (remain_trap_data > sizeof(buf_u8)) {
10251
+ DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
10252
+ remain_trap_data = sizeof(buf_u8);
10253
+ }
10254
+
10255
+ if (remain_trap_data) {
10256
+ p_u8 = (const uint8 *)data;
10257
+ for (i = 0; i < remain_trap_data; i++) {
10258
+ buf_u8[i] = *(const uint8 *)(p_u8++);
10259
+ }
10260
+
10261
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10262
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10263
+ HANG_RAW_DEL, ltoh32_ua(buf_u8));
10264
+ (*cnt)++;
10265
+ }
10266
+}
10267
+#endif /* DHD_EWPR_VER2 */
10268
+
10269
+static void
10270
+get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
10271
+{
10272
+ uint32 i;
10273
+ uint32 *ext_data = dhd->extended_trap_data;
10274
+ hnd_ext_trap_hdr_t *hdr;
10275
+ const bcm_tlv_t *tlv;
10276
+
10277
+ /* First word is original trap_data */
10278
+ ext_data++;
10279
+
10280
+ /* Followed by the extended trap data header */
10281
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
10282
+
10283
+ /* Dump a list of all tags found before parsing data */
10284
+ for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
10285
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
10286
+ if (tlv) {
10287
+ if (!TAG_TRAP_IS_STATE(i)) {
10288
+ *subtype = i;
10289
+ return;
10290
+ }
10291
+ }
10292
+ }
10293
+}
10294
+#ifdef DHD_EWPR_VER2
10295
+static void
10296
+copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10297
+{
10298
+ int remain_len;
10299
+ uint32 *ext_data = dhd->extended_trap_data;
10300
+ hnd_ext_trap_hdr_t *hdr;
10301
+ char *base64_out = NULL;
10302
+ int base64_cnt;
10303
+ int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
10304
+
10305
+ if (ext_data == NULL) {
10306
+ return;
10307
+ }
10308
+ /* First word is original trap_data */
10309
+ ext_data++;
10310
+
10311
+ /* Followed by the extended trap data header */
10312
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
10313
+
10314
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10315
+
10316
+ if (remain_len <= 0) {
10317
+ DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
10318
+ return;
10319
+ }
10320
+
10321
+ if (remain_len < max_base64_len) {
10322
+ DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
10323
+ remain_len));
10324
+ max_base64_len = remain_len;
10325
+ }
10326
+
10327
+ base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
10328
+ if (base64_out == NULL) {
10329
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
10330
+ __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
10331
+ return;
10332
+ }
10333
+
10334
+ if (hdr->len > 0) {
10335
+ base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
10336
+ if (base64_cnt == 0) {
10337
+ DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
10338
+ }
10339
+ }
10340
+
10341
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
10342
+ base64_out);
10343
+ (*cnt)++;
10344
+ MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
10345
+}
10346
+#endif /* DHD_EWPR_VER2 */
10347
+
10348
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
10349
+#pragma GCC diagnostic pop
10350
+#endif // endif
10351
+
10352
+void
10353
+copy_hang_info_trap(dhd_pub_t *dhd)
10354
+{
10355
+ trap_t tr;
10356
+ int bytes_written;
10357
+ int trap_subtype = 0;
10358
+
10359
+ if (!dhd || !dhd->hang_info) {
10360
+ DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
10361
+ dhd, (dhd ? dhd->hang_info : NULL)));
10362
+ return;
10363
+ }
10364
+
10365
+ if (!dhd->dongle_trap_occured) {
10366
+ DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
10367
+ return;
10368
+ }
10369
+
10370
+ memset(&tr, 0x00, sizeof(struct _trap_struct));
10371
+
10372
+ copy_ext_trap_sig(dhd, &tr);
10373
+ get_hang_info_trap_subtype(dhd, &trap_subtype);
10374
+
10375
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
10376
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
10377
+
10378
+ bytes_written = 0;
10379
+ dhd->hang_info_cnt = 0;
10380
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
10381
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
10382
+
10383
+ copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
10384
+ &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
10385
+
10386
+ DHD_INFO(("hang info haed cnt: %d len: %d data: %s\n",
10387
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10388
+
10389
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
10390
+
10391
+#ifdef DHD_EWPR_VER2
10392
+ /* stack info & trap info are included in etd data */
10393
+
10394
+ /* extended trap data dump */
10395
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10396
+ copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10397
+ DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
10398
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10399
+ }
10400
+#else
10401
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10402
+ copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10403
+ DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
10404
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10405
+ }
10406
+
10407
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10408
+ copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
10409
+ &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
10410
+ DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
10411
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10412
+ }
10413
+
10414
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10415
+ copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10416
+ DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
10417
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10418
+ }
10419
+#endif /* DHD_EWPR_VER2 */
10420
+}
10421
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
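Every helper in the hang-info block above appends to the fixed-size dhd->hang_info string with scnprintf, recomputing the remaining space from bytes_written before each write so the buffer cannot overflow and the field count stays bounded by HANG_FIELD_CNT_MAX. A condensed, standalone sketch of that bounded-append pattern (hypothetical buffer size and delimiter; snprintf plus an explicit clamp stands in for scnprintf):

#include <stdio.h>

#define BUF_LEN 64	/* stands in for VENDOR_SEND_HANG_EXT_INFO_LEN */
#define KEY_DEL ' '	/* stands in for HANG_KEY_DEL */

static void append_field(char *dest, int *bytes_written, unsigned value)
{
	int remain = BUF_LEN - *bytes_written;

	if (remain <= 0)
		return;				/* buffer exhausted: drop the field */
	*bytes_written += snprintf(&dest[*bytes_written], remain,
		"%08x%c", value, KEY_DEL);
	if (*bytes_written > BUF_LEN - 1)	/* snprintf reports the desired length */
		*bytes_written = BUF_LEN - 1;	/* clamp as scnprintf would */
}

int main(void)
{
	char buf[BUF_LEN] = "";
	int written = 0;

	append_field(buf, &written, 0x1234);
	append_field(buf, &written, 0xdeadbeef);
	printf("%s\n", buf);
	return 0;
}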
610510422
610610423 int
610710424 dhd_prot_debug_info_print(dhd_pub_t *dhd)
....@@ -6109,93 +10426,266 @@
610910426 dhd_prot_t *prot = dhd->prot;
611010427 msgbuf_ring_t *ring;
611110428 uint16 rd, wr;
6112
- uint32 intstatus = 0;
6113
- uint32 intmask = 0;
6114
- uint32 mbintstatus = 0;
6115
- uint32 d2h_mb_data = 0;
611610429 uint32 dma_buf_len;
10430
+ uint64 current_time;
10431
+ ulong ring_tcm_rd_addr; /* dongle address */
10432
+ ulong ring_tcm_wr_addr; /* dongle address */
10433
+
10434
+ DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
10435
+ DHD_ERROR(("DHD: %s\n", dhd_version));
10436
+ DHD_ERROR(("Firmware: %s\n", fw_version));
10437
+
10438
+ DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
10439
+ DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
10440
+ prot->device_ipc_version,
10441
+ prot->host_ipc_version,
10442
+ prot->active_ipc_version));
10443
+ DHD_ERROR(("d2h_intr_method -> %s\n",
10444
+ dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
10445
+ DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
10446
+ prot->max_tsbufpost, prot->cur_ts_bufs_posted));
10447
+ DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
10448
+ prot->max_infobufpost, prot->infobufpost));
10449
+ DHD_ERROR(("max event bufs to post: %d, posted %d\n",
10450
+ prot->max_eventbufpost, prot->cur_event_bufs_posted));
10451
+ DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
10452
+ prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
10453
+ DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
10454
+ prot->max_rxbufpost, prot->rxbufpost));
10455
+ DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10456
+ h2d_max_txpost, prot->h2d_max_txpost));
10457
+
10458
+ current_time = OSL_LOCALTIME_NS();
10459
+ DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
10460
+ DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
10461
+ " ioctl_ack_time="SEC_USEC_FMT
10462
+ " ioctl_cmplt_time="SEC_USEC_FMT"\n",
10463
+ GET_SEC_USEC(prot->ioctl_fillup_time),
10464
+ GET_SEC_USEC(prot->ioctl_ack_time),
10465
+ GET_SEC_USEC(prot->ioctl_cmplt_time)));
10466
+
10467
+ /* Check PCIe INT registers */
10468
+ if (!dhd_pcie_dump_int_regs(dhd)) {
10469
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
10470
+ dhd->bus->is_linkdown = TRUE;
10471
+ }
611710472
611810473 DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
611910474
612010475 ring = &prot->h2dring_ctrl_subn;
612110476 dma_buf_len = ring->max_items * ring->item_len;
6122
- DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
10477
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10478
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10479
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10480
+ "SIZE %d \r\n",
612310481 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6124
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
10482
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
612510483 DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
6126
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
6127
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
6128
- DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10484
+ if (dhd->bus->is_linkdown) {
10485
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
10486
+ " due to PCIe link down\r\n"));
10487
+ } else {
10488
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10489
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10490
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10491
+ }
10492
+ DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
612910493
613010494 ring = &prot->d2hring_ctrl_cpln;
613110495 dma_buf_len = ring->max_items * ring->item_len;
6132
- DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
10496
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10497
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10498
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10499
+ "SIZE %d \r\n",
613310500 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6134
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
10501
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
613510502 DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
6136
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
6137
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
6138
- DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
6139
- DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum));
10503
+ if (dhd->bus->is_linkdown) {
10504
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
10505
+ " due to PCIe link down\r\n"));
10506
+ } else {
10507
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10508
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10509
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10510
+ }
10511
+ DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
614010512
6141
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6142
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
6143
- mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
6144
- dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
10513
+ ring = prot->h2dring_info_subn;
10514
+ if (ring) {
10515
+ dma_buf_len = ring->max_items * ring->item_len;
10516
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10517
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10518
+ DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10519
+ "SIZE %d \r\n",
10520
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10521
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10522
+ dma_buf_len));
10523
+ DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10524
+ if (dhd->bus->is_linkdown) {
10525
+ DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
10526
+ " due to PCIe link down\r\n"));
10527
+ } else {
10528
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10529
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10530
+ DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10531
+ }
10532
+ DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10533
+ }
10534
+ ring = prot->d2hring_info_cpln;
10535
+ if (ring) {
10536
+ dma_buf_len = ring->max_items * ring->item_len;
10537
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10538
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10539
+ DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10540
+ "SIZE %d \r\n",
10541
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10542
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10543
+ dma_buf_len));
10544
+ DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10545
+ if (dhd->bus->is_linkdown) {
10546
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
10547
+ " due to PCIe link down\r\n"));
10548
+ } else {
10549
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10550
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10551
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10552
+ }
10553
+ DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10554
+ }
614510555
6146
- DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
6147
- DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n,",
6148
- intstatus, intmask, mbintstatus));
6149
- DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask));
10556
+ ring = &prot->d2hring_tx_cpln;
10557
+ if (ring) {
10558
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10559
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10560
+ dma_buf_len = ring->max_items * ring->item_len;
10561
+ DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10562
+ "SIZE %d \r\n",
10563
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10564
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10565
+ dma_buf_len));
10566
+ DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10567
+ if (dhd->bus->is_linkdown) {
10568
+ DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
10569
+ " due to PCIe link down\r\n"));
10570
+ } else {
10571
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10572
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10573
+ DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10574
+ }
10575
+ DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10576
+ }
10577
+
10578
+ ring = &prot->d2hring_rx_cpln;
10579
+ if (ring) {
10580
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10581
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10582
+ dma_buf_len = ring->max_items * ring->item_len;
10583
+ DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10584
+ "SIZE %d \r\n",
10585
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10586
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10587
+ dma_buf_len));
10588
+ DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10589
+ if (dhd->bus->is_linkdown) {
10590
+ DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
10591
+ " due to PCIe link down\r\n"));
10592
+ } else {
10593
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10594
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10595
+ DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10596
+ }
10597
+ DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10598
+ }
10599
+#ifdef EWP_EDL
10600
+ ring = prot->d2hring_edl;
10601
+ if (ring) {
10602
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10603
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10604
+ dma_buf_len = ring->max_items * ring->item_len;
10605
+ DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10606
+ "SIZE %d \r\n",
10607
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10608
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10609
+ dma_buf_len));
10610
+ DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10611
+ if (dhd->bus->is_linkdown) {
10612
+ DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
10613
+ " due to PCIe link down\r\n"));
10614
+ } else {
10615
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10616
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10617
+ DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10618
+ }
10619
+ DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
10620
+ ring->seqnum % D2H_EPOCH_MODULO));
10621
+ }
10622
+#endif /* EWP_EDL */
10623
+
10624
+ DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
10625
+ __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
10626
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
10627
+ DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
10628
+ __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
10629
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
10630
+
10631
+ DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
10632
+ DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
10633
+ DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
10634
+
10635
+ dhd_pcie_debug_info_dump(dhd);
615010636
615110637 return 0;
615210638 }
6153
-
615410639
615510640 int
615610641 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
615710642 {
615810643 uint32 *ptr;
615910644 uint32 value;
6160
- uint32 i;
6161
- uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
616210645
6163
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
6164
- dhd->prot->d2h_dma_indx_wr_buf.len);
10646
+ if (dhd->prot->d2h_dma_indx_wr_buf.va) {
10647
+ uint32 i;
10648
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
616510649
6166
- ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
10650
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
10651
+ dhd->prot->d2h_dma_indx_wr_buf.len);
616710652
6168
- bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
10653
+ ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
616910654
6170
- bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
6171
- value = ltoh32(*ptr);
6172
- bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
6173
- ptr++;
6174
- value = ltoh32(*ptr);
6175
- bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
10655
+ bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
617610656
6177
- ptr++;
6178
- bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
6179
- for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
10657
+ bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
618010658 value = ltoh32(*ptr);
6181
- bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
10659
+ bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
618210660 ptr++;
10661
+ value = ltoh32(*ptr);
10662
+ bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
10663
+
10664
+ ptr++;
10665
+ bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
10666
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
10667
+ value = ltoh32(*ptr);
10668
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
10669
+ ptr++;
10670
+ }
618310671 }
618410672
6185
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
6186
- dhd->prot->h2d_dma_indx_rd_buf.len);
10673
+ if (dhd->prot->h2d_dma_indx_rd_buf.va) {
10674
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
10675
+ dhd->prot->h2d_dma_indx_rd_buf.len);
618710676
6188
- ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
10677
+ ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
618910678
6190
- bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
6191
- value = ltoh32(*ptr);
6192
- bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
6193
- ptr++;
6194
- value = ltoh32(*ptr);
6195
- bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
6196
- ptr++;
6197
- value = ltoh32(*ptr);
6198
- bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
10679
+ bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
10680
+ value = ltoh32(*ptr);
10681
+ bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
10682
+ ptr++;
10683
+ value = ltoh32(*ptr);
10684
+ bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
10685
+ ptr++;
10686
+ value = ltoh32(*ptr);
10687
+ bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
10688
+ }
619910689
620010690 return 0;
620110691 }
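The dump above treats each DMA index block as a flat array of little-endian 32-bit ring pointers: entries for the common rings first, then one per TX flow ring. A small hypothetical sketch of walking such a block:

#include <stdint.h>
#include <stdio.h>

#define COMMON_RINGS 2	/* stands in for BCMPCIE_H2D_COMMON_MSGRINGS */

static void dump_indices(const uint32_t *blk, unsigned total_rings)
{
	unsigned i;

	printf("H2D CTRL:   0x%04x\n", (unsigned)blk[0]);
	printf("H2D RXPOST: 0x%04x\n", (unsigned)blk[1]);
	for (i = COMMON_RINGS; i < total_rings; i++)
		printf("flowring %u: 0x%04x\n", i, (unsigned)blk[i]);
}

int main(void)
{
	uint32_t demo[5] = { 1, 2, 10, 11, 12 };

	dump_indices(demo, 5);
	return 0;
}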
....@@ -6206,7 +10696,7 @@
620610696 dhd_prot_t *prot = dhd->prot;
620710697 #if DHD_DBG_SHOW_METADATA
620810698 prot->metadata_dbg = val;
6209
-#endif
10699
+#endif // endif
621010700 return (uint32)prot->metadata_dbg;
621110701 }
621210702
....@@ -6331,3 +10821,910 @@
633110821 }
633210822
633310823 #endif /* DHD_RX_CHAINING */
10824
+
10825
+#ifdef IDLE_TX_FLOW_MGMT
10826
+int
10827
+dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
10828
+{
10829
+ tx_idle_flowring_resume_request_t *flow_resume_rqst;
10830
+ msgbuf_ring_t *flow_ring;
10831
+ dhd_prot_t *prot = dhd->prot;
10832
+ unsigned long flags;
10833
+ uint16 alloced = 0;
10834
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10835
+
10836
+ /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
10837
+ flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
10838
+ if (flow_ring == NULL) {
10839
+ DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
10840
+ __FUNCTION__, flow_ring_node->flowid));
10841
+ return BCME_NOMEM;
10842
+ }
10843
+
10844
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
10845
+
10846
+ /* Request for ctrl_ring buffer space */
10847
+ flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
10848
+ dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
10849
+
10850
+ if (flow_resume_rqst == NULL) {
10851
+ dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
10852
+ DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
10853
+ __FUNCTION__, flow_ring_node->flowid));
10854
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10855
+ return BCME_NOMEM;
10856
+ }
10857
+
10858
+ flow_ring_node->prot_info = (void *)flow_ring;
10859
+
10860
+ /* Common msg buf hdr */
10861
+ flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
10862
+ flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
10863
+ flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
10864
+
10865
+ flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10866
+ ctrl_ring->seqnum++;
10867
+
10868
+ flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
10869
+ DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
10870
+ __FUNCTION__, flow_ring_node->flowid));
10871
+
10872
+ /* Update the flow_ring's WRITE index */
10873
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
10874
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10875
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
10876
+ } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
10877
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10878
+ H2D_IFRM_INDX_WR_UPD,
10879
+ (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
10880
+ } else {
10881
+ dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
10882
+ sizeof(uint16), RING_WR_UPD, flow_ring->idx);
10883
+ }
10884
+
10885
+ /* update control subn ring's WR index and ring doorbell to dongle */
10886
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
10887
+
10888
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10889
+
10890
+ return BCME_OK;
10891
+} /* dhd_prot_flow_ring_create */
10892
+
10893
+int
10894
+dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
10895
+{
10896
+ tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
10897
+ dhd_prot_t *prot = dhd->prot;
10898
+ unsigned long flags;
10899
+ uint16 index;
10900
+ uint16 alloced = 0;
10901
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10902
+
10903
+ DHD_RING_LOCK(ring->ring_lock, flags);
10904
+
10905
+ /* Request for ring buffer space */
10906
+ flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
10907
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10908
+
10909
+ if (flow_suspend_rqst == NULL) {
10910
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
10911
+ DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
10912
+ return BCME_NOMEM;
10913
+ }
10914
+
10915
+ /* Common msg buf hdr */
10916
+ flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
10917
+ /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
10918
+ flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
10919
+
10920
+ flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10921
+ ring->seqnum++;
10922
+
10923
+ /* Update flow id info */
10924
+ for (index = 0; index < count; index++)
10925
+ {
10926
+ flow_suspend_rqst->ring_id[index] = ringid[index];
10927
+ }
10928
+ flow_suspend_rqst->num = count;
10929
+
10930
+ DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
10931
+
10932
+ /* update ring's WR index and ring doorbell to dongle */
10933
+ dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
10934
+
10935
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
10936
+
10937
+ return BCME_OK;
10938
+}
10939
+#endif /* IDLE_TX_FLOW_MGMT */
10940
+
10941
+static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
10942
+{
10943
+ switch (tag)
10944
+ {
10945
+ case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
10946
+ case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
10947
+ case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
10948
+ case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
10949
+ case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
10950
+ case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
10951
+ case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
10952
+ case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
10953
+ case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
10954
+ case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
10955
+ case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
10956
+ case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
10957
+ case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
10958
+ case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
10959
+ case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
10960
+ case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
10961
+ case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
10962
+ case TAG_TRAP_LAST:
10963
+ default:
10964
+ return "Unknown";
10965
+ }
10966
+ return "Unknown";
10967
+}
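The extended trap records named above are stored as a packed TLV blob: a trap_data word, an hnd_ext_trap_hdr_t, then back-to-back tagged records that bcm_parse_tlvs() looks up by tag. A minimal sketch of that kind of lookup over a hypothetical { id, len, data[] } record layout (the real bcm_tlv_t layout may differ):

#include <stddef.h>
#include <stdint.h>

struct tlv { uint8_t id; uint8_t len; uint8_t data[]; };

static const struct tlv *find_tlv(const uint8_t *buf, size_t buflen, uint8_t id)
{
	size_t off = 0;

	while (off + 2 <= buflen) {
		const struct tlv *t = (const struct tlv *)(buf + off);

		if (off + 2 + t->len > buflen)
			break;			/* truncated record */
		if (t->id == id)
			return t;
		off += 2 + t->len;	/* skip to the next record */
	}
	return NULL;
}

int main(void)
{
	/* two records: id 1 (len 2) and id 7 (len 1) */
	const uint8_t blob[] = { 1, 2, 0xaa, 0xbb, 7, 1, 0xcc };
	const struct tlv *t = find_tlv(blob, sizeof(blob), 7);

	return (t && t->data[0] == 0xcc) ? 0 : 1;
}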
10968
+
10969
+int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
10970
+{
10971
+ uint32 i;
10972
+ uint32 *ext_data;
10973
+ hnd_ext_trap_hdr_t *hdr;
10974
+ const bcm_tlv_t *tlv;
10975
+ const trap_t *tr;
10976
+ const uint32 *stack;
10977
+ const hnd_ext_trap_bp_err_t *bpe;
10978
+ uint32 raw_len;
10979
+
10980
+ ext_data = dhdp->extended_trap_data;
10981
+
10982
+ /* return if there is no extended trap data */
10983
+ if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
10984
+ {
10985
+ bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
10986
+ return BCME_OK;
10987
+ }
10988
+
10989
+ bcm_bprintf(b, "Extended trap data\n");
10990
+
10991
+ /* First word is original trap_data */
10992
+ bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
10993
+ ext_data++;
10994
+
10995
+ /* Followed by the extended trap data header */
10996
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
10997
+ bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
10998
+
10999
+ /* Dump a list of all tags found before parsing data */
11000
+ bcm_bprintf(b, "\nTags Found:\n");
11001
+ for (i = 0; i < TAG_TRAP_LAST; i++) {
11002
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
11003
+ if (tlv)
11004
+ bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
11005
+ }
11006
+
11007
+ if (raw)
11008
+ {
11009
+ raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
11010
+ for (i = 0; i < raw_len; i++)
11011
+ {
11012
+ bcm_bprintf(b, "0x%08x ", ext_data[i]);
11013
+ if (i % 4 == 3)
11014
+ bcm_bprintf(b, "\n");
11015
+ }
11016
+ return BCME_OK;
11017
+ }
11018
+
11019
+ /* Extract the various supported TLVs from the extended trap data */
11020
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
11021
+ if (tlv)
11022
+ {
11023
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
11024
+ bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
11025
+ }
11026
+
11027
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
11028
+ if (tlv)
11029
+ {
11030
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
11031
+ tr = (const trap_t *)tlv->data;
11032
+
11033
+ bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
11034
+ tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
11035
+ bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
11036
+ tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
11037
+ bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
11038
+ tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
11039
+ }
11040
+
11041
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
11042
+ if (tlv)
11043
+ {
11044
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
11045
+ stack = (const uint32 *)tlv->data;
11046
+ for (i = 0; i < (uint32)(tlv->len / 4); i++)
11047
+ {
11048
+ bcm_bprintf(b, " 0x%08x\n", *stack);
11049
+ stack++;
11050
+ }
11051
+ }
11052
+
11053
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
11054
+ if (tlv)
11055
+ {
11056
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
11057
+ bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
11058
+ bcm_bprintf(b, " error: %x\n", bpe->error);
11059
+ bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
11060
+ bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
11061
+ bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
11062
+ bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
11063
+ bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
11064
+ bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
11065
+ bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
11066
+ bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
11067
+ bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
11068
+ bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
11069
+ bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
11070
+ bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
11071
+ bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
11072
+ bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
11073
+ }
11074
+
11075
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
11076
+ if (tlv)
11077
+ {
11078
+ const hnd_ext_trap_heap_err_t* hme;
11079
+
11080
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
11081
+ hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
11082
+ bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
11083
+ bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
11084
+ bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
11085
+ bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
11086
+ bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
11087
+
11088
+ bcm_bprintf(b, " Histogram:\n");
11089
+ for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
11090
+ if (hme->heap_histogm[i] == 0xfffe)
11091
+ bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
11092
+ else if (hme->heap_histogm[i] == 0xffff)
11093
+ bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
11094
+ else
11095
+ bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
11096
+ hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
11097
+ * hme->heap_histogm[i + 1]);
11098
+ }
11099
+
11100
+ bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
11101
+ for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
11102
+ bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
11103
+ }
11104
+ }
11105
+
11106
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
11107
+ if (tlv)
11108
+ {
11109
+ const hnd_ext_trap_pcie_mem_err_t* pqme;
11110
+
11111
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
11112
+ pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
11113
+ bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
11114
+ bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
11115
+ }
11116
+
11117
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
11118
+ if (tlv)
11119
+ {
11120
+ const hnd_ext_trap_wlc_mem_err_t* wsme;
11121
+
11122
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
11123
+ wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
11124
+ bcm_bprintf(b, " instance: %d\n", wsme->instance);
11125
+ bcm_bprintf(b, " associated: %d\n", wsme->associated);
11126
+ bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
11127
+ bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
11128
+ bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
11129
+ bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
11130
+ bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
11131
+ bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
11132
+
11133
+ if (tlv->len >= (sizeof(*wsme) * 2)) {
11134
+ wsme++;
11135
+ bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
11136
+ bcm_bprintf(b, " associated: %d\n", wsme->associated);
11137
+ bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
11138
+ bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
11139
+ bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
11140
+ bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
11141
+ bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
11142
+ bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
11143
+ }
11144
+ }
11145
+
11146
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
11147
+ if (tlv)
11148
+ {
11149
+ const hnd_ext_trap_phydbg_t* phydbg;
11150
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
11151
+ phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
11152
+ bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
11153
+ bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
11154
+ bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
11155
+ bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
11156
+ bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
11157
+ bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
11158
+ bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
11159
+ bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
11160
+ bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
11161
+ bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
11162
+ bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
11163
+ bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
11164
+ bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
11165
+ bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
11166
+ bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
11167
+ bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
11168
+ bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
11169
+ bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
11170
+ bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
11171
+ bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
11172
+ bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
11173
+ bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
11174
+ bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
11175
+ bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
11176
+ bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
11177
+ bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
11178
+ bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
11179
+ for (i = 0; i < 3; i++)
11180
+ bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
11181
+ }
11182
+
11183
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
11184
+ if (tlv)
11185
+ {
11186
+ const hnd_ext_trap_psmwd_t* psmwd;
11187
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
11188
+ psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
11189
+ bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
11190
+ bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
11191
+ bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
11192
+ bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
11193
+ bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
11194
+ bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
11195
+ for (i = 0; i < 3; i++)
11196
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
11197
+ bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
11198
+ bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
11199
+ bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
11200
+ bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
11201
+ bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
11202
+ bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
11203
+ bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
11204
+ bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
11205
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
11206
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
11207
+ bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
11208
+ bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
11209
+ bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
11210
+ bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
11211
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
11212
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
11213
+ bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
11214
+	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
11215
+ bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
11216
+ bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
11217
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
11218
+ bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
11219
+ bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
11220
+ }
11221
+
11222
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
11223
+ if (tlv)
11224
+ {
11225
+ const hnd_ext_trap_macsusp_t* macsusp;
11226
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
11227
+ macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
11228
+ bcm_bprintf(b, " version: %d\n", macsusp->version);
11229
+ bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
11230
+ bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
11231
+ bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
11232
+ bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
11233
+ for (i = 0; i < 4; i++)
11234
+ bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
11235
+ for (i = 0; i < 8; i++)
11236
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
11237
+ bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
11238
+ bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
11239
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
11240
+ bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
11241
+ bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
11242
+ bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
11243
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
11244
+ bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
11245
+ bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
11246
+ bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
11247
+ bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
11248
+ bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
11249
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
11250
+ bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
11251
+ }
11252
+
11253
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
11254
+ if (tlv)
11255
+ {
11256
+ const hnd_ext_trap_macenab_t* macwake;
11257
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
11258
+ macwake = (const hnd_ext_trap_macenab_t *)tlv;
11259
+ bcm_bprintf(b, " version: 0x%x\n", macwake->version);
11260
+ bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
11261
+ bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
11262
+ bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
11263
+ bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
11264
+ for (i = 0; i < 8; i++)
11265
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
11266
+ bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
11267
+ bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
11268
+ bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
11269
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
11270
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
11271
+ bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
11272
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
11273
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
11274
+ bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
11275
+ bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
11276
+ bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
11277
+ bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
11278
+ bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
11279
+ }
11280
+
11281
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
11282
+ if (tlv)
11283
+ {
11284
+ const bcm_dngl_pcie_hc_t* hc;
11285
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
11286
+ hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
11287
+ bcm_bprintf(b, " version: 0x%x\n", hc->version);
11288
+ bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
11289
+ bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
11290
+ bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
11291
+ bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
11292
+ for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
11293
+ bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
11294
+ }
11295
+
11296
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
11297
+ if (tlv)
11298
+ {
11299
+ const pcie_hmapviolation_t* hmap;
11300
+ hmap = (const pcie_hmapviolation_t *)tlv->data;
11301
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
11302
+ bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
11303
+ bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
11304
+ bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
11305
+ }
11306
+
11307
+ return BCME_OK;
11308
+}
11309
+
11310
+#ifdef BCMPCIE
11311
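+/*
+ * dhd_prot_send_host_timestamp: submit a host timestamp request to the dongle.
+ * Reserves one work item on the H2D control ring, fills the common message
+ * header (MSG_TYPE_HOSTTIMSTAMP, fixed request id DHD_H2D_HOSTTS_REQ_PKTID),
+ * copies the caller's TLV payload into the pre-allocated hostts_req_buf DMA
+ * buffer whose physical address and length are carried in the message, then
+ * updates the write pointer and rings the doorbell. Only one request may be
+ * outstanding at a time (hostts_req_buf_inuse). Returns 0 on success, -1 on
+ * bad arguments, an already pending request, or no ring space.
+ */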
+int
11312
+dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
11313
+ uint16 seqnum, uint16 xt_id)
11314
+{
11315
+ dhd_prot_t *prot = dhdp->prot;
11316
+ host_timestamp_msg_t *ts_req;
11317
+ unsigned long flags;
11318
+ uint16 alloced = 0;
11319
+ uchar *ts_tlv_buf;
11320
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
11321
+
11322
+ if ((tlvs == NULL) || (tlv_len == 0)) {
11323
+ DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
11324
+ __FUNCTION__, tlvs, tlv_len));
11325
+ return -1;
11326
+ }
11327
+
11328
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11329
+
11330
+	/* if a host TS request is already pending, bail out */
11331
+ if (prot->hostts_req_buf_inuse == TRUE) {
11332
+ DHD_ERROR(("one host TS request already pending at device\n"));
11333
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11334
+ return -1;
11335
+ }
11336
+
11337
+	/* Request a work item slot on the H2D control ring */
11338
+ ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
11339
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
11340
+ if (ts_req == NULL) {
11341
+ DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
11342
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11343
+ return -1;
11344
+ }
11345
+
11346
+ /* Common msg buf hdr */
11347
+ ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
11348
+ ts_req->msg.if_id = 0;
11349
+ ts_req->msg.flags = ctrl_ring->current_phase;
11350
+ ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
11351
+
11352
+ ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11353
+ ctrl_ring->seqnum++;
11354
+
11355
+ ts_req->xt_id = xt_id;
11356
+ ts_req->seqnum = seqnum;
11357
+ /* populate TS req buffer info */
11358
+ ts_req->input_data_len = htol16(tlv_len);
11359
+ ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
11360
+ ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
11361
+	/* copy the host TS TLV payload into the DMA buffer */
11362
+ ts_tlv_buf = (void *) prot->hostts_req_buf.va;
11363
+ prot->hostts_req_buf_inuse = TRUE;
11364
+ memcpy(ts_tlv_buf, tlvs, tlv_len);
11365
+
11366
+ OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
11367
+
11368
+ if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
11369
+		DHD_ERROR(("host TS req buffer address is not DMA aligned\n"));
11370
+ }
11371
+
11372
+	DHD_CTL(("submitted host TS request: request_id %d, data_len %d, xt_id %d, seq %d\n",
11373
+ ts_req->msg.request_id, ts_req->input_data_len,
11374
+ ts_req->xt_id, ts_req->seqnum));
11375
+
11376
+	/* update the ring write pointer and raise the doorbell interrupt */
11377
+ dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
11378
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11379
+
11380
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11381
+
11382
+ return 0;
11383
+} /* dhd_prot_send_host_timestamp */
11384
+
11385
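+/*
+ * Combined get/set accessors for protocol flags (tx/rx timestamp logging,
+ * no_retry, no_aggr, fixed_rate): when 'set' is TRUE the flag is updated to
+ * 'enable'; the current value is always returned.
+ */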
+bool
11386
+dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
11387
+{
11388
+ if (set)
11389
+ dhd->prot->tx_ts_log_enabled = enable;
11390
+
11391
+ return dhd->prot->tx_ts_log_enabled;
11392
+}
11393
+
11394
+bool
11395
+dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
11396
+{
11397
+ if (set)
11398
+ dhd->prot->rx_ts_log_enabled = enable;
11399
+
11400
+ return dhd->prot->rx_ts_log_enabled;
11401
+}
11402
+
11403
+bool
11404
+dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
11405
+{
11406
+ if (set)
11407
+ dhd->prot->no_retry = enable;
11408
+
11409
+ return dhd->prot->no_retry;
11410
+}
11411
+
11412
+bool
11413
+dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
11414
+{
11415
+ if (set)
11416
+ dhd->prot->no_aggr = enable;
11417
+
11418
+ return dhd->prot->no_aggr;
11419
+}
11420
+
11421
+bool
11422
+dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
11423
+{
11424
+ if (set)
11425
+ dhd->prot->fixed_rate = enable;
11426
+
11427
+ return dhd->prot->fixed_rate;
11428
+}
11429
+#endif /* BCMPCIE */
11430
+
11431
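+/* Free the host DMA-able buffers holding the H2D write and D2H read index arrays */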
+void
11432
+dhd_prot_dma_indx_free(dhd_pub_t *dhd)
11433
+{
11434
+ dhd_prot_t *prot = dhd->prot;
11435
+
11436
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
11437
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
11438
+}
11439
+
11440
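+/* Delayed posting of rx timestamp buffers; a no-op unless max_tsbufpost is non-zero */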
+void
11441
+dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
11442
+{
11443
+ if (dhd->prot->max_tsbufpost > 0)
11444
+ dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
11445
+}
11446
+
11447
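+/*
+ * Stub used when timesync support is not compiled in: a firmware timestamp
+ * message is unexpected here, so just log an error.
+ */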
+static void BCMFASTPATH
11448
+dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
11449
+{
11450
+	DHD_ERROR(("Timesync feature not compiled in but received a FW TS message\n"));
11451
+
11452
+}
11453
+
11454
+uint16
11455
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
11456
+{
11457
+ return dhdp->prot->ioctl_trans_id;
11458
+}
11459
+
11460
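+/*
+ * Return the host SCB buffer virtual address and length. When the host SCB
+ * feature is disabled, report a length of 0 (rather than an error) so the
+ * caller does not log "Operation not supported"; BCME_UNSUPPORTED is returned
+ * only when no length pointer is supplied.
+ */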
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
11461
+{
11462
+ if (!dhd->hscb_enable) {
11463
+ if (len) {
11464
+ /* prevent "Operation not supported" dhd message */
11465
+ *len = 0;
11466
+ return BCME_OK;
11467
+ }
11468
+ return BCME_UNSUPPORTED;
11469
+ }
11470
+
11471
+ if (va) {
11472
+ *va = dhd->prot->host_scb_buf.va;
11473
+ }
11474
+ if (len) {
11475
+ *len = dhd->prot->host_scb_buf.len;
11476
+ }
11477
+
11478
+ return BCME_OK;
11479
+}
11480
+
11481
+#ifdef DHD_BUS_MEM_ACCESS
11482
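+/*
+ * Copy 'length' bytes at 'offset' from the host SCB buffer into 'buff'.
+ * The offset + length bounds check is done in 64-bit to avoid wrap-around.
+ */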
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
11483
+{
11484
+ if (!dhd->hscb_enable) {
11485
+ return BCME_UNSUPPORTED;
11486
+ }
11487
+
11488
+ if (dhd->prot->host_scb_buf.va == NULL ||
11489
+ ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
11490
+ return BCME_BADADDR;
11491
+ }
11492
+
11493
+ memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
11494
+
11495
+ return BCME_OK;
11496
+}
11497
+#endif /* DHD_BUS_MEM_ACCESS */
11498
+
11499
+#ifdef DHD_HP2P
11500
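+/*
+ * Get/set helpers for the HP2P burst parameters (packet-count threshold, time
+ * threshold and packet expiry); values are truncated to 16 bits on set and the
+ * current value is always returned.
+ */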
+uint32
11501
+dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11502
+{
11503
+ if (set)
11504
+ dhd->pkt_thresh = (uint16)val;
11505
+
11506
+ val = dhd->pkt_thresh;
11507
+
11508
+ return val;
11509
+}
11510
+
11511
+uint32
11512
+dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11513
+{
11514
+ if (set)
11515
+ dhd->time_thresh = (uint16)val;
11516
+
11517
+ val = dhd->time_thresh;
11518
+
11519
+ return val;
11520
+}
11521
+
11522
+uint32
11523
+dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
11524
+{
11525
+ if (set)
11526
+ dhd->pkt_expiry = (uint16)val;
11527
+
11528
+ val = dhd->pkt_expiry;
11529
+
11530
+ return val;
11531
+}
11532
+
11533
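+/*
+ * Enable/disable HP2P. The low nibble of 'enable' controls hp2p and the next
+ * nibble controls infra hp2p (e.g. enable = 0x11 turns both on). Enabling
+ * switches the flow priority map to TID-based mapping; disabling restores the
+ * AC-based map. The return value packs the current state the same way:
+ * (hp2p_infra_enable << 4) | hp2p_enable.
+ */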
+uint8
11534
+dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
11535
+{
11536
+ uint8 ret = 0;
11537
+ if (set) {
11538
+ dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
11539
+ dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
11540
+
11541
+ if (enable) {
11542
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
11543
+ } else {
11544
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
11545
+ }
11546
+ }
11547
+	ret = dhd->hp2p_infra_enable ? 0x1 : 0x0;
11548
+ ret <<= 4;
11549
+	ret |= dhd->hp2p_enable ? 0x1 : 0x0;
11550
+
11551
+ return ret;
11552
+}
11553
+
11554
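+/*
+ * Bin the rx completion latency: the low 10 bits of the timestamp high word
+ * are scaled by HP2P_TIME_SCALE and counted in hp2p_info[0]'s rx_t0 histogram,
+ * clamped to the last bin.
+ */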
+static void
11555
+dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
11556
+{
11557
+ ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
11558
+ hp2p_info_t *hp2p_info;
11559
+ uint32 dur1;
11560
+
11561
+ hp2p_info = &dhd->hp2p_info[0];
11562
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
11563
+
11564
+ if (dur1 > (MAX_RX_HIST_BIN - 1)) {
11565
+ dur1 = MAX_RX_HIST_BIN - 1;
11566
+ DHD_ERROR(("%s: 0x%x 0x%x\n",
11567
+ __FUNCTION__, ts->low, ts->high));
11568
+ }
11569
+
11570
+ hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
11571
+ return;
11572
+}
11573
+
11574
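+/*
+ * Bin the tx completion latencies: the flow ring id is mapped to an hp2p flow
+ * index, and two 10-bit duration fields packed in the completion timestamp are
+ * scaled and counted in the tx_t0/tx_t1 histograms (clamped to the last bin).
+ */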
+static void
11575
+dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
11576
+{
11577
+ ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
11578
+ uint16 flowid = txstatus->compl_hdr.flow_ring_id;
11579
+ uint32 hp2p_flowid, dur1, dur2;
11580
+ hp2p_info_t *hp2p_info;
11581
+
11582
+ hp2p_flowid = dhd->bus->max_submission_rings -
11583
+ dhd->bus->max_cmn_rings - flowid + 1;
11584
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11585
+ ts = (ts_timestamp_t *)&(txstatus->ts);
11586
+
11587
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11588
+ if (dur1 > (MAX_TX_HIST_BIN - 1)) {
11589
+ dur1 = MAX_TX_HIST_BIN - 1;
11590
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11591
+ }
11592
+ hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
11593
+
11594
+ dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11595
+ if (dur2 > (MAX_TX_HIST_BIN - 1)) {
11596
+ dur2 = MAX_TX_HIST_BIN - 1;
11597
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11598
+ }
11599
+
11600
+ hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
11601
+ return;
11602
+}
11603
+
11604
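+/*
+ * hrtimer callback for HP2P burst aggregation: when the time threshold
+ * expires, flush any tx posts still pending on the flow ring and account the
+ * event in num_timer_limit.
+ */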
+enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
11605
+{
11606
+ hp2p_info_t *hp2p_info;
11607
+ unsigned long flags;
11608
+ dhd_pub_t *dhdp;
11609
+
11610
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11611
+#pragma GCC diagnostic push
11612
+#pragma GCC diagnostic ignored "-Wcast-qual"
11613
+#endif // endif
11614
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
11615
+ hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
11616
+#else
11617
+ hp2p_info = container_of(timer, hp2p_info_t, timer);
11618
+#endif // endif
11619
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11620
+#pragma GCC diagnostic pop
11621
+#endif // endif
11622
+ dhdp = hp2p_info->dhd_pub;
11623
+ if (!dhdp) {
11624
+ goto done;
11625
+ }
11626
+
11627
+ DHD_INFO(("%s: pend_item = %d flowid = %d\n",
11628
+ __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
11629
+ hp2p_info->flowid));
11630
+
11631
+ flags = dhd_os_hp2plock(dhdp);
11632
+
11633
+ dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
11634
+ hp2p_info->hrtimer_init = FALSE;
11635
+ hp2p_info->num_timer_limit++;
11636
+
11637
+ dhd_os_hp2punlock(dhdp, flags);
11638
+done:
11639
+ return HRTIMER_NORESTART;
11640
+}
11641
+
11642
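+/*
+ * HP2P burst control: once pend_items_count reaches pkt_thresh the ring is
+ * flushed immediately and the timer is cancelled; otherwise a one-shot hrtimer
+ * is armed (time_thresh, apparently in microseconds given the
+ * ktime_set(0, time_thresh * 1000) nanosecond conversion) so a partial burst
+ * is still flushed promptly.
+ */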
+static void
11643
+dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
11644
+{
11645
+ hp2p_info_t *hp2p_info;
11646
+ uint16 hp2p_flowid;
11647
+
11648
+ hp2p_flowid = dhd->bus->max_submission_rings -
11649
+ dhd->bus->max_cmn_rings - flowid + 1;
11650
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11651
+
11652
+ if (ring->pend_items_count == dhd->pkt_thresh) {
11653
+ dhd_prot_txdata_write_flush(dhd, flowid);
11654
+
11655
+ hp2p_info->hrtimer_init = FALSE;
11656
+ hp2p_info->ring = NULL;
11657
+ hp2p_info->num_pkt_limit++;
11658
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
11659
+ tasklet_hrtimer_cancel(&hp2p_info->timer);
11660
+#else
11661
+ hrtimer_cancel(&hp2p_info->timer);
11662
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
11663
+ DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
11664
+ "hp2p_flowid = %d pkt_thresh = %d\n",
11665
+ __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
11666
+ } else {
11667
+ if (hp2p_info->hrtimer_init == FALSE) {
11668
+ hp2p_info->hrtimer_init = TRUE;
11669
+ hp2p_info->flowid = flowid;
11670
+ hp2p_info->dhd_pub = dhd;
11671
+ hp2p_info->ring = ring;
11672
+ hp2p_info->num_timer_start++;
11673
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
11674
+ tasklet_hrtimer_start(&hp2p_info->timer,
11675
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
11676
+#else
11677
+ hrtimer_start(&hp2p_info->timer,
11678
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL_SOFT);
11679
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
11680
+
11681
+			DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n",
11682
+ __FUNCTION__, flowid, hp2p_flowid));
11683
+ }
11684
+ }
11685
+ return;
11686
+}
11687
+
11688
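+/*
+ * Stamp an HP2P tx descriptor: the current host time (local_clock() scaled to
+ * microseconds) is carried in the otherwise unused metadata buffer address
+ * fields, and exp_time is set from the configured packet expiry.
+ */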
+static void
11689
+dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
11690
+{
11691
+ uint64 ts;
11692
+
11693
+ ts = local_clock();
11694
+ do_div(ts, 1000);
11695
+
11696
+ txdesc->metadata_buf_len = 0;
11697
+ txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
11698
+ txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
11699
+ txdesc->exp_time = dhd->pkt_expiry;
11700
+
11701
+ DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
11702
+ __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
11703
+ txdesc->metadata_buf_addr.low_addr,
11704
+ txdesc->exp_time));
11705
+
11706
+ return;
11707
+}
11708
+#endif /* DHD_HP2P */
11709
+
11710
+#ifdef DHD_MAP_LOGGING
11711
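+/*
+ * SMMU fault handler hook: dump protocol debug state and the OSL DMA mapping
+ * history (plus pktid logs when enabled), then trigger a firmware memory dump
+ * tagged DUMP_TYPE_SMMU_FAULT.
+ */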
+void
11712
+dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
11713
+{
11714
+ dhd_prot_debug_info_print(dhdp);
11715
+ OSL_DMA_MAP_DUMP(dhdp->osh);
11716
+#ifdef DHD_MAP_PKTID_LOGGING
11717
+ dhd_pktid_logging_dump(dhdp);
11718
+#endif /* DHD_MAP_PKTID_LOGGING */
11719
+#ifdef DHD_FW_COREDUMP
11720
+ dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
11721
+#ifdef DNGL_AXI_ERROR_LOGGING
11722
+ dhdp->memdump_enabled = DUMP_MEMFILE;
11723
+ dhd_bus_get_mem_dump(dhdp);
11724
+#else
11725
+ dhdp->memdump_enabled = DUMP_MEMONLY;
11726
+ dhd_bus_mem_dump(dhdp);
11727
+#endif /* DNGL_AXI_ERROR_LOGGING */
11728
+#endif /* DHD_FW_COREDUMP */
11729
+}
11730
+#endif /* DHD_MAP_LOGGING */