From 297b60346df8beafee954a0fd7c2d64f33f3b9bc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 11 May 2024 01:44:05 +0000
Subject: [PATCH] dhd_msgbuf: update cywdhd bcmdhd msgbuf protocol layer
---
kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_msgbuf.c | 8207 +++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 6802 insertions(+), 1405 deletions(-)
diff --git a/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_msgbuf.c b/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_msgbuf.c
index 886787e..26638d4 100644
--- a/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_msgbuf.c
+++ b/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_msgbuf.c
@@ -1,17 +1,18 @@
-/* SPDX-License-Identifier: GPL-2.0 */
/**
* @file definition of host message ring functionality
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2019, Broadcom Corporation
- *
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
@@ -19,7 +20,7 @@
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
@@ -27,9 +28,8 @@
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_msgbuf.c 608659 2015-12-29 01:18:33Z $
+ * $Id: dhd_msgbuf.c 701962 2017-05-30 06:13:15Z $
*/
-
#include <typedefs.h>
#include <osl.h>
@@ -37,20 +37,17 @@
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
+#include <bcmstdlib_s.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>
-#ifdef BCMDBUS
-#include <dbus.h>
-#else
#include <dhd_bus.h>
-#endif /* BCMDBUS */
#include <dhd_dbg.h>
#include <siutils.h>
-
+#include <dhd_debug.h>
#include <dhd_flowring.h>
@@ -61,11 +58,27 @@
#if defined(DHD_LB)
#include <linux/cpu.h>
#include <bcm_ring.h>
-#define DHD_LB_WORKQ_SZ (8192)
+#define DHD_LB_WORKQ_SZ (8192)
#define DHD_LB_WORKQ_SYNC (16)
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */
+#include <etd.h>
+#include <hnd_debug.h>
+#include <bcmtlv.h>
+#include <hnd_armtrap.h>
+#include <dnglevent.h>
+
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#include <dhd_linux_pktdump.h>
+#endif /* DHD_PKT_LOGGING */
+#ifdef DHD_EWPR_VER2
+#include <dhd_bitpack.h>
+#endif /* DHD_EWPR_VER2 */
+
+extern char dhd_version[];
+extern char fw_version[];
/**
* Host configures a soft doorbell for d2h rings, by specifying a 32bit host
@@ -95,11 +108,13 @@
#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
-#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
+
+#define DHD_IOCTL_REQ_PKTBUFSZ 2048
+#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
#define DMA_ALIGN_LEN 4
@@ -108,16 +123,19 @@
#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
-#define DMA_HOST_BUFFER_LEN 0x80000
-#endif
+#define DMA_HOST_BUFFER_LEN 0x200000
+#endif // endif
#endif /* BCM_HOST_BUF */
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
-#define DHD_FLOWRING_MAX_EVENTBUF_POST 8
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
+#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
+#define DHD_H2D_INFORING_MAX_BUF_POST 32
+#define DHD_MAX_TSBUF_POST 8
-#define DHD_PROT_FUNCS 37
+#define DHD_PROT_FUNCS 43
/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048
@@ -128,7 +146,12 @@
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
#define RING_NAME_MAX_LENGTH 24
+#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
+/* Giving room before ioctl_trans_id rolls over. */
+#define BUFFER_BEFORE_ROLLOVER 300
+/* 512K memory + 32K registers */
+#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
struct msgbuf_ring; /* ring context for common and flow rings */
@@ -152,10 +175,19 @@
*
* Dongle advertizes host side sync mechanism requirements.
*/
-#define PCIE_D2H_SYNC
-#if defined(PCIE_D2H_SYNC)
-#define PCIE_D2H_SYNC_WAIT_TRIES 512
+#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
+#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
+
+#define HWA_DB_TYPE_RXPOST (0x0050)
+#define HWA_DB_TYPE_TXCPLT (0x0060)
+#define HWA_DB_TYPE_RXCPLT (0x0170)
+#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
+
+#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
+#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
+#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
/**
* Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
@@ -165,8 +197,18 @@
*/
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
-#endif /* PCIE_D2H_SYNC */
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
+ * For EDL messages.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+#ifdef EWP_EDL
+typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+ volatile cmn_msg_hdr_t *msg);
+#endif /* EWP_EDL */
/*
* +----------------------------------------------------------------------------
@@ -220,8 +262,9 @@
#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
/* Determine whether a ringid belongs to a TxPost flowring */
-#define DHD_IS_FLOWRING(ringid) \
- ((ringid) >= BCMPCIE_COMMON_MSGRINGS)
+#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
+ ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
+ (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
@@ -236,19 +279,28 @@
* any array of H2D rings.
*/
#define DHD_H2D_RING_OFFSET(ringid) \
- ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
+ (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
+
+/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
+ * This may be used for IFRM.
+ */
+#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
+ ((ringid) - BCMPCIE_COMMON_MSGRINGS)
/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
* This may be used for the D2H DMA WR index array or D2H DMA RD index array or
* any array of D2H rings.
+ * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
+ * max_h2d_rings: total number of h2d rings
*/
-#define DHD_D2H_RING_OFFSET(ringid) \
- ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
+ ((ringid) > (max_h2d_rings) ? \
+ ((ringid) - max_h2d_rings) : \
+ ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
-
#define DHD_DMAH_NULL ((void*)NULL)
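As a worked example of these index conversions (a standalone sketch, not part of the patch; the BCMPCIE_* values and the DHD_RINGID_TO_FLOWID definition are assumptions matching the conventional 2 H2D + 3 D2H common-ring layout):

#include <stdio.h>

#define BCMPCIE_H2D_COMMON_MSGRINGS 2 /* assumed: ctrl + rxpost submission */
#define BCMPCIE_COMMON_MSGRINGS     5 /* assumed: 2 H2D + 3 D2H common rings */

/* assumed inverse of DHD_FLOWID_TO_RINGID */
#define DHD_RINGID_TO_FLOWID(ringid) \
	(BCMPCIE_H2D_COMMON_MSGRINGS + (ringid) - BCMPCIE_COMMON_MSGRINGS)

int main(void)
{
	unsigned max_flow_rings = 16;
	unsigned ringid;

	for (ringid = 4; ringid <= 6; ringid++) {
		int is_flowring = (ringid >= BCMPCIE_COMMON_MSGRINGS) &&
			(ringid < max_flow_rings + BCMPCIE_COMMON_MSGRINGS);
		printf("ringid %u: flowring=%d", ringid, is_flowring);
		if (is_flowring) /* e.g. ringid 5 maps to flowid 2 */
			printf(" flowid=%d", DHD_RINGID_TO_FLOWID(ringid));
		printf("\n");
	}
	return 0;
}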
@@ -263,7 +315,65 @@
#define DHD_DMA_PAD (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD (128)
-#endif
+#endif // endif
+
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+ DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+ (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+ (ring)++, (flowid)++)
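A minimal usage sketch of the pool macros above (a hypothetical helper, not part of the patch; it assumes prot->flowring_num holds the pool size, per the dhd_prot struct later in this file):

/* Hypothetical helper: walk every pre-allocated flowring in the pool,
 * the way dhd_prot_flowrings_pool_attach()/detach() would.
 */
static void example_flowrings_pool_walk(dhd_prot_t *prot)
{
	msgbuf_ring_t *ring;
	uint16 flowid;
	uint16 total_flowrings = prot->flowring_num; /* pool size */

	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) {
		/* each pool entry already has its DMA-able buffer attached;
		 * e.g. reset per-ring bookkeeping here
		 */
		ring->inited = FALSE;
	}
}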
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
@@ -273,6 +383,10 @@
uint32 destdelay;
uint32 len;
bool in_progress;
+ uint64 start_usec;
+ uint64 time_taken;
+ uint32 d11_lpbk;
+ int status;
} dhd_dmaxfer_t;
/**
@@ -290,6 +404,7 @@
bool inited;
uint16 idx; /* ring id */
uint16 rd; /* read index */
+ uint16 curr_rd; /* read index for debug */
uint16 wr; /* write index */
uint16 max_items; /* maximum number of items in ring */
uint16 item_len; /* length of each item in the ring */
@@ -301,7 +416,17 @@
/* # of messages on ring not yet announced to dongle */
uint16 pend_items_count;
#endif /* TXP_FLUSH_NITEMS */
+
+ uint8 ring_type;
+ uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
+ uint8 n_completion_ids;
+ bool create_pending;
+ uint16 create_req_id;
+ uint8 current_phase;
+ uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
uchar name[RING_NAME_MAX_LENGTH];
+ uint32 ring_mem_allocated;
+ void *ring_lock;
} msgbuf_ring_t;
#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
@@ -309,21 +434,29 @@
((uint8 *)(DHD_RING_BGN_VA((ring))) + \
(((ring)->max_items - 1) * (ring)->item_len))
-
+/* This can be overridden by a module parameter defined in dhd_linux.c
+ * or by the dhd iovar h2d_max_txpost.
+ */
+int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
+ uint16 rxbufpost_sz;
uint16 rxbufpost;
uint16 max_rxbufpost;
uint16 max_eventbufpost;
uint16 max_ioctlrespbufpost;
+ uint16 max_tsbufpost;
+ uint16 max_infobufpost;
+ uint16 infobufpost;
uint16 cur_event_bufs_posted;
uint16 cur_ioctlresp_bufs_posted;
+ uint16 cur_ts_bufs_posted;
/* Flow control mechanism based on active transmits pending */
- uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
- uint16 max_tx_count;
+ osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
+ uint16 h2d_max_txpost;
uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
@@ -332,6 +465,9 @@
msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
+ msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
+ msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
+ msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
@@ -340,6 +476,7 @@
uint32 rx_dataoffset;
dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
+ dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
/* ioctl related resources */
uint8 ioctl_state;
@@ -358,37 +495,52 @@
dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
+ dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
uint32 flowring_num;
-#if defined(PCIE_D2H_SYNC)
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
+#ifdef EWP_EDL
+ d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
+#endif /* EWP_EDL */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
-#endif /* PCIE_D2H_SYNC */
dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
uint16 ioctl_seq_no;
uint16 data_seq_no;
uint16 ioctl_trans_id;
- void *pktid_map_handle; /* a pktid maps to a packet and its metadata */
+ void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
+ void *pktid_rx_map; /* pktid map for rx path */
+ void *pktid_tx_map; /* pktid map for tx path */
bool metadata_dbg;
void *pktid_map_handle_ioctl;
+#ifdef DHD_MAP_PKTID_LOGGING
+ void *pktid_dma_map; /* pktid map for DMA MAP */
+ void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
+#endif /* DHD_MAP_PKTID_LOGGING */
+ uint32 pktid_depleted_cnt; /* pktid depleted count */
+ /* netif tx queue stop count */
+ uint8 pktid_txq_stop_cnt;
+ /* netif tx queue start count */
+ uint8 pktid_txq_start_cnt;
+ uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
+ uint64 ioctl_ack_time; /* timestamp for ioctl ack */
+ uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
/* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
uint16 tx_metadata_offset;
-
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
/* Host's soft doorbell configuration */
bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
-#if defined(DHD_LB)
+
/* Work Queues to be used by the producer and the consumer, and threshold
* when the WRITE index must be synced to consumer's workq
*/
@@ -400,17 +552,50 @@
uint32 rx_compl_prod_sync ____cacheline_aligned;
bcm_workq_t rx_compl_prod, rx_compl_cons;
#endif /* DHD_LB_RXC */
-#endif /* DHD_LB */
+
+ dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
+
+	uint32 host_ipc_version; /* Host supported IPC rev */
+ uint32 device_ipc_version; /* FW supported IPC rev */
+ uint32 active_ipc_version; /* Host advertised IPC rev */
+ dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
+ bool hostts_req_buf_inuse;
+ bool rx_ts_log_enabled;
+ bool tx_ts_log_enabled;
+ bool no_retry;
+ bool no_aggr;
+ bool fixed_rate;
+ dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
+#ifdef DHD_HP2P
+ msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
+ msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
+#endif /* DHD_HP2P */
+ bool no_tx_resource;
} dhd_prot_t;
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BASE64_BUFFER_SIZE 640
+#endif // endif
+
+#ifdef DHD_DUMP_PCIE_RINGS
+static
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn);
+#ifdef EWP_EDL
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn);
+#endif /* EWP_EDL */
+#endif /* DHD_DUMP_PCIE_RINGS */
+
+extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
+extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
/* APIs for managing a DMA-able buffer */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
-static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
-static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
@@ -418,6 +603,7 @@
static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
@@ -444,13 +630,12 @@
void *p, uint16 len);
static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
-/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
dhd_dma_buf_t *dma_buf, uint32 bufsz);
/* Set/Get a RD or WR index in the array of indices */
/* See also: dhd_prot_dma_indx_init() */
-static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
+void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
uint16 ringid);
static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
@@ -469,11 +654,12 @@
void *buf, int ifidx);
/* Post buffers for Rx, control ioctl response and events */
-static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
+static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
@@ -487,7 +673,6 @@
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
-static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
/* Loopback test with dongle */
@@ -500,11 +685,38 @@
static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
+static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
+
+/* Monitor Mode */
+#ifdef WL_MONITOR
+extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
+extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
+#endif /* WL_MONITOR */
/* Configure a soft doorbell per D2H ring */
static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
-static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
+static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
+static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
+#ifdef DHD_HP2P
+static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
+#ifdef EWP_EDL
+static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
+#endif // endif
+static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
+#ifdef DHD_HP2P
+static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
+static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
+static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
+static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
+#endif // endif
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle */
@@ -529,32 +741,38 @@
NULL,
dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
NULL,
- dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
+ NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
NULL,
dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
NULL, /* MSG_TYPE_FLOW_RING_RESUME */
- NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
+ dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
- NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
+ dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
NULL, /* MSG_TYPE_INFO_BUF_POST */
- NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
+ dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
NULL, /* MSG_TYPE_H2D_RING_CREATE */
NULL, /* MSG_TYPE_D2H_RING_CREATE */
- NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
- NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
+ dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
+ dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
NULL, /* MSG_TYPE_H2D_RING_CONFIG */
NULL, /* MSG_TYPE_D2H_RING_CONFIG */
NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
- dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
+ dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
- NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
+ dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
+ NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
+ NULL, /* MSG_TYPE_HOSTTIMSTAMP */
+ dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
+ dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
+ NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
+ dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
};
-
#ifdef DHD_RX_CHAINING
#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
- (dhd_rx_pkt_chainable((dhd), (ifidx)) && \
+ (dhd_wet_chainable(dhd) && \
+ dhd_rx_pkt_chainable((dhd), (ifidx)) && \
!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
@@ -571,10 +789,49 @@
#endif /* DHD_RX_CHAINING */
+#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
+
static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
-#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
+bool
+dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
+{
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
+ uint16 rd, wr;
+ bool ret;
+ if (dhd->dma_d2h_ring_upd_support) {
+ wr = flow_ring->wr;
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+ }
+ if (dhd->dma_h2d_ring_upd_support) {
+ rd = flow_ring->rd;
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+ }
+ ret = (wr == rd) ? TRUE : FALSE;
+ return ret;
+}
+
+void
+dhd_prot_dump_ring_ptrs(void *prot_info)
+{
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
+ DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
+ ring->curr_rd, ring->rd, ring->wr));
+}
+
+uint16
+dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
+{
+ return (uint16)h2d_max_txpost;
+}
+void
+dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
+{
+ h2d_max_txpost = max_txpost;
+}
/**
* D2H DMA to completion callback handlers. Based on the mode advertised by the
* dongle through the PCIE shared region, the appropriate callback will be
@@ -583,8 +840,8 @@
* does not require host participation, then a noop callback handler will be
* bound that simply returns the msg_type.
*/
-static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- uint32 tries, uchar *msg, int msglen);
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
+ uint32 tries, volatile uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
@@ -592,6 +849,10 @@
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
+static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
+ uint16 ring_type, uint32 id);
+static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
+ uint8 type, uint32 id);
/**
* dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
@@ -603,22 +864,60 @@
*
*/
static void
-dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
- uchar *msg, int msglen)
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
+ volatile uchar *msg, int msglen)
{
- uint32 seqnum = ring->seqnum;
+ uint32 ring_seqnum = ring->seqnum;
- DHD_ERROR(("LIVELOCK DHD<%p> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>"
- "dma_buf va<%p> msg<%p>\n",
- dhd, seqnum, seqnum% D2H_EPOCH_MODULO, tries,
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
+ DHD_ERROR((
+ "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
+ " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
+ dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
- ring->dma_buf.va, msg));
- prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
+ ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
-#if defined(SUPPORT_LINKDOWN_RECOVERY) && defined(CONFIG_ARCH_MSM)
- dhd->bus->islinkdown = 1;
- dhd_os_check_hang(dhd, 0, -ETIMEDOUT);
-#endif /* SUPPORT_LINKDOWN_RECOVERY && CONFIG_ARCH_MSM */
+ dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
+
+ /* Try to resume if already suspended or suspend in progress */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* Skip if still in suspended or suspend in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ goto exit;
+ }
+
+ dhd_bus_dump_console_buffer(dhd->bus);
+ dhd_prot_debug_info_print(dhd);
+
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+exit:
+ dhd_schedule_reset(dhd);
+
+#ifdef OEM_ANDROID
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+ dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* OEM_ANDROID */
+ dhd->livelock_occured = TRUE;
}
/**
@@ -632,27 +931,63 @@
uint32 tries;
uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
int num_words = msglen / sizeof(uint32); /* num of 32bit words */
- volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
+ volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
dhd_prot_t *prot = dhd->prot;
+ uint32 msg_seqnum;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
ASSERT(msglen == ring->item_len);
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
- uint32 msg_seqnum = *marker;
- if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
- ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
- }
+ BCM_REFERENCE(delay);
+ /*
+	 * For retries we use a stepper algorithm. We see that every time
+	 * the Dongle comes out of the D3 Cold state, the first D2H mem2mem
+	 * DMA takes more time to complete, leading to livelock issues.
+	 *
+	 * Case 1 - Apart from the Host CPU, some other bus master is
+	 * accessing the DDR port, probably a page close to the ring,
+	 * so PCIE does not get a chance to update the memory.
+	 * Solution - Increase the number of tries.
+	 *
+	 * Case 2 - The 50usec breathing time given by the Host CPU is not
+	 * sufficient for the PCIe RC to start its work.
+	 * Solution: Increase the delay in a stepper fashion.
+	 * This is done to ensure that no unwanted extra delay is
+	 * introduced in normal conditions.
+ */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ msg_seqnum = *marker;
+ if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
+ ring->seqnum++; /* next expected sequence number */
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
- if (tries > prot->d2h_sync_wait_max)
- prot->d2h_sync_wait_max = tries;
+ total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
- OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
- dhd_prot_d2h_sync_livelock(dhd, ring, tries, (uchar *)msg, msglen);
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
+ (volatile uchar *) msg, msglen);
ring->seqnum++; /* skip this message ... leak of a pktid */
return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
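For a feel of the bound this stepper imposes, a standalone back-of-envelope computation (OSL_DELAY overhead only; cache invalidation and CPU relax add more):

#include <stdio.h>

int main(void)
{
	unsigned long tries = 512;   /* PCIE_D2H_SYNC_WAIT_TRIES */
	unsigned long steps = 5;     /* PCIE_D2H_SYNC_NUM_OF_STEPS */
	unsigned long base_us = 100; /* PCIE_D2H_SYNC_DELAY */
	unsigned long total_us = 0, s;

	for (s = 1; s <= steps; s++)
		total_us += tries * base_us * s; /* delay grows per step */

	/* 512 * 100 * (1+2+3+4+5) = 768000 usecs, i.e. ~0.77 s worst case */
	printf("worst-case stepper delay ~ %lu us\n", total_us);
	return 0;
}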
@@ -677,27 +1012,70 @@
int num_words = msglen / sizeof(uint32); /* num of 32bit words */
uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
ASSERT(msglen == ring->item_len);
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
- prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
- if (prot_checksum == 0U) { /* checksum is OK */
+ BCM_REFERENCE(delay);
+ /*
+	 * For retries we use a stepper algorithm. We see that every time
+	 * the Dongle comes out of the D3 Cold state, the first D2H mem2mem
+	 * DMA takes more time to complete, leading to livelock issues.
+	 *
+	 * Case 1 - Apart from the Host CPU, some other bus master is
+	 * accessing the DDR port, probably a page close to the ring,
+	 * so PCIE does not get a chance to update the memory.
+	 * Solution - Increase the number of tries.
+	 *
+	 * Case 2 - The 50usec breathing time given by the Host CPU is not
+	 * sufficient for the PCIe RC to start its work.
+	 * Solution: Increase the delay in a stepper fashion.
+	 * This is done to ensure that no unwanted extra delay is
+	 * introduced in normal conditions.
+ */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+			/* First verify that the seqnum has been updated;
+			 * only then check the xorcsum.
+			 * Once both seqnum and xorcsum are correct, the
+			 * complete message has arrived.
+			 */
if (msg->epoch == ring_seqnum) {
- ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
+ num_words);
+ if (prot_checksum == 0U) { /* checksum is OK */
+ ring->seqnum++; /* next expected sequence number */
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to
+ * LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
}
- }
- if (tries > prot->d2h_sync_wait_max)
- prot->d2h_sync_wait_max = tries;
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
- OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
- dhd_prot_d2h_sync_livelock(dhd, ring, tries, (uchar *)msg, msglen);
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+ (volatile uchar *) msg, msglen);
ring->seqnum++; /* skip this message ... leak of a pktid */
return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
@@ -717,7 +1095,197 @@
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
- return msg->msg_type;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return MSG_TYPE_INVALID;
+ } else {
+ return msg->msg_type;
+ }
+}
+
+#ifdef EWP_EDL
+/**
+ * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
+ * header values at both the beginning and end of the payload.
+ * The cmn_msg_hdr_t is placed at the start and end of the payload
+ * in each work item in the EDL ring.
+ * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
+ * and the length of the payload in the 'request_id' field.
+ * Structure of each work item in the EDL ring:
+ * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
+ * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
+ * too costly on the dongle side and might take up too many ARM cycles,
+ * hence the xorcsum sync method is not being used for EDL ring.
+ */
+static int
+BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg)
+{
+ uint32 tries;
+ int msglen = 0, len = 0;
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+ volatile cmn_msg_hdr_t *trailer = NULL;
+ volatile uint8 *buf = NULL;
+ bool valid_msg = FALSE;
+
+ BCM_REFERENCE(delay);
+ /*
+	 * For retries we use a stepper algorithm. We see that every time
+	 * the Dongle comes out of the D3 Cold state, the first D2H mem2mem
+	 * DMA takes more time to complete, leading to livelock issues.
+	 *
+	 * Case 1 - Apart from the Host CPU, some other bus master is
+	 * accessing the DDR port, probably a page close to the ring,
+	 * so PCIE does not get a chance to update the memory.
+	 * Solution - Increase the number of tries.
+	 *
+	 * Case 2 - The 50usec breathing time given by the Host CPU is not
+	 * sufficient for the PCIe RC to start its work.
+	 * Solution: Increase the delay in a stepper fashion.
+	 * This is done to ensure that no unwanted extra delay is
+	 * introduced in normal conditions.
+ */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+			/* First verify that the seqnum has been updated;
+			 * if yes, only then validate the header and trailer.
+ * Once seqnum, header and trailer have been validated, it means
+ * that the complete message has arrived.
+ */
+ valid_msg = FALSE;
+ if (msg->epoch == ring_seqnum &&
+ msg->msg_type == MSG_TYPE_INFO_PYLD &&
+ msg->request_id > 0 &&
+ msg->request_id <= ring->item_len) {
+ /* proceed to check trailer only if header is valid */
+ buf = (volatile uint8 *)msg;
+ msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
+ buf += msglen;
+ if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
+ trailer = (volatile cmn_msg_hdr_t *)buf;
+ valid_msg = (trailer->epoch == ring_seqnum) &&
+ (trailer->msg_type == msg->msg_type) &&
+ (trailer->request_id == msg->request_id);
+ if (!valid_msg) {
+ DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
+ " expected, seqnum=%u; reqid=%u. Retrying... \n",
+ __FUNCTION__, trailer->epoch, trailer->request_id,
+ msg->epoch, msg->request_id));
+ }
+ } else {
+ DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
+ __FUNCTION__, msg->request_id));
+ }
+
+ if (valid_msg) {
+ /* data is OK */
+ ring->seqnum++; /* next expected sequence number */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
+ }
+ } else {
+ DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
+ " msg_type=0x%x, request_id=%u."
+ " Retrying...\n",
+ __FUNCTION__, ring_seqnum, msg->epoch,
+ msg->msg_type, msg->request_id));
+ }
+
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
+
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+ OSL_DELAY(delay * step); /* Add stepper delay */
+
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+ DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
+ DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
+ " msgtype=0x%x; expected-msgtype=0x%x"
+ " length=%u; expected-max-length=%u", __FUNCTION__,
+ msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
+ msg->request_id, ring->item_len));
+ dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
+ if (trailer && msglen > 0 &&
+ (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
+ DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
+ " msgtype=0x%x; expected-msgtype=0x%x"
+ " length=%u; expected-length=%u", __FUNCTION__,
+ trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
+ trailer->request_id, msg->request_id));
+ dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
+ sizeof(*trailer), DHD_ERROR_VAL);
+ }
+
+ if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
+ len = msglen + sizeof(cmn_msg_hdr_t);
+ else
+ len = ring->item_len;
+
+ dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+ (volatile uchar *) msg, len);
+
+ ring->seqnum++; /* skip this message */
+ return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+ DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
+ msg->epoch, msg->request_id));
+
+ prot->d2h_sync_wait_tot += tries;
+ return BCME_OK;
+}
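To make the header/trailer geometry above concrete, a hypothetical helper (not in the patch) that locates the trailer the same way the validation loop does:

+/* Hypothetical sketch: one EDL work item is laid out as
+ *   | cmn_msg_hdr_t | payload of msg->request_id bytes | cmn_msg_hdr_t |
+ * so the trailer sits immediately after the variable-length payload.
+ */
static volatile cmn_msg_hdr_t *
example_edl_trailer(volatile cmn_msg_hdr_t *msg)
{
	volatile uint8 *buf = (volatile uint8 *)msg;
	return (volatile cmn_msg_hdr_t *)
		(buf + sizeof(cmn_msg_hdr_t) + msg->request_id);
}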
+
+/**
+ * dhd_prot_d2h_sync_edl_none - The dongle ensures that the DMA will complete and
+ * the host need not try to sync. This noop sync handler will be bound when the dongle
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
+ */
+static int BCMFASTPATH
+dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg)
+{
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return BCME_ERROR;
+ } else {
+ if (msg->msg_type == MSG_TYPE_INFO_PYLD)
+ return BCME_OK;
+ else
+ return msg->msg_type;
+ }
+}
+#endif /* EWP_EDL */
+
+INLINE void
+dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
+{
+ /* To synchronize with the previous memory operations call wmb() */
+ OSL_SMP_WMB();
+ dhd->prot->ioctl_received = reason;
+	/* Call another wmb() to make sure the event value update is visible before waking up */
+ OSL_SMP_WMB();
+ dhd_os_ioctl_resp_wake(dhd);
}
/**
@@ -732,29 +1300,43 @@
prot->d2h_sync_wait_tot = 0UL;
prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ if (HWA_ACTIVE(dhd)) {
+ prot->d2hring_tx_cpln.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
+ prot->d2hring_rx_cpln.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
+ DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
+ __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
+ prot->d2hring_rx_cpln.hwa_db_type));
+ }
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
+ DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
+ DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
} else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
+#endif /* EWP_EDL */
+ DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
}
-}
-
-#endif /* PCIE_D2H_SYNC */
-
-INLINE void
-dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
-{
- /* To synchronize with the previous memory operations call wmb() */
- OSL_SMP_WMB();
- dhd->prot->ioctl_received = reason;
- /* Call another wmb() to make sure before waking up the other event value gets updated */
- OSL_SMP_WMB();
- dhd_os_ioctl_resp_wake(dhd);
}
/**
@@ -765,11 +1347,21 @@
{
dhd_prot_t *prot = dhd->prot;
prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+
+ if (HWA_ACTIVE(dhd)) {
+ prot->h2dring_rxp_subn.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
+ DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
+			__FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
+ }
+
+ prot->h2dring_rxp_subn.current_phase = 0;
+
prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_ctrl_subn.current_phase = 0;
}
/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
-
/*
* +---------------------------------------------------------------------------+
@@ -786,27 +1378,25 @@
base_addr->high_addr = htol32(PHYSADDRHI(pa));
}
-
/**
* dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
*/
static int
dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
- uint32 base, end; /* dongle uses 32bit ptr arithmetic */
-
+ uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
ASSERT(dma_buf);
- base = PHYSADDRLO(dma_buf->pa);
- ASSERT(base);
- ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
+ pa_lowaddr = PHYSADDRLO(dma_buf->pa);
+ ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
+ ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
ASSERT(dma_buf->len != 0);
/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
- end = (base + dma_buf->len); /* end address */
+ end = (pa_lowaddr + dma_buf->len); /* end address */
- if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */
+ if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
- __FUNCTION__, base, dma_buf->len));
+ __FUNCTION__, pa_lowaddr, dma_buf->len));
return BCME_ERROR;
}
@@ -818,22 +1408,22 @@
* returns BCME_OK=0 on success
* returns non-zero negative error value on failure.
*/
-static int
+int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
uint32 dma_pad = 0;
osl_t *osh = dhd->osh;
- int dma_align = DMA_ALIGN_LEN;
-
+ uint16 dma_align = DMA_ALIGN_LEN;
+ uint32 rem = 0;
ASSERT(dma_buf != NULL);
ASSERT(dma_buf->va == NULL);
ASSERT(dma_buf->len == 0);
- /* Pad the buffer length by one extra cacheline size.
- * Required for D2H direction.
- */
- dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+ /* Pad the buffer length to align to cacheline size. */
+ rem = (buf_len % DHD_DMA_PAD);
+ dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
+
dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
@@ -861,9 +1451,8 @@
static void
dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
- if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
+ if ((dma_buf == NULL) || (dma_buf->va == NULL))
return;
- }
(void)dhd_dma_buf_audit(dhd, dma_buf);
@@ -876,16 +1465,15 @@
* dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
* dhd_dma_buf_alloc().
*/
-static void
+void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
osl_t *osh = dhd->osh;
ASSERT(dma_buf);
- if (dma_buf->va == NULL) {
+ if (dma_buf->va == NULL)
return; /* Allow for free invocation, when alloc failed */
- }
/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
(void)dhd_dma_buf_audit(dhd, dma_buf);
@@ -922,6 +1510,133 @@
/*
* +---------------------------------------------------------------------------+
+ * DHD_MAP_PKTID_LOGGING
+ * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
+ * debugging in customer platform.
+ * +---------------------------------------------------------------------------+
+ */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+typedef struct dhd_pktid_log_item {
+ dmaaddr_t pa; /* DMA bus address */
+ uint64 ts_nsec; /* Timestamp: nsec */
+ uint32 size; /* DMA map/unmap size */
+ uint32 pktid; /* Packet ID */
+ uint8 pkttype; /* Packet Type */
+ uint8 rsvd[7]; /* Reserved for future use */
+} dhd_pktid_log_item_t;
+
+typedef struct dhd_pktid_log {
+ uint32 items; /* number of total items */
+ uint32 index; /* index of pktid_log_item */
+ dhd_pktid_log_item_t map[0]; /* metadata storage */
+} dhd_pktid_log_t;
+
+typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
+
+#define MAX_PKTID_LOG (2048)
+#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
+#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
+ ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
+
+#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
+#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
+#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
+ dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
+#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
+
+static dhd_pktid_log_handle_t *
+dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
+{
+ dhd_pktid_log_t *log;
+ uint32 log_size;
+
+ log_size = DHD_PKTID_LOG_SZ(num_items);
+ log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
+ if (log == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, log_size));
+ return (dhd_pktid_log_handle_t *)NULL;
+ }
+
+ log->items = num_items;
+ log->index = 0;
+
+ return (dhd_pktid_log_handle_t *)log; /* opaque handle */
+}
+
+static void
+dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
+{
+ dhd_pktid_log_t *log;
+ uint32 log_size;
+
+ if (handle == NULL) {
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ log = (dhd_pktid_log_t *)handle;
+ log_size = DHD_PKTID_LOG_SZ(log->items);
+ MFREE(dhd->osh, handle, log_size);
+}
+
+static void
+dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
+ uint32 pktid, uint32 len, uint8 pkttype)
+{
+ dhd_pktid_log_t *log;
+ uint32 idx;
+
+ if (handle == NULL) {
+ DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ log = (dhd_pktid_log_t *)handle;
+ idx = log->index;
+ log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
+ log->map[idx].pa = pa;
+ log->map[idx].pktid = pktid;
+ log->map[idx].size = len;
+ log->map[idx].pkttype = pkttype;
+ log->index = (idx + 1) % (log->items); /* update index */
+}
+
+void
+dhd_pktid_logging_dump(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ dhd_pktid_log_t *map_log, *unmap_log;
+ uint64 ts_sec, ts_usec;
+
+ if (prot == NULL) {
+ DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
+ unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
+ OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
+ if (map_log && unmap_log) {
+ DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
+ map_log->index, unmap_log->index,
+ (unsigned long)ts_sec, (unsigned long)ts_usec));
+ DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
+ "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+ (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
+ }
+}
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
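A hedged sketch of how these hooks are meant to be driven from the DMA map/unmap paths (the call sites below are hypothetical; pktid_dma_map and pktid_dma_unmap are the dhd_prot fields declared earlier in this patch):

#ifdef DHD_MAP_PKTID_LOGGING
/* Hypothetical call sites: record each map/unmap so a later SMMU fault
 * can be correlated against recent DMA activity via
 * dhd_pktid_logging_dump().
 */
static void example_log_dma_map(dhd_pub_t *dhd, dmaaddr_t pa,
	uint32 pktid, uint32 len, uint8 pkttype)
{
	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, pktid, len, pkttype);
}

static void example_log_dma_unmap(dhd_pub_t *dhd, dmaaddr_t pa,
	uint32 pktid, uint32 len, uint8 pkttype)
{
	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, pa, pktid, len, pkttype);
}
#endif /* DHD_MAP_PKTID_LOGGING */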
+
+/*
+ * +---------------------------------------------------------------------------+
* PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * Its main purpose is to save memory on the dongle; it has other uses as well.
+ * The packet id map also includes storage for some packet parameters that
@@ -931,14 +1646,15 @@
* +---------------------------------------------------------------------------+
*/
#define DHD_PCIE_PKTID
-#define MAX_PKTID_ITEMS (3072 * 2) /* Maximum number of pktids supported */
+#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
+#define MAX_RX_PKTID (1024)
+#define MAX_TX_PKTID (3072 * 12)
/* On Router, the pktptr serves as a pktid. */
-
#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
-#endif
+#endif // endif
/* Enum for marking the buffer color based on usage */
typedef enum dhd_pkttype {
@@ -946,47 +1662,68 @@
PKTTYPE_DATA_RX,
PKTTYPE_IOCTL_RX,
PKTTYPE_EVENT_RX,
+ PKTTYPE_INFO_RX,
/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
- PKTTYPE_NO_CHECK
+ PKTTYPE_NO_CHECK,
+ PKTTYPE_TSBUF_RX
} dhd_pkttype_t;
-#define DHD_PKTID_INVALID (0U)
-#define DHD_IOCTL_REQ_PKTID (0xFFFE)
-#define DHD_FAKE_PKTID (0xFACE)
+#define DHD_PKTID_MIN_AVAIL_COUNT 512U
+#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
+#define DHD_PKTID_INVALID (0U)
+#define DHD_IOCTL_REQ_PKTID (0xFFFE)
+#define DHD_FAKE_PKTID (0xFACE)
+#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
+#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
+#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
+#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
+#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
+#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
+#ifdef DHD_HP2P
+#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
+#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
+#endif /* DHD_HP2P */
-#define DHD_PKTID_FREE_LOCKER (FALSE)
-#define DHD_PKTID_RSV_LOCKER (TRUE)
+#define IS_FLOWRING(ring) \
+ ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
/* Construct a packet id mapping table, returning an opaque map handle */
-static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
/* Destroy a packet id mapping table, freeing all packets active in the table */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
-#define PKTID_MAP_HANDLE (0)
-#define PKTID_MAP_HANDLE_IOCTL (1)
-
-#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
+#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
+#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
+#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
+
+#ifdef MACOSX_DHD
+#undef DHD_PCIE_PKTID
+#define DHD_PCIE_PKTID 1
+#endif /* MACOSX_DHD */
#if defined(DHD_PCIE_PKTID)
-
+#if defined(MACOSX_DHD)
+#define IOCTLRESP_USE_CONSTMEM
+static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
+static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
+#endif // endif
/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
/* Allocate a unique pktid against which a pkt and some metadata is saved */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
- void *pkt);
+ void *pkt, dhd_pkttype_t pkttype);
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
-
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
@@ -1001,11 +1738,6 @@
* CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
* either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
*/
-#ifndef DHD_PKTID_AUDIT_ENABLED
-#define DHD_PKTID_AUDIT_ENABLED 1
-#endif /* DHD_PKTID_AUDIT_ENABLED */
-
-
#if defined(DHD_PKTID_AUDIT_ENABLED)
#define USE_DHD_PKTID_AUDIT_LOCK 1
/* Audit the pktidmap allocator */
@@ -1023,6 +1755,13 @@
#define DHD_TEST_IS_ALLOC 3
#define DHD_TEST_IS_FREE 4
+typedef enum dhd_pktid_map_type {
+ DHD_PKTID_MAP_TYPE_CTRL = 1,
+ DHD_PKTID_MAP_TYPE_TX,
+ DHD_PKTID_MAP_TYPE_RX,
+ DHD_PKTID_MAP_TYPE_UNKNOWN
+} dhd_pktid_map_type_t;
+
#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
@@ -1037,12 +1776,12 @@
#endif /* DHD_PKTID_AUDIT_ENABLED */
-/* #define USE_DHD_PKTID_LOCK 1 */
+#define USE_DHD_PKTID_LOCK 1
#ifdef USE_DHD_PKTID_LOCK
#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
-#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
+#define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
@@ -1059,37 +1798,26 @@
} while (0)
#endif /* !USE_DHD_PKTID_LOCK */
-/* Packet metadata saved in packet id mapper */
-
-/* The Locker can be 3 states
- * LOCKER_IS_FREE - Locker is free and can be allocated
- * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the
- * locker (buffer address, len, phy addr etc) are populated
- * with valid values
- * LOCKER_IS_RSVD - The locker is reserved for future use, but the values
- * in the locker are not valid. Especially pkt should be
- * NULL in this state. When the user wants to re-use the
- * locker dhd_pktid_map_free can be called with a flag
- * to reserve the pktid for future use, which will clear
- * the contents of the locker. When the user calls
- * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY
- */
typedef enum dhd_locker_state {
LOCKER_IS_FREE,
LOCKER_IS_BUSY,
LOCKER_IS_RSVD
} dhd_locker_state_t;
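A locker moves between these three states under the map's lock: a reserve takes it FREE -> BUSY, a free with the reserve flag parks it at RSVD with its contents cleared, and a later save returns it to BUSY. A minimal sketch of the legal transitions, as a hypothetical helper that is not part of this patch:

	/* Illustrative only: transitions the pktid map API performs on a locker. */
	static int locker_transition_ok(dhd_locker_state_t from, dhd_locker_state_t to)
	{
		switch (from) {
		case LOCKER_IS_FREE:
			return (to == LOCKER_IS_BUSY);		/* dhd_pktid_map_reserve */
		case LOCKER_IS_BUSY:
			return (to == LOCKER_IS_FREE) ||	/* free, DHD_PKTID_FREE_LOCKER */
			       (to == LOCKER_IS_RSVD);		/* free, DHD_PKTID_RSV_LOCKER */
		case LOCKER_IS_RSVD:
			return (to == LOCKER_IS_BUSY);		/* dhd_pktid_map_save */
		default:
			return 0;
		}
	}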
+/* Packet metadata saved in packet id mapper */
+
typedef struct dhd_pktid_item {
dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
- uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
- dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
- uint16 len; /* length of mapped packet's buffer */
- void *pkt; /* opaque native pointer to a packet */
- dmaaddr_t pa; /* physical address of mapped packet's buffer */
- void *dmah; /* handle to OS specific DMA map */
- void *secdma;
+ uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
+ dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
+ uint16 len; /* length of mapped packet's buffer */
+ void *pkt; /* opaque native pointer to a packet */
+ dmaaddr_t pa; /* physical address of mapped packet's buffer */
+ void *dmah; /* handle to OS specific DMA map */
+ void *secdma;
} dhd_pktid_item_t;
+
+typedef uint32 dhd_pktid_key_t;
typedef struct dhd_pktid_map {
uint32 items; /* total items in map */
@@ -1099,11 +1827,10 @@
void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
#if defined(DHD_PKTID_AUDIT_ENABLED)
- void *pktid_audit_lock;
+ void *pktid_audit_lock;
struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
#endif /* DHD_PKTID_AUDIT_ENABLED */
-
- uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
+ dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
dhd_pktid_item_t lockers[0]; /* metadata storage */
} dhd_pktid_map_t;
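The keys array replaces the old fixed-size stack: it is now heap-allocated alongside the lockers and holds map items + 1 unique pktids, with avail acting as the stack pointer. A minimal sketch of the pop/push discipline the alloc and free paths follow (hypothetical helpers; the real code open-codes this under the pktid lock):

	/* Pop a free pktid for allocation; push it back on free. */
	static uint32 pktid_key_pop(dhd_pktid_map_t *map)
	{
		if (map->avail == 0)
			return DHD_PKTID_INVALID;	/* pool depleted */
		return map->keys[map->avail--];		/* fetch a free key, pop stack */
	}

	static void pktid_key_push(dhd_pktid_map_t *map, uint32 nkey)
	{
		map->keys[++map->avail] = nkey;		/* return key to the stack */
	}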
@@ -1116,65 +1843,78 @@
* Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
*/
+#define DHD_PKTID_FREE_LOCKER (FALSE)
+#define DHD_PKTID_RSV_LOCKER (TRUE)
+
#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
#define DHD_PKIDMAP_ITEMS(items) (items)
#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
- (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+ (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
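As a worked example of the sizing macros, with a hypothetical num_items of 1024: locker #0 stays reserved for DHD_PKTID_INVALID, so both arrays carry one extra slot.

	/* Hypothetical sizing for items = 1024:
	 *   DHD_PKTID_MAP_SZ(1024)     == sizeof(dhd_pktid_map_t)
	 *                                 + sizeof(dhd_pktid_item_t) * 1025
	 *   DHD_PKTIDMAP_KEYS_SZ(1024) == sizeof(dhd_pktid_key_t) * 1025
	 */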
-#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map))
+#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
/* Convert a packet to a pktid, and save pkt pointer in busy locker */
-#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt))
-
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
+ dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
/* Reuse a previously reserved locker to save packet params */
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
- (uint8)(dir), (void *)(dmah), (void *)(secdma), \
- (dhd_pkttype_t)(pkttype))
-
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
+ (dhd_pkttype_t)(pkttype))
/* Convert a packet to a pktid, and save packet params in locker */
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
- (uint8)(dir), (void *)(dmah), (void *)(secdma), \
- (dhd_pkttype_t)(pkttype))
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
+ (dhd_pkttype_t)(pkttype))
/* Convert pktid to a packet, and free the locker */
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
- (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
- (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
/* Convert the pktid to a packet, empty locker, but keep it reserved */
#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
- (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
- (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
#if defined(DHD_PKTID_AUDIT_ENABLED)
-static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
- const int test_for, const char *errmsg);
+static int
+dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int pktid_map_type;
-/* Call back into OS layer to take the dongle dump and panic */
-#ifdef DHD_DEBUG_PAGEALLOC
-extern void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp);
-#endif /* DHD_DEBUG_PAGEALLOC */
+ if (pktid_map == prot->pktid_ctrl_map) {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
+ } else if (pktid_map == prot->pktid_tx_map) {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
+ } else if (pktid_map == prot->pktid_rx_map) {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
+ } else {
+ pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
+ }
+
+ return pktid_map_type;
+}
/**
-* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
+* __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
*/
static int
-dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
const int test_for, const char *errmsg)
{
#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
-
- const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
struct bcm_mwbmap *handle;
uint32 flags;
bool ignore_audit;
+ int error = BCME_OK;
if (pktid_map == (dhd_pktid_map_t *)NULL) {
DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
@@ -1186,21 +1926,19 @@
handle = pktid_map->pktid_audit;
if (handle == (struct bcm_mwbmap *)NULL) {
DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
- DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
- return BCME_OK;
+ goto out;
}
/* Exclude special pktids from audit */
ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
if (ignore_audit) {
- DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
- return BCME_OK;
+ goto out;
}
- if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
+ if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
- /* lock is released in "error" */
- goto error;
+ error = BCME_ERROR;
+ goto out;
}
/* Perform audit */
@@ -1209,25 +1947,27 @@
if (!bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
errmsg, pktid));
- goto error;
+ error = BCME_ERROR;
+ } else {
+ bcm_mwbmap_force(handle, pktid);
}
- bcm_mwbmap_force(handle, pktid);
break;
case DHD_DUPLICATE_FREE:
if (bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
errmsg, pktid));
- goto error;
+ error = BCME_ERROR;
+ } else {
+ bcm_mwbmap_free(handle, pktid);
}
- bcm_mwbmap_free(handle, pktid);
break;
case DHD_TEST_IS_ALLOC:
if (bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
errmsg, pktid));
- goto error;
+ error = BCME_ERROR;
}
break;
@@ -1235,42 +1975,74 @@
if (!bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
errmsg, pktid));
- goto error;
+ error = BCME_ERROR;
}
break;
default:
- goto error;
+ DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
+ error = BCME_ERROR;
+ break;
}
+out:
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
- return BCME_OK;
-error:
+ if (error != BCME_OK) {
+ dhd->pktid_audit_failed = TRUE;
+ }
- DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
- /* May insert any trap mechanism here ! */
-#ifdef DHD_DEBUG_PAGEALLOC
- dhd_pktid_audit_fail_cb(dhd);
-#else
- ASSERT(0);
-#endif /* DHD_DEBUG_PAGEALLOC */
- return BCME_ERROR;
+ return error;
+}
+
+static int
+dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+ const int test_for, const char *errmsg)
+{
+ int ret = BCME_OK;
+ ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
+ __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
+ dhd_pktid_error_handler(dhd);
+ }
+
+ return ret;
}
#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
+static int
+dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
+ const int test_for, void *msg, uint32 msg_len, const char *func)
+{
+ int ret = BCME_OK;
+
+ if (dhd_query_bus_erros(dhdp)) {
+ return BCME_ERROR;
+ }
+
+ ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
+ if (ret == BCME_ERROR) {
+ DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
+ __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
+ prhex(func, (uchar *)msg, msg_len);
+ dhd_pktid_error_handler(dhdp);
+ }
+ return ret;
+}
+#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
+ dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
+ (pktid), (test_for), msg, msg_len, __FUNCTION__)
+
#endif /* DHD_PKTID_AUDIT_ENABLED */
-
-/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */
-
/**
* +---------------------------------------------------------------------------+
* Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
*
- * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
*
* dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
* packet id is returned. This unique packet id may be used to retrieve the
@@ -1289,40 +2061,31 @@
/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
static dhd_pktid_map_handle_t *
-dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
{
- void *osh;
+	void *osh;
uint32 nkey;
dhd_pktid_map_t *map;
uint32 dhd_pktid_map_sz;
uint32 map_items;
-#ifdef DHD_USE_STATIC_PKTIDMAP
- uint32 section;
-#endif /* DHD_USE_STATIC_PKTIDMAP */
+ uint32 map_keys_sz;
osh = dhd->osh;
- ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
-#ifdef DHD_USE_STATIC_PKTIDMAP
- if (index == PKTID_MAP_HANDLE) {
- section = DHD_PREALLOC_PKTID_MAP;
- } else {
- section = DHD_PREALLOC_PKTID_MAP_IOCTL;
- }
-
- map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
-#else
- map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
-#endif /* DHD_USE_STATIC_PKTIDMAP */
-
+ map = (dhd_pktid_map_t *)VMALLOCZ(osh, dhd_pktid_map_sz);
if (map == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
__FUNCTION__, __LINE__, dhd_pktid_map_sz));
- goto error;
+ return (dhd_pktid_map_handle_t *)NULL;
}
- bzero(map, dhd_pktid_map_sz);
+ map->items = num_items;
+ map->avail = num_items;
+
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
/* Initialize the lock that protects this structure */
map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
@@ -1331,24 +2094,24 @@
goto error;
}
- map->items = num_items;
- map->avail = num_items;
-
- map_items = DHD_PKIDMAP_ITEMS(map->items);
-
-#if defined(DHD_PKTID_AUDIT_ENABLED)
- /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
- map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
- if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
- DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
+ map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
+ if (map->keys == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
+ __FUNCTION__, __LINE__, map_keys_sz));
goto error;
- } else {
- DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
- __FUNCTION__, __LINE__, map_items + 1));
}
- map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
-
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
+ map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
+ if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
+ DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
+ goto error;
+ } else {
+ DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
+ __FUNCTION__, __LINE__, map_items + 1));
+ }
+ map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
#endif /* DHD_PKTID_AUDIT_ENABLED */
for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
@@ -1358,8 +2121,8 @@
map->lockers[nkey].len = 0;
}
- /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
- map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
+	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID, to be in use */
+	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as in use */
map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
map->lockers[DHD_PKTID_INVALID].len = 0;
@@ -1371,9 +2134,7 @@
return (dhd_pktid_map_handle_t *)map; /* opaque handle */
error:
-
if (map) {
-
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
@@ -1383,12 +2144,16 @@
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
- if (map->pktid_lock)
+ if (map->keys) {
+ MFREE(osh, map->keys, map_keys_sz);
+ }
+
+ if (map->pktid_lock) {
DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+ }
- MFREE(osh, map, dhd_pktid_map_sz);
+ VMFREE(osh, map, dhd_pktid_map_sz);
}
-
return (dhd_pktid_map_handle_t *)NULL;
}
@@ -1397,159 +2162,180 @@
* Freeing implies: unmapping the buffers and freeing the native packet
* This could have been a callback registered with the pktid mapper.
*/
-
static void
-dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
void *osh;
uint32 nkey;
dhd_pktid_map_t *map;
- uint32 dhd_pktid_map_sz;
dhd_pktid_item_t *locker;
uint32 map_items;
- uint32 flags;
-
- if (handle == NULL) {
- return;
- }
+ unsigned long flags;
+ bool data_tx = FALSE;
map = (dhd_pktid_map_t *)handle;
- flags = DHD_PKTID_LOCK(map->pktid_lock);
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
osh = dhd->osh;
- dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
-
- nkey = 1; /* skip reserved KEY #0, and start from 1 */
- locker = &map->lockers[nkey];
-
map_items = DHD_PKIDMAP_ITEMS(map->items);
+ /* skip reserved KEY #0, and start from 1 */
- for (; nkey <= map_items; nkey++, locker++) {
-
- if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
-
- locker->state = LOCKER_IS_FREE; /* force open the locker */
-
-#if defined(DHD_PKTID_AUDIT_ENABLED)
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
-#endif /* DHD_PKTID_AUDIT_ENABLED */
-
- { /* This could be a callback registered with dhd_pktid_map */
- DMA_UNMAP(osh, locker->pa, locker->len,
- locker->dir, 0, DHD_DMAH_NULL);
- dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
- locker->pkttype, TRUE);
+ for (nkey = 1; nkey <= map_items; nkey++) {
+ if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+ locker = &map->lockers[nkey];
+ locker->state = LOCKER_IS_FREE;
+ data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
+ if (data_tx) {
+ OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
}
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_RING */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
+ locker->pa, nkey, locker->len,
+ locker->pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+ {
+ if (SECURE_DMA_ENAB(dhd->osh))
+ SECURE_DMA_UNMAP(osh, locker->pa,
+ locker->len, locker->dir, 0,
+ locker->dmah, locker->secdma, 0);
+ else
+ DMA_UNMAP(osh, locker->pa, locker->len,
+ locker->dir, 0, locker->dmah);
+ }
+ dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
+ locker->pkttype, data_tx);
}
-#if defined(DHD_PKTID_AUDIT_ENABLED)
else {
+#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
}
-#endif /* DHD_PKTID_AUDIT_ENABLED */
-
- locker->pkt = NULL; /* clear saved pkt */
- locker->len = 0;
+ map->keys[nkey] = nkey; /* populate with unique keys */
}
-#if defined(DHD_PKTID_AUDIT_ENABLED)
- if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
- bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
- map->pktid_audit = (struct bcm_mwbmap *)NULL;
- if (map->pktid_audit_lock) {
- DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
- }
- }
-#endif /* DHD_PKTID_AUDIT_ENABLED */
-
+ map->avail = map_items;
+ memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
- DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
-
-#ifdef DHD_USE_STATIC_PKTIDMAP
- DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
-#else
- MFREE(osh, handle, dhd_pktid_map_sz);
-#endif /* DHD_USE_STATIC_PKTIDMAP */
}
#ifdef IOCTLRESP_USE_CONSTMEM
/** Called in detach scenario. Releasing IOCTL buffers. */
static void
-dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
uint32 nkey;
dhd_pktid_map_t *map;
- uint32 dhd_pktid_map_sz;
dhd_pktid_item_t *locker;
uint32 map_items;
- uint32 flags;
- osl_t *osh = dhd->osh;
-
- if (handle == NULL) {
- return;
- }
+ unsigned long flags;
map = (dhd_pktid_map_t *)handle;
- flags = DHD_PKTID_LOCK(map->pktid_lock);
-
- dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
-
- nkey = 1; /* skip reserved KEY #0, and start from 1 */
- locker = &map->lockers[nkey];
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
map_items = DHD_PKIDMAP_ITEMS(map->items);
+ /* skip reserved KEY #0, and start from 1 */
+ for (nkey = 1; nkey <= map_items; nkey++) {
+ if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+ dhd_dma_buf_t retbuf;
- for (; nkey <= map_items; nkey++, locker++) {
-
- if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
-
- locker->state = LOCKER_IS_FREE; /* force open the locker */
-
-#if defined(DHD_PKTID_AUDIT_ENABLED)
+#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
-#endif /* DHD_PKTID_AUDIT_ENABLED */
+#endif /* DHD_PKTID_AUDIT_RING */
- {
- dhd_dma_buf_t retbuf;
- retbuf.va = locker->pkt;
- retbuf.len = locker->len;
- retbuf.pa = locker->pa;
- retbuf.dmah = locker->dmah;
- retbuf.secdma = locker->secdma;
+ locker = &map->lockers[nkey];
+ retbuf.va = locker->pkt;
+ retbuf.len = locker->len;
+ retbuf.pa = locker->pa;
+ retbuf.dmah = locker->dmah;
+ retbuf.secdma = locker->secdma;
- /* This could be a callback registered with dhd_pktid_map */
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
- free_ioctl_return_buffer(dhd, &retbuf);
- flags = DHD_PKTID_LOCK(map->pktid_lock);
- }
+ free_ioctl_return_buffer(dhd, &retbuf);
}
-#if defined(DHD_PKTID_AUDIT_ENABLED)
else {
+#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
}
-#endif /* DHD_PKTID_AUDIT_ENABLED */
-
- locker->pkt = NULL; /* clear saved pkt */
- locker->len = 0;
+ map->keys[nkey] = nkey; /* populate with unique keys */
}
+
+ map->avail = map_items;
+ memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+/**
+ * Free the pktid map.
+ */
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+ uint32 map_keys_sz;
+
+ if (handle == NULL)
+ return;
+
+ /* Free any pending packets */
+ dhd_pktid_map_reset(dhd, handle);
+
+ map = (dhd_pktid_map_t *)handle;
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+ DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
- bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+ bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
map->pktid_audit = (struct bcm_mwbmap *)NULL;
if (map->pktid_audit_lock) {
- DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+ DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
+ }
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+ MFREE(dhd->osh, map->keys, map_keys_sz);
+ VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
+}
+#ifdef IOCTLRESP_USE_CONSTMEM
+static void
+dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+ uint32 map_keys_sz;
+
+ if (handle == NULL)
+ return;
+
+ /* Free any pending packets */
+ dhd_pktid_map_reset_ioctl(dhd, handle);
+
+ map = (dhd_pktid_map_t *)handle;
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+ map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+ DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock) {
+ DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
}
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
- DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
-
-#ifdef DHD_USE_STATIC_PKTIDMAP
- DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
-#else
- MFREE(osh, handle, dhd_pktid_map_sz);
-#endif /* DHD_USE_STATIC_PKTIDMAP */
+ MFREE(dhd->osh, map->keys, map_keys_sz);
+ VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
}
#endif /* IOCTLRESP_USE_CONSTMEM */
@@ -1558,13 +2344,13 @@
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
{
dhd_pktid_map_t *map;
- uint32 flags;
uint32 avail;
+ unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- flags = DHD_PKTID_LOCK(map->pktid_lock);
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
avail = map->avail;
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
@@ -1572,88 +2358,97 @@
}
/**
- * Allocate locker, save pkt contents, and return the locker's numbered key.
- * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility.
- * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
- * implying a depleted pool of pktids.
+ * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
+ * yet populated. Invoke the pktid save api to populate the packet parameters
+ * into the locker. This function is not reentrant, and is the caller's
+ * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
+ * a failure case, implying a depleted pool of pktids.
*/
-
static INLINE uint32
-__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ void *pkt, dhd_pkttype_t pkttype)
{
uint32 nkey;
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
+ unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- if (map->avail <= 0) { /* no more pktids to allocate */
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
map->failures++;
DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return DHD_PKTID_INVALID; /* failed alloc request */
}
ASSERT(map->avail <= map->items);
nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+
+ if ((map->avail > map->items) || (nkey > map->items)) {
+ map->failures++;
+ DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
+ " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
+ __FUNCTION__, __LINE__, map->avail, nkey,
+ pkttype));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ return DHD_PKTID_INVALID; /* failed alloc request */
+ }
+
locker = &map->lockers[nkey]; /* save packet metadata in locker */
map->avail--;
locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
locker->len = 0;
locker->state = LOCKER_IS_BUSY; /* reserve this locker */
-#if defined(DHD_PKTID_AUDIT_MAP)
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
-#endif /* DHD_PKTID_AUDIT_MAP */
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
ASSERT(nkey != DHD_PKTID_INVALID);
+
return nkey; /* return locker's numbered key */
}
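A hedged sketch of how a caller is expected to use the reserve/save pair: grab a pktid first, perform the DMA mapping, then populate the locker. The DATA_TX path and the locals below are illustrative assumptions, not a call site from this patch:

	uint32 nkey = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_TX);
	if (nkey != DHD_PKTID_INVALID) {
		dmaaddr_t pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, pkt),
			PKTLEN(dhd->osh, pkt), DMA_TX, pkt, 0);
		DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa,
			PKTLEN(dhd->osh, pkt), DMA_TX, NULL, NULL, PKTTYPE_DATA_TX);
	}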
-
-/**
- * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
- * yet populated. Invoke the pktid save api to populate the packet parameters
- * into the locker.
- * Wrapper that takes the required lock when called directly.
+/*
+ * dhd_pktid_map_save - Save a packet's parameters into a locker
+ * corresponding to a previously reserved unique numbered key.
*/
-static INLINE uint32
-dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
-{
- dhd_pktid_map_t *map;
- uint32 flags;
- uint32 ret;
-
- ASSERT(handle != NULL);
- map = (dhd_pktid_map_t *)handle;
- flags = DHD_PKTID_LOCK(map->pktid_lock);
- ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
-
- return ret;
-}
-
static INLINE void
-__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
dhd_pkttype_t pkttype)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
+ unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
+
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+ DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
+ __FUNCTION__, __LINE__, nkey, pkttype));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return;
+ }
locker = &map->lockers[nkey];
ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
-
-#if defined(DHD_PKTID_AUDIT_MAP)
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
-#endif /* DHD_PKTID_AUDIT_MAP */
/* store contents in locker */
locker->dir = dir;
@@ -1664,26 +2459,9 @@
locker->pkttype = pkttype;
locker->pkt = pkt;
locker->state = LOCKER_IS_BUSY; /* make this locker busy */
-}
-
-/**
- * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
- * to a previously reserved unique numbered key.
- * Wrapper that takes the required lock when called directly.
- */
-static INLINE void
-dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
- uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
- dhd_pkttype_t pkttype)
-{
- dhd_pktid_map_t *map;
- uint32 flags;
-
- ASSERT(handle != NULL);
- map = (dhd_pktid_map_t *)handle;
- flags = DHD_PKTID_LOCK(map->pktid_lock);
- __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
- dir, dmah, secdma, pkttype);
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
@@ -1697,30 +2475,12 @@
dhd_pkttype_t pkttype)
{
uint32 nkey;
- uint32 flags;
- dhd_pktid_map_t *map;
- ASSERT(handle != NULL);
- map = (dhd_pktid_map_t *)handle;
-
- flags = DHD_PKTID_LOCK(map->pktid_lock);
-
- nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
+ nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
if (nkey != DHD_PKTID_INVALID) {
- __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
+ dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
len, dir, dmah, secdma, pkttype);
-#if defined(DHD_PKTID_AUDIT_MAP)
- DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
-#endif /* DHD_PKTID_AUDIT_MAP */
}
-
-#ifdef CUSTOMER_HW_31_2
- /* Need to do the flush at buffer allocation time */
- DHD_TRACE(("%s: flush buffer 0x%x len %d\n", __FUNCTION__,
- PKTDATA(dhd->osh, pkt), PKTLEN(dhd->osh, pkt)));
- OSL_CACHE_FLUSH(PKTDATA(dhd->osh, pkt), PKTLEN(dhd->osh, pkt));
-#endif
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return nkey;
}
@@ -1733,21 +2493,36 @@
*/
static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
- dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
- dhd_pkttype_t pkttype, bool rsv_locker)
+ dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
+ bool rsv_locker)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
void * pkt;
- uint32 flags;
+ unsigned long long locker_addr;
+ unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- flags = DHD_PKTID_LOCK(map->pktid_lock);
+ DHD_PKTID_LOCK(map->pktid_lock, flags);
- ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
+ if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+ DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
+ __FUNCTION__, __LINE__, nkey, pkttype));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return NULL;
+ }
locker = &map->lockers[nkey];
@@ -1755,12 +2530,20 @@
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
#endif /* DHD_PKTID_AUDIT_MAP */
- if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
- DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
- __FUNCTION__, __LINE__, nkey));
- ASSERT(locker->state != LOCKER_IS_FREE);
-
+ /* Debug check for cloned numbered key */
+ if (locker->state == LOCKER_IS_FREE) {
+		DHD_ERROR(("%s:%d: Error! freeing an already freed pktid<%u>\n",
+ __FUNCTION__, __LINE__, nkey));
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
return NULL;
}
@@ -1770,12 +2553,27 @@
*/
if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
-
DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
__FUNCTION__, __LINE__, nkey));
- ASSERT(locker->pkttype == pkttype);
-
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(locker->pa, locker_addr);
+#else
+ locker_addr = PHYSADDRLO(locker->pa);
+#endif /* BCMDMA64OSL */
+ DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
+			" pkttype <%d> locker->pa <0x%llx>\n",
+ __FUNCTION__, __LINE__, locker->state, locker->pkttype,
+ pkttype, locker_addr));
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
return NULL;
}
@@ -1791,6 +2589,10 @@
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
+ (uint32)locker->len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
*pa = locker->pa; /* return contents of locker */
*len = (uint32)locker->len;
@@ -1801,17 +2603,12 @@
locker->pkt = NULL; /* Clear pkt */
locker->len = 0;
-#ifdef CUSTOMER_HW_31_2
- /* need to do to ensure all packet are flushed */
- OSL_CACHE_INV(PKTDATA(dhd->osh, pkt), PKTLEN(dhd->osh, pkt));
-#endif
-
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
return pkt;
}
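On the completion side, the pktid carried in a D2H message is converted back to the native packet and its saved DMA metadata before the buffer is unmapped. A minimal sketch with hypothetical locals (RX path assumed; the pktid would come from the message's request_id, which is not shown here):

	dmaaddr_t pa;
	uint32 len, pktid;
	void *dmah, *secdma, *pkt;

	/* pktid = request_id from the D2H completion message (not shown) */
	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len,
		dmah, secdma, PKTTYPE_DATA_RX);
	if (pkt != NULL)
		DMA_UNMAP(dhd->osh, pa, len, DMA_RX, 0, dmah);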
#else /* ! DHD_PCIE_PKTID */
-
typedef struct pktlist {
PKT_LIST *tx_pkt_list; /* list for tx packets */
@@ -1831,7 +2628,6 @@
#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
-
static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
dhd_pkttype_t pkttype);
@@ -1840,7 +2636,7 @@
dhd_pkttype_t pkttype);
static dhd_pktid_map_handle_t *
-dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
{
osl_t *osh = dhd->osh;
pktlists_t *handle = NULL;
@@ -1897,15 +2693,9 @@
}
static void
-dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
+dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
{
osl_t *osh = dhd->osh;
- pktlists_t *handle = (pktlists_t *) map;
-
- ASSERT(handle != NULL);
- if (handle == (pktlists_t *)NULL) {
- return;
- }
if (handle->ctrl_pkt_list) {
PKTLIST_FINI(handle->ctrl_pkt_list);
@@ -1921,6 +2711,20 @@
PKTLIST_FINI(handle->tx_pkt_list);
MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
}
+}
+
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
+{
+ osl_t *osh = dhd->osh;
+ pktlists_t *handle = (pktlists_t *) map;
+
+ ASSERT(handle != NULL);
+ if (handle == (pktlists_t *)NULL) {
+ return;
+ }
+
+ dhd_pktid_map_reset(dhd, handle);
if (handle) {
MFREE(osh, handle, sizeof(pktlists_t));
@@ -1978,7 +2782,7 @@
return pktptr32;
}
-#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
@@ -2004,7 +2808,6 @@
#endif /* ! DHD_PCIE_PKTID */
/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
-
/**
* The PCIE FD protocol layer is constructed in two phases:
@@ -2033,6 +2836,11 @@
osl_t *osh = dhd->osh;
dhd_prot_t *prot;
+	/* FW is going to DMA the extended trap data;
+	 * allocate a buffer for the maximum extended trap data length.
+	 */
+ uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
+
/* Allocate prot structure */
if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
sizeof(dhd_prot_t)))) {
@@ -2047,6 +2855,12 @@
/* DMAing ring completes supported? FALSE by default */
dhd->dma_d2h_ring_upd_support = FALSE;
dhd->dma_h2d_ring_upd_support = FALSE;
+ dhd->dma_ring_upd_overwrite = FALSE;
+
+ dhd->hwa_inited = 0;
+ dhd->idma_inited = 0;
+ dhd->ifrm_inited = 0;
+ dhd->dar_inited = 0;
/* Common Ring Allocations */
@@ -2112,6 +2926,12 @@
goto fail;
}
+	/* Host TS request buffer; one buffer for now */
+ if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
+ goto fail;
+ }
+ prot->hostts_req_buf_inuse = FALSE;
+
/* Scratch buffer for dma rx offset */
#ifdef BCM_HOST_BUF
if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
@@ -2120,6 +2940,7 @@
if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
#endif /* BCM_HOST_BUF */
+
goto fail;
}
@@ -2130,50 +2951,245 @@
#ifdef DHD_RX_CHAINING
dhd_rxchain_reset(&prot->rxchain);
-#endif
+#endif // endif
-#if defined(DHD_LB)
+ prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
+ if (prot->pktid_ctrl_map == NULL) {
+ goto fail;
+ }
+
+ prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
+ if (prot->pktid_rx_map == NULL)
+ goto fail;
+
+ prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
+ if (prot->pktid_tx_map == NULL)
+ goto fail;
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+ prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
+ DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
+ if (prot->pktid_map_handle_ioctl == NULL) {
+ goto fail;
+ }
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+ prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_map == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
+ __FUNCTION__));
+ }
+
+ prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_unmap == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
+ __FUNCTION__));
+ }
+#endif /* DHD_MAP_PKTID_LOGGING */
/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
{
void *buffer;
buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
+ if (buffer == NULL) {
+			DHD_ERROR(("%s: failed to allocate TXC work buffer\n", __FUNCTION__));
+ goto fail;
+ }
bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
buffer, DHD_LB_WORKQ_SZ);
prot->tx_compl_prod_sync = 0;
DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
- }
+ }
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
- {
+ {
void *buffer;
- buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
+ buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
+ if (buffer == NULL) {
+ DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
+ goto fail;
+ }
bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
buffer, DHD_LB_WORKQ_SZ);
prot->rx_compl_prod_sync = 0;
DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
- }
+ }
#endif /* DHD_LB_RXC */
-#endif /* DHD_LB */
+ /* Initialize trap buffer */
+ if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
+		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
+ goto fail;
+ }
return BCME_OK;
fail:
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- if (prot != NULL) {
+ if (prot) {
+ /* Free up all allocated memories */
dhd_prot_detach(dhd);
}
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
return BCME_NOMEM;
} /* dhd_prot_attach */
+static int
+dhd_alloc_host_scbs(dhd_pub_t *dhd)
+{
+ int ret = BCME_OK;
+ sh_addr_t base_addr;
+ dhd_prot_t *prot = dhd->prot;
+ uint32 host_scb_size = 0;
+
+ if (dhd->hscb_enable) {
+ /* read number of bytes to allocate from F/W */
+ dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
+ if (host_scb_size) {
+ /* alloc array of host scbs */
+ ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
+ /* write host scb address to F/W */
+ if (ret == BCME_OK) {
+ dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ HOST_SCB_ADDR, 0);
+ } else {
+ DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
+ }
+ } else {
+ DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
+ }
+ } else {
+ DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
+ }
+
+ return ret;
+}
+
+void
+dhd_set_host_cap(dhd_pub_t *dhd)
+{
+ uint32 data = 0;
+ dhd_prot_t *prot = dhd->prot;
+
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+ if (dhd->h2d_phase_supported) {
+ data |= HOSTCAP_H2D_VALID_PHASE;
+ if (dhd->force_dongletrap_on_bad_h2d_phase)
+ data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
+ }
+ if (prot->host_ipc_version > prot->device_ipc_version)
+ prot->active_ipc_version = prot->device_ipc_version;
+ else
+ prot->active_ipc_version = prot->host_ipc_version;
+
+ data |= prot->active_ipc_version;
+
+ if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
+ DHD_INFO(("Advertise Hostready Capability\n"));
+ data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
+ }
+ {
+ /* Disable DS altogether */
+ data |= HOSTCAP_DS_NO_OOB_DW;
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
+ }
+
+ /* Indicate support for extended trap data */
+ data |= HOSTCAP_EXTENDED_TRAP_DATA;
+
+ /* Indicate support for TX status metadata */
+ if (dhd->pcie_txs_metadata_enable != 0)
+ data |= HOSTCAP_TXSTATUS_METADATA;
+
+ /* Enable fast delete ring in firmware if supported */
+ if (dhd->fast_delete_ring_support) {
+ data |= HOSTCAP_FAST_DELETE_RING;
+ }
+
+ if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
+ DHD_ERROR(("HWA inited\n"));
+ /* TODO: Is hostcap needed? */
+ dhd->hwa_inited = TRUE;
+ }
+
+ if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
+ DHD_ERROR(("IDMA inited\n"));
+ data |= HOSTCAP_H2D_IDMA;
+ dhd->idma_inited = TRUE;
+ }
+
+ if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
+ DHD_ERROR(("IFRM Inited\n"));
+ data |= HOSTCAP_H2D_IFRM;
+ dhd->ifrm_inited = TRUE;
+ dhd->dma_h2d_ring_upd_support = FALSE;
+ dhd_prot_dma_indx_free(dhd);
+ }
+
+ if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
+ DHD_ERROR(("DAR doorbell Use\n"));
+ data |= HOSTCAP_H2D_DAR;
+ dhd->dar_inited = TRUE;
+ }
+
+ data |= HOSTCAP_UR_FW_NO_TRAP;
+
+ if (dhd->hscb_enable) {
+ data |= HOSTCAP_HSCB;
+ }
+
+#ifdef EWP_EDL
+ if (dhd->dongle_edl_support) {
+ data |= HOSTCAP_EDL_RING;
+ DHD_ERROR(("Enable EDL host cap\n"));
+ } else {
+ DHD_ERROR(("DO NOT SET EDL host cap\n"));
+ }
+#endif /* EWP_EDL */
+
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable) {
+ data |= HOSTCAP_PKT_TIMESTAMP;
+ data |= HOSTCAP_PKT_HP2P;
+ DHD_ERROR(("Enable HP2P in host cap\n"));
+ } else {
+ DHD_ERROR(("HP2P not enabled in host cap\n"));
+ }
+#endif // endif
+
+#ifdef DHD_DB0TS
+ if (dhd->db0ts_capable) {
+ data |= HOSTCAP_DB0_TIMESTAMP;
+ DHD_ERROR(("Enable DB0 TS in host cap\n"));
+ } else {
+ DHD_ERROR(("DB0 TS not enabled in host cap\n"));
+ }
+#endif /* DHD_DB0TS */
+ if (dhd->extdtxs_in_txcpl) {
+ DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+ data |= HOSTCAP_PKT_TXSTATUS;
+ }
+ else {
+		DHD_ERROR(("Disable hostcap: EXTD TXS in txcpl\n"));
+ }
+
+ DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
+ __FUNCTION__,
+ prot->active_ipc_version, prot->host_ipc_version,
+ prot->device_ipc_version));
+
+ dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
+ sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
+ }
+
+}
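The IPC version negotiation above reduces to taking the lower of the two advertised versions; for example, a host built with PCIE_SHARED_VERSION 7 talking to rev-6 firmware runs the rev-6 IPC. An illustrative helper, not part of the driver:

	/* active IPC version is min(host, device), as dhd_set_host_cap computes */
	static uint32 active_ipc_version(uint32 host_ver, uint32 device_ver)
	{
		return (host_ver > device_ver) ? device_ver : host_ver;
	}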
/**
* dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
@@ -2187,43 +3203,24 @@
{
sh_addr_t base_addr;
dhd_prot_t *prot = dhd->prot;
+ int ret = 0;
+ uint32 idmacontrol;
+ uint32 waitcount = 0;
- /* PKTID handle INIT */
- if (prot->pktid_map_handle != NULL) {
- DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
- ASSERT(0);
- return BCME_ERROR;
- }
+#ifdef WL_MONITOR
+ dhd->monitor_enable = FALSE;
+#endif /* WL_MONITOR */
-#ifdef IOCTLRESP_USE_CONSTMEM
- if (prot->pktid_map_handle_ioctl != NULL) {
- DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
- ASSERT(0);
- return BCME_ERROR;
- }
-#endif /* IOCTLRESP_USE_CONSTMEM */
+ /**
+	 * A user-defined value can be assigned to the global variable h2d_max_txpost via
+	 * 1. the DHD IOVAR h2d_max_txpost, before firmware download
+	 * 2. the module parameter h2d_max_txpost
+	 * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM if the user
+	 * has not set a value by one of the above methods.
+ */
+ prot->h2d_max_txpost = (uint16)h2d_max_txpost;
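A sketch of the second hook mentioned in the comment above, assuming the usual Linux glue; the actual declaration would live in the OS layer (e.g. dhd_linux.c), not in this file:

	#include <linux/moduleparam.h>

	/* Assumed OS-layer hook: expose h2d_max_txpost for insmod-time tuning,
	 * before firmware download; default mirrors H2DRING_TXPOST_MAX_ITEM. */
	int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
	module_param(h2d_max_txpost, int, 0644);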
- prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
- if (prot->pktid_map_handle == NULL) {
- DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
- ASSERT(0);
- return BCME_NOMEM;
- }
-
-#ifdef IOCTLRESP_USE_CONSTMEM
- prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
- DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
- if (prot->pktid_map_handle_ioctl == NULL) {
- DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
- ASSERT(0);
- return BCME_NOMEM;
- }
-#endif /* IOCTLRESP_USE_CONSTMEM */
-
- /* Max pkts in ring */
- prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
-
- DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
+ DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
/* Read max rx packets supported by dongle */
dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
@@ -2232,21 +3229,24 @@
/* using the latest shared structure template */
prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
}
- DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+ DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
/* Initialize. bzero() would blow away the dma pointers. */
prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+ prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
+ prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
prot->cur_ioctlresp_bufs_posted = 0;
- prot->active_tx_count = 0;
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->rxbufpost = 0;
prot->cur_event_bufs_posted = 0;
prot->ioctl_state = 0;
prot->curr_ioctl_cmd = 0;
- prot->ioctl_received = IOCTL_WAIT;
+ prot->cur_ts_bufs_posted = 0;
+ prot->infobufpost = 0;
prot->dmaxfer.srcmem.va = NULL;
prot->dmaxfer.dstmem.va = NULL;
@@ -2257,23 +3257,50 @@
prot->tx_metadata_offset = 0;
prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
- prot->ioctl_trans_id = 0;
+	/* To catch any rollover issues fast, start with a high ioctl_trans_id */
+ prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
+ prot->ioctl_state = 0;
+ prot->ioctl_status = 0;
+ prot->ioctl_resplen = 0;
+ prot->ioctl_received = IOCTL_WAIT;
+
+ /* Initialize Common MsgBuf Rings */
+
+ prot->device_ipc_version = dhd->bus->api.fw_rev;
+ prot->host_ipc_version = PCIE_SHARED_VERSION;
+ prot->no_tx_resource = FALSE;
+
+ /* Init the host API version */
+ dhd_set_host_cap(dhd);
+
+ /* alloc and configure scb host address for dongle */
+ if ((ret = dhd_alloc_host_scbs(dhd))) {
+ return ret;
+ }
/* Register the interrupt function upfront */
/* remove corerev checks in data path */
+ /* do this after host/fw negotiation for DAR */
prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+ prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
- /* Initialize Common MsgBuf Rings */
+ dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
+
+	/* Make it compatible with pre-rev7 firmware */
+ if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
+ prot->d2hring_tx_cpln.item_len =
+ D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
+ prot->d2hring_rx_cpln.item_len =
+ D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
+ }
dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
-#if defined(PCIE_D2H_SYNC)
dhd_prot_d2h_sync_init(dhd);
-#endif /* PCIE_D2H_SYNC */
dhd_prot_h2d_sync_init(dhd);
@@ -2287,7 +3314,7 @@
/* If supported by the host, indicate the memory block
* for completion writes / submission reads to shared space
*/
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ if (dhd->dma_d2h_ring_upd_support) {
dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_WR_BUF, 0);
@@ -2296,7 +3323,7 @@
H2D_DMA_INDX_RD_BUF, 0);
}
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
H2D_DMA_INDX_WR_BUF, 0);
@@ -2304,6 +3331,9 @@
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_RD_BUF, 0);
}
+ /* Signal to the dongle that common ring init is complete */
+ if (dhd->hostrdy_after_init)
+ dhd_bus_hostready(dhd->bus);
/*
* If the DMA-able buffers for flowring needs to come from a specific
@@ -2317,26 +3347,123 @@
return BCME_ERROR;
}
+	/* If IFRM is enabled, wait for FW to set up the DMA channel */
+ if (IFRM_ENAB(dhd)) {
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ H2D_IFRM_INDX_WR_BUF, 0);
+ }
+
+	/* If IDMA is enabled and initialized, wait for the FW to set up the
+	 * IDMA descriptors. Wait just before configuring the doorbell.
+	 */
+#ifdef BCMQT
+#define IDMA_ENABLE_WAIT 100
+#else
+#define IDMA_ENABLE_WAIT 10
+#endif // endif
+ if (IDMA_ACTIVE(dhd)) {
+		/* Poll until the idma_en bit in the IDMAControl register is set */
+ uint buscorerev = dhd->bus->sih->buscorerev;
+ idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ IDMAControl(buscorerev), 0, 0);
+ while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
+ (waitcount++ < IDMA_ENABLE_WAIT)) {
+
+			DHD_ERROR(("iDMA not enabled yet, waiting 1 ms c=%d IDMAControl = %08x\n",
+ waitcount, idmacontrol));
+#ifdef BCMQT
+ OSL_DELAY(200000); /* 200msec for BCMQT */
+#else
+			OSL_DELAY(1000); /* 1 ms; this wait happens only once */
+#endif // endif
+ idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ IDMAControl(buscorerev), 0, 0);
+ }
+
+ if (waitcount < IDMA_ENABLE_WAIT) {
+ DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
+ } else {
+ DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
+ waitcount, idmacontrol));
+ return BCME_ERROR;
+ }
+ }
+
/* Host should configure soft doorbells if needed ... here */
/* Post to dongle host configured soft doorbells */
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
- /* Post buffers for packet reception and ioctl/event responses */
- dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+ prot->no_retry = FALSE;
+ prot->no_aggr = FALSE;
+ prot->fixed_rate = FALSE;
+
+ /*
+ * Note that any communication with the Dongle should be added
+	 * below this point. Any other host data structure initialization that
+	 * needs to be done before the DPC starts executing should be done
+	 * before this point.
+	 * Once we start sending H2D requests to the Dongle, it may respond
+	 * immediately, so the DPC context that handles the D2H response could
+	 * preempt the context in which dhd_prot_init is running. We want to
+	 * ensure that the host part of dhd_prot_init is done before that.
+ */
+
+	/* See if info rings can be created; info rings should be created
+	 * only if the dongle does not support EDL
+ */
+#ifdef EWP_EDL
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+ {
+ if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
+			/* For now log and proceed; further cleanup action may be
+			 * necessary when we have more clarity.
+			 */
+			DHD_ERROR(("%s Info rings couldn't be created: Err Code %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+
+#ifdef EWP_EDL
+ /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
+ if (dhd->dongle_edl_support) {
+ if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
+			DHD_ERROR(("%s EDL rings couldn't be created: Err Code %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* EWP_EDL */
+
+#ifdef DHD_HP2P
+ /* create HPP txcmpl/rxcmpl rings */
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
+ if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
+			/* For now log and proceed; further cleanup action may be
+			 * necessary when we have more clarity.
+			 */
+			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* DHD_HP2P */
+
return BCME_OK;
} /* dhd_prot_init */
-
/**
* dhd_prot_detach - PCIE FD protocol layer destructor.
* Unlink, frees allocated protocol memory (including dhd_prot)
*/
-void
-dhd_prot_detach(dhd_pub_t *dhd)
+void dhd_prot_detach(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
@@ -2346,15 +3473,20 @@
/* free up all DMA-able buffers allocated during prot attach/init */
dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
- dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
+ dhd_dma_buf_free(dhd, &prot->retbuf);
dhd_dma_buf_free(dhd, &prot->ioctbuf);
dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
+ dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
+ dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
+ dhd_dma_buf_free(dhd, &prot->host_scb_buf);
/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+
+ dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
/* Common MsgBuf Rings */
dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
@@ -2366,41 +3498,61 @@
/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
dhd_prot_flowrings_pool_detach(dhd);
- if (dhd->prot->pktid_map_handle) {
- DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
- }
+ /* detach info rings */
+ dhd_prot_detach_info_rings(dhd);
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#ifdef EWP_EDL
+ dhd_prot_detach_edl_rings(dhd);
+#endif // endif
+#ifdef DHD_HP2P
+ /* detach HPP rings */
+ dhd_prot_detach_hp2p_rings(dhd);
+#endif /* DHD_HP2P */
-#if defined(DHD_LB)
+	/* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the
+	 * pktid_map_handle_ioctl handler and PKT memory is allocated using
+	 * alloc_ioctl_return_buffer(); such PKTs are freed using
+	 * DHD_NATIVE_TO_PKTID_FINI_IOCTL, which calls free_ioctl_return_buffer().
+	 * Otherwise, IOCTL PKTs are part of the pktid_ctrl_map handler and PKT
+	 * memory is allocated using PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is
+	 * defined) or PKTGET, and freed using DHD_NATIVE_TO_PKTID_FINI, which
+	 * calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE.
+	 */
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+#endif // endif
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
#if defined(DHD_LB_TXC)
- if (prot->tx_compl_prod.buffer) {
+ if (prot->tx_compl_prod.buffer)
MFREE(dhd->osh, prot->tx_compl_prod.buffer,
- sizeof(void*) * DHD_LB_WORKQ_SZ);
- }
+ sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
- if (prot->rx_compl_prod.buffer) {
+ if (prot->rx_compl_prod.buffer)
MFREE(dhd->osh, prot->rx_compl_prod.buffer,
- sizeof(void*) * DHD_LB_WORKQ_SZ);
- }
+ sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_RXC */
-#endif /* DHD_LB */
+
+ DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
dhd->prot = NULL;
}
} /* dhd_prot_detach */
-
/**
- * dhd_prot_reset - Reset the protocol layer without freeing any objects. This
- * may be invoked to soft reboot the dongle, without having to detach and attach
- * the entire protocol layer.
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects.
+ * This may be invoked to soft reboot the dongle, without having to
+ * detach and attach the entire protocol layer.
*
- * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
- * a dhd_prot_attach() phase.
+ * After dhd_prot_reset(), dhd_prot_init() may be invoked
+ * without going through a dhd_prot_attach() phase.
*/
void
dhd_prot_reset(dhd_pub_t *dhd)
@@ -2415,20 +3567,43 @@
dhd_prot_flowrings_pool_reset(dhd);
+ /* Reset Common MsgBuf Rings */
dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
+ /* Reset info rings */
+ if (prot->h2dring_info_subn) {
+ dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
+ }
+
+ if (prot->d2hring_info_cpln) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
+ }
+#ifdef EWP_EDL
+ if (prot->d2hring_edl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_edl);
+ }
+#endif /* EWP_EDL */
+
+ /* Reset all DMA-able buffers allocated during prot attach */
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
dhd_dma_buf_reset(dhd, &prot->retbuf);
dhd_dma_buf_reset(dhd, &prot->ioctbuf);
- dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
+ dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
+ dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
+ dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
+ dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
+
+ dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+	/* Reset all DMA-able buffers used for DMAing H2D/D2H WR/RD indices */
dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
-
prot->rx_metadata_offset = 0;
prot->tx_metadata_offset = 0;
@@ -2437,13 +3612,14 @@
prot->cur_event_bufs_posted = 0;
prot->cur_ioctlresp_bufs_posted = 0;
- prot->active_tx_count = 0;
+ OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->ioctl_state = 0;
prot->curr_ioctl_cmd = 0;
prot->ioctl_received = IOCTL_WAIT;
- prot->ioctl_trans_id = 0;
+	/* Start with a high ioctl_trans_id so that rollover issues surface quickly */
+ prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
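+	/* Worked example (illustrative): if ioctl_trans_id is a uint16 and
+	 * BUFFER_BEFORE_ROLLOVER were, say, 300, this starts the id at
+	 * 0xFFFF - 300 = 65235, so a rollover is exercised after roughly 300
+	 * ioctls instead of after 65535.
+	 */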
/* dhd_flow_rings_init is located at dhd_bus_start,
* so when stopping bus, flowrings shall be deleted
@@ -2452,123 +3628,84 @@
dhd_flow_rings_deinit(dhd);
}
- if (prot->pktid_map_handle) {
- DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
- prot->pktid_map_handle = NULL;
+#ifdef DHD_HP2P
+ if (prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
}
+ if (prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
+ }
+#endif /* DHD_HP2P */
+ /* Reset PKTID map */
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
+ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
#ifdef IOCTLRESP_USE_CONSTMEM
- if (prot->pktid_map_handle_ioctl) {
- DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
- prot->pktid_map_handle_ioctl = NULL;
- }
+ DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif /* IOCTLRESP_USE_CONSTMEM */
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
+ dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
+#ifndef IOCTLRESP_USE_CONSTMEM
+ dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
+ dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
+ dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
+#endif /* DMAMAP_STATS */
} /* dhd_prot_reset */
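+
+/* Usage sketch (illustrative, not part of this patch): a soft dongle reboot
+ * would roughly be:
+ *
+ *	dhd_prot_reset(dhd);	// clear rings, pktid maps and counters
+ *	// ... bus layer re-downloads firmware here ...
+ *	dhd_prot_init(dhd);	// re-init without a dhd_prot_attach() phase
+ */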
+#if defined(DHD_LB_RXP)
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
+#else /* !DHD_LB_RXP */
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
+#endif /* !DHD_LB_RXP */
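+/* The do { } while (0) body keeps each no-op macro a single statement, so it
+ * remains safe inside an unbraced if/else at the call site.
+ */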
-void
-dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
-{
- dhd_prot_t *prot = dhd->prot;
- prot->rx_dataoffset = rx_offset;
-}
+#if defined(DHD_LB_RXC)
+#define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
+#else /* !DHD_LB_RXC */
+#define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0)
+#endif /* !DHD_LB_RXC */
-/**
- * Initialize protocol: sync w/dongle state.
- * Sets dongle media info (iswl, drv_version, mac address).
- */
-int
-dhd_sync_with_dongle(dhd_pub_t *dhd)
-{
- int ret = 0;
- wlc_rev_info_t revinfo;
-
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
-
-
-
-#ifdef DHD_FW_COREDUMP
- /* For Android Builds check memdump capability */
- /* Check the memdump capability */
- dhd_get_memdump_info(dhd);
-#endif /* DHD_FW_COREDUMP */
-
- /* Get the device rev info */
- memset(&revinfo, 0, sizeof(revinfo));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
- if (ret < 0) {
- DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
- goto done;
- }
- DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
- revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
-
- dhd_process_cid_mac(dhd, TRUE);
-
- ret = dhd_preinit_ioctls(dhd);
- if (!ret) {
- dhd_process_cid_mac(dhd, FALSE);
- }
-
- /* Always assumes wl for now */
- dhd->iswl = TRUE;
-done:
- return ret;
-} /* dhd_sync_with_dongle */
+#if defined(DHD_LB_TXC)
+#define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
+#else /* !DHD_LB_TXC */
+#define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
+#endif /* !DHD_LB_TXC */
#if defined(DHD_LB)
-
/* DHD load balancing: deferral of work to another online CPU */
-
/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
-
extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+#if defined(DHD_LB_RXP)
/**
- * dhd_lb_dispatch - load balance by dispatch work to other CPU cores
- * Note: rx_compl_tasklet is dispatched explicitly.
+ * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing
+ * work to other CPU cores
*/
static INLINE void
-dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
+dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
{
- switch (ring_idx) {
+ dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
+}
+#endif /* DHD_LB_RXP */
#if defined(DHD_LB_TXC)
- case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
- bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
- dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
- break;
-#endif /* DHD_LB_TXC */
-
- case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
- {
-#if defined(DHD_LB_RXC)
- dhd_prot_t *prot = dhdp->prot;
- /* Schedule the takslet only if we have to */
- if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
- /* flush WR index */
- bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
- dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
- }
-#endif /* DHD_LB_RXC */
-#if defined(DHD_LB_RXP)
- dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
-#endif /* DHD_LB_RXP */
- break;
- }
- default:
- break;
- }
+/**
+ * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion
+ * work to other CPU cores
+ */
+static INLINE void
+dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp)
+{
+ bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
+ dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
}
-
-#if defined(DHD_LB_TXC)
/**
* DHD load balanced tx completion tasklet handler, that will perform the
* freeing of packets on the selected CPU. Packet pointers are delivered to
@@ -2585,6 +3722,10 @@
dhd_prot_t *prot = dhd->prot;
bcm_workq_t *workq = &prot->tx_compl_cons;
uint32 count = 0;
+
+ int curr_cpu;
+ curr_cpu = get_cpu();
+ put_cpu();
DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
@@ -2606,11 +3747,13 @@
pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
- DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, pkt, 0);
-
+ DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
-#endif
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#endif // endif
PKTFREE(dhd->osh, pkt, TRUE);
count++;
@@ -2623,6 +3766,23 @@
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
+
+/**
+ * dhd_lb_dispatch_rx_compl - load balance by dispatching Rx completion
+ * work to other CPU cores
+ */
+static INLINE void
+dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
+{
+ dhd_prot_t *prot = dhdp->prot;
+	/* Schedule the tasklet only if we have to */
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+ /* flush WR index */
+ bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
+ dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
+ }
+}
+
void
dhd_lb_rx_compl_handler(unsigned long data)
{
@@ -2635,8 +3795,486 @@
bcm_workq_cons_sync(workq);
}
#endif /* DHD_LB_RXC */
-
#endif /* DHD_LB */
+
+void
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->rx_dataoffset = rx_offset;
+}
+
+static int
+dhd_check_create_info_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ {
+ /* dongle may increase max_submission_rings so keep
+ * ringid at end of dynamic rings
+ */
+ ringid = dhd->bus->max_tx_flowrings +
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+ BCMPCIE_H2D_COMMON_MSGRINGS;
+ }
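+	/* The expression above reduces to max_submission_rings +
+	 * BCMPCIE_H2D_COMMON_MSGRINGS; e.g. with 40 submission rings and 2
+	 * common H2D rings (illustrative numbers) the info submit ring gets
+	 * ring id 42.
+	 */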
+
+ if (prot->d2hring_info_cpln) {
+ /* for d2hring re-entry case, clear inited flag */
+ prot->d2hring_info_cpln->inited = FALSE;
+ }
+
+ if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+ }
+
+ if (prot->h2dring_info_subn == NULL) {
+ prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->h2dring_info_subn == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
+ H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
+ __FUNCTION__));
+ goto err;
+ }
+ }
+
+ if (prot->d2hring_info_cpln == NULL) {
+ prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_info_cpln == NULL) {
+			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* create the debug info completion ring next to debug info submit ring
+ * ringid = id next to debug info submit ring
+ */
+ ringid = ringid + 1;
+
+ DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
+ D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
+ __FUNCTION__));
+ dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
+ goto err;
+ }
+ }
+
+ return ret;
+err:
+ MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+ prot->h2dring_info_subn = NULL;
+
+ if (prot->d2hring_info_cpln) {
+ MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+ prot->d2hring_info_cpln = NULL;
+ }
+ return ret;
+} /* dhd_check_create_info_rings */
+
+int
+dhd_prot_init_info_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+
+ if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
+		DHD_ERROR(("%s: info rings could not be created!\n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
+		DHD_INFO(("Info completion ring already created\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
+ BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_info_subn->current_phase = 0;
+ prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
+ prot->h2dring_info_subn->n_completion_ids = 1;
+ prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
+
+ ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
+ BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
+
+	/* Note that there is no way to delete a d2h or h2d ring in case either
+	 * create fails, so we cannot clean up if one ring was created while the
+	 * other failed
+	 */
+ return ret;
+} /* dhd_prot_init_info_rings */
+
+static void
+dhd_prot_detach_info_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->h2dring_info_subn) {
+ dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
+ MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+ dhd->prot->h2dring_info_subn = NULL;
+ }
+ if (dhd->prot->d2hring_info_cpln) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_info_cpln = NULL;
+ }
+}
+
+#ifdef DHD_HP2P
+static int
+dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ /* Last 2 dynamic ring indices are used by hp2p rings */
+ ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
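+	/* e.g. with 41 submission and 41 completion rings (illustrative
+	 * numbers), the hp2p txcpl ring gets id 80 and the rxcpl ring id 81.
+	 */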
+
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
+ __FUNCTION__));
+ goto err2;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_txcpl->inited = FALSE;
+ }
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* create the hp2p rx completion ring next to hp2p tx compl ring
+ * ringid = id next to hp2p tx compl ring
+ */
+ ringid = ringid + 1;
+
+ DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
+ __FUNCTION__));
+ goto err1;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_rxcpl->inited = FALSE;
+ }
+
+ return ret;
+err1:
+ MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_rxcpl = NULL;
+
+err2:
+ MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_txcpl = NULL;
+ return ret;
+} /* dhd_check_create_hp2p_rings */
+
+int
+dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_OK;
+
+ dhd->hp2p_ring_active = FALSE;
+
+ if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
+		DHD_ERROR(("%s: hp2p rings could not be created!\n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
+		DHD_INFO(("hp2p tx completion ring already created\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
+ prot->d2hring_hp2p_txcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
+		DHD_INFO(("hp2p rx completion ring already created\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
+ prot->d2hring_hp2p_rxcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+	/* Note that there is no way to delete a d2h or h2d ring in case either
+	 * create fails, so we cannot clean up if one ring was created while the
+	 * other failed
+	 */
+ return BCME_OK;
+} /* dhd_prot_init_hp2p_rings */
+
+static void
+dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_hp2p_txcpl = NULL;
+ }
+ if (dhd->prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_hp2p_rxcpl = NULL;
+ }
+}
+#endif /* DHD_HP2P */
+
+#ifdef EWP_EDL
+static int
+dhd_check_create_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+ uint16 ringid;
+
+ {
+ /* dongle may increase max_submission_rings so keep
+ * ringid at end of dynamic rings (re-use info ring cpl ring id)
+ */
+ ringid = dhd->bus->max_tx_flowrings +
+ (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+ BCMPCIE_H2D_COMMON_MSGRINGS + 1;
+ }
+
+ if (prot->d2hring_edl) {
+ prot->d2hring_edl->inited = FALSE;
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+ }
+
+ if (prot->d2hring_edl == NULL) {
+ prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_edl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
+ ringid));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
+ D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
+ __FUNCTION__));
+ goto err;
+ }
+ }
+
+ return ret;
+err:
+ MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
+ prot->d2hring_edl = NULL;
+
+ return ret;
+} /* dhd_check_create_edl_rings */
+
+int
+dhd_prot_init_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+
+ if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
+		DHD_ERROR(("%s: EDL rings could not be created!\n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
+		DHD_INFO(("EDL completion ring already created\n"));
+ return ret;
+ }
+
+ DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
+ BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ return BCME_OK;
+} /* dhd_prot_init_edl_rings */
+
+static void
+dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
+{
+ if (dhd->prot->d2hring_edl) {
+ dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
+ MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
+ dhd->prot->d2hring_edl = NULL;
+ }
+}
+#endif /* EWP_EDL */
+
+/**
+ * Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ wlc_rev_info_t revinfo;
+ char buf[128];
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+
+ /* Post ts buffer after shim layer is attached */
+ ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+
+#ifndef OEM_ANDROID
+ /* Get the device MAC address */
+ memset(buf, 0, sizeof(buf));
+ strncpy(buf, "cur_etheraddr", sizeof(buf) - 1);
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
+ goto done;
+ }
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+ if (dhd_msg_level & DHD_INFO_VAL) {
+ bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
+ }
+#endif /* OEM_ANDROID */
+
+#ifdef DHD_FW_COREDUMP
+ /* Check the memdump capability */
+ dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+ dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
+ goto done;
+ }
+ DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
+
+ /* Get the RxBuf post size */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
+ __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
+ if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
+ DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
+ __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
+ }
+ }
+
+ /* Post buffers for packet reception */
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+
+ DHD_SSSR_DUMP_INIT(dhd);
+
+ dhd_process_cid_mac(dhd, TRUE);
+ ret = dhd_preinit_ioctls(dhd);
+ dhd_process_cid_mac(dhd, FALSE);
+
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+#ifdef DHD_HP2P
+ if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
+ if (dhd->hp2p_enable) {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
+ } else {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+ }
+#else
+ if (FW_SUPPORTED(dhd, h2dlogts)) {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+#endif // endif
+ dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
+ /* This is during initialization. */
+ dhd_h2d_log_time_sync(dhd);
+ } else {
+ dhd->dhd_rte_time_sync_ms = 0;
+ }
+#endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+done:
+ return ret;
+} /* dhd_sync_with_dongle */
#define DHD_DBG_SHOW_METADATA 0
@@ -2736,7 +4374,9 @@
{
if (pkt) {
if (pkttype == PKTTYPE_IOCTL_RX ||
- pkttype == PKTTYPE_EVENT_RX) {
+ pkttype == PKTTYPE_EVENT_RX ||
+ pkttype == PKTTYPE_INFO_RX ||
+ pkttype == PKTTYPE_TSBUF_RX) {
#ifdef DHD_USE_STATIC_CTRLBUF
PKTFREE_STATIC(dhd->osh, pkt, send);
#else
@@ -2748,6 +4388,11 @@
}
}
+/**
+ * dhd_prot_packet_get should be called only for items tracked by the
+ * pktid_ctrl_map handle. The bottom-level functions such as dhd_pktid_map_free
+ * take their own DHD_PKTID_LOCK for thread safety, so no additional locking is
+ * needed in this function.
+ */
static INLINE void * BCMFASTPATH
dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
{
@@ -2759,25 +4404,45 @@
#ifdef DHD_PCIE_PKTID
if (free_pktid) {
- PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
pktid, pa, len, dmah, secdma, pkttype);
} else {
- PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle,
+ PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
pktid, pa, len, dmah, secdma, pkttype);
}
#else
- PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa,
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
len, dmah, secdma, pkttype);
#endif /* DHD_PCIE_PKTID */
-
if (PKTBUF) {
{
- if (SECURE_DMA_ENAB(dhd->osh)) {
+ if (SECURE_DMA_ENAB(dhd->osh))
SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
secdma, 0);
- } else {
- DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, PKTBUF, dmah);
+ else
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+#ifdef DMAMAP_STATS
+ switch (pkttype) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ case PKTTYPE_IOCTL_RX:
+ dhd->dma_stats.ioctl_rx--;
+ dhd->dma_stats.ioctl_rx_sz -= len;
+ break;
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ case PKTTYPE_EVENT_RX:
+ dhd->dma_stats.event_rx--;
+ dhd->dma_stats.event_rx_sz -= len;
+ break;
+ case PKTTYPE_INFO_RX:
+ dhd->dma_stats.info_rx--;
+ dhd->dma_stats.info_rx_sz -= len;
+ break;
+ case PKTTYPE_TSBUF_RX:
+ dhd->dma_stats.tsbuf_rx--;
+ dhd->dma_stats.tsbuf_rx_sz -= len;
+ break;
}
+#endif /* DMAMAP_STATS */
}
}
@@ -2794,7 +4459,7 @@
return;
}
-#endif /* IOCTLRESP_USE_CONSTMEM */
+#endif // endif
static void BCMFASTPATH
dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
@@ -2839,91 +4504,98 @@
static int BCMFASTPATH
dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
- void *p;
- uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ void *p, **pktbuf;
uint8 *rxbuf_post_tmp;
host_rxbuf_post_t *rxbuf_post;
void *msg_start;
- dmaaddr_t pa;
- uint32 pktlen;
- uint8 i = 0;
- uint16 alloced = 0;
+ dmaaddr_t pa, *pktbuf_pa;
+ uint32 *pktlen;
+ uint16 i = 0, alloced = 0;
unsigned long flags;
uint32 pktid;
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
+ void *lcl_buf;
+ uint16 lcl_buf_size;
+ uint16 pktsz = prot->rxbufpost_sz;
- DHD_GENERAL_LOCK(dhd, flags);
-
- /* Claim space for exactly 'count' no of messages, for mitigation purpose */
- msg_start = (void *)
- dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
-
- DHD_GENERAL_UNLOCK(dhd, flags);
-
- if (msg_start == NULL) {
- DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
- return -1;
+ /* allocate a local buffer to store pkt buffer va, pa and length */
+ lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
+ RX_BUF_BURST;
+ lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
+ if (!lcl_buf) {
+ DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
+ return 0;
}
- /* if msg_start != NULL, we should have alloced space for atleast 1 item */
- ASSERT(alloced > 0);
+ pktbuf = lcl_buf;
+ pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
+ pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
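+	/* lcl_buf layout sketch, three arrays carved out of one allocation:
+	 *   [ void *va[RX_BUF_BURST] | dmaaddr_t pa[RX_BUF_BURST] | uint32 len[RX_BUF_BURST] ]
+	 */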
- rxbuf_post_tmp = (uint8*)msg_start;
-
- /* loop through each allocated message in the rxbuf post msgbuf_ring */
- for (i = 0; i < alloced; i++) {
- rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
- /* Create a rx buffer */
+ for (i = 0; i < count; i++) {
if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
dhd->rx_pktgetfail++;
break;
}
- pktlen = PKTLEN(dhd->osh, p);
+ pktlen[i] = PKTLEN(dhd->osh, p);
if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
DMA_RX, p, 0, ring->dma_buf.secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
}
#ifndef BCM_SECURE_DMA
- else {
- pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
- }
-#endif /* BCM_SECURE_DMA */
+ else
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
if (PHYSADDRISZERO(pa)) {
- if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
- ring->dma_buf.secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else {
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
- }
-
PKTFREE(dhd->osh, p, FALSE);
DHD_ERROR(("Invalid phyaddr 0\n"));
ASSERT(0);
break;
}
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.rxdata++;
+ dhd->dma_stats.rxdata_sz += pktlen[i];
+#endif /* DMAMAP_STATS */
PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
- pktlen = PKTLEN(dhd->osh, p);
+ pktlen[i] = PKTLEN(dhd->osh, p);
+ pktbuf[i] = p;
+ pktbuf_pa[i] = pa;
+ }
- /* Common msg header */
- rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
- rxbuf_post->cmn_hdr.if_id = 0;
- rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
- ring->seqnum++;
+ /* only post what we have */
+ count = i;
+
+ /* grab the ring lock to allocate pktid and post on ring */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+	/* Claim space for exactly 'count' messages, for mitigation purposes */
+ msg_start = (void *)
+ dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
+ if (msg_start == NULL) {
+ DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ goto cleanup;
+ }
+	/* if msg_start != NULL, we should have allocated space for at least 1 item */
+ ASSERT(alloced > 0);
+
+ rxbuf_post_tmp = (uint8*)msg_start;
+
+ for (i = 0; i < alloced; i++) {
+ rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+ p = pktbuf[i];
+ pa = pktbuf_pa[i];
#if defined(DHD_LB_RXC)
if (use_rsv_pktid == TRUE) {
bcm_workq_t *workq = &prot->rx_compl_cons;
int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+
if (elem_ix == BCM_RING_EMPTY) {
- DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
+ DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
pktid = DHD_PKTID_INVALID;
goto alloc_pkt_id;
} else {
@@ -2931,49 +4603,36 @@
pktid = *elem;
}
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+
/* Now populate the previous locker with valid information */
if (pktid != DHD_PKTID_INVALID) {
- rxbuf_post->cmn_hdr.request_id = htol32(pktid);
- DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
- pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
+ p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
PKTTYPE_DATA_RX);
}
} else
-#endif /* DHD_LB_RXC */
+#endif /* ! DHD_LB_RXC */
{
#if defined(DHD_LB_RXC)
alloc_pkt_id:
-#endif
+#endif /* DHD_LB_RXC */
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
+ pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
#if defined(DHD_PCIE_PKTID)
- /* get the lock before calling DHD_NATIVE_TO_PKTID */
- DHD_GENERAL_LOCK(dhd, flags);
-#endif
- pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
- pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
-
-#if defined(DHD_PCIE_PKTID)
- /* free lock */
- DHD_GENERAL_UNLOCK(dhd, flags);
-
if (pktid == DHD_PKTID_INVALID) {
-
- if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
- ring->dma_buf.secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else {
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
- }
-
- PKTFREE(dhd->osh, p, FALSE);
- DHD_ERROR(("Pktid pool depleted.\n"));
break;
}
#endif /* DHD_PCIE_PKTID */
}
- rxbuf_post->data_buf_len = htol16((uint16)pktlen);
+ /* Common msg header */
+ rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+ rxbuf_post->cmn_hdr.if_id = 0;
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ rxbuf_post->cmn_hdr.flags = ring->current_phase;
+ ring->seqnum++;
+ rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
rxbuf_post->data_buf_addr.low_addr =
htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
@@ -2988,33 +4647,235 @@
rxbuf_post->metadata_buf_addr.low_addr = 0;
}
-#if defined(DHD_PKTID_AUDIT_RING)
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC);
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */
rxbuf_post->cmn_hdr.request_id = htol32(pktid);
/* Move rxbuf_post_tmp to next item */
rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
+
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, p);
+#endif // endif
}
if (i < alloced) {
- if (ring->wr < (alloced - i)) {
+ if (ring->wr < (alloced - i))
ring->wr = ring->max_items - (alloced - i);
- } else {
+ else
ring->wr -= (alloced - i);
+
+ if (ring->wr == 0) {
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ?
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
}
alloced = i;
}
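+	/* Note: the phase bit in cmn_hdr.flags toggles on every WR wrap-around,
+	 * letting the dongle distinguish freshly written work items from stale
+	 * ring contents independent of the WR index value.
+	 */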
- /* Update ring's WR index and ring doorbell to dongle */
+ /* update ring's WR index and ring doorbell to dongle */
if (alloced > 0) {
dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
}
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+cleanup:
+ for (i = alloced; i < count; i++) {
+ p = pktbuf[i];
+ pa = pktbuf_pa[i];
+
+ if (SECURE_DMA_ENAB(dhd->osh))
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
+ DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
+ else
+ DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
+ PKTFREE(dhd->osh, p, FALSE);
+ }
+
+ MFREE(dhd->osh, lcl_buf, lcl_buf_size);
+
return alloced;
-} /* dhd_prot_rxbuf_post */
+} /* dhd_prot_rxbuf_post */
+
+static int
+dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ unsigned long flags;
+ uint32 pktid;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 alloced = 0;
+ uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
+ uint32 pktlen;
+ info_buf_post_msg_t *infobuf_post;
+ uint8 *infobuf_post_tmp;
+ void *p;
+ void* msg_start;
+ uint8 i = 0;
+ dmaaddr_t pa;
+ int16 count = 0;
+
+ if (ring == NULL)
+ return 0;
+
+ if (ring->inited != TRUE)
+ return 0;
+ if (ring == dhd->prot->h2dring_info_subn) {
+ if (prot->max_infobufpost == 0)
+ return 0;
+
+ count = prot->max_infobufpost - prot->infobufpost;
+	} else {
+ DHD_ERROR(("Unknown ring\n"));
+ return 0;
+ }
+
+ if (count <= 0) {
+ DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+ /* grab the ring lock to allocate pktid and post on ring */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+	/* Claim space for exactly 'count' messages, for mitigation purposes */
+ msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
+
+ if (msg_start == NULL) {
+ DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ return -1;
+ }
+
+	/* if msg_start != NULL, we should have allocated space for at least 1 item */
+ ASSERT(alloced > 0);
+
+ infobuf_post_tmp = (uint8*) msg_start;
+
+ /* loop through each allocated message in the host ring */
+ for (i = 0; i < alloced; i++) {
+ infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
+ /* Create a rx buffer */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+#else
+ p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ if (p == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
+ dhd->rx_pktgetfail++;
+ break;
+ }
+ pktlen = PKTLEN(dhd->osh, p);
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+ DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+ }
+#ifndef BCM_SECURE_DMA
+ else
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
+ if (PHYSADDRISZERO(pa)) {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+ ring->dma_buf.secdma, 0);
+			} else {
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+			}
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, p, FALSE);
+#else
+ PKTFREE(dhd->osh, p, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ DHD_ERROR(("Invalid phyaddr 0\n"));
+ ASSERT(0);
+ break;
+ }
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.info_rx++;
+ dhd->dma_stats.info_rx_sz += pktlen;
+#endif /* DMAMAP_STATS */
+ pktlen = PKTLEN(dhd->osh, p);
+
+ /* Common msg header */
+ infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
+ infobuf_post->cmn_hdr.if_id = 0;
+ infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ infobuf_post->cmn_hdr.flags = ring->current_phase;
+ ring->seqnum++;
+
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
+ pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
+
+#if defined(DHD_PCIE_PKTID)
+ if (pktid == DHD_PKTID_INVALID) {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
+ ring->dma_buf.secdma, 0);
+			} else {
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
+			}
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, p, FALSE);
+#else
+ PKTFREE(dhd->osh, p, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ break;
+ }
+#endif /* DHD_PCIE_PKTID */
+
+ infobuf_post->host_buf_len = htol16((uint16)pktlen);
+ infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+		infobuf_post->cmn_hdr.request_id = htol32(pktid);
+
+		DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
+			infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
+			infobuf_post->host_buf_addr.high_addr));
+ /* Move rxbuf_post_tmp to next item */
+ infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, p);
+#endif // endif
+ }
+
+ if (i < alloced) {
+ if (ring->wr < (alloced - i))
+ ring->wr = ring->max_items - (alloced - i);
+ else
+ ring->wr -= (alloced - i);
+
+ alloced = i;
+ if (alloced && ring->wr == 0) {
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ?
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+ }
+
+ /* Update the write pointer in TCM & ring bell */
+ if (alloced > 0) {
+ if (ring == dhd->prot->h2dring_info_subn) {
+ prot->infobufpost += alloced;
+ }
+ dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+ }
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ return alloced;
+} /* dhd_prot_infobufpost */
#ifdef IOCTLRESP_USE_CONSTMEM
static int
@@ -3041,10 +4902,6 @@
dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
retbuf->len = IOCT_RETBUF_SIZE;
retbuf->_alloced = retbuf->len + dma_pad;
- /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle.
- * Need to reassign before free to pass the check in dhd_dma_buf_audit().
- */
- retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL);
}
dhd_dma_buf_free(dhd, retbuf);
@@ -3053,7 +4910,7 @@
#endif /* IOCTLRESP_USE_CONSTMEM */
static int
-dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
{
void *p;
uint16 pktsz;
@@ -3068,16 +4925,33 @@
uint32 pktid;
void *map_handle;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+	bool non_ioctl_resp_buf = FALSE;
+ dhd_pkttype_t buf_type;
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
return -1;
}
-
memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
- if (event_buf) {
- /* Allocate packet for event buffer post */
+ if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
+ buf_type = PKTTYPE_IOCTL_RX;
+ else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
+ buf_type = PKTTYPE_EVENT_RX;
+ else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
+ buf_type = PKTTYPE_TSBUF_RX;
+ else {
+ DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
+ return -1;
+ }
+
+ if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
+ non_ioctl_resp_buf = TRUE;
+ else
+ non_ioctl_resp_buf = FALSE;
+
+ if (non_ioctl_resp_buf) {
+		/* Allocate packet for non-ioctl-response buffer post */
pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
} else {
/* Allocate packet for ctrl/ioctl buffer post */
@@ -3085,7 +4959,7 @@
}
#ifdef IOCTLRESP_USE_CONSTMEM
- if (!event_buf) {
+ if (!non_ioctl_resp_buf) {
if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
return -1;
@@ -3105,7 +4979,7 @@
#endif /* DHD_USE_STATIC_CTRLBUF */
if (p == NULL) {
DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
- __FUNCTION__, __LINE__, event_buf ?
+ __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
"EVENT" : "IOCTL RESP"));
dhd->rx_pktgetfail++;
return -1;
@@ -3114,10 +4988,8 @@
pktlen = PKTLEN(dhd->osh, p);
if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
DMA_RX, p, 0, ring->dma_buf.secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
}
#ifndef BCM_SECURE_DMA
else
@@ -3129,53 +5001,70 @@
ASSERT(0);
goto free_pkt_return;
}
+
+#ifdef DMAMAP_STATS
+ switch (buf_type) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ case PKTTYPE_IOCTL_RX:
+ dhd->dma_stats.ioctl_rx++;
+ dhd->dma_stats.ioctl_rx_sz += pktlen;
+ break;
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ case PKTTYPE_EVENT_RX:
+ dhd->dma_stats.event_rx++;
+ dhd->dma_stats.event_rx_sz += pktlen;
+ break;
+ case PKTTYPE_TSBUF_RX:
+ dhd->dma_stats.tsbuf_rx++;
+ dhd->dma_stats.tsbuf_rx_sz += pktlen;
+ break;
+ default:
+ break;
+ }
+#endif /* DMAMAP_STATS */
+
}
- DHD_GENERAL_LOCK(dhd, flags);
+ /* grab the ring lock to allocate pktid and post on ring */
+ DHD_RING_LOCK(ring->ring_lock, flags);
rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (rxbuf_post == NULL) {
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
__FUNCTION__, __LINE__));
#ifdef IOCTLRESP_USE_CONSTMEM
- if (event_buf)
+ if (non_ioctl_resp_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
{
if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
ring->dma_buf.secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
} else {
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
}
}
goto free_pkt_return;
}
/* CMN msg header */
- if (event_buf) {
- rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
- } else {
- rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
- }
+ rxbuf_post->cmn_hdr.msg_type = msg_type;
#ifdef IOCTLRESP_USE_CONSTMEM
- if (!event_buf) {
+ if (!non_ioctl_resp_buf) {
map_handle = dhd->prot->pktid_map_handle_ioctl;
- pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen,
- DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX);
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
+ ring->dma_buf.secdma, buf_type);
} else
#endif /* IOCTLRESP_USE_CONSTMEM */
{
- map_handle = dhd->prot->pktid_map_handle;
- pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
+ map_handle = dhd->prot->pktid_ctrl_map;
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
- event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX);
+ buf_type);
}
if (pktid == DHD_PKTID_INVALID) {
@@ -3183,47 +5072,52 @@
ring->wr = ring->max_items - 1;
} else {
ring->wr--;
+ if (ring->wr == 0) {
+ ring->current_phase = ring->current_phase ? 0 :
+ BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
}
- DHD_GENERAL_UNLOCK(dhd, flags);
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
goto free_pkt_return;
}
-#if defined(DHD_PKTID_AUDIT_RING)
+#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */
rxbuf_post->cmn_hdr.request_id = htol32(pktid);
rxbuf_post->cmn_hdr.if_id = 0;
- rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
ring->seqnum++;
+ rxbuf_post->cmn_hdr.flags = ring->current_phase;
#if defined(DHD_PCIE_PKTID)
if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
if (ring->wr == 0) {
ring->wr = ring->max_items - 1;
} else {
- ring->wr--;
+			ring->wr--;
+			if (ring->wr == 0) {
+				ring->current_phase = ring->current_phase ? 0 :
+					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+			}
}
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
#ifdef IOCTLRESP_USE_CONSTMEM
- if (event_buf)
+ if (non_ioctl_resp_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
{
if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
ring->dma_buf.secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else {
- DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, p, DHD_DMAH_NULL);
- }
+			} else {
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+			}
}
goto free_pkt_return;
}
#endif /* DHD_PCIE_PKTID */
- rxbuf_post->cmn_hdr.flags = 0;
#ifndef IOCTLRESP_USE_CONSTMEM
rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
#else
@@ -3232,34 +5126,39 @@
rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+#ifdef DHD_LBUF_AUDIT
+ if (non_ioctl_resp_buf)
+ PKTAUDIT(dhd->osh, p);
+#endif // endif
+
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return 1;
free_pkt_return:
+ if (!non_ioctl_resp_buf) {
#ifdef IOCTLRESP_USE_CONSTMEM
- if (!event_buf) {
free_ioctl_return_buffer(dhd, &retbuf);
- } else
+#else
+ dhd_prot_packet_free(dhd, p, buf_type, FALSE);
#endif /* IOCTLRESP_USE_CONSTMEM */
- {
- dhd_prot_packet_free(dhd, p,
- event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX,
- FALSE);
+ } else {
+ dhd_prot_packet_free(dhd, p, buf_type, FALSE);
}
return -1;
} /* dhd_prot_rxbufpost_ctrl */
static uint16
-dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
{
uint32 i = 0;
int32 ret_val;
- DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
+	DHD_INFO(("max to post %d, msg type %d\n", max_to_post, msg_type));
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
@@ -3267,13 +5166,12 @@
}
while (i < max_to_post) {
- ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
- if (ret_val < 0) {
+ ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
+ if (ret_val < 0)
break;
- }
i++;
}
- DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
+ DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
return (uint16)i;
}
@@ -3291,7 +5189,7 @@
return;
}
prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
- FALSE, max_to_post);
+ MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
}
static void
@@ -3302,34 +5200,58 @@
max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
if (max_to_post <= 0) {
- DHD_INFO(("%s: Cannot post more than max event buffers\n",
+ DHD_ERROR(("%s: Cannot post more than max event buffers\n",
__FUNCTION__));
return;
}
prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
- TRUE, max_to_post);
+ MSG_TYPE_EVENT_BUF_POST, max_to_post);
}
-/** called when DHD needs to check for 'receive complete' messages from the dongle */
-bool BCMFASTPATH
-dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
+static int
+dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
{
+ return 0;
+}
+
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
+{
+ dhd_prot_t *prot = dhd->prot;
bool more = TRUE;
uint n = 0;
- msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln;
+ msgbuf_ring_t *ring = prot->d2hring_info_cpln;
+ unsigned long flags;
+
+ if (ring == NULL)
+ return FALSE;
+ if (ring->inited != TRUE)
+ return FALSE;
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
- /* Get the address of the next message to be read from ring */
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* Get the message from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
if (msg_addr == NULL) {
more = FALSE;
break;
@@ -3339,8 +5261,8 @@
OSL_PREFETCH(msg_addr);
if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
- DHD_ERROR(("%s: process %s msg addr %p len %d\n",
- __FUNCTION__, ring->name, msg_addr, msg_len));
+			DHD_ERROR(("%s: error processing info cpl msgbuf of len %d\n",
+				__FUNCTION__, msg_len));
}
/* Update read pointer */
@@ -3356,6 +5278,536 @@
return more;
}
+#ifdef EWP_EDL
+bool
+dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = prot->d2hring_edl;
+ unsigned long flags = 0;
+ uint32 items = 0;
+ uint16 rd = 0;
+ uint16 depth = 0;
+
+ if (ring == NULL)
+ return FALSE;
+ if (ring->inited != TRUE)
+ return FALSE;
+ if (ring->item_len == 0) {
+ DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
+ __FUNCTION__, ring->idx, ring->item_len));
+ return FALSE;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ return FALSE;
+ }
+
+ if (dhd->hang_was_sent) {
+ return FALSE;
+ }
+
+ /* in this DPC context just check if wr index has moved
+ * and schedule deferred context to actually process the
+ * work items.
+ */
+ /* update the write index */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ if (dhd->dma_d2h_ring_upd_support) {
+ /* DMAing write/read indices supported */
+ ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
+ }
+ rd = ring->rd;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ depth = ring->max_items;
+ /* check for avail space, in number of ring items */
+ items = READ_AVAIL_SPACE(ring->wr, rd, depth);
+ if (items == 0) {
+ /* no work items in edl ring */
+ return FALSE;
+ }
+ if (items > ring->max_items) {
+ DHD_ERROR(("\r\n======================= \r\n"));
+ DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+ __FUNCTION__, ring, ring->name, ring->max_items, items));
+ DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
+ ring->wr, ring->rd, depth));
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
+ DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (ring->wr >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ }
+#else
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+ dhd_bus_mem_dump(dhd);
+		}
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_schedule_reset(dhd);
+
+ return FALSE;
+ }
+
+ if (items > D2HRING_EDL_WATERMARK) {
+ DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
+ " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
+ ring->rd, ring->wr, depth));
+ }
+
+ dhd_schedule_logtrace(dhd->info);
+
+ return FALSE;
+}
+
+/* This is called either from the workqueue context of
+ * 'event_log_dispatcher_work' or from the kthread context of
+ * dhd_logtrace_thread.
+ */
+int
+dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
+{
+ dhd_prot_t *prot = NULL;
+ msgbuf_ring_t *ring = NULL;
+ int err = 0;
+ unsigned long flags = 0;
+ cmn_msg_hdr_t *msg = NULL;
+ uint8 *msg_addr = NULL;
+ uint32 max_items_to_process = 0, n = 0;
+ uint32 num_items = 0, new_items = 0;
+ uint16 depth = 0;
+ volatile uint16 wr = 0;
+
+ if (!dhd || !dhd->prot)
+ return 0;
+
+ prot = dhd->prot;
+ ring = prot->d2hring_edl;
+ if (!ring || !evt_decode_data) {
+ return 0;
+ }
+
+ if (dhd->hang_was_sent) {
+		return 0;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ ring->curr_rd = ring->rd;
+ wr = ring->wr;
+ depth = ring->max_items;
+ /* check for avail space, in number of ring items
+ * Note, that this will only give the # of items
+ * from rd to wr if wr>=rd, or from rd to ring end
+ * if wr < rd. So in the latter case strictly speaking
+ * not all the items are read. But this is OK, because
+ * these will be processed in the next doorbell as rd
+ * would have wrapped around. Processing in the next
+ * doorbell is acceptable since EDL only contains debug data
+ */
+ num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
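+	/* e.g. wr = 5, rd = 250, depth = 256 (illustrative numbers): this
+	 * yields 6 items (rd..depth-1); the 5 items below wr are picked up on
+	 * the next pass once rd has wrapped to 0.
+	 */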
+
+ if (num_items == 0) {
+ /* no work items in edl ring */
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ return 0;
+ }
+
+ DHD_INFO(("%s: EDL work items [%u] available \n",
+ __FUNCTION__, num_items));
+
+ /* if space is available, calculate address to be read */
+ msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
+
+ max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr);
+
+ n = max_items_to_process;
+ while (n > 0) {
+ msg = (cmn_msg_hdr_t *)msg_addr;
+ /* wait for DMA of work item to complete */
+ if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
+ DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
+ "ring; err = %d\n", __FUNCTION__, err));
+ }
+
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
+
+ if (err != BCME_OK) {
+ return 0;
+ }
+
+ /* process the edl work item, i.e, the event log */
+ err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
+
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(msg_addr + ring->item_len);
+
+ msg_addr += ring->item_len;
+ --n;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* update host ring read pointer */
+ if ((ring->rd + max_items_to_process) >= ring->max_items)
+ ring->rd = 0;
+ else
+ ring->rd += max_items_to_process;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Now after processing max_items_to_process update dongle rd index.
+ * The TCM rd index is updated only if bus is not
+ * in D3. Else, the rd index is updated from resume
+ * context in - 'dhdpcie_bus_suspend'
+ */
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_EDL_RING_TCM_RD_UPDATE(dhd);
+ }
+
+ /* if num_items > bound, then anyway we will reschedule and
+ * this function runs again, so that if in between the DPC has
+ * updated the wr index, then the updated wr is read. But if
+ * num_items <= bound, and if DPC executes and updates the wr index
+ * when the above while loop is running, then the updated 'wr' index
+	 * needs to be re-read here; if we don't, the event logs will not be
+	 * processed until the next time this function is scheduled.
+ */
+ if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
+ /* read the updated wr index if reqd. and update num_items */
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ if (wr != (volatile uint16)ring->wr) {
+ wr = (volatile uint16)ring->wr;
+ new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
+ DHD_INFO(("%s: new items [%u] avail in edl\n",
+ __FUNCTION__, new_items));
+ num_items += new_items;
+ }
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ }
+
+ /* if # of items processed is less than num_items, need to re-schedule
+ * the deferred ctx
+ */
+ if (max_items_to_process < num_items) {
+ DHD_INFO(("%s: EDL bound hit / new items found, "
+ "items processed=%u; remaining=%u, "
+ "resched deferred ctx...\n",
+ __FUNCTION__, max_items_to_process,
+ num_items - max_items_to_process));
+ return (num_items - max_items_to_process);
+ }
+
+ return 0;
+
+}
+
+void
+dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = NULL;
+ unsigned long flags = 0;
+ msgbuf_ring_t *ring = NULL;
+
+ if (!dhd)
+ return;
+
+ prot = dhd->prot;
+ if (!prot || !prot->d2hring_edl)
+ return;
+
+ ring = prot->d2hring_edl;
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ dhd_prot_upd_read_idx(dhd, ring);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+}
+#endif /* EWP_EDL */
+
+/* called when DHD needs to check for 'receive complete' messages from the dongle */
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
+{
+ bool more = FALSE;
+ uint n = 0;
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring;
+ uint16 item_len;
+ host_rxbuf_cmpl_t *msg = NULL;
+ uint8 *msg_addr;
+ uint32 msg_len;
+ uint16 pkt_cnt, pkt_cnt_newidx;
+ unsigned long flags;
+ dmaaddr_t pa;
+ uint32 len;
+ void *dmah;
+ void *secdma;
+ int ifidx = 0, if_newidx = 0;
+ void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
+ uint32 pktid;
+ int i;
+ uint8 sync;
+ ts_timestamp_t *ts;
+
+ BCM_REFERENCE(ts);
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
+ ring = prot->d2hring_hp2p_rxcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &prot->d2hring_rx_cpln;
+ item_len = ring->item_len;
+ while (1) {
+ if (dhd_is_device_removed(dhd))
+ break;
+
+ if (dhd_query_bus_erros(dhd))
+ break;
+
+ if (dhd->hang_was_sent)
+ break;
+
+ if (dhd->smmu_fault_occurred) {
+ break;
+ }
+
+ pkt_cnt = 0;
+ pktqhead = pkt_newidx = NULL;
+ pkt_cnt_newidx = 0;
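+		/* Batching strategy: while holding the ring lock, chain
+		 * completions for the same interface off pktqhead; a
+		 * completion for a different ifidx (pkt_newidx) ends the
+		 * inner loop so the chain can be handed up per-interface
+		 * outside the lock.
+		 */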
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ if (msg_addr == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ break;
+ }
+
+ while (msg_len > 0) {
+ msg = (host_rxbuf_cmpl_t *)msg_addr;
+
+ /* Wait until DMA completes, then fetch msg_type */
+ sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
+
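+			/* A zero return means the d2h sync scheme (seqnum or
+			 * xorcsum check, depending on the configured mode)
+			 * could not validate this work item; skip it.
+			 */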
+ if (!sync) {
+ msg_len -= item_len;
+ msg_addr += item_len;
+ continue;
+ }
+
+ pktid = ltoh32(msg->cmn_hdr.request_id);
+
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
+ DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
+ len, dmah, secdma, PKTTYPE_DATA_RX);
+ if (!pkt) {
+ msg_len -= item_len;
+ msg_addr += item_len;
+ continue;
+ }
+
+ if (SECURE_DMA_ENAB(dhd->osh))
+ SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
+ dmah, secdma, 0);
+ else
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.rxdata--;
+ dhd->dma_stats.rxdata_sz -= len;
+#endif /* DMAMAP_STATS */
+ DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
+ "pktdata %p, metalen %d\n",
+ ltoh32(msg->cmn_hdr.request_id),
+ ltoh16(msg->data_offset),
+ ltoh16(msg->data_len), msg->cmn_hdr.if_id,
+ msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
+ ltoh16(msg->metadata_len)));
+
+ pkt_cnt++;
+ msg_len -= item_len;
+ msg_addr += item_len;
+
+#if DHD_DBG_SHOW_METADATA
+ if (prot->metadata_dbg && prot->rx_metadata_offset &&
+ msg->metadata_len) {
+ uchar *ptr;
+ ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
+ /* header followed by data */
+ bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
+ }
+#endif /* DHD_DBG_SHOW_METADATA */
+
+ /* data_offset from buf start */
+ if (ltoh16(msg->data_offset)) {
+ /* data offset given from dongle after split rx */
+ PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
+ }
+ else if (prot->rx_dataoffset) {
+ /* DMA RX offset updated through shared area */
+ PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
+ }
+ /* Actual length of the packet */
+ PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
+
+#if defined(WL_MONITOR)
+ if (dhd_monitor_enabled(dhd, ifidx)) {
+ if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
+ dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
+ continue;
+ } else {
+					DHD_ERROR(("Received non-802.11 packet "
+						"while monitor mode is enabled\n"));
+ }
+ }
+#endif /* WL_MONITOR */
+
+ if (msg->flags & BCMPCIE_PKT_FLAGS_NO_FORWARD) {
+ DHD_PKT_FLAGS_SET_NO_FWD(pkt);
+ }
+
+ if (!pktqhead) {
+ pktqhead = prevpkt = pkt;
+ ifidx = msg->cmn_hdr.if_id;
+ } else {
+ if (ifidx != msg->cmn_hdr.if_id) {
+ pkt_newidx = pkt;
+ if_newidx = msg->cmn_hdr.if_id;
+ pkt_cnt--;
+ pkt_cnt_newidx = 1;
+ break;
+ } else {
+ PKTSETNEXT(dhd->osh, prevpkt, pkt);
+ prevpkt = pkt;
+ }
+ }
+
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_rxstats(dhd, msg);
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif // endif
+ }
+
+ /* roll back read pointer for unprocessed message */
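+		/* (msg_len / item_len is the number of fetched-but-unconsumed
+		 * items, e.g. after the ifidx-switch break above; move rd back
+		 * that many slots, modulo the ring depth.)
+		 */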
+ if (msg_len > 0) {
+ if (ring->rd < msg_len / item_len)
+ ring->rd = ring->max_items - msg_len / item_len;
+ else
+ ring->rd -= msg_len / item_len;
+ }
+
+ /* Update read pointer */
+ dhd_prot_upd_read_idx(dhd, ring);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ pkt = pktqhead;
+ for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
+ nextpkt = PKTNEXT(dhd->osh, pkt);
+ PKTSETNEXT(dhd->osh, pkt, NULL);
+#ifdef DHD_LB_RXP
+ dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
+#elif defined(DHD_RX_CHAINING)
+ dhd_rxchain_frame(dhd, pkt, ifidx);
+#else
+ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+#endif /* DHD_LB_RXP */
+ }
+
+ if (pkt_newidx) {
+#ifdef DHD_LB_RXP
+ dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
+#elif defined(DHD_RX_CHAINING)
+ dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
+#else
+ dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
+#endif /* DHD_LB_RXP */
+ }
+
+ pkt_cnt += pkt_cnt_newidx;
+
+ /* Post another set of rxbufs to the device */
+ dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
+
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_commit(dhd);
+#endif // endif
+
+ /* After batch processing, check RX bound */
+ n += pkt_cnt;
+ if (n >= bound) {
+ more = TRUE;
+ break;
+ }
+ }
+
+ /* Call lb_dispatch only if packets are queued */
+ if (n &&
+#ifdef WL_MONITOR
+ !(dhd_monitor_enabled(dhd, ifidx)) &&
+#endif /* WL_MONITOR */
+ TRUE) {
+ DHD_LB_DISPATCH_RX_COMPL(dhd);
+ DHD_LB_DISPATCH_RX_PROCESS(dhd);
+ }
+
+ return more;
+
+}
+
/**
* Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
*/
@@ -3364,8 +5816,12 @@
{
msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
+ if (ring == NULL) {
+ DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
+ return;
+ }
/* Update read pointer */
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ if (dhd->dma_d2h_ring_upd_support) {
ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
}
@@ -3378,24 +5834,45 @@
/** called when DHD needs to check for 'transmit complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
{
bool more = TRUE;
uint n = 0;
- msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+ msgbuf_ring_t *ring;
+ unsigned long flags;
+
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
+ ring = dhd->prot->d2hring_hp2p_txcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &dhd->prot->d2hring_tx_cpln;
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
+ if (dhd->smmu_fault_occurred) {
+ more = FALSE;
+ break;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
if (msg_addr == NULL) {
more = FALSE;
break;
@@ -3419,7 +5896,44 @@
}
}
+ DHD_LB_DISPATCH_TX_COMPL(dhd);
+
return more;
+}
+
+int BCMFASTPATH
+dhd_prot_process_trapbuf(dhd_pub_t *dhd)
+{
+ uint32 data;
+ dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
+
+ /* Interrupts can come in before this struct
+ * has been initialized.
+ */
+ if (trap_addr->va == NULL) {
+ DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
+ data = *(uint32 *)(trap_addr->va);
+
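+	/* The dongle sets D2H_DEV_FWHALT in this host buffer when the
+	 * firmware traps; D2H_DEV_EXT_TRAP_DATA additionally indicates
+	 * that an extended trap record follows in the same buffer.
+	 */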
+ if (data & D2H_DEV_FWHALT) {
+ DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
+
+ if (data & D2H_DEV_EXT_TRAP_DATA)
+ {
+ if (dhd->extended_trap_data) {
+ OSL_CACHE_INV((void *)trap_addr->va,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+ DHD_ERROR(("Extended trap data available\n"));
+ }
+ return data;
+ }
+ return 0;
}
/** called when DHD needs to check for 'ioctl complete' messages from the dongle */
@@ -3428,18 +5942,30 @@
{
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
+ unsigned long flags;
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ break;
+ }
+
if (dhd->hang_was_sent) {
break;
}
+ if (dhd->smmu_fault_occurred) {
+ break;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
if (msg_addr == NULL) {
break;
}
@@ -3466,7 +5992,7 @@
static int BCMFASTPATH
dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
{
- int buf_len = len;
+ uint32 buf_len = len;
uint16 item_len;
uint8 msg_type;
cmn_msg_hdr_t *msg = NULL;
@@ -3475,7 +6001,7 @@
ASSERT(ring);
item_len = ring->item_len;
if (item_len == 0) {
- DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n",
+ DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
__FUNCTION__, ring->idx, item_len, buf_len));
return BCME_ERROR;
}
@@ -3486,14 +6012,27 @@
goto done;
}
+ if (dhd->smmu_fault_occurred) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+
msg = (cmn_msg_hdr_t *)buf;
-#if defined(PCIE_D2H_SYNC)
/* Wait until DMA completes, then fetch msg_type */
msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
-#else
- msg_type = msg->msg_type;
-#endif /* !PCIE_D2H_SYNC */
+
+ /*
+ * Update the curr_rd to the current index in the ring, from where
+ * the work item is fetched. This way if the fetched work item
+ * fails in LIVELOCK, we can print the exact read index in the ring
+ * that shows up the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
/* Prefetch data to populate the cache */
OSL_PREFETCH(buf + item_len);
@@ -3508,12 +6047,24 @@
ASSERT(msg_type < DHD_PROT_FUNCS);
if (msg_type >= DHD_PROT_FUNCS) {
- DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n",
+ DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
__FUNCTION__, msg_type, item_len, buf_len));
ret = BCME_ERROR;
goto done;
}
+ if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
+ if (ring == dhd->prot->d2hring_info_cpln) {
+ if (!dhd->prot->infobufpost) {
+				DHD_ERROR(("infobufs posted are zero, "
+					"but there is a completion\n"));
+ goto done;
+ }
+ dhd->prot->infobufpost--;
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
+ dhd_prot_process_infobuf_complete(dhd, buf);
+ }
+ } else
if (table_lookup[msg_type]) {
table_lookup[msg_type](dhd, buf);
}
@@ -3530,10 +6081,8 @@
#ifdef DHD_RX_CHAINING
dhd_rxchain_commit(dhd);
-#endif
-#if defined(DHD_LB)
- dhd_lb_dispatch(dhd, ring->idx);
-#endif
+#endif // endif
+
return ret;
} /* dhd_prot_process_msgtype */
@@ -3547,10 +6096,76 @@
static void
dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
{
- pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg;
+ pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
+ uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
+ uint16 status = ltoh16(ring_status->compl_hdr.status);
+ uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
+
DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
- ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
- ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
+ request_id, status, ring_id, ltoh16(ring_status->write_idx)));
+
+ if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
+ return;
+ if (status == BCMPCIE_BAD_PHASE) {
+		/* bad phase reported by the dongle */
+ DHD_ERROR(("Bad phase\n"));
+ }
+ if (status != BCMPCIE_BADOPTION)
+ return;
+
+ if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
+ if (dhd->prot->h2dring_info_subn != NULL) {
+ if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
+ DHD_ERROR(("H2D ring create failed for info ring\n"));
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for info submit ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
+ if (dhd->prot->d2hring_info_cpln != NULL) {
+ if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for info ring\n"));
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for info cpl ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#ifdef DHD_HP2P
+ else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
+				DHD_ERROR(("D2H ring create failed for hp2p txcmpl ring\n"));
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for hp2p txcmpl ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ }
+ else
+				DHD_ERROR(("ring create response for hp2p rxcmpl ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#endif /* DHD_HP2P */
+ else {
+		DHD_ERROR(("don't know how to pair with original request\n"));
+ }
/* How do we track this to pair it with ??? */
return;
}
@@ -3575,19 +6190,26 @@
static void
dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
{
- uint32 pktid;
ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
unsigned long flags;
-
- pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
+#if defined(DHD_PKTID_AUDIT_RING)
+ uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
+#endif // endif
#if defined(DHD_PKTID_AUDIT_RING)
- /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */
+	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
if (pktid != DHD_IOCTL_REQ_PKTID) {
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
- DHD_TEST_IS_ALLOC);
+#ifndef IOCTLRESP_USE_CONSTMEM
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#else
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+ DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
}
-#endif /* DHD_PKTID_AUDIT_RING */
+#endif // endif
+
+ dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
DHD_GENERAL_LOCK(dhd, flags);
if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
@@ -3596,6 +6218,8 @@
} else {
DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+ prhex("dhd_prot_ioctack_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
}
DHD_GENERAL_UNLOCK(dhd, flags);
@@ -3618,28 +6242,46 @@
unsigned long flags;
dhd_dma_buf_t retbuf;
+	/* Check for the ioctl timeout induce flag, which is set by firing
+	 * a dhd iovar to induce an IOCTL timeout. If the flag is set,
+	 * return from here, which results in an IOCTL timeout.
+	 */
+	if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
+		DHD_ERROR(("%s: Inducing ioctl timeout\n", __FUNCTION__));
+ return;
+ }
+
memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
#if defined(DHD_PKTID_AUDIT_RING)
#ifndef IOCTLRESP_USE_CONSTMEM
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, DHD_DUPLICATE_FREE);
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#else
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id, DHD_DUPLICATE_FREE);
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
-#endif /* DHD_PKTID_AUDIT_RING */
+#endif // endif
DHD_GENERAL_LOCK(dhd, flags);
if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
- /* reset ioctl state */
- prot->ioctl_state = 0;
+ prhex("dhd_prot_ioctcmplt_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
DHD_GENERAL_UNLOCK(dhd, flags);
return;
}
+
+ dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
+
+ /* Clear Response pending bit */
+ prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
#ifndef IOCTLRESP_USE_CONSTMEM
pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
#else
@@ -3647,21 +6289,34 @@
pkt = retbuf.va;
#endif /* !IOCTLRESP_USE_CONSTMEM */
if (!pkt) {
- prot->ioctl_state = 0;
- DHD_GENERAL_UNLOCK(dhd, flags);
DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
+ prhex("dhd_prot_ioctcmplt_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
return;
}
- DHD_GENERAL_UNLOCK(dhd, flags);
prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
xt_id = ltoh16(ioct_resp->trans_id);
- if (xt_id != prot->ioctl_trans_id) {
+
+ if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
+ DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
+ __FUNCTION__, xt_id, prot->ioctl_trans_id,
+ prot->curr_ioctl_cmd, ioct_resp->cmd));
+ dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
+ dhd_prot_debug_info_print(dhd);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ dhd_schedule_reset(dhd);
goto exit;
}
-
DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
@@ -3683,6 +6338,31 @@
#else
free_ioctl_return_buffer(dhd, &retbuf);
#endif /* !IOCTLRESP_USE_CONSTMEM */
+
+ /* Post another ioctl buf to the device */
+ if (prot->cur_ioctlresp_bufs_posted > 0) {
+ prot->cur_ioctlresp_bufs_posted--;
+ }
+
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+}
+
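+/** Nonzero while TX flow control is asserted because the TX pktid pool
+ * ran dry; see dhd_prot_txdata() and dhd_prot_txstatus_process().
+ */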
+int
+dhd_prot_check_tx_resource(dhd_pub_t *dhd)
+{
+ return dhd->prot->no_tx_resource;
+}
+
+void
+dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
+{
+ dhd->prot->pktid_txq_stop_cnt++;
+}
+
+void
+dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
+{
+ dhd->prot->pktid_txq_start_cnt++;
}
/** called on MSG_TYPE_TX_STATUS message received from dongle */
@@ -3693,45 +6373,94 @@
host_txbuf_cmpl_t * txstatus;
unsigned long flags;
uint32 pktid;
- void *pkt = NULL;
- ulong pa;
+ void *pkt;
+ dmaaddr_t pa;
uint32 len;
void *dmah;
void *secdma;
+ bool pkt_fate;
+ msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
+ flow_info_t *flow_info;
+ uint64 tx_status_latency;
+#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
+#if defined(TX_STATUS_LATENCY_STATS)
+ flow_ring_node_t *flow_ring_node;
+ uint16 flowid;
+#endif // endif
+ ts_timestamp_t *ts;
+
+ BCM_REFERENCE(ts);
+ txstatus = (host_txbuf_cmpl_t *)msg;
+#if defined(TX_STATUS_LATENCY_STATS)
+ flowid = txstatus->compl_hdr.flow_ring_id;
+ flow_ring_node = DHD_FLOW_RING(dhd, flowid);
+#endif // endif
/* locks required to protect circular buffer accesses */
- DHD_GENERAL_LOCK(dhd, flags);
-
- txstatus = (host_txbuf_cmpl_t *)msg;
+ DHD_RING_LOCK(ring->ring_lock, flags);
pktid = ltoh32(txstatus->cmn_hdr.request_id);
+ pkt_fate = TRUE;
#if defined(DHD_PKTID_AUDIT_RING)
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
- DHD_DUPLICATE_FREE);
-#endif /* DHD_PKTID_AUDIT_RING */
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
+ DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
+#endif // endif
DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
- if (prot->active_tx_count) {
- prot->active_tx_count--;
-
- /* Release the Lock when no more tx packets are pending */
- if (prot->active_tx_count == 0)
- DHD_TXFL_WAKE_UNLOCK(dhd);
-
- } else {
+ if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
DHD_ERROR(("Extra packets are freed\n"));
}
-
ASSERT(pktid != 0);
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+ pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+ if (!pkt) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
+ prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+ dhd_bus_mem_dump(dhd);
+ }
+#else
+ ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+ return;
+ }
+
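+	/* TX flow-control release: dhd_prot_txdata() stops the host queue
+	 * when the TX pktid pool runs dry; once the pool refills to the
+	 * minimum-available watermark, restart the queue.
+	 */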
+ if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
+ dhd->prot->no_tx_resource = FALSE;
+ dhd_bus_start_queue(dhd->bus);
+ }
+
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ int offset = 0;
+ BCM_REFERENCE(offset);
+
+ if (dhd->prot->tx_metadata_offset)
+ offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
+ SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
+ (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
+ secdma, offset);
+ } else {
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+ }
+
+#ifdef TX_STATUS_LATENCY_STATS
+ /* update the tx status latency for flowid */
+ flow_info = &flow_ring_node->flow_info;
+ tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
+ flow_info->cum_tx_status_latency += tx_status_latency;
+ flow_info->num_tx_status++;
+#endif /* TX_STATUS_LATENCY_STATS */
#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
{
int elem_ix;
void **elem;
bcm_workq_t *workq;
-
- pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
- pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
workq = &prot->tx_compl_prod;
/*
@@ -3763,66 +6492,72 @@
}
DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
- __FUNCTION__, pkt, prot->tx_compl_prod_sync));
+ __FUNCTION__, pkt, prot->tx_compl_prod_sync));
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return;
- }
+ }
workq_ring_full:
#endif /* !DHD_LB_TXC */
- /*
- * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is
- * defined but the tx_compl queue is full.
- */
- if (pkt == NULL) {
- pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
- pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.txdata--;
+ dhd->dma_stats.txdata_sz -= len;
+#endif /* DMAMAP_STATS */
+ pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
+ ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
+#ifdef DHD_PKT_LOGGING
+ if (dhd->d11_tx_status) {
+ uint16 status = ltoh16(txstatus->compl_hdr.status) &
+ WLFC_CTL_PKTFLAG_MASK;
+ uint32 pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+ DHD_PKTLOG_TXS(dhd, pkt, pktid, status);
+ dhd_dump_pkt(dhd, ltoh32(txstatus->cmn_hdr.if_id),
+ (uint8 *)PKTDATA(dhd->osh, pkt), len, TRUE,
+ &pkthash, &status);
}
+#endif /* DHD_PKT_LOGGING */
- if (pkt) {
- if (SECURE_DMA_ENAB(dhd->osh)) {
- int offset = 0;
- BCM_REFERENCE(offset);
-
- if (dhd->prot->tx_metadata_offset)
- offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
- SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
- (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
- secdma, offset);
- } else {
- DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, pkt, dmah);
- }
#if defined(BCMPCIE)
- dhd_txcomplete(dhd, pkt, true);
-#endif
+ dhd_txcomplete(dhd, pkt, pkt_fate);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#endif // endif
#if DHD_DBG_SHOW_METADATA
- if (dhd->prot->metadata_dbg &&
- dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
- uchar *ptr;
- /* The Ethernet header of TX frame was copied and removed.
- * Here, move the data pointer forward by Ethernet header size.
- */
- PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
- ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
- bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
- dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
- }
-#endif /* DHD_DBG_SHOW_METADATA */
-#ifndef CUSTOMER_HW_31_2
- DHD_GENERAL_UNLOCK(dhd, flags);
- PKTFREE(dhd->osh, pkt, TRUE);
- DHD_GENERAL_LOCK(dhd, flags);
-#endif /* CUSTOMER_HW_31_2 */
- DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
- txstatus->tx_status);
+ if (dhd->prot->metadata_dbg &&
+ dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+ uchar *ptr;
+ /* The Ethernet header of TX frame was copied and removed.
+ * Here, move the data pointer forward by Ethernet header size.
+ */
+ PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+ ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
+ bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
}
+#endif /* DHD_DBG_SHOW_METADATA */
- DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_txstats(dhd, txstatus);
+ }
+#endif /* DHD_HP2P */
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif // endif
+
+ DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
+ txstatus->tx_status);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ PKTFREE(dhd->osh, pkt, TRUE);
return;
} /* dhd_prot_txstatus_process */
@@ -3835,7 +6570,6 @@
uint16 buflen;
int ifidx = 0;
void* pkt;
- unsigned long flags;
dhd_prot_t *prot = dhd->prot;
/* Event complete header */
@@ -3843,134 +6577,91 @@
bufid = ltoh32(evnt->cmn_hdr.request_id);
#if defined(DHD_PKTID_AUDIT_RING)
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid,
- DHD_DUPLICATE_FREE);
-#endif /* DHD_PKTID_AUDIT_RING */
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
+ DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif // endif
buflen = ltoh16(evnt->event_data_len);
ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
/* Post another rxbuf to the device */
- if (prot->cur_event_bufs_posted) {
+ if (prot->cur_event_bufs_posted)
prot->cur_event_bufs_posted--;
- }
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
- /* locks required to protect pktid_map */
- DHD_GENERAL_LOCK(dhd, flags);
pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
- DHD_GENERAL_UNLOCK(dhd, flags);
if (!pkt) {
+ DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
return;
}
/* DMA RX offset updated through shared area */
- if (dhd->prot->rx_dataoffset) {
+ if (dhd->prot->rx_dataoffset)
PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
- }
PKTSETLEN(dhd->osh, pkt, buflen);
-
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif // endif
dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
}
-extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
-extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, void *pkt, int ifidx);
-
-/** called on MSG_TYPE_RX_CMPLT message received from dongle */
+/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
static void BCMFASTPATH
-dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg)
+dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
{
- host_rxbuf_cmpl_t *rxcmplt_h;
- uint16 data_offset; /* offset at which data starts */
- void *pkt;
- unsigned long flags;
- uint ifidx;
+ info_buf_resp_t *resp;
uint32 pktid;
-#if defined(DHD_LB_RXC)
- const bool free_pktid = FALSE;
-#else
- const bool free_pktid = TRUE;
-#endif /* DHD_LB_RXC */
+ uint16 buflen;
+ void * pkt;
- /* RXCMPLT HDR */
- rxcmplt_h = (host_rxbuf_cmpl_t *)msg;
+ resp = (info_buf_resp_t *)buf;
+ pktid = ltoh32(resp->cmn_hdr.request_id);
+ buflen = ltoh16(resp->info_data_len);
- /* offset from which data starts is populated in rxstatus0 */
- data_offset = ltoh16(rxcmplt_h->data_offset);
-
- pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id);
-
-#if defined(DHD_PKTID_AUDIT_RING)
- DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
- DHD_DUPLICATE_FREE);
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */
- DHD_GENERAL_LOCK(dhd, flags);
- pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid);
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
+ pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
+ dhd->prot->rx_dataoffset));
- if (!pkt) {
- return;
- }
-
- /* Post another set of rxbufs to the device */
- dhd_prot_return_rxbuf(dhd, pktid, 1);
-
- DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
- ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
- rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
- ltoh16(rxcmplt_h->metadata_len)));
-#if DHD_DBG_SHOW_METADATA
- if (dhd->prot->metadata_dbg &&
- dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
- uchar *ptr;
- ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
- /* header followed by data */
- bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
- dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
- }
-#endif /* DHD_DBG_SHOW_METADATA */
-
- if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
- DHD_INFO(("D11 frame rxed \n"));
- }
-
- /* data_offset from buf start */
- if (data_offset) {
- /* data offset given from dongle after split rx */
- PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
- } else {
- /* DMA RX offset updated through shared area */
- if (dhd->prot->rx_dataoffset) {
- PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+ if (dhd->debug_buf_dest_support) {
+ if (resp->dest < DEBUG_BUF_DEST_MAX) {
+ dhd->debug_buf_dest_stat[resp->dest]++;
}
}
- /* Actual length of the packet */
- PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
- ifidx = rxcmplt_h->cmn_hdr.if_id;
-
- if (dhd_monitor_enabled(dhd, ifidx) &&
- (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
- dhd_rx_mon_pkt(dhd, pkt, ifidx);
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
+ if (!pkt)
return;
- }
-#if defined(DHD_LB_RXP)
- dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
-#else /* ! DHD_LB_RXP */
-#ifdef DHD_RX_CHAINING
- /* Chain the packets */
- dhd_rxchain_frame(dhd, pkt, ifidx);
-#else /* ! DHD_RX_CHAINING */
- /* offset from which data starts is populated in rxstatus0 */
- dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
-#endif /* ! DHD_RX_CHAINING */
-#endif /* ! DHD_LB_RXP */
-} /* dhd_prot_rxcmplt_process */
+ /* DMA RX offset updated through shared area */
+ if (dhd->prot->rx_dataoffset)
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+
+ PKTSETLEN(dhd->osh, pkt, buflen);
+
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, pkt);
+#endif // endif
+
+	/* Info ring "debug" data, which is not an 802.3 frame, is sent up
+	 * with a special ifidx of -1. This is internal to dhd, to get the
+	 * data to dhd_linux.c:dhd_rx_frame() from here
+	 * (dhd_prot_process_infobuf_complete).
+	 */
+ dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
+}
+
+/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
+static void BCMFASTPATH
+dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
+{
+}
/** Stop protocol: sync w/dongle state. */
void dhd_prot_stop(dhd_pub_t *dhd)
@@ -3995,6 +6686,7 @@
return 0;
}
+#define MAX_MTU_SZ (1600u)
#define PKTBUF pktbuf
@@ -4019,26 +6711,41 @@
msgbuf_ring_t *ring;
flow_ring_table_t *flow_ring_table;
flow_ring_node_t *flow_ring_node;
+#ifdef DHD_PKT_LOGGING
+ uint32 pkthash;
+#endif /* DHD_PKT_LOGGING */
if (dhd->flow_ring_table == NULL) {
+ DHD_ERROR(("dhd flow_ring_table is NULL\n"));
return BCME_NORESOURCE;
}
+#ifdef DHD_PCIE_PKTID
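+	/* TX flow control: if the TX pktid pool stays depleted for
+	 * DHD_PKTID_DEPLETED_MAX_COUNT consecutive attempts, stop the
+	 * host queue; it is restarted from dhd_prot_txstatus_process()
+	 * once enough pktids are freed back to the pool.
+	 */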
+ if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
+ if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
+ dhd_bus_stop_queue(dhd->bus);
+ dhd->prot->no_tx_resource = TRUE;
+ }
+ dhd->prot->pktid_depleted_cnt++;
+ goto err_no_res;
+ } else {
+ dhd->prot->pktid_depleted_cnt = 0;
+ }
+#endif /* DHD_PCIE_PKTID */
flowid = DHD_PKT_GET_FLOWID(PKTBUF);
-
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
-
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ring->ring_lock, flags);
/* Create a unique 32-bit packet id */
- pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF);
+ pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
+ PKTBUF, PKTTYPE_DATA_TX);
#if defined(DHD_PCIE_PKTID)
if (pktid == DHD_PKTID_INVALID) {
- DHD_ERROR(("Pktid pool depleted.\n"));
+ DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
/*
* If we return error here, the caller would queue the packet
* again. So we'll just free the skb allocated in DMA Zone.
@@ -4053,21 +6760,22 @@
txdesc = (host_txbuf_post_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (txdesc == NULL) {
-#if defined(DHD_PCIE_PKTID)
- void *dmah;
- void *secdma;
- /* Free up the PKTID. physaddr and pktlen will be garbage. */
- DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid,
- pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
-#endif /* DHD_PCIE_PKTID */
DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
- __FUNCTION__, __LINE__, prot->active_tx_count));
- goto err_no_res_pktfree;
+ __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
+ goto err_free_pktid;
}
/* Extract the data pointer and length information */
pktdata = PKTDATA(dhd->osh, PKTBUF);
pktlen = PKTLEN(dhd->osh, PKTBUF);
+
+ DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
+#ifdef DHD_PKT_LOGGING
+ DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
+ /* Dump TX packet */
+ pkthash = __dhd_dbg_pkt_hash((uintptr_t)PKTBUF, pktid);
+ dhd_dump_pkt(dhd, ifidx, pktdata, pktlen, TRUE, &pkthash, NULL);
+#endif /* DHD_PKT_LOGGING */
/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
@@ -4081,9 +6789,8 @@
int offset = 0;
BCM_REFERENCE(offset);
- if (prot->tx_metadata_offset) {
+ if (prot->tx_metadata_offset)
offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
- }
pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
@@ -4093,31 +6800,36 @@
pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
#endif /* #ifndef BCM_SECURE_DMA */
- if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) {
- DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+ if (PHYSADDRISZERO(pa)) {
+ DHD_ERROR(("%s: Something really bad, unless 0 is "
+ "a valid phyaddr for pa\n", __FUNCTION__));
ASSERT(0);
+ goto err_rollback_idx;
}
+#ifdef DMAMAP_STATS
+ dhd->dma_stats.txdata++;
+ dhd->dma_stats.txdata_sz += pktlen;
+#endif /* DMAMAP_STATS */
/* No need to lock. Save the rest of the packet's metadata */
- DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
#ifdef TXP_FLUSH_NITEMS
- if (ring->pend_items_count == 0) {
+ if (ring->pend_items_count == 0)
ring->start_addr = (void *)txdesc;
- }
ring->pend_items_count++;
-#endif
+#endif // endif
/* Form the Tx descriptor message buffer */
/* Common message hdr */
txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
txdesc->cmn_hdr.if_id = ifidx;
+ txdesc->cmn_hdr.flags = ring->current_phase;
txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
prio = (uint8)PKTPRIO(PKTBUF);
-
txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
txdesc->seg_cnt = 1;
@@ -4131,10 +6843,9 @@
/* Handle Tx metadata */
headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
- if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
+ if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
DHD_ERROR(("No headroom for Metadata tx %d %d\n",
prot->tx_metadata_offset, headroom));
- }
if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
@@ -4154,8 +6865,32 @@
#endif /* #ifndef BCM_SECURE_DMA */
if (PHYSADDRISZERO(meta_pa)) {
- DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+ /* Unmap the data pointer to a DMA-able address */
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ int offset = 0;
+ BCM_REFERENCE(offset);
+
+ if (prot->tx_metadata_offset) {
+ offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
+ }
+
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
+ DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
+ }
+#ifndef BCM_SECURE_DMA
+ else {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
+ }
+#endif /* #ifndef BCM_SECURE_DMA */
+#ifdef TXP_FLUSH_NITEMS
+ /* update pend_items_count */
+ ring->pend_items_count--;
+#endif /* TXP_FLUSH_NITEMS */
+
+ DHD_ERROR(("%s: Something really bad, unless 0 is "
+ "a valid phyaddr for meta_pa\n", __FUNCTION__));
ASSERT(0);
+ goto err_rollback_idx;
}
/* Adjust the data pointer back to original value */
@@ -4165,14 +6900,21 @@
txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
} else {
- txdesc->metadata_buf_len = htol16(0);
- txdesc->metadata_buf_addr.high_addr = 0;
- txdesc->metadata_buf_addr.low_addr = 0;
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_update_hp2p_txdesc(dhd, txdesc);
+ } else
+#endif /* DHD_HP2P */
+ if (1)
+ {
+ txdesc->metadata_buf_len = htol16(0);
+ txdesc->metadata_buf_addr.high_addr = 0;
+ txdesc->metadata_buf_addr.low_addr = 0;
+ }
}
-#if defined(DHD_PKTID_AUDIT_RING)
- DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
- DHD_DUPLICATE_ALLOC);
+#ifdef DHD_PKTID_AUDIT_RING
+ DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */
txdesc->cmn_hdr.request_id = htol32(pktid);
@@ -4180,57 +6922,101 @@
DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
txdesc->cmn_hdr.request_id));
+#ifdef DHD_LBUF_AUDIT
+ PKTAUDIT(dhd->osh, PKTBUF);
+#endif // endif
+
+ if (pktlen > MAX_MTU_SZ) {
+		DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) ########\n",
+ __FUNCTION__, pktlen, MAX_MTU_SZ));
+ dhd_prhex("txringitem", (volatile uchar*)txdesc,
+ sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
+ }
+
/* Update the write pointer in TCM & ring bell */
-#ifdef TXP_FLUSH_NITEMS
+#if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_calc_hp2p_burst(dhd, ring, flowid);
+ } else {
+ if ((ring->pend_items_count == prot->txp_threshold) ||
+ ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+ }
+ }
+#elif defined(TXP_FLUSH_NITEMS)
/* Flush if we have either hit the txp_threshold or if this msg is */
/* occupying the last slot in the flow_ring - before wrap around. */
if ((ring->pend_items_count == prot->txp_threshold) ||
((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
- dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
+ dhd_prot_txdata_write_flush(dhd, flowid);
}
#else
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
-#endif
+#endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
- prot->active_tx_count++;
+#if defined(TX_STATUS_LATENCY_STATS)
+ /* set the time when pkt is queued to flowring */
+ DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
+#endif // endif
+ OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
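+	/* Balanced by OSL_ATOMIC_DEC_RETURN() in dhd_prot_txstatus_process();
+	 * a negative count there means a completion arrived with no
+	 * matching post.
+	 */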
/*
* Take a wake lock, do not sleep if we have atleast one packet
* to finish.
*/
- if (prot->active_tx_count == 1)
- DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
+ DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef TX_STATUS_LATENCY_STATS
+ flow_ring_node->flow_info.num_tx_pkts++;
+#endif /* TX_STATUS_LATENCY_STATS */
return BCME_OK;
+err_rollback_idx:
+ /* roll back write pointer for unprocessed message */
+ if (ring->wr == 0) {
+ ring->wr = ring->max_items - 1;
+ } else {
+ ring->wr--;
+ if (ring->wr == 0) {
+ DHD_INFO(("%s: flipping the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ?
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ }
+ }
+
+err_free_pktid:
+#if defined(DHD_PCIE_PKTID)
+ {
+ void *dmah;
+ void *secdma;
+ /* Free up the PKTID. physaddr and pktlen will be garbage. */
+ DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+ pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
+ }
+
err_no_res_pktfree:
+#endif /* DHD_PCIE_PKTID */
-
-
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+err_no_res:
return BCME_NORESOURCE;
} /* dhd_prot_txdata */
-/* called with a lock */
+/* called with a ring_lock */
/** optimization to write "n" tx items at a time to ring */
void BCMFASTPATH
-dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
{
#ifdef TXP_FLUSH_NITEMS
- unsigned long flags = 0;
flow_ring_table_t *flow_ring_table;
flow_ring_node_t *flow_ring_node;
msgbuf_ring_t *ring;
if (dhd->flow_ring_table == NULL) {
return;
- }
-
- if (!in_lock) {
- DHD_GENERAL_LOCK(dhd, flags);
}
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
@@ -4243,10 +7029,6 @@
ring->pend_items_count);
ring->pend_items_count = 0;
ring->start_addr = NULL;
- }
-
- if (!in_lock) {
- DHD_GENERAL_UNLOCK(dhd, flags);
}
#endif /* TXP_FLUSH_NITEMS */
}
@@ -4295,19 +7077,18 @@
#endif /* DHD_LB_RXC */
-
if (prot->rxbufpost >= rxcnt) {
- prot->rxbufpost -= rxcnt;
+ prot->rxbufpost -= (uint16)rxcnt;
} else {
/* ASSERT(0); */
prot->rxbufpost = 0;
}
#if !defined(DHD_LB_RXC)
- if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
- }
#endif /* !DHD_LB_RXC */
+ return;
}
/* called before an ioctl is sent to the dongle */
@@ -4315,9 +7096,9 @@
dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
{
dhd_prot_t *prot = dhd->prot;
+ int slen = 0;
if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
- int slen = 0;
pcie_bus_tput_params_t *tput_params;
slen = strlen("pcie_bus_tput") + 1;
@@ -4326,8 +7107,33 @@
sizeof(tput_params->host_buf_addr));
tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
}
+
}
+/* called after an ioctl returns from dongle */
+static void
+dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
+ int ifidx, int ret, int len)
+{
+
+ if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
+ /* Intercept the wme_dp ioctl here */
+ if (!strcmp(buf, "wme_dp")) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+ }
+
+}
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+#endif /* DHD_PM_CONTROL_FROM_FILE */
/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
@@ -4335,8 +7141,20 @@
int ret = -1;
uint8 action;
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
- DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
+ " bus state: %d, sent hang: %d\n", __FUNCTION__,
+ dhd->busstate, dhd->hang_was_sent));
goto done;
}
@@ -4347,12 +7165,21 @@
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (ioc->cmd == WLC_SET_PM) {
+#ifdef DHD_PM_CONTROL_FROM_FILE
+ if (g_pm_control == TRUE) {
+ DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
+ __FUNCTION__, buf ? *(char *)buf : 0));
+ goto done;
+ }
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+ DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
+ }
ASSERT(len <= WLC_IOCTL_MAXLEN);
- if (len > WLC_IOCTL_MAXLEN) {
+ if (len > WLC_IOCTL_MAXLEN)
goto done;
- }
action = ioc->set;
@@ -4362,32 +7189,19 @@
ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
} else {
ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
- if (ret > 0) {
+ if (ret > 0)
ioc->used = ret;
- }
}
/* Too many programs assume ioctl() returns 0 on success */
if (ret >= 0) {
ret = 0;
} else {
- DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
+ DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
dhd->dongle_error = ret;
}
- if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
- /* Intercept the wme_dp ioctl here */
- if (!strcmp(buf, "wme_dp")) {
- int slen, val = 0;
-
- slen = strlen("wme_dp") + 1;
- if (len >= (int)(slen + sizeof(int))) {
- bcopy(((char *)buf + slen), &val, sizeof(int));
- }
- dhd->wme_dp = (uint8) ltoh32(val);
- }
-
- }
+ dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
done:
return ret;
@@ -4412,13 +7226,13 @@
msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ring->ring_lock, flags);
ioct_rqst = (ioct_reqst_hdr_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (ioct_rqst == NULL) {
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return 0;
}
@@ -4438,12 +7252,14 @@
ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
ioct_rqst->msg.if_id = 0;
+ ioct_rqst->msg.flags = ring->current_phase;
bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return 0;
}
@@ -4451,22 +7267,64 @@
/** test / loopback */
void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
{
- if (dmaxfer == NULL) {
+ if (dmaxfer == NULL)
return;
- }
dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
}
/** test / loopback */
+int
+dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
+{
+ dhd_prot_t *prot = dhdp->prot;
+ dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
+ dmaxref_mem_map_t *dmap = NULL;
+
+ dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
+ if (!dmap) {
+ DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
+ goto mem_alloc_fail;
+ }
+ dmap->srcmem = &(dmaxfer->srcmem);
+ dmap->dstmem = &(dmaxfer->dstmem);
+
+ DMAXFER_FREE(dhdp, dmap);
+ return BCME_OK;
+
+mem_alloc_fail:
+ if (dmap) {
+ MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
+ dmap = NULL;
+ }
+ return BCME_NOMEM;
+} /* dhd_prepare_schedule_dmaxfer_free */
+
+/** test / loopback */
+void
+dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
+{
+
+ dhd_dma_buf_free(dhdp, dmmap->srcmem);
+ dhd_dma_buf_free(dhdp, dmmap->dstmem);
+
+ MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
+
+ dhdp->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhdp);
+
+} /* dmaxfer_free_prev_dmaaddr */
+
+/** test / loopback */
int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
{
- uint i;
- if (!dmaxfer) {
+ uint i = 0, j = 0;
+ if (!dmaxfer)
return BCME_ERROR;
- }
/* First free up existing buffers */
dmaxfer_free_dmaaddr(dhd, dmaxfer);
@@ -4482,10 +7340,24 @@
dmaxfer->len = len;
- /* Populate source with a pattern */
- for (i = 0; i < dmaxfer->len; i++) {
- ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
+ /* Populate source with a pattern like below
+ * 0x00000000
+ * 0x01010101
+ * 0x02020202
+ * 0x03030303
+ * 0x04040404
+ * 0x05050505
+ * ...
+ * 0xFFFFFFFF
+ */
+ while (i < dmaxfer->len) {
+ ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
+ i++;
+ if (i % 4 == 0) {
+ j++;
+ }
}
+
OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
dmaxfer->srcdelay = srcdelay;
@@ -4498,21 +7370,81 @@
dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
{
dhd_prot_t *prot = dhd->prot;
+ uint64 end_usec;
+ pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
+ int buf_free_scheduled;
+ BCM_REFERENCE(cmplt);
+ end_usec = OSL_SYSUPTIME_US();
+
+ DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
+ prot->dmaxfer.status = cmplt->compl_hdr.status;
OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
if (memcmp(prot->dmaxfer.srcmem.va,
- prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
- bcm_print_bytes("XFER SRC: ",
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
+ cmplt->compl_hdr.status != BCME_OK) {
+ DHD_ERROR(("DMA loopback failed\n"));
+			/* It is observed that sometimes the completion
+			 * header status is set as OK, but the memcmp fails;
+			 * hence always explicitly set the dmaxfer status
+			 * as error when this happens.
+			 */
+ prot->dmaxfer.status = BCME_ERROR;
+ prhex("XFER SRC: ",
prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
- bcm_print_bytes("XFER DST: ",
+ prhex("XFER DST: ",
prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
- } else {
- DHD_INFO(("DMA successful\n"));
+ }
+ else {
+ switch (prot->dmaxfer.d11_lpbk) {
+ case M2M_DMA_LPBK: {
+ DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
+ } break;
+ case D11_LPBK: {
+ DHD_ERROR(("DMA successful with d11 loopback\n"));
+ } break;
+ case BMC_LPBK: {
+ DHD_ERROR(("DMA successful with bmc loopback\n"));
+ } break;
+ case M2M_NON_DMA_LPBK: {
+ DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
+ } break;
+ case D11_HOST_MEM_LPBK: {
+ DHD_ERROR(("DMA successful d11 host mem loopback\n"));
+ } break;
+ case BMC_HOST_MEM_LPBK: {
+ DHD_ERROR(("DMA successful bmc host mem loopback\n"));
+ } break;
+ default: {
+ DHD_ERROR(("Invalid loopback option\n"));
+ } break;
+ }
+
+ if (DHD_LPBKDTDUMP_ON()) {
+ /* debug info print of the Tx and Rx buffers */
+ dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
+ prot->dmaxfer.len, DHD_INFO_VAL);
+ dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
+ prot->dmaxfer.len, DHD_INFO_VAL);
+ }
}
}
- dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+
+ buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
+ end_usec -= prot->dmaxfer.start_usec;
+ if (end_usec) {
+ prot->dmaxfer.time_taken = end_usec;
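+		/* kBps = (len / 1024 bytes) / (usec / 10^6 s), computed as
+		 * len * (10^6 / 1024) / usec; the constant truncates to 976
+		 * in integer math (~0.06% underestimate).
+		 */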
+ DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
+ prot->dmaxfer.len, (unsigned long)end_usec,
+ (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
+ }
dhd->prot->dmaxfer.in_progress = FALSE;
+
+ if (buf_free_scheduled != BCME_OK) {
+ dhd->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhd);
+ }
}
/** Test functionality.
@@ -4521,7 +7453,8 @@
* by a spinlock.
*/
int
-dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
+ uint d11_lpbk, uint core_num)
{
unsigned long flags;
int ret = BCME_OK;
@@ -4533,17 +7466,24 @@
if (prot->dmaxfer.in_progress) {
DHD_ERROR(("DMA is in progress...\n"));
- return ret;
+ return BCME_ERROR;
}
+
+ if (d11_lpbk >= MAX_LPBK) {
+ DHD_ERROR(("loopback mode should be either"
+ " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
+ return BCME_ERROR;
+ }
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
prot->dmaxfer.in_progress = TRUE;
if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
&prot->dmaxfer)) != BCME_OK) {
prot->dmaxfer.in_progress = FALSE;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return ret;
}
-
- DHD_GENERAL_LOCK(dhd, flags);
dmap = (pcie_dma_xfer_params_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
@@ -4551,7 +7491,7 @@
if (dmap == NULL) {
dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
prot->dmaxfer.in_progress = FALSE;
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return BCME_NOMEM;
}
@@ -4559,6 +7499,7 @@
dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ dmap->cmn_hdr.flags = ring->current_phase;
ring->seqnum++;
dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
@@ -4568,49 +7509,170 @@
dmap->xfer_len = htol32(prot->dmaxfer.len);
dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+ prot->dmaxfer.d11_lpbk = d11_lpbk;
+ dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
+ << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
+ ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
+ << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
+ prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("DMA Started...\n"));
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+	DHD_ERROR(("DMA loopback started...\n"));
return BCME_OK;
} /* dhdmsgbuf_dmaxfer_req */
+
+int
+dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ if (prot->dmaxfer.in_progress)
+ result->status = DMA_XFER_IN_PROGRESS;
+ else if (prot->dmaxfer.status == 0)
+ result->status = DMA_XFER_SUCCESS;
+ else
+ result->status = DMA_XFER_FAILED;
+
+ result->type = prot->dmaxfer.d11_lpbk;
+ result->error_code = prot->dmaxfer.status;
+ result->num_bytes = prot->dmaxfer.len;
+ result->time_taken = prot->dmaxfer.time_taken;
+ if (prot->dmaxfer.time_taken) {
+ /* throughput in kBps */
+ result->tput =
+ (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
+ (uint32)prot->dmaxfer.time_taken;
+ }
+
+ return BCME_OK;
+}
/** Called in the process of submitting an ioctl to the dongle */
static int
dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
int ret = 0;
+ uint copylen = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
if (cmd == WLC_GET_VAR && buf)
{
- if (!strcmp((char *)buf, "bcmerrorstr"))
- {
- strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+ if (!len || !*(uint8 *)buf) {
+ DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
+ ret = BCME_BADARG;
goto done;
}
- else if (!strcmp((char *)buf, "bcmerror"))
- {
- *(int *)buf = dhd->dongle_error;
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ copylen = MIN(len, BCME_STRLEN);
+
+ if ((len >= strlen("bcmerrorstr")) &&
+ (!strcmp((char *)buf, "bcmerrorstr"))) {
+ strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
+ *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
+ goto done;
+ } else if ((len >= strlen("bcmerror")) &&
+ !strcmp((char *)buf, "bcmerror")) {
+ *(uint32 *)buf = dhd->dongle_error;
goto done;
}
}
+ DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
+ action, ifidx, cmd, len));
+
ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
- DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
- action, ifidx, cmd, len));
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ goto done;
+ }
/* wait for IOCTL completion message from dongle and get first fragment */
ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
done:
return ret;
+}
+
+void
+dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
+{
+ uint32 intstatus;
+ dhd_prot_t *prot = dhd->prot;
+ dhd->rxcnt_timeout++;
+ dhd->rx_ctlerrs++;
+ dhd->iovar_timeout_occured = TRUE;
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
+ dhd->is_sched_error ? " due to scheduling problem" : "",
+ dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
+ prot->ioctl_state, dhd->busstate, prot->ioctl_received));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+
+ if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
+ prot->curr_ioctl_cmd == WLC_GET_VAR) {
+ char iovbuf[32];
+ int i;
+ int dump_size = 128;
+ uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
+ iovbuf[sizeof(iovbuf) - 1] = '\0';
+ DHD_ERROR(("Current IOVAR (%s): %s\n",
+ prot->curr_ioctl_cmd == WLC_SET_VAR ?
+ "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
+ DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
+ for (i = 0; i < dump_size; i++) {
+ DHD_ERROR(("%02X ", ioctl_buf[i]));
+ if ((i % 32) == 31) {
+ DHD_ERROR(("\n"));
+ }
+ }
+ DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
+ }
+
+ /* Check the PCIe link status by reading intstatus register */
+ intstatus = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
+
+ dhd_bus_dump_console_buffer(dhd->bus);
+ dhd_prot_debug_info_print(dhd);
}
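The REQBUF dump above prints one byte per "%02X " and breaks the line after every 32nd byte via (i % 32) == 31. The same formatting loop, reduced to a standalone helper (buffer and length are caller-supplied placeholders):

    #include <stdint.h>
    #include <stdio.h>

    /* Print 'len' bytes, 32 per line, in the style of the IOCTL REQBUF dump. */
    static void hexdump32(const uint8_t *buf, int len)
    {
        for (int i = 0; i < len; i++) {
            printf("%02X ", buf[i]);
            if ((i % 32) == 31)
                printf("\n");
        }
        printf("\n");
    }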
/**
@@ -4627,53 +7689,61 @@
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- if (dhd->dongle_reset) {
+ if (dhd_query_bus_erros(dhd)) {
ret = -EIO;
goto out;
}
- if (prot->cur_ioctlresp_bufs_posted) {
- prot->cur_ioctlresp_bufs_posted--;
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+
+ if (prot->ioctl_received == 0) {
+ uint32 intstatus = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+ int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+ if ((intstatus) && (intstatus != (uint32)-1) &&
+ (timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
+ DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
+ " host_irq_disabled=%d\n",
+ __FUNCTION__, intstatus, host_irq_disabled));
+ dhd_pcie_intr_count_dump(dhd);
+ dhd_print_tasklet_status(dhd);
+ dhd_prot_process_ctrlbuf(dhd);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+ /* Clear Interrupts */
+ dhdpcie_bus_clear_intstatus(dhd->bus);
+ }
}
- dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+ /* check if resumed on time out related to scheduling issue */
+ dhd->is_sched_error = FALSE;
+ if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
+ dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
+ }
- timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
- if (timeleft == 0) {
- dhd->rxcnt_timeout++;
- dhd->rx_ctlerrs++;
- DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
- "trans_id %d state %d busstate=%d ioctl_received=%d\n",
- __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
- prot->ioctl_trans_id, prot->ioctl_state & ~MSGBUF_IOCTL_RESP_PENDING,
- dhd->busstate, prot->ioctl_received));
+ dhd_msgbuf_iovar_timeout_dump(dhd);
- dhd_prot_debug_info_print(dhd);
-
-#if defined(DHD_FW_COREDUMP)
- /* Collect socram dump for CUSTOMER_HW4 OR Brix Android */
- /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
- if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
+#ifdef DHD_FW_COREDUMP
+ /* Collect socram dump */
+ if (dhd->memdump_enabled) {
/* collect core dump */
dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
dhd_bus_mem_dump(dhd);
}
-#endif /* DHD_FW_COREDUMP && OEM_ANDROID */
- if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
+#endif /* DHD_FW_COREDUMP */
+
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
- dhd->bus->islinkdown = 1;
+ dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
- DHD_ERROR(("%s: timeout > MAX_CNTL_TX_TIMEOUT\n", __FUNCTION__));
- }
ret = -ETIMEDOUT;
goto out;
} else {
if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
__FUNCTION__, prot->ioctl_received));
- ret = -ECONNABORTED;
+ ret = -EINVAL;
goto out;
}
dhd->rxcnt_timeout = 0;
@@ -4682,28 +7752,16 @@
__FUNCTION__, prot->ioctl_resplen));
}
- if (dhd->dongle_trap_occured) {
-#ifdef SUPPORT_LINKDOWN_RECOVERY
-#ifdef CONFIG_ARCH_MSM
- dhd->bus->islinkdown = 1;
-#endif /* CONFIG_ARCH_MSM */
-#endif /* SUPPORT_LINKDOWN_RECOVERY */
- DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
- ret = -EREMOTEIO;
- goto out;
- }
-
- if (dhd->prot->ioctl_resplen > len) {
+ if (dhd->prot->ioctl_resplen > len)
dhd->prot->ioctl_resplen = (uint16)len;
- }
- if (buf) {
+ if (buf)
bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
- }
ret = (int)(dhd->prot->ioctl_status);
+
out:
DHD_GENERAL_LOCK(dhd, flags);
- dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
+ dhd->prot->ioctl_state = 0;
dhd->prot->ioctl_resplen = 0;
dhd->prot->ioctl_received = IOCTL_WAIT;
dhd->prot->curr_ioctl_cmd = 0;
@@ -4719,6 +7777,12 @@
DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
return -EIO;
@@ -4731,14 +7795,20 @@
return -EIO;
}
- /* Fill up msgbuf for ioctl req */
- ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
-
DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
action, ifidx, cmd, len));
+ /* Fill up msgbuf for ioctl req */
+ ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ goto done;
+ }
+
ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+done:
return ret;
}
@@ -4750,16 +7820,161 @@
/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
- void *params, int plen, void *arg, int len, bool set)
+ void *params, int plen, void *arg, int len, bool set)
{
return BCME_UNSUPPORTED;
}
+
+#ifdef DHD_DUMP_PCIE_RINGS
+int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write)
+{
+ dhd_prot_t *prot;
+ msgbuf_ring_t *ring;
+ int ret = 0;
+ uint16 h2d_flowrings_total;
+ uint16 flowid;
+
+ if (!(dhd) || !(dhd->prot)) {
+ goto exit;
+ }
+ prot = dhd->prot;
+
+ /* Below is the same ring dump sequence followed in parser as well. */
+ ring = &prot->h2dring_ctrl_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->h2dring_rxp_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_ctrl_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_tx_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = &prot->d2hring_rx_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
+ goto exit;
+ }
+ }
+
+#ifdef EWP_EDL
+ if (dhd->dongle_edl_support) {
+ ring = prot->d2hring_edl;
+ if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+ else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+#else
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+#endif /* EWP_EDL */
+ {
+ ring = prot->h2dring_info_subn;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+
+ ring = prot->d2hring_info_cpln;
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+/* Write to file */
+static
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn)
+{
+ int ret = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
+ ((unsigned long)(ring->max_items) * (ring->item_len)));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+ } else if (user_buf) {
+ ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
+ ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
+ }
+ return ret;
+}
+#endif /* DHD_DUMP_PCIE_RINGS */
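Each ring is dumped as one contiguous region, so dhd_ring_write() always advances the file position by max_items * item_len bytes, and the parser consumes rings in the same fixed order listed above. A hedged sketch of the size bookkeeping follows; the struct is a reduced view of the two msgbuf_ring_t fields the dump path uses.

    #include <stdint.h>

    /* Reduced view of the ring geometry used by the dump path. */
    struct ring_geom {
        uint16_t max_items; /* number of work items in the ring */
        uint16_t item_len;  /* size of one work item, bytes */
    };

    /* Bytes one ring contributes to the dump; the file position
     * advances by exactly this amount per dhd_ring_write() call.
     */
    static unsigned long ring_dump_bytes(const struct ring_geom *r)
    {
        return (unsigned long)r->max_items * r->item_len;
    }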
+
+#ifdef EWP_EDL
+/* Write to file */
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn)
+{
+ int ret = 0, nitems = 0;
+ char *buf = NULL, *ptr = NULL;
+ uint8 *msg_addr = NULL;
+ uint16 rd = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ if (buf == NULL) {
+ DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+ ptr = buf;
+
+ for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
+ msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
+ memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
+ ptr += D2HRING_EDL_HDR_SIZE;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ goto done;
+ }
+ }
+ else {
+ ret = dhd_export_debug_data(buf, NULL, user_buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
+ }
+
+done:
+ if (buf) {
+ MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ }
+ return ret;
+}
+#endif /* EWP_EDL */
/** Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
-#if defined(PCIE_D2H_SYNC)
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
@@ -4768,12 +7983,16 @@
bcm_bprintf(b, "\nd2h_sync: NONE:");
bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
-#endif /* PCIE_D2H_SYNC */
bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
- DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support),
- DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support),
+ dhd->dma_h2d_ring_upd_support,
+ dhd->dma_d2h_ring_upd_support,
dhd->prot->rw_index_sz);
+ bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+ h2d_max_txpost, dhd->prot->h2d_max_txpost);
+ bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
+ bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
+ bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
}
/* Update local copy of dongle statistics */
@@ -4800,13 +8019,13 @@
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ring->ring_lock, flags);
hevent = (hostevent_hdr_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (hevent == NULL) {
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return -1;
}
@@ -4815,6 +8034,7 @@
ring->seqnum++;
hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
hevent->msg.if_id = 0;
+ hevent->msg.flags = ring->current_phase;
/* Event payload */
hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
@@ -4823,7 +8043,8 @@
* from the msgbuf, we can directly call the write_complete
*/
dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return 0;
}
@@ -4842,11 +8063,22 @@
ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
if (ret_buf == NULL) {
+ /* HWA TODO: the RD pointer needs to be fetched from a different
+ * array, which HWA writes directly into host memory
+ */
/* if alloc failed , invalidate cached read ptr */
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ if (dhd->dma_d2h_ring_upd_support) {
ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
} else {
dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ /* Check if ring->rd is valid */
+ if (ring->rd >= ring->max_items) {
+ DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
+ dhd->bus->read_shm_fail = TRUE;
+ return NULL;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/* Try allocating once more */
@@ -4856,6 +8088,11 @@
DHD_INFO(("%s: Ring space not available \n", ring->name));
return NULL;
}
+ }
+
+ if (ret_buf == HOST_RING_BASE(ring)) {
+ DHD_INFO(("%s: setting the phase now\n", ring->name));
+ ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
}
/* Return alloced space */
@@ -4879,20 +8116,24 @@
uint16 alloced = 0;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+ if (dhd_query_bus_erros(dhd)) {
+ return -EIO;
+ }
+
rqstlen = len;
resplen = len;
/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
/* 8K allocation of dongle buffer fails */
/* dhd doesnt give separate input & output buf lens */
- /* so making the assumption that input length can never be more than 1.5k */
- rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
+ /* so making the assumption that input length can never be more than 2k */
+ rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ring->ring_lock, flags);
if (prot->ioctl_state) {
DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return BCME_BUSY;
} else {
prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
@@ -4906,14 +8147,14 @@
prot->ioctl_state = 0;
prot->curr_ioctl_cmd = 0;
prot->ioctl_received = IOCTL_WAIT;
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return -1;
}
/* Common msg buf hdr */
ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
- ioct_rqst->cmn_hdr.flags = 0;
+ ioct_rqst->cmn_hdr.flags = ring->current_phase;
ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
ring->seqnum++;
@@ -4931,15 +8172,15 @@
/* copy ioct payload */
ioct_buf = (void *) prot->ioctbuf.va;
- if (buf) {
+ prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
+
+ if (buf)
memcpy(ioct_buf, buf, len);
- }
OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
- if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
+ if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
DHD_ERROR(("host ioct address unaligned !!!!! \n"));
- }
DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
@@ -4947,11 +8188,11 @@
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return 0;
} /* dhd_fillup_ioct_reqst */
-
/**
* dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
@@ -4971,6 +8212,8 @@
int dma_buf_alloced = BCME_NOMEM;
uint32 dma_buf_len = max_items * item_len;
dhd_prot_t *prot = dhd->prot;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ dhd_dma_buf_t *dma_buf = NULL;
ASSERT(ring);
ASSERT(name);
@@ -4986,13 +8229,13 @@
ring->item_len = item_len;
/* A contiguous space may be reserved for all flowrings */
- if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) {
+ if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
/* Carve out from the contiguous DMA-able flowring buffer */
uint16 flowid;
uint32 base_offset;
- dhd_dma_buf_t *dma_buf = &ring->dma_buf;
dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
+ dma_buf = &ring->dma_buf;
flowid = DHD_RINGID_TO_FLOWID(ringid);
base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
@@ -5012,10 +8255,24 @@
(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
} else {
- /* Allocate a dhd_dma_buf */
- dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
- if (dma_buf_alloced != BCME_OK) {
- return BCME_NOMEM;
+#ifdef EWP_EDL
+ if (ring == dhd->prot->d2hring_edl) {
+ /* For EDL ring, memory is alloced during attach,
+ * so just need to copy the dma_buf to the ring's dma_buf
+ */
+ memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
+ dma_buf = &ring->dma_buf;
+ if (dma_buf->va == NULL) {
+ return BCME_NOMEM;
+ }
+ } else
+#endif /* EWP_EDL */
+ {
+ /* Allocate a dhd_dma_buf */
+ dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
+ if (dma_buf_alloced != BCME_OK) {
+ return BCME_NOMEM;
+ }
}
}
@@ -5030,6 +8287,8 @@
}
}
#endif /* BCM_SECURE_DMA */
+
+ ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
"ring start %p buf phys addr %x:%x \n",
@@ -5050,7 +8309,6 @@
} /* dhd_prot_ring_attach */
-
/**
* dhd_prot_ring_init - Post the common ring information to dongle.
*
@@ -5065,6 +8323,12 @@
{
ring->wr = 0;
ring->rd = 0;
+ ring->curr_rd = 0;
+ /* Reset hwa_db_type for all rings,
+ * for data path rings, it will be assigned separately post init
+ * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
+ */
+ ring->hwa_db_type = 0;
/* CAUTION: ring::base_addr already in Little Endian */
dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
@@ -5084,7 +8348,6 @@
} /* dhd_prot_ring_init */
-
/**
* dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
* Reset WR and RD indices to 0.
@@ -5097,8 +8360,10 @@
dhd_dma_buf_reset(dhd, &ring->dma_buf);
ring->rd = ring->wr = 0;
+ ring->curr_rd = 0;
+ ring->inited = FALSE;
+ ring->create_pending = FALSE;
}
-
/**
* dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
@@ -5108,6 +8373,7 @@
dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
dhd_prot_t *prot = dhd->prot;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
ASSERT(ring);
ring->inited = FALSE;
@@ -5126,76 +8392,26 @@
/* If the DMA-able buffer was carved out of a pre-reserved contiguous
* memory, then simply stop using it.
*/
- if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) {
+ if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
} else {
dhd_dma_buf_free(dhd, &ring->dma_buf);
}
+ dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
+
} /* dhd_prot_ring_detach */
-
-/*
- * +----------------------------------------------------------------------------
- * Flowring Pool
- *
- * Unlike common rings, which are attached very early on (dhd_prot_attach),
- * flowrings are dynamically instantiated. Moreover, flowrings may require a
- * larger DMA-able buffer. To avoid issues with fragmented cache coherent
- * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
- * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
- *
- * Each DMA-able buffer may be allocated independently, or may be carved out
- * of a single large contiguous region that is registered with the protocol
- * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
- * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
- *
- * No flowring pool action is performed in dhd_prot_attach(), as the number
- * of h2d rings is not yet known.
- *
- * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
- * determine the number of flowrings required, and a pool of msgbuf_rings are
- * allocated and a DMA-able buffer (carved or allocated) is attached.
- * See: dhd_prot_flowrings_pool_attach()
- *
- * A flowring msgbuf_ring object may be fetched from this pool during flowring
- * creation, using the flowid. Likewise, flowrings may be freed back into the
- * pool on flowring deletion.
- * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
- *
- * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
- * are detached (returned back to the carved region or freed), and the pool of
- * msgbuf_ring and any objects allocated against it are freed.
- * See: dhd_prot_flowrings_pool_detach()
- *
- * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
- * state as-if upon an attach. All DMA-able buffers are retained.
- * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
- * pool attach will notice that the pool persists and continue to use it. This
- * will avoid the case of a fragmented DMA-able region.
- *
- * +----------------------------------------------------------------------------
- */
-
/* Fetch number of H2D flowrings given the total number of h2d rings */
-#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \
- ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS)
-
-/* Conversion of a flowid to a flowring pool index */
-#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
- ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
-
-/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
-#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
- (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid)
-
-/* Traverse each flowring in the flowring pool, assigning ring and flowid */
-#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \
- for ((flowid) = DHD_FLOWRING_START_FLOWID, \
- (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
- (flowid) < (prot)->h2d_rings_total; \
- (flowid)++, (ring)++)
+uint16
+dhd_get_max_flow_rings(dhd_pub_t *dhd)
+{
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+ return dhd->bus->max_tx_flowrings;
+ else
+ return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
+}
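dhd_get_max_flow_rings() encodes an IPC-revision quirk: from PCIE_SHARED_VERSION_6 onward the dongle advertises the flowring count directly, while older revisions fold the common H2D submission rings into the same number. A self-contained sketch of the branch; the two constants are assumptions standing in for the shared-header values.

    #include <stdint.h>

    #define SHARED_VERSION_6_SKETCH 6 /* stand-in for PCIE_SHARED_VERSION_6 */
    #define H2D_COMMON_RINGS_SKETCH 2 /* stand-in: ctrl submit + rxpost */

    static uint16_t max_flow_rings_sketch(uint32_t fw_rev, uint16_t max_tx_flowrings)
    {
        /* Newer IPC revisions advertise flowrings only; older ones
         * include the common submission rings in the same count.
         */
        if (fw_rev >= SHARED_VERSION_6_SKETCH)
            return max_tx_flowrings;
        return max_tx_flowrings - H2D_COMMON_RINGS_SKETCH;
    }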
/**
* dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
@@ -5223,9 +8439,8 @@
dhd_prot_t *prot = dhd->prot;
char ring_name[RING_NAME_MAX_LENGTH];
- if (prot->h2d_flowrings_pool != NULL) {
+ if (prot->h2d_flowrings_pool != NULL)
return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
- }
ASSERT(prot->h2d_rings_total == 0);
@@ -5239,7 +8454,7 @@
}
/* Subtract number of H2D common rings, to determine number of flowrings */
- h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
@@ -5254,14 +8469,18 @@
}
/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
- ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
if (dhd_prot_ring_attach(dhd, ring, ring_name,
- H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+ prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
goto attach_fail;
}
+ /*
+ * TODO - Currently flowring HWA is disabled; it can be enabled as below:
+ * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
+ */
+ ring->hwa_db_type = 0;
}
return BCME_OK;
@@ -5274,7 +8493,6 @@
return BCME_NOMEM;
} /* dhd_prot_flowrings_pool_attach */
-
/**
* dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
@@ -5294,7 +8512,7 @@
static void
dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
{
- uint16 flowid;
+ uint16 flowid, h2d_flowrings_total;
msgbuf_ring_t *ring;
dhd_prot_t *prot = dhd->prot;
@@ -5302,16 +8520,15 @@
ASSERT(prot->h2d_rings_total == 0);
return;
}
-
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
/* Reset each flowring in the flowring pool */
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
dhd_prot_ring_reset(dhd, ring);
ring->inited = FALSE;
}
/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
}
-
/**
* dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
@@ -5324,7 +8541,7 @@
{
int flowid;
msgbuf_ring_t *ring;
- int h2d_flowrings_total; /* exclude H2D common rings */
+ uint16 h2d_flowrings_total; /* exclude H2D common rings */
dhd_prot_t *prot = dhd->prot;
if (prot->h2d_flowrings_pool == NULL) {
@@ -5332,12 +8549,11 @@
return;
}
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
/* Detach the DMA-able buffer for each flowring in the flowring pool */
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
dhd_prot_ring_detach(dhd, ring);
}
-
- h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
MFREE(prot->osh, prot->h2d_flowrings_pool,
(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
@@ -5346,7 +8562,6 @@
prot->h2d_rings_total = 0;
} /* dhd_prot_flowrings_pool_detach */
-
/**
* dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
@@ -5374,11 +8589,15 @@
ring->wr = 0;
ring->rd = 0;
+ ring->curr_rd = 0;
ring->inited = TRUE;
-
+ /**
+ * Every time a flowring starts dynamically, initialize current_phase with 0
+ * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
+ */
+ ring->current_phase = 0;
return ring;
}
-
/**
* dhd_prot_flowrings_pool_release - release a previously fetched flowring's
@@ -5404,8 +8623,9 @@
ring->wr = 0;
ring->rd = 0;
ring->inited = FALSE;
-}
+ ring->curr_rd = 0;
+}
/* Assumes only one index is updated at a time */
/* If exactly_nitems is true, this function will allocate space for nitems or fail */
@@ -5435,11 +8655,11 @@
ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
/* Update write index */
- if ((ring->wr + *alloced) == ring->max_items) {
+ if ((ring->wr + *alloced) == ring->max_items)
ring->wr = 0;
- } else if ((ring->wr + *alloced) < ring->max_items) {
+ else if ((ring->wr + *alloced) < ring->max_items)
ring->wr += *alloced;
- } else {
+ else {
/* Should never hit this */
ASSERT(0);
return NULL;
@@ -5448,35 +8668,97 @@
return ret_ptr;
} /* dhd_prot_get_ring_space */
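The write-index update above distinguishes exactly three cases: a claim that ends on the ring boundary wraps wr to 0 (and, per the phase logic in dhd_prot_alloc_ring_space(), the next allocation starting at the ring base flips current_phase), a claim inside the ring advances wr, and anything else is an over-claim that should be impossible. A standalone model of that update:

    #include <stdint.h>

    /* Advance the producer index after claiming 'alloced' items; mirrors
     * the three-way branch in dhd_prot_get_ring_space(). Returns -1 on
     * an over-claim, which the driver treats as a fatal inconsistency.
     */
    static int ring_advance_wr(uint16_t *wr, uint16_t alloced, uint16_t max_items)
    {
        if (*wr + alloced == max_items)
            *wr = 0;            /* claimed up to the ring end: wrap */
        else if (*wr + alloced < max_items)
            *wr += alloced;     /* normal advance within the ring */
        else
            return -1;          /* over-claim: should never happen */
        return 0;
    }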
-
/**
* dhd_prot_ring_write_complete - Host updates the new WR index on producing
* new messages in a H2D ring. The messages are flushed from cache prior to
* posting the new WR index. The new WR index will be updated in the DMA index
* array or directly in the dongle's ring state memory.
* A PCIE doorbell will be generated to wake up the dongle.
+ * This is a non-atomic function; callers must
+ * always hold the appropriate locks.
*/
static void BCMFASTPATH
-dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+__dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
uint16 nitems)
{
dhd_prot_t *prot = dhd->prot;
+ uint32 db_index;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+ uint corerev;
/* cache flush */
OSL_CACHE_FLUSH(p, ring->item_len * nitems);
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
- dhd_prot_dma_indx_set(dhd, ring->wr,
- H2D_DMA_INDX_WR_UPD, ring->idx);
+ /* For HWA, update db_index and ring mb2 DB and return */
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
+ db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
+ DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
+ __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ return;
+ }
+
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_DMA_INDX_WR_UPD, ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_IFRM_INDX_WR_UPD, ring->idx);
} else {
- dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
- sizeof(uint16), RING_WR_UPD, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
}
/* raise h2d interrupt */
- prot->mb_ring_fn(dhd->bus, ring->wr);
+ if (IDMA_ACTIVE(dhd) ||
+ (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
+ db_index = IDMA_IDX0;
+ /* this API is called in the wl down path; in that case sih is already freed */
+ if (dhd->bus->sih) {
+ corerev = dhd->bus->sih->buscorerev;
+ /* We need to explicitly configure the type of DMA for core rev >= 24 */
+ if (corerev >= 24) {
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+ }
+ }
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ } else {
+ prot->mb_ring_fn(dhd->bus, ring->wr);
+ }
}
+static void BCMFASTPATH
+dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+ uint16 nitems)
+{
+ unsigned long flags_bus;
+ DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
+ __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
+ DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
+}
+
+/**
+ * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
+ * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
+ * to indicate D3_INFORM sent in the same BUS_LOCK.
+ */
+static void BCMFASTPATH
+dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
+ uint16 nitems, uint32 mb_data)
+{
+ unsigned long flags_bus;
+
+ DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
+
+ __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
+
+ /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
+ if (mb_data == H2D_HOST_D3_INFORM) {
+ dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
+ }
+
+ DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
+}
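The point of the mbdata variant is that the doorbell write and the low-power-state update happen inside one bus-lock critical section, so no H2D doorbell can be rung between D3_INFORM going out and the state flip. A reduced model of that ordering, with illustrative lock and state names:

    #include <pthread.h>
    #include <stdint.h>

    enum bus_lps { BUS_ACTIVE, BUS_D3_INFORM_SENT };

    struct bus_model {
        pthread_mutex_t lock;
        enum bus_lps low_power_state;
    };

    #define D3_INFORM_SKETCH 0x4 /* stand-in for H2D_HOST_D3_INFORM */

    /* Ring the doorbell and, if this was D3_INFORM, mark the bus state
     * in the same critical section so later doorbells can be suppressed.
     */
    static void write_complete_mbdata(struct bus_model *bus,
                                      void (*ring_db)(void), uint32_t mb_data)
    {
        pthread_mutex_lock(&bus->lock);
        ring_db();
        if (mb_data == D3_INFORM_SKETCH)
            bus->low_power_state = BUS_D3_INFORM_SENT;
        pthread_mutex_unlock(&bus->lock);
    }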
/**
* dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
@@ -5486,32 +8768,205 @@
static void
dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
{
+ dhd_prot_t *prot = dhd->prot;
+ uint32 db_index;
+ uint corerev;
+
+ /* For HWA, update db_index and ring mb2 DB and return */
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
+ db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
+ DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
+ __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
+ return;
+ }
+
/* update read index */
/* If dma'ing h2d indices supported
* update the r -indices in the
* host memory o/w in TCM
*/
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ if (IDMA_ACTIVE(dhd)) {
dhd_prot_dma_indx_set(dhd, ring->rd,
D2H_DMA_INDX_RD_UPD, ring->idx);
+ db_index = IDMA_IDX1;
+ if (dhd->bus->sih) {
+ corerev = dhd->bus->sih->buscorerev;
+ /* We need to explicitly configure the type of DMA for core rev >= 24 */
+ if (corerev >= 24) {
+ db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
+ }
+ }
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
+ } else if (dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, ring->rd,
+ D2H_DMA_INDX_RD_UPD, ring->idx);
} else {
dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
sizeof(uint16), RING_RD_UPD, ring->idx);
}
}
+static int
+dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
+ uint16 ring_type, uint32 req_id)
+{
+ unsigned long flags;
+ d2h_ring_create_req_t *d2h_ring;
+ uint16 alloced = 0;
+ int ret = BCME_OK;
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
+
+ if (ring_to_create == NULL) {
+ DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto err;
+ }
+
+ /* Request for ring buffer space */
+ d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+
+ if (d2h_ring == NULL) {
+ DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
+ __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto err;
+ }
+ ring_to_create->create_req_id = (uint16)req_id;
+ ring_to_create->create_pending = TRUE;
+
+ /* Common msg buf hdr */
+ d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
+ d2h_ring->msg.if_id = 0;
+ d2h_ring->msg.flags = ctrl_ring->current_phase;
+ d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
+ d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
+ DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
+ ring_to_create->idx, max_h2d_rings));
+
+ d2h_ring->ring_type = ring_type;
+ d2h_ring->max_items = htol16(ring_to_create->max_items);
+ d2h_ring->len_item = htol16(ring_to_create->item_len);
+ d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
+ d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
+
+ d2h_ring->flags = 0;
+ d2h_ring->msg.epoch =
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+#ifdef EWP_EDL
+ if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
+ DHD_ERROR(("%s: sending d2h EDL ring create: "
+ "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
+ __FUNCTION__, ltoh16(d2h_ring->max_items),
+ ltoh16(d2h_ring->len_item),
+ ltoh16(d2h_ring->ring_id),
+ d2h_ring->ring_ptr.low_addr,
+ d2h_ring->ring_ptr.high_addr));
+ }
+#endif /* EWP_EDL */
+
+ /* Update the flow_ring's WRITE index */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ return ret;
+err:
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ return ret;
+}
+
+static int
+dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
+{
+ unsigned long flags;
+ h2d_ring_create_req_t *h2d_ring;
+ uint16 alloced = 0;
+ uint8 i = 0;
+ int ret = BCME_OK;
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
+
+ if (ring_to_create == NULL) {
+ DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto err;
+ }
+
+ /* Request for ring buffer space */
+ h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+
+ if (h2d_ring == NULL) {
+ DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
+ __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto err;
+ }
+ ring_to_create->create_req_id = (uint16)id;
+ ring_to_create->create_pending = TRUE;
+
+ /* Common msg buf hdr */
+ h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
+ h2d_ring->msg.if_id = 0;
+ h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
+ h2d_ring->msg.flags = ctrl_ring->current_phase;
+ h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
+ h2d_ring->ring_type = ring_type;
+ h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
+ h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
+ h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
+ h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
+ h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
+
+ for (i = 0; i < ring_to_create->n_completion_ids; i++) {
+ h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
+ }
+
+ h2d_ring->flags = 0;
+ h2d_ring->msg.epoch =
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ /* Update the flow_ring's WRITE index */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ return ret;
+err:
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ return ret;
+}
/**
* dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
* Dongle will DMA the entire array (if DMA_INDX feature is enabled).
* See dhd_prot_dma_indx_init()
*/
-static void
+void
dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
{
uint8 *ptr;
uint16 offset;
dhd_prot_t *prot = dhd->prot;
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
switch (type) {
case H2D_DMA_INDX_WR_UPD:
@@ -5521,7 +8976,12 @@
case D2H_DMA_INDX_RD_UPD:
ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
- offset = DHD_D2H_RING_OFFSET(ringid);
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+ break;
+
+ case H2D_IFRM_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
+ offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
break;
default:
@@ -5542,7 +9002,6 @@
} /* dhd_prot_dma_indx_set */
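Each ring owns one rw_index_sz-wide slot in the host-resident index array, so the store above resolves to base + offset * rw_index_sz; D2H rings sit past all H2D submission rings, which is why DHD_D2H_RING_OFFSET() now also takes max_submission_rings. A sketch of the addressing, with the slot layout reduced to the arithmetic and endianness handling omitted:

    #include <stdint.h>
    #include <string.h>

    /* Store a 16-bit ring index into its slot of a DMA-visible index
     * array: slot address = base + slot_number * slot_width.
     */
    static void dma_indx_store(uint8_t *array_va, uint16_t slot_number,
                               uint16_t slot_width, uint16_t new_index)
    {
        uint8_t *slot = array_va + (uint32_t)slot_number * slot_width;
        memcpy(slot, &new_index, sizeof(new_index)); /* host-endian sketch */
    }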
-
/**
* dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
* array.
@@ -5556,6 +9015,7 @@
uint16 data;
uint16 offset;
dhd_prot_t *prot = dhd->prot;
+ uint16 max_h2d_rings = dhd->bus->max_submission_rings;
switch (type) {
case H2D_DMA_INDX_WR_UPD:
@@ -5570,12 +9030,12 @@
case D2H_DMA_INDX_WR_UPD:
ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
- offset = DHD_D2H_RING_OFFSET(ringid);
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
break;
case D2H_DMA_INDX_RD_UPD:
ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
- offset = DHD_D2H_RING_OFFSET(ringid);
+ offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
break;
default:
@@ -5642,37 +9102,41 @@
switch (type) {
case H2D_DMA_INDX_WR_BUF:
dma_buf = &prot->h2d_dma_indx_wr_buf;
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
goto ret_no_mem;
- }
DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
dma_buf->len, rw_index_sz, length));
break;
case H2D_DMA_INDX_RD_BUF:
dma_buf = &prot->h2d_dma_indx_rd_buf;
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
goto ret_no_mem;
- }
DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
dma_buf->len, rw_index_sz, length));
break;
case D2H_DMA_INDX_WR_BUF:
dma_buf = &prot->d2h_dma_indx_wr_buf;
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
goto ret_no_mem;
- }
DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
dma_buf->len, rw_index_sz, length));
break;
case D2H_DMA_INDX_RD_BUF:
dma_buf = &prot->d2h_dma_indx_rd_buf;
- if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
goto ret_no_mem;
- }
DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
+
+ case H2D_IFRM_INDX_WR_BUF:
+ dma_buf = &prot->h2d_ifrm_indx_wr_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+ goto ret_no_mem;
+ DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
dma_buf->len, rw_index_sz, length));
break;
@@ -5689,7 +9153,6 @@
return BCME_NOMEM;
} /* dhd_prot_dma_indx_init */
-
/**
* Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
@@ -5709,8 +9172,15 @@
__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+ /* Remember the read index in a local variable, because ring->rd is
+ * updated at the end of this function; without it, the exact read
+ * index the message was read from could not be reported.
+ */
+ ring->curr_rd = ring->rd;
+
/* update write pointer */
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ if (dhd->dma_d2h_ring_upd_support) {
/* DMAing write/read indices supported */
d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
ring->wr = d2h_wr;
@@ -5724,11 +9194,8 @@
/* check for avail space, in number of ring items */
items = READ_AVAIL_SPACE(wr, rd, depth);
- if (items == 0) {
+ if (items == 0)
return NULL;
- }
-
- ASSERT(items < ring->max_items);
/*
* Note that there are builds where Assert translates to just printk
@@ -5736,16 +9203,32 @@
* dhd_prot_process_msgtype can get into a big loop if this
* happens.
*/
- if (items >= ring->max_items) {
+ if (items > ring->max_items) {
DHD_ERROR(("\r\n======================= \r\n"));
DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
__FUNCTION__, ring, ring->name, ring->max_items, items));
DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
- DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n",
- dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack));
+ DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+ dhd->busstate, dhd->bus->wait_for_d3_ack));
DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (wr >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ }
+#else
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+ dhd_bus_mem_dump(dhd);
+
+ }
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
*available_len = 0;
+ dhd_schedule_reset(dhd);
+
return NULL;
}
@@ -5753,27 +9236,83 @@
read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
/* update read pointer */
- if ((ring->rd + items) >= ring->max_items) {
+ if ((ring->rd + items) >= ring->max_items)
ring->rd = 0;
- } else {
+ else
ring->rd += items;
- }
ASSERT(ring->rd < ring->max_items);
/* convert items to bytes : available_len must be 32bits */
*available_len = (uint32)(items * ring->item_len);
-#ifndef CUSTOMER_HW_31_2
- /* cannot use this since the dma ring is allocated as uncached,
- * this will cause an assertation
- */
OSL_CACHE_INV(read_addr, *available_len);
-#endif
+
/* return read address */
return read_addr;
} /* dhd_prot_get_read_addr */
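READ_AVAIL_SPACE() is the usual circular-buffer distance: when wr >= rd the readable items are contiguous, otherwise they wrap and only the tail up to the ring depth is returned, leaving the wrapped head for the next call (which is why ring->rd resets to 0 above rather than advancing past the end). A hedged model of the macro, which itself lives in a header not shown in this patch:

    #include <stdint.h>

    /* Items readable in one contiguous stretch of a circular ring;
     * models what READ_AVAIL_SPACE(wr, rd, depth) evaluates to.
     */
    static uint16_t read_avail_space(uint16_t wr, uint16_t rd, uint16_t depth)
    {
        return (wr >= rd) ? (wr - rd) : (depth - rd);
    }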
+
+/**
+ * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
+ * make sure the callers always hold appropriate locks.
+ */
+int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
+{
+ h2d_mailbox_data_t *h2d_mb_data;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+ unsigned long flags;
+ int num_post = 1;
+ int i;
+
+ DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
+ __FUNCTION__, mb_data));
+ if (!ctrl_ring->inited) {
+ DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ for (i = 0; i < num_post; i++) {
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ /* Request for ring buffer space */
+ h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
+ ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &alloced, FALSE);
+
+ if (h2d_mb_data == NULL) {
+ DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
+ __FUNCTION__));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ return BCME_NOMEM;
+ }
+
+ memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
+ /* Common msg buf hdr */
+ h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
+ h2d_mb_data->msg.flags = ctrl_ring->current_phase;
+
+ h2d_mb_data->msg.epoch =
+ ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ /* Update the mailbox data */
+ h2d_mb_data->mail_box_data = htol32(mb_data);
+
+ DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
+
+ /* upd wrt ptr and raise interrupt */
+ dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ }
+ return 0;
+}
/** Creates a flow ring and informs dongle of this event */
int
@@ -5785,6 +9324,7 @@
unsigned long flags;
uint16 alloced = 0;
msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+ uint16 max_flowrings = dhd->bus->max_tx_flowrings;
/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
@@ -5794,7 +9334,7 @@
return BCME_NOMEM;
}
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
/* Request for ctrl_ring buffer space */
flow_create_rqst = (tx_flowring_create_request_t *)
@@ -5804,7 +9344,7 @@
dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
__FUNCTION__, flow_ring_node->flowid));
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
return BCME_NOMEM;
}
@@ -5814,6 +9354,7 @@
flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
flow_create_rqst->msg.request_id = htol32(0); /* TBD */
+ flow_create_rqst->msg.flags = ctrl_ring->current_phase;
flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
ctrl_ring->seqnum++;
@@ -5826,17 +9367,47 @@
/* CAUTION: ring::base_addr already in Little Endian */
flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
- flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
+ flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+ flow_create_rqst->if_flags = 0;
+
+#ifdef DHD_HP2P
+ /* Create an HP2P flow ring if HP2P is enabled, the TID is 7, the traffic
+ * is not multicast, and the interface qualifies (AWDL, or an infra
+ * interface if the user enabled hp2p_infra_enable through its iovar).
+ * Only one HP2P flow may be active at a time.
+ */
+ if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
+ flow_ring_node->flow_info.tid == HP2P_PRIO &&
+ (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
+ !ETHER_ISMULTI(flow_create_rqst->da)) {
+ flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
+ flow_ring_node->hp2p_ring = TRUE;
+ dhd->hp2p_ring_active = TRUE;
+
+ DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
+ __FUNCTION__, flow_ring_node->flow_info.tid,
+ flow_ring_node->flowid));
+ }
+#endif /* DHD_HP2P */
+
+ /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
+ * currently it is not used for priority. so uses solely for ifrm mask
+ */
+ if (IFRM_ACTIVE(dhd))
+ flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
+
DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
flow_ring_node->flow_info.ifindex));
/* Update the flow_ring's WRITE index */
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
dhd_prot_dma_indx_set(dhd, flow_ring->wr,
- H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
} else {
dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
sizeof(uint16), RING_WR_UPD, flow_ring->idx);
@@ -5845,7 +9416,7 @@
/* update control subn ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
return BCME_OK;
} /* dhd_prot_flow_ring_create */
@@ -5865,6 +9436,143 @@
ltoh16(flow_create_resp->cmplt.status));
}
+static void
+dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
+{
+ h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
+ DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+ ltoh16(resp->cmplt.status),
+ ltoh16(resp->cmplt.ring_id),
+ ltoh32(resp->cmn_hdr.request_id)));
+ if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
+ DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
+ return;
+ }
+ if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
+ !dhd->prot->h2dring_info_subn->create_pending) {
+ DHD_ERROR(("info ring create status for not pending submit ring\n"));
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("info/btlog ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
+ dhd->prot->h2dring_info_subn->inited = TRUE;
+ DHD_ERROR(("info buffer post after ring create\n"));
+ dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
+ }
+}
+
+static void
+dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
+{
+ d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
+ DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+ ltoh16(resp->cmplt.status),
+ ltoh16(resp->cmplt.ring_id),
+ ltoh32(resp->cmn_hdr.request_id)));
+ if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
+#ifdef DHD_HP2P
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
+#endif /* DHD_HP2P */
+ TRUE) {
+ DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
+ return;
+ }
+ if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
+#ifdef EWP_EDL
+ if (!dhd->dongle_edl_support)
+#endif // endif
+ {
+ if (!dhd->prot->d2hring_info_cpln->create_pending) {
+ DHD_ERROR(("info ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("info cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+ dhd->prot->d2hring_info_cpln->inited = TRUE;
+ }
+#ifdef EWP_EDL
+ else {
+ if (!dhd->prot->d2hring_edl->create_pending) {
+ DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("edl cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_edl->create_pending = FALSE;
+ dhd->prot->d2hring_edl->inited = TRUE;
+ }
+#endif /* EWP_EDL */
+ }
+
+#ifdef DHD_HP2P
+ if (dhd->prot->d2hring_hp2p_txcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
+ DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
+ }
+ if (dhd->prot->d2hring_hp2p_rxcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
+ DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
+ }
+#endif /* DHD_HP2P */
+}
+
+static void
+dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
+{
+ d2h_mailbox_data_t *d2h_data;
+
+ d2h_data = (d2h_mailbox_data_t *)buf;
+ DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
+ d2h_data->d2h_mailbox_data));
+ dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
+}
+
+static void
+dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
+{
+ DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
+
+}
+
/** called on e.g. flow ring delete */
void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
{
@@ -5876,37 +9584,89 @@
void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
struct bcmstrbuf *strbuf, const char * fmt)
{
- const char *default_fmt = "RD %d WR %d\n";
+ const char *default_fmt =
+ "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
+ "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
uint16 rd, wr;
+ uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
if (fmt == NULL) {
fmt = default_fmt;
}
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
+ return;
+ }
+
dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
- bcm_bprintf(strbuf, fmt, rd, wr);
+ bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
+ ltoh32(flow_ring->base_addr.high_addr),
+ ltoh32(flow_ring->base_addr.low_addr),
+ flow_ring->item_len, flow_ring->max_items,
+ dma_buf_len);
}
void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
dhd_prot_t *prot = dhd->prot;
- bcm_bprintf(strbuf, "CtrlPost: ");
- dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL);
- bcm_bprintf(strbuf, "CtrlCpl: ");
- dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL);
+ bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
+ dhd->prot->device_ipc_version,
+ dhd->prot->host_ipc_version,
+ dhd->prot->active_ipc_version);
- bcm_bprintf(strbuf, "RxPost: ");
- bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost);
- dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL);
- bcm_bprintf(strbuf, "RxCpl: ");
- dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL);
+ bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
+ bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_infobufpost, dhd->prot->infobufpost);
+ bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
+ bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
+ bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
+ dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
- bcm_bprintf(strbuf, "TxCpl: ");
- dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL);
- bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n",
- dhd->prot->active_tx_count,
- DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle));
+ bcm_bprintf(strbuf,
+ "%14s %5s %5s %17s %17s %14s %14s %10s\n",
+ "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
+ "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
+ bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
+ bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
+ dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ }
+ if (dhd->prot->d2hring_edl != NULL) {
+ bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
+ dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
+ " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+ }
+
+ bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
+ OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
+ DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
+ DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
+ DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
}
int
@@ -5918,14 +9678,14 @@
uint16 alloced = 0;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ring->ring_lock, flags);
/* Request for ring buffer space */
flow_delete_rqst = (tx_flowring_delete_request_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_delete_rqst == NULL) {
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
return BCME_NOMEM;
}
@@ -5934,6 +9694,7 @@
flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
+ flow_delete_rqst->msg.flags = ring->current_phase;
flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
ring->seqnum++;
@@ -5949,9 +9710,39 @@
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return BCME_OK;
+}
+
+static void BCMFASTPATH
+dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
+{
+ flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+ host_txbuf_cmpl_t txstatus;
+ host_txbuf_post_t *txdesc;
+ uint16 wr_idx;
+
+ DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
+ __FUNCTION__, flowid, rd_idx, ring->wr));
+
+ memset(&txstatus, 0, sizeof(txstatus));
+ txstatus.compl_hdr.flow_ring_id = flowid;
+ txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
+ wr_idx = ring->wr;
+
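+	/* Walk backwards from the host WR index to the dongle-reported RD
+	 * index and synthesize a tx completion for each work item the dongle
+	 * never consumed, so their packet IDs and buffers are reclaimed.
+	 */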
+ while (wr_idx != rd_idx) {
+ if (wr_idx)
+ wr_idx--;
+ else
+ wr_idx = ring->max_items - 1;
+ txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
+ (wr_idx * ring->item_len));
+ txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
+ dhd_prot_txstatus_process(dhd, &txstatus);
+ }
}
static void
@@ -5962,8 +9753,49 @@
DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
+ if (dhd->fast_delete_ring_support) {
+ dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
+ flow_delete_resp->read_idx);
+ }
dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
flow_delete_resp->cmplt.status);
+}
+
+static void
+dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+ tx_idle_flowring_resume_response_t *flow_resume_resp =
+ (tx_idle_flowring_resume_response_t *)msg;
+
+ DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
+ flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
+
+ dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
+ flow_resume_resp->cmplt.status);
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+static void
+dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+ int16 status;
+ tx_idle_flowring_suspend_response_t *flow_suspend_resp =
+ (tx_idle_flowring_suspend_response_t *)msg;
+ status = flow_suspend_resp->cmplt.status;
+
+ DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
+ __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
+ status));
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Error in suspending flow rings!! "
+			"Dongle will still be polling idle rings!! Status = %d\n",
+			__FUNCTION__, status));
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
}
int
@@ -5975,13 +9807,13 @@
uint16 alloced = 0;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ring->ring_lock, flags);
/* Request for ring buffer space */
flow_flush_rqst = (tx_flowring_flush_request_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_flush_rqst == NULL) {
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
return BCME_NOMEM;
}
@@ -5990,7 +9822,7 @@
flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
-
+ flow_flush_rqst->msg.flags = ring->current_phase;
flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
ring->seqnum++;
@@ -6001,7 +9833,8 @@
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return BCME_OK;
} /* dhd_prot_flow_ring_flush */
@@ -6039,13 +9872,13 @@
const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
- DHD_GENERAL_LOCK(dhd, flags);
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
if (msg_start == NULL) {
DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
__FUNCTION__, d2h_rings));
- DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
return;
}
@@ -6091,17 +9924,501 @@
/* update control subn ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
- DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
}
static void
-dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg)
+dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
{
DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
}
+
+#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
+void
+copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
+{
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+ if (tlv) {
+ memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
+ }
+}
+#define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
+
+typedef struct {
+ char name[HANG_INFO_TRAP_T_NAME_MAX];
+ uint32 offset;
+} hang_info_trap_t;
+
+#ifdef DHD_EWPR_VER2
+static hang_info_trap_t hang_info_trap_tbl[] = {
+ {"reason", 0},
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
+ {"stype", 0},
+ TRAP_T_NAME_OFFSET(type),
+ TRAP_T_NAME_OFFSET(epc),
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"resrvd", 0},
+ {"", 0}
+};
+#else
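+/* Maps each hang info field name to its offset within trap_t so the full
+ * ARM register snapshot can be emitted by a simple table walk.
+ */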
+static hang_info_trap_t hang_info_trap_tbl[] = {
+ {"reason", 0},
+ {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
+ {"stype", 0},
+ TRAP_T_NAME_OFFSET(type),
+ TRAP_T_NAME_OFFSET(epc),
+ TRAP_T_NAME_OFFSET(cpsr),
+ TRAP_T_NAME_OFFSET(spsr),
+ TRAP_T_NAME_OFFSET(r0),
+ TRAP_T_NAME_OFFSET(r1),
+ TRAP_T_NAME_OFFSET(r2),
+ TRAP_T_NAME_OFFSET(r3),
+ TRAP_T_NAME_OFFSET(r4),
+ TRAP_T_NAME_OFFSET(r5),
+ TRAP_T_NAME_OFFSET(r6),
+ TRAP_T_NAME_OFFSET(r7),
+ TRAP_T_NAME_OFFSET(r8),
+ TRAP_T_NAME_OFFSET(r9),
+ TRAP_T_NAME_OFFSET(r10),
+ TRAP_T_NAME_OFFSET(r11),
+ TRAP_T_NAME_OFFSET(r12),
+ TRAP_T_NAME_OFFSET(r13),
+ TRAP_T_NAME_OFFSET(r14),
+ TRAP_T_NAME_OFFSET(pc),
+ {"", 0}
+};
+#endif /* DHD_EWPR_VER2 */
+
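+/* Tags that describe dongle state rather than the cause of the trap; these
+ * are skipped when deriving the hang subtype.
+ */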
+#define TAG_TRAP_IS_STATE(tag) \
+ ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
+ (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
+ (tag == TAG_TRAP_CODE))
+
+static void
+copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
+ int *bytes_written, int *cnt, char *cookie)
+{
+ uint8 *ptr;
+ int remain_len;
+ int i;
+
+ ptr = (uint8 *)src;
+
+ memset(dest, 0, len);
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ /* hang reason, hang info ver */
+ for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[i].name, HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
+ hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
+
+ }
+
+ if (*cnt < HANG_FIELD_CNT_MAX) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ "cookie", HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
+ cookie, HANG_KEY_DEL);
+ (*cnt)++;
+ }
+
+ if (*cnt < HANG_FIELD_CNT_MAX) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
+ HANG_KEY_DEL);
+ (*cnt)++;
+ }
+
+ if (*cnt < HANG_FIELD_CNT_MAX) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
+ *(uint32 *)
+ (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
+ HANG_KEY_DEL);
+ (*cnt)++;
+ }
+#ifdef DHD_EWPR_VER2
+ /* put 0 for HG03 ~ HG06 (reserved for future use) */
+ for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
+ HANG_KEY_DEL);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
+ HANG_KEY_DEL);
+ }
+#endif /* DHD_EWPR_VER2 */
+}
+#ifndef DHD_EWPR_VER2
+static void
+copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
+ int *bytes_written, int *cnt, char *cookie)
+{
+ uint8 *ptr;
+ int remain_len;
+ int i;
+
+ ptr = (uint8 *)src;
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
+ (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
+ i++, (*cnt)++) {
+ if (field_name) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
+ HANG_RAW_DEL, hang_info_trap_tbl[i].name);
+ }
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
+ }
+}
+
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+
+static void
+copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ int i = 0;
+ const uint32 *stack;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+ int remain_stack_cnt = 0;
+ uint32 dummy_data = 0;
+ int bigdata_key_stack_cnt = 0;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ if (tlv) {
+ stack = (const uint32 *)tlv->data;
+
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
+ "%08x", *(uint32 *)(stack++));
+ (*cnt)++;
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+ for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+			/* Raw data for bigdata uses '_'; key data for bigdata uses space */
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
+ "%c%08x",
+ i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
+ *(uint32 *)(stack++));
+
+ (*cnt)++;
+ if ((*cnt >= HANG_FIELD_CNT_MAX) ||
+ (i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
+ return;
+ }
+ }
+ }
+
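+	/* Pad any remaining stack slots with zeros so the hang info string
+	 * always carries HANG_FIELD_TRAP_T_STACK_CNT_MAX stack fields.
+	 */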
+ remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
+
+ for (i = 0; i < remain_stack_cnt; i++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, dummy_data);
+ (*cnt)++;
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+ }
+
+}
+
+static void
+copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ int i;
+ const uint32 *data;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+ int remain_trap_data = 0;
+ uint8 buf_u8[sizeof(uint32)] = { 0, };
+ const uint8 *p_u8;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+ if (tlv) {
+		/* account for the TLV header as well */
+ remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+ if (tlv) {
+		/* account for the TLV header as well */
+ remain_trap_data -= (tlv->len + sizeof(uint16));
+ }
+
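+	/* Whatever is left after the signature and stack TLVs is dumped as
+	 * raw 32-bit words, with any trailing partial word handled below.
+	 */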
+ data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
+ i++, (*cnt)++) {
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, *(uint32 *)(data++));
+ }
+
+ if (*cnt >= HANG_FIELD_CNT_MAX) {
+ return;
+ }
+
+ remain_trap_data -= (sizeof(uint32) * i);
+
+ if (remain_trap_data > sizeof(buf_u8)) {
+ DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
+ remain_trap_data = sizeof(buf_u8);
+ }
+
+ if (remain_trap_data) {
+ p_u8 = (const uint8 *)data;
+ for (i = 0; i < remain_trap_data; i++) {
+ buf_u8[i] = *(const uint8 *)(p_u8++);
+ }
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
+ HANG_RAW_DEL, ltoh32_ua(buf_u8));
+ (*cnt)++;
+ }
+}
+#endif /* DHD_EWPR_VER2 */
+
+static void
+get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
+{
+ uint32 i;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+	/* Use the first tag that is not a state dump as the trap subtype */
+ for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
+ if (tlv) {
+ if (!TAG_TRAP_IS_STATE(i)) {
+ *subtype = i;
+ return;
+ }
+ }
+ }
+}
+#ifdef DHD_EWPR_VER2
+static void
+copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
+{
+ int remain_len;
+ uint32 *ext_data = dhd->extended_trap_data;
+ hnd_ext_trap_hdr_t *hdr;
+ char *base64_out = NULL;
+ int base64_cnt;
+ int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
+
+ if (ext_data == NULL) {
+ return;
+ }
+ /* First word is original trap_data */
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+
+ remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
+
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
+ return;
+ }
+
+ if (remain_len < max_base64_len) {
+ DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
+ remain_len));
+ max_base64_len = remain_len;
+ }
+
+ base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
+ if (base64_out == NULL) {
+ DHD_ERROR(("%s: MALLOC failed for size %d\n",
+ __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
+ return;
+ }
+
+ if (hdr->len > 0) {
+ base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
+ if (base64_cnt == 0) {
+ DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
+ }
+ }
+
+ *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
+ base64_out);
+ (*cnt)++;
+ MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
+}
+#endif /* DHD_EWPR_VER2 */
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+void
+copy_hang_info_trap(dhd_pub_t *dhd)
+{
+ trap_t tr;
+ int bytes_written;
+ int trap_subtype = 0;
+
+ if (!dhd || !dhd->hang_info) {
+ DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
+ dhd, (dhd ? dhd->hang_info : NULL)));
+ return;
+ }
+	DHD_ERROR(("Timesync feature not compiled in but got FW TS message\n"));
+ DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
+ return;
+ }
+
+ memset(&tr, 0x00, sizeof(struct _trap_struct));
+
+ copy_ext_trap_sig(dhd, &tr);
+ get_hang_info_trap_subtype(dhd, &trap_subtype);
+
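+	/* The reason and subtype slots of hang_info_trap_tbl are reused to
+	 * carry per-dump values rather than trap_t offsets.
+	 */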
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
+ hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
+
+ bytes_written = 0;
+ dhd->hang_info_cnt = 0;
+ get_debug_dump_time(dhd->debug_dump_time_hang_str);
+ copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
+
+ copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
+ &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
+
+	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+
+ clear_debug_dump_time(dhd->debug_dump_time_hang_str);
+
+#ifdef DHD_EWPR_VER2
+ /* stack info & trap info are included in etd data */
+
+ /* extended trap data dump */
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+		DHD_INFO(("hang info etd cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+#else
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+ DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
+ &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
+ DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+
+ if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
+ copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
+ DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
+ dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
+ }
+#endif /* DHD_EWPR_VER2 */
+}
+#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
int
dhd_prot_debug_info_print(dhd_pub_t *dhd)
@@ -6109,93 +10426,266 @@
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring;
uint16 rd, wr;
- uint32 intstatus = 0;
- uint32 intmask = 0;
- uint32 mbintstatus = 0;
- uint32 d2h_mb_data = 0;
uint32 dma_buf_len;
+ uint64 current_time;
+ ulong ring_tcm_rd_addr; /* dongle address */
+ ulong ring_tcm_wr_addr; /* dongle address */
+
+ DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
+ DHD_ERROR(("DHD: %s\n", dhd_version));
+ DHD_ERROR(("Firmware: %s\n", fw_version));
+
+ DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
+	DHD_ERROR(("IPCrevs: Dev %d, Host %d, active %d\n",
+ prot->device_ipc_version,
+ prot->host_ipc_version,
+ prot->active_ipc_version));
+ DHD_ERROR(("d2h_intr_method -> %s\n",
+ dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
+ DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
+ prot->max_tsbufpost, prot->cur_ts_bufs_posted));
+ DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
+ prot->max_infobufpost, prot->infobufpost));
+ DHD_ERROR(("max event bufs to post: %d, posted %d\n",
+ prot->max_eventbufpost, prot->cur_event_bufs_posted));
+ DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
+ prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
+ DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
+ prot->max_rxbufpost, prot->rxbufpost));
+ DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+ h2d_max_txpost, prot->h2d_max_txpost));
+
+ current_time = OSL_LOCALTIME_NS();
+ DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
+ DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
+ " ioctl_ack_time="SEC_USEC_FMT
+ " ioctl_cmplt_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(prot->ioctl_fillup_time),
+ GET_SEC_USEC(prot->ioctl_ack_time),
+ GET_SEC_USEC(prot->ioctl_cmplt_time)));
+
+ /* Check PCIe INT registers */
+ if (!dhd_pcie_dump_int_regs(dhd)) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
ring = &prot->h2dring_ctrl_subn;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
ring = &prot->d2hring_ctrl_cpln;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
- mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
- dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ ring = prot->h2dring_info_subn;
+ if (ring) {
+ dma_buf_len = ring->max_items * ring->item_len;
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+ }
+ ring = prot->d2hring_info_cpln;
+ if (ring) {
+ dma_buf_len = ring->max_items * ring->item_len;
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
- DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
- DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n,",
- intstatus, intmask, mbintstatus));
- DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask));
+ ring = &prot->d2hring_tx_cpln;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
+
+ ring = &prot->d2hring_rx_cpln;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
+ }
+#ifdef EWP_EDL
+ ring = prot->d2hring_edl;
+ if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
+ DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
+ " due to PCIe link down\r\n"));
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ }
+ DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
+ ring->seqnum % D2H_EPOCH_MODULO));
+ }
+#endif /* EWP_EDL */
+
+ DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
+ __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
+ DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
+ DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
+
+ dhd_pcie_debug_info_dump(dhd);
return 0;
}
-
int
dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
uint32 *ptr;
uint32 value;
- uint32 i;
- uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
- dhd->prot->d2h_dma_indx_wr_buf.len);
+ if (dhd->prot->d2h_dma_indx_wr_buf.va) {
+ uint32 i;
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
- ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
+ dhd->prot->d2h_dma_indx_wr_buf.len);
- bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
+ ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
- bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+ bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
- ptr++;
- bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
- for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+		bcm_bprintf(b, "\nRPTR block H2D common rings, %p\n", ptr);
value = ltoh32(*ptr);
- bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+ bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+ ptr++;
+		bcm_bprintf(b, "RPTR block Flow rings, %p\n", ptr);
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+ ptr++;
+ }
}
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
- dhd->prot->h2d_dma_indx_rd_buf.len);
+ if (dhd->prot->h2d_dma_indx_rd_buf.va) {
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
+ dhd->prot->h2d_dma_indx_rd_buf.len);
- ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
+ ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
- bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+		bcm_bprintf(b, "\nWPTR block D2H common rings, %p\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+ }
return 0;
}
@@ -6206,7 +10696,7 @@
dhd_prot_t *prot = dhd->prot;
#if DHD_DBG_SHOW_METADATA
prot->metadata_dbg = val;
-#endif
+#endif // endif
return (uint32)prot->metadata_dbg;
}
@@ -6331,3 +10821,910 @@
}
#endif /* DHD_RX_CHAINING */
+
+#ifdef IDLE_TX_FLOW_MGMT
+int
+dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_idle_flowring_resume_request_t *flow_resume_rqst;
+ msgbuf_ring_t *flow_ring;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+
+ /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
+ flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+ if (flow_ring == NULL) {
+ DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+ __FUNCTION__, flow_ring_node->flowid));
+ return BCME_NOMEM;
+ }
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+ /* Request for ctrl_ring buffer space */
+ flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
+
+ if (flow_resume_rqst == NULL) {
+ dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+ DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
+ __FUNCTION__, flow_ring_node->flowid));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ return BCME_NOMEM;
+ }
+
+ flow_ring_node->prot_info = (void *)flow_ring;
+
+ /* Common msg buf hdr */
+ flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
+ flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
+ __FUNCTION__, flow_ring_node->flowid));
+
+ /* Update the flow_ring's WRITE index */
+ if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_IFRM_INDX_WR_UPD,
+ (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+ sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+ }
+
+ /* update control subn ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ return BCME_OK;
+} /* dhd_prot_flow_ring_resume */
+
+int
+dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
+{
+ tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
+ dhd_prot_t *prot = dhd->prot;
+ unsigned long flags;
+ uint16 index;
+ uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+ DHD_RING_LOCK(ring->ring_lock, flags);
+
+ /* Request for ring buffer space */
+ flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+ if (flow_suspend_rqst == NULL) {
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
+ /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
+ flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+ /* Update flow id info */
+ for (index = 0; index < count; index++)
+ {
+ flow_suspend_rqst->ring_id[index] = ringid[index];
+ }
+ flow_suspend_rqst->num = count;
+
+ DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
+
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ return BCME_OK;
+}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
+{
+ switch (tag)
+ {
+ case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
+ case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
+ case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
+ case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
+ case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
+ case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
+ case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
+ case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
+ case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
+ case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
+ case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
+ case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
+ case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
+ case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
+ case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
+ case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
+ case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
+ case TAG_TRAP_LAST:
+ default:
+ return "Unknown";
+ }
+ return "Unknown";
+}
+
+int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
+{
+ uint32 i;
+ uint32 *ext_data;
+ hnd_ext_trap_hdr_t *hdr;
+ const bcm_tlv_t *tlv;
+ const trap_t *tr;
+ const uint32 *stack;
+ const hnd_ext_trap_bp_err_t *bpe;
+ uint32 raw_len;
+
+ ext_data = dhdp->extended_trap_data;
+
+ /* return if there is no extended trap data */
+ if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
+ {
+ bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
+ return BCME_OK;
+ }
+
+ bcm_bprintf(b, "Extended trap data\n");
+
+ /* First word is original trap_data */
+ bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
+ ext_data++;
+
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+ bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
+
+ /* Dump a list of all tags found before parsing data */
+ bcm_bprintf(b, "\nTags Found:\n");
+ for (i = 0; i < TAG_TRAP_LAST; i++) {
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
+ if (tlv)
+ bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
+ }
+
+ if (raw)
+ {
+ raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
+ for (i = 0; i < raw_len; i++)
+ {
+ bcm_bprintf(b, "0x%08x ", ext_data[i]);
+ if (i % 4 == 3)
+ bcm_bprintf(b, "\n");
+ }
+ return BCME_OK;
+ }
+
+ /* Extract the various supported TLVs from the extended trap data */
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
+ if (tlv)
+ {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
+ bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+ if (tlv)
+ {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
+ tr = (const trap_t *)tlv->data;
+
+ bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
+ tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
+ bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
+ tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
+ bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
+ tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+ if (tlv)
+ {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
+ stack = (const uint32 *)tlv->data;
+ for (i = 0; i < (uint32)(tlv->len / 4); i++)
+ {
+ bcm_bprintf(b, " 0x%08x\n", *stack);
+ stack++;
+ }
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
+ if (tlv)
+ {
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
+ bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
+ bcm_bprintf(b, " error: %x\n", bpe->error);
+ bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
+ bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
+ bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
+ bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
+ bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
+ bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
+ bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
+ bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
+ bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
+ bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
+ bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
+ bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
+ bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
+ bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
+ if (tlv)
+ {
+ const hnd_ext_trap_heap_err_t* hme;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
+ hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
+ bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
+ bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
+ bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
+ bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
+ bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
+
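+		/* Each histogram entry is a (size, count) pair; the size is in
+		 * 4-byte units, and 0xfffe/0xffff mark the overflow buckets.
+		 */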
+ bcm_bprintf(b, " Histogram:\n");
+ for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
+ if (hme->heap_histogm[i] == 0xfffe)
+ bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
+ else if (hme->heap_histogm[i] == 0xffff)
+ bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
+ else
+ bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
+ hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
+ * hme->heap_histogm[i + 1]);
+ }
+
+ bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
+ for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
+			bcm_bprintf(b, " Next largest free block: %d\n", hme->max_sz_free_blk[i] << 2);
+ }
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
+ if (tlv)
+ {
+ const hnd_ext_trap_pcie_mem_err_t* pqme;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
+ pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
+ bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
+ bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
+ if (tlv)
+ {
+ const hnd_ext_trap_wlc_mem_err_t* wsme;
+
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
+ wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
+ bcm_bprintf(b, " instance: %d\n", wsme->instance);
+ bcm_bprintf(b, " associated: %d\n", wsme->associated);
+ bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
+ bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
+ bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
+ bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
+ bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
+ bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
+
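+		/* A second wlc instance may follow the first within the same TLV */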
+ if (tlv->len >= (sizeof(*wsme) * 2)) {
+ wsme++;
+ bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
+ bcm_bprintf(b, " associated: %d\n", wsme->associated);
+ bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
+ bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
+ bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
+ bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
+ bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
+ bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
+ }
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
+ if (tlv)
+ {
+ const hnd_ext_trap_phydbg_t* phydbg;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
+ phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
+ bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
+ bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
+ bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
+ bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
+ bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
+ bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
+ bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
+ bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
+ bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
+ bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
+ bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
+ bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
+ bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
+ bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
+ bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
+ bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
+ bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
+ bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
+ bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
+ bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
+ bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
+ bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
+ bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
+ bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
+ bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
+ bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
+ bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
+ for (i = 0; i < 3; i++)
+ bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
+ if (tlv)
+ {
+ const hnd_ext_trap_psmwd_t* psmwd;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
+ psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
+ bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
+ bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
+ bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
+ bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
+ bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
+ bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
+ for (i = 0; i < 3; i++)
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
+ bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
+ bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
+ bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
+ bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
+ bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
+ bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
+ bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
+ bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
+ bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
+ bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
+ bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
+ bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
+ bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
+ bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
+ bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
+ bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
+ bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
+ bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
+ if (tlv)
+ {
+ const hnd_ext_trap_macsusp_t* macsusp;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
+ macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
+ bcm_bprintf(b, " version: %d\n", macsusp->version);
+ bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
+ bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
+ bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
+ bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
+ for (i = 0; i < 4; i++)
+ bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
+ for (i = 0; i < 8; i++)
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
+ bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
+ bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
+ bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
+ bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
+ bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
+ bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
+ bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
+ bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
+ bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
+ bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
+ bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
+ bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
+ if (tlv)
+ {
+ const hnd_ext_trap_macenab_t* macwake;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
+ macwake = (const hnd_ext_trap_macenab_t *)tlv;
+ bcm_bprintf(b, " version: 0x%x\n", macwake->version);
+ bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
+ bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
+ bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
+ bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
+ for (i = 0; i < 8; i++)
+ bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
+ bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
+ bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
+ bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
+ bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
+ bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
+ bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
+ bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
+ bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
+ bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
+ bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
+ bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
+ bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
+ bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
+ if (tlv)
+ {
+ const bcm_dngl_pcie_hc_t* hc;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
+ hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
+ bcm_bprintf(b, " version: 0x%x\n", hc->version);
+ bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
+ bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
+ bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
+ bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
+ for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
+ bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
+ }
+
+ tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
+ if (tlv)
+ {
+ const pcie_hmapviolation_t* hmap;
+ hmap = (const pcie_hmapviolation_t *)tlv->data;
+ bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
+ bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
+ bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
+ bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
+ }
+
+ return BCME_OK;
+}
+
+#ifdef BCMPCIE
+int
+dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
+ uint16 seqnum, uint16 xt_id)
+{
+ dhd_prot_t *prot = dhdp->prot;
+ host_timestamp_msg_t *ts_req;
+ unsigned long flags;
+ uint16 alloced = 0;
+ uchar *ts_tlv_buf;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+
+ if ((tlvs == NULL) || (tlv_len == 0)) {
+ DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
+ __FUNCTION__, tlvs, tlv_len));
+ return -1;
+ }
+
+ DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+	/* bail out if a host TS request is already pending at the device */
+ if (prot->hostts_req_buf_inuse == TRUE) {
+ DHD_ERROR(("one host TS request already pending at device\n"));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ return -1;
+ }
+
+	/* Request for ctrl ring buffer space */
+ ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
+ if (ts_req == NULL) {
+ DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ return -1;
+ }
+
+ /* Common msg buf hdr */
+ ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
+ ts_req->msg.if_id = 0;
+ ts_req->msg.flags = ctrl_ring->current_phase;
+ ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
+
+ ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ ts_req->xt_id = xt_id;
+ ts_req->seqnum = seqnum;
+ /* populate TS req buffer info */
+ ts_req->input_data_len = htol16(tlv_len);
+ ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
+ ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
+ /* Copy the timestamp TLV payload into the host TS request buffer */
+ ts_tlv_buf = (void *) prot->hostts_req_buf.va;
+ prot->hostts_req_buf_inuse = TRUE;
+ memcpy(ts_tlv_buf, tlvs, tlv_len);
+
+ OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
+
+ if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
+ DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
+ }
+
+ DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
+ ts_req->msg.request_id, ts_req->input_data_len,
+ ts_req->xt_id, ts_req->seqnum));
+
+ /* Update the write pointer and ring the doorbell interrupt */
+ dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+
+ return 0;
+} /* dhd_prot_send_host_timestamp */
+
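+/* The accessors below share one pattern: when 'set' is TRUE the new value is
+ * stored, and the current value is always returned, so a single call serves
+ * as both getter and setter.
+ */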
+bool
+dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->tx_ts_log_enabled = enable;
+
+ return dhd->prot->tx_ts_log_enabled;
+}
+
+bool
+dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->rx_ts_log_enabled = enable;
+
+ return dhd->prot->rx_ts_log_enabled;
+}
+
+bool
+dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->no_retry = enable;
+
+ return dhd->prot->no_retry;
+}
+
+bool
+dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->no_aggr = enable;
+
+ return dhd->prot->no_aggr;
+}
+
+bool
+dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
+{
+ if (set)
+ dhd->prot->fixed_rate = enable;
+
+ return dhd->prot->fixed_rate;
+}
+#endif /* BCMPCIE */
+
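+/* Free the DMA-able buffers backing the H2D write-index and D2H read-index
+ * arrays used when ring indices are exchanged over DMA.
+ */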
+void
+dhd_prot_dma_indx_free(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+}
+
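+/* Repost timestamp completion buffers, but only if timestamp buffer posting
+ * is enabled (max_tsbufpost > 0).
+ */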
+void
+dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
+{
+ if (dhd->prot->max_tsbufpost > 0)
+ dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+}
+
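+/* Stub handler: full firmware timestamp processing is presumably compiled in
+ * only with timesync support, so just flag the unexpected message.
+ */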
+static void BCMFASTPATH
+dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
+{
+ DHD_ERROR(("Timesync feature not compiled in but received FW TS message\n"));
+}
+
+uint16
+dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
+{
+ return dhdp->prot->ioctl_trans_id;
+}
+
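+/* Return the virtual address and length of the host SCB buffer. When HSCB is
+ * disabled, report a zero length (if the caller asked for it) instead of
+ * BCME_UNSUPPORTED, so no spurious error is logged.
+ */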
+int dhd_get_hscb_info(dhd_pub_t *dhd, void **va, uint32 *len)
+{
+ if (!dhd->hscb_enable) {
+ if (len) {
+ /* prevent "Operation not supported" dhd message */
+ *len = 0;
+ return BCME_OK;
+ }
+ return BCME_UNSUPPORTED;
+ }
+
+ if (va) {
+ *va = dhd->prot->host_scb_buf.va;
+ }
+ if (len) {
+ *len = dhd->prot->host_scb_buf.len;
+ }
+
+ return BCME_OK;
+}
+
+#ifdef DHD_BUS_MEM_ACCESS
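+/* Bounds-checked copy of a slice of the host SCB buffer into 'buff'; the
+ * 64-bit addition guards the offset+length check against 32-bit overflow.
+ */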
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
+{
+ if (!dhd->hscb_enable) {
+ return BCME_UNSUPPORTED;
+ }
+
+ if (dhd->prot->host_scb_buf.va == NULL ||
+ ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
+ return BCME_BADADDR;
+ }
+
+ memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
+
+ return BCME_OK;
+}
+#endif /* DHD_BUS_MEM_ACCESS */
+
+#ifdef DHD_HP2P
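+/* HP2P tuning knobs: each accessor below stores the new value when 'set' is
+ * TRUE and returns the current value.
+ */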
+uint32
+dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_thresh = (uint16)val;
+
+ val = dhd->pkt_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->time_thresh = (uint16)val;
+
+ val = dhd->time_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_expiry = (uint16)val;
+
+ val = dhd->pkt_expiry;
+
+ return val;
+}
+
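+/* Enable/query HP2P. The low nibble of 'enable' controls HP2P proper and the
+ * next nibble the infra variant; a nonzero 'enable' switches the flow
+ * priority map to TID-based, zero reverts it to AC-based. The return value
+ * packs the current state the same way: (infra_enable << 4) | hp2p_enable.
+ */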
+uint8
+dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
+{
+ uint8 ret = 0;
+ if (set) {
+ dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
+ dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
+
+ if (enable) {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
+ } else {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
+ }
+ }
+ ret = dhd->hp2p_infra_enable ? 0x1 : 0x0;
+ ret <<= 4;
+ ret |= dhd->hp2p_enable ? 0x1 : 0x0;
+
+ return ret;
+}
+
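+/* Bin the RX completion latency into a histogram: bits 0..9 of ts->high are
+ * scaled by HP2P_TIME_SCALE/100 and clamped to the last bin.
+ */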
+static void
+dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
+ hp2p_info_t *hp2p_info;
+ uint32 dur1;
+
+ hp2p_info = &dhd->hp2p_info[0];
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
+
+ if (dur1 > (MAX_RX_HIST_BIN - 1)) {
+ dur1 = MAX_RX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n",
+ __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
+ return;
+}
+
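+/* Bin two TX completion latencies: bits 0..9 and 10..19 of ts->high, scaled
+ * by HP2P_TIME_SCALE/1000, go into the tx_t0 and tx_t1 histograms of the
+ * hp2p flow derived from the flow ring id.
+ */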
+static void
+dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
+ uint16 flowid = txstatus->compl_hdr.flow_ring_id;
+ uint32 hp2p_flowid, dur1, dur2;
+ hp2p_info_t *hp2p_info;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+ ts = (ts_timestamp_t *)&(txstatus->ts);
+
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur1 > (MAX_TX_HIST_BIN - 1)) {
+ dur1 = MAX_TX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+ hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
+
+ dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur2 > (MAX_TX_HIST_BIN - 1)) {
+ dur2 = MAX_TX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
+ return;
+}
+
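+/* hrtimer callback: when the burst timer fires, flush any TX descriptors
+ * still pending on the flow ring and account the timer-driven flush.
+ */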
+enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
+{
+ hp2p_info_t *hp2p_info;
+ unsigned long flags;
+ dhd_pub_t *dhdp;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+ hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
+#else
+ hp2p_info = container_of(timer, hp2p_info_t, timer);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
+ dhdp = hp2p_info->dhd_pub;
+ if (!dhdp) {
+ goto done;
+ }
+
+ DHD_INFO(("%s: pend_item = %d flowid = %d\n",
+ __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
+ hp2p_info->flowid));
+
+ flags = dhd_os_hp2plock(dhdp);
+
+ dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->num_timer_limit++;
+
+ dhd_os_hp2punlock(dhdp, flags);
+done:
+ return HRTIMER_NORESTART;
+}
+
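+/* Burst aggregation: once pend_items_count reaches pkt_thresh, flush the
+ * ring immediately and cancel the timer; otherwise arm the hrtimer so the
+ * pending packets are flushed after time_thresh (apparently in microseconds,
+ * given the ktime_set(0, time_thresh * 1000) nanosecond argument).
+ */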
+static void
+dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
+{
+ hp2p_info_t *hp2p_info;
+ uint16 hp2p_flowid;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+
+ if (ring->pend_items_count == dhd->pkt_thresh) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->ring = NULL;
+ hp2p_info->num_pkt_limit++;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
+ tasklet_hrtimer_cancel(&hp2p_info->timer);
+#else
+ hrtimer_cancel(&hp2p_info->timer);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
+ DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
+ "hp2p_flowid = %d pkt_thresh = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
+ } else {
+ if (hp2p_info->hrtimer_init == FALSE) {
+ hp2p_info->hrtimer_init = TRUE;
+ hp2p_info->flowid = flowid;
+ hp2p_info->dhd_pub = dhd;
+ hp2p_info->ring = ring;
+ hp2p_info->num_timer_start++;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
+ tasklet_hrtimer_start(&hp2p_info->timer,
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
+#else
+ hrtimer_start(&hp2p_info->timer,
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL_SOFT);
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
+
+ DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid));
+ }
+ }
+ return;
+}
+
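+/* The metadata buffer address fields are repurposed to carry a host
+ * timestamp in microseconds (local_clock() returns ns, divided by 1000),
+ * and the packet expiry time is set from pkt_expiry.
+ */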
+static void
+dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
+{
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ txdesc->metadata_buf_len = 0;
+ txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
+ txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
+ txdesc->exp_time = dhd->pkt_expiry;
+
+ DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
+ __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
+ txdesc->metadata_buf_addr.low_addr,
+ txdesc->exp_time));
+
+ return;
+}
+#endif /* DHD_HP2P */
+
+#ifdef DHD_MAP_LOGGING
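+/* On an SMMU fault, dump protocol debug state and the OSL DMA mapping
+ * records, then (if core dump support is compiled in) trigger a firmware
+ * memory dump tagged DUMP_TYPE_SMMU_FAULT.
+ */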
+void
+dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
+{
+ dhd_prot_debug_info_print(dhdp);
+ OSL_DMA_MAP_DUMP(dhdp->osh);
+#ifdef DHD_MAP_PKTID_LOGGING
+ dhd_pktid_logging_dump(dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+#ifdef DHD_FW_COREDUMP
+ dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ dhd_bus_get_mem_dump(dhdp);
+#else
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* DHD_FW_COREDUMP */
+}
+#endif /* DHD_MAP_LOGGING */
--
Gitblit v1.6.2