From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] add boot partition size

---
 kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_flowring.c |  379 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 310 insertions(+), 69 deletions(-)

diff --git a/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_flowring.c b/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_flowring.c
index 24d4189..976fc32 100644
--- a/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_flowring.c
+++ b/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_flowring.c
@@ -1,18 +1,19 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
  * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
  *
  * Flow rings are transmit traffic (=propagating towards antenna) related entities
  *
  *
- * Copyright (C) 1999-2019, Broadcom Corporation
- * 
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
  *      Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2 (the "GPL"),
  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  * following added to such license:
- * 
+ *
  *      As a special exception, the copyright holders of this software give you
  * permission to link this software with independent modules, and to copy and
  * distribute the resulting executable under terms of your choice, provided that
@@ -20,7 +21,7 @@
  * the license of that module.  An independent module is a module which is not
  * derived from this software.  The special exception does not apply to any
  * modifications of the software.
- * 
+ *
  *      Notwithstanding the above, under no circumstances may you combine this
  * software in any way with any other Broadcom software provided under a license
  * other than the GPL, without Broadcom's express prior written consent.
@@ -28,17 +29,16 @@
  *
  * <<Broadcom-WL-IPTag/Open:>>
  *
- * $Id: dhd_flowrings.c jaganlv $
+ * $Id: dhd_flowring.c 699841 2017-05-16 16:47:06Z $
  */
-
 
 #include <typedefs.h>
 #include <bcmutils.h>
 #include <bcmendian.h>
 #include <bcmdevs.h>
 
-#include <proto/ethernet.h>
-#include <proto/bcmevent.h>
+#include <ethernet.h>
+#include <bcmevent.h>
 #include <dngl_stats.h>
 
 #include <dhd.h>
@@ -47,11 +47,10 @@
 #include <dhd_bus.h>
 #include <dhd_proto.h>
 #include <dhd_dbg.h>
-#include <proto/802.1d.h>
+#include <802.1d.h>
 #include <pcie_core.h>
 #include <bcmmsgbuf.h>
 #include <dhd_pcie.h>
-
 
 static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
 
@@ -68,11 +67,7 @@
 #define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
 #define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))
 
-#ifdef DHD_LOSSLESS_ROAMING
-const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
-#else
 const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
-#endif
 const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 
 /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
@@ -96,6 +91,9 @@
 
 	ASSERT(dhdp != (dhd_pub_t*)NULL);
 	ASSERT(flowid < dhdp->num_flow_rings);
+	if (flowid >= dhdp->num_flow_rings) {
+		return NULL;
+	}
 
 	flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
 
@@ -107,10 +105,13 @@
 flow_queue_t *
 dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
 {
-	flow_ring_node_t * flow_ring_node;
+	flow_ring_node_t * flow_ring_node = NULL;
 
 	flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
-	return &flow_ring_node->queue;
+	if (flow_ring_node)
+		return &flow_ring_node->queue;
+	else
+		return NULL;
 }
 
 /* Flow ring's queue management functions */
@@ -244,7 +245,7 @@
                      int queue_budget, int cumm_threshold, void *cumm_ctr,
                      int l2cumm_threshold, void *l2cumm_ctr)
 {
-	flow_queue_t * queue;
+	flow_queue_t * queue = NULL;
 
 	ASSERT(dhdp != (dhd_pub_t*)NULL);
 	ASSERT(queue_budget > 1);
@@ -254,16 +255,59 @@
 	ASSERT(l2cumm_ctr != (void*)NULL);
 
 	queue = dhd_flow_queue(dhdp, flowid);
+	if (queue) {
+		DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
 
-	DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
+		/* Set the queue's parent threshold and cumulative counter */
+		DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+		DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
 
-	/* Set the queue's parent threshold and cummulative counter */
-	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
-	DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+		/* Set the queue's grandparent threshold and cumulative counter */
+		DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
+		DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+	}
+}
 
-	/* Set the queue's grandparent threshold and cummulative counter */
-	DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
-	DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+uint8
+dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
+{
+	uint8 prio_count = 0;
+	int i;
+	// Pick all elements one by one
+	for (i = 0; i < NUMPRIO; i++)
+	{
+		// Check if the picked element is already counted
+		int j;
+		for (j = 0; j < i; j++) {
+			if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
+				break;
+			}
+		}
+		// If not counted earlier, then count it
+		if (i == j) {
+			prio_count++;
+		}
+	}
+
+#ifdef DHD_LOSSLESS_ROAMING
+	/* For LLR, we are using flowring with prio 7 which is not considered
+	 * in prio2ac array. But in __dhd_sendpkt, the prio is hardcoded
+	 * to PRIO_8021D_NC and sent to dhd_flowid_update.
+	 * So add 1 to prio_count.
+	 */
+	prio_count++;
+#endif /* DHD_LOSSLESS_ROAMING */
+
+	return prio_count;
+}
+
+uint8
+dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
+{
+	uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
+	uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
+	uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
+	return max_multi_client_flow_rings;
 }
 
 /** Initializes data structures of multiple flow rings */
@@ -284,7 +328,7 @@
 
 	/* Construct a 16bit flowid allocator */
 	flowid_allocator = id16_map_init(dhdp->osh,
-	                       num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
+	                       num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
 	if (flowid_allocator == NULL) {
 		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
 		return BCME_NOMEM;
@@ -306,6 +350,9 @@
 		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
 		flow_ring_table[idx].flowid = (uint16)idx;
 		flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
+#ifdef IDLE_TX_FLOW_MGMT
+		flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
 		if (flow_ring_table[idx].lock == NULL) {
 			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
 			goto fail;
@@ -346,9 +393,13 @@
 
 	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
 	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+	dhdp->multi_client_flow_rings = 0U;
+
 #ifdef DHD_LOSSLESS_ROAMING
 	dhdp->dequeue_prec_map = ALLPRIO;
-#endif
+#endif /* DHD_LOSSLESS_ROAMING */
 	/* Now populate into dhd pub */
 	DHD_FLOWID_LOCK(lock, flags);
 	dhdp->num_flow_rings = num_flow_rings;
@@ -445,6 +496,9 @@
 	dhdp->num_flow_rings = 0U;
 	bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
 
+	dhdp->max_multi_client_flow_rings = 0U;
+	dhdp->multi_client_flow_rings = 0U;
+
 	lock = dhdp->flowid_lock;
 	dhdp->flowid_lock = NULL;
 
@@ -474,13 +528,20 @@
 #ifdef WLTDLS
 bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
 {
-	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+	unsigned long flags;
+	tdls_peer_node_t *cur = NULL;
+
+	DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+	cur = dhdp->peer_tbl.node;
+
 	while (cur != NULL) {
 		if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+			DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
 			return TRUE;
 		}
 		cur = cur->next;
 	}
+	DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
 	return FALSE;
 }
 #endif /* WLTDLS */
@@ -495,13 +556,16 @@
 	if_flow_lkup_t *if_flow_lkup;
 	unsigned long flags;
 
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return FLOWID_INVALID;
+
 	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
 	ASSERT(if_flow_lkup);
 
-	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
-			(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
 #ifdef WLTDLS
 		if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
 			is_tdls_destination(dhdp, da)) {
@@ -561,7 +625,11 @@
 	uint16 flowid;
 	unsigned long flags;
 
-	fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t));
+	fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
+	if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
 	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
 
 	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
@@ -571,7 +639,7 @@
 
 	if (flowid == FLOWID_INVALID) {
 		MFREE(dhdp->osh, fl_hash_node,  sizeof(flow_hash_info_t));
-		DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+		DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
 		return FLOWID_INVALID;
 	}
 
@@ -583,9 +651,8 @@
 	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
-	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
-			(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
-		/* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
+	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+		/* For STA/GC non TDLS dest and WDS dest we allocate entry based on prio only */
 #ifdef WLTDLS
 		if (dhdp->peer_tbl.tdls_peer_count &&
 			(is_tdls_destination(dhdp, da))) {
@@ -619,6 +686,15 @@
 
 	DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
 
+	if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
+		DHD_ERROR(("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
+			__FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings,
+			ifindex, prio, if_flow_lkup[ifindex].role));
+		dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+		dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
+		return FLOWID_INVALID;
+	}
+
 	return fl_hash_node->flowid;
 } /* dhd_flowid_alloc */
 
@@ -633,36 +709,67 @@
 	unsigned long flags;
 	int ret;
 
-	DHD_INFO(("%s\n", __FUNCTION__));
+	DHD_TRACE(("%s\n", __FUNCTION__));
 
 	if (!dhdp->flow_ring_table) {
 		return BCME_ERROR;
 	}
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
 
 	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
 
 	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
 
 	if (id == FLOWID_INVALID) {
-
+		bool if_role_multi_client;
 		if_flow_lkup_t *if_flow_lkup;
 		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
 		if (!if_flow_lkup[ifindex].status)
 			return BCME_ERROR;
 
+		/* check role for multi client case */
+		if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+		/* Abort Flowring creation if multi client flowrings crossed the threshold */
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+		if (if_role_multi_client &&
+			(dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
+			DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
+				__FUNCTION__, dhdp->multi_client_flow_rings,
+				dhdp->max_multi_client_flow_rings));
+			return BCME_ERROR;
+		}
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+		/* Do not create Flowring if peer is not associated */
+#if defined(PCIE_FULL_DONGLE)
+		if (if_role_multi_client && !ETHER_ISMULTI(da) &&
+			!dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
+			DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+#endif /* PCIE_FULL_DONGLE */
 
 		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
 		if (id == FLOWID_INVALID) {
-			DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+			DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
 			           __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
 			return BCME_ERROR;
 		}
 
+		ASSERT(id < dhdp->num_flow_rings);
+
+		/* Only after flowid alloc, increment multi_client_flow_rings */
+		if (if_role_multi_client) {
+			dhdp->multi_client_flow_rings++;
+		}
+
 		/* register this flowid in dhd_pub */
 		dhd_add_flowid(dhdp, ifindex, prio, da, id);
-
-		ASSERT(id < dhdp->num_flow_rings);
 
 		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
 
@@ -674,12 +781,22 @@
 		flow_ring_node->flow_info.tid = prio;
 		flow_ring_node->flow_info.ifindex = ifindex;
 		flow_ring_node->active = TRUE;
-		flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+		flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;
+
+#ifdef TX_STATUS_LATENCY_STATS
+		flow_ring_node->flow_info.num_tx_status = 0;
+		flow_ring_node->flow_info.cum_tx_status_latency = 0;
+		flow_ring_node->flow_info.num_tx_pkts = 0;
+#endif /* TX_STATUS_LATENCY_STATS */
 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
 
 		/* Create and inform device about the new flow */
 		if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
 				!= BCME_OK) {
+			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+			flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
+			flow_ring_node->active = FALSE;
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
 			DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
 			return BCME_ERROR;
 		}
@@ -688,7 +805,14 @@
 		return BCME_OK;
 	} else {
 		/* if the Flow id was found in the hash */
-		ASSERT(id < dhdp->num_flow_rings);
+
+		if (id >= dhdp->num_flow_rings) {
+			DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
+				__FUNCTION__, id, dhdp->num_flow_rings));
+			*flowid = FLOWID_INVALID;
+			ASSERT(0);
+			return BCME_ERROR;
+		}
 
 		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
@@ -709,21 +833,74 @@
 		 * active is made TRUE when a flow_ring_node gets allocated and is made
 		 * FALSE when the flow ring gets removed and does not reflect the True state
 		 * of the Flow ring.
+		 * In case if IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
+		 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
+		 * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
+		 * The status FLOW_RING_STATUS_RESUME_PENDING, is equivalent to
+		 * FLOW_RING_STATUS_CREATE_PENDING.
 		 */
-		if (flow_ring_node->status == FLOW_RING_STATUS_OPEN ||
-			flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
-			*flowid = id;
-			ret = BCME_OK;
-		} else {
+		if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
+			flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
 			*flowid = FLOWID_INVALID;
 			ret = BCME_ERROR;
+		} else {
+			*flowid = id;
+			ret = BCME_OK;
 		}
 
 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
 		return ret;
-
 	} /* Flow Id found in the hash */
 } /* dhd_flowid_lookup */
+
+int
+dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashidx = 0;
+	bool found = FALSE;
+	flow_hash_info_t *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	if (!dhdp->flow_ring_table) {
+		DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+	for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
+		cur = if_flow_lkup[ifindex].fl_hash[hashidx];
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				cur = cur->next;
+			}
+
+			if (found) {
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				return BCME_OK;
+			}
+		}
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_ERROR;
+}
+
+int
+dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
+}
 
 /**
  * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
@@ -734,7 +911,7 @@
 {
 	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
 	struct ether_header *eh = (struct ether_header *)pktdata;
-	uint16 flowid;
+	uint16 flowid = 0;
 
 	ASSERT(ifindex < DHD_MAX_IFS);
 
@@ -747,7 +924,7 @@
 		return BCME_ERROR;
 	}
 
-	if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
+	if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
 		&flowid) != BCME_OK) {
 		return BCME_ERROR;
 	}
@@ -767,9 +944,16 @@
 	flow_hash_info_t *cur, *prev;
 	if_flow_lkup_t *if_flow_lkup;
 	unsigned long flags;
+	bool if_role_multi_client;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
 
 	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
 
 	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
 
@@ -796,6 +980,11 @@
 					prev->next = cur->next;
 				}
 
+				/* Decrement multi_client_flow_rings */
+				if (if_role_multi_client) {
+					dhdp->multi_client_flow_rings--;
+				}
+
 				/* deregister flowid from dhd_pub. */
 				dhd_del_flowid(dhdp, ifindex, flowid);
 
@@ -814,11 +1003,37 @@
 } /* dhd_flowid_free */
 
 /**
- * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle
+ * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle
  * indicates that a wireless link has gone down.
  */
 void
 dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+			                                 (void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+void
+dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
 {
 	uint32 id;
 	flow_ring_table_t *flow_ring_table;
@@ -831,12 +1046,13 @@
 
 	if (!dhdp->flow_ring_table)
 		return;
-
 	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
 	for (id = 0; id < dhdp->num_flow_rings; id++) {
 		if (flow_ring_table[id].active &&
-		    (flow_ring_table[id].flow_info.ifindex == ifindex)) {
-			dhd_bus_flow_ring_delete_request(dhdp->bus,
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			dhd_bus_flow_ring_flush_request(dhdp->bus,
 			                                 (void *) &flow_ring_table[id]);
 		}
 	}
@@ -860,11 +1076,21 @@
 
 	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
 	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		/*
+		 * Send flowring delete request even if flowring status is
+		 * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND
+		 * event comes ahead of flowring create response.
+		 * Otherwise the flowring will not be deleted later as there will not be any
+		 * DISASSOC_IND event. With this change, when create response event comes to DHD,
+		 * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response
+		 * event will come, upon which DHD will delete the flowring.
+		 */
 		if (flow_ring_table[id].active &&
 			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
 			(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
-			(flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
-			DHD_INFO(("%s: deleting flowid %d\n",
+			((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
+			(flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
+			DHD_ERROR(("%s: deleting flowid %d\n",
 				__FUNCTION__, flow_ring_table[id].flowid));
 			dhd_bus_flow_ring_delete_request(dhdp->bus,
 				(void *) &flow_ring_table[id]);
@@ -898,14 +1124,19 @@
 
 		if_flow_lkup[ifindex].role = role;
 
-		if (role != WLC_E_IF_ROLE_STA) {
-			/* Flowrings has to be created for WDS and DWDS when interface is created */
+		if (role == WLC_E_IF_ROLE_WDS) {
+			/**
+			 * WDS role does not send WLC_E_LINK event after interface is up.
+			 * So to create flowrings for WDS, make status as TRUE in WLC_E_IF itself.
+			 * same is true while making the status as FALSE.
+			 * TODO: Fix FW to send WLC_E_LINK for WDS role aswell. So that all the
+			 * interfaces are handled uniformly.
+			 */
 			if_flow_lkup[ifindex].status = TRUE;
 			DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
 			          __FUNCTION__, ifindex, role));
-			/* Create Mcast Flow */
 		}
-	} else	if (op == WLC_E_IF_DEL) {
+	} else	if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
 		if_flow_lkup[ifindex].status = FALSE;
 		DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
 		          __FUNCTION__, ifindex, role));
@@ -929,12 +1160,12 @@
 	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
-	if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
-		if (status)
-			if_flow_lkup[ifindex].status = TRUE;
-		else
-			if_flow_lkup[ifindex].status = FALSE;
+	if (status) {
+		if_flow_lkup[ifindex].status = TRUE;
+	} else {
+		if_flow_lkup[ifindex].status = FALSE;
 	}
+
 	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
 
 	return BCME_OK;
@@ -971,6 +1202,8 @@
 	else
 		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
 
+	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+
 	return BCME_OK;
 }
 
@@ -978,8 +1211,13 @@
 int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
 {
 	uint8 iovbuf[24];
+	int len;
 	if (!set) {
-		bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		memset(&iovbuf, 0, sizeof(iovbuf));
+		len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		if (len == 0) {
+			return BCME_BUFTOOSHORT;
+		}
 		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
 			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
 			return BCME_ERROR;
@@ -987,8 +1225,11 @@
 		*map = iovbuf[0];
 		return BCME_OK;
 	}
-	bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
-	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+	len = bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (len == 0) {
+		return BCME_BUFTOOSHORT;
+	}
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
 		DHD_ERROR(("%s: failed to set fl_prio_map \n",
 			__FUNCTION__));
 		return BCME_ERROR;

--
Gitblit v1.6.2