forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_flowring.c
@@ -1,18 +1,19 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
  * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
  *
  * Flow rings are transmit traffic (=propagating towards antenna) related entities
  *
  *
- * Copyright (C) 1999-2019, Broadcom Corporation
- *
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2 (the "GPL"),
  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  * following added to such license:
- *
+ *
  * As a special exception, the copyright holders of this software give you
  * permission to link this software with independent modules, and to copy and
  * distribute the resulting executable under terms of your choice, provided that
@@ -20,7 +21,7 @@
  * the license of that module. An independent module is a module which is not
  * derived from this software. The special exception does not apply to any
  * modifications of the software.
- *
+ *
  * Notwithstanding the above, under no circumstances may you combine this
  * software in any way with any other Broadcom software provided under a license
  * other than the GPL, without Broadcom's express prior written consent.
@@ -28,17 +29,16 @@
  *
  * <<Broadcom-WL-IPTag/Open:>>
  *
- * $Id: dhd_flowrings.c jaganlv $
+ * $Id: dhd_flowring.c 699841 2017-05-16 16:47:06Z $
  */
-
 
 #include <typedefs.h>
 #include <bcmutils.h>
 #include <bcmendian.h>
 #include <bcmdevs.h>
 
-#include <proto/ethernet.h>
-#include <proto/bcmevent.h>
+#include <ethernet.h>
+#include <bcmevent.h>
 #include <dngl_stats.h>
 
 #include <dhd.h>
@@ -47,11 +47,10 @@
 #include <dhd_bus.h>
 #include <dhd_proto.h>
 #include <dhd_dbg.h>
-#include <proto/802.1d.h>
+#include <802.1d.h>
 #include <pcie_core.h>
 #include <bcmmsgbuf.h>
 #include <dhd_pcie.h>
-
 
 static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
 
@@ -68,11 +67,7 @@
 #define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
 #define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
 
-#ifdef DHD_LOSSLESS_ROAMING
-const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
-#else
 const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
-#endif
 const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 
 /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
@@ -96,6 +91,9 @@
 
	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(flowid < dhdp->num_flow_rings);
+	if (flowid >= dhdp->num_flow_rings) {
+		return NULL;
+	}
 
	flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
 
@@ -107,10 +105,13 @@
 flow_queue_t *
 dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
 {
-	flow_ring_node_t * flow_ring_node;
+	flow_ring_node_t * flow_ring_node = NULL;
 
	flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
-	return &flow_ring_node->queue;
+	if (flow_ring_node)
+		return &flow_ring_node->queue;
+	else
+		return NULL;
 }
 
 /* Flow ring's queue management functions */
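
Note on the two hunks above: ASSERT compiles away on non-debug builds of this driver, so the new flowid range test and the NULL check in dhd_flow_queue() are what actually protect production code from an out-of-range flowid. A minimal standalone sketch of the assert-then-check pattern (toy types and names, not DHD code):

#include <assert.h>
#include <stddef.h>

typedef struct node { int queue; } node_t;

static node_t table[16];        /* stands in for dhdp->flow_ring_table */
static unsigned num_rings = 16; /* stands in for dhdp->num_flow_rings  */

static node_t *ring_node(unsigned flowid)
{
	assert(flowid < num_rings);  /* debug builds: fail loudly    */
	if (flowid >= num_rings)     /* release builds: fail safely  */
		return NULL;
	return &table[flowid];
}

static int *ring_queue(unsigned flowid)
{
	node_t *n = ring_node(flowid);
	return n ? &n->queue : NULL; /* propagate NULL to the caller */
}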
@@ -244,7 +245,7 @@
	int queue_budget, int cumm_threshold, void *cumm_ctr,
	int l2cumm_threshold, void *l2cumm_ctr)
 {
-	flow_queue_t * queue;
+	flow_queue_t * queue = NULL;
 
	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(queue_budget > 1);
@@ -254,16 +255,59 @@
	ASSERT(l2cumm_ctr != (void*)NULL);
 
	queue = dhd_flow_queue(dhdp, flowid);
+	if (queue) {
+		DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
 
-	DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
+		/* Set the queue's parent threshold and cummulative counter */
+		DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+		DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
 
-	/* Set the queue's parent threshold and cummulative counter */
-	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
-	DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+		/* Set the queue's grandparent threshold and cummulative counter */
+		DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
+		DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+	}
+}
 
-	/* Set the queue's grandparent threshold and cummulative counter */
-	DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
-	DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+uint8
+dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
+{
+	uint8 prio_count = 0;
+	int i;
+	// Pick all elements one by one
+	for (i = 0; i < NUMPRIO; i++)
+	{
+		// Check if the picked element is already counted
+		int j;
+		for (j = 0; j < i; j++) {
+			if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
+				break;
+			}
+		}
+		// If not counted earlier, then count it
+		if (i == j) {
+			prio_count++;
+		}
+	}
+
+#ifdef DHD_LOSSLESS_ROAMING
+	/* For LLR, we use a flowring with prio 7, which is not considered
+	 * in the prio2ac array. But in __dhd_sendpkt, prio is hardcoded to
+	 * PRIO_8021D_NC and sent to dhd_flowid_update.
+	 * So add 1 to prio_count.
+	 */
+	prio_count++;
+#endif /* DHD_LOSSLESS_ROAMING */
+
+	return prio_count;
+}
+
+uint8
+dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
+{
+	uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
+	uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
+	uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
+	return max_multi_client_flow_rings;
 }
 
 /** Initializes data structures of multiple flow rings */
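
The new dhd_num_prio_supported_per_flow_ring() uses the classic O(n^2) distinct-element count over the 8-entry priority map; dhd_get_max_multi_client_flow_rings() then reserves that many rings for the infra STA and leaves the remainder for multi-client use. A compilable toy version of the counting loop, with values borrowed from the prio2ac table above:

#include <stdio.h>

#define NUMPRIO 8

/* Toy stand-in for dhdp->flow_prio_map (the AC map: 0,1,1,0,2,2,3,3) */
static const unsigned char prio_map[NUMPRIO] = { 0, 1, 1, 0, 2, 2, 3, 3 };

int main(void)
{
	int count = 0, i, j;

	for (i = 0; i < NUMPRIO; i++) {
		for (j = 0; j < i; j++)
			if (prio_map[i] == prio_map[j])
				break;          /* seen before: skip  */
		if (i == j)
			count++;                /* first occurrence   */
	}
	printf("distinct priorities: %d\n", count); /* prints 4 */
	return 0;
}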
@@ -284,7 +328,7 @@
 
	/* Construct a 16bit flowid allocator */
	flowid_allocator = id16_map_init(dhdp->osh,
-		num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
+		num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
	if (flowid_allocator == NULL) {
		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
		return BCME_NOMEM;
@@ -306,6 +350,9 @@
		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
		flow_ring_table[idx].flowid = (uint16)idx;
		flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
+#ifdef IDLE_TX_FLOW_MGMT
+		flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
		if (flow_ring_table[idx].lock == NULL) {
			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
			goto fail;
@@ -346,9 +393,13 @@
 
	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+	dhdp->multi_client_flow_rings = 0U;
+
 #ifdef DHD_LOSSLESS_ROAMING
	dhdp->dequeue_prec_map = ALLPRIO;
-#endif
+#endif // endif
	/* Now populate into dhd pub */
	DHD_FLOWID_LOCK(lock, flags);
	dhdp->num_flow_rings = num_flow_rings;
@@ -445,6 +496,9 @@
	dhdp->num_flow_rings = 0U;
	bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
 
+	dhdp->max_multi_client_flow_rings = 0U;
+	dhdp->multi_client_flow_rings = 0U;
+
	lock = dhdp->flowid_lock;
	dhdp->flowid_lock = NULL;
 
@@ -474,13 +528,20 @@
 #ifdef WLTDLS
 bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
 {
-	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+	unsigned long flags;
+	tdls_peer_node_t *cur = NULL;
+
+	DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+	cur = dhdp->peer_tbl.node;
+
	while (cur != NULL) {
		if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+			DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
			return TRUE;
		}
		cur = cur->next;
	}
+	DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	return FALSE;
 }
 #endif /* WLTDLS */
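
This hunk fixes a race in is_tdls_destination(): the TDLS peer list is now read under tdls_lock, and both return paths drop the lock first. A minimal sketch of the walk-under-lock, unlock-before-every-return pattern, with a pthread mutex standing in for DHD_TDLS_LOCK (toy code, not the driver's OSL):

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct peer { unsigned char addr[6]; struct peer *next; };

static pthread_mutex_t tdls_lock = PTHREAD_MUTEX_INITIALIZER;
static struct peer *peer_list;

bool is_peer(const unsigned char *da)
{
	struct peer *cur;

	pthread_mutex_lock(&tdls_lock);
	for (cur = peer_list; cur != NULL; cur = cur->next) {
		if (memcmp(da, cur->addr, 6) == 0) {
			pthread_mutex_unlock(&tdls_lock); /* unlock before every return */
			return true;
		}
	}
	pthread_mutex_unlock(&tdls_lock);
	return false;
}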
@@ -495,13 +556,16 @@
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;
 
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return FLOWID_INVALID;
+
	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
	ASSERT(if_flow_lkup);
 
-	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
-		(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
 #ifdef WLTDLS
		if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
			is_tdls_destination(dhdp, da)) {
@@ -561,7 +625,11 @@
	uint16 flowid;
	unsigned long flags;
 
-	fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t));
+	fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
+	if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
 
	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
@@ -571,7 +639,7 @@
 
	if (flowid == FLOWID_INVALID) {
		MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
-		DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+		DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
		return FLOWID_INVALID;
	}
 
@@ -583,9 +651,8 @@
	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
-	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
-		(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
-		/* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
+	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+		/* For STA/GC non TDLS dest and WDS dest we allocate entry based on prio only */
 #ifdef WLTDLS
		if (dhdp->peer_tbl.tdls_peer_count &&
			(is_tdls_destination(dhdp, da))) {
619686
620687 DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
621688
689
+ if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
690
+ DHD_ERROR(("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
691
+ __FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings,
692
+ ifindex, prio, if_flow_lkup[ifindex].role));
693
+ dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
694
+ dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
695
+ return FLOWID_INVALID;
696
+ }
697
+
622698 return fl_hash_node->flowid;
623699 } /* dhd_flowid_alloc */
624700
@@ -633,36 +709,67 @@
	unsigned long flags;
	int ret;
 
-	DHD_INFO(("%s\n", __FUNCTION__));
+	DHD_TRACE(("%s\n", __FUNCTION__));
 
	if (!dhdp->flow_ring_table) {
		return BCME_ERROR;
	}
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
 
	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
 
	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
 
	if (id == FLOWID_INVALID) {
-
+		bool if_role_multi_client;
		if_flow_lkup_t *if_flow_lkup;
		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
		if (!if_flow_lkup[ifindex].status)
			return BCME_ERROR;
 
+		/* check role for multi client case */
+		if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+		/* Abort Flowring creation if multi client flowrings crossed the threshold */
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+		if (if_role_multi_client &&
+			(dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
+			DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
+				__FUNCTION__, dhdp->multi_client_flow_rings,
+				dhdp->max_multi_client_flow_rings));
+			return BCME_ERROR;
+		}
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+		/* Do not create Flowring if peer is not associated */
+#if defined(PCIE_FULL_DONGLE)
+		if (if_role_multi_client && !ETHER_ISMULTI(da) &&
+			!dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
+			DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+#endif /* PCIE_FULL_DONGLE */
 
		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
		if (id == FLOWID_INVALID) {
-			DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+			DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
				__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
			return BCME_ERROR;
		}
 
+		ASSERT(id < dhdp->num_flow_rings);
+
+		/* Only after flowid alloc, increment multi_client_flow_rings */
+		if (if_role_multi_client) {
+			dhdp->multi_client_flow_rings++;
+		}
+
		/* register this flowid in dhd_pub */
		dhd_add_flowid(dhdp, ifindex, prio, da, id);
-
-		ASSERT(id < dhdp->num_flow_rings);
 
		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
 
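
The hunk above adds admission control for multi-client interfaces: creation is refused once multi_client_flow_rings reaches the computed cap, and the counter is bumped only after flowid allocation succeeds (the matching decrement lands in dhd_flowid_free further down). A toy sketch of that counting discipline (hypothetical cap; the driver derives its cap from ring counts):

#include <stdbool.h>

static unsigned multi_client_rings;          /* current count */
static const unsigned max_multi_client = 4;  /* hypothetical cap */

static int alloc_ring(bool multi_client)
{
	if (multi_client && multi_client_rings >= max_multi_client)
		return -1;                   /* abort creation: cap reached */
	/* ... allocate the ring id here; on failure return -1 ... */
	if (multi_client)
		multi_client_rings++;        /* count only after success */
	return 0;
}

static void free_ring(bool multi_client)
{
	if (multi_client && multi_client_rings > 0)
		multi_client_rings--;        /* keep the counter balanced */
}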
@@ -674,12 +781,22 @@
		flow_ring_node->flow_info.tid = prio;
		flow_ring_node->flow_info.ifindex = ifindex;
		flow_ring_node->active = TRUE;
-		flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+		flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;
+
+#ifdef TX_STATUS_LATENCY_STATS
+		flow_ring_node->flow_info.num_tx_status = 0;
+		flow_ring_node->flow_info.cum_tx_status_latency = 0;
+		flow_ring_node->flow_info.num_tx_pkts = 0;
+#endif /* TX_STATUS_LATENCY_STATS */
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
 
		/* Create and inform device about the new flow */
		if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
			!= BCME_OK) {
+			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+			flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
+			flow_ring_node->active = FALSE;
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
			return BCME_ERROR;
		}
688805 return BCME_OK;
689806 } else {
690807 /* if the Flow id was found in the hash */
691
- ASSERT(id < dhdp->num_flow_rings);
808
+
809
+ if (id >= dhdp->num_flow_rings) {
810
+ DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
811
+ __FUNCTION__, id, dhdp->num_flow_rings));
812
+ *flowid = FLOWID_INVALID;
813
+ ASSERT(0);
814
+ return BCME_ERROR;
815
+ }
692816
693817 flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
694818 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
@@ -709,21 +833,74 @@
		 * active is made TRUE when a flow_ring_node gets allocated and is made
		 * FALSE when the flow ring gets removed and does not reflect the True state
		 * of the Flow ring.
+		 * If IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
+		 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
+		 * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
+		 * The status FLOW_RING_STATUS_RESUME_PENDING is equivalent to
+		 * FLOW_RING_STATUS_CREATE_PENDING.
		 */
-		if (flow_ring_node->status == FLOW_RING_STATUS_OPEN ||
-			flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
-			*flowid = id;
-			ret = BCME_OK;
-		} else {
+		if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
+			flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
			*flowid = FLOWID_INVALID;
			ret = BCME_ERROR;
+		} else {
+			*flowid = id;
+			ret = BCME_OK;
		}
 
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		return ret;
-
	} /* Flow Id found in the hash */
 } /* dhd_flowid_lookup */
+
+int
+dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashidx = 0;
+	bool found = FALSE;
+	flow_hash_info_t *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	if (!dhdp->flow_ring_table) {
+		DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+	for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
+		cur = if_flow_lkup[ifindex].fl_hash[hashidx];
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				cur = cur->next;
+			}
+
+			if (found) {
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				return BCME_OK;
+			}
+		}
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_ERROR;
+}
+
+int
+dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
+}
 
 /**
  * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
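
The new dhd_flowid_find_by_ifidx() walks every bucket of the per-interface hash and follows each chain until the flowid matches. A condensed sketch of the same bucket-and-chain scan (simplified, lock-free toy; HASH_SIZE is a stand-in for DHD_FLOWRING_HASH_SIZE):

#include <stdbool.h>
#include <stddef.h>

#define HASH_SIZE 8

struct hinfo { unsigned short flowid; struct hinfo *next; };
static struct hinfo *fl_hash[HASH_SIZE];   /* per-interface buckets */

bool flowid_present(unsigned short flowid)
{
	int b;
	for (b = 0; b < HASH_SIZE; b++) {
		struct hinfo *cur;
		for (cur = fl_hash[b]; cur != NULL; cur = cur->next)
			if (cur->flowid == flowid)
				return true;   /* found in this chain */
	}
	return false;
}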
@@ -734,7 +911,7 @@
 {
	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
	struct ether_header *eh = (struct ether_header *)pktdata;
-	uint16 flowid;
+	uint16 flowid = 0;
 
	ASSERT(ifindex < DHD_MAX_IFS);
 
@@ -747,7 +924,7 @@
		return BCME_ERROR;
	}
 
-	if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
+	if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
		&flowid) != BCME_OK) {
		return BCME_ERROR;
	}
@@ -767,9 +944,16 @@
	flow_hash_info_t *cur, *prev;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;
+	bool if_role_multi_client;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
 
	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
 
	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
 
@@ -796,6 +980,11 @@
					prev->next = cur->next;
				}
 
+				/* Decrement multi_client_flow_rings */
+				if (if_role_multi_client) {
+					dhdp->multi_client_flow_rings--;
+				}
+
				/* deregister flowid from dhd_pub. */
				dhd_del_flowid(dhdp, ifindex, flowid);
 
@@ -814,11 +1003,37 @@
 } /* dhd_flowid_free */
 
 /**
- * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle
+ * Delete all Flow rings associated with the given interface. Is called when eg the dongle
  * indicates that a wireless link has gone down.
  */
 void
 dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+				(void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+void
+dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
 {
	uint32 id;
	flow_ring_table_t *flow_ring_table;
@@ -831,12 +1046,13 @@
 
	if (!dhdp->flow_ring_table)
		return;
-
	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
	for (id = 0; id < dhdp->num_flow_rings; id++) {
		if (flow_ring_table[id].active &&
-			(flow_ring_table[id].flow_info.ifindex == ifindex)) {
-			dhd_bus_flow_ring_delete_request(dhdp->bus,
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			dhd_bus_flow_ring_flush_request(dhdp->bus,
				(void *) &flow_ring_table[id]);
		}
	}
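
dhd_flow_rings_delete() and the new dhd_flow_rings_flush() share the same scan: visit every ring, match on active + ifindex + FLOW_RING_STATUS_OPEN, and issue either a delete or a flush request to the bus. A condensed sketch of that shared shape (toy types; the request sender is a stub, not the real bus API):

#include <stdbool.h>

enum ring_status { CLOSED, CREATE_PENDING, OPEN, DELETE_PENDING };
enum ring_op { RING_DELETE, RING_FLUSH };

struct ring { bool active; unsigned char ifindex; enum ring_status status; };

static void send_request(struct ring *r, enum ring_op op)
{
	(void)r; (void)op; /* stand-in for dhd_bus_flow_ring_*_request() */
}

static void for_each_open_ring(struct ring *tab, unsigned n,
                               unsigned char ifindex, enum ring_op op)
{
	unsigned id;
	for (id = 0; id < n; id++) {
		if (tab[id].active &&
		    tab[id].ifindex == ifindex &&
		    tab[id].status == OPEN)
			send_request(&tab[id], op);
	}
}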
@@ -860,11 +1076,21 @@
 
	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		/*
+		 * Send flowring delete request even if flowring status is
+		 * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND
+		 * event comes ahead of flowring create response.
+		 * Otherwise the flowring will not be deleted later as there will not be any
+		 * DISASSOC_IND event. With this change, when create response event comes to DHD,
+		 * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response
+		 * event will come, upon which DHD will delete the flowring.
+		 */
		if (flow_ring_table[id].active &&
			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
			(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
-			(flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
-			DHD_INFO(("%s: deleting flowid %d\n",
+			((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
+			(flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
+			DHD_ERROR(("%s: deleting flowid %d\n",
				__FUNCTION__, flow_ring_table[id].flowid));
			dhd_bus_flow_ring_delete_request(dhdp->bus,
				(void *) &flow_ring_table[id]);
@@ -898,14 +1124,19 @@
 
		if_flow_lkup[ifindex].role = role;
 
-		if (role != WLC_E_IF_ROLE_STA) {
-			/* Flowrings has to be created for WDS and DWDS when interface is created */
+		if (role == WLC_E_IF_ROLE_WDS) {
+			/**
+			 * The WDS role does not send a WLC_E_LINK event after the interface is up,
+			 * so to create flowrings for WDS, set status to TRUE in WLC_E_IF itself.
+			 * The same applies when setting the status to FALSE.
+			 * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that all
+			 * interfaces are handled uniformly.
+			 */
			if_flow_lkup[ifindex].status = TRUE;
			DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
				__FUNCTION__, ifindex, role));
-			/* Create Mcast Flow */
		}
-	} else if (op == WLC_E_IF_DEL) {
+	} else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
		if_flow_lkup[ifindex].status = FALSE;
		DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
			__FUNCTION__, ifindex, role));
@@ -929,12 +1160,12 @@
	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
 
-	if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
-		if (status)
-			if_flow_lkup[ifindex].status = TRUE;
-		else
-			if_flow_lkup[ifindex].status = FALSE;
+	if (status) {
+		if_flow_lkup[ifindex].status = TRUE;
+	} else {
+		if_flow_lkup[ifindex].status = FALSE;
	}
+
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
 
	return BCME_OK;
@@ -971,6 +1202,8 @@
	else
		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
 
+	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+
	return BCME_OK;
 }
 
@@ -978,8 +1211,13 @@
 int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
 {
	uint8 iovbuf[24];
+	int len;
	if (!set) {
-		bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		memset(&iovbuf, 0, sizeof(iovbuf));
+		len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		if (len == 0) {
+			return BCME_BUFTOOSHORT;
+		}
		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
			return BCME_ERROR;
@@ -987,8 +1225,11 @@
		*map = iovbuf[0];
		return BCME_OK;
	}
-	bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
-	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+	len = bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (len == 0) {
+		return BCME_BUFTOOSHORT;
+	}
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
		DHD_ERROR(("%s: failed to set fl_prio_map \n",
			__FUNCTION__));
		return BCME_ERROR;
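
The final two hunks make dhd_flow_prio_map() honor bcm_mkiovar's return value: 0 means the iovar did not fit in the buffer, and the SET ioctl now sends the encoded length rather than sizeof(iovbuf). A standalone sketch of the pattern (mk_iovar and ioctl_set are hypothetical stand-ins, not the real Broadcom APIs):

#include <stddef.h>
#include <string.h>

/* Toy stand-in for bcm_mkiovar: writes "name\0" + payload into buf,
 * returns the total encoded length, or 0 if buf is too small. */
static int mk_iovar(const char *name, const void *data, size_t datalen,
                    char *buf, size_t buflen)
{
	size_t namelen = strlen(name) + 1;
	if (namelen + datalen > buflen)
		return 0;                  /* too small: caller must bail */
	memcpy(buf, name, namelen);
	if (datalen)
		memcpy(buf + namelen, data, datalen);
	return (int)(namelen + datalen);
}

static int ioctl_set(const void *buf, int len)
{
	(void)buf; (void)len;
	return 0; /* stub: a real driver hands this to the bus/ioctl layer */
}

int set_prio_map(unsigned int map)
{
	char iovbuf[24];
	int len = mk_iovar("bus:fl_prio_map", &map, sizeof(map),
	                   iovbuf, sizeof(iovbuf));
	if (len == 0)
		return -1;                 /* BCME_BUFTOOSHORT in DHD */
	/* send the exact encoded length, not sizeof(iovbuf) */
	return ioctl_set(iovbuf, len);
}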