hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -1,10 +1,10 @@
 /*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
  *
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
  *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
  * (the "License"). You may use, redistribute and/or modify this File in
  * accordance with the terms and conditions of the License, a copy of which
  * is available by writing to the Free Software Foundation, Inc.,
@@ -76,7 +76,8 @@
 /* This function will process the rx packet and forward it to kernel/upper
  * layer.
  */
-static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
+static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
+				    struct sk_buff *payload)
 {
 
 	int ret;
@@ -109,27 +110,25 @@
 			       struct mwifiex_rx_reorder_tbl *tbl,
 			       int start_win)
 {
+	struct sk_buff_head list;
+	struct sk_buff *skb;
 	int pkt_to_send, i;
-	void *rx_tmp_ptr;
-	unsigned long flags;
+
+	__skb_queue_head_init(&list);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 
 	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
		      tbl->win_size;
 
 	for (i = 0; i < pkt_to_send; ++i) {
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-		rx_tmp_ptr = NULL;
 		if (tbl->rx_reorder_ptr[i]) {
-			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+			skb = tbl->rx_reorder_ptr[i];
+			__skb_queue_tail(&list, skb);
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-		if (rx_tmp_ptr)
-			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -140,7 +139,10 @@
 	}
 
 	tbl->start_win = start_win;
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
+
+	while ((skb = __skb_dequeue(&list)))
+		mwifiex_11n_dispatch_pkt(priv, skb);
 }
 
 /*
@@ -155,24 +157,21 @@
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
			      struct mwifiex_rx_reorder_tbl *tbl)
 {
+	struct sk_buff_head list;
+	struct sk_buff *skb;
 	int i, j, xchg;
-	void *rx_tmp_ptr;
-	unsigned long flags;
+
+	__skb_queue_head_init(&list);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 
 	for (i = 0; i < tbl->win_size; ++i) {
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-		if (!tbl->rx_reorder_ptr[i]) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+		if (!tbl->rx_reorder_ptr[i])
 			break;
-		}
-		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+		skb = tbl->rx_reorder_ptr[i];
+		__skb_queue_tail(&list, skb);
 		tbl->rx_reorder_ptr[i] = NULL;
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -185,7 +184,11 @@
 		}
 	}
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
+
+	while ((skb = __skb_dequeue(&list)))
+		mwifiex_11n_dispatch_pkt(priv, skb);
 }
 
 /*
@@ -198,19 +201,18 @@
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
			     struct mwifiex_rx_reorder_tbl *tbl)
 {
-	unsigned long flags;
 	int start_win;
 
 	if (!tbl)
 		return;
 
-	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+	spin_lock_bh(&priv->adapter->rx_proc_lock);
 	priv->adapter->rx_locked = true;
 	if (priv->adapter->rx_processing) {
-		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+		spin_unlock_bh(&priv->adapter->rx_proc_lock);
 		flush_workqueue(priv->adapter->rx_workqueue);
 	} else {
-		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+		spin_unlock_bh(&priv->adapter->rx_proc_lock);
 	}
 
 	start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
@@ -219,16 +221,16 @@
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_del(&tbl->list);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
 
-	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+	spin_lock_bh(&priv->adapter->rx_proc_lock);
 	priv->adapter->rx_locked = false;
-	spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+	spin_unlock_bh(&priv->adapter->rx_proc_lock);
 
 }
 
@@ -240,17 +242,15 @@
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
 		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 			return tbl;
 		}
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	return NULL;
 }
@@ -261,21 +261,19 @@
 void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl, *tmp;
-	unsigned long flags;
 
 	if (!ta)
 		return;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
 		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 			mwifiex_del_rx_reorder_entry(priv, tbl);
-			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+			spin_lock_bh(&priv->rx_reorder_tbl_lock);
 		}
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	return;
 }
@@ -289,18 +287,16 @@
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
 	struct mwifiex_private *priv = ctx->priv;
-	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
 		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
+			spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 			return i;
 		}
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	return -1;
 }
@@ -348,7 +344,6 @@
 	int i;
 	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
 	u16 last_seq = 0;
-	unsigned long flags;
 	struct mwifiex_sta_node *node;
 
 	/*
@@ -372,7 +367,7 @@
 	new_node->init_win = seq_num;
 	new_node->flags = 0;
 
-	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	spin_lock_bh(&priv->sta_list_spinlock);
 	if (mwifiex_queuing_ra_based(priv)) {
 		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
 			node = mwifiex_get_sta_entry(priv, ta);
@@ -386,7 +381,7 @@
 		else
 			last_seq = priv->rx_seq[tid];
 	}
-	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+	spin_unlock_bh(&priv->sta_list_spinlock);
 
 	mwifiex_dbg(priv->adapter, INFO,
		    "info: last_seq=%d start_win=%d\n",
@@ -403,7 +398,7 @@
 	new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *),
					   GFP_KERNEL);
 	if (!new_node->rx_reorder_ptr) {
-		kfree((u8 *) new_node);
+		kfree(new_node);
 		mwifiex_dbg(priv->adapter, ERROR,
			    "%s: failed to alloc reorder_ptr\n", __func__);
 		return;
@@ -418,9 +413,9 @@
 	for (i = 0; i < win_size; ++i)
 		new_node->rx_reorder_ptr[i] = NULL;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 }
 
 static void
@@ -476,18 +471,17 @@
 	u32 rx_win_size = priv->add_ba_param.rx_win_size;
 	u8 tid;
 	int win_size;
-	unsigned long flags;
 	uint16_t block_ack_param_set;
 
 	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	    priv->adapter->is_hw_11ac_capable &&
	    memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
-		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+		spin_lock_bh(&priv->sta_list_spinlock);
 		sta_ptr = mwifiex_get_sta_entry(priv,
						cmd_addba_req->peer_mac_addr);
 		if (!sta_ptr) {
-			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+			spin_unlock_bh(&priv->sta_list_spinlock);
 			mwifiex_dbg(priv->adapter, ERROR,
				    "BA setup with unknown TDLS peer %pM!\n",
				    cmd_addba_req->peer_mac_addr);
@@ -495,7 +489,7 @@
 		}
 		if (sta_ptr->is_11ac_enabled)
 			rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
-		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+		spin_unlock_bh(&priv->sta_list_spinlock);
 	}
 
 	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
@@ -682,7 +676,6 @@
 	struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
 	struct mwifiex_ra_list_tbl *ra_list;
 	u8 cleanup_rx_reorder_tbl;
-	unsigned long flags;
 	int tid_down;
 
 	if (type == TYPE_DELBA_RECEIVE)
@@ -716,9 +709,9 @@
 			ra_list->amsdu_in_ampdu = false;
 			ra_list->ba_status = BA_SETUP_NONE;
 		}
-		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+		spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
 		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
-		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+		spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
 	}
 }
 
@@ -804,17 +797,16 @@
 void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 {
 	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+		spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		spin_lock_bh(&priv->rx_reorder_tbl_lock);
 	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+	spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 
 	mwifiex_reset_11n_rx_seq_num(priv);
 }
@@ -826,7 +818,6 @@
 {
 	struct mwifiex_private *priv;
 	struct mwifiex_rx_reorder_tbl *tbl;
-	unsigned long lock_flags;
 	int i;
 
 	for (i = 0; i < adapter->priv_num; i++) {
@@ -834,10 +825,10 @@
 		if (!priv)
 			continue;
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+		spin_lock_bh(&priv->rx_reorder_tbl_lock);
 		list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
 			tbl->flags = flags;
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+		spin_unlock_bh(&priv->rx_reorder_tbl_lock);
 	}
 
 	return;
@@ -986,8 +977,8 @@
 			}
 		}
 
-		tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
-		tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+		tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+		tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
 		tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
 	}
 }
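For reference, a minimal standalone sketch of the locking pattern the reordering hunks above adopt: buffered frames are collected onto a local sk_buff_head while rx_reorder_tbl_lock is held with spin_lock_bh(), and mwifiex_11n_dispatch_pkt() runs only after the lock is released. The struct demo_reorder, demo_flush() and deliver() names below are hypothetical stand-ins, not driver code; only the sk_buff queue and spinlock calls are real kernel interfaces.

/*
 * Illustrative only (hypothetical names): queue packets under the lock,
 * dispatch them after the lock is dropped.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_reorder {
	spinlock_t lock;		/* protects slots[] */
	struct sk_buff *slots[16];	/* simplified reorder window */
};

/* hypothetical stand-in for mwifiex_11n_dispatch_pkt() */
static void deliver(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static void demo_flush(struct demo_reorder *r, int n)
{
	struct sk_buff_head list;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/* _bh locking suffices when the data is never touched from hard-IRQ context */
	spin_lock_bh(&r->lock);
	for (i = 0; i < n; i++) {
		if (r->slots[i]) {
			__skb_queue_tail(&list, r->slots[i]);
			r->slots[i] = NULL;
		}
	}
	spin_unlock_bh(&r->lock);

	/* deliver outside the critical section */
	while ((skb = __skb_dequeue(&list)))
		deliver(skb);
}

Queuing under the lock and delivering after it keeps the critical section short and removes the need to drop and retake the lock once per packet, which is what the old per-iteration irqsave/irqrestore pairs did.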