From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

Update the Marvell WMM driver copyright to NXP, move the tos_to_tid_inv
table into wmm.c, and switch the WMM and STA-list spinlocks from the
irqsave/irqrestore variants to the _bh variants. The now-unused flags
arguments of mwifiex_send_single_packet() and
mwifiex_send_processed_packet() are dropped accordingly.

---
 kernel/drivers/net/wireless/marvell/mwifiex/wmm.c | 132 ++++++++++++++++++++-----------------------
 1 files changed, 62 insertions(+), 70 deletions(-)

diff --git a/kernel/drivers/net/wireless/marvell/mwifiex/wmm.c b/kernel/drivers/net/wireless/marvell/mwifiex/wmm.c
index 429ea27..b8f19ca 100644
--- a/kernel/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/kernel/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1,10 +1,10 @@
 /*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
  *
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
  *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
  * (the "License"). You may use, redistribute and/or modify this File in
  * accordance with the terms and conditions of the License, a copy of which
  * is available by writing to the Free Software Foundation, Inc.,
@@ -39,6 +39,21 @@
 static bool disable_tx_amsdu;
 module_param(disable_tx_amsdu, bool, 0644);
 
+
+/* This table inverses the tos_to_tid operation to get a priority
+ * which is in sequential order, and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+const u8 tos_to_tid_inv[] = {
+	0x02, /* from tos_to_tid[2] = 0 */
+	0x00, /* from tos_to_tid[0] = 1 */
+	0x01, /* from tos_to_tid[1] = 2 */
+	0x03,
+	0x04,
+	0x05,
+	0x06,
+	0x07
+};
 
 /* WMM information IE */
 static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
@@ -138,7 +153,6 @@
 	struct mwifiex_ra_list_tbl *ra_list;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	struct mwifiex_sta_node *node;
-	unsigned long flags;
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
@@ -163,7 +177,7 @@
 			ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
 		}
 	} else {
-		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+		spin_lock_bh(&priv->sta_list_spinlock);
 		node = mwifiex_get_sta_entry(priv, ra);
 		if (node)
 			ra_list->tx_paused = node->tx_pause;
@@ -171,7 +185,7 @@
 			mwifiex_is_sta_11n_enabled(priv, node);
 		if (ra_list->is_11n_enabled)
 			ra_list->max_amsdu = node->max_amsdu;
-		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+		spin_unlock_bh(&priv->sta_list_spinlock);
 	}
 
 	mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
@@ -583,11 +597,10 @@
 void
 mwifiex_clean_txrx(struct mwifiex_private *priv)
 {
-	unsigned long flags;
 	struct sk_buff *skb, *tmp;
 
 	mwifiex_11n_cleanup_reorder_tbl(priv);
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 	mwifiex_wmm_cleanup_queues(priv);
 	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
@@ -601,7 +614,7 @@
 	if (priv->adapter->if_ops.clean_pcie_ring &&
 	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
 		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 
 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
 		skb_unlink(skb, &priv->tdls_txq);
@@ -642,10 +655,9 @@
 {
 	struct mwifiex_ra_list_tbl *ra_list;
 	u32 pkt_cnt = 0, tx_pkts_queued;
-	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
 		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
@@ -671,7 +683,7 @@
 		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
 		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
 	}
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 }
 
 /* This function updates non-tdls peer ralist tx_pause while
@@ -682,10 +694,9 @@
 {
 	struct mwifiex_ra_list_tbl *ra_list;
 	u32 pkt_cnt = 0, tx_pkts_queued;
-	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
 		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
@@ -716,7 +727,7 @@
 		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
 		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
 	}
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 }
 
 /*
@@ -748,10 +759,9 @@
 mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
 {
 	struct mwifiex_ra_list_tbl *ra_list;
-	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 	for (i = 0; i < MAX_NUM_TID; ++i) {
 		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
@@ -767,7 +777,7 @@
 		list_del(&ra_list->list);
 		kfree(ra_list);
 	}
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 }
 
 /*
@@ -818,7 +828,6 @@
 	u32 tid;
 	struct mwifiex_ra_list_tbl *ra_list;
 	u8 ra[ETH_ALEN], tid_down;
-	unsigned long flags;
 	struct list_head list_head;
 	int tdls_status = TDLS_NOT_SETUP;
 	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
@@ -844,7 +853,7 @@
 
 	tid = skb->priority;
 
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
@@ -864,8 +873,7 @@
 			break;
 		case TDLS_SETUP_INPROGRESS:
 			skb_queue_tail(&priv->tdls_txq, skb);
-			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-					       flags);
+			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 			return;
 		default:
 			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
@@ -881,7 +889,7 @@
 	}
 
 	if (!ra_list) {
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 		mwifiex_write_data_complete(adapter, skb, 0, -1);
 		return;
 	}
@@ -901,7 +909,7 @@
 	else
 		atomic_inc(&priv->wmm.tx_pkts_queued);
 
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 }
 
 /*
@@ -1096,7 +1104,6 @@
 	struct mwifiex_ra_list_tbl *ptr;
 	struct mwifiex_tid_tbl *tid_ptr;
 	atomic_t *hqp;
-	unsigned long flags_ra;
 	int i, j;
 
 	/* check the BSS with highest priority first */
@@ -1122,8 +1129,7 @@
 			hqp = &priv_tmp->wmm.highest_queued_prio;
 			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
 
-				spin_lock_irqsave(&priv_tmp->wmm.
-						  ra_list_spinlock, flags_ra);
+				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);
 
 				tid_ptr = &(priv_tmp)->wmm.
 					tid_tbl_ptr[tos_to_tid[i]];
@@ -1138,9 +1144,7 @@
 					goto found;
 				}
 
-			spin_unlock_irqrestore(&priv_tmp->wmm.
-					       ra_list_spinlock,
-					       flags_ra);
+			spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
 			}
 
 			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
@@ -1162,7 +1166,7 @@
 	/* holds ra_list_spinlock */
 	if (atomic_read(hqp) > i)
 		atomic_set(hqp, i);
-	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
+	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
 
 	*priv = priv_tmp;
 	*tid = tos_to_tid[i];
@@ -1186,24 +1190,23 @@
 	struct mwifiex_adapter *adapter = priv->adapter;
 	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
 	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
-	unsigned long flags;
 
-	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
 	/*
 	 * dirty trick: we remove 'head' temporarily and reinsert it after
 	 * curr bss node. imagine list to stay fixed while head is moved
 	 */
 	list_move(&tbl[priv->bss_priority].bss_prio_head,
 		  &tbl[priv->bss_priority].bss_prio_cur->list);
-	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
 
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
 		priv->wmm.packets_out[tid]++;
 		/* same as above */
 		list_move(&tid_ptr->ra_list, &ra->list);
 	}
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 }
 
 /*
@@ -1240,8 +1243,7 @@
  */
 static void
 mwifiex_send_single_packet(struct mwifiex_private *priv,
-			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
-			   unsigned long ra_list_flags)
+			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
 			   __releases(&priv->wmm.ra_list_spinlock)
 {
 	struct sk_buff *skb, *skb_next;
@@ -1250,8 +1252,7 @@
 	struct mwifiex_txinfo *tx_info;
 
 	if (skb_queue_empty(&ptr->skb_head)) {
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
 		return;
 	}
@@ -1269,18 +1270,17 @@
 	else
 		skb_next = NULL;
 
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 
 	tx_param.next_pkt_len = ((skb_next) ?
 				 skb_next->len + sizeof(struct txpd) : 0);
 
 	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
 		/* Queue the packet back at the head */
-		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+		spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
-			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-					       ra_list_flags);
+			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 			mwifiex_write_data_complete(adapter, skb, 0, -1);
 			return;
 		}
@@ -1290,8 +1290,7 @@
 		ptr->total_pkt_count++;
 		ptr->ba_pkt_count++;
 		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 	} else {
 		mwifiex_rotate_priolists(priv, ptr, ptr_index);
 		atomic_dec(&priv->wmm.tx_pkts_queued);
@@ -1327,8 +1326,7 @@
  */
 static void
 mwifiex_send_processed_packet(struct mwifiex_private *priv,
-			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
-			      unsigned long ra_list_flags)
+			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
 			      __releases(&priv->wmm.ra_list_spinlock)
 {
 	struct mwifiex_tx_param tx_param;
@@ -1338,8 +1336,7 @@
 	struct mwifiex_txinfo *tx_info;
 
 	if (skb_queue_empty(&ptr->skb_head)) {
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 		return;
 	}
@@ -1347,8 +1344,7 @@
 
 	if (adapter->data_sent || adapter->tx_lock_flag) {
 		ptr->total_pkt_count--;
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 		skb_queue_tail(&adapter->tx_data_q, skb);
 		atomic_dec(&priv->wmm.tx_pkts_queued);
 		atomic_inc(&adapter->tx_queued);
@@ -1362,7 +1358,7 @@
 
 	tx_info = MWIFIEX_SKB_TXCB(skb);
 
-	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 
 	tx_param.next_pkt_len = ((skb_next) ?
 				 skb_next->len +
@@ -1378,11 +1374,10 @@
 	switch (ret) {
 	case -EBUSY:
 		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
-		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+		spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
-			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-					       ra_list_flags);
+			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 			mwifiex_write_data_complete(adapter, skb, 0, -1);
 			return;
 		}
@@ -1390,8 +1385,7 @@
 
 		skb_queue_tail(&ptr->skb_head, skb);
 		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 		break;
 	case -1:
 		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
@@ -1408,10 +1402,9 @@
 	if (ret != -EBUSY) {
 		mwifiex_rotate_priolists(priv, ptr, ptr_index);
 		atomic_dec(&priv->wmm.tx_pkts_queued);
-		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+		spin_lock_bh(&priv->wmm.ra_list_spinlock);
 		ptr->total_pkt_count--;
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 	}
 }
@@ -1427,7 +1420,6 @@
 	int ptr_index = 0;
 	u8 ra[ETH_ALEN];
 	int tid_del = 0, tid = 0;
-	unsigned long flags;
 
 	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
 	if (!ptr)
@@ -1437,14 +1429,14 @@
 
 	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
 
-	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 
 	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 		return -1;
 	}
 
 	if (mwifiex_is_ptr_processed(priv, ptr)) {
-		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
+		mwifiex_send_processed_packet(priv, ptr, ptr_index);
 		/* ra_list_spinlock has been freed in
 		   mwifiex_send_processed_packet() */
 		return 0;
@@ -1459,12 +1451,12 @@
 	    mwifiex_is_amsdu_allowed(priv, tid) &&
 	    mwifiex_is_11n_aggragation_possible(priv, ptr,
 						adapter->tx_buf_size))
-		mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+		mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
 		/* ra_list_spinlock has been freed in
 		 * mwifiex_11n_aggregate_pkt()
 		 */
 	else
-		mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+		mwifiex_send_single_packet(priv, ptr, ptr_index);
 		/* ra_list_spinlock has been freed in
 		 * mwifiex_send_single_packet()
 		 */
@@ -1485,11 +1477,11 @@
 	if (mwifiex_is_amsdu_allowed(priv, tid) &&
 	    mwifiex_is_11n_aggragation_possible(priv, ptr,
 						adapter->tx_buf_size))
-		mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+		mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
 		/* ra_list_spinlock has been freed in
 		   mwifiex_11n_aggregate_pkt() */
 	else
-		mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+		mwifiex_send_single_packet(priv, ptr, ptr_index);
 		/* ra_list_spinlock has been freed in
 		   mwifiex_send_single_packet() */
 	}
-- 
Gitblit v1.6.2
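
Reviewer note: every locking hunk in this patch is the same mechanical rewrite, sketched below in a minimal kernel-style example. demo_lock and the demo_* functions are hypothetical stand-ins, and the underlying assumption (stated here, not in the patch) is that these locks are never taken from hard-IRQ context, so disabling bottom halves is sufficient and the on-stack flags word becomes unnecessary.

/* Minimal sketch of the conversion -- illustrative only, not part of
 * the patch. demo_lock stands in for priv->wmm.ra_list_spinlock and
 * the other locks touched above.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Before: disables local interrupts and saves the IRQ state in an
 * on-stack flags word that must be threaded through to the unlock.
 */
static void demo_before(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* After: only disables softirq (bottom-half) processing on this CPU,
 * which suffices when the lock is never taken in hard-IRQ context;
 * no flags argument needs to be passed between callers.
 */
static void demo_after(void)
{
	spin_lock_bh(&demo_lock);
	/* ... critical section ... */
	spin_unlock_bh(&demo_lock);
}

This is why mwifiex_send_single_packet() and mwifiex_send_processed_packet() lose their ra_list_flags parameter: once the unlock no longer restores saved IRQ state, there is nothing to hand over.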
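
A second note, on the new tos_to_tid_inv table: its comment says it "inverses the tos_to_tid operation", and that property can be checked mechanically. The standalone sketch below is illustrative, not driver code; the tos_to_tid values are an assumption copied from the driver's existing table (defined elsewhere in wmm.c), and only tos_to_tid_inv is added by this patch.

/* Hypothetical userspace check -- not part of the patch. The loop
 * verifies inv[tos_to_tid[i]] == i for all eight entries, i.e. that
 * tos_to_tid_inv maps a TID back to its sequential priority index.
 */
#include <assert.h>
#include <stdio.h>

static const unsigned char tos_to_tid[8] = {
	0x01, 0x02, 0x00, 0x03, 0x04, 0x05, 0x06, 0x07	/* assumed */
};

static const unsigned char tos_to_tid_inv[8] = {
	0x02, 0x00, 0x01, 0x03, 0x04, 0x05, 0x06, 0x07	/* from the patch */
};

int main(void)
{
	for (int i = 0; i < 8; i++)
		assert(tos_to_tid_inv[tos_to_tid[i]] == i);
	puts("tos_to_tid_inv inverts tos_to_tid for all 8 priorities");
	return 0;
}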