| .. | .. |
|---|
| 1 | 1 | /* |
|---|
| 2 | | - * Marvell Wireless LAN device driver: WMM |
|---|
| 2 | + * NXP Wireless LAN device driver: WMM |
|---|
| 3 | 3 | * |
|---|
| 4 | | - * Copyright (C) 2011-2014, Marvell International Ltd. |
|---|
| 4 | + * Copyright 2011-2020 NXP |
|---|
| 5 | 5 | * |
|---|
| 6 | | - * This software file (the "File") is distributed by Marvell International |
|---|
| 7 | | - * Ltd. under the terms of the GNU General Public License Version 2, June 1991 |
|---|
| 6 | + * This software file (the "File") is distributed by NXP |
|---|
| 7 | + * under the terms of the GNU General Public License Version 2, June 1991 |
|---|
| 8 | 8 | * (the "License"). You may use, redistribute and/or modify this File in |
|---|
| 9 | 9 | * accordance with the terms and conditions of the License, a copy of which |
|---|
| 10 | 10 | * is available by writing to the Free Software Foundation, Inc., |
|---|
| .. | .. |
|---|
| 39 | 39 | |
|---|
| 40 | 40 | static bool disable_tx_amsdu; |
|---|
| 41 | 41 | module_param(disable_tx_amsdu, bool, 0644); |
|---|
| 42 | + |
|---|
| 43 | +/* This table inverts the tos_to_tid operation to get a priority |
|---|
| 44 | + * which is in sequential order, and can be compared. |
|---|
| 45 | + * Use this to compare the priority of two different TIDs. |
|---|
| 46 | + */ |
|---|
| 47 | +const u8 tos_to_tid_inv[] = { |
|---|
| 48 | + 0x02, /* from tos_to_tid[2] = 0 */ |
|---|
| 49 | + 0x00, /* from tos_to_tid[0] = 1 */ |
|---|
| 50 | + 0x01, /* from tos_to_tid[1] = 2 */ |
|---|
| 51 | + 0x03, |
|---|
| 52 | + 0x04, |
|---|
| 53 | + 0x05, |
|---|
| 54 | + 0x06, |
|---|
| 55 | + 0x07 |
|---|
| 56 | +}; |
|---|
| 42 | 57 | |
|---|
| 43 | 58 | /* WMM information IE */ |
|---|
| 44 | 59 | static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07, |
|---|
| .. | .. |
|---|
| 138 | 153 | struct mwifiex_ra_list_tbl *ra_list; |
|---|
| 139 | 154 | struct mwifiex_adapter *adapter = priv->adapter; |
|---|
| 140 | 155 | struct mwifiex_sta_node *node; |
|---|
| 141 | | - unsigned long flags; |
|---|
| 142 | 156 | |
|---|
| 143 | 157 | |
|---|
| 144 | 158 | for (i = 0; i < MAX_NUM_TID; ++i) { |
|---|
| .. | .. |
|---|
| 163 | 177 | ra_list->is_11n_enabled = IS_11N_ENABLED(priv); |
|---|
| 164 | 178 | } |
|---|
| 165 | 179 | } else { |
|---|
| 166 | | - spin_lock_irqsave(&priv->sta_list_spinlock, flags); |
|---|
| 180 | + spin_lock_bh(&priv->sta_list_spinlock); |
|---|
| 167 | 181 | node = mwifiex_get_sta_entry(priv, ra); |
|---|
| 168 | 182 | if (node) |
|---|
| 169 | 183 | ra_list->tx_paused = node->tx_pause; |
|---|
| .. | .. |
|---|
| 171 | 185 | mwifiex_is_sta_11n_enabled(priv, node); |
|---|
| 172 | 186 | if (ra_list->is_11n_enabled) |
|---|
| 173 | 187 | ra_list->max_amsdu = node->max_amsdu; |
|---|
| 174 | | - spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); |
|---|
| 188 | + spin_unlock_bh(&priv->sta_list_spinlock); |
|---|
| 175 | 189 | } |
|---|
| 176 | 190 | |
|---|
| 177 | 191 | mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n", |
|---|
| .. | .. |
|---|
| 583 | 597 | void |
|---|
| 584 | 598 | mwifiex_clean_txrx(struct mwifiex_private *priv) |
|---|
| 585 | 599 | { |
|---|
| 586 | | - unsigned long flags; |
|---|
| 587 | 600 | struct sk_buff *skb, *tmp; |
|---|
| 588 | 601 | |
|---|
| 589 | 602 | mwifiex_11n_cleanup_reorder_tbl(priv); |
|---|
| 590 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 603 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 591 | 604 | |
|---|
| 592 | 605 | mwifiex_wmm_cleanup_queues(priv); |
|---|
| 593 | 606 | mwifiex_11n_delete_all_tx_ba_stream_tbl(priv); |
|---|
| .. | .. |
|---|
| 601 | 614 | if (priv->adapter->if_ops.clean_pcie_ring && |
|---|
| 602 | 615 | !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) |
|---|
| 603 | 616 | priv->adapter->if_ops.clean_pcie_ring(priv->adapter); |
|---|
| 604 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 617 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 605 | 618 | |
|---|
| 606 | 619 | skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) { |
|---|
| 607 | 620 | skb_unlink(skb, &priv->tdls_txq); |
|---|
| .. | .. |
|---|
| 642 | 655 | { |
|---|
| 643 | 656 | struct mwifiex_ra_list_tbl *ra_list; |
|---|
| 644 | 657 | u32 pkt_cnt = 0, tx_pkts_queued; |
|---|
| 645 | | - unsigned long flags; |
|---|
| 646 | 658 | int i; |
|---|
| 647 | 659 | |
|---|
| 648 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 660 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 649 | 661 | |
|---|
| 650 | 662 | for (i = 0; i < MAX_NUM_TID; ++i) { |
|---|
| 651 | 663 | ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac); |
|---|
| .. | .. |
|---|
| 671 | 683 | atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued); |
|---|
| 672 | 684 | atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); |
|---|
| 673 | 685 | } |
|---|
| 674 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 686 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 675 | 687 | } |
|---|
| 676 | 688 | |
|---|
| 677 | 689 | /* This function updates non-tdls peer ralist tx_pause while |
|---|
| .. | .. |
|---|
| 682 | 694 | { |
|---|
| 683 | 695 | struct mwifiex_ra_list_tbl *ra_list; |
|---|
| 684 | 696 | u32 pkt_cnt = 0, tx_pkts_queued; |
|---|
| 685 | | - unsigned long flags; |
|---|
| 686 | 697 | int i; |
|---|
| 687 | 698 | |
|---|
| 688 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 699 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 689 | 700 | |
|---|
| 690 | 701 | for (i = 0; i < MAX_NUM_TID; ++i) { |
|---|
| 691 | 702 | list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list, |
|---|
| .. | .. |
|---|
| 716 | 727 | atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued); |
|---|
| 717 | 728 | atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); |
|---|
| 718 | 729 | } |
|---|
| 719 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 730 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 720 | 731 | } |
|---|
| 721 | 732 | |
|---|
| 722 | 733 | /* |
|---|
| .. | .. |
|---|
| 748 | 759 | mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr) |
|---|
| 749 | 760 | { |
|---|
| 750 | 761 | struct mwifiex_ra_list_tbl *ra_list; |
|---|
| 751 | | - unsigned long flags; |
|---|
| 752 | 762 | int i; |
|---|
| 753 | 763 | |
|---|
| 754 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 764 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 755 | 765 | |
|---|
| 756 | 766 | for (i = 0; i < MAX_NUM_TID; ++i) { |
|---|
| 757 | 767 | ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr); |
|---|
| .. | .. |
|---|
| 767 | 777 | list_del(&ra_list->list); |
|---|
| 768 | 778 | kfree(ra_list); |
|---|
| 769 | 779 | } |
|---|
| 770 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 780 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 771 | 781 | } |
|---|
| 772 | 782 | |
|---|
| 773 | 783 | /* |
|---|
| .. | .. |
|---|
| 818 | 828 | u32 tid; |
|---|
| 819 | 829 | struct mwifiex_ra_list_tbl *ra_list; |
|---|
| 820 | 830 | u8 ra[ETH_ALEN], tid_down; |
|---|
| 821 | | - unsigned long flags; |
|---|
| 822 | 831 | struct list_head list_head; |
|---|
| 823 | 832 | int tdls_status = TDLS_NOT_SETUP; |
|---|
| 824 | 833 | struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; |
|---|
| .. | .. |
|---|
| 844 | 853 | |
|---|
| 845 | 854 | tid = skb->priority; |
|---|
| 846 | 855 | |
|---|
| 847 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 856 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 848 | 857 | |
|---|
| 849 | 858 | tid_down = mwifiex_wmm_downgrade_tid(priv, tid); |
|---|
| 850 | 859 | |
|---|
| .. | .. |
|---|
| 864 | 873 | break; |
|---|
| 865 | 874 | case TDLS_SETUP_INPROGRESS: |
|---|
| 866 | 875 | skb_queue_tail(&priv->tdls_txq, skb); |
|---|
| 867 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 868 | | - flags); |
|---|
| 876 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 869 | 877 | return; |
|---|
| 870 | 878 | default: |
|---|
| 871 | 879 | list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list; |
|---|
| .. | .. |
|---|
| 881 | 889 | } |
|---|
| 882 | 890 | |
|---|
| 883 | 891 | if (!ra_list) { |
|---|
| 884 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 892 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 885 | 893 | mwifiex_write_data_complete(adapter, skb, 0, -1); |
|---|
| 886 | 894 | return; |
|---|
| 887 | 895 | } |
|---|
| .. | .. |
|---|
| 901 | 909 | else |
|---|
| 902 | 910 | atomic_inc(&priv->wmm.tx_pkts_queued); |
|---|
| 903 | 911 | |
|---|
| 904 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 912 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 905 | 913 | } |
|---|
| 906 | 914 | |
|---|
| 907 | 915 | /* |
|---|
| .. | .. |
|---|
| 1096 | 1104 | struct mwifiex_ra_list_tbl *ptr; |
|---|
| 1097 | 1105 | struct mwifiex_tid_tbl *tid_ptr; |
|---|
| 1098 | 1106 | atomic_t *hqp; |
|---|
| 1099 | | - unsigned long flags_ra; |
|---|
| 1100 | 1107 | int i, j; |
|---|
| 1101 | 1108 | |
|---|
| 1102 | 1109 | /* check the BSS with highest priority first */ |
|---|
| .. | .. |
|---|
| 1122 | 1129 | hqp = &priv_tmp->wmm.highest_queued_prio; |
|---|
| 1123 | 1130 | for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) { |
|---|
| 1124 | 1131 | |
|---|
| 1125 | | - spin_lock_irqsave(&priv_tmp->wmm. |
|---|
| 1126 | | - ra_list_spinlock, flags_ra); |
|---|
| 1132 | + spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock); |
|---|
| 1127 | 1133 | |
|---|
| 1128 | 1134 | tid_ptr = &(priv_tmp)->wmm. |
|---|
| 1129 | 1135 | tid_tbl_ptr[tos_to_tid[i]]; |
|---|
| .. | .. |
|---|
| 1138 | 1144 | goto found; |
|---|
| 1139 | 1145 | } |
|---|
| 1140 | 1146 | |
|---|
| 1141 | | - spin_unlock_irqrestore(&priv_tmp->wmm. |
|---|
| 1142 | | - ra_list_spinlock, |
|---|
| 1143 | | - flags_ra); |
|---|
| 1147 | + spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock); |
|---|
| 1144 | 1148 | } |
|---|
| 1145 | 1149 | |
|---|
| 1146 | 1150 | if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) { |
|---|
| .. | .. |
|---|
| 1162 | 1166 | /* holds ra_list_spinlock */ |
|---|
| 1163 | 1167 | if (atomic_read(hqp) > i) |
|---|
| 1164 | 1168 | atomic_set(hqp, i); |
|---|
| 1165 | | - spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra); |
|---|
| 1169 | + spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock); |
|---|
| 1166 | 1170 | |
|---|
| 1167 | 1171 | *priv = priv_tmp; |
|---|
| 1168 | 1172 | *tid = tos_to_tid[i]; |
|---|
| .. | .. |
|---|
| 1186 | 1190 | struct mwifiex_adapter *adapter = priv->adapter; |
|---|
| 1187 | 1191 | struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl; |
|---|
| 1188 | 1192 | struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid]; |
|---|
| 1189 | | - unsigned long flags; |
|---|
| 1190 | 1193 | |
|---|
| 1191 | | - spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags); |
|---|
| 1194 | + spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock); |
|---|
| 1192 | 1195 | /* |
|---|
| 1193 | 1196 | * dirty trick: we remove 'head' temporarily and reinsert it after |
|---|
| 1194 | 1197 | * curr bss node. imagine list to stay fixed while head is moved |
|---|
| 1195 | 1198 | */ |
|---|
| 1196 | 1199 | list_move(&tbl[priv->bss_priority].bss_prio_head, |
|---|
| 1197 | 1200 | &tbl[priv->bss_priority].bss_prio_cur->list); |
|---|
| 1198 | | - spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags); |
|---|
| 1201 | + spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock); |
|---|
| 1199 | 1202 | |
|---|
| 1200 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 1203 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1201 | 1204 | if (mwifiex_is_ralist_valid(priv, ra, tid)) { |
|---|
| 1202 | 1205 | priv->wmm.packets_out[tid]++; |
|---|
| 1203 | 1206 | /* same as above */ |
|---|
| 1204 | 1207 | list_move(&tid_ptr->ra_list, &ra->list); |
|---|
| 1205 | 1208 | } |
|---|
| 1206 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 1209 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1207 | 1210 | } |
|---|
| 1208 | 1211 | |
|---|
| 1209 | 1212 | /* |
|---|
| .. | .. |
|---|
| 1240 | 1243 | */ |
|---|
| 1241 | 1244 | static void |
|---|
| 1242 | 1245 | mwifiex_send_single_packet(struct mwifiex_private *priv, |
|---|
| 1243 | | - struct mwifiex_ra_list_tbl *ptr, int ptr_index, |
|---|
| 1244 | | - unsigned long ra_list_flags) |
|---|
| 1246 | + struct mwifiex_ra_list_tbl *ptr, int ptr_index) |
|---|
| 1245 | 1247 | __releases(&priv->wmm.ra_list_spinlock) |
|---|
| 1246 | 1248 | { |
|---|
| 1247 | 1249 | struct sk_buff *skb, *skb_next; |
|---|
| .. | .. |
|---|
| 1250 | 1252 | struct mwifiex_txinfo *tx_info; |
|---|
| 1251 | 1253 | |
|---|
| 1252 | 1254 | if (skb_queue_empty(&ptr->skb_head)) { |
|---|
| 1253 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1254 | | - ra_list_flags); |
|---|
| 1255 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1255 | 1256 | mwifiex_dbg(adapter, DATA, "data: nothing to send\n"); |
|---|
| 1256 | 1257 | return; |
|---|
| 1257 | 1258 | } |
|---|
| .. | .. |
|---|
| 1269 | 1270 | else |
|---|
| 1270 | 1271 | skb_next = NULL; |
|---|
| 1271 | 1272 | |
|---|
| 1272 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); |
|---|
| 1273 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1273 | 1274 | |
|---|
| 1274 | 1275 | tx_param.next_pkt_len = ((skb_next) ? skb_next->len + |
|---|
| 1275 | 1276 | sizeof(struct txpd) : 0); |
|---|
| 1276 | 1277 | |
|---|
| 1277 | 1278 | if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) { |
|---|
| 1278 | 1279 | /* Queue the packet back at the head */ |
|---|
| 1279 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); |
|---|
| 1280 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1280 | 1281 | |
|---|
| 1281 | 1282 | if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { |
|---|
| 1282 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1283 | | - ra_list_flags); |
|---|
| 1283 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1284 | 1284 | mwifiex_write_data_complete(adapter, skb, 0, -1); |
|---|
| 1285 | 1285 | return; |
|---|
| 1286 | 1286 | } |
|---|
| .. | .. |
|---|
| 1290 | 1290 | ptr->total_pkt_count++; |
|---|
| 1291 | 1291 | ptr->ba_pkt_count++; |
|---|
| 1292 | 1292 | tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; |
|---|
| 1293 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1294 | | - ra_list_flags); |
|---|
| 1293 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1295 | 1294 | } else { |
|---|
| 1296 | 1295 | mwifiex_rotate_priolists(priv, ptr, ptr_index); |
|---|
| 1297 | 1296 | atomic_dec(&priv->wmm.tx_pkts_queued); |
|---|
| .. | .. |
|---|
| 1327 | 1326 | */ |
|---|
| 1328 | 1327 | static void |
|---|
| 1329 | 1328 | mwifiex_send_processed_packet(struct mwifiex_private *priv, |
|---|
| 1330 | | - struct mwifiex_ra_list_tbl *ptr, int ptr_index, |
|---|
| 1331 | | - unsigned long ra_list_flags) |
|---|
| 1329 | + struct mwifiex_ra_list_tbl *ptr, int ptr_index) |
|---|
| 1332 | 1330 | __releases(&priv->wmm.ra_list_spinlock) |
|---|
| 1333 | 1331 | { |
|---|
| 1334 | 1332 | struct mwifiex_tx_param tx_param; |
|---|
| .. | .. |
|---|
| 1338 | 1336 | struct mwifiex_txinfo *tx_info; |
|---|
| 1339 | 1337 | |
|---|
| 1340 | 1338 | if (skb_queue_empty(&ptr->skb_head)) { |
|---|
| 1341 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1342 | | - ra_list_flags); |
|---|
| 1339 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1343 | 1340 | return; |
|---|
| 1344 | 1341 | } |
|---|
| 1345 | 1342 | |
|---|
| .. | .. |
|---|
| 1347 | 1344 | |
|---|
| 1348 | 1345 | if (adapter->data_sent || adapter->tx_lock_flag) { |
|---|
| 1349 | 1346 | ptr->total_pkt_count--; |
|---|
| 1350 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1351 | | - ra_list_flags); |
|---|
| 1347 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1352 | 1348 | skb_queue_tail(&adapter->tx_data_q, skb); |
|---|
| 1353 | 1349 | atomic_dec(&priv->wmm.tx_pkts_queued); |
|---|
| 1354 | 1350 | atomic_inc(&adapter->tx_queued); |
|---|
| .. | .. |
|---|
| 1362 | 1358 | |
|---|
| 1363 | 1359 | tx_info = MWIFIEX_SKB_TXCB(skb); |
|---|
| 1364 | 1360 | |
|---|
| 1365 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); |
|---|
| 1361 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1366 | 1362 | |
|---|
| 1367 | 1363 | tx_param.next_pkt_len = |
|---|
| 1368 | 1364 | ((skb_next) ? skb_next->len + |
|---|
| .. | .. |
|---|
| 1378 | 1374 | switch (ret) { |
|---|
| 1379 | 1375 | case -EBUSY: |
|---|
| 1380 | 1376 | mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); |
|---|
| 1381 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); |
|---|
| 1377 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1382 | 1378 | |
|---|
| 1383 | 1379 | if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { |
|---|
| 1384 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1385 | | - ra_list_flags); |
|---|
| 1380 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1386 | 1381 | mwifiex_write_data_complete(adapter, skb, 0, -1); |
|---|
| 1387 | 1382 | return; |
|---|
| 1388 | 1383 | } |
|---|
| .. | .. |
|---|
| 1390 | 1385 | skb_queue_tail(&ptr->skb_head, skb); |
|---|
| 1391 | 1386 | |
|---|
| 1392 | 1387 | tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; |
|---|
| 1393 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1394 | | - ra_list_flags); |
|---|
| 1388 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1395 | 1389 | break; |
|---|
| 1396 | 1390 | case -1: |
|---|
| 1397 | 1391 | mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret); |
|---|
| .. | .. |
|---|
| 1408 | 1402 | if (ret != -EBUSY) { |
|---|
| 1409 | 1403 | mwifiex_rotate_priolists(priv, ptr, ptr_index); |
|---|
| 1410 | 1404 | atomic_dec(&priv->wmm.tx_pkts_queued); |
|---|
| 1411 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); |
|---|
| 1405 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1412 | 1406 | ptr->total_pkt_count--; |
|---|
| 1413 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, |
|---|
| 1414 | | - ra_list_flags); |
|---|
| 1407 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1415 | 1408 | } |
|---|
| 1416 | 1409 | } |
|---|
| 1417 | 1410 | |
|---|
| .. | .. |
|---|
| 1427 | 1420 | int ptr_index = 0; |
|---|
| 1428 | 1421 | u8 ra[ETH_ALEN]; |
|---|
| 1429 | 1422 | int tid_del = 0, tid = 0; |
|---|
| 1430 | | - unsigned long flags; |
|---|
| 1431 | 1423 | |
|---|
| 1432 | 1424 | ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index); |
|---|
| 1433 | 1425 | if (!ptr) |
|---|
| .. | .. |
|---|
| 1437 | 1429 | |
|---|
| 1438 | 1430 | mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid); |
|---|
| 1439 | 1431 | |
|---|
| 1440 | | - spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 1432 | + spin_lock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1441 | 1433 | if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { |
|---|
| 1442 | | - spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
|---|
| 1434 | + spin_unlock_bh(&priv->wmm.ra_list_spinlock); |
|---|
| 1443 | 1435 | return -1; |
|---|
| 1444 | 1436 | } |
|---|
| 1445 | 1437 | |
|---|
| 1446 | 1438 | if (mwifiex_is_ptr_processed(priv, ptr)) { |
|---|
| 1447 | | - mwifiex_send_processed_packet(priv, ptr, ptr_index, flags); |
|---|
| 1439 | + mwifiex_send_processed_packet(priv, ptr, ptr_index); |
|---|
| 1448 | 1440 | /* ra_list_spinlock has been freed in |
|---|
| 1449 | 1441 | mwifiex_send_processed_packet() */ |
|---|
| 1450 | 1442 | return 0; |
|---|
| .. | .. |
|---|
| 1459 | 1451 | mwifiex_is_amsdu_allowed(priv, tid) && |
|---|
| 1460 | 1452 | mwifiex_is_11n_aggragation_possible(priv, ptr, |
|---|
| 1461 | 1453 | adapter->tx_buf_size)) |
|---|
| 1462 | | - mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); |
|---|
| 1454 | + mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index); |
|---|
| 1463 | 1455 | /* ra_list_spinlock has been freed in |
|---|
| 1464 | 1456 | * mwifiex_11n_aggregate_pkt() |
|---|
| 1465 | 1457 | */ |
|---|
| 1466 | 1458 | else |
|---|
| 1467 | | - mwifiex_send_single_packet(priv, ptr, ptr_index, flags); |
|---|
| 1459 | + mwifiex_send_single_packet(priv, ptr, ptr_index); |
|---|
| 1468 | 1460 | /* ra_list_spinlock has been freed in |
|---|
| 1469 | 1461 | * mwifiex_send_single_packet() |
|---|
| 1470 | 1462 | */ |
|---|
| .. | .. |
|---|
| 1485 | 1477 | if (mwifiex_is_amsdu_allowed(priv, tid) && |
|---|
| 1486 | 1478 | mwifiex_is_11n_aggragation_possible(priv, ptr, |
|---|
| 1487 | 1479 | adapter->tx_buf_size)) |
|---|
| 1488 | | - mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); |
|---|
| 1480 | + mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index); |
|---|
| 1489 | 1481 | /* ra_list_spinlock has been freed in |
|---|
| 1490 | 1482 | mwifiex_11n_aggregate_pkt() */ |
|---|
| 1491 | 1483 | else |
|---|
| 1492 | | - mwifiex_send_single_packet(priv, ptr, ptr_index, flags); |
|---|
| 1484 | + mwifiex_send_single_packet(priv, ptr, ptr_index); |
|---|
| 1493 | 1485 | /* ra_list_spinlock has been freed in |
|---|
| 1494 | 1486 | mwifiex_send_single_packet() */ |
|---|
| 1495 | 1487 | } |
|---|