2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -17,11 +18,6 @@
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
  *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
@@ -35,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -82,14 +79,11 @@
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_ba *ba_trig;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
+	if (!trig)
 		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
 	ba_trig = (void *)trig->data;
-
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;

 	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
 		return;
@@ -215,7 +209,9 @@
 	u16 offload_assist = 0;
 	u8 ac;

-	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+	    (ieee80211_is_probe_resp(fc) &&
+	     !is_multicast_ether_addr(hdr->addr1)))
 		tx_flags |= TX_CMD_FLG_ACK;
 	else
 		tx_flags &= ~TX_CMD_FLG_ACK;
@@ -245,14 +241,18 @@
 		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
 	} else {
-		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+		if (ieee80211_is_data(fc))
+			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+		else
+			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
+
 		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
 			tx_flags |= TX_CMD_FLG_SEQ_CTL;
 		else
 			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
 	}

-	/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
+	/* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */
 	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
 		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
 	else
@@ -280,7 +280,7 @@
 	}

 	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
-	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+	    !is_multicast_ether_addr(hdr->addr1))
 		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

 	if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -304,13 +304,30 @@
					    offload_assist));
 }

+static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
+			      struct ieee80211_tx_info *info,
+			      struct ieee80211_sta *sta, __le16 fc)
+{
+	if (info->band == NL80211_BAND_2GHZ &&
+	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+
+	if (sta && ieee80211_is_data(fc)) {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
+	}
+
+	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+}
+
 static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
 {
 	int rate_idx;
 	u8 rate_plcp;
-	u32 rate_flags;
+	u32 rate_flags = 0;

 	/* HT rate doesn't make sense for a non data frame */
 	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
@@ -324,8 +341,11 @@
 		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

-	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == NL80211_BAND_5GHZ)
+	/*
+	 * For non 2 GHZ band, remap mac80211 rate
+	 * indices into driver indices
+	 */
+	if (info->band != NL80211_BAND_2GHZ)
 		rate_idx += IWL_FIRST_OFDM_RATE;

 	/* For 2.4 GHZ band, check that there is no need to remap */
@@ -334,18 +354,19 @@
 	/* Get PLCP rate for tx_cmd->rate_n_flags */
 	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

-	if (info->band == NL80211_BAND_2GHZ &&
-	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
-	else
-		rate_flags =
-			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
-
 	/* Set CCK flag as needed */
 	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
 		rate_flags |= RATE_MCS_CCK_MSK;

 	return (u32)rate_plcp | rate_flags;
+}
+
+static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
+				       struct ieee80211_tx_info *info,
+				       struct ieee80211_sta *sta, __le16 fc)
+{
+	return iwl_mvm_get_tx_rate(mvm, info, sta) |
+		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
 }

 /*
@@ -375,20 +396,21 @@
	 */

 	if (ieee80211_is_data(fc) && sta) {
-		tx_cmd->initial_rate_index = 0;
-		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
-		return;
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
+			tx_cmd->initial_rate_index = 0;
+			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+			return;
+		}
 	} else if (ieee80211_is_back_req(fc)) {
 		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
 	}

-	mvm->mgmt_last_antenna_idx =
-		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
-				     mvm->mgmt_last_antenna_idx);
-
 	/* Set the rate in the TX cmd */
-	tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+	tx_cmd->rate_n_flags =
+		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
 }

 static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -468,13 +490,13 @@
 /*
  * Allocates and sets the Tx cmd the driver data pointers in the skb
  */
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;

 	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -482,17 +504,14 @@
 	if (unlikely(!dev_cmd))
 		return NULL;

-	/* Make sure we zero enough of dev_cmd */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
-	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
 	dev_cmd->hdr.cmd = TX_CMD;

 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		u16 offload_assist = 0;
 		u32 rate_n_flags = 0;
 		u16 flags = 0;
+		struct iwl_mvm_sta *mvmsta = sta ?
+			iwl_mvm_sta_from_mac80211(sta) : NULL;

 		if (ieee80211_is_data_qos(hdr->frame_control)) {
 			u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -512,14 +531,21 @@
 		if (!info->control.hw_key)
 			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

-		/* For data packets rate info comes from the fw */
-		if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+		/*
+		 * For data packets rate info comes from the fw. Only
+		 * set rate/antenna during connection establishment or in case
+		 * no station is given.
+		 */
+		if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
 			flags |= IWL_TX_FLAGS_CMD_RATE;
-			rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+			rate_n_flags =
+				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
							    hdr->frame_control);
 		}

-		if (mvm->trans->cfg->device_family >=
-		    IWL_DEVICE_FAMILY_22560) {
+		if (mvm->trans->trans_cfg->device_family >=
+		    IWL_DEVICE_FAMILY_AX210) {
 			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;

 			cmd->offload_assist |= cpu_to_le32(offload_assist);
@@ -566,7 +592,7 @@
 }

 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
-				       struct iwl_device_cmd *cmd)
+				       struct iwl_device_tx_cmd *cmd)
 {
 	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

@@ -577,11 +603,12 @@
 }

 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
-				      struct ieee80211_tx_info *info, __le16 fc)
+				      struct ieee80211_tx_info *info,
+				      struct ieee80211_hdr *hdr)
 {
-	struct iwl_mvm_vif *mvmvif;
-
-	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
+	struct iwl_mvm_vif *mvmvif =
+		iwl_mvm_vif_from_mac80211(info->control.vif);
+	__le16 fc = hdr->frame_control;

 	switch (info->control.vif->type) {
 	case NL80211_IFTYPE_AP:
@@ -600,7 +627,9 @@
		    (!ieee80211_is_bufferable_mmpdu(fc) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return mvm->probe_queue;
-		if (info->hw_queue == info->control.vif->cab_queue)
+
+		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
+		    is_multicast_ether_addr(hdr->addr1))
			return mvmvif->cab_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@@ -609,8 +638,6 @@
 	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;
-		if (info->hw_queue == info->control.vif->cab_queue)
-			return mvmvif->cab_queue;

		WARN_ON_ONCE(1);
		return mvm->p2p_dev_queue;
@@ -620,24 +647,80 @@
 	}
 }

+static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
+				       struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct iwl_mvm_vif *mvmvif =
+		iwl_mvm_vif_from_mac80211(info->control.vif);
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
+	struct iwl_probe_resp_data *resp_data;
+	u8 *ie, *pos;
+	u8 match[] = {
+		(WLAN_OUI_WFA >> 16) & 0xff,
+		(WLAN_OUI_WFA >> 8) & 0xff,
+		WLAN_OUI_WFA & 0xff,
+		WLAN_OUI_TYPE_WFA_P2P,
+	};
+
+	rcu_read_lock();
+
+	resp_data = rcu_dereference(mvmvif->probe_resp_data);
+	if (!resp_data)
+		goto out;
+
+	if (!resp_data->notif.noa_active)
+		goto out;
+
+	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+					  mgmt->u.probe_resp.variable,
+					  skb->len - base_len,
+					  match, 4, 2);
+	if (!ie) {
+		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
+		goto out;
+	}
+
+	if (skb_tailroom(skb) < resp_data->noa_len) {
+		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+			IWL_ERR(mvm,
+				"Failed to reallocate probe resp\n");
+			goto out;
+		}
+	}
+
+	pos = skb_put(skb, resp_data->noa_len);
+
+	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
+	/* Set length of IE body (not including ID and length itself) */
+	*pos++ = resp_data->noa_len - 2;
+	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+	*pos++ = WLAN_OUI_WFA & 0xff;
+	*pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+	memcpy(pos, &resp_data->notif.noa_attr,
+	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+	rcu_read_unlock();
+}
+
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_info info;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
-	int queue;
+	__le16 fc = hdr->frame_control;
+	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+		IEEE80211_TX_CTL_TX_OFFCHAN;
+	int queue = -1;

-	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
-	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
-	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
-	 * and hence needs to be sent on the aux queue
-	 */
-	if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-	    skb_info->control.vif->type == NL80211_IFTYPE_STATION)
-		skb_info->hw_queue = mvm->aux_queue;
+	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+		return -1;

 	memcpy(&info, skb->cb, sizeof(info));

@@ -647,23 +730,6 @@
 	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;

-	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
-			 (!info.control.vif ||
-			  info.hw_queue != info.control.vif->cab_queue)))
-		return -1;
-
-	queue = info.hw_queue;
-
-	/*
-	 * If the interface on which the frame is sent is the P2P_DEVICE
-	 * or an AP/GO interface use the broadcast station associated
-	 * with it; otherwise if the interface is a managed interface
-	 * use the AP station associated with it for multicast traffic
-	 * (this is not possible for unicast packets as a TLDS discovery
-	 * response are sent without a station entry); otherwise use the
-	 * AUX station.
-	 */
-	sta_id = mvm->aux_sta.sta_id;
 	if (info.control.vif) {
 		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -676,21 +742,32 @@
			else
				sta_id = mvmvif->mcast_sta.sta_id;

-			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
-							   hdr->frame_control);
-			if (queue < 0)
-				return -1;
-		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
-			   is_multicast_ether_addr(hdr->addr1)) {
-			u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
-
-			if (ap_sta_id != IWL_MVM_INVALID_STA)
-				sta_id = ap_sta_id;
+			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
			queue = mvm->snif_queue;
			sta_id = mvm->snif_sta.sta_id;
+		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+			   offchannel) {
+			/*
+			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
+			 * that can be used in 2 different types of vifs, P2P &
+			 * STATION.
+			 * P2P uses the offchannel queue.
+			 * STATION (HS2.0) uses the auxiliary context of the FW,
+			 * and hence needs to be sent on the aux queue.
+			 */
+			sta_id = mvm->aux_sta.sta_id;
+			queue = mvm->aux_queue;
		}
 	}
+
+	if (queue < 0) {
+		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
+		return -1;
+	}
+
+	if (unlikely(ieee80211_is_probe_resp(fc)))
+		iwl_mvm_probe_resp_set_noa(mvm, skb);

 	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

@@ -709,88 +786,14 @@
 	return 0;
 }

-#ifdef CONFIG_INET
-
-static int
-iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
-		       netdev_features_t netdev_flags,
-		       struct sk_buff_head *mpdus_skb)
-{
-	struct sk_buff *tmp, *next;
-	struct ieee80211_hdr *hdr = (void *)skb->data;
-	char cb[sizeof(skb->cb)];
-	u16 i = 0;
-	unsigned int tcp_payload_len;
-	unsigned int mss = skb_shinfo(skb)->gso_size;
-	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
-	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-
-	skb_shinfo(skb)->gso_size = num_subframes * mss;
-	memcpy(cb, skb->cb, sizeof(cb));
-
-	next = skb_gso_segment(skb, netdev_flags);
-	skb_shinfo(skb)->gso_size = mss;
-	if (WARN_ON_ONCE(IS_ERR(next)))
-		return -EINVAL;
-	else if (next)
-		consume_skb(skb);
-
-	while (next) {
-		tmp = next;
-		next = tmp->next;
-
-		memcpy(tmp->cb, cb, sizeof(tmp->cb));
-		/*
-		 * Compute the length of all the data added for the A-MSDU.
-		 * This will be used to compute the length to write in the TX
-		 * command. We have: SNAP + IP + TCP for n -1 subframes and
-		 * ETH header for n subframes.
-		 */
-		tcp_payload_len = skb_tail_pointer(tmp) -
-			skb_transport_header(tmp) -
-			tcp_hdrlen(tmp) + tmp->data_len;
-
-		if (ipv4)
-			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
-
-		if (tcp_payload_len > mss) {
-			skb_shinfo(tmp)->gso_size = mss;
-		} else {
-			if (ieee80211_is_data_qos(hdr->frame_control)) {
-				u8 *qc;
-
-				if (ipv4)
-					ip_send_check(ip_hdr(tmp));
-
-				qc = ieee80211_get_qos_ctl((void *)tmp->data);
-				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
-			}
-			skb_shinfo(tmp)->gso_size = 0;
-		}
-
-		tmp->prev = NULL;
-		tmp->next = NULL;
-
-		__skb_queue_tail(mpdus_skb, tmp);
-		i++;
-	}
-
-	return 0;
-}
-
-static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
-					   struct ieee80211_sta *sta,
-					   unsigned int tid)
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+				    struct ieee80211_sta *sta, unsigned int tid)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
 	u8 ac = tid_to_mac80211_ac[tid];
 	unsigned int txf;
-	int lmac = IWL_LMAC_24G_INDEX;
-
-	if (iwl_mvm_is_cdb_supported(mvm) &&
-	    band == NL80211_BAND_5GHZ)
-		lmac = IWL_LMAC_5G_INDEX;
+	int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);

 	/* For HE redirect to trigger based fifos */
 	if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
@@ -808,6 +811,74 @@
		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
 }

+#ifdef CONFIG_INET
+
+static int
+iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+		       netdev_features_t netdev_flags,
+		       struct sk_buff_head *mpdus_skb)
+{
+	struct sk_buff *tmp, *next;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	char cb[sizeof(skb->cb)];
+	u16 i = 0;
+	unsigned int tcp_payload_len;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+	bool qos = ieee80211_is_data_qos(hdr->frame_control);
+	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+	skb_shinfo(skb)->gso_size = num_subframes * mss;
+	memcpy(cb, skb->cb, sizeof(cb));
+
+	next = skb_gso_segment(skb, netdev_flags);
+	skb_shinfo(skb)->gso_size = mss;
+	skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+	if (WARN_ON_ONCE(IS_ERR(next)))
+		return -EINVAL;
+	else if (next)
+		consume_skb(skb);
+
+	skb_list_walk_safe(next, tmp, next) {
+		memcpy(tmp->cb, cb, sizeof(tmp->cb));
+		/*
+		 * Compute the length of all the data added for the A-MSDU.
+		 * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n -1 subframes and
+		 * ETH header for n subframes.
+		 */
+		tcp_payload_len = skb_tail_pointer(tmp) -
+			skb_transport_header(tmp) -
+			tcp_hdrlen(tmp) + tmp->data_len;
+
+		if (ipv4)
+			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+		if (tcp_payload_len > mss) {
+			skb_shinfo(tmp)->gso_size = mss;
+			skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+							   SKB_GSO_TCPV6;
+		} else {
+			if (qos) {
+				u8 *qc;
+
+				if (ipv4)
+					ip_send_check(ip_hdr(tmp));
+
+				qc = ieee80211_get_qos_ctl((void *)tmp->data);
+				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+			}
+			skb_shinfo(tmp)->gso_size = 0;
+		}
+
+		skb_mark_not_on_list(tmp);
+		__skb_queue_tail(mpdus_skb, tmp);
+		i++;
+	}
+
+	return 0;
+}
+
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
@@ -818,18 +889,15 @@
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
 	u16 snap_ip_tcp, pad;
-	unsigned int dbg_max_amsdu_len;
 	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
 	u8 tid;

 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

-	dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
-
 	if (!mvmsta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
-	    (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
+	    !mvmsta->amsdu_enabled)
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

 	/*
@@ -851,19 +919,17 @@
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during an BA session.
	 */
-	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
-	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
-		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
-
-	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
+	     !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
	    !(mvmsta->amsdu_enabled & BIT(tid)))
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

-	max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
-
-	if (unlikely(dbg_max_amsdu_len))
-		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
-				      dbg_max_amsdu_len);
+	/*
+	 * Take the min of ieee80211 station and mvm station
+	 */
+	max_amsdu_len =
+		min_t(unsigned int, sta->max_amsdu_len,
+		      iwl_mvm_max_amsdu_size(mvm, sta, tid));

 	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
@@ -930,34 +996,6 @@
 }
 #endif

-static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
-				  struct iwl_mvm_sta *mvm_sta, u8 tid,
-				  struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	u8 mac_queue = info->hw_queue;
-	struct sk_buff_head *deferred_tx_frames;
-
-	lockdep_assert_held(&mvm_sta->lock);
-
-	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
-	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
-
-	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
-
-	skb_queue_tail(deferred_tx_frames, skb);
-
-	/*
-	 * The first deferred frame should've stopped the MAC queues, so we
-	 * should never get a second deferred frame for the RA/TID.
-	 * In case of GSO the first packet may have been split, so don't warn.
-	 */
-	if (skb_queue_len(deferred_tx_frames) == 1) {
-		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
-		schedule_work(&mvm->add_stream_wk);
-	}
-}
-
 /* Check if there are any timed-out TIDs on a given shared TXQ */
 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
 {
@@ -982,7 +1020,12 @@
			       int airtime)
 {
 	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
-	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+	struct iwl_mvm_tcm_mac *mdata;
+
+	if (mac >= NUM_MAC_INDEX_DRIVER)
+		return;
+
+	mdata = &mvm->tcm.data[mac];

 	if (mvm->tcm.paused)
		return;
@@ -993,18 +1036,27 @@
 	mdata->tx.airtime += airtime;
 }

-static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
-				  struct iwl_mvm_sta *mvmsta, int tid)
+static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+				 struct iwl_mvm_sta *mvmsta, int tid)
 {
 	u32 ac = tid_to_mac80211_ac[tid];
 	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
-	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+	struct iwl_mvm_tcm_mac *mdata;
+
+	if (mac >= NUM_MAC_INDEX_DRIVER)
+		return -EINVAL;
+
+	mdata = &mvm->tcm.data[mac];

 	mdata->tx.pkts[ac]++;
+
+	return 0;
 }

 /*
- * Sets the fields in the Tx cmd that are crypto related
+ * Sets the fields in the Tx cmd that are crypto related.
+ *
+ * This function must be called with BHs disabled.
  */
 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
@@ -1012,11 +1064,11 @@
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_mvm_sta *mvmsta;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
-	u16 txq_id = info->hw_queue;
+	u16 txq_id;
 	bool is_ampdu = false;
 	int hdrlen;

@@ -1024,11 +1076,20 @@
 	fc = hdr->frame_control;
 	hdrlen = ieee80211_hdrlen(fc);

+	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+		return -1;
+
 	if (WARN_ON_ONCE(!mvmsta))
		return -1;

 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;
+
+	if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
+		return -1;
+
+	if (unlikely(ieee80211_is_probe_resp(fc)))
+		iwl_mvm_probe_resp_set_noa(mvm, skb);

 	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
@@ -1050,12 +1111,14 @@
	 */
 	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		tid = ieee80211_get_tid(hdr);
-		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+		if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
-		if (WARN_ON_ONCE(is_ampdu &&
-				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
+		if (WARN_ONCE(is_ampdu &&
+			      mvmsta->tid_data[tid].state != IWL_AGG_ON,
+			      "Invalid internal agg state %d for TID %d",
+			      mvmsta->tid_data[tid].state, tid))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
@@ -1069,39 +1132,18 @@
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
+	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
+		tid = IWL_TID_NON_QOS;
 	}

 	txq_id = mvmsta->tid_data[tid].txq_id;

 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

-	/* Check if TXQ needs to be allocated or re-activated */
-	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
-		     !mvmsta->tid_data[tid].is_tid_active)) {
-		/* If TXQ needs to be allocated... */
-		if (txq_id == IWL_MVM_INVALID_QUEUE) {
-			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
-
-			/*
-			 * The frame is now deferred, and the worker scheduled
-			 * will re-allocate it, so we can free it for now.
-			 */
-			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
-			spin_unlock(&mvmsta->lock);
-			return 0;
-		}
-
-		/* queue should always be active in new TX path */
-		WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
-		/* If we are here - TXQ exists and needs to be re-activated */
-		spin_lock(&mvm->queue_info_lock);
-		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
-		mvmsta->tid_data[tid].is_tid_active = true;
-		spin_unlock(&mvm->queue_info_lock);
-
-		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
-				    txq_id);
+	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
+		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+		spin_unlock(&mvmsta->lock);
+		return -1;
 	}

 	if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1112,11 +1154,11 @@
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
-		 * Note that the mvm->queue_info_lock isn't being taken here in
-		 * order to not serialize the TX flow. This isn't dangerous
-		 * because scheduling mvm->add_stream_wk can't ruin the state,
-		 * and if we DON'T schedule it due to some race condition then
-		 * next TX we get here we will.
+		 * Note that the no lock is taken here in order to not serialize
+		 * the TX flow. This isn't dangerous because scheduling
+		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
+		 * schedule it due to some race condition then next TX we get
+		 * here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
@@ -1124,8 +1166,9 @@
			schedule_work(&mvm->add_stream_wk);
 	}

-	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
-		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
+	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
+		     mvmsta->sta_id, tid, txq_id,
+		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);

 	/* From now on, we cannot access info->control */
 	iwl_mvm_skb_prepare_status(skb, dev_cmd);
@@ -1138,7 +1181,9 @@

 	spin_unlock(&mvmsta->lock);

-	iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+	if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
+				  tid == IWL_MAX_TID_COUNT ? 0 : tid))
+		goto drop;

 	return 0;

@@ -1146,11 +1191,12 @@
 	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 	spin_unlock(&mvmsta->lock);
 drop:
+	IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid);
 	return -1;
 }

-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-		   struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+		       struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct ieee80211_tx_info info;
@@ -1223,7 +1269,7 @@
	 * to align the wrap around of ssn so we compare relevant values.
	 */
 	normalized_ssn = tid_data->ssn;
-	if (mvm->trans->cfg->gen2)
+	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;

 	if (normalized_ssn != tid_data->next_reclaimed)
@@ -1327,7 +1373,7 @@
 	}
 }

-/**
+/*
  * translate ucode response to mac80211 tx status control values
  */
 static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
@@ -1347,14 +1393,12 @@
 	struct iwl_fw_dbg_trigger_tx_status *status_trig;
 	int i;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+				     FW_DBG_TRIGGER_TX_STATUS);
+	if (!trig)
		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
 	status_trig = (void *)trig->data;
-
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;

 	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
@@ -1371,7 +1415,7 @@
 	}
 }

-/**
+/*
  * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
  * @tx_resp: the Tx response from the fw (agg or non-agg)
  *
@@ -1405,7 +1449,6 @@
		iwl_mvm_get_agg_status(mvm, tx_resp);
 	u32 status = le16_to_cpu(agg_status->status);
 	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
-	struct iwl_mvm_sta *mvmsta;
 	struct sk_buff_head skbs;
 	u8 skb_freed = 0;
 	u8 lq_color;
@@ -1454,6 +1497,10 @@
 	default:
		break;
 	}
+
+	if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+	    ieee80211_is_mgmt(hdr->frame_control))
+		iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

 	/*
	 * If we are freeing multiple frames, mark all the frames
@@ -1549,12 +1596,16 @@
		goto out;

 	if (!IS_ERR(sta)) {
-		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));

-		if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
+		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
+			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
+
+		if (sta->wme && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;
@@ -1608,10 +1659,7 @@
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
-	} else {
-		mvmsta = NULL;
 	}
-
 out:
 	rcu_read_unlock();
 }
@@ -1674,12 +1722,10 @@
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	struct iwl_mvm_sta *mvmsta;
 	int queue = SEQ_TO_QUEUE(sequence);
+	struct ieee80211_sta *sta;

 	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
-		return;
-
-	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
		return;

 	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
@@ -1687,6 +1733,12 @@
 	rcu_read_lock();

 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+	if (WARN_ON_ONCE(!sta || !sta->wme)) {
+		rcu_read_unlock();
+		return;
+	}

 	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
@@ -1718,13 +1770,13 @@
			       struct ieee80211_tx_info *ba_info, u32 rate)
 {
 	struct sk_buff_head reclaimed_skbs;
-	struct iwl_mvm_tid_data *tid_data;
+	struct iwl_mvm_tid_data *tid_data = NULL;
 	struct ieee80211_sta *sta;
-	struct iwl_mvm_sta *mvmsta;
+	struct iwl_mvm_sta *mvmsta = NULL;
 	struct sk_buff *skb;
 	int freed;

-	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+	if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
		      tid > IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;
@@ -1734,10 +1786,43 @@
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

 	/* Reclaiming frames for a station that has been deleted ? */
-	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+	if (WARN_ON_ONCE(!sta)) {
		rcu_read_unlock();
		return;
 	}
+
+	__skb_queue_head_init(&reclaimed_skbs);
+
+	/*
+	 * Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack window (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway).
+	 */
+	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+
+	skb_queue_walk(&reclaimed_skbs, skb) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+		memset(&info->status, 0, sizeof(info->status));
+		/* Packet was transmitted successfully, failures come as single
+		 * frames because before failing a frame the firmware transmits
+		 * it without aggregation at least once.
+		 */
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	}
+
+	/*
+	 * It's possible to get a BA response after invalidating the rcu (rcu is
+	 * invalidated in order to prevent new Tx from being sent, but there may
+	 * be some frames already in-flight).
+	 * In this case we just want to reclaim, and could skip all the
+	 * sta-dependent stuff since it's in the middle of being removed
+	 * anyways.
+	 */
+	if (IS_ERR(sta))
+		goto out;

 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	tid_data = &mvmsta->tid_data[tid];
@@ -1751,15 +1836,6 @@
 	}

 	spin_lock_bh(&mvmsta->lock);
-
-	__skb_queue_head_init(&reclaimed_skbs);
-
-	/*
-	 * Release all TFDs before the SSN, i.e. all TFDs in front of
-	 * block-ack window (we assume that they've been successfully
-	 * transmitted ... if not, it's too late anyway).
-	 */
-	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

 	tid_data->next_reclaimed = index;

@@ -1781,15 +1857,6 @@
			freed++;
		else
			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
-
-		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
-
-		memset(&info->status, 0, sizeof(info->status));
-		/* Packet was transmitted successfully, failures come as single
-		 * frames because before failing a frame the firmware transmits
-		 * it without aggregation at least once.
-		 */
-		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
@@ -1867,8 +1934,14 @@
 	rcu_read_lock();

 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
-	if (!mvmsta)
-		goto out_unlock;
+	/*
+	 * It's possible to get a BA response after invalidating the rcu
+	 * (rcu is invalidated in order to prevent new Tx from being
+	 * sent, but there may be some frames already in-flight).
+	 * In this case we just want to reclaim, and could skip all the
+	 * sta-dependent stuff since it's in the middle of being removed
+	 * anyways.
+	 */

 	/* Free per TID */
 	for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
@@ -1879,7 +1952,9 @@
		if (tid == IWL_MGMT_TID)
			tid = IWL_MAX_TID_COUNT;

-		mvmsta->tid_data[i].lq_color = lq_color;
+		if (mvmsta)
+			mvmsta->tid_data[i].lq_color = lq_color;
+
		iwl_mvm_tx_reclaim(mvm, sta_id, tid,
				   (int)(le16_to_cpu(ba_tfd->q_num)),
				   le16_to_cpu(ba_tfd->tfd_index),
@@ -1887,9 +1962,9 @@
				   le32_to_cpu(ba_res->tx_rate));
 	}

-	iwl_mvm_tx_airtime(mvm, mvmsta,
-			   le32_to_cpu(ba_res->wireless_time));
-out_unlock:
+	if (mvmsta)
+		iwl_mvm_tx_airtime(mvm, mvmsta,
+				   le32_to_cpu(ba_res->wireless_time));
 	rcu_read_unlock();
 out:
 	IWL_DEBUG_TX_REPLY(mvm,
@@ -1986,7 +2061,7 @@
 	return ret;
 }

-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
 {
 	struct iwl_mvm_int_sta *int_sta = sta;
 	struct iwl_mvm_sta *mvm_sta = sta;
@@ -1995,12 +2070,10 @@
		     offsetof(struct iwl_mvm_sta, sta_id));

 	if (iwl_mvm_has_new_tx_api(mvm))
-		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
-					      0xff | BIT(IWL_MGMT_TID), flags);
+		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);

 	if (internal)
-		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
-					     flags);
+		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);

-	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
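Editor's note: the diff above introduces a new antenna-selection helper, iwl_mvm_get_tx_ant(), whose result is OR'ed into rate_n_flags by iwl_mvm_get_tx_rate_n_flags(). The standalone user-space sketch below only models that three-way decision for illustration; it is not part of the patch, and every type, field, and the RATE_MCS_ANT_POS value used here are simplified stand-ins rather than the real driver definitions.

/*
 * Minimal model of the antenna choice made by the new iwl_mvm_get_tx_ant():
 *   1) 2.4 GHz with BT holding the shared antenna -> use the non-shared antenna
 *   2) data frame for a known station            -> use that station's antenna
 *   3) otherwise (mgmt / no station)             -> use the last mgmt antenna
 * All names below are hypothetical stand-ins for the kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_RATE_MCS_ANT_POS 14	/* assumed bit position, for illustration only */

struct model_mvm {
	unsigned int non_shared_ant;		/* antenna kept away from BT on shared-antenna boards */
	unsigned int mgmt_last_antenna_idx;	/* toggled on failed mgmt frames elsewhere in the patch */
	bool band_2ghz;
	bool bt_shared_ant_avail;
};

struct model_sta {
	unsigned int tx_ant;			/* per-station antenna index tracked by the driver */
};

static unsigned int model_get_tx_ant(const struct model_mvm *mvm,
				     const struct model_sta *sta, bool is_data)
{
	if (mvm->band_2ghz && !mvm->bt_shared_ant_avail)
		return mvm->non_shared_ant << MODEL_RATE_MCS_ANT_POS;

	if (sta && is_data)
		return (1u << sta->tx_ant) << MODEL_RATE_MCS_ANT_POS;

	return (1u << mvm->mgmt_last_antenna_idx) << MODEL_RATE_MCS_ANT_POS;
}

int main(void)
{
	struct model_mvm mvm = {
		.non_shared_ant = 0x2,
		.mgmt_last_antenna_idx = 0,
		.band_2ghz = true,
		.bt_shared_ant_avail = false,
	};
	struct model_sta sta = { .tx_ant = 1 };

	/* BT coex owns the shared antenna: forced to the non-shared one */
	printf("coex case: 0x%x\n", model_get_tx_ant(&mvm, &sta, true));

	/* shared antenna available again: data frames follow the station's antenna */
	mvm.bt_shared_ant_avail = true;
	printf("data case: 0x%x\n", model_get_tx_ant(&mvm, &sta, true));

	/* no station (e.g. management frame): last management antenna is used */
	printf("mgmt case: 0x%x\n", model_get_tx_ant(&mvm, NULL, false));
	return 0;
}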
20062079 }