2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
....@@ -5,10 +5,9 @@
55 *
66 * GPL LICENSE SUMMARY
77 *
8
- * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
8
+ * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
99 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
1010 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11
- * Copyright(c) 2018 Intel Corporation
1211 *
1312 * This program is free software; you can redistribute it and/or modify
1413 * it under the terms of version 2 of the GNU General Public License as
....@@ -19,11 +18,6 @@
1918 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
2019 * General Public License for more details.
2120 *
22
- * You should have received a copy of the GNU General Public License
23
- * along with this program; if not, write to the Free Software
24
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
25
- * USA
26
- *
2721 * The full GNU General Public License is included in this distribution
2822 * in the file called COPYING.
2923 *
....@@ -33,10 +27,9 @@
3327 *
3428 * BSD LICENSE
3529 *
36
- * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
30
+ * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
3731 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
3832 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39
- * Copyright(c) 2018 Intel Corporation
4033 * All rights reserved.
4134 *
4235 * Redistribution and use in source and binary forms, with or without
....@@ -92,7 +85,7 @@
9285 int sta_id;
9386 u32 reserved_ids = 0;
9487
95
- BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
88
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
9689 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
9790
9891 lockdep_assert_held(&mvm->mutex);
....@@ -102,7 +95,7 @@
10295 reserved_ids = BIT(0);
10396
10497 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
105
- for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
98
+ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
10699 if (BIT(sta_id) & reserved_ids)
107100 continue;
108101
....@@ -203,6 +196,7 @@
203196 mpdu_dens = sta->ht_cap.ampdu_density;
204197 }
205198
199
+
206200 if (sta->vht_cap.vht_supported) {
207201 agg_size = sta->vht_cap.cap &
208202 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
....@@ -211,6 +205,23 @@
211205 } else if (sta->ht_cap.ht_supported) {
212206 agg_size = sta->ht_cap.ampdu_factor;
213207 }
208
+
209
+ /* D6.0 10.12.2 A-MPDU length limit rules
210
+ * A STA indicates the maximum length of the A-MPDU preEOF padding
211
+ * that it can receive in an HE PPDU in the Maximum A-MPDU Length
212
+ * Exponent field in its HT Capabilities, VHT Capabilities,
213
+ * and HE 6 GHz Band Capabilities elements (if present) and the
214
+ * Maximum AMPDU Length Exponent Extension field in its HE
215
+ * Capabilities element
216
+ */
217
+ if (sta->he_cap.has_he)
218
+ agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
219
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
220
+
221
+ /* Limit to max A-MPDU supported by FW */
222
+ if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
223
+ agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
224
+ STA_FLG_MAX_AGG_SIZE_SHIFT);
214225
215226 add_sta_cmd.station_flags |=
216227 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
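The clamping added in this hunk follows the usual 802.11 convention that the maximum A-MPDU length is 2^(13 + exponent) - 1 octets, with the HE "Maximum A-MPDU Length Exponent Extension" added on top of the HT/VHT exponent before the firmware ceiling is applied. The standalone sketch below is not driver code; the 4 MiB ceiling expressed as exponent 9 is an assumption standing in for STA_FLG_MAX_AGG_SIZE_4M.

/*
 * Standalone illustration: combine the HT/VHT exponent with the HE
 * extension and clamp to an assumed firmware ceiling, mirroring the
 * shape of the logic added above.
 */
#include <stdio.h>

#define FW_MAX_AGG_EXP 9        /* 2^(13+9) - 1 ~= 4 MiB, assumed ceiling */

static unsigned int ampdu_len_limit(unsigned int base_exp, unsigned int he_ext)
{
        unsigned int exp = base_exp + he_ext;

        if (exp > FW_MAX_AGG_EXP)       /* clamp to what the firmware accepts */
                exp = FW_MAX_AGG_EXP;

        /* 802.11 convention: max A-MPDU length = 2^(13 + exponent) - 1 octets */
        return (1U << (13 + exp)) - 1;
}

int main(void)
{
        /* VHT exponent 7 plus HE extension 2 -> clamped to 4 MiB - 1 */
        printf("%u\n", ampdu_len_limit(7, 2));
        return 0;
}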
....@@ -311,14 +322,11 @@
311322 struct iwl_mvm_sta *mvmsta;
312323 u32 status;
313324 u8 sta_id;
314
- int ret;
315325
316326 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
317327 return -EINVAL;
318328
319
- spin_lock_bh(&mvm->queue_info_lock);
320329 sta_id = mvm->queue_info[queue].ra_sta_id;
321
- spin_unlock_bh(&mvm->queue_info_lock);
322330
323331 rcu_read_lock();
324332
....@@ -348,10 +356,75 @@
348356
349357 /* Notify FW of queue removal from the STA queues */
350358 status = ADD_STA_SUCCESS;
351
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
352
- iwl_mvm_add_sta_cmd_size(mvm),
353
- &cmd, &status);
359
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
360
+ iwl_mvm_add_sta_cmd_size(mvm),
361
+ &cmd, &status);
362
+}
354363
364
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
365
+ u16 *queueptr, u8 tid, u8 flags)
366
+{
367
+ int queue = *queueptr;
368
+ struct iwl_scd_txq_cfg_cmd cmd = {
369
+ .scd_queue = queue,
370
+ .action = SCD_CFG_DISABLE_QUEUE,
371
+ };
372
+ int ret;
373
+
374
+ if (iwl_mvm_has_new_tx_api(mvm)) {
375
+ iwl_trans_txq_free(mvm->trans, queue);
376
+ *queueptr = IWL_MVM_INVALID_QUEUE;
377
+
378
+ return 0;
379
+ }
380
+
381
+ if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
382
+ return 0;
383
+
384
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
385
+
386
+ cmd.action = mvm->queue_info[queue].tid_bitmap ?
387
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
388
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
389
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
390
+
391
+ IWL_DEBUG_TX_QUEUES(mvm,
392
+ "Disabling TXQ #%d tids=0x%x\n",
393
+ queue,
394
+ mvm->queue_info[queue].tid_bitmap);
395
+
396
+ /* If the queue is still enabled - nothing left to do in this func */
397
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE)
398
+ return 0;
399
+
400
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
401
+ cmd.tid = mvm->queue_info[queue].txq_tid;
402
+
403
+ /* Make sure queue info is correct even though we overwrite it */
404
+ WARN(mvm->queue_info[queue].tid_bitmap,
405
+ "TXQ #%d info out-of-sync - tids=0x%x\n",
406
+ queue, mvm->queue_info[queue].tid_bitmap);
407
+
408
+ /* If we are here - the queue is freed and we can zero out these vals */
409
+ mvm->queue_info[queue].tid_bitmap = 0;
410
+
411
+ if (sta) {
412
+ struct iwl_mvm_txq *mvmtxq =
413
+ iwl_mvm_txq_from_tid(sta, tid);
414
+
415
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
416
+ }
417
+
418
+ /* Regardless if this is a reserved TXQ for a STA - mark it as false */
419
+ mvm->queue_info[queue].reserved = false;
420
+
421
+ iwl_trans_txq_disable(mvm->trans, queue, false);
422
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
423
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
424
+
425
+ if (ret)
426
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
427
+ queue, ret);
355428 return ret;
356429 }
357430
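The new iwl_mvm_disable_txq() above keys everything off queue_info[queue].tid_bitmap: the TID is unmapped first, and the SCD_CFG_DISABLE_QUEUE command is only sent once no TIDs remain mapped to the queue. A minimal userspace sketch of that decision, with illustrative names only:

/*
 * A queue is torn down only when the last TID mapped to it goes away;
 * while other TIDs remain, the queue stays enabled.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool release_tid(uint16_t *tid_bitmap, unsigned int tid)
{
        *tid_bitmap &= ~(1U << tid);    /* unmap this TID from the queue */
        return *tid_bitmap == 0;        /* true -> queue can be disabled/freed */
}

int main(void)
{
        uint16_t tids = (1U << 0) | (1U << 5);  /* queue shared by TID 0 and 5 */

        printf("disable after TID 5? %d\n", release_tid(&tids, 5)); /* 0: still used */
        printf("disable after TID 0? %d\n", release_tid(&tids, 0)); /* 1: now free  */
        return 0;
}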
....@@ -369,10 +442,8 @@
369442 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
370443 return -EINVAL;
371444
372
- spin_lock_bh(&mvm->queue_info_lock);
373445 sta_id = mvm->queue_info[queue].ra_sta_id;
374446 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
375
- spin_unlock_bh(&mvm->queue_info_lock);
376447
377448 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
378449 lockdep_is_held(&mvm->mutex));
....@@ -411,10 +482,8 @@
411482 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
412483 return -EINVAL;
413484
414
- spin_lock_bh(&mvm->queue_info_lock);
415485 sta_id = mvm->queue_info[queue].ra_sta_id;
416486 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
417
- spin_unlock_bh(&mvm->queue_info_lock);
418487
419488 rcu_read_lock();
420489
....@@ -430,9 +499,14 @@
430499 spin_lock_bh(&mvmsta->lock);
431500 /* Unmap MAC queues and TIDs from this queue */
432501 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
502
+ struct iwl_mvm_txq *mvmtxq =
503
+ iwl_mvm_txq_from_tid(sta, tid);
504
+
433505 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
434506 disable_agg_tids |= BIT(tid);
435507 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
508
+
509
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
436510 }
437511
438512 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
....@@ -454,11 +528,14 @@
454528 }
455529
456530 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
457
- bool same_sta)
531
+ struct ieee80211_sta *old_sta,
532
+ u8 new_sta_id)
458533 {
459534 struct iwl_mvm_sta *mvmsta;
460
- u8 txq_curr_ac, sta_id, tid;
535
+ u8 sta_id, tid;
461536 unsigned long disable_agg_tids = 0;
537
+ bool same_sta;
538
+ u16 queue_tmp = queue;
462539 int ret;
463540
464541 lockdep_assert_held(&mvm->mutex);
....@@ -466,11 +543,10 @@
466543 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
467544 return -EINVAL;
468545
469
- spin_lock_bh(&mvm->queue_info_lock);
470
- txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
471546 sta_id = mvm->queue_info[queue].ra_sta_id;
472547 tid = mvm->queue_info[queue].txq_tid;
473
- spin_unlock_bh(&mvm->queue_info_lock);
548
+
549
+ same_sta = sta_id == new_sta_id;
474550
475551 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
476552 if (WARN_ON(!mvmsta))
....@@ -482,14 +558,8 @@
482558 iwl_mvm_invalidate_sta_queue(mvm, queue,
483559 disable_agg_tids, false);
484560
485
- ret = iwl_mvm_disable_txq(mvm, queue,
486
- mvmsta->vif->hw_queue[txq_curr_ac],
487
- tid, 0);
561
+ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
488562 if (ret) {
489
- /* Re-mark the inactive queue as inactive */
490
- spin_lock_bh(&mvm->queue_info_lock);
491
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
492
- spin_unlock_bh(&mvm->queue_info_lock);
493563 IWL_ERR(mvm,
494564 "Failed to free inactive queue %d (ret=%d)\n",
495565 queue, ret);
....@@ -511,7 +581,12 @@
511581 u8 ac_to_queue[IEEE80211_NUM_ACS];
512582 int i;
513583
514
- lockdep_assert_held(&mvm->queue_info_lock);
584
+ /*
585
+ * This protects us against grabbing a queue that's being reconfigured
586
+ * by the inactivity checker.
587
+ */
588
+ lockdep_assert_held(&mvm->mutex);
589
+
515590 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
516591 return -EINVAL;
517592
....@@ -522,11 +597,6 @@
522597 /* Only DATA queues can be shared */
523598 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
524599 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
525
- continue;
526
-
527
- /* Don't try and take queues being reconfigured */
528
- if (mvm->queue_info[queue].status ==
529
- IWL_MVM_QUEUE_RECONFIGURING)
530600 continue;
531601
532602 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
....@@ -569,14 +639,6 @@
569639 return -ENOSPC;
570640 }
571641
572
- /* Make sure the queue isn't in the middle of being reconfigured */
573
- if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
574
- IWL_ERR(mvm,
575
- "TXQ %d is in the middle of re-config - try again\n",
576
- queue);
577
- return -EBUSY;
578
- }
579
-
580642 return queue;
581643 }
582644
....@@ -586,16 +648,15 @@
586648 * in such a case, otherwise - if no redirection required - it does nothing,
587649 * unless the %force param is true.
588650 */
589
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
590
- int ac, int ssn, unsigned int wdg_timeout,
591
- bool force)
651
+static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
652
+ int ac, int ssn, unsigned int wdg_timeout,
653
+ bool force, struct iwl_mvm_txq *txq)
592654 {
593655 struct iwl_scd_txq_cfg_cmd cmd = {
594656 .scd_queue = queue,
595657 .action = SCD_CFG_DISABLE_QUEUE,
596658 };
597659 bool shared_queue;
598
- unsigned long mq;
599660 int ret;
600661
601662 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
....@@ -609,10 +670,7 @@
609670 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
610671 * we need to check if the numerical value of X is LARGER than of Y.
611672 */
612
- spin_lock_bh(&mvm->queue_info_lock);
613673 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
614
- spin_unlock_bh(&mvm->queue_info_lock);
615
-
616674 IWL_DEBUG_TX_QUEUES(mvm,
617675 "No redirection needed on TXQ #%d\n",
618676 queue);
....@@ -622,15 +680,14 @@
622680 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
623681 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
624682 cmd.tid = mvm->queue_info[queue].txq_tid;
625
- mq = mvm->hw_queue_to_mac80211[queue];
626
- shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
627
- spin_unlock_bh(&mvm->queue_info_lock);
683
+ shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
628684
629685 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
630686 queue, iwl_mvm_ac_to_tx_fifo[ac]);
631687
632
- /* Stop MAC queues and wait for this queue to empty */
633
- iwl_mvm_stop_mac_queues(mvm, mq);
688
+ /* Stop the queue and wait for it to empty */
689
+ txq->stopped = true;
690
+
634691 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
635692 if (ret) {
636693 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
....@@ -650,9 +707,7 @@
650707 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
651708
652709 /* Update the TID "owner" of the queue */
653
- spin_lock_bh(&mvm->queue_info_lock);
654710 mvm->queue_info[queue].txq_tid = tid;
655
- spin_unlock_bh(&mvm->queue_info_lock);
656711
657712 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
658713
....@@ -661,9 +716,7 @@
661716 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
662717
663718 /* Update AC marking of the queue */
664
- spin_lock_bh(&mvm->queue_info_lock);
665719 mvm->queue_info[queue].mac80211_ac = ac;
666
- spin_unlock_bh(&mvm->queue_info_lock);
667720
668721 /*
669722 * Mark queue as shared in transport if shared
....@@ -675,10 +728,70 @@
675728 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
676729
677730 out:
678
- /* Continue using the MAC queues */
679
- iwl_mvm_start_mac_queues(mvm, mq);
731
+ /* Continue using the queue */
732
+ txq->stopped = false;
680733
681734 return ret;
735
+}
736
+
737
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
738
+ u8 minq, u8 maxq)
739
+{
740
+ int i;
741
+
742
+ lockdep_assert_held(&mvm->mutex);
743
+
744
+ if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
745
+ "max queue %d >= num_of_queues (%d)", maxq,
746
+ mvm->trans->trans_cfg->base_params->num_of_queues))
747
+ maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
748
+
749
+ /* This should not be hit with new TX path */
750
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
751
+ return -ENOSPC;
752
+
753
+ /* Start by looking for a free queue */
754
+ for (i = minq; i <= maxq; i++)
755
+ if (mvm->queue_info[i].tid_bitmap == 0 &&
756
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
757
+ return i;
758
+
759
+ return -ENOSPC;
760
+}
761
+
762
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
763
+ u8 sta_id, u8 tid, unsigned int timeout)
764
+{
765
+ int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
766
+ mvm->trans->cfg->min_256_ba_txq_size);
767
+
768
+ if (tid == IWL_MAX_TID_COUNT) {
769
+ tid = IWL_MGMT_TID;
770
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
771
+ mvm->trans->cfg->min_txq_size);
772
+ }
773
+
774
+ do {
775
+ __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
776
+
777
+ queue = iwl_trans_txq_alloc(mvm->trans, enable,
778
+ sta_id, tid, SCD_QUEUE_CFG,
779
+ size, timeout);
780
+
781
+ if (queue < 0)
782
+ IWL_DEBUG_TX_QUEUES(mvm,
783
+ "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
784
+ size, sta_id, tid, queue);
785
+ size /= 2;
786
+ } while (queue < 0 && size >= 16);
787
+
788
+ if (queue < 0)
789
+ return queue;
790
+
791
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
792
+ queue, sta_id, tid);
793
+
794
+ return queue;
682795 }
683796
684797 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
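iwl_mvm_tvqm_enable_txq() above retries the transport allocation with progressively smaller ring sizes until it succeeds or the size drops below 16 entries. A minimal sketch of that back-off, where try_alloc() is a stand-in for iwl_trans_txq_alloc():

#include <stdio.h>

static int try_alloc(int size)
{
        /* pretend the hardware can only back a 64-entry ring right now */
        return size <= 64 ? 42 : -1;    /* queue id on success, <0 on failure */
}

static int alloc_with_backoff(int size)
{
        int queue;

        do {
                queue = try_alloc(size);
                if (queue < 0)
                        size /= 2;      /* shrink the request and retry */
        } while (queue < 0 && size >= 16);

        return queue;
}

int main(void)
{
        printf("allocated queue %d\n", alloc_with_backoff(256));
        return 0;
}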
....@@ -686,9 +799,10 @@
686799 int tid)
687800 {
688801 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
802
+ struct iwl_mvm_txq *mvmtxq =
803
+ iwl_mvm_txq_from_tid(sta, tid);
689804 unsigned int wdg_timeout =
690805 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
691
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
692806 int queue = -1;
693807
694808 lockdep_assert_held(&mvm->mutex);
....@@ -696,227 +810,103 @@
696810 IWL_DEBUG_TX_QUEUES(mvm,
697811 "Allocating queue for sta %d on tid %d\n",
698812 mvmsta->sta_id, tid);
699
- queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
700
- wdg_timeout);
813
+ queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
701814 if (queue < 0)
702815 return queue;
816
+
817
+ mvmtxq->txq_id = queue;
818
+ mvm->tvqm_info[queue].txq_tid = tid;
819
+ mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
703820
704821 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
705822
706823 spin_lock_bh(&mvmsta->lock);
707824 mvmsta->tid_data[tid].txq_id = queue;
708
- mvmsta->tid_data[tid].is_tid_active = true;
709825 spin_unlock_bh(&mvmsta->lock);
710826
711827 return 0;
712828 }
713829
714
-static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
715
- struct ieee80211_sta *sta, u8 ac, int tid,
716
- struct ieee80211_hdr *hdr)
830
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
831
+ struct ieee80211_sta *sta,
832
+ int queue, u8 sta_id, u8 tid)
717833 {
718
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
719
- struct iwl_trans_txq_scd_cfg cfg = {
720
- .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
721
- .sta_id = mvmsta->sta_id,
722
- .tid = tid,
723
- .frame_limit = IWL_FRAME_LIMIT,
724
- };
725
- unsigned int wdg_timeout =
726
- iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
727
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
728
- int queue = -1;
729
- bool using_inactive_queue = false, same_sta = false;
730
- unsigned long disable_agg_tids = 0;
731
- enum iwl_mvm_agg_state queue_state;
732
- bool shared_queue = false, inc_ssn;
733
- int ssn;
734
- unsigned long tfd_queue_mask;
735
- int ret;
834
+ bool enable_queue = true;
736835
737
- lockdep_assert_held(&mvm->mutex);
738
-
739
- if (iwl_mvm_has_new_tx_api(mvm))
740
- return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
741
-
742
- spin_lock_bh(&mvmsta->lock);
743
- tfd_queue_mask = mvmsta->tfd_queue_msk;
744
- spin_unlock_bh(&mvmsta->lock);
745
-
746
- spin_lock_bh(&mvm->queue_info_lock);
747
-
748
- /*
749
- * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
750
- * exists
751
- */
752
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
753
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
754
- queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
755
- IWL_MVM_DQA_MIN_MGMT_QUEUE,
756
- IWL_MVM_DQA_MAX_MGMT_QUEUE);
757
- if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
758
- IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
759
- queue);
760
-
761
- /* If no such queue is found, we'll use a DATA queue instead */
836
+ /* Make sure this TID isn't already enabled */
837
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
838
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
839
+ queue, tid);
840
+ return false;
762841 }
763842
764
- if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
765
- (mvm->queue_info[mvmsta->reserved_queue].status ==
766
- IWL_MVM_QUEUE_RESERVED ||
767
- mvm->queue_info[mvmsta->reserved_queue].status ==
768
- IWL_MVM_QUEUE_INACTIVE)) {
769
- queue = mvmsta->reserved_queue;
770
- mvm->queue_info[queue].reserved = true;
771
- IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
843
+ /* Update mappings and refcounts */
844
+ if (mvm->queue_info[queue].tid_bitmap)
845
+ enable_queue = false;
846
+
847
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
848
+ mvm->queue_info[queue].ra_sta_id = sta_id;
849
+
850
+ if (enable_queue) {
851
+ if (tid != IWL_MAX_TID_COUNT)
852
+ mvm->queue_info[queue].mac80211_ac =
853
+ tid_to_mac80211_ac[tid];
854
+ else
855
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
856
+
857
+ mvm->queue_info[queue].txq_tid = tid;
772858 }
773859
774
- if (queue < 0)
775
- queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
776
- IWL_MVM_DQA_MIN_DATA_QUEUE,
777
- IWL_MVM_DQA_MAX_DATA_QUEUE);
860
+ if (sta) {
861
+ struct iwl_mvm_txq *mvmtxq =
862
+ iwl_mvm_txq_from_tid(sta, tid);
778863
779
- /*
780
- * Check if this queue is already allocated but inactive.
781
- * In such a case, we'll need to first free this queue before enabling
782
- * it again, so we'll mark it as reserved to make sure no new traffic
783
- * arrives on it
784
- */
785
- if (queue > 0 &&
786
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
787
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
788
- using_inactive_queue = true;
789
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
790
- IWL_DEBUG_TX_QUEUES(mvm,
791
- "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
792
- queue, mvmsta->sta_id, tid);
793
- }
794
-
795
- /* No free queue - we'll have to share */
796
- if (queue <= 0) {
797
- queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
798
- if (queue > 0) {
799
- shared_queue = true;
800
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
801
- }
802
- }
803
-
804
- /*
805
- * Mark TXQ as ready, even though it hasn't been fully configured yet,
806
- * to make sure no one else takes it.
807
- * This will allow avoiding re-acquiring the lock at the end of the
808
- * configuration. On error we'll mark it back as free.
809
- */
810
- if ((queue > 0) && !shared_queue)
811
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
812
-
813
- spin_unlock_bh(&mvm->queue_info_lock);
814
-
815
- /* This shouldn't happen - out of queues */
816
- if (WARN_ON(queue <= 0)) {
817
- IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
818
- tid, cfg.sta_id);
819
- return queue;
820
- }
821
-
822
- /*
823
- * Actual en/disablement of aggregations is through the ADD_STA HCMD,
824
- * but for configuring the SCD to send A-MPDUs we need to mark the queue
825
- * as aggregatable.
826
- * Mark all DATA queues as allowing to be aggregated at some point
827
- */
828
- cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
829
- queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
830
-
831
- /*
832
- * If this queue was previously inactive (idle) - we need to free it
833
- * first
834
- */
835
- if (using_inactive_queue) {
836
- ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
837
- if (ret)
838
- return ret;
864
+ mvmtxq->txq_id = queue;
839865 }
840866
841867 IWL_DEBUG_TX_QUEUES(mvm,
842
- "Allocating %squeue #%d to sta %d on tid %d\n",
843
- shared_queue ? "shared " : "", queue,
844
- mvmsta->sta_id, tid);
868
+ "Enabling TXQ #%d tids=0x%x\n",
869
+ queue, mvm->queue_info[queue].tid_bitmap);
845870
846
- if (shared_queue) {
847
- /* Disable any open aggs on this queue */
848
- disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
849
-
850
- if (disable_agg_tids) {
851
- IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
852
- queue);
853
- iwl_mvm_invalidate_sta_queue(mvm, queue,
854
- disable_agg_tids, false);
855
- }
856
- }
857
-
858
- ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
859
- inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
860
- ssn, &cfg, wdg_timeout);
861
- if (inc_ssn) {
862
- ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
863
- le16_add_cpu(&hdr->seq_ctrl, 0x10);
864
- }
865
-
866
- /*
867
- * Mark queue as shared in transport if shared
868
- * Note this has to be done after queue enablement because enablement
869
- * can also set this value, and there is no indication there to shared
870
- * queues
871
- */
872
- if (shared_queue)
873
- iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
874
-
875
- spin_lock_bh(&mvmsta->lock);
876
- /*
877
- * This looks racy, but it is not. We have only one packet for
878
- * this ra/tid in our Tx path since we stop the Qdisc when we
879
- * need to allocate a new TFD queue.
880
- */
881
- if (inc_ssn)
882
- mvmsta->tid_data[tid].seq_number += 0x10;
883
- mvmsta->tid_data[tid].txq_id = queue;
884
- mvmsta->tid_data[tid].is_tid_active = true;
885
- mvmsta->tfd_queue_msk |= BIT(queue);
886
- queue_state = mvmsta->tid_data[tid].state;
887
-
888
- if (mvmsta->reserved_queue == queue)
889
- mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
890
- spin_unlock_bh(&mvmsta->lock);
891
-
892
- if (!shared_queue) {
893
- ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
894
- if (ret)
895
- goto out_err;
896
-
897
- /* If we need to re-enable aggregations... */
898
- if (queue_state == IWL_AGG_ON) {
899
- ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
900
- if (ret)
901
- goto out_err;
902
- }
903
- } else {
904
- /* Redirect queue, if needed */
905
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
906
- wdg_timeout, false);
907
- if (ret)
908
- goto out_err;
909
- }
910
-
911
- return 0;
912
-
913
-out_err:
914
- iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
915
-
916
- return ret;
871
+ return enable_queue;
917872 }
918873
919
-static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
874
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
875
+ int queue, u16 ssn,
876
+ const struct iwl_trans_txq_scd_cfg *cfg,
877
+ unsigned int wdg_timeout)
878
+{
879
+ struct iwl_scd_txq_cfg_cmd cmd = {
880
+ .scd_queue = queue,
881
+ .action = SCD_CFG_ENABLE_QUEUE,
882
+ .window = cfg->frame_limit,
883
+ .sta_id = cfg->sta_id,
884
+ .ssn = cpu_to_le16(ssn),
885
+ .tx_fifo = cfg->fifo,
886
+ .aggregate = cfg->aggregate,
887
+ .tid = cfg->tid,
888
+ };
889
+ bool inc_ssn;
890
+
891
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
892
+ return false;
893
+
894
+ /* Send the enabling command if we need to */
895
+ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
896
+ return false;
897
+
898
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
899
+ NULL, wdg_timeout);
900
+ if (inc_ssn)
901
+ le16_add_cpu(&cmd.ssn, 1);
902
+
903
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
904
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
905
+
906
+ return inc_ssn;
907
+}
908
+
909
+static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
920910 {
921911 struct iwl_scd_txq_cfg_cmd cmd = {
922912 .scd_queue = queue,
....@@ -931,9 +921,7 @@
931921 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
932922 return;
933923
934
- spin_lock_bh(&mvm->queue_info_lock);
935924 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
936
- spin_unlock_bh(&mvm->queue_info_lock);
937925
938926 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
939927 return;
....@@ -950,9 +938,7 @@
950938 return;
951939 }
952940
953
- spin_lock_bh(&mvm->queue_info_lock);
954941 mvm->queue_info[queue].txq_tid = tid;
955
- spin_unlock_bh(&mvm->queue_info_lock);
956942 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
957943 queue, tid);
958944 }
....@@ -974,10 +960,8 @@
974960
975961 lockdep_assert_held(&mvm->mutex);
976962
977
- spin_lock_bh(&mvm->queue_info_lock);
978963 sta_id = mvm->queue_info[queue].ra_sta_id;
979964 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
980
- spin_unlock_bh(&mvm->queue_info_lock);
981965
982966 /* Find TID for queue, and make sure it is the only one on the queue */
983967 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
....@@ -1001,9 +985,10 @@
1001985
1002986 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1003987
1004
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
1005
- tid_to_mac80211_ac[tid], ssn,
1006
- wdg_timeout, true);
988
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid,
989
+ tid_to_mac80211_ac[tid], ssn,
990
+ wdg_timeout, true,
991
+ iwl_mvm_txq_from_tid(sta, tid));
1007992 if (ret) {
1008993 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1009994 return;
....@@ -1034,98 +1019,58 @@
10341019 }
10351020 }
10361021
1037
- spin_lock_bh(&mvm->queue_info_lock);
10381022 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1039
- spin_unlock_bh(&mvm->queue_info_lock);
10401023 }
10411024
1042
-static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1025
+/*
1026
+ * Remove inactive TIDs of a given queue.
1027
+ * If all queue TIDs are inactive - mark the queue as inactive
1028
+ * If only some the queue TIDs are inactive - unmap them from the queue
1029
+ *
1030
+ * Returns %true if all TIDs were removed and the queue could be reused.
1031
+ */
1032
+static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1033
+ struct iwl_mvm_sta *mvmsta, int queue,
1034
+ unsigned long tid_bitmap,
1035
+ unsigned long *unshare_queues,
1036
+ unsigned long *changetid_queues)
10431037 {
1044
- if (tid == IWL_MAX_TID_COUNT)
1045
- return IEEE80211_AC_VO; /* MGMT */
1038
+ int tid;
10461039
1047
- return tid_to_mac80211_ac[tid];
1048
-}
1049
-
1050
-static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1051
- struct ieee80211_sta *sta, int tid)
1052
-{
1053
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1054
- struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1055
- struct sk_buff *skb;
1056
- struct ieee80211_hdr *hdr;
1057
- struct sk_buff_head deferred_tx;
1058
- u8 mac_queue;
1059
- bool no_queue = false; /* Marks if there is a problem with the queue */
1060
- u8 ac;
1061
-
1040
+ lockdep_assert_held(&mvmsta->lock);
10621041 lockdep_assert_held(&mvm->mutex);
10631042
1064
- skb = skb_peek(&tid_data->deferred_tx_frames);
1065
- if (!skb)
1066
- return;
1067
- hdr = (void *)skb->data;
1043
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1044
+ return false;
10681045
1069
- ac = iwl_mvm_tid_to_ac_queue(tid);
1070
- mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1046
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1047
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1048
+ /* If some TFDs are still queued - don't mark TID as inactive */
1049
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1050
+ tid_bitmap &= ~BIT(tid);
10711051
1072
- if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
1073
- iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1074
- IWL_ERR(mvm,
1075
- "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1076
- mvmsta->sta_id, tid);
1077
-
1078
- /*
1079
- * Mark queue as problematic so later the deferred traffic is
1080
- * freed, as we can do nothing with it
1081
- */
1082
- no_queue = true;
1052
+ /* Don't mark as inactive any TID that has an active BA */
1053
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1054
+ tid_bitmap &= ~BIT(tid);
10831055 }
10841056
1085
- __skb_queue_head_init(&deferred_tx);
1057
+ /* If all TIDs in the queue are inactive - return it can be reused */
1058
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1059
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1060
+ return true;
1061
+ }
10861062
1087
- /* Disable bottom-halves when entering TX path */
1088
- local_bh_disable();
1089
- spin_lock(&mvmsta->lock);
1090
- skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
1091
- mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
1092
- spin_unlock(&mvmsta->lock);
1063
+ /*
1064
+ * If we are here, this is a shared queue and not all TIDs timed-out.
1065
+ * Remove the ones that did.
1066
+ */
1067
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1068
+ u16 tid_bitmap;
10931069
1094
- while ((skb = __skb_dequeue(&deferred_tx)))
1095
- if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1096
- ieee80211_free_txskb(mvm->hw, skb);
1097
- local_bh_enable();
1070
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1071
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
10981072
1099
- /* Wake queue */
1100
- iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1101
-}
1102
-
1103
-void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1104
-{
1105
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1106
- add_stream_wk);
1107
- struct ieee80211_sta *sta;
1108
- struct iwl_mvm_sta *mvmsta;
1109
- unsigned long deferred_tid_traffic;
1110
- int queue, sta_id, tid;
1111
-
1112
- /* Check inactivity of queues */
1113
- iwl_mvm_inactivity_check(mvm);
1114
-
1115
- mutex_lock(&mvm->mutex);
1116
-
1117
- /* No queue reconfiguration in TVQM mode */
1118
- if (iwl_mvm_has_new_tx_api(mvm))
1119
- goto alloc_queues;
1120
-
1121
- /* Reconfigure queues requiring reconfiguation */
1122
- for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
1123
- bool reconfig;
1124
- bool change_owner;
1125
-
1126
- spin_lock_bh(&mvm->queue_info_lock);
1127
- reconfig = (mvm->queue_info[queue].status ==
1128
- IWL_MVM_QUEUE_RECONFIGURING);
1073
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
11291074
11301075 /*
11311076 * We need to take into account a situation in which a TXQ was
....@@ -1134,35 +1079,353 @@
11341079 * ownership must be given to one of the remaining TIDs.
11351080 * This is mainly because if TID x continues - a new queue can't
11361081 * be allocated for it as long as it is an owner of another TXQ.
1082
+ *
1083
+ * Mark this queue in the right bitmap, we'll send the command
1084
+ * to the firmware later.
11371085 */
1138
- change_owner = !(mvm->queue_info[queue].tid_bitmap &
1139
- BIT(mvm->queue_info[queue].txq_tid)) &&
1140
- (mvm->queue_info[queue].status ==
1141
- IWL_MVM_QUEUE_SHARED);
1142
- spin_unlock_bh(&mvm->queue_info_lock);
1086
+ if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1087
+ set_bit(queue, changetid_queues);
11431088
1144
- if (reconfig)
1145
- iwl_mvm_unshare_queue(mvm, queue);
1146
- else if (change_owner)
1147
- iwl_mvm_change_queue_owner(mvm, queue);
1089
+ IWL_DEBUG_TX_QUEUES(mvm,
1090
+ "Removing inactive TID %d from shared Q:%d\n",
1091
+ tid, queue);
11481092 }
11491093
1150
-alloc_queues:
1151
- /* Go over all stations with deferred traffic */
1152
- for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1153
- IWL_MVM_STATION_COUNT) {
1154
- clear_bit(sta_id, mvm->sta_deferred_frames);
1155
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1156
- lockdep_is_held(&mvm->mutex));
1094
+ IWL_DEBUG_TX_QUEUES(mvm,
1095
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
1096
+ mvm->queue_info[queue].tid_bitmap);
1097
+
1098
+ /*
1099
+ * There may be different TIDs with the same mac queues, so make
1100
+ * sure all TIDs have existing corresponding mac queues enabled
1101
+ */
1102
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1103
+
1104
+ /* If the queue is marked as shared - "unshare" it */
1105
+ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1106
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1107
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1108
+ queue);
1109
+ set_bit(queue, unshare_queues);
1110
+ }
1111
+
1112
+ return false;
1113
+}
1114
+
1115
+/*
1116
+ * Check for inactivity - this includes checking if any queue
1117
+ * can be unshared and finding one (and only one) that can be
1118
+ * reused.
1119
+ * This function is also invoked as a sort of clean-up task,
1120
+ * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1121
+ *
1122
+ * Returns the queue number, or -ENOSPC.
1123
+ */
1124
+static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1125
+{
1126
+ unsigned long now = jiffies;
1127
+ unsigned long unshare_queues = 0;
1128
+ unsigned long changetid_queues = 0;
1129
+ int i, ret, free_queue = -ENOSPC;
1130
+ struct ieee80211_sta *queue_owner = NULL;
1131
+
1132
+ lockdep_assert_held(&mvm->mutex);
1133
+
1134
+ if (iwl_mvm_has_new_tx_api(mvm))
1135
+ return -ENOSPC;
1136
+
1137
+ rcu_read_lock();
1138
+
1139
+ /* we skip the CMD queue below by starting at 1 */
1140
+ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1141
+
1142
+ for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1143
+ struct ieee80211_sta *sta;
1144
+ struct iwl_mvm_sta *mvmsta;
1145
+ u8 sta_id;
1146
+ int tid;
1147
+ unsigned long inactive_tid_bitmap = 0;
1148
+ unsigned long queue_tid_bitmap;
1149
+
1150
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1151
+ if (!queue_tid_bitmap)
1152
+ continue;
1153
+
1154
+ /* If TXQ isn't in active use anyway - nothing to do here... */
1155
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1156
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1157
+ continue;
1158
+
1159
+ /* Check to see if there are inactive TIDs on this queue */
1160
+ for_each_set_bit(tid, &queue_tid_bitmap,
1161
+ IWL_MAX_TID_COUNT + 1) {
1162
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1163
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1164
+ continue;
1165
+
1166
+ inactive_tid_bitmap |= BIT(tid);
1167
+ }
1168
+
1169
+ /* If all TIDs are active - finish check on this queue */
1170
+ if (!inactive_tid_bitmap)
1171
+ continue;
1172
+
1173
+ /*
1174
+ * If we are here - the queue hadn't been served recently and is
1175
+ * in use
1176
+ */
1177
+
1178
+ sta_id = mvm->queue_info[i].ra_sta_id;
1179
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1180
+
1181
+ /*
1182
+ * If the STA doesn't exist anymore, it isn't an error. It could
1183
+ * be that it was removed since getting the queues, and in this
1184
+ * case it should've inactivated its queues anyway.
1185
+ */
11571186 if (IS_ERR_OR_NULL(sta))
11581187 continue;
11591188
11601189 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1161
- deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
11621190
1163
- for_each_set_bit(tid, &deferred_tid_traffic,
1164
- IWL_MAX_TID_COUNT + 1)
1165
- iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1191
+ spin_lock_bh(&mvmsta->lock);
1192
+ ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1193
+ inactive_tid_bitmap,
1194
+ &unshare_queues,
1195
+ &changetid_queues);
1196
+ if (ret && free_queue < 0) {
1197
+ queue_owner = sta;
1198
+ free_queue = i;
1199
+ }
1200
+ /* only unlock sta lock - we still need the queue info lock */
1201
+ spin_unlock_bh(&mvmsta->lock);
1202
+ }
1203
+
1204
+
1205
+ /* Reconfigure queues requiring reconfiguration */
1206
+ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1207
+ iwl_mvm_unshare_queue(mvm, i);
1208
+ for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1209
+ iwl_mvm_change_queue_tid(mvm, i);
1210
+
1211
+ rcu_read_unlock();
1212
+
1213
+ if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1214
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1215
+ alloc_for_sta);
1216
+ if (ret)
1217
+ return ret;
1218
+ }
1219
+
1220
+ return free_queue;
1221
+}
1222
+
1223
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1224
+ struct ieee80211_sta *sta, u8 ac, int tid)
1225
+{
1226
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1227
+ struct iwl_trans_txq_scd_cfg cfg = {
1228
+ .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1229
+ .sta_id = mvmsta->sta_id,
1230
+ .tid = tid,
1231
+ .frame_limit = IWL_FRAME_LIMIT,
1232
+ };
1233
+ unsigned int wdg_timeout =
1234
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1235
+ int queue = -1;
1236
+ u16 queue_tmp;
1237
+ unsigned long disable_agg_tids = 0;
1238
+ enum iwl_mvm_agg_state queue_state;
1239
+ bool shared_queue = false, inc_ssn;
1240
+ int ssn;
1241
+ unsigned long tfd_queue_mask;
1242
+ int ret;
1243
+
1244
+ lockdep_assert_held(&mvm->mutex);
1245
+
1246
+ if (iwl_mvm_has_new_tx_api(mvm))
1247
+ return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1248
+
1249
+ spin_lock_bh(&mvmsta->lock);
1250
+ tfd_queue_mask = mvmsta->tfd_queue_msk;
1251
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1252
+ spin_unlock_bh(&mvmsta->lock);
1253
+
1254
+ if (tid == IWL_MAX_TID_COUNT) {
1255
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1256
+ IWL_MVM_DQA_MIN_MGMT_QUEUE,
1257
+ IWL_MVM_DQA_MAX_MGMT_QUEUE);
1258
+ if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1259
+ IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1260
+ queue);
1261
+
1262
+ /* If no such queue is found, we'll use a DATA queue instead */
1263
+ }
1264
+
1265
+ if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1266
+ (mvm->queue_info[mvmsta->reserved_queue].status ==
1267
+ IWL_MVM_QUEUE_RESERVED)) {
1268
+ queue = mvmsta->reserved_queue;
1269
+ mvm->queue_info[queue].reserved = true;
1270
+ IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1271
+ }
1272
+
1273
+ if (queue < 0)
1274
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1275
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
1276
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
1277
+ if (queue < 0) {
1278
+ /* try harder - perhaps kill an inactive queue */
1279
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1280
+ }
1281
+
1282
+ /* No free queue - we'll have to share */
1283
+ if (queue <= 0) {
1284
+ queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1285
+ if (queue > 0) {
1286
+ shared_queue = true;
1287
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1288
+ }
1289
+ }
1290
+
1291
+ /*
1292
+ * Mark TXQ as ready, even though it hasn't been fully configured yet,
1293
+ * to make sure no one else takes it.
1294
+ * This will allow avoiding re-acquiring the lock at the end of the
1295
+ * configuration. On error we'll mark it back as free.
1296
+ */
1297
+ if (queue > 0 && !shared_queue)
1298
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1299
+
1300
+ /* This shouldn't happen - out of queues */
1301
+ if (WARN_ON(queue <= 0)) {
1302
+ IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1303
+ tid, cfg.sta_id);
1304
+ return queue;
1305
+ }
1306
+
1307
+ /*
1308
+ * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1309
+ * but for configuring the SCD to send A-MPDUs we need to mark the queue
1310
+ * as aggregatable.
1311
+ * Mark all DATA queues as allowing to be aggregated at some point
1312
+ */
1313
+ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1314
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1315
+
1316
+ IWL_DEBUG_TX_QUEUES(mvm,
1317
+ "Allocating %squeue #%d to sta %d on tid %d\n",
1318
+ shared_queue ? "shared " : "", queue,
1319
+ mvmsta->sta_id, tid);
1320
+
1321
+ if (shared_queue) {
1322
+ /* Disable any open aggs on this queue */
1323
+ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1324
+
1325
+ if (disable_agg_tids) {
1326
+ IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1327
+ queue);
1328
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
1329
+ disable_agg_tids, false);
1330
+ }
1331
+ }
1332
+
1333
+ inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1334
+
1335
+ /*
1336
+ * Mark queue as shared in transport if shared
1337
+ * Note this has to be done after queue enablement because enablement
1338
+ * can also set this value, and there is no indication there to shared
1339
+ * queues
1340
+ */
1341
+ if (shared_queue)
1342
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1343
+
1344
+ spin_lock_bh(&mvmsta->lock);
1345
+ /*
1346
+ * This looks racy, but it is not. We have only one packet for
1347
+ * this ra/tid in our Tx path since we stop the Qdisc when we
1348
+ * need to allocate a new TFD queue.
1349
+ */
1350
+ if (inc_ssn) {
1351
+ mvmsta->tid_data[tid].seq_number += 0x10;
1352
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1353
+ }
1354
+ mvmsta->tid_data[tid].txq_id = queue;
1355
+ mvmsta->tfd_queue_msk |= BIT(queue);
1356
+ queue_state = mvmsta->tid_data[tid].state;
1357
+
1358
+ if (mvmsta->reserved_queue == queue)
1359
+ mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1360
+ spin_unlock_bh(&mvmsta->lock);
1361
+
1362
+ if (!shared_queue) {
1363
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1364
+ if (ret)
1365
+ goto out_err;
1366
+
1367
+ /* If we need to re-enable aggregations... */
1368
+ if (queue_state == IWL_AGG_ON) {
1369
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1370
+ if (ret)
1371
+ goto out_err;
1372
+ }
1373
+ } else {
1374
+ /* Redirect queue, if needed */
1375
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1376
+ wdg_timeout, false,
1377
+ iwl_mvm_txq_from_tid(sta, tid));
1378
+ if (ret)
1379
+ goto out_err;
1380
+ }
1381
+
1382
+ return 0;
1383
+
1384
+out_err:
1385
+ queue_tmp = queue;
1386
+ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
1387
+
1388
+ return ret;
1389
+}
1390
+
1391
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1392
+{
1393
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1394
+ add_stream_wk);
1395
+
1396
+ mutex_lock(&mvm->mutex);
1397
+
1398
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1399
+
1400
+ while (!list_empty(&mvm->add_stream_txqs)) {
1401
+ struct iwl_mvm_txq *mvmtxq;
1402
+ struct ieee80211_txq *txq;
1403
+ u8 tid;
1404
+
1405
+ mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1406
+ struct iwl_mvm_txq, list);
1407
+
1408
+ txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1409
+ drv_priv);
1410
+ tid = txq->tid;
1411
+ if (tid == IEEE80211_NUM_TIDS)
1412
+ tid = IWL_MAX_TID_COUNT;
1413
+
1414
+ /*
1415
+ * We can't really do much here, but if this fails we can't
1416
+ * transmit anyway - so just don't transmit the frame etc.
1417
+ * and let them back up ... we've tried our best to allocate
1418
+ * a queue in the function itself.
1419
+ */
1420
+ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1421
+ list_del_init(&mvmtxq->list);
1422
+ continue;
1423
+ }
1424
+
1425
+ list_del_init(&mvmtxq->list);
1426
+ local_bh_disable();
1427
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1428
+ local_bh_enable();
11661429 }
11671430
11681431 mutex_unlock(&mvm->mutex);
....@@ -1174,23 +1437,17 @@
11741437 {
11751438 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
11761439 int queue;
1177
- bool using_inactive_queue = false, same_sta = false;
11781440
11791441 /* queue reserving is disabled on new TX path */
11801442 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
11811443 return 0;
11821444
1183
- /*
1184
- * Check for inactive queues, so we don't reach a situation where we
1185
- * can't add a STA due to a shortage in queues that doesn't really exist
1186
- */
1187
- iwl_mvm_inactivity_check(mvm);
1188
-
1189
- spin_lock_bh(&mvm->queue_info_lock);
1445
+ /* run the general cleanup/unsharing of queues */
1446
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
11901447
11911448 /* Make sure we have free resources for this STA */
11921449 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1193
- !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
1450
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
11941451 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
11951452 IWL_MVM_QUEUE_FREE))
11961453 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
....@@ -1199,26 +1456,16 @@
11991456 IWL_MVM_DQA_MIN_DATA_QUEUE,
12001457 IWL_MVM_DQA_MAX_DATA_QUEUE);
12011458 if (queue < 0) {
1202
- spin_unlock_bh(&mvm->queue_info_lock);
1203
- IWL_ERR(mvm, "No available queues for new station\n");
1204
- return -ENOSPC;
1205
- } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1206
- /*
1207
- * If this queue is already allocated but inactive we'll need to
1208
- * first free this queue before enabling it again, we'll mark
1209
- * it as reserved to make sure no new traffic arrives on it
1210
- */
1211
- using_inactive_queue = true;
1212
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
1459
+ /* try again - this time kick out a queue if needed */
1460
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1461
+ if (queue < 0) {
1462
+ IWL_ERR(mvm, "No available queues for new station\n");
1463
+ return -ENOSPC;
1464
+ }
12131465 }
12141466 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
12151467
1216
- spin_unlock_bh(&mvm->queue_info_lock);
1217
-
12181468 mvmsta->reserved_queue = queue;
1219
-
1220
- if (using_inactive_queue)
1221
- iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
12221469
12231470 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
12241471 queue, mvmsta->sta_id);
....@@ -1234,10 +1481,11 @@
12341481 * Note that re-enabling aggregations isn't done in this function.
12351482 */
12361483 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1237
- struct iwl_mvm_sta *mvm_sta)
1484
+ struct ieee80211_sta *sta)
12381485 {
1239
- unsigned int wdg_timeout =
1240
- iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1486
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1487
+ unsigned int wdg =
1488
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
12411489 int i;
12421490 struct iwl_trans_txq_scd_cfg cfg = {
12431491 .sta_id = mvm_sta->sta_id,
....@@ -1253,23 +1501,25 @@
12531501 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
12541502 int txq_id = tid_data->txq_id;
12551503 int ac;
1256
- u8 mac_queue;
12571504
12581505 if (txq_id == IWL_MVM_INVALID_QUEUE)
12591506 continue;
12601507
1261
- skb_queue_head_init(&tid_data->deferred_tx_frames);
1262
-
12631508 ac = tid_to_mac80211_ac[i];
1264
- mac_queue = mvm_sta->vif->hw_queue[ac];
12651509
12661510 if (iwl_mvm_has_new_tx_api(mvm)) {
12671511 IWL_DEBUG_TX_QUEUES(mvm,
12681512 "Re-mapping sta %d tid %d\n",
12691513 mvm_sta->sta_id, i);
1270
- txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1271
- mvm_sta->sta_id,
1272
- i, wdg_timeout);
1514
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1515
+ i, wdg);
1516
+ /*
1517
+ * on failures, just set it to IWL_MVM_INVALID_QUEUE
1518
+ * to try again later, we have no other good way of
1519
+ * failing here
1520
+ */
1521
+ if (txq_id < 0)
1522
+ txq_id = IWL_MVM_INVALID_QUEUE;
12731523 tid_data->txq_id = txq_id;
12741524
12751525 /*
....@@ -1292,8 +1542,7 @@
12921542 "Re-mapping sta %d tid %d to queue %d\n",
12931543 mvm_sta->sta_id, i, txq_id);
12941544
1295
- iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1296
- wdg_timeout);
1545
+ iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
12971546 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
12981547 }
12991548 }
....@@ -1312,8 +1561,15 @@
13121561
13131562 memset(&cmd, 0, sizeof(cmd));
13141563 cmd.sta_id = sta->sta_id;
1315
- cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1316
- color));
1564
+
1565
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
1566
+ 0) >= 12 &&
1567
+ sta->type == IWL_STA_AUX_ACTIVITY)
1568
+ cmd.mac_id_n_color = cpu_to_le32(mac_id);
1569
+ else
1570
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1571
+ color));
1572
+
13171573 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
13181574 cmd.station_type = sta->type;
13191575
....@@ -1383,7 +1639,7 @@
13831639 if (ret)
13841640 goto err;
13851641
1386
- iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1642
+ iwl_mvm_realloc_queues_after_restart(mvm, sta);
13871643 sta_update = true;
13881644 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
13891645 goto update_fw;
....@@ -1393,7 +1649,7 @@
13931649 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
13941650 mvmvif->color);
13951651 mvm_sta->vif = vif;
1396
- if (!mvm->trans->cfg->gen2)
1652
+ if (!mvm->trans->trans_cfg->gen2)
13971653 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
13981654 else
13991655 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
....@@ -1416,9 +1672,17 @@
14161672 * frames until the queue is allocated
14171673 */
14181674 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1419
- skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
14201675 }
1421
- mvm_sta->deferred_traffic_tid_map = 0;
1676
+
1677
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1678
+ struct iwl_mvm_txq *mvmtxq =
1679
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
1680
+
1681
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1682
+ INIT_LIST_HEAD(&mvmtxq->list);
1683
+ atomic_set(&mvmtxq->tx_request, 0);
1684
+ }
1685
+
14221686 mvm_sta->agg_tids = 0;
14231687
14241688 if (iwl_mvm_has_new_rx_api(mvm) &&
....@@ -1457,6 +1721,10 @@
14571721 */
14581722 if (iwl_mvm_has_tlc_offload(mvm))
14591723 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1724
+ else
1725
+ spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1726
+
1727
+ iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
14601728
14611729 update_fw:
14621730 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
....@@ -1551,9 +1819,9 @@
15511819
15521820 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
15531821 struct ieee80211_vif *vif,
1554
- struct iwl_mvm_sta *mvm_sta)
1822
+ struct ieee80211_sta *sta)
15551823 {
1556
- int ac;
1824
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
15571825 int i;
15581826
15591827 lockdep_assert_held(&mvm->mutex);
....@@ -1562,10 +1830,17 @@
15621830 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
15631831 continue;
15641832
1565
- ac = iwl_mvm_tid_to_ac_queue(i);
1566
- iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1567
- vif->hw_queue[ac], i, 0);
1833
+ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
1834
+ 0);
15681835 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1836
+ }
1837
+
1838
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1839
+ struct iwl_mvm_txq *mvmtxq =
1840
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
1841
+
1842
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1843
+ list_del_init(&mvmtxq->list);
15691844 }
15701845 }
15711846
....@@ -1612,7 +1887,7 @@
16121887 return ret;
16131888
16141889 /* flush its queues here since we are freeing mvm_sta */
1615
- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1890
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
16161891 if (ret)
16171892 return ret;
16181893 if (iwl_mvm_has_new_tx_api(mvm)) {
....@@ -1628,7 +1903,7 @@
16281903
16291904 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
16301905
1631
- iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1906
+ iwl_mvm_disable_sta_queues(mvm, vif, sta);
16321907
16331908 /* If there is a TXQ still marked as reserved - free it */
16341909 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
....@@ -1640,18 +1915,14 @@
16401915 * is still marked as IWL_MVM_QUEUE_RESERVED, and
16411916 * should be manually marked as free again
16421917 */
1643
- spin_lock_bh(&mvm->queue_info_lock);
16441918 status = &mvm->queue_info[reserved_txq].status;
16451919 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
16461920 (*status != IWL_MVM_QUEUE_FREE),
16471921 "sta_id %d reserved txq %d status %d",
1648
- sta_id, reserved_txq, *status)) {
1649
- spin_unlock_bh(&mvm->queue_info_lock);
1922
+ sta_id, reserved_txq, *status))
16501923 return -EINVAL;
1651
- }
16521924
16531925 *status = IWL_MVM_QUEUE_FREE;
1654
- spin_unlock_bh(&mvm->queue_info_lock);
16551926 }
16561927
16571928 if (vif->type == NL80211_IFTYPE_STATION &&
....@@ -1662,10 +1933,6 @@
16621933
16631934 /* unassoc - go ahead - remove the AP STA now */
16641935 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1665
-
1666
- /* clear d0i3_ap_sta_id if no longer relevant */
1667
- if (mvm->d0i3_ap_sta_id == sta_id)
1668
- mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
16691936 }
16701937
16711938 /*
....@@ -1729,33 +1996,74 @@
17291996 sta->sta_id = IWL_MVM_INVALID_STA;
17301997 }
17311998
1732
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1999
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
17332000 u8 sta_id, u8 fifo)
17342001 {
1735
- unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1736
- mvm->cfg->base_params->wd_timeout :
1737
- IWL_WATCHDOG_DISABLED;
2002
+ unsigned int wdg_timeout =
2003
+ mvm->trans->trans_cfg->base_params->wd_timeout;
2004
+ struct iwl_trans_txq_scd_cfg cfg = {
2005
+ .fifo = fifo,
2006
+ .sta_id = sta_id,
2007
+ .tid = IWL_MAX_TID_COUNT,
2008
+ .aggregate = false,
2009
+ .frame_limit = IWL_FRAME_LIMIT,
2010
+ };
17382011
1739
- if (iwl_mvm_has_new_tx_api(mvm)) {
1740
- int tvqm_queue =
1741
- iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
1742
- IWL_MAX_TID_COUNT,
1743
- wdg_timeout);
1744
- *queue = tvqm_queue;
1745
- } else {
1746
- struct iwl_trans_txq_scd_cfg cfg = {
1747
- .fifo = fifo,
1748
- .sta_id = sta_id,
1749
- .tid = IWL_MAX_TID_COUNT,
1750
- .aggregate = false,
1751
- .frame_limit = IWL_FRAME_LIMIT,
1752
- };
2012
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
17532013
1754
- iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
1755
- }
2014
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
17562015 }
17572016
1758
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2017
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2018
+{
2019
+ unsigned int wdg_timeout =
2020
+ mvm->trans->trans_cfg->base_params->wd_timeout;
2021
+
2022
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2023
+
2024
+ return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2025
+ wdg_timeout);
2026
+}
2027
+
2028
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2029
+ int maccolor, u8 *addr,
2030
+ struct iwl_mvm_int_sta *sta,
2031
+ u16 *queue, int fifo)
2032
+{
2033
+ int ret;
2034
+
2035
+ /* Map queue to fifo - needs to happen before adding station */
2036
+ if (!iwl_mvm_has_new_tx_api(mvm))
2037
+ iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2038
+
2039
+ ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2040
+ if (ret) {
2041
+ if (!iwl_mvm_has_new_tx_api(mvm))
2042
+ iwl_mvm_disable_txq(mvm, NULL, queue,
2043
+ IWL_MAX_TID_COUNT, 0);
2044
+ return ret;
2045
+ }
2046
+
2047
+ /*
2048
+ * For 22000 firmware and on we cannot add queue to a station unknown
2049
+ * to firmware so enable queue here - after the station was added
2050
+ */
2051
+ if (iwl_mvm_has_new_tx_api(mvm)) {
2052
+ int txq;
2053
+
2054
+ txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2055
+ if (txq < 0) {
2056
+ iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2057
+ return txq;
2058
+ }
2059
+
2060
+ *queue = txq;
2061
+ }
2062
+
2063
+ return 0;
2064
+}
2065
+
2066
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
17592067 {
17602068 int ret;
17612069
....@@ -1768,27 +2076,17 @@
17682076 if (ret)
17692077 return ret;
17702078
1771
- /* Map Aux queue to fifo - needs to happen before adding Aux station */
1772
- if (!iwl_mvm_has_new_tx_api(mvm))
1773
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1774
- mvm->aux_sta.sta_id,
1775
- IWL_MVM_TX_FIFO_MCAST);
1776
-
1777
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1778
- MAC_INDEX_AUX, 0);
2079
+ /*
2080
+ * In CDB NICs we need to specify which lmac to use for aux activity;
2081
+ * the lmac_id is passed in the mac_id argument slot of the helper
2082
+ */
2083
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2084
+ &mvm->aux_sta, &mvm->aux_queue,
2085
+ IWL_MVM_TX_FIFO_MCAST);
17792086 if (ret) {
17802087 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
17812088 return ret;
17822089 }
1783
-
1784
- /*
1785
- * For 22000 firmware and on we cannot add queue to a station unknown
1786
- * to firmware so enable queue here - after the station was added
1787
- */
1788
- if (iwl_mvm_has_new_tx_api(mvm))
1789
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1790
- mvm->aux_sta.sta_id,
1791
- IWL_MVM_TX_FIFO_MCAST);
17922090
17932091 return 0;
17942092 }
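
The two hunks above collapse the aux and sniffer station setup into the new iwl_mvm_add_int_sta_with_queue() helper. Condensed into an illustrative sketch (these are not lines from the patch; error unwinding is omitted), the ordering rule it encodes is:

    /* condensed, illustrative-only view of iwl_mvm_add_int_sta_with_queue() */
    if (!iwl_mvm_has_new_tx_api(mvm))
            /* pre-22000: the queue->FIFO mapping must exist before ADD_STA */
            iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

    ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);

    if (!ret && iwl_mvm_has_new_tx_api(mvm))
            /* 22000+: the firmware rejects queues for stations it does not
             * know, so the TVQM queue is allocated only after ADD_STA */
            *queue = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
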
....@@ -1796,31 +2094,13 @@
17962094 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
17972095 {
17982096 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1799
- int ret;
18002097
18012098 lockdep_assert_held(&mvm->mutex);
18022099
1803
- /* Map snif queue to fifo - must happen before adding snif station */
1804
- if (!iwl_mvm_has_new_tx_api(mvm))
1805
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1806
- mvm->snif_sta.sta_id,
2100
+ return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2101
+ NULL, &mvm->snif_sta,
2102
+ &mvm->snif_queue,
18072103 IWL_MVM_TX_FIFO_BE);
1808
-
1809
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1810
- mvmvif->id, 0);
1811
- if (ret)
1812
- return ret;
1813
-
1814
- /*
1815
- * For 22000 firmware and on we cannot add queue to a station unknown
1816
- * to firmware so enable queue here - after the station was added
1817
- */
1818
- if (iwl_mvm_has_new_tx_api(mvm))
1819
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1820
- mvm->snif_sta.sta_id,
1821
- IWL_MVM_TX_FIFO_BE);
1822
-
1823
- return 0;
18242104 }
18252105
18262106 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
....@@ -1829,8 +2109,10 @@
18292109
18302110 lockdep_assert_held(&mvm->mutex);
18312111
1832
- iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
1833
- IWL_MAX_TID_COUNT, 0);
2112
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2113
+ return -EINVAL;
2114
+
2115
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
18342116 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
18352117 if (ret)
18362118 IWL_WARN(mvm, "Failed sending remove station\n");
....@@ -1838,16 +2120,27 @@
18382120 return ret;
18392121 }
18402122
2123
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2124
+{
2125
+ int ret;
2126
+
2127
+ lockdep_assert_held(&mvm->mutex);
2128
+
2129
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2130
+ return -EINVAL;
2131
+
2132
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
2133
+ ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2134
+ if (ret)
2135
+ IWL_WARN(mvm, "Failed sending remove station\n");
2136
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2137
+
2138
+ return ret;
2139
+}
2140
+
18412141 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
18422142 {
18432143 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1844
-}
1845
-
1846
-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1847
-{
1848
- lockdep_assert_held(&mvm->mutex);
1849
-
1850
- iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
18512144 }
18522145
18532146 /*
....@@ -1880,17 +2173,18 @@
18802173
18812174 if (!iwl_mvm_has_new_tx_api(mvm)) {
18822175 if (vif->type == NL80211_IFTYPE_AP ||
1883
- vif->type == NL80211_IFTYPE_ADHOC)
2176
+ vif->type == NL80211_IFTYPE_ADHOC) {
18842177 queue = mvm->probe_queue;
1885
- else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2178
+ } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
18862179 queue = mvm->p2p_dev_queue;
1887
- else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
2180
+ } else {
2181
+ WARN(1, "Missing required TXQ for adding bcast STA\n");
18882182 return -EINVAL;
2183
+ }
18892184
18902185 bsta->tfd_queue_msk |= BIT(queue);
18912186
1892
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1893
- &cfg, wdg_timeout);
2187
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
18942188 }
18952189
18962190 if (vif->type == NL80211_IFTYPE_ADHOC)
....@@ -1909,10 +2203,13 @@
19092203 * to firmware so enable queue here - after the station was added
19102204 */
19112205 if (iwl_mvm_has_new_tx_api(mvm)) {
1912
- queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1913
- bsta->sta_id,
2206
+ queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
19142207 IWL_MAX_TID_COUNT,
19152208 wdg_timeout);
2209
+ if (queue < 0) {
2210
+ iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2211
+ return queue;
2212
+ }
19162213
19172214 if (vif->type == NL80211_IFTYPE_AP ||
19182215 vif->type == NL80211_IFTYPE_ADHOC)
....@@ -1928,19 +2225,19 @@
19282225 struct ieee80211_vif *vif)
19292226 {
19302227 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1931
- int queue;
2228
+ u16 *queueptr, queue;
19322229
19332230 lockdep_assert_held(&mvm->mutex);
19342231
1935
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2232
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
19362233
19372234 switch (vif->type) {
19382235 case NL80211_IFTYPE_AP:
19392236 case NL80211_IFTYPE_ADHOC:
1940
- queue = mvm->probe_queue;
2237
+ queueptr = &mvm->probe_queue;
19412238 break;
19422239 case NL80211_IFTYPE_P2P_DEVICE:
1943
- queue = mvm->p2p_dev_queue;
2240
+ queueptr = &mvm->p2p_dev_queue;
19442241 break;
19452242 default:
19462243 WARN(1, "Can't free bcast queue on vif type %d\n",
....@@ -1948,7 +2245,8 @@
19482245 return;
19492246 }
19502247
1951
- iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2248
+ queue = *queueptr;
2249
+ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
19522250 if (iwl_mvm_has_new_tx_api(mvm))
19532251 return;
19542252
....@@ -2050,7 +2348,8 @@
20502348 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
20512349 const u8 *maddr = _maddr;
20522350 struct iwl_trans_txq_scd_cfg cfg = {
2053
- .fifo = IWL_MVM_TX_FIFO_MCAST,
2351
+ .fifo = vif->type == NL80211_IFTYPE_AP ?
2352
+ IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
20542353 .sta_id = msta->sta_id,
20552354 .tid = 0,
20562355 .aggregate = false,
....@@ -2071,10 +2370,8 @@
20712370 * Note that this is done here as we want to avoid making DQA
20722371 * changes in mac80211 layer.
20732372 */
2074
- if (vif->type == NL80211_IFTYPE_ADHOC) {
2075
- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2076
- mvmvif->cab_queue = vif->cab_queue;
2077
- }
2373
+ if (vif->type == NL80211_IFTYPE_ADHOC)
2374
+ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
20782375
20792376 /*
20802377 * While in previous FWs we had to exclude cab queue from TFD queue
....@@ -2082,16 +2379,14 @@
20822379 */
20832380 if (!iwl_mvm_has_new_tx_api(mvm) &&
20842381 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2085
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2086
- &cfg, timeout);
2087
- msta->tfd_queue_msk |= BIT(vif->cab_queue);
2382
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2383
+ timeout);
2384
+ msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
20882385 }
20892386 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
20902387 mvmvif->id, mvmvif->color);
2091
- if (ret) {
2092
- iwl_mvm_dealloc_int_sta(mvm, msta);
2093
- return ret;
2094
- }
2388
+ if (ret)
2389
+ goto err;
20952390
20962391 /*
20972392 * Enable cab queue after the ADD_STA command is sent.
....@@ -2101,17 +2396,76 @@
21012396 * tfd_queue_mask.
21022397 */
21032398 if (iwl_mvm_has_new_tx_api(mvm)) {
2104
- int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2105
- msta->sta_id,
2399
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
21062400 0,
21072401 timeout);
2402
+ if (queue < 0) {
2403
+ ret = queue;
2404
+ goto err;
2405
+ }
21082406 mvmvif->cab_queue = queue;
21092407 } else if (!fw_has_api(&mvm->fw->ucode_capa,
21102408 IWL_UCODE_TLV_API_STA_TYPE))
2111
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2112
- &cfg, timeout);
2409
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2410
+ timeout);
21132411
21142412 return 0;
2413
+err:
2414
+ iwl_mvm_dealloc_int_sta(mvm, msta);
2415
+ return ret;
2416
+}
2417
+
2418
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2419
+ struct ieee80211_key_conf *keyconf,
2420
+ bool mcast)
2421
+{
2422
+ union {
2423
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2424
+ struct iwl_mvm_add_sta_key_cmd cmd;
2425
+ } u = {};
2426
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2427
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2428
+ __le16 key_flags;
2429
+ int ret, size;
2430
+ u32 status;
2431
+
2432
+ /* This is a valid situation for GTK removal */
2433
+ if (sta_id == IWL_MVM_INVALID_STA)
2434
+ return 0;
2435
+
2436
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2437
+ STA_KEY_FLG_KEYID_MSK);
2438
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2439
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2440
+
2441
+ if (mcast)
2442
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2443
+
2444
+ /*
2445
+ * The fields assigned here are in the same location at the start
2446
+ * of the command, so we can do this union trick.
2447
+ */
2448
+ u.cmd.common.key_flags = key_flags;
2449
+ u.cmd.common.key_offset = keyconf->hw_key_idx;
2450
+ u.cmd.common.sta_id = sta_id;
2451
+
2452
+ size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2453
+
2454
+ status = ADD_STA_SUCCESS;
2455
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2456
+ &status);
2457
+
2458
+ switch (status) {
2459
+ case ADD_STA_SUCCESS:
2460
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2461
+ break;
2462
+ default:
2463
+ ret = -EIO;
2464
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2465
+ break;
2466
+ }
2467
+
2468
+ return ret;
21152469 }
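
The "union trick" comment in the moved function relies on both command versions starting with the same fields. A minimal standalone sketch of that pattern, using hypothetical example_* types rather than the real iwl_mvm_add_sta_key_cmd layouts:

    #include <linux/string.h>
    #include <linux/types.h>

    /* hypothetical command layouts that share the same leading fields */
    struct example_key_cmd_common { __le16 key_flags; u8 key_offset; u8 sta_id; };
    struct example_key_cmd_v1 { struct example_key_cmd_common common; u8 key[16]; };
    struct example_key_cmd_v2 { struct example_key_cmd_common common; u8 key[32]; __le64 rx_mic_key; };

    static int example_build_key_cmd(bool new_api, u8 sta_id, void *out)
    {
            union {
                    struct example_key_cmd_v1 cmd_v1;
                    struct example_key_cmd_v2 cmd;
            } u = {};
            int size;

            /* the common prefix sits at offset 0 in both versions, so filling
             * it through u.cmd is correct no matter which version gets sent */
            u.cmd.common.sta_id = sta_id;

            size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
            memcpy(out, &u, size);
            return size;
    }
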
21162470
21172471 /*
....@@ -2125,10 +2479,9 @@
21252479
21262480 lockdep_assert_held(&mvm->mutex);
21272481
2128
- iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2482
+ iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
21292483
2130
- iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2131
- 0, 0);
2484
+ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
21322485
21332486 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
21342487 if (ret)
....@@ -2141,7 +2494,7 @@
21412494
21422495 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
21432496 {
2144
- struct iwl_mvm_delba_notif notif = {
2497
+ struct iwl_mvm_rss_sync_notif notif = {
21452498 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
21462499 .metadata.sync = 1,
21472500 .delba.baid = baid,
....@@ -2489,15 +2842,6 @@
24892842
24902843 spin_lock_bh(&mvmsta->lock);
24912844
2492
- /* possible race condition - we entered D0i3 while starting agg */
2493
- if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2494
- spin_unlock_bh(&mvmsta->lock);
2495
- IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2496
- return -EIO;
2497
- }
2498
-
2499
- spin_lock(&mvm->queue_info_lock);
2500
-
25012845 /*
25022846 * Note the possible cases:
25032847 * 1. An enabled TXQ - TXQ needs to become agg'ed
....@@ -2511,7 +2855,7 @@
25112855 IWL_MVM_DQA_MAX_DATA_QUEUE);
25122856 if (ret < 0) {
25132857 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2514
- goto release_locks;
2858
+ goto out;
25152859 }
25162860
25172861 txq_id = ret;
....@@ -2530,10 +2874,8 @@
25302874 IWL_DEBUG_TX_QUEUES(mvm,
25312875 "Can't start tid %d agg on shared queue!\n",
25322876 tid);
2533
- goto release_locks;
2877
+ goto out;
25342878 }
2535
-
2536
- spin_unlock(&mvm->queue_info_lock);
25372879
25382880 IWL_DEBUG_TX_QUEUES(mvm,
25392881 "AGG for tid %d will be on queue #%d\n",
....@@ -2554,21 +2896,17 @@
25542896 * to align the wrap around of ssn so we compare relevant values.
25552897 */
25562898 normalized_ssn = tid_data->ssn;
2557
- if (mvm->trans->cfg->gen2)
2899
+ if (mvm->trans->trans_cfg->gen2)
25582900 normalized_ssn &= 0xff;
25592901
25602902 if (normalized_ssn == tid_data->next_reclaimed) {
25612903 tid_data->state = IWL_AGG_STARTING;
2562
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2904
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
25632905 } else {
25642906 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2907
+ ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
25652908 }
25662909
2567
- ret = 0;
2568
- goto out;
2569
-
2570
-release_locks:
2571
- spin_unlock(&mvm->queue_info_lock);
25722910 out:
25732911 spin_unlock_bh(&mvmsta->lock);
25742912
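
Besides dropping queue_info_lock, the hunk above changes how the TX aggregation start is reported: instead of calling ieee80211_start_tx_ba_cb_irqsafe() directly, the handler now returns one of the IEEE80211_AMPDU_TX_START_* codes that mac80211 defines for the ampdu_action callback. A hedged sketch of a generic driver using them (the example_* names are hypothetical, not from this file):

    #include <net/mac80211.h>

    /* placeholder for a driver-specific "is the TID queue drained?" check */
    static bool example_tid_drained(struct ieee80211_sta *sta, u16 tid)
    {
            return true;
    }

    static int example_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    struct ieee80211_ampdu_params *params)
    {
            switch (params->action) {
            case IEEE80211_AMPDU_TX_START:
                    /* mac80211 sends the ADDBA request right away ... */
                    if (example_tid_drained(params->sta, params->tid))
                            return IEEE80211_AMPDU_TX_START_IMMEDIATE;
                    /* ... or defers it until the driver later calls
                     * ieee80211_start_tx_ba_cb_irqsafe() itself */
                    return IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
            default:
                    return -EOPNOTSUPP;
            }
    }
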
....@@ -2637,9 +2975,7 @@
26372975
26382976 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
26392977
2640
- spin_lock_bh(&mvm->queue_info_lock);
26412978 queue_status = mvm->queue_info[queue].status;
2642
- spin_unlock_bh(&mvm->queue_info_lock);
26432979
26442980 /* Maybe there is no need to even alloc a queue... */
26452981 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
....@@ -2673,8 +3009,7 @@
26733009 }
26743010
26753011 if (alloc_queue)
2676
- iwl_mvm_enable_txq(mvm, queue,
2677
- vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
3012
+ iwl_mvm_enable_txq(mvm, sta, queue, ssn,
26783013 &cfg, wdg_timeout);
26793014
26803015 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
....@@ -2685,9 +3020,7 @@
26853020 }
26863021
26873022 /* No need to mark as reserved */
2688
- spin_lock_bh(&mvm->queue_info_lock);
26893023 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2690
- spin_unlock_bh(&mvm->queue_info_lock);
26913024
26923025 out:
26933026 /*
....@@ -2704,7 +3037,7 @@
27043037 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
27053038 sta->addr, tid);
27063039
2707
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
3040
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
27083041 }
27093042
27103043 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
....@@ -2713,10 +3046,11 @@
27133046 {
27143047 u16 txq_id = tid_data->txq_id;
27153048
3049
+ lockdep_assert_held(&mvm->mutex);
3050
+
27163051 if (iwl_mvm_has_new_tx_api(mvm))
27173052 return;
27183053
2719
- spin_lock_bh(&mvm->queue_info_lock);
27203054 /*
27213055 * The TXQ is marked as reserved only if no traffic came through yet
27223056 * This means no traffic has been sent on this TID (agg'd or not), so
....@@ -2728,8 +3062,6 @@
27283062 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
27293063 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
27303064 }
2731
-
2732
- spin_unlock_bh(&mvm->queue_info_lock);
27333065 }
27343066
27353067 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
....@@ -3178,59 +3510,6 @@
31783510 return ret;
31793511 }
31803512
3181
-static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3182
- struct ieee80211_key_conf *keyconf,
3183
- bool mcast)
3184
-{
3185
- union {
3186
- struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3187
- struct iwl_mvm_add_sta_key_cmd cmd;
3188
- } u = {};
3189
- bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3190
- IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3191
- __le16 key_flags;
3192
- int ret, size;
3193
- u32 status;
3194
-
3195
- /* This is a valid situation for GTK removal */
3196
- if (sta_id == IWL_MVM_INVALID_STA)
3197
- return 0;
3198
-
3199
- key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3200
- STA_KEY_FLG_KEYID_MSK);
3201
- key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3202
- key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3203
-
3204
- if (mcast)
3205
- key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3206
-
3207
- /*
3208
- * The fields assigned here are in the same location at the start
3209
- * of the command, so we can do this union trick.
3210
- */
3211
- u.cmd.common.key_flags = key_flags;
3212
- u.cmd.common.key_offset = keyconf->hw_key_idx;
3213
- u.cmd.common.sta_id = sta_id;
3214
-
3215
- size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
3216
-
3217
- status = ADD_STA_SUCCESS;
3218
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3219
- &status);
3220
-
3221
- switch (status) {
3222
- case ADD_STA_SUCCESS:
3223
- IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3224
- break;
3225
- default:
3226
- ret = -EIO;
3227
- IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3228
- break;
3229
- }
3230
-
3231
- return ret;
3232
-}
3233
-
32343513 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
32353514 struct ieee80211_vif *vif,
32363515 struct ieee80211_sta *sta,
....@@ -3523,7 +3802,7 @@
35233802 struct ieee80211_sta *sta;
35243803 u32 sta_id = le32_to_cpu(notif->sta_id);
35253804
3526
- if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3805
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
35273806 return;
35283807
35293808 rcu_read_lock();
....@@ -3606,7 +3885,7 @@
36063885 lockdep_assert_held(&mvm->mutex);
36073886
36083887 /* Block/unblock all the stations of the given mvmvif */
3609
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3888
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
36103889 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
36113890 lockdep_is_held(&mvm->mutex));
36123891 if (IS_ERR_OR_NULL(sta))
....@@ -3660,8 +3939,48 @@
36603939 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
36613940 * to align the wrap around of ssn so we compare relevant values.
36623941 */
3663
- if (mvm->trans->cfg->gen2)
3942
+ if (mvm->trans->trans_cfg->gen2)
36643943 sn &= 0xff;
36653944
36663945 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
36673946 }
3947
+
3948
+int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3949
+ struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
3950
+ u8 *key, u32 key_len)
3951
+{
3952
+ int ret;
3953
+ u16 queue;
3954
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3955
+ struct ieee80211_key_conf *keyconf;
3956
+
3957
+ ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
3958
+ NL80211_IFTYPE_UNSPECIFIED,
3959
+ IWL_STA_LINK);
3960
+ if (ret)
3961
+ return ret;
3962
+
3963
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
3964
+ addr, sta, &queue,
3965
+ IWL_MVM_TX_FIFO_BE);
3966
+ if (ret)
3967
+ goto out;
3968
+
3969
+ keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
3970
+ if (!keyconf) {
3971
+ ret = -ENOBUFS;
3972
+ goto out;
3973
+ }
3974
+
3975
+ keyconf->cipher = cipher;
3976
+ memcpy(keyconf->key, key, key_len);
3977
+ keyconf->keylen = key_len;
3978
+
3979
+ ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
3980
+ 0, NULL, 0, 0, true);
3981
+ kfree(keyconf);
3982
+ return ret;
3983
+out:
3984
+ iwl_mvm_dealloc_int_sta(mvm, sta);
3985
+ return ret;
3986
+}
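
The new iwl_mvm_add_pasn_sta() bundles the internal-station allocation, queue setup and key programming for a PASN (secure ranging) peer. A hypothetical caller sketch, with example_* names and the CCMP cipher chosen only for illustration:

    #include <linux/ieee80211.h>

    /* hypothetical wrapper; in the driver this helper serves the
     * secured-ranging (PASN) flows */
    static int example_add_ranging_peer(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif,
                                        struct iwl_mvm_int_sta *int_sta,
                                        u8 *peer_addr, u8 *tk, u32 tk_len)
    {
            /* allocates the station, enables a BE queue for it and programs the TK */
            return iwl_mvm_add_pasn_sta(mvm, vif, int_sta, peer_addr,
                                        WLAN_CIPHER_SUITE_CCMP, tk, tk_len);
    }
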