From 1c055e55a242a33e574e48be530e06770a210dcd Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Mon, 19 Feb 2024 03:26:26 +0000 Subject: [PATCH] add r8169 read mac from eeprom --- kernel/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 1776 ++++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 1205 insertions(+), 571 deletions(-) diff --git a/kernel/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/kernel/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3f37fb6..d2c6fdb 100644 --- a/kernel/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/kernel/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -33,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -78,6 +71,7 @@ #include <net/ieee80211_radiotap.h> #include <net/tcp.h> +#include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" @@ -189,90 +183,37 @@ }; #endif -void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; +static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { + .max_peers = IWL_MVM_TOF_MAX_APS, + .report_ap_tsf = 1, + .randomize_mac_addr = 1, - IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type); - spin_lock_bh(&mvm->refs_lock); - mvm->refs[ref_type]++; - spin_unlock_bh(&mvm->refs_lock); - iwl_trans_ref(mvm->trans); -} + .ftm = { + .supported = 1, + .asap = 1, + .non_asap = 1, + .request_lci = 1, + .request_civicloc = 1, + .trigger_based = 1, + .non_trigger_based = 1, + .max_bursts_exponent = -1, /* all supported */ + .max_ftms_per_burst = 0, /* no limits */ + .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), + .preambles = BIT(NL80211_PREAMBLE_LEGACY) | + BIT(NL80211_PREAMBLE_HT) | + BIT(NL80211_PREAMBLE_VHT) | + BIT(NL80211_PREAMBLE_HE), + }, +}; -void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); - spin_lock_bh(&mvm->refs_lock); - if (WARN_ON(!mvm->refs[ref_type])) { - spin_unlock_bh(&mvm->refs_lock); - return; - } - mvm->refs[ref_type]--; - spin_unlock_bh(&mvm->refs_lock); - iwl_trans_unref(mvm->trans); -} - -static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, - enum iwl_mvm_ref_type except_ref) -{ - int i, j; - - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - spin_lock_bh(&mvm->refs_lock); - for (i = 0; i < IWL_MVM_REF_COUNT; i++) { - if (except_ref == i || !mvm->refs[i]) - continue; - - IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n", - i, mvm->refs[i]); - for (j = 0; j < mvm->refs[i]; j++) - iwl_trans_unref(mvm->trans); - mvm->refs[i] = 0; - } - spin_unlock_bh(&mvm->refs_lock); -} - -bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) -{ - int i; - bool taken = false; - - if (!iwl_mvm_is_d0i3_supported(mvm)) - return true; - - spin_lock_bh(&mvm->refs_lock); - for (i = 0; i < IWL_MVM_REF_COUNT; i++) { - if (mvm->refs[i]) { - taken = true; - break; - } - } - spin_unlock_bh(&mvm->refs_lock); - - return taken; -} - -int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - iwl_mvm_ref(mvm, ref_type); - - if (!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), - HZ)) { - WARN_ON_ONCE(1); - iwl_mvm_unref(mvm, ref_type); - return -EIO; - } - - return 0; -} +static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) { @@ -294,6 +235,7 @@ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcc_update_resp *resp; + u8 resp_ver; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); @@ -312,13 +254,16 @@ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || status == MCC_RESP_ILLEGAL); } + resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + MCC_UPDATE_CMD, 0); + IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); 
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), - __le16_to_cpu(resp->cap)); + __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; kfree(resp); @@ -401,6 +346,30 @@ return ret; } +static const u8 he_if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, +}; + +static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = he_if_types_ext_capa_sta, + .extended_capabilities_mask = he_if_types_ext_capa_sta, + .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), + }, +}; + +static int +iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); + *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); + return 0; +} + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -411,12 +380,15 @@ WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; +#ifdef CONFIG_PM_SLEEP + bool unified = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); +#endif /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, QUEUE_CONTROL); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); @@ -429,6 +401,23 @@ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); + ieee80211_hw_set(hw, STA_MMPDU_TXQ); + /* + * On older devices, enabling TX A-MSDU occasionally leads to + * something getting messed up, the command read from the FIFO + * gets out of sync and isn't a TX command, so that we have an + * assert EDC. + * + * It's not clear where the bug is, but since we didn't used to + * support A-MSDU until moving the mac80211 iTXQs, just leave it + * for older devices. We also don't see this issue on any newer + * devices. + */ + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); @@ -474,6 +463,7 @@ hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); @@ -489,23 +479,23 @@ hw->wiphy->n_cipher_suites++; } - /* Enable 11w if software crypto is not enabled (as the - * firmware will interpret some mgmt packets, so enabling it - * with software crypto isn't safe). 
- */ - if (!iwlwifi_mod_params.swcrypto) { - ieee80211_hw_set(hw, MFP_CAPABLE); + if (iwlwifi_mod_params.swcrypto) + IWL_ERR(mvm, + "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); + if (!iwlwifi_mod_params.bt_coex_active) + IWL_ERR(mvm, + "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); + + ieee80211_hw_set(hw, MFP_CAPABLE); + mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; + hw->wiphy->n_cipher_suites++; + if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_AES_CMAC; + WLAN_CIPHER_SUITE_BIP_GMAC_128; hw->wiphy->n_cipher_suites++; - if (iwl_mvm_has_new_rx_api(mvm)) { - mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_BIP_GMAC_128; - hw->wiphy->n_cipher_suites++; - mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_BIP_GMAC_256; - hw->wiphy->n_cipher_suites++; - } + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_BIP_GMAC_256; + hw->wiphy->n_cipher_suites++; } /* currently FW API supports only one optional cipher scheme */ @@ -530,6 +520,13 @@ hw->wiphy->n_cipher_suites++; } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); + hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; + } + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | @@ -539,6 +536,7 @@ hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); + hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -548,6 +546,17 @@ BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + /* The new Tx API does not allow to pass the key or keyid of a MPDU to + * the hw, preventing us to control which key(id) to use per MPDU. + * Till that's fixed we can't use Extended Key ID for the newer cards. + */ + if (!iwl_mvm_has_new_tx_api(mvm)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_EXT_KEY_ID); + hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; + hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; if (iwl_mvm_is_lar_supported(mvm)) hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; @@ -564,8 +573,6 @@ hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - /* we can compensate an offset of up to 3 channels = 15 MHz */ - hw->wiphy->max_adj_channel_rssi_comp = 3 * 5; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); @@ -622,7 +629,7 @@ hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; - hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; + hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); /* we create the 802.11 header and zero length SSID IE. 
*/ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; @@ -659,14 +666,17 @@ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; + if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + WOWLAN_KEK_KCK_MATERIAL, + IWL_FW_CMD_VER_UNKNOWN) == 3) + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF); - wiphy_ext_feature_set(hw->wiphy, - NL80211_EXT_FEATURE_SET_SCAN_DWELL); } if (iwl_mvm_is_oce_supported(mvm)) { @@ -680,16 +690,20 @@ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); } + if (mvm->nvm_data->sku_cap_11ax_enable && + !iwlwifi_mod_params.disable_11ax) { + hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; + hw->wiphy->num_iftype_ext_capab = + ARRAY_SIZE(he_iftypes_ext_capa); + + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); + ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP - if (iwl_mvm_is_d0i3_supported(mvm) && - device_can_wakeup(mvm->trans->dev)) { - mvm->wowlan.flags = WIPHY_WOWLAN_ANY; - hw->wiphy->wowlan = &mvm->wowlan; - } - - if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec && + if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { @@ -698,15 +712,15 @@ WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE | WIPHY_WOWLAN_NET_DETECT; - if (!iwlwifi_mod_params.swcrypto) - mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_4WAY_HANDSHAKE; + mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; - mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; + mvm->wowlan.max_nd_match_sets = + iwl_umac_scan_get_max_profiles(mvm->fw); hw->wiphy->wowlan = &mvm->wowlan; } #endif @@ -742,57 +756,37 @@ hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; } - ret = ieee80211_register_hw(mvm->hw); - if (ret) - iwl_mvm_leds_exit(mvm); - mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE; - if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_PROTECTED_TWT); + + hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); + hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); + + ret = ieee80211_register_hw(mvm->hw); + if (ret) { + iwl_mvm_leds_exit(mvm); + } + return ret; } -static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct sk_buff *skb) +static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) { - struct iwl_mvm_sta *mvmsta; - bool defer = false; + if (likely(sta)) { + if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) + return; + } else { + if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) + return; + } - /* - * double check the IN_D0I3 flag both before and after - * taking the spinlock, in order to prevent taking - * the spinlock when not needed. 
- */ - if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))) - return false; - - spin_lock(&mvm->d0i3_tx_lock); - /* - * testing the flag again ensures the skb dequeue - * loop (on d0i3 exit) hasn't run yet. - */ - if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - goto out; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->sta_id == IWL_MVM_INVALID_STA || - mvmsta->sta_id != mvm->d0i3_ap_sta_id) - goto out; - - __skb_queue_tail(&mvm->d0i3_tx, skb); - ieee80211_stop_queues(mvm->hw); - - /* trigger wakeup */ - iwl_mvm_ref(mvm, IWL_MVM_REF_TX); - iwl_mvm_unref(mvm, IWL_MVM_REF_TX); - - defer = true; -out: - spin_unlock(&mvm->d0i3_tx_lock); - return defer; + ieee80211_free_txskb(mvm->hw, skb); } static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, @@ -803,13 +797,15 @@ struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; + bool offchannel = IEEE80211_SKB_CB(skb)->flags & + IEEE80211_TX_CTL_TX_OFFCHAN; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } - if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; @@ -822,13 +818,13 @@ sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ - if (info->control.vif->type == NL80211_IFTYPE_STATION && - info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) { + if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && + !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); - if (ap_sta_id < IWL_MVM_STATION_COUNT) { + if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { /* mac80211 holds rcu read lock */ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (IS_ERR_OR_NULL(sta)) @@ -836,37 +832,104 @@ } } - if (sta) { - if (iwl_mvm_defer_tx(mvm, sta, skb)) - return; - if (iwl_mvm_tx_skb(mvm, skb, sta)) - goto drop; - return; - } - - if (iwl_mvm_tx_skb_non_sta(mvm, skb)) - goto drop; + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); } -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); + struct sk_buff *skb = NULL; + + /* + * No need for threads to be pending here, they can leave the first + * taker all the work. + * + * mvmtxq->tx_request logic: + * + * If 0, no one is currently TXing, set to 1 to indicate current thread + * will now start TX and other threads should quit. + * + * If 1, another thread is currently TXing, set to 2 to indicate to + * that thread that there was another request. Since that request may + * have raced with the check whether the queue is empty, the TXing + * thread should check the queue's status one more time before leaving. + * This check is done in order to not leave any TX hanging in the queue + * until the next TX invocation (which may not even happen). + * + * If 2, another thread is currently TXing, and it will already double + * check the queue, so do nothing. 
+ */ + if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) + return; + + rcu_read_lock(); + do { + while (likely(!mvmtxq->stopped && + (mvm->trans->system_pm_mode == + IWL_PLAT_PM_MODE_DISABLED))) { + skb = ieee80211_tx_dequeue(hw, txq); + + if (!skb) { + if (txq->sta) + IWL_DEBUG_TX(mvm, + "TXQ of sta %pM tid %d is now empty\n", + txq->sta->addr, + txq->tid); + break; + } + + iwl_mvm_tx_skb(mvm, skb, txq->sta); + } + } while (atomic_dec_return(&mvmtxq->tx_request)); + rcu_read_unlock(); } -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); - /* enabled by default */ - return true; + /* + * Please note that racing is handled very carefully here: + * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is + * deleted afterwards. + * This means that if: + * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): + * queue is allocated and we can TX. + * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): + * a race, should defer the frame. + * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): + * need to allocate the queue and defer the frame. + * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): + * queue is already scheduled for allocation, no need to allocate, + * should defer the frame. + */ + + /* If the queue is allocated TX and return. */ + if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { + /* + * Check that list is empty to avoid a race where txq_id is + * already updated, but the queue allocation work wasn't + * finished + */ + if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) + return; + + iwl_mvm_mac_itxq_xmit(hw, txq); + return; + } + + /* The list is being deleted only after the queue is fully allocated. */ + if (!list_empty(&mvmtxq->list)) + return; + + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); + schedule_work(&mvm->add_stream_wk); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ @@ -884,15 +947,12 @@ struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { @@ -930,7 +990,6 @@ { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - bool tx_agg_ref = false; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; @@ -944,31 +1003,6 @@ if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; - - /* return from D0i3 before starting a new Tx aggregation */ - switch (action) { - case IEEE80211_AMPDU_TX_START: - case IEEE80211_AMPDU_TX_STOP_CONT: - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - case IEEE80211_AMPDU_TX_OPERATIONAL: - /* - * for tx start, wait synchronously until D0i3 exit to - * get the correct sequence number for the tid. 
- * additionally, some other ampdu actions use direct - * target access, which is not handled automatically - * by the trans layer (unlike commands), so wait for - * d0i3 exit in these cases as well. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG); - if (ret) - return ret; - - tx_agg_ref = true; - break; - default: - break; - } mutex_lock(&mvm->mutex); @@ -984,7 +1018,7 @@ mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } - if (!iwl_enable_rx_ampdu(mvm->cfg)) { + if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } @@ -996,7 +1030,7 @@ timeout); break; case IEEE80211_AMPDU_TX_START: - if (!iwl_enable_tx_ampdu(mvm->cfg)) { + if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } @@ -1030,13 +1064,6 @@ } mutex_unlock(&mvm->mutex); - /* - * If the tid is marked as started, we won't use it for offloaded - * traffic on the next D0i3 entry. It's safe to unref. - */ - if (tx_agg_ref) - iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); - return ret; } @@ -1055,33 +1082,24 @@ mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); + memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { - /* clear the D3 reconfig, we only need it to avoid dumping a - * firmware coredump on reconfiguration, we shouldn't do that - * on D3->D0 transition - */ - if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { - mvm->fwrt.dump.desc = &iwl_dump_desc_assert; - iwl_fw_error_dump(&mvm->fwrt); - } - - /* cleanup all stale references (scan, roc), but keep the - * ucode_down ref until reconfig is complete - */ - iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); - iwl_mvm_stop_device(mvm); + + mvm->cur_aid = 0; mvm->scan_status = 0; mvm->ps_disabled = false; - mvm->calibrating = false; + mvm->rfkill_safe_init_done = false; /* just in case one was running */ iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); + + iwl_mvm_ftm_restart(mvm); /* * cleanup all interfaces, even inactive ones, as some might have @@ -1090,18 +1108,13 @@ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); ieee80211_wake_queues(mvm->hw); - - /* clear any stale d0i3 state */ - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); mvm->vif_count = 0; mvm->rx_ba_sessions = 0; @@ -1127,16 +1140,13 @@ clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); - } else { - /* Hold the reference to prevent runtime suspend while - * the start procedure runs. It's a bit confusing - * that the UCODE_DOWN reference is taken, but it just - * means "UCODE is not UP yet". ( TODO: rename this - * reference). - */ - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); } ret = iwl_mvm_up(mvm); + + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, + NULL); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, + NULL); if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup @@ -1144,7 +1154,6 @@ * would do. 
*/ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_mvm_d0i3_enable_tx(mvm, NULL); } return ret; @@ -1154,22 +1163,30 @@ { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - - /* Some hw restart cleanups must not hold the mutex */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - /* - * Make sure we are out of d0i3. This is needed - * to make sure the reference accounting is correct - * (and there is no stale d0i3_exit_work). - */ - wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ); - } + int retry, max_retry = 0; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_start(mvm); + + /* we are starting the mac not in error flow, and restart is enabled */ + if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && + iwlwifi_mod_params.fw_restart) { + max_retry = IWL_MAX_INIT_RETRY; + /* + * This will prevent mac80211 recovery flows to trigger during + * init failures + */ + set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + } + + for (retry = 0; retry <= max_retry; retry++) { + ret = __iwl_mvm_mac_start(mvm); + if (!ret) + break; + + IWL_ERR(mvm, "mac start retry %d\n", retry); + } + clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + mutex_unlock(&mvm->mutex); return ret; @@ -1182,14 +1199,13 @@ mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_mvm_d0i3_enable_tx(mvm, NULL); + ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); - /* allow transport/FW low power modes */ - iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); /* * If we have TDLS peers, remove them. We don't know the last seqno/PN @@ -1198,17 +1214,6 @@ iwl_mvm_teardown_tdls_peers(mvm); mutex_unlock(&mvm->mutex); -} - -static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) -{ - if (iwl_mvm_is_d0i3_supported(mvm) && - iwl_mvm_enter_d0i3_on_suspend(mvm)) - WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ), - "D0i3 exit on resume timed out\n"); } static void @@ -1222,7 +1227,6 @@ iwl_mvm_restart_complete(mvm); break; case IEEE80211_RECONFIG_TYPE_SUSPEND: - iwl_mvm_resume_complete(mvm); break; } } @@ -1231,6 +1235,8 @@ { lockdep_assert_held(&mvm->mutex); + iwl_mvm_ftm_initiator_smooth_stop(mvm); + /* firmware counters are obviously reset now, but we shouldn't * partially track so also clear the fw_reset_accu counters. */ @@ -1238,19 +1244,13 @@ /* async_handlers_wk is now blocked */ - /* - * The work item could be running or queued if the - * ROC time event stops just as we get here. 
- */ - flush_work(&mvm->roc_done_wk); + if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12) + iwl_mvm_rm_aux_sta(mvm); iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ - - /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_del_aux_sta(mvm); /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the @@ -1284,7 +1284,6 @@ { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - flush_work(&mvm->d0i3_exit_work); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); @@ -1298,10 +1297,14 @@ */ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - iwl_fw_cancel_dump(&mvm->fwrt); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); - iwl_fw_free_dump_desc(&mvm->fwrt); + + /* + * The work item could be running or queued if the + * ROC time event stops just as we get here. + */ + flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); @@ -1331,21 +1334,127 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { + int len; struct iwl_dev_tx_power_cmd cmd = { - .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .v3.mac_context_id = + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .common.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .v3.pwr_restriction = cpu_to_le16(8 * tx_power), + .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; - int len = sizeof(cmd); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + REDUCE_TX_POWER_CMD, + IWL_FW_CMD_VER_UNKNOWN); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) + if (cmd_ver == 6) + len = sizeof(cmd.v6); + else if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_REDUCE_TX_POWER)) + len = sizeof(cmd.v5); + else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) + len = sizeof(cmd.v4); + else len = sizeof(cmd.v3); + /* all structs have the same common part, add it */ + len += sizeof(cmd.common); + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); +} + +static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + if (mvmvif->csa_failed) { + mvmvif->csa_failed = false; + ret = -EIO; + goto out_unlock; + } + + if (vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + + mvmvif->csa_bcn_pending = false; + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (WARN_ON(!mvmsta)) { + ret = -EIO; + goto out_unlock; + } + + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; + + iwl_mvm_stop_session_protection(mvm, vif); + } + } + + mvmvif->ps_disabled = false; + + ret = iwl_mvm_power_update_ps(mvm); + +out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + 
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_chan_switch_te_cmd cmd = { + .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + }; + + IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); + + mutex_lock(&mvm->mutex); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) + iwl_mvm_remove_csa_period(mvm, vif); + else + WARN_ON(iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, + CHANNEL_SWITCH_TIME_EVENT_CMD), + 0, sizeof(cmd), &cmd)); + mutex_unlock(&mvm->mutex); + + WARN_ON(iwl_mvm_post_channel_switch(hw, vif)); +} + +static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_vif *vif; + + mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); + vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + mvm = mvmvif->mvm; + + iwl_mvm_abort_channel_switch(mvm->hw, vif); + ieee80211_chswitch_done(vif, false); } static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, @@ -1356,15 +1465,7 @@ int ret; mvmvif->mvm = mvm; - - /* - * make sure D0i3 exit is completed, otherwise a target access - * during tx queue configuration could be done when still in - * D0i3 state. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF); - if (ret) - return ret; + RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); /* * Not much to do here. The stack will not allow interface @@ -1383,6 +1484,8 @@ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) goto out_unlock; + + rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); /* Counting number of interfaces is needed for legacy PM */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) @@ -1471,6 +1574,8 @@ } iwl_mvm_tcm_add_vif(mvm, vif); + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; @@ -1497,8 +1602,6 @@ out_unlock: mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF); - return ret; } @@ -1520,6 +1623,7 @@ { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *probe_data; iwl_mvm_prepare_mac_removal(mvm, vif); @@ -1529,11 +1633,20 @@ mutex_lock(&mvm->mutex); + probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, + lockdep_is_held(&mvm->mutex)); + RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } + + if (vif->bss_conf.ftm_responder) + memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); iwl_mvm_vif_dbgfs_clean(mvm, vif); @@ -1567,6 +1680,8 @@ iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); + + RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; @@ -1972,11 +2087,15 @@ struct iwl_he_sta_context_cmd sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, - .bss_color = vif->bss_conf.bss_color, + .bss_color = vif->bss_conf.he_bss_color.color, .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; + int size = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_MBSSID_HE) ? 
+ sizeof(sta_ctxt_cmd) : + sizeof(struct iwl_he_sta_context_cmd_v1); struct ieee80211_sta *sta; u32 flags; int i; @@ -1984,7 +2103,7 @@ rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); - if (IS_ERR(sta)) { + if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; @@ -1996,6 +2115,10 @@ } flags = 0; + + /* Block 26-tone RU OFDMA transmissions */ + if (mvmvif->he_ru_2mhz_block) + flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ if (sta->he_cap.he_cap_elem.mac_cap_info[0] & @@ -2018,10 +2141,6 @@ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED); if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[3] & @@ -2083,35 +2202,82 @@ } flags |= STA_CTXT_HE_PACKET_EXT; + } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] & + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) != + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) { + int low_th = -1; + int high_th = -1; + + /* Take the PPE thresholds from the nominal padding info */ + switch (sta->he_cap.he_cap_elem.phy_cap_info[9] & + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { + case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: + low_th = IWL_HE_PKT_EXT_BPSK; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_BPSK; + break; + } + + /* Set the PPE thresholds accordingly */ + if (low_th >= 0 && high_th >= 0) { + struct iwl_he_pkt_ext *pkt_ext = + (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext; + + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; + + for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; + bw++) { + pkt_ext->pkt_ext_qam_th[i][bw][0] = + low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = + high_th; + } + } + + flags |= STA_CTXT_HE_PACKET_EXT; + } } + + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) + flags |= STA_CTXT_HE_32BIT_BA_BITMAP; + + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN) + flags |= STA_CTXT_HE_ACK_ENABLED; + rcu_read_unlock(); /* Mark MU EDCA as enabled, unless none detected on some AC */ flags |= STA_CTXT_HE_MU_EDCA_CW; - for (i = 0; i < AC_NUM; i++) { + for (i = 0; i < IEEE80211_NUM_ACS; i++) { struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = &mvmvif->queue_params[i].mu_edca_param_rec; + u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); if (!mvmvif->queue_params[i].mu_edca) { flags &= ~STA_CTXT_HE_MU_EDCA_CW; break; } - sta_ctxt_cmd.trig_based_txf[i].cwmin = + sta_ctxt_cmd.trig_based_txf[ac].cwmin = cpu_to_le16(mu_edca->ecw_min_max & 0xf); - sta_ctxt_cmd.trig_based_txf[i].cwmax = + sta_ctxt_cmd.trig_based_txf[ac].cwmax = cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); - sta_ctxt_cmd.trig_based_txf[i].aifsn = + sta_ctxt_cmd.trig_based_txf[ac].aifsn = cpu_to_le16(mu_edca->aifsn); - sta_ctxt_cmd.trig_based_txf[i].mu_time = + sta_ctxt_cmd.trig_based_txf[ac].mu_time = cpu_to_le16(mu_edca->mu_edca_timer); } - if (vif->bss_conf.multi_sta_back_32bit) - flags |= STA_CTXT_HE_32BIT_BA_BITMAP; - - if (vif->bss_conf.ack_enabled) - flags |= STA_CTXT_HE_ACK_ENABLED; if 
(vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; @@ -2122,13 +2288,23 @@ (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } - /* TODO: support Multi BSSID IE */ + if (vif->bss_conf.nontransmitted) { + flags |= STA_CTXT_HE_REF_BSSID_VALID; + ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, + vif->bss_conf.transmitter_bssid); + sta_ctxt_cmd.max_bssid_indicator = + vif->bss_conf.bssid_indicator; + sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; + sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; + sta_ctxt_cmd.profile_periodicity = + vif->bss_conf.profile_periodicity; + } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, DATA_PATH_GROUP, 0), - 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd)) + 0, size, &sta_ctxt_cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } @@ -2141,9 +2317,9 @@ int ret; /* - * Re-calculate the tsf id, as the master-slave relations depend on the - * beacon interval, which was not known when the station interface was - * added. + * Re-calculate the tsf id, as the leader-follower relations depend + * on the beacon interval, which was not known when the station + * interface was added. */ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { if (vif->bss_conf.he_support && @@ -2152,6 +2328,12 @@ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } + + /* Update MU EDCA params */ + if (changes & BSS_CHANGED_QOS && mvmvif->associated && + bss_conf->assoc && vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating @@ -2185,7 +2367,9 @@ } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)) { + &mvm->status) && + !fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { /* * If we're restarting then the firmware will * obviously have lost synchronisation with @@ -2199,6 +2383,10 @@ * * Set a large maximum delay to allow for more * than a single interface. + * + * For new firmware versions, rely on the + * firmware. This is relevant for DCM scenarios + * only anyway. */ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, @@ -2208,7 +2396,6 @@ iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { - iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); @@ -2218,7 +2405,10 @@ * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ - WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), "Failed to update SF upon disassociation\n"); /* @@ -2241,9 +2431,6 @@ IWL_ERR(mvm, "failed to remove AP station\n"); - if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - mvm->d0i3_ap_sta_id = - IWL_MVM_INVALID_STA; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } @@ -2251,9 +2438,6 @@ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) IWL_ERR(mvm, "failed to update quotas\n"); - - if (vif->p2p) - iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); @@ -2293,8 +2477,11 @@ /* * We received a beacon from the associated AP so * remove the session protection. + * A firmware with the new API will remove it automatically. 
*/ - iwl_mvm_stop_session_protection(mvm, vif); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) + iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); @@ -2340,15 +2527,7 @@ { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - /* - * iwl_mvm_mac_ctxt_add() might read directly from the device - * (the system time), so make sure it is available. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); - if (ret) - return ret; + int ret, i; mutex_lock(&mvm->mutex); @@ -2358,8 +2537,9 @@ goto out_unlock; /* - * Re-calculate the tsf id, as the master-slave relations depend on the - * beacon interval, which was not known when the AP interface was added. + * Re-calculate the tsf id, as the leader-follower relations depend on + * the beacon interval, which was not known when the AP interface + * was added. */ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); @@ -2414,6 +2594,26 @@ /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; + /* send all the early keys to the device now */ + for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { + struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; + + if (!key) + continue; + + mvmvif->ap_early_keys[i] = NULL; + + ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); + if (ret) + goto out_quota_failed; + } + + if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { + iwl_mvm_vif_set_low_latency(mvmvif, true, + LOW_LATENCY_VIF_TYPE); + iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); + } + /* power updated needs to be done before quotas */ iwl_mvm_power_update_mac(mvm); @@ -2425,13 +2625,13 @@ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); - iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); - iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); + + iwl_mvm_ftm_restart_responder(mvm, vif); goto out_unlock; @@ -2446,7 +2646,6 @@ iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); return ret; } @@ -2476,15 +2675,21 @@ mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; - iwl_mvm_bt_coex_vif_change(mvm); + if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { + iwl_mvm_vif_set_low_latency(mvmvif, false, + LOW_LATENCY_VIF_TYPE); + iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); + } - iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); + iwl_mvm_bt_coex_vif_change(mvm); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); + + iwl_mvm_ftm_responder_clear(mvm, vif); /* * This is not very nice, but the simplest: @@ -2535,6 +2740,15 @@ bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } + + if (changes & BSS_CHANGED_FTM_RESPONDER) { + int ret = iwl_mvm_ftm_start_responder(mvm, vif); + + if (ret) + IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", + ret); + } + } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, @@ -2543,14 +2757,6 @@ u32 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - /* - * iwl_mvm_bss_info_changed_station() might call - * iwl_mvm_protect_session(), which reads directly from - * the device (the 
system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) - return; mutex_lock(&mvm->mutex); @@ -2575,7 +2781,6 @@ } mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); } static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, @@ -2664,7 +2869,7 @@ return; spin_lock_bh(&mvmsta->lock); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) @@ -2721,7 +2926,7 @@ struct iwl_mvm_sta *mvmsta; bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); - if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) + if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); @@ -2834,14 +3039,12 @@ struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_TDLS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); tdls_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(tdls_trig->action_bitmap & BIT(action))) return; @@ -2855,30 +3058,53 @@ peer_addr, action); } -static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta) +struct iwl_mvm_he_obss_narrow_bw_ru_data { + bool tolerated; +}; + +static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, + struct cfg80211_bss *bss, + void *_data) { - struct iwl_mvm_tid_data *tid_data; - struct sk_buff *skb; - int i; + struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; + const struct cfg80211_bss_ies *ies; + const struct element *elem; - spin_lock_bh(&mvm_sta->lock); - for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { - tid_data = &mvm_sta->tid_data[i]; + rcu_read_lock(); + ies = rcu_dereference(bss->ies); + elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, + ies->len); - while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - /* - * The first deferred frame should've stopped the MAC - * queues, so we should never get a second deferred - * frame for the RA/TID. - */ - iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue)); - ieee80211_free_txskb(mvm->hw, skb); - } + if (!elem || elem->datalen < 10 || + !(elem->data[10] & + WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { + data->tolerated = false; } - spin_unlock_bh(&mvm_sta->lock); + rcu_read_unlock(); +} + +static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { + .tolerated = true, + }; + + if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { + mvmvif->he_ru_2mhz_block = false; + return; + } + + cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, + iwl_mvm_check_he_obss_narrow_bw_ru_iter, + &iter_data); + + /* + * If there is at least one AP on radar channel that cannot + * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
+ */ + mvmvif->he_ru_2mhz_block = !iter_data.tolerated; } static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, @@ -2914,7 +3140,6 @@ */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { - iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); /* @@ -2934,7 +3159,7 @@ * than 16. We can't avoid connecting at all, so refuse the * station state change, this will cause mac80211 to abandon * attempts to connect to this AP, and eventually wpa_s will - * blacklist the AP... + * blocklist the AP... */ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.beacon_int < 16) { @@ -2944,6 +3169,9 @@ ret = -EINVAL; goto out_unlock; } + + if (vif->type == NL80211_IFTYPE_STATION) + vif->bss_conf.he_support = sta->he_cap.has_he; if (sta->tdls && (vif->p2p || @@ -2961,6 +3189,8 @@ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_SETUP); } + + sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* @@ -2973,7 +3203,19 @@ } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { if (vif->type == NL80211_IFTYPE_AP) { + vif->bss_conf.he_support = sta->he_cap.has_he; mvmvif->ap_assoc_sta_count++; + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); + } else if (vif->type == NL80211_IFTYPE_STATION) { + vif->bss_conf.he_support = sta->he_cap.has_he; + + mvmvif->he_ru_2mhz_block = false; + if (sta->he_cap.has_he) + iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } @@ -2982,6 +3224,7 @@ ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { + ret = 0; /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) @@ -2994,14 +3237,25 @@ /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + /* + * Now that the station is authorized, i.e., keys were already + * installed, need to indicate to the FW that + * multicast data frames can be forwarded to the driver + */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); - - ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* Multicast data frames are no longer allowed */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + /* disable beacon filtering */ - WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + WARN_ON(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)); ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { @@ -3021,6 +3275,11 @@ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } + + if (unlikely(ret && + test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status))) + ret = 0; } else { ret = -EIO; } @@ -3053,6 +3312,13 @@ struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (changed & (IEEE80211_RC_BW_CHANGED | + IEEE80211_RC_SUPP_RATES_CHANGED | + IEEE80211_RC_NSS_CHANGED)) + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) @@ -3091,22 +3357,27 @@ u32 duration = 
IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
 	u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
 
-	/*
-	 * iwl_mvm_protect_session() reads directly from the device
-	 * (the system time), so make sure it is available.
-	 */
-	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
-		return;
-
 	if (req_duration > duration)
 		duration = req_duration;
 
 	mutex_lock(&mvm->mutex);
-	/* Try really hard to protect the session and hear a beacon */
-	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
+	/* Try really hard to protect the session and hear a beacon
+	 * The new session protection command allows us to protect the
+	 * session for a much longer time since the firmware will internally
+	 * create two events: a 300TU one with a very high priority that
+	 * won't be fragmented which should be enough for 99% of the cases,
+	 * and another one (which we configure here to be 900TU long) which
+	 * will have a slightly lower priority, but more importantly, can be
+	 * fragmented so that it'll allow other activities to run.
+	 */
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+		iwl_mvm_schedule_session_protection(mvm, vif, 900,
+						    min_duration, false);
+	else
+		iwl_mvm_protect_session(mvm, vif, duration,
+					min_duration, 500, false);
 	mutex_unlock(&mvm->mutex);
-
-	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
 }
 
 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
@@ -3160,28 +3431,31 @@
 	return ret;
 }
 
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
-			       enum set_key_cmd cmd,
-			       struct ieee80211_vif *vif,
-			       struct ieee80211_sta *sta,
-			       struct ieee80211_key_conf *key)
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+				 enum set_key_cmd cmd,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 struct ieee80211_key_conf *key)
 {
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_sta *mvmsta;
 	struct iwl_mvm_key_pn *ptk_pn;
 	int keyidx = key->keyidx;
-	int ret;
+	int ret, i;
 	u8 key_offset;
-
-	if (iwlwifi_mod_params.swcrypto) {
-		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
-		return -EOPNOTSUPP;
-	}
 
 	switch (key->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
-		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		if (!mvm->trans->trans_cfg->gen2) {
+			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		} else if (vif->type == NL80211_IFTYPE_STATION) {
+			key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+		} else {
+			IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
+			return -EOPNOTSUPP;
+		}
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_GCMP:
@@ -3196,16 +3470,12 @@
 		break;
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
-		/* For non-client mode, only use WEP keys for TX as we probably
-		 * don't have a station yet anyway and would then have to keep
-		 * track of the keys, linking them to each of the clients/peers
-		 * as they appear. For now, don't do that, for performance WEP
-		 * offload doesn't really matter much, but we need it for some
-		 * other offload features in client mode.
-		 */
-		if (vif->type != NL80211_IFTYPE_STATION)
-			return 0;
-		break;
+		if (vif->type == NL80211_IFTYPE_STATION)
+			break;
+		if (iwl_mvm_has_new_tx_api(mvm))
+			return -EOPNOTSUPP;
+		/* support HW crypto on TX */
+		return 0;
 	default:
 		/* currently FW supports only one optional cipher scheme */
 		if (hw->n_cipher_schemes &&
@@ -3214,8 +3484,6 @@
 		else
 			return -EOPNOTSUPP;
 	}
-
-	mutex_lock(&mvm->mutex);
 
 	switch (cmd) {
 	case SET_KEY:
@@ -3229,15 +3497,34 @@
 			 */
 			if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
 			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
 				ret = -EOPNOTSUPP;
-			else
-				ret = 0;
+				break;
+			}
 
 			if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
 			    key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
 			    !iwl_mvm_has_new_tx_api(mvm)) {
 				key->hw_key_idx = STA_KEY_IDX_INVALID;
+				ret = 0;
+				break;
+			}
+
+			if (!mvmvif->ap_ibss_active) {
+				for (i = 0;
+				     i < ARRAY_SIZE(mvmvif->ap_early_keys);
+				     i++) {
+					if (!mvmvif->ap_early_keys[i]) {
+						mvmvif->ap_early_keys[i] = key;
+						break;
+					}
+				}
+
+				if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
+					ret = -ENOSPC;
+				else
+					ret = 0;
+				break;
 			}
 		}
 
@@ -3293,16 +3580,33 @@
 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
 		if (ret) {
 			IWL_WARN(mvm, "set key failed\n");
+			key->hw_key_idx = STA_KEY_IDX_INVALID;
 			/*
 			 * can't add key for RX, but we don't need it
-			 * in the device for TX so still return 0
+			 * in the device for TX so still return 0,
+			 * unless we have new TX API where we cannot
+			 * put key material into the TX_CMD
 			 */
-			key->hw_key_idx = STA_KEY_IDX_INVALID;
-			ret = 0;
+			if (iwl_mvm_has_new_tx_api(mvm))
+				ret = -EOPNOTSUPP;
+			else
+				ret = 0;
 		}
 
 		break;
 	case DISABLE_KEY:
+		ret = -ENOENT;
+		for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+			if (mvmvif->ap_early_keys[i] == key) {
+				mvmvif->ap_early_keys[i] = NULL;
+				ret = 0;
+			}
+		}
+
+		/* found in pending list - don't do anything else */
+		if (ret == 0)
+			break;
+
 		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
 			ret = 0;
 			break;
@@ -3329,7 +3633,22 @@
 		ret = -EINVAL;
 	}
 
+	return ret;
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+			       enum set_key_cmd cmd,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct ieee80211_key_conf *key)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
 	mutex_unlock(&mvm->mutex);
+
 	return ret;
 }
 
@@ -3368,7 +3687,7 @@
 	resp = (void *)pkt->data;
 
 	IWL_DEBUG_TE(mvm,
-		     "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
+		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
 		     resp->status, resp->event_unique_id);
 
 	te_data->uid = le32_to_cpu(resp->event_unique_id);
@@ -3392,7 +3711,7 @@
 			       struct ieee80211_vif *vif,
 			       int duration)
 {
-	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
+	int res;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
 	static const u16 time_event_response[] = { HOT_SPOT_CMD };
@@ -3405,14 +3724,19 @@
 		.id_and_color =
 			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
 		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
-		/* Set the channel info data */
-		.channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
-			PHY_BAND_24 : PHY_BAND_5,
-		.channel_info.channel = channel->hw_value,
-		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
-		/* Set the time and duration */
-		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
-	};
+	};
+	struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm,
+		&aux_roc_req.channel_info);
+	u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);
+
+	/* Set the channel info data */
+	iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
+			      iwl_mvm_phy_band_from_nl80211(channel->band),
+			      PHY_VHT_CHANNEL_MODE20,
+			      0);
+
+	/* Set the time and duration */
+	tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));
 
 	delay = AUX_ROC_MIN_DELAY;
 	req_dur = MSEC_TO_TU(duration);
@@ -3437,8 +3761,8 @@
 		}
 	}
 
-	aux_roc_req.duration = cpu_to_le32(req_dur);
-	aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
+	tail->duration = cpu_to_le32(req_dur);
+	tail->apply_time_max_delay = cpu_to_le32(delay);
 
 	IWL_DEBUG_TE(mvm,
 		     "ROC: Requesting to remain on channel %u for %ums\n",
@@ -3448,7 +3772,7 @@
 		     duration, delay, dtim_interval);
 
 	/* Set the node address */
-	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
+	memcpy(tail->node_addr, vif->addr, ETH_ALEN);
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -3479,7 +3803,7 @@
 				   ARRAY_SIZE(time_event_response),
 				   iwl_mvm_rx_aux_roc, te_data);
 
-	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
+	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
				   &aux_roc_req);
 
 	if (res) {
@@ -3532,6 +3856,17 @@
 	if (fw_has_capa(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
 		/* Use aux roc framework (HS20) */
+		if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+					  ADD_STA, 0) >= 12) {
+			u32 lmac_id;
+
+			lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
+						      channel->band);
+			ret = iwl_mvm_add_aux_sta(mvm, lmac_id);
+			if (WARN(ret,
+				 "Failed to allocate aux station"))
+				goto out_unlock;
+		}
 		ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
 					       vif, duration);
 		goto out_unlock;
@@ -3648,18 +3983,49 @@
 	return ret;
 }
 
-static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
 	IWL_DEBUG_MAC80211(mvm, "enter\n");
 
 	mutex_lock(&mvm->mutex);
-	iwl_mvm_stop_roc(mvm);
+	iwl_mvm_stop_roc(mvm, vif);
 	mutex_unlock(&mvm->mutex);
 
 	IWL_DEBUG_MAC80211(mvm, "leave\n");
 
 	return 0;
+}
+
+struct iwl_mvm_ftm_responder_iter_data {
+	bool responder;
+	struct ieee80211_chanctx_conf *ctx;
+};
+
+static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
+					       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_ftm_responder_iter_data *data = _data;
+
+	if (rcu_access_pointer(vif->chanctx_conf) == data->ctx &&
+	    vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
+		data->responder = true;
+}
+
+static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+					     struct ieee80211_chanctx_conf *ctx)
+{
+	struct iwl_mvm_ftm_responder_iter_data data = {
+		.responder = false,
+		.ctx = ctx,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_ftm_responder_chanctx_iter,
+					&data);
+	return data.responder;
 }
 
 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
@@ -3667,6 +4033,8 @@
 {
 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
 	struct iwl_mvm_phy_ctxt *phy_ctxt;
+	bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
+	struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
 	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -3679,7 +4047,7 @@
 		goto out;
 	}
 
-	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
 				       ctx->rx_chains_static,
 				       ctx->rx_chains_dynamic);
 	if (ret) {
@@ -3734,6 +4102,8 @@
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+	bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
+	struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
 
 	if (WARN_ONCE((phy_ctxt->ref > 1) &&
 		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -3748,17 +4118,17 @@
 
 	/* we are only changing the min_width, may be a noop */
 	if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
-		if (phy_ctxt->width == ctx->min_def.width)
+		if (phy_ctxt->width == def->width)
 			goto out_unlock;
 
 		/* we are just toggling between 20_NOHT and 20 */
 		if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
-		    ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
+		    def->width <= NL80211_CHAN_WIDTH_20)
 			goto out_unlock;
 	}
 
 	iwl_mvm_bt_coex_vif_change(mvm);
-	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
 				 ctx->rx_chains_static,
 				 ctx->rx_chains_dynamic);
 
@@ -3787,6 +4157,7 @@
 		mvmvif->ap_ibss_active = true;
 		break;
 	}
+	/* fall through */
 	case NL80211_IFTYPE_ADHOC:
 		/*
 		 * The AP binding flow is handled as part of the start_ap flow
@@ -3839,25 +4210,19 @@
 	}
 
 	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
-		u32 duration = 3 * vif->bss_conf.beacon_int;
-
-		/* iwl_mvm_protect_session() reads directly from the
-		 * device (the system time), so make sure it is
-		 * available.
-		 */
-		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
-		if (ret)
-			goto out_remove_binding;
-
-		/* Protect the session to make sure we hear the first
-		 * beacon on the new channel.
-		 */
 		mvmvif->csa_bcn_pending = true;
-		iwl_mvm_protect_session(mvm, vif, duration, duration,
-					vif->bss_conf.beacon_int / 2,
-					true);
-		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+		if (!fw_has_capa(&mvm->fw->ucode_capa,
+				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
+			u32 duration = 3 * vif->bss_conf.beacon_int;
+
+			/* Protect the session to make sure we hear the first
+			 * beacon on the new channel.
+			 */
+			iwl_mvm_protect_session(mvm, vif, duration, duration,
+						vif->bss_conf.beacon_int / 2,
+						true);
+		}
 
 		iwl_mvm_update_quotas(mvm, false, NULL);
 	}
@@ -3927,7 +4292,9 @@
 		disabled_vif = vif;
 
-		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
+		if (!fw_has_capa(&mvm->fw->ucode_capa,
+				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+			iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
 		break;
 	default:
 		break;
@@ -3937,6 +4304,9 @@
 	iwl_mvm_binding_remove_vif(mvm, vif);
 
 out:
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+	    switching_chanctx)
+		return;
 	mvmvif->phy_ctxt = NULL;
 	iwl_mvm_power_update_mac(mvm);
 }
@@ -4111,8 +4481,8 @@
 	int err;
 	u32 noa_duration;
 
-	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
-			NULL);
+	err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
+				   iwl_mvm_tm_policy, NULL);
 	if (err)
 		return err;
 
@@ -4178,6 +4548,69 @@
 			   "dummy channel switch op\n");
 }
 
+static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_channel_switch *chsw)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_chan_switch_te_cmd cmd = {
+		.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							  mvmvif->color)),
+		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+		.tsf = cpu_to_le32(chsw->timestamp),
+		.cs_count = chsw->count,
+		.cs_mode = chsw->block_tx,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (chsw->delay)
+		cmd.cs_delayed_bcn_count =
+			DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int);
+
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(MAC_CONF_GROUP,
+					    CHANNEL_SWITCH_TIME_EVENT_CMD),
+				    0, sizeof(cmd), &cmd);
+}
+
+static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_channel_switch *chsw)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u32 apply_time;
+
+	/* Schedule the time event to a bit before beacon 1,
+	 * to make sure we're in the new channel when the
+	 * GO/AP arrives. In case count <= 1 immediately schedule the
+	 * TE (this might result with some packet loss or connection
+	 * loss).
+	 */
+	if (chsw->count <= 1)
+		apply_time = 0;
+	else
+		apply_time = chsw->device_timestamp +
+			((vif->bss_conf.beacon_int * (chsw->count - 1) -
+			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
+
+	if (chsw->block_tx)
+		iwl_mvm_csa_client_absent(mvm, vif);
+
+	if (mvmvif->bf_data.bf_enabled) {
+		int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+
+		if (ret)
+			return ret;
+	}
+
+	iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
+				    apply_time);
+
+	return 0;
+}
+
+#define IWL_MAX_CSA_BLOCK_TX 1500
 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
 				      struct ieee80211_vif *vif,
 				      struct ieee80211_channel_switch *chsw)
@@ -4185,7 +4618,6 @@
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct ieee80211_vif *csa_vif;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 apply_time;
 	int ret;
 
 	mutex_lock(&mvm->mutex);
@@ -4229,30 +4661,29 @@
 		break;
 	case NL80211_IFTYPE_STATION:
-		/* Schedule the time event to a bit before beacon 1,
-		 * to make sure we're in the new channel when the
-		 * GO/AP arrives. In case count <= 1 immediately schedule the
-		 * TE (this might result with some packet loss or connection
-		 * loss).
-		 */
-		if (chsw->count <= 1)
-			apply_time = 0;
-		else
-			apply_time = chsw->device_timestamp +
-				((vif->bss_conf.beacon_int * (chsw->count - 1) -
-				  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
-
-		if (chsw->block_tx)
-			iwl_mvm_csa_client_absent(mvm, vif);
-
-		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
-					    apply_time);
-		if (mvmvif->bf_data.bf_enabled) {
-			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
-			if (ret)
-				goto out_unlock;
+		if (chsw->block_tx) {
+			/*
+			 * In case of undetermined / long time with immediate
+			 * quiet monitor status to gracefully disconnect
+			 */
+			if (!chsw->count ||
+			    chsw->count * vif->bss_conf.beacon_int >
+			    IWL_MAX_CSA_BLOCK_TX)
+				schedule_delayed_work(&mvmvif->csa_work,
+						      msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX));
 		}
 
+		if (!fw_has_capa(&mvm->fw->ucode_capa,
+				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
+			ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw);
+			if (ret)
+				goto out_unlock;
+		} else {
+			iwl_mvm_schedule_client_csa(mvm, vif, chsw);
+		}
+
+		mvmvif->csa_count = chsw->count;
+		mvmvif->csa_misbehave = false;
 		break;
 	default:
 		break;
@@ -4273,52 +4704,42 @@
 	return ret;
 }
 
-static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif)
+static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+					     struct ieee80211_vif *vif,
+					     struct ieee80211_channel_switch *chsw)
 {
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	int ret;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_chan_switch_te_cmd cmd = {
+		.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							  mvmvif->color)),
+		.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+		.tsf = cpu_to_le32(chsw->timestamp),
+		.cs_count = chsw->count,
+		.cs_mode = chsw->block_tx,
+	};
 
-	mutex_lock(&mvm->mutex);
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
+		return;
 
-	if (mvmvif->csa_failed) {
-		mvmvif->csa_failed = false;
-		ret = -EIO;
-		goto out_unlock;
-	}
-
-	if (vif->type == NL80211_IFTYPE_STATION) {
-		struct iwl_mvm_sta *mvmsta;
-
-		mvmvif->csa_bcn_pending = false;
-		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
-							  mvmvif->ap_sta_id);
-
-		if (WARN_ON(!mvmsta)) {
-			ret = -EIO;
-			goto out_unlock;
+	if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
+		if (mvmvif->csa_misbehave) {
+			/* Second time, give up on this AP*/
+			iwl_mvm_abort_channel_switch(hw, vif);
+			ieee80211_chswitch_done(vif, false);
+			mvmvif->csa_misbehave = false;
+			return;
 		}
-
-		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
-
-		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
-
-		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
-		if (ret)
-			goto out_unlock;
-
-		iwl_mvm_stop_session_protection(mvm, vif);
+		mvmvif->csa_misbehave = true;
 	}
+	mvmvif->csa_count = chsw->count;
 
-	mvmvif->ps_disabled = false;
+	IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id);
 
-	ret = iwl_mvm_power_update_ps(mvm);
-
-out_unlock:
-	mutex_unlock(&mvm->mutex);
-
-	return ret;
+	WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
				     WIDE_ID(MAC_CONF_GROUP,
					     CHANNEL_SWITCH_TIME_EVENT_CMD),
				     CMD_ASYNC, sizeof(cmd), &cmd));
 }
 
 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
@@ -4338,7 +4759,7 @@
 	}
 
 	mutex_lock(&mvm->mutex);
-	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
 		struct ieee80211_sta *sta;
 
 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
@@ -4347,7 +4768,7 @@
 			continue;
 
 		if (drop)
-			iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+			iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
 		else
 			iwl_mvm_wait_sta_queues_empty(mvm,
 					iwl_mvm_sta_from_mac80211(sta));
@@ -4380,7 +4801,7 @@
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
 	/* flush the AP-station and all TDLS peers */
-	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
 						lockdep_is_held(&mvm->mutex));
 		if (IS_ERR_OR_NULL(sta))
@@ -4394,7 +4815,7 @@
 		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
 
 		if (drop) {
-			if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+			if (iwl_mvm_flush_sta(mvm, mvmsta, false))
 				IWL_ERR(mvm, "flush request fail\n");
 		} else {
 			msk |= mvmsta->tfd_queue_msk;
@@ -4462,6 +4883,125 @@
 	return ret;
 }
 
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		rinfo->bw = RATE_INFO_BW_20;
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		rinfo->bw = RATE_INFO_BW_40;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		rinfo->bw = RATE_INFO_BW_80;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		rinfo->bw = RATE_INFO_BW_160;
+		break;
+	}
+
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		rinfo->flags |= RATE_INFO_FLAGS_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_HT_MCS_NSS_MSK) + 1;
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_NSS_MSK) + 1;
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate_n_flags & RATE_MCS_HE_MSK) {
+		u32 gi_ltf = u32_get_bits(rate_n_flags,
+					  RATE_MCS_HE_GI_LTF_MSK);
+
+		rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_NSS_MSK) + 1;
+
+		if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+			rinfo->bw = RATE_INFO_BW_HE_RU;
+			rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+		}
+
+		switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+		case RATE_MCS_HE_TYPE_SU:
+		case RATE_MCS_HE_TYPE_EXT_SU:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else if (gi_ltf == 2)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else if (rate_n_flags & RATE_MCS_SGI_MSK)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		case RATE_MCS_HE_TYPE_MU:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else if (gi_ltf == 2)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		case RATE_MCS_HE_TYPE_TRIG:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		}
+
+		if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+			rinfo->he_dcm = 1;
+	} else {
+		switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+		case IWL_RATE_1M_PLCP:
+			rinfo->legacy = 10;
+			break;
+		case IWL_RATE_2M_PLCP:
+			rinfo->legacy = 20;
+			break;
+		case IWL_RATE_5M_PLCP:
+			rinfo->legacy = 55;
+			break;
+		case IWL_RATE_11M_PLCP:
+			rinfo->legacy = 110;
+			break;
+		case IWL_RATE_6M_PLCP:
+			rinfo->legacy = 60;
+			break;
+		case IWL_RATE_9M_PLCP:
+			rinfo->legacy = 90;
+			break;
+		case IWL_RATE_12M_PLCP:
+			rinfo->legacy = 120;
+			break;
+		case IWL_RATE_18M_PLCP:
+			rinfo->legacy = 180;
+			break;
+		case IWL_RATE_24M_PLCP:
+			rinfo->legacy = 240;
+			break;
+		case IWL_RATE_36M_PLCP:
+			rinfo->legacy = 360;
+			break;
+		case IWL_RATE_48M_PLCP:
+			rinfo->legacy = 480;
+			break;
+		case IWL_RATE_54M_PLCP:
+			rinfo->legacy = 540;
+			break;
+		}
+	}
+}
+
 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_sta *sta,
@@ -4472,8 +5012,15 @@
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 	if (mvmsta->avg_energy) {
-		sinfo->signal_avg = mvmsta->avg_energy;
+		sinfo->signal_avg = -(s8)mvmsta->avg_energy;
 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+	}
+
+	if (iwl_mvm_has_tlc_offload(mvm)) {
+		struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+		iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
 	}
 
 	/* if beacon filtering isn't on mac80211 does it anyway */
@@ -4517,14 +5064,12 @@
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_MLME);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
 	trig_mlme = (void *)trig->data;
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
 
 	if (event->u.mlme.data == ASSOC_EVENT) {
 		if (event->u.mlme.status == MLME_DENIED)
@@ -4559,14 +5104,12 @@
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_BA);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
 	ba_trig = (void *)trig->data;
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
 
 	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
 		return;
@@ -4606,24 +5149,25 @@
 	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
 	int ret;
 
-	lockdep_assert_held(&mvm->mutex);
 
 	if (!iwl_mvm_has_new_rx_api(mvm))
 		return;
 
-	notif->cookie = mvm->queue_sync_cookie;
-
-	if (notif->sync)
+	if (notif->sync) {
+		notif->cookie = mvm->queue_sync_cookie;
 		atomic_set(&mvm->queue_sync_counter,
 			   mvm->trans->num_rx_queues);
+	}
 
-	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
+				      size, !notif->sync);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
 		goto out;
 	}
 
 	if (notif->sync) {
+		lockdep_assert_held(&mvm->mutex);
 		ret = wait_event_timeout(mvm->rx_sync_waitq,
 					 atomic_read(&mvm->queue_sync_counter) == 0 ||
 					 iwl_mvm_is_radio_killed(mvm),
@@ -4633,7 +5177,8 @@
 
 out:
 	atomic_set(&mvm->queue_sync_counter, 0);
-	mvm->queue_sync_cookie++;
+	if (notif->sync)
+		mvm->queue_sync_cookie++;
 }
 
 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
@@ -4649,9 +5194,91 @@
 	mutex_unlock(&mvm->mutex);
 }
 
+static int
+iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct cfg80211_ftm_responder_stats *stats)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+	    !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder)
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+	*stats = mvm->ftm_resp_stats;
+	mutex_unlock(&mvm->mutex);
+
+	stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
+			BIT(NL80211_FTM_STATS_PARTIAL_NUM) |
+			BIT(NL80211_FTM_STATS_FAILED_NUM) |
+			BIT(NL80211_FTM_STATS_ASAP_NUM) |
+			BIT(NL80211_FTM_STATS_NON_ASAP_NUM) |
+			BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) |
+			BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) |
+			BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) |
+			BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM);
+
+	return 0;
+}
+
+static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct cfg80211_pmsr_request *request)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_ftm_start(mvm, vif, request);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct cfg80211_pmsr_request *request)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_ftm_abort(mvm, request);
+	mutex_unlock(&mvm->mutex);
+}
+
+static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
+{
+	u8 protocol = ip_hdr(skb)->protocol;
+
+	if (!IS_ENABLED(CONFIG_INET))
+		return false;
+
+	return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
+}
+
+static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
+				      struct sk_buff *head,
+				      struct sk_buff *skb)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* For now don't aggregate IPv6 in AMSDU */
+	if (skb->protocol != htons(ETH_P_IP))
+		return false;
+
+	if (!iwl_mvm_is_csum_supported(mvm))
+		return true;
+
+	return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.tx = iwl_mvm_mac_tx,
+	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
 	.ampdu_action = iwl_mvm_mac_ampdu_action,
+	.get_antenna = iwl_mvm_op_get_antenna,
 	.start = iwl_mvm_mac_start,
 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
 	.stop = iwl_mvm_mac_stop,
@@ -4700,6 +5327,8 @@
 	.channel_switch = iwl_mvm_channel_switch,
 	.pre_channel_switch = iwl_mvm_pre_channel_switch,
 	.post_channel_switch = iwl_mvm_post_channel_switch,
+	.abort_channel_switch = iwl_mvm_abort_channel_switch,
+	.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
 
 	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
 	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
@@ -4724,6 +5353,11 @@
 #endif
 	.get_survey = iwl_mvm_mac_get_survey,
 	.sta_statistics = iwl_mvm_mac_sta_statistics,
+	.get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats,
+	.start_pmsr = iwl_mvm_start_pmsr,
+	.abort_pmsr = iwl_mvm_abort_pmsr,
+
+	.can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
 #endif
--
Gitblit v1.6.2