.. | .. |
---|
8 | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
---|
9 | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
---|
10 | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
---|
| 11 | + * Copyright(c) 2018 - 2020 Intel Corporation |
---|
11 | 12 | * |
---|
12 | 13 | * This program is free software; you can redistribute it and/or modify |
---|
13 | 14 | * it under the terms of version 2 of the GNU General Public License as |
---|
.. | .. |
---|
17 | 18 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
18 | 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
19 | 20 | * General Public License for more details. |
---|
20 | | - * |
---|
21 | | - * You should have received a copy of the GNU General Public License |
---|
22 | | - * along with this program; if not, write to the Free Software |
---|
23 | | - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
---|
24 | | - * USA |
---|
25 | 21 | * |
---|
26 | 22 | * The full GNU General Public License is included in this distribution |
---|
27 | 23 | * in the file called COPYING. |
---|
.. | .. |
---|
35 | 31 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
---|
36 | 32 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
---|
37 | 33 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
---|
| 34 | + * Copyright(c) 2018 - 2020 Intel Corporation |
---|
38 | 35 | * All rights reserved. |
---|
39 | 36 | * |
---|
40 | 37 | * Redistribution and use in source and binary forms, with or without |
---|
.. | .. |
---|
82 | 79 | struct iwl_fw_dbg_trigger_tlv *trig; |
---|
83 | 80 | struct iwl_fw_dbg_trigger_ba *ba_trig; |
---|
84 | 81 | |
---|
85 | | - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) |
---|
| 82 | + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); |
---|
| 83 | + if (!trig) |
---|
86 | 84 | return; |
---|
87 | 85 | |
---|
88 | | - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); |
---|
89 | 86 | ba_trig = (void *)trig->data; |
---|
90 | | - |
---|
91 | | - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) |
---|
92 | | - return; |
---|
93 | 87 | |
---|
94 | 88 | if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) |
---|
95 | 89 | return; |
---|
.. | .. |
---|
215 | 209 | u16 offload_assist = 0; |
---|
216 | 210 | u8 ac; |
---|
217 | 211 | |
---|
218 | | - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) |
---|
| 212 | + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || |
---|
| 213 | + (ieee80211_is_probe_resp(fc) && |
---|
| 214 | + !is_multicast_ether_addr(hdr->addr1))) |
---|
219 | 215 | tx_flags |= TX_CMD_FLG_ACK; |
---|
220 | 216 | else |
---|
221 | 217 | tx_flags &= ~TX_CMD_FLG_ACK; |
---|
.. | .. |
---|
245 | 241 | iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, |
---|
246 | 242 | ssn); |
---|
247 | 243 | } else { |
---|
248 | | - tx_cmd->tid_tspec = IWL_TID_NON_QOS; |
---|
| 244 | + if (ieee80211_is_data(fc)) |
---|
| 245 | + tx_cmd->tid_tspec = IWL_TID_NON_QOS; |
---|
| 246 | + else |
---|
| 247 | + tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; |
---|
| 248 | + |
---|
249 | 249 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) |
---|
250 | 250 | tx_flags |= TX_CMD_FLG_SEQ_CTL; |
---|
251 | 251 | else |
---|
252 | 252 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; |
---|
253 | 253 | } |
---|
254 | 254 | |
---|
255 | | - /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ |
---|
| 255 | + /* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */ |
---|
256 | 256 | if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) |
---|
257 | 257 | ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; |
---|
258 | 258 | else |
---|
.. | .. |
---|
280 | 280 | } |
---|
281 | 281 | |
---|
282 | 282 | if (ieee80211_is_data(fc) && len > mvm->rts_threshold && |
---|
283 | | - !is_multicast_ether_addr(ieee80211_get_DA(hdr))) |
---|
| 283 | + !is_multicast_ether_addr(hdr->addr1)) |
---|
284 | 284 | tx_flags |= TX_CMD_FLG_PROT_REQUIRE; |
---|
285 | 285 | |
---|
286 | 286 | if (fw_has_capa(&mvm->fw->ucode_capa, |
---|
.. | .. |
---|
304 | 304 | offload_assist)); |
---|
305 | 305 | } |
---|
306 | 306 | |
---|
| 307 | +static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, |
---|
| 308 | + struct ieee80211_tx_info *info, |
---|
| 309 | + struct ieee80211_sta *sta, __le16 fc) |
---|
| 310 | +{ |
---|
| 311 | + if (info->band == NL80211_BAND_2GHZ && |
---|
| 312 | + !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) |
---|
| 313 | + return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; |
---|
| 314 | + |
---|
| 315 | + if (sta && ieee80211_is_data(fc)) { |
---|
| 316 | + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
| 317 | + |
---|
| 318 | + return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; |
---|
| 319 | + } |
---|
| 320 | + |
---|
| 321 | + return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; |
---|
| 322 | +} |
---|
| 323 | + |
---|
307 | 324 | static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, |
---|
308 | 325 | struct ieee80211_tx_info *info, |
---|
309 | 326 | struct ieee80211_sta *sta) |
---|
310 | 327 | { |
---|
311 | 328 | int rate_idx; |
---|
312 | 329 | u8 rate_plcp; |
---|
313 | | - u32 rate_flags; |
---|
| 330 | + u32 rate_flags = 0; |
---|
314 | 331 | |
---|
315 | 332 | /* HT rate doesn't make sense for a non data frame */ |
---|
316 | 333 | WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, |
---|
.. | .. |
---|
324 | 341 | rate_idx = rate_lowest_index( |
---|
325 | 342 | &mvm->nvm_data->bands[info->band], sta); |
---|
326 | 343 | |
---|
327 | | - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ |
---|
328 | | - if (info->band == NL80211_BAND_5GHZ) |
---|
| 344 | + /* |
---|
| 345 | + * For non 2 GHZ band, remap mac80211 rate |
---|
| 346 | + * indices into driver indices |
---|
| 347 | + */ |
---|
| 348 | + if (info->band != NL80211_BAND_2GHZ) |
---|
329 | 349 | rate_idx += IWL_FIRST_OFDM_RATE; |
---|
330 | 350 | |
---|
331 | 351 | /* For 2.4 GHZ band, check that there is no need to remap */ |
---|
.. | .. |
---|
334 | 354 | /* Get PLCP rate for tx_cmd->rate_n_flags */ |
---|
335 | 355 | rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); |
---|
336 | 356 | |
---|
337 | | - if (info->band == NL80211_BAND_2GHZ && |
---|
338 | | - !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) |
---|
339 | | - rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; |
---|
340 | | - else |
---|
341 | | - rate_flags = |
---|
342 | | - BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; |
---|
343 | | - |
---|
344 | 357 | /* Set CCK flag as needed */ |
---|
345 | 358 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) |
---|
346 | 359 | rate_flags |= RATE_MCS_CCK_MSK; |
---|
347 | 360 | |
---|
348 | 361 | return (u32)rate_plcp | rate_flags; |
---|
| 362 | +} |
---|
| 363 | + |
---|
| 364 | +static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, |
---|
| 365 | + struct ieee80211_tx_info *info, |
---|
| 366 | + struct ieee80211_sta *sta, __le16 fc) |
---|
| 367 | +{ |
---|
| 368 | + return iwl_mvm_get_tx_rate(mvm, info, sta) | |
---|
| 369 | + iwl_mvm_get_tx_ant(mvm, info, sta, fc); |
---|
349 | 370 | } |
---|
350 | 371 | |
---|
351 | 372 | /* |
---|
.. | .. |
---|
375 | 396 | */ |
---|
376 | 397 | |
---|
377 | 398 | if (ieee80211_is_data(fc) && sta) { |
---|
378 | | - tx_cmd->initial_rate_index = 0; |
---|
379 | | - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); |
---|
380 | | - return; |
---|
| 399 | + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
| 400 | + |
---|
| 401 | + if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { |
---|
| 402 | + tx_cmd->initial_rate_index = 0; |
---|
| 403 | + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); |
---|
| 404 | + return; |
---|
| 405 | + } |
---|
381 | 406 | } else if (ieee80211_is_back_req(fc)) { |
---|
382 | 407 | tx_cmd->tx_flags |= |
---|
383 | 408 | cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); |
---|
384 | 409 | } |
---|
385 | 410 | |
---|
386 | | - mvm->mgmt_last_antenna_idx = |
---|
387 | | - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), |
---|
388 | | - mvm->mgmt_last_antenna_idx); |
---|
389 | | - |
---|
390 | 411 | /* Set the rate in the TX cmd */ |
---|
391 | | - tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); |
---|
| 412 | + tx_cmd->rate_n_flags = |
---|
| 413 | + cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); |
---|
392 | 414 | } |
---|
393 | 415 | |
---|
394 | 416 | static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, |
---|
.. | .. |
---|
468 | 490 | /* |
---|
469 | 491 | * Allocates the Tx cmd and sets the driver data pointers in the skb |
---|
470 | 492 | */ |
---|
471 | | -static struct iwl_device_cmd * |
---|
| 493 | +static struct iwl_device_tx_cmd * |
---|
472 | 494 | iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, |
---|
473 | 495 | struct ieee80211_tx_info *info, int hdrlen, |
---|
474 | 496 | struct ieee80211_sta *sta, u8 sta_id) |
---|
475 | 497 | { |
---|
476 | 498 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
---|
477 | | - struct iwl_device_cmd *dev_cmd; |
---|
| 499 | + struct iwl_device_tx_cmd *dev_cmd; |
---|
478 | 500 | struct iwl_tx_cmd *tx_cmd; |
---|
479 | 501 | |
---|
480 | 502 | dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); |
---|
.. | .. |
---|
482 | 504 | if (unlikely(!dev_cmd)) |
---|
483 | 505 | return NULL; |
---|
484 | 506 | |
---|
485 | | - /* Make sure we zero enough of dev_cmd */ |
---|
486 | | - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); |
---|
487 | | - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); |
---|
488 | | - |
---|
489 | | - memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); |
---|
490 | 507 | dev_cmd->hdr.cmd = TX_CMD; |
---|
491 | 508 | |
---|
492 | 509 | if (iwl_mvm_has_new_tx_api(mvm)) { |
---|
493 | 510 | u16 offload_assist = 0; |
---|
494 | 511 | u32 rate_n_flags = 0; |
---|
495 | 512 | u16 flags = 0; |
---|
| 513 | + struct iwl_mvm_sta *mvmsta = sta ? |
---|
| 514 | + iwl_mvm_sta_from_mac80211(sta) : NULL; |
---|
496 | 515 | |
---|
497 | 516 | if (ieee80211_is_data_qos(hdr->frame_control)) { |
---|
498 | 517 | u8 *qc = ieee80211_get_qos_ctl(hdr); |
---|
.. | .. |
---|
512 | 531 | if (!info->control.hw_key) |
---|
513 | 532 | flags |= IWL_TX_FLAGS_ENCRYPT_DIS; |
---|
514 | 533 | |
---|
515 | | - /* For data packets rate info comes from the fw */ |
---|
516 | | - if (!(ieee80211_is_data(hdr->frame_control) && sta)) { |
---|
| 534 | + /* |
---|
| 535 | + * For data packets rate info comes from the fw. Only |
---|
| 536 | + * set rate/antenna during connection establishment or in case |
---|
| 537 | + * no station is given. |
---|
| 538 | + */ |
---|
| 539 | + if (!sta || !ieee80211_is_data(hdr->frame_control) || |
---|
| 540 | + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { |
---|
517 | 541 | flags |= IWL_TX_FLAGS_CMD_RATE; |
---|
518 | | - rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta); |
---|
| 542 | + rate_n_flags = |
---|
| 543 | + iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, |
---|
| 544 | + hdr->frame_control); |
---|
519 | 545 | } |
---|
520 | 546 | |
---|
521 | | - if (mvm->trans->cfg->device_family >= |
---|
522 | | - IWL_DEVICE_FAMILY_22560) { |
---|
| 547 | + if (mvm->trans->trans_cfg->device_family >= |
---|
| 548 | + IWL_DEVICE_FAMILY_AX210) { |
---|
523 | 549 | struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; |
---|
524 | 550 | |
---|
525 | 551 | cmd->offload_assist |= cpu_to_le32(offload_assist); |
---|
.. | .. |
---|
566 | 592 | } |
---|
567 | 593 | |
---|
568 | 594 | static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, |
---|
569 | | - struct iwl_device_cmd *cmd) |
---|
| 595 | + struct iwl_device_tx_cmd *cmd) |
---|
570 | 596 | { |
---|
571 | 597 | struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); |
---|
572 | 598 | |
---|
.. | .. |
---|
577 | 603 | } |
---|
578 | 604 | |
---|
579 | 605 | static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, |
---|
580 | | - struct ieee80211_tx_info *info, __le16 fc) |
---|
| 606 | + struct ieee80211_tx_info *info, |
---|
| 607 | + struct ieee80211_hdr *hdr) |
---|
581 | 608 | { |
---|
582 | | - struct iwl_mvm_vif *mvmvif; |
---|
583 | | - |
---|
584 | | - mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); |
---|
| 609 | + struct iwl_mvm_vif *mvmvif = |
---|
| 610 | + iwl_mvm_vif_from_mac80211(info->control.vif); |
---|
| 611 | + __le16 fc = hdr->frame_control; |
---|
585 | 612 | |
---|
586 | 613 | switch (info->control.vif->type) { |
---|
587 | 614 | case NL80211_IFTYPE_AP: |
---|
.. | .. |
---|
600 | 627 | (!ieee80211_is_bufferable_mmpdu(fc) || |
---|
601 | 628 | ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) |
---|
602 | 629 | return mvm->probe_queue; |
---|
603 | | - if (info->hw_queue == info->control.vif->cab_queue) |
---|
| 630 | + |
---|
| 631 | + if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && |
---|
| 632 | + is_multicast_ether_addr(hdr->addr1)) |
---|
604 | 633 | return mvmvif->cab_queue; |
---|
605 | 634 | |
---|
606 | 635 | WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, |
---|
.. | .. |
---|
609 | 638 | case NL80211_IFTYPE_P2P_DEVICE: |
---|
610 | 639 | if (ieee80211_is_mgmt(fc)) |
---|
611 | 640 | return mvm->p2p_dev_queue; |
---|
612 | | - if (info->hw_queue == info->control.vif->cab_queue) |
---|
613 | | - return mvmvif->cab_queue; |
---|
614 | 641 | |
---|
615 | 642 | WARN_ON_ONCE(1); |
---|
616 | 643 | return mvm->p2p_dev_queue; |
---|
.. | .. |
---|
620 | 647 | } |
---|
621 | 648 | } |
---|
622 | 649 | |
---|
| 650 | +static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, |
---|
| 651 | + struct sk_buff *skb) |
---|
| 652 | +{ |
---|
| 653 | + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
---|
| 654 | + struct iwl_mvm_vif *mvmvif = |
---|
| 655 | + iwl_mvm_vif_from_mac80211(info->control.vif); |
---|
| 656 | + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; |
---|
| 657 | + int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; |
---|
| 658 | + struct iwl_probe_resp_data *resp_data; |
---|
| 659 | + u8 *ie, *pos; |
---|
| 660 | + u8 match[] = { |
---|
| 661 | + (WLAN_OUI_WFA >> 16) & 0xff, |
---|
| 662 | + (WLAN_OUI_WFA >> 8) & 0xff, |
---|
| 663 | + WLAN_OUI_WFA & 0xff, |
---|
| 664 | + WLAN_OUI_TYPE_WFA_P2P, |
---|
| 665 | + }; |
---|
| 666 | + |
---|
| 667 | + rcu_read_lock(); |
---|
| 668 | + |
---|
| 669 | + resp_data = rcu_dereference(mvmvif->probe_resp_data); |
---|
| 670 | + if (!resp_data) |
---|
| 671 | + goto out; |
---|
| 672 | + |
---|
| 673 | + if (!resp_data->notif.noa_active) |
---|
| 674 | + goto out; |
---|
| 675 | + |
---|
| 676 | + ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, |
---|
| 677 | + mgmt->u.probe_resp.variable, |
---|
| 678 | + skb->len - base_len, |
---|
| 679 | + match, 4, 2); |
---|
| 680 | + if (!ie) { |
---|
| 681 | + IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); |
---|
| 682 | + goto out; |
---|
| 683 | + } |
---|
| 684 | + |
---|
| 685 | + if (skb_tailroom(skb) < resp_data->noa_len) { |
---|
| 686 | + if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { |
---|
| 687 | + IWL_ERR(mvm, |
---|
| 688 | + "Failed to reallocate probe resp\n"); |
---|
| 689 | + goto out; |
---|
| 690 | + } |
---|
| 691 | + } |
---|
| 692 | + |
---|
| 693 | + pos = skb_put(skb, resp_data->noa_len); |
---|
| 694 | + |
---|
| 695 | + *pos++ = WLAN_EID_VENDOR_SPECIFIC; |
---|
| 696 | + /* Set length of IE body (not including ID and length itself) */ |
---|
| 697 | + *pos++ = resp_data->noa_len - 2; |
---|
| 698 | + *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; |
---|
| 699 | + *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; |
---|
| 700 | + *pos++ = WLAN_OUI_WFA & 0xff; |
---|
| 701 | + *pos++ = WLAN_OUI_TYPE_WFA_P2P; |
---|
| 702 | + |
---|
| 703 | + memcpy(pos, &resp_data->notif.noa_attr, |
---|
| 704 | + resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); |
---|
| 705 | + |
---|
| 706 | +out: |
---|
| 707 | + rcu_read_unlock(); |
---|
| 708 | +} |
---|
| 709 | + |
---|
623 | 710 | int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) |
---|
624 | 711 | { |
---|
625 | 712 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
---|
626 | | - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); |
---|
627 | 713 | struct ieee80211_tx_info info; |
---|
628 | | - struct iwl_device_cmd *dev_cmd; |
---|
| 714 | + struct iwl_device_tx_cmd *dev_cmd; |
---|
629 | 715 | u8 sta_id; |
---|
630 | 716 | int hdrlen = ieee80211_hdrlen(hdr->frame_control); |
---|
631 | | - int queue; |
---|
| 717 | + __le16 fc = hdr->frame_control; |
---|
| 718 | + bool offchannel = IEEE80211_SKB_CB(skb)->flags & |
---|
| 719 | + IEEE80211_TX_CTL_TX_OFFCHAN; |
---|
| 720 | + int queue = -1; |
---|
632 | 721 | |
---|
633 | | - /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used |
---|
634 | | - * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel |
---|
635 | | - * queue. STATION (HS2.0) uses the auxiliary context of the FW, |
---|
636 | | - * and hence needs to be sent on the aux queue |
---|
637 | | - */ |
---|
638 | | - if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && |
---|
639 | | - skb_info->control.vif->type == NL80211_IFTYPE_STATION) |
---|
640 | | - skb_info->hw_queue = mvm->aux_queue; |
---|
| 722 | + if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) |
---|
| 723 | + return -1; |
---|
641 | 724 | |
---|
642 | 725 | memcpy(&info, skb->cb, sizeof(info)); |
---|
643 | 726 | |
---|
.. | .. |
---|
647 | 730 | if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) |
---|
648 | 731 | return -1; |
---|
649 | 732 | |
---|
650 | | - if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && |
---|
651 | | - (!info.control.vif || |
---|
652 | | - info.hw_queue != info.control.vif->cab_queue))) |
---|
653 | | - return -1; |
---|
654 | | - |
---|
655 | | - queue = info.hw_queue; |
---|
656 | | - |
---|
657 | | - /* |
---|
658 | | - * If the interface on which the frame is sent is the P2P_DEVICE |
---|
659 | | - * or an AP/GO interface use the broadcast station associated |
---|
660 | | - * with it; otherwise if the interface is a managed interface |
---|
661 | | - * use the AP station associated with it for multicast traffic |
---|
662 | | - * (this is not possible for unicast packets as a TLDS discovery |
---|
663 | | - * response are sent without a station entry); otherwise use the |
---|
664 | | - * AUX station. |
---|
665 | | - */ |
---|
666 | | - sta_id = mvm->aux_sta.sta_id; |
---|
667 | 733 | if (info.control.vif) { |
---|
668 | 734 | struct iwl_mvm_vif *mvmvif = |
---|
669 | 735 | iwl_mvm_vif_from_mac80211(info.control.vif); |
---|
.. | .. |
---|
676 | 742 | else |
---|
677 | 743 | sta_id = mvmvif->mcast_sta.sta_id; |
---|
678 | 744 | |
---|
679 | | - queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, |
---|
680 | | - hdr->frame_control); |
---|
681 | | - if (queue < 0) |
---|
682 | | - return -1; |
---|
683 | | - } else if (info.control.vif->type == NL80211_IFTYPE_STATION && |
---|
684 | | - is_multicast_ether_addr(hdr->addr1)) { |
---|
685 | | - u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); |
---|
686 | | - |
---|
687 | | - if (ap_sta_id != IWL_MVM_INVALID_STA) |
---|
688 | | - sta_id = ap_sta_id; |
---|
| 745 | + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); |
---|
689 | 746 | } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { |
---|
690 | 747 | queue = mvm->snif_queue; |
---|
691 | 748 | sta_id = mvm->snif_sta.sta_id; |
---|
| 749 | + } else if (info.control.vif->type == NL80211_IFTYPE_STATION && |
---|
| 750 | + offchannel) { |
---|
| 751 | + /* |
---|
| 752 | + * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets |
---|
| 753 | + * that can be used in 2 different types of vifs, P2P & |
---|
| 754 | + * STATION. |
---|
| 755 | + * P2P uses the offchannel queue. |
---|
| 756 | + * STATION (HS2.0) uses the auxiliary context of the FW, |
---|
| 757 | + * and hence needs to be sent on the aux queue. |
---|
| 758 | + */ |
---|
| 759 | + sta_id = mvm->aux_sta.sta_id; |
---|
| 760 | + queue = mvm->aux_queue; |
---|
692 | 761 | } |
---|
693 | 762 | } |
---|
| 763 | + |
---|
| 764 | + if (queue < 0) { |
---|
| 765 | + IWL_ERR(mvm, "No queue was found. Dropping TX\n"); |
---|
| 766 | + return -1; |
---|
| 767 | + } |
---|
| 768 | + |
---|
| 769 | + if (unlikely(ieee80211_is_probe_resp(fc))) |
---|
| 770 | + iwl_mvm_probe_resp_set_noa(mvm, skb); |
---|
694 | 771 | |
---|
695 | 772 | IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); |
---|
696 | 773 | |
---|
.. | .. |
---|
709 | 786 | return 0; |
---|
710 | 787 | } |
---|
711 | 788 | |
---|
712 | | -#ifdef CONFIG_INET |
---|
713 | | - |
---|
714 | | -static int |
---|
715 | | -iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, |
---|
716 | | - netdev_features_t netdev_flags, |
---|
717 | | - struct sk_buff_head *mpdus_skb) |
---|
718 | | -{ |
---|
719 | | - struct sk_buff *tmp, *next; |
---|
720 | | - struct ieee80211_hdr *hdr = (void *)skb->data; |
---|
721 | | - char cb[sizeof(skb->cb)]; |
---|
722 | | - u16 i = 0; |
---|
723 | | - unsigned int tcp_payload_len; |
---|
724 | | - unsigned int mss = skb_shinfo(skb)->gso_size; |
---|
725 | | - bool ipv4 = (skb->protocol == htons(ETH_P_IP)); |
---|
726 | | - u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; |
---|
727 | | - |
---|
728 | | - skb_shinfo(skb)->gso_size = num_subframes * mss; |
---|
729 | | - memcpy(cb, skb->cb, sizeof(cb)); |
---|
730 | | - |
---|
731 | | - next = skb_gso_segment(skb, netdev_flags); |
---|
732 | | - skb_shinfo(skb)->gso_size = mss; |
---|
733 | | - if (WARN_ON_ONCE(IS_ERR(next))) |
---|
734 | | - return -EINVAL; |
---|
735 | | - else if (next) |
---|
736 | | - consume_skb(skb); |
---|
737 | | - |
---|
738 | | - while (next) { |
---|
739 | | - tmp = next; |
---|
740 | | - next = tmp->next; |
---|
741 | | - |
---|
742 | | - memcpy(tmp->cb, cb, sizeof(tmp->cb)); |
---|
743 | | - /* |
---|
744 | | - * Compute the length of all the data added for the A-MSDU. |
---|
745 | | - * This will be used to compute the length to write in the TX |
---|
746 | | - * command. We have: SNAP + IP + TCP for n -1 subframes and |
---|
747 | | - * ETH header for n subframes. |
---|
748 | | - */ |
---|
749 | | - tcp_payload_len = skb_tail_pointer(tmp) - |
---|
750 | | - skb_transport_header(tmp) - |
---|
751 | | - tcp_hdrlen(tmp) + tmp->data_len; |
---|
752 | | - |
---|
753 | | - if (ipv4) |
---|
754 | | - ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); |
---|
755 | | - |
---|
756 | | - if (tcp_payload_len > mss) { |
---|
757 | | - skb_shinfo(tmp)->gso_size = mss; |
---|
758 | | - } else { |
---|
759 | | - if (ieee80211_is_data_qos(hdr->frame_control)) { |
---|
760 | | - u8 *qc; |
---|
761 | | - |
---|
762 | | - if (ipv4) |
---|
763 | | - ip_send_check(ip_hdr(tmp)); |
---|
764 | | - |
---|
765 | | - qc = ieee80211_get_qos_ctl((void *)tmp->data); |
---|
766 | | - *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; |
---|
767 | | - } |
---|
768 | | - skb_shinfo(tmp)->gso_size = 0; |
---|
769 | | - } |
---|
770 | | - |
---|
771 | | - tmp->prev = NULL; |
---|
772 | | - tmp->next = NULL; |
---|
773 | | - |
---|
774 | | - __skb_queue_tail(mpdus_skb, tmp); |
---|
775 | | - i++; |
---|
776 | | - } |
---|
777 | | - |
---|
778 | | - return 0; |
---|
779 | | -} |
---|
780 | | - |
---|
781 | | -static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, |
---|
782 | | - struct ieee80211_sta *sta, |
---|
783 | | - unsigned int tid) |
---|
| 789 | +unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, |
---|
| 790 | + struct ieee80211_sta *sta, unsigned int tid) |
---|
784 | 791 | { |
---|
785 | 792 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
786 | 793 | enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; |
---|
787 | 794 | u8 ac = tid_to_mac80211_ac[tid]; |
---|
788 | 795 | unsigned int txf; |
---|
789 | | - int lmac = IWL_LMAC_24G_INDEX; |
---|
790 | | - |
---|
791 | | - if (iwl_mvm_is_cdb_supported(mvm) && |
---|
792 | | - band == NL80211_BAND_5GHZ) |
---|
793 | | - lmac = IWL_LMAC_5G_INDEX; |
---|
| 796 | + int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); |
---|
794 | 797 | |
---|
795 | 798 | /* For HE redirect to trigger based fifos */ |
---|
796 | 799 | if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) |
---|
.. | .. |
---|
808 | 811 | mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); |
---|
809 | 812 | } |
---|
810 | 813 | |
---|
| 814 | +#ifdef CONFIG_INET |
---|
| 815 | + |
---|
| 816 | +static int |
---|
| 817 | +iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, |
---|
| 818 | + netdev_features_t netdev_flags, |
---|
| 819 | + struct sk_buff_head *mpdus_skb) |
---|
| 820 | +{ |
---|
| 821 | + struct sk_buff *tmp, *next; |
---|
| 822 | + struct ieee80211_hdr *hdr = (void *)skb->data; |
---|
| 823 | + char cb[sizeof(skb->cb)]; |
---|
| 824 | + u16 i = 0; |
---|
| 825 | + unsigned int tcp_payload_len; |
---|
| 826 | + unsigned int mss = skb_shinfo(skb)->gso_size; |
---|
| 827 | + bool ipv4 = (skb->protocol == htons(ETH_P_IP)); |
---|
| 828 | + bool qos = ieee80211_is_data_qos(hdr->frame_control); |
---|
| 829 | + u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; |
---|
| 830 | + |
---|
| 831 | + skb_shinfo(skb)->gso_size = num_subframes * mss; |
---|
| 832 | + memcpy(cb, skb->cb, sizeof(cb)); |
---|
| 833 | + |
---|
| 834 | + next = skb_gso_segment(skb, netdev_flags); |
---|
| 835 | + skb_shinfo(skb)->gso_size = mss; |
---|
| 836 | + skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; |
---|
| 837 | + if (WARN_ON_ONCE(IS_ERR(next))) |
---|
| 838 | + return -EINVAL; |
---|
| 839 | + else if (next) |
---|
| 840 | + consume_skb(skb); |
---|
| 841 | + |
---|
| 842 | + skb_list_walk_safe(next, tmp, next) { |
---|
| 843 | + memcpy(tmp->cb, cb, sizeof(tmp->cb)); |
---|
| 844 | + /* |
---|
| 845 | + * Compute the length of all the data added for the A-MSDU. |
---|
| 846 | + * This will be used to compute the length to write in the TX |
---|
| 847 | + * command. We have: SNAP + IP + TCP for n -1 subframes and |
---|
| 848 | + * ETH header for n subframes. |
---|
| 849 | + */ |
---|
| 850 | + tcp_payload_len = skb_tail_pointer(tmp) - |
---|
| 851 | + skb_transport_header(tmp) - |
---|
| 852 | + tcp_hdrlen(tmp) + tmp->data_len; |
---|
| 853 | + |
---|
| 854 | + if (ipv4) |
---|
| 855 | + ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); |
---|
| 856 | + |
---|
| 857 | + if (tcp_payload_len > mss) { |
---|
| 858 | + skb_shinfo(tmp)->gso_size = mss; |
---|
| 859 | + skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 : |
---|
| 860 | + SKB_GSO_TCPV6; |
---|
| 861 | + } else { |
---|
| 862 | + if (qos) { |
---|
| 863 | + u8 *qc; |
---|
| 864 | + |
---|
| 865 | + if (ipv4) |
---|
| 866 | + ip_send_check(ip_hdr(tmp)); |
---|
| 867 | + |
---|
| 868 | + qc = ieee80211_get_qos_ctl((void *)tmp->data); |
---|
| 869 | + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; |
---|
| 870 | + } |
---|
| 871 | + skb_shinfo(tmp)->gso_size = 0; |
---|
| 872 | + } |
---|
| 873 | + |
---|
| 874 | + skb_mark_not_on_list(tmp); |
---|
| 875 | + __skb_queue_tail(mpdus_skb, tmp); |
---|
| 876 | + i++; |
---|
| 877 | + } |
---|
| 878 | + |
---|
| 879 | + return 0; |
---|
| 880 | +} |
---|
| 881 | + |
---|
811 | 882 | static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, |
---|
812 | 883 | struct ieee80211_tx_info *info, |
---|
813 | 884 | struct ieee80211_sta *sta, |
---|
.. | .. |
---|
818 | 889 | unsigned int mss = skb_shinfo(skb)->gso_size; |
---|
819 | 890 | unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; |
---|
820 | 891 | u16 snap_ip_tcp, pad; |
---|
821 | | - unsigned int dbg_max_amsdu_len; |
---|
822 | 892 | netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; |
---|
823 | 893 | u8 tid; |
---|
824 | 894 | |
---|
825 | 895 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + |
---|
826 | 896 | tcp_hdrlen(skb); |
---|
827 | 897 | |
---|
828 | | - dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); |
---|
829 | | - |
---|
830 | 898 | if (!mvmsta->max_amsdu_len || |
---|
831 | 899 | !ieee80211_is_data_qos(hdr->frame_control) || |
---|
832 | | - (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len)) |
---|
| 900 | + !mvmsta->amsdu_enabled) |
---|
833 | 901 | return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); |
---|
834 | 902 | |
---|
835 | 903 | /* |
---|
.. | .. |
---|
851 | 919 | * No need to lock amsdu_in_ampdu_allowed since it can't be modified |
---|
852 | 920 | * during a BA session. |
---|
853 | 921 | */ |
---|
854 | | - if (info->flags & IEEE80211_TX_CTL_AMPDU && |
---|
855 | | - !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) |
---|
856 | | - return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); |
---|
857 | | - |
---|
858 | | - if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) || |
---|
| 922 | + if ((info->flags & IEEE80211_TX_CTL_AMPDU && |
---|
| 923 | + !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || |
---|
859 | 924 | !(mvmsta->amsdu_enabled & BIT(tid))) |
---|
860 | 925 | return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); |
---|
861 | 926 | |
---|
862 | | - max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid); |
---|
863 | | - |
---|
864 | | - if (unlikely(dbg_max_amsdu_len)) |
---|
865 | | - max_amsdu_len = min_t(unsigned int, max_amsdu_len, |
---|
866 | | - dbg_max_amsdu_len); |
---|
| 927 | + /* |
---|
| 928 | + * Take the min of ieee80211 station and mvm station |
---|
| 929 | + */ |
---|
| 930 | + max_amsdu_len = |
---|
| 931 | + min_t(unsigned int, sta->max_amsdu_len, |
---|
| 932 | + iwl_mvm_max_amsdu_size(mvm, sta, tid)); |
---|
867 | 933 | |
---|
868 | 934 | /* |
---|
869 | 935 | * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not |
---|
.. | .. |
---|
930 | 996 | } |
---|
931 | 997 | #endif |
---|
932 | 998 | |
---|
933 | | -static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, |
---|
934 | | - struct iwl_mvm_sta *mvm_sta, u8 tid, |
---|
935 | | - struct sk_buff *skb) |
---|
936 | | -{ |
---|
937 | | - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
---|
938 | | - u8 mac_queue = info->hw_queue; |
---|
939 | | - struct sk_buff_head *deferred_tx_frames; |
---|
940 | | - |
---|
941 | | - lockdep_assert_held(&mvm_sta->lock); |
---|
942 | | - |
---|
943 | | - mvm_sta->deferred_traffic_tid_map |= BIT(tid); |
---|
944 | | - set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames); |
---|
945 | | - |
---|
946 | | - deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames; |
---|
947 | | - |
---|
948 | | - skb_queue_tail(deferred_tx_frames, skb); |
---|
949 | | - |
---|
950 | | - /* |
---|
951 | | - * The first deferred frame should've stopped the MAC queues, so we |
---|
952 | | - * should never get a second deferred frame for the RA/TID. |
---|
953 | | - * In case of GSO the first packet may have been split, so don't warn. |
---|
954 | | - */ |
---|
955 | | - if (skb_queue_len(deferred_tx_frames) == 1) { |
---|
956 | | - iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue)); |
---|
957 | | - schedule_work(&mvm->add_stream_wk); |
---|
958 | | - } |
---|
959 | | -} |
---|
960 | | - |
---|
961 | 999 | /* Check if there are any timed-out TIDs on a given shared TXQ */ |
---|
962 | 1000 | static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) |
---|
963 | 1001 | { |
---|
.. | .. |
---|
982 | 1020 | int airtime) |
---|
983 | 1021 | { |
---|
984 | 1022 | int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; |
---|
985 | | - struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; |
---|
| 1023 | + struct iwl_mvm_tcm_mac *mdata; |
---|
| 1024 | + |
---|
| 1025 | + if (mac >= NUM_MAC_INDEX_DRIVER) |
---|
| 1026 | + return; |
---|
| 1027 | + |
---|
| 1028 | + mdata = &mvm->tcm.data[mac]; |
---|
986 | 1029 | |
---|
987 | 1030 | if (mvm->tcm.paused) |
---|
988 | 1031 | return; |
---|
.. | .. |
---|
993 | 1036 | mdata->tx.airtime += airtime; |
---|
994 | 1037 | } |
---|
995 | 1038 | |
---|
996 | | -static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, |
---|
997 | | - struct iwl_mvm_sta *mvmsta, int tid) |
---|
| 1039 | +static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, |
---|
| 1040 | + struct iwl_mvm_sta *mvmsta, int tid) |
---|
998 | 1041 | { |
---|
999 | 1042 | u32 ac = tid_to_mac80211_ac[tid]; |
---|
1000 | 1043 | int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; |
---|
1001 | | - struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; |
---|
| 1044 | + struct iwl_mvm_tcm_mac *mdata; |
---|
| 1045 | + |
---|
| 1046 | + if (mac >= NUM_MAC_INDEX_DRIVER) |
---|
| 1047 | + return -EINVAL; |
---|
| 1048 | + |
---|
| 1049 | + mdata = &mvm->tcm.data[mac]; |
---|
1002 | 1050 | |
---|
1003 | 1051 | mdata->tx.pkts[ac]++; |
---|
| 1052 | + |
---|
| 1053 | + return 0; |
---|
1004 | 1054 | } |
---|
1005 | 1055 | |
---|
1006 | 1056 | /* |
---|
1007 | | - * Sets the fields in the Tx cmd that are crypto related |
---|
| 1057 | + * Sets the fields in the Tx cmd that are crypto related. |
---|
| 1058 | + * |
---|
| 1059 | + * This function must be called with BHs disabled. |
---|
1008 | 1060 | */ |
---|
1009 | 1061 | static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, |
---|
1010 | 1062 | struct ieee80211_tx_info *info, |
---|
.. | .. |
---|
1012 | 1064 | { |
---|
1013 | 1065 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
---|
1014 | 1066 | struct iwl_mvm_sta *mvmsta; |
---|
1015 | | - struct iwl_device_cmd *dev_cmd; |
---|
| 1067 | + struct iwl_device_tx_cmd *dev_cmd; |
---|
1016 | 1068 | __le16 fc; |
---|
1017 | 1069 | u16 seq_number = 0; |
---|
1018 | 1070 | u8 tid = IWL_MAX_TID_COUNT; |
---|
1019 | | - u16 txq_id = info->hw_queue; |
---|
| 1071 | + u16 txq_id; |
---|
1020 | 1072 | bool is_ampdu = false; |
---|
1021 | 1073 | int hdrlen; |
---|
1022 | 1074 | |
---|
.. | .. |
---|
1024 | 1076 | fc = hdr->frame_control; |
---|
1025 | 1077 | hdrlen = ieee80211_hdrlen(fc); |
---|
1026 | 1078 | |
---|
| 1079 | + if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) |
---|
| 1080 | + return -1; |
---|
| 1081 | + |
---|
1027 | 1082 | if (WARN_ON_ONCE(!mvmsta)) |
---|
1028 | 1083 | return -1; |
---|
1029 | 1084 | |
---|
1030 | 1085 | if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) |
---|
1031 | 1086 | return -1; |
---|
| 1087 | + |
---|
| 1088 | + if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he) |
---|
| 1089 | + return -1; |
---|
| 1090 | + |
---|
| 1091 | + if (unlikely(ieee80211_is_probe_resp(fc))) |
---|
| 1092 | + iwl_mvm_probe_resp_set_noa(mvm, skb); |
---|
1032 | 1093 | |
---|
1033 | 1094 | dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, |
---|
1034 | 1095 | sta, mvmsta->sta_id); |
---|
.. | .. |
---|
1050 | 1111 | */ |
---|
1051 | 1112 | if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { |
---|
1052 | 1113 | tid = ieee80211_get_tid(hdr); |
---|
1053 | | - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) |
---|
| 1114 | + if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid)) |
---|
1054 | 1115 | goto drop_unlock_sta; |
---|
1055 | 1116 | |
---|
1056 | 1117 | is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; |
---|
1057 | | - if (WARN_ON_ONCE(is_ampdu && |
---|
1058 | | - mvmsta->tid_data[tid].state != IWL_AGG_ON)) |
---|
| 1118 | + if (WARN_ONCE(is_ampdu && |
---|
| 1119 | + mvmsta->tid_data[tid].state != IWL_AGG_ON, |
---|
| 1120 | + "Invalid internal agg state %d for TID %d", |
---|
| 1121 | + mvmsta->tid_data[tid].state, tid)) |
---|
1059 | 1122 | goto drop_unlock_sta; |
---|
1060 | 1123 | |
---|
1061 | 1124 | seq_number = mvmsta->tid_data[tid].seq_number; |
---|
.. | .. |
---|
1069 | 1132 | /* update the tx_cmd hdr as it was already copied */ |
---|
1070 | 1133 | tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; |
---|
1071 | 1134 | } |
---|
| 1135 | + } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { |
---|
| 1136 | + tid = IWL_TID_NON_QOS; |
---|
1072 | 1137 | } |
---|
1073 | 1138 | |
---|
1074 | 1139 | txq_id = mvmsta->tid_data[tid].txq_id; |
---|
1075 | 1140 | |
---|
1076 | 1141 | WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); |
---|
1077 | 1142 | |
---|
1078 | | - /* Check if TXQ needs to be allocated or re-activated */ |
---|
1079 | | - if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || |
---|
1080 | | - !mvmsta->tid_data[tid].is_tid_active)) { |
---|
1081 | | - /* If TXQ needs to be allocated... */ |
---|
1082 | | - if (txq_id == IWL_MVM_INVALID_QUEUE) { |
---|
1083 | | - iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); |
---|
1084 | | - |
---|
1085 | | - /* |
---|
1086 | | - * The frame is now deferred, and the worker scheduled |
---|
1087 | | - * will re-allocate it, so we can free it for now. |
---|
1088 | | - */ |
---|
1089 | | - iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); |
---|
1090 | | - spin_unlock(&mvmsta->lock); |
---|
1091 | | - return 0; |
---|
1092 | | - } |
---|
1093 | | - |
---|
1094 | | - /* queue should always be active in new TX path */ |
---|
1095 | | - WARN_ON(iwl_mvm_has_new_tx_api(mvm)); |
---|
1096 | | - |
---|
1097 | | - /* If we are here - TXQ exists and needs to be re-activated */ |
---|
1098 | | - spin_lock(&mvm->queue_info_lock); |
---|
1099 | | - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; |
---|
1100 | | - mvmsta->tid_data[tid].is_tid_active = true; |
---|
1101 | | - spin_unlock(&mvm->queue_info_lock); |
---|
1102 | | - |
---|
1103 | | - IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n", |
---|
1104 | | - txq_id); |
---|
| 1143 | + if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { |
---|
| 1144 | + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); |
---|
| 1145 | + spin_unlock(&mvmsta->lock); |
---|
| 1146 | + return -1; |
---|
1105 | 1147 | } |
---|
1106 | 1148 | |
---|
1107 | 1149 | if (!iwl_mvm_has_new_tx_api(mvm)) { |
---|
.. | .. |
---|
1112 | 1154 | * If we have timed-out TIDs - schedule the worker that will |
---|
1113 | 1155 | * reconfig the queues and update them |
---|
1114 | 1156 | * |
---|
1115 | | - * Note that the mvm->queue_info_lock isn't being taken here in |
---|
1116 | | - * order to not serialize the TX flow. This isn't dangerous |
---|
1117 | | - * because scheduling mvm->add_stream_wk can't ruin the state, |
---|
1118 | | - * and if we DON'T schedule it due to some race condition then |
---|
1119 | | - * next TX we get here we will. |
---|
| 1157 | + * Note that no lock is taken here in order to not serialize |
---|
| 1158 | + * the TX flow. This isn't dangerous because scheduling |
---|
| 1159 | + * mvm->add_stream_wk can't ruin the state, and if we DON'T |
---|
| 1160 | + * schedule it due to some race condition then next TX we get |
---|
| 1161 | + * here we will. |
---|
1120 | 1162 | */ |
---|
1121 | 1163 | if (unlikely(mvm->queue_info[txq_id].status == |
---|
1122 | 1164 | IWL_MVM_QUEUE_SHARED && |
---|
.. | .. |
---|
1124 | 1166 | schedule_work(&mvm->add_stream_wk); |
---|
1125 | 1167 | } |
---|
1126 | 1168 | |
---|
1127 | | - IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, |
---|
1128 | | - tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); |
---|
| 1169 | + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", |
---|
| 1170 | + mvmsta->sta_id, tid, txq_id, |
---|
| 1171 | + IEEE80211_SEQ_TO_SN(seq_number), skb->len); |
---|
1129 | 1172 | |
---|
1130 | 1173 | /* From now on, we cannot access info->control */ |
---|
1131 | 1174 | iwl_mvm_skb_prepare_status(skb, dev_cmd); |
---|
.. | .. |
---|
1138 | 1181 | |
---|
1139 | 1182 | spin_unlock(&mvmsta->lock); |
---|
1140 | 1183 | |
---|
1141 | | - iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid); |
---|
| 1184 | + if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, |
---|
| 1185 | + tid == IWL_MAX_TID_COUNT ? 0 : tid)) |
---|
| 1186 | + goto drop; |
---|
1142 | 1187 | |
---|
1143 | 1188 | return 0; |
---|
1144 | 1189 | |
---|
.. | .. |
---|
1146 | 1191 | iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); |
---|
1147 | 1192 | spin_unlock(&mvmsta->lock); |
---|
1148 | 1193 | drop: |
---|
| 1194 | + IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); |
---|
1149 | 1195 | return -1; |
---|
1150 | 1196 | } |
---|
1151 | 1197 | |
---|
1152 | | -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, |
---|
1153 | | - struct ieee80211_sta *sta) |
---|
| 1198 | +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, |
---|
| 1199 | + struct ieee80211_sta *sta) |
---|
1154 | 1200 | { |
---|
1155 | 1201 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
1156 | 1202 | struct ieee80211_tx_info info; |
---|
.. | .. |
---|
1223 | 1269 | * to align the wrap around of ssn so we compare relevant values. |
---|
1224 | 1270 | */ |
---|
1225 | 1271 | normalized_ssn = tid_data->ssn; |
---|
1226 | | - if (mvm->trans->cfg->gen2) |
---|
| 1272 | + if (mvm->trans->trans_cfg->gen2) |
---|
1227 | 1273 | normalized_ssn &= 0xff; |
---|
1228 | 1274 | |
---|
1229 | 1275 | if (normalized_ssn != tid_data->next_reclaimed) |
---|
.. | .. |
---|
1327 | 1373 | } |
---|
1328 | 1374 | } |
---|
1329 | 1375 | |
---|
1330 | | -/** |
---|
| 1376 | +/* |
---|
1331 | 1377 | * translate ucode response to mac80211 tx status control values |
---|
1332 | 1378 | */ |
---|
1333 | 1379 | static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, |
---|
.. | .. |
---|
1347 | 1393 | struct iwl_fw_dbg_trigger_tx_status *status_trig; |
---|
1348 | 1394 | int i; |
---|
1349 | 1395 | |
---|
1350 | | - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) |
---|
| 1396 | + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, |
---|
| 1397 | + FW_DBG_TRIGGER_TX_STATUS); |
---|
| 1398 | + if (!trig) |
---|
1351 | 1399 | return; |
---|
1352 | 1400 | |
---|
1353 | | - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); |
---|
1354 | 1401 | status_trig = (void *)trig->data; |
---|
1355 | | - |
---|
1356 | | - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) |
---|
1357 | | - return; |
---|
1358 | 1402 | |
---|
1359 | 1403 | for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { |
---|
1360 | 1404 | /* don't collect on status 0 */ |
---|
.. | .. |
---|
1371 | 1415 | } |
---|
1372 | 1416 | } |
---|
1373 | 1417 | |
---|
1374 | | -/** |
---|
| 1418 | +/* |
---|
1375 | 1419 | * iwl_mvm_get_scd_ssn - returns the SSN of the SCD |
---|
1376 | 1420 | * @tx_resp: the Tx response from the fw (agg or non-agg) |
---|
1377 | 1421 | * |
---|
.. | .. |
---|
1405 | 1449 | iwl_mvm_get_agg_status(mvm, tx_resp); |
---|
1406 | 1450 | u32 status = le16_to_cpu(agg_status->status); |
---|
1407 | 1451 | u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); |
---|
1408 | | - struct iwl_mvm_sta *mvmsta; |
---|
1409 | 1452 | struct sk_buff_head skbs; |
---|
1410 | 1453 | u8 skb_freed = 0; |
---|
1411 | 1454 | u8 lq_color; |
---|
.. | .. |
---|
1454 | 1497 | default: |
---|
1455 | 1498 | break; |
---|
1456 | 1499 | } |
---|
| 1500 | + |
---|
| 1501 | + if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && |
---|
| 1502 | + ieee80211_is_mgmt(hdr->frame_control)) |
---|
| 1503 | + iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); |
---|
1457 | 1504 | |
---|
1458 | 1505 | /* |
---|
1459 | 1506 | * If we are freeing multiple frames, mark all the frames |
---|
.. | .. |
---|
1549 | 1596 | goto out; |
---|
1550 | 1597 | |
---|
1551 | 1598 | if (!IS_ERR(sta)) { |
---|
1552 | | - mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
| 1599 | + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
1553 | 1600 | |
---|
1554 | 1601 | iwl_mvm_tx_airtime(mvm, mvmsta, |
---|
1555 | 1602 | le16_to_cpu(tx_resp->wireless_media_time)); |
---|
1556 | 1603 | |
---|
1557 | | - if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) { |
---|
| 1604 | + if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && |
---|
| 1605 | + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) |
---|
| 1606 | + iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); |
---|
| 1607 | + |
---|
| 1608 | + if (sta->wme && tid != IWL_MGMT_TID) { |
---|
1558 | 1609 | struct iwl_mvm_tid_data *tid_data = |
---|
1559 | 1610 | &mvmsta->tid_data[tid]; |
---|
1560 | 1611 | bool send_eosp_ndp = false; |
---|
.. | .. |
---|
1608 | 1659 | mvmsta->next_status_eosp = false; |
---|
1609 | 1660 | ieee80211_sta_eosp(sta); |
---|
1610 | 1661 | } |
---|
1611 | | - } else { |
---|
1612 | | - mvmsta = NULL; |
---|
1613 | 1662 | } |
---|
1614 | | - |
---|
1615 | 1663 | out: |
---|
1616 | 1664 | rcu_read_unlock(); |
---|
1617 | 1665 | } |
---|
.. | .. |
---|
1674 | 1722 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
---|
1675 | 1723 | struct iwl_mvm_sta *mvmsta; |
---|
1676 | 1724 | int queue = SEQ_TO_QUEUE(sequence); |
---|
| 1725 | + struct ieee80211_sta *sta; |
---|
1677 | 1726 | |
---|
1678 | 1727 | if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && |
---|
1679 | 1728 | (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) |
---|
1680 | | - return; |
---|
1681 | | - |
---|
1682 | | - if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) |
---|
1683 | 1729 | return; |
---|
1684 | 1730 | |
---|
1685 | 1731 | iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); |
---|
.. | .. |
---|
1687 | 1733 | rcu_read_lock(); |
---|
1688 | 1734 | |
---|
1689 | 1735 | mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); |
---|
| 1736 | + |
---|
| 1737 | + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
---|
| 1738 | + if (WARN_ON_ONCE(!sta || !sta->wme)) { |
---|
| 1739 | + rcu_read_unlock(); |
---|
| 1740 | + return; |
---|
| 1741 | + } |
---|
1690 | 1742 | |
---|
1691 | 1743 | if (!WARN_ON_ONCE(!mvmsta)) { |
---|
1692 | 1744 | mvmsta->tid_data[tid].rate_n_flags = |
---|
.. | .. |
---|
1718 | 1770 | struct ieee80211_tx_info *ba_info, u32 rate) |
---|
1719 | 1771 | { |
---|
1720 | 1772 | struct sk_buff_head reclaimed_skbs; |
---|
1721 | | - struct iwl_mvm_tid_data *tid_data; |
---|
| 1773 | + struct iwl_mvm_tid_data *tid_data = NULL; |
---|
1722 | 1774 | struct ieee80211_sta *sta; |
---|
1723 | | - struct iwl_mvm_sta *mvmsta; |
---|
| 1775 | + struct iwl_mvm_sta *mvmsta = NULL; |
---|
1724 | 1776 | struct sk_buff *skb; |
---|
1725 | 1777 | int freed; |
---|
1726 | 1778 | |
---|
1727 | | - if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || |
---|
| 1779 | + if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || |
---|
1728 | 1780 | tid > IWL_MAX_TID_COUNT, |
---|
1729 | 1781 | "sta_id %d tid %d", sta_id, tid)) |
---|
1730 | 1782 | return; |
---|
.. | .. |
---|
1734 | 1786 | sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
---|
1735 | 1787 | |
---|
1736 | 1788 | /* Reclaiming frames for a station that has been deleted ? */ |
---|
1737 | | - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { |
---|
| 1789 | + if (WARN_ON_ONCE(!sta)) { |
---|
1738 | 1790 | rcu_read_unlock(); |
---|
1739 | 1791 | return; |
---|
1740 | 1792 | } |
---|
| 1793 | + |
---|
| 1794 | + __skb_queue_head_init(&reclaimed_skbs); |
---|
| 1795 | + |
---|
| 1796 | + /* |
---|
| 1797 | + * Release all TFDs before the SSN, i.e. all TFDs in front of |
---|
| 1798 | + * block-ack window (we assume that they've been successfully |
---|
| 1799 | + * transmitted ... if not, it's too late anyway). |
---|
| 1800 | + */ |
---|
| 1801 | + iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); |
---|
| 1802 | + |
---|
| 1803 | + skb_queue_walk(&reclaimed_skbs, skb) { |
---|
| 1804 | + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
---|
| 1805 | + |
---|
| 1806 | + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
---|
| 1807 | + |
---|
| 1808 | + memset(&info->status, 0, sizeof(info->status)); |
---|
| 1809 | + /* Packet was transmitted successfully, failures come as single |
---|
| 1810 | + * frames because before failing a frame the firmware transmits |
---|
| 1811 | + * it without aggregation at least once. |
---|
| 1812 | + */ |
---|
| 1813 | + info->flags |= IEEE80211_TX_STAT_ACK; |
---|
| 1814 | + } |
---|
| 1815 | + |
---|
| 1816 | + /* |
---|
| 1817 | + * It's possible to get a BA response after invalidating the rcu (rcu is |
---|
| 1818 | + * invalidated in order to prevent new Tx from being sent, but there may |
---|
| 1819 | + * be some frames already in-flight). |
---|
| 1820 | + * In this case we just want to reclaim, and could skip all the |
---|
| 1821 | + * sta-dependent stuff since it's in the middle of being removed |
---|
| 1822 | + * anyways. |
---|
| 1823 | + */ |
---|
| 1824 | + if (IS_ERR(sta)) |
---|
| 1825 | + goto out; |
---|
1741 | 1826 | |
---|
1742 | 1827 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
---|
1743 | 1828 | tid_data = &mvmsta->tid_data[tid]; |
---|
.. | .. |
---|
1751 | 1836 | } |
---|
1752 | 1837 | |
---|
1753 | 1838 | spin_lock_bh(&mvmsta->lock); |
---|
1754 | | - |
---|
1755 | | - __skb_queue_head_init(&reclaimed_skbs); |
---|
1756 | | - |
---|
1757 | | - /* |
---|
1758 | | - * Release all TFDs before the SSN, i.e. all TFDs in front of |
---|
1759 | | - * block-ack window (we assume that they've been successfully |
---|
1760 | | - * transmitted ... if not, it's too late anyway). |
---|
1761 | | - */ |
---|
1762 | | - iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); |
---|
1763 | 1839 | |
---|
1764 | 1840 | tid_data->next_reclaimed = index; |
---|
1765 | 1841 | |
---|
.. | .. |
---|
1781 | 1857 | freed++; |
---|
1782 | 1858 | else |
---|
1783 | 1859 | WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); |
---|
1784 | | - |
---|
1785 | | - iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
---|
1786 | | - |
---|
1787 | | - memset(&info->status, 0, sizeof(info->status)); |
---|
1788 | | - /* Packet was transmitted successfully, failures come as single |
---|
1789 | | - * frames because before failing a frame the firmware transmits |
---|
1790 | | - * it without aggregation at least once. |
---|
1791 | | - */ |
---|
1792 | | - info->flags |= IEEE80211_TX_STAT_ACK; |
---|
1793 | 1860 | |
---|
1794 | 1861 | /* this is the first skb we deliver in this batch */ |
---|
1795 | 1862 | /* put the rate scaling data there */ |
---|
.. | .. |
---|
1867 | 1934 | rcu_read_lock(); |
---|
1868 | 1935 | |
---|
1869 | 1936 | mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); |
---|
1870 | | - if (!mvmsta) |
---|
1871 | | - goto out_unlock; |
---|
| 1937 | + /* |
---|
| 1938 | + * It's possible to get a BA response after invalidating the rcu |
---|
| 1939 | + * (rcu is invalidated in order to prevent new Tx from being |
---|
| 1940 | + * sent, but there may be some frames already in-flight). |
---|
| 1941 | + * In this case we just want to reclaim, and could skip all the |
---|
| 1942 | + * sta-dependent stuff since it's in the middle of being removed |
---|
| 1943 | + * anyways. |
---|
| 1944 | + */ |
---|
1872 | 1945 | |
---|
1873 | 1946 | /* Free per TID */ |
---|
1874 | 1947 | for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) { |
---|
.. | .. |
---|
1879 | 1952 | if (tid == IWL_MGMT_TID) |
---|
1880 | 1953 | tid = IWL_MAX_TID_COUNT; |
---|
1881 | 1954 | |
---|
1882 | | - mvmsta->tid_data[i].lq_color = lq_color; |
---|
| 1955 | + if (mvmsta) |
---|
| 1956 | + mvmsta->tid_data[i].lq_color = lq_color; |
---|
| 1957 | + |
---|
1883 | 1958 | iwl_mvm_tx_reclaim(mvm, sta_id, tid, |
---|
1884 | 1959 | (int)(le16_to_cpu(ba_tfd->q_num)), |
---|
1885 | 1960 | le16_to_cpu(ba_tfd->tfd_index), |
---|
.. | .. |
---|
1887 | 1962 | le32_to_cpu(ba_res->tx_rate)); |
---|
1888 | 1963 | } |
---|
1889 | 1964 | |
---|
1890 | | - iwl_mvm_tx_airtime(mvm, mvmsta, |
---|
1891 | | - le32_to_cpu(ba_res->wireless_time)); |
---|
1892 | | -out_unlock: |
---|
| 1965 | + if (mvmsta) |
---|
| 1966 | + iwl_mvm_tx_airtime(mvm, mvmsta, |
---|
| 1967 | + le32_to_cpu(ba_res->wireless_time)); |
---|
1893 | 1968 | rcu_read_unlock(); |
---|
1894 | 1969 | out: |
---|
1895 | 1970 | IWL_DEBUG_TX_REPLY(mvm, |
---|
.. | .. |
---|
1986 | 2061 | return ret; |
---|
1987 | 2062 | } |
---|
1988 | 2063 | |
---|
1989 | | -int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags) |
---|
| 2064 | +int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) |
---|
1990 | 2065 | { |
---|
1991 | 2066 | struct iwl_mvm_int_sta *int_sta = sta; |
---|
1992 | 2067 | struct iwl_mvm_sta *mvm_sta = sta; |
---|
.. | .. |
---|
1995 | 2070 | offsetof(struct iwl_mvm_sta, sta_id)); |
---|
1996 | 2071 | |
---|
1997 | 2072 | if (iwl_mvm_has_new_tx_api(mvm)) |
---|
1998 | | - return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, |
---|
1999 | | - 0xff | BIT(IWL_MGMT_TID), flags); |
---|
| 2073 | + return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0); |
---|
2000 | 2074 | |
---|
2001 | 2075 | if (internal) |
---|
2002 | | - return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, |
---|
2003 | | - flags); |
---|
| 2076 | + return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0); |
---|
2004 | 2077 | |
---|
2005 | | - return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags); |
---|
| 2078 | + return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0); |
---|
2006 | 2079 | } |
---|