.. | .. |
---|
5 | 5 | * |
---|
6 | 6 | * GPL LICENSE SUMMARY |
---|
7 | 7 | * |
---|
8 | | - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
---|
| 8 | + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. |
---|
9 | 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
---|
10 | 10 | * Copyright (C) 2015 - 2017 Intel Deutschland GmbH |
---|
11 | | - * Copyright(c) 2018 Intel Corporation |
---|
12 | 11 | * |
---|
13 | 12 | * This program is free software; you can redistribute it and/or modify |
---|
14 | 13 | * it under the terms of version 2 of the GNU General Public License as |
---|
.. | .. |
---|
19 | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
20 | 19 | * General Public License for more details. |
---|
21 | 20 | * |
---|
22 | | - * You should have received a copy of the GNU General Public License |
---|
23 | | - * along with this program; if not, write to the Free Software |
---|
24 | | - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
---|
25 | | - * USA |
---|
26 | | - * |
---|
27 | 21 | * The full GNU General Public License is included in this distribution |
---|
28 | 22 | * in the file called COPYING. |
---|
29 | 23 | * |
---|
.. | .. |
---|
33 | 27 | * |
---|
34 | 28 | * BSD LICENSE |
---|
35 | 29 | * |
---|
36 | | - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
---|
| 30 | + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. |
---|
37 | 31 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
---|
38 | 32 | * Copyright (C) 2015 - 2017 Intel Deutschland GmbH |
---|
39 | | - * Copyright(c) 2018 Intel Corporation |
---|
40 | 33 | * All rights reserved. |
---|
41 | 34 | * |
---|
42 | 35 | * Redistribution and use in source and binary forms, with or without |
---|
.. | .. |
---|
74 | 67 | #include "iwl-csr.h" |
---|
75 | 68 | #include "mvm.h" |
---|
76 | 69 | #include "fw/api/rs.h" |
---|
| 70 | +#include "fw/img.h" |
---|
77 | 71 | |
---|
78 | 72 | /* |
---|
79 | 73 | * Will return 0 even if the cmd failed when RFKILL is asserted unless |
---|
.. | .. |
---|
93 | 87 | * the mutex, this ensures we don't try to send two |
---|
94 | 88 | * (or more) synchronous commands at a time. |
---|
95 | 89 | */ |
---|
96 | | - if (!(cmd->flags & CMD_ASYNC)) { |
---|
| 90 | + if (!(cmd->flags & CMD_ASYNC)) |
---|
97 | 91 | lockdep_assert_held(&mvm->mutex); |
---|
98 | | - if (!(cmd->flags & CMD_SEND_IN_IDLE)) |
---|
99 | | - iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD); |
---|
100 | | - } |
---|
101 | 92 | |
---|
102 | 93 | ret = iwl_trans_send_cmd(mvm->trans, cmd); |
---|
103 | | - |
---|
104 | | - if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE))) |
---|
105 | | - iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD); |
---|
106 | 94 | |
---|
107 | 95 | /* |
---|
108 | 96 | * If the caller wants the SKB, then don't hide any problems, the |
---|
.. | .. |
---|
228 | 216 | int band_offset = 0; |
---|
229 | 217 | |
---|
230 | 218 | /* Legacy rate format, search for match in table */ |
---|
231 | | - if (band == NL80211_BAND_5GHZ) |
---|
| 219 | + if (band != NL80211_BAND_2GHZ) |
---|
232 | 220 | band_offset = IWL_FIRST_OFDM_RATE; |
---|
233 | 221 | for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) |
---|
234 | 222 | if (fw_rate_idx_to_plcp[idx] == rate) |
---|
.. | .. |
---|
243 | 231 | return fw_rate_idx_to_plcp[rate_idx]; |
---|
244 | 232 | } |
---|
245 | 233 | |
---|
| 234 | +u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac) |
---|
| 235 | +{ |
---|
| 236 | + static const u8 mac80211_ac_to_ucode_ac[] = { |
---|
| 237 | + AC_VO, |
---|
| 238 | + AC_VI, |
---|
| 239 | + AC_BE, |
---|
| 240 | + AC_BK |
---|
| 241 | + }; |
---|
| 242 | + |
---|
| 243 | + return mac80211_ac_to_ucode_ac[ac]; |
---|
| 244 | +} |
---|
| 245 | + |
---|
246 | 246 | void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) |
---|
247 | 247 | { |
---|
248 | 248 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
---|
.. | .. |
---|
253 | 253 | IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", |
---|
254 | 254 | le16_to_cpu(err_resp->bad_cmd_seq_num), |
---|
255 | 255 | le32_to_cpu(err_resp->error_service)); |
---|
256 | | - IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", |
---|
| 256 | + IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n", |
---|
257 | 257 | le64_to_cpu(err_resp->timestamp)); |
---|
258 | 258 | } |
---|
259 | 259 | |
---|
.. | .. |
---|
288 | 288 | |
---|
289 | 289 | WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); |
---|
290 | 290 | return last_idx; |
---|
291 | | -} |
---|
292 | | - |
---|
293 | | -static const struct { |
---|
294 | | - const char *name; |
---|
295 | | - u8 num; |
---|
296 | | -} advanced_lookup[] = { |
---|
297 | | - { "NMI_INTERRUPT_WDG", 0x34 }, |
---|
298 | | - { "SYSASSERT", 0x35 }, |
---|
299 | | - { "UCODE_VERSION_MISMATCH", 0x37 }, |
---|
300 | | - { "BAD_COMMAND", 0x38 }, |
---|
301 | | - { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, |
---|
302 | | - { "FATAL_ERROR", 0x3D }, |
---|
303 | | - { "NMI_TRM_HW_ERR", 0x46 }, |
---|
304 | | - { "NMI_INTERRUPT_TRM", 0x4C }, |
---|
305 | | - { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, |
---|
306 | | - { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, |
---|
307 | | - { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, |
---|
308 | | - { "NMI_INTERRUPT_HOST", 0x66 }, |
---|
309 | | - { "NMI_INTERRUPT_ACTION_PT", 0x7C }, |
---|
310 | | - { "NMI_INTERRUPT_UNKNOWN", 0x84 }, |
---|
311 | | - { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, |
---|
312 | | - { "ADVANCED_SYSASSERT", 0 }, |
---|
313 | | -}; |
---|
314 | | - |
---|
315 | | -static const char *desc_lookup(u32 num) |
---|
316 | | -{ |
---|
317 | | - int i; |
---|
318 | | - |
---|
319 | | - for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) |
---|
320 | | - if (advanced_lookup[i].num == num) |
---|
321 | | - return advanced_lookup[i].name; |
---|
322 | | - |
---|
323 | | - /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ |
---|
324 | | - return advanced_lookup[i].name; |
---|
325 | 291 | } |
---|
326 | 292 | |
---|
327 | 293 | /* |
---|
.. | .. |
---|
457 | 423 | { |
---|
458 | 424 | struct iwl_trans *trans = mvm->trans; |
---|
459 | 425 | struct iwl_umac_error_event_table table; |
---|
| 426 | + u32 base = mvm->trans->dbg.umac_error_event_table; |
---|
460 | 427 | |
---|
461 | | - if (!mvm->support_umac_log) |
---|
| 428 | + if (!base && |
---|
| 429 | + !(mvm->trans->dbg.error_event_table_tlv_status & |
---|
| 430 | + IWL_ERROR_EVENT_TABLE_UMAC)) |
---|
462 | 431 | return; |
---|
463 | 432 | |
---|
464 | | - iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table, |
---|
465 | | - sizeof(table)); |
---|
| 433 | + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); |
---|
| 434 | + |
---|
| 435 | + if (table.valid) |
---|
| 436 | + mvm->fwrt.dump.umac_err_id = table.error_id; |
---|
466 | 437 | |
---|
467 | 438 | if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { |
---|
468 | 439 | IWL_ERR(trans, "Start IWL Error Log Dump:\n"); |
---|
.. | .. |
---|
471 | 442 | } |
---|
472 | 443 | |
---|
473 | 444 | IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, |
---|
474 | | - desc_lookup(table.error_id)); |
---|
| 445 | + iwl_fw_lookup_assert_desc(table.error_id)); |
---|
475 | 446 | IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); |
---|
476 | 447 | IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); |
---|
477 | 448 | IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); |
---|
.. | .. |
---|
487 | 458 | IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); |
---|
488 | 459 | } |
---|
489 | 460 | |
---|
490 | | -static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) |
---|
| 461 | +static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) |
---|
491 | 462 | { |
---|
492 | 463 | struct iwl_trans *trans = mvm->trans; |
---|
493 | 464 | struct iwl_error_event_table table; |
---|
494 | | - u32 val; |
---|
| 465 | + u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num]; |
---|
495 | 466 | |
---|
496 | 467 | if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { |
---|
497 | 468 | if (!base) |
---|
.. | .. |
---|
520 | 491 | /* reset the device */ |
---|
521 | 492 | iwl_trans_sw_reset(trans); |
---|
522 | 493 | |
---|
523 | | - /* set INIT_DONE flag */ |
---|
524 | | - iwl_set_bit(trans, CSR_GP_CNTRL, |
---|
525 | | - BIT(trans->cfg->csr->flag_init_done)); |
---|
526 | | - |
---|
527 | | - /* and wait for clock stabilization */ |
---|
528 | | - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) |
---|
529 | | - udelay(2); |
---|
530 | | - |
---|
531 | | - err = iwl_poll_bit(trans, CSR_GP_CNTRL, |
---|
532 | | - BIT(trans->cfg->csr->flag_mac_clock_ready), |
---|
533 | | - BIT(trans->cfg->csr->flag_mac_clock_ready), |
---|
534 | | - 25000); |
---|
535 | | - if (err < 0) { |
---|
536 | | - IWL_DEBUG_INFO(trans, |
---|
537 | | - "Failed to reset the card for the dump\n"); |
---|
| 494 | + err = iwl_finish_nic_init(trans, trans->trans_cfg); |
---|
| 495 | + if (err) |
---|
538 | 496 | return; |
---|
539 | | - } |
---|
540 | 497 | } |
---|
541 | 498 | |
---|
542 | 499 | iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); |
---|
| 500 | + |
---|
| 501 | + if (table.valid) |
---|
| 502 | + mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id; |
---|
543 | 503 | |
---|
544 | 504 | if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { |
---|
545 | 505 | IWL_ERR(trans, "Start IWL Error Log Dump:\n"); |
---|
.. | .. |
---|
551 | 511 | |
---|
552 | 512 | IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); |
---|
553 | 513 | |
---|
554 | | - trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver); |
---|
555 | 514 | IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, |
---|
556 | | - desc_lookup(table.error_id)); |
---|
| 515 | + iwl_fw_lookup_assert_desc(table.error_id)); |
---|
557 | 516 | IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); |
---|
558 | 517 | IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); |
---|
559 | 518 | IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); |
---|
.. | .. |
---|
589 | 548 | IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); |
---|
590 | 549 | } |
---|
591 | 550 | |
---|
| 551 | +static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm) |
---|
| 552 | +{ |
---|
| 553 | + struct iwl_trans *trans = mvm->trans; |
---|
| 554 | + u32 error; |
---|
| 555 | + |
---|
| 556 | + error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS); |
---|
| 557 | + |
---|
| 558 | + IWL_ERR(trans, "IML/ROM dump:\n"); |
---|
| 559 | + |
---|
| 560 | + if (error & 0xFFFF0000) |
---|
| 561 | + IWL_ERR(trans, "IML/ROM SYSASSERT:\n"); |
---|
| 562 | + |
---|
| 563 | + IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error); |
---|
| 564 | + IWL_ERR(mvm, "0x%08X | IML/ROM data1\n", |
---|
| 565 | + iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS)); |
---|
| 566 | +} |
---|
| 567 | + |
---|
592 | 568 | void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) |
---|
593 | 569 | { |
---|
594 | 570 | if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { |
---|
.. | .. |
---|
597 | 573 | return; |
---|
598 | 574 | } |
---|
599 | 575 | |
---|
600 | | - iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); |
---|
| 576 | + iwl_mvm_dump_lmac_error_log(mvm, 0); |
---|
601 | 577 | |
---|
602 | | - if (mvm->error_event_table[1]) |
---|
603 | | - iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]); |
---|
| 578 | + if (mvm->trans->dbg.lmac_error_event_table[1]) |
---|
| 579 | + iwl_mvm_dump_lmac_error_log(mvm, 1); |
---|
604 | 580 | |
---|
605 | 581 | iwl_mvm_dump_umac_error_log(mvm); |
---|
606 | | -} |
---|
607 | 582 | |
---|
608 | | -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) |
---|
609 | | -{ |
---|
610 | | - int i; |
---|
| 583 | + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) |
---|
| 584 | + iwl_mvm_dump_iml_error_log(mvm); |
---|
611 | 585 | |
---|
612 | | - lockdep_assert_held(&mvm->queue_info_lock); |
---|
613 | | - |
---|
614 | | - /* This should not be hit with new TX path */ |
---|
615 | | - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
---|
616 | | - return -ENOSPC; |
---|
617 | | - |
---|
618 | | - /* Start by looking for a free queue */ |
---|
619 | | - for (i = minq; i <= maxq; i++) |
---|
620 | | - if (mvm->queue_info[i].hw_queue_refcount == 0 && |
---|
621 | | - mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) |
---|
622 | | - return i; |
---|
623 | | - |
---|
624 | | - /* |
---|
625 | | - * If no free queue found - settle for an inactive one to reconfigure |
---|
626 | | - * Make sure that the inactive queue either already belongs to this STA, |
---|
627 | | - * or that if it belongs to another one - it isn't the reserved queue |
---|
628 | | - */ |
---|
629 | | - for (i = minq; i <= maxq; i++) |
---|
630 | | - if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && |
---|
631 | | - (sta_id == mvm->queue_info[i].ra_sta_id || |
---|
632 | | - !mvm->queue_info[i].reserved)) |
---|
633 | | - return i; |
---|
634 | | - |
---|
635 | | - return -ENOSPC; |
---|
| 586 | + iwl_fw_error_print_fseq_regs(&mvm->fwrt); |
---|
636 | 587 | } |
---|
637 | 588 | |
---|
638 | 589 | int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, |
---|
.. | .. |
---|
654 | 605 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
---|
655 | 606 | return -EINVAL; |
---|
656 | 607 | |
---|
657 | | - spin_lock_bh(&mvm->queue_info_lock); |
---|
658 | | - if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, |
---|
659 | | - "Trying to reconfig unallocated queue %d\n", queue)) { |
---|
660 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
| 608 | + if (WARN(mvm->queue_info[queue].tid_bitmap == 0, |
---|
| 609 | + "Trying to reconfig unallocated queue %d\n", queue)) |
---|
661 | 610 | return -ENXIO; |
---|
662 | | - } |
---|
663 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
664 | 611 | |
---|
665 | 612 | IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); |
---|
666 | 613 | |
---|
.. | .. |
---|
671 | 618 | return ret; |
---|
672 | 619 | } |
---|
673 | 620 | |
---|
674 | | -static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, |
---|
675 | | - int mac80211_queue, u8 sta_id, u8 tid) |
---|
676 | | -{ |
---|
677 | | - bool enable_queue = true; |
---|
678 | | - |
---|
679 | | - spin_lock_bh(&mvm->queue_info_lock); |
---|
680 | | - |
---|
681 | | - /* Make sure this TID isn't already enabled */ |
---|
682 | | - if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { |
---|
683 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
684 | | - IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", |
---|
685 | | - queue, tid); |
---|
686 | | - return false; |
---|
687 | | - } |
---|
688 | | - |
---|
689 | | - /* Update mappings and refcounts */ |
---|
690 | | - if (mvm->queue_info[queue].hw_queue_refcount > 0) |
---|
691 | | - enable_queue = false; |
---|
692 | | - |
---|
693 | | - if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { |
---|
694 | | - WARN(mac80211_queue >= |
---|
695 | | - BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), |
---|
696 | | - "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", |
---|
697 | | - mac80211_queue, queue, sta_id, tid); |
---|
698 | | - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); |
---|
699 | | - } |
---|
700 | | - |
---|
701 | | - mvm->queue_info[queue].hw_queue_refcount++; |
---|
702 | | - mvm->queue_info[queue].tid_bitmap |= BIT(tid); |
---|
703 | | - mvm->queue_info[queue].ra_sta_id = sta_id; |
---|
704 | | - |
---|
705 | | - if (enable_queue) { |
---|
706 | | - if (tid != IWL_MAX_TID_COUNT) |
---|
707 | | - mvm->queue_info[queue].mac80211_ac = |
---|
708 | | - tid_to_mac80211_ac[tid]; |
---|
709 | | - else |
---|
710 | | - mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; |
---|
711 | | - |
---|
712 | | - mvm->queue_info[queue].txq_tid = tid; |
---|
713 | | - } |
---|
714 | | - |
---|
715 | | - IWL_DEBUG_TX_QUEUES(mvm, |
---|
716 | | - "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", |
---|
717 | | - queue, mvm->queue_info[queue].hw_queue_refcount, |
---|
718 | | - mvm->hw_queue_to_mac80211[queue]); |
---|
719 | | - |
---|
720 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
721 | | - |
---|
722 | | - return enable_queue; |
---|
723 | | -} |
---|
724 | | - |
---|
725 | | -int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, |
---|
726 | | - u8 sta_id, u8 tid, unsigned int timeout) |
---|
727 | | -{ |
---|
728 | | - struct iwl_tx_queue_cfg_cmd cmd = { |
---|
729 | | - .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), |
---|
730 | | - .sta_id = sta_id, |
---|
731 | | - .tid = tid, |
---|
732 | | - }; |
---|
733 | | - int queue, size = IWL_DEFAULT_QUEUE_SIZE; |
---|
734 | | - |
---|
735 | | - if (cmd.tid == IWL_MAX_TID_COUNT) { |
---|
736 | | - cmd.tid = IWL_MGMT_TID; |
---|
737 | | - size = IWL_MGMT_QUEUE_SIZE; |
---|
738 | | - } |
---|
739 | | - queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, |
---|
740 | | - SCD_QUEUE_CFG, size, timeout); |
---|
741 | | - |
---|
742 | | - if (queue < 0) { |
---|
743 | | - IWL_DEBUG_TX_QUEUES(mvm, |
---|
744 | | - "Failed allocating TXQ for sta %d tid %d, ret: %d\n", |
---|
745 | | - sta_id, tid, queue); |
---|
746 | | - return queue; |
---|
747 | | - } |
---|
748 | | - |
---|
749 | | - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", |
---|
750 | | - queue, sta_id, tid); |
---|
751 | | - |
---|
752 | | - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); |
---|
753 | | - IWL_DEBUG_TX_QUEUES(mvm, |
---|
754 | | - "Enabling TXQ #%d (mac80211 map:0x%x)\n", |
---|
755 | | - queue, mvm->hw_queue_to_mac80211[queue]); |
---|
756 | | - |
---|
757 | | - return queue; |
---|
758 | | -} |
---|
759 | | - |
---|
760 | | -bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, |
---|
761 | | - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, |
---|
762 | | - unsigned int wdg_timeout) |
---|
763 | | -{ |
---|
764 | | - struct iwl_scd_txq_cfg_cmd cmd = { |
---|
765 | | - .scd_queue = queue, |
---|
766 | | - .action = SCD_CFG_ENABLE_QUEUE, |
---|
767 | | - .window = cfg->frame_limit, |
---|
768 | | - .sta_id = cfg->sta_id, |
---|
769 | | - .ssn = cpu_to_le16(ssn), |
---|
770 | | - .tx_fifo = cfg->fifo, |
---|
771 | | - .aggregate = cfg->aggregate, |
---|
772 | | - .tid = cfg->tid, |
---|
773 | | - }; |
---|
774 | | - bool inc_ssn; |
---|
775 | | - |
---|
776 | | - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
---|
777 | | - return false; |
---|
778 | | - |
---|
779 | | - /* Send the enabling command if we need to */ |
---|
780 | | - if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, |
---|
781 | | - cfg->sta_id, cfg->tid)) |
---|
782 | | - return false; |
---|
783 | | - |
---|
784 | | - inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, |
---|
785 | | - NULL, wdg_timeout); |
---|
786 | | - if (inc_ssn) |
---|
787 | | - le16_add_cpu(&cmd.ssn, 1); |
---|
788 | | - |
---|
789 | | - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), |
---|
790 | | - "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); |
---|
791 | | - |
---|
792 | | - return inc_ssn; |
---|
793 | | -} |
---|
794 | | - |
---|
795 | | -int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, |
---|
796 | | - u8 tid, u8 flags) |
---|
797 | | -{ |
---|
798 | | - struct iwl_scd_txq_cfg_cmd cmd = { |
---|
799 | | - .scd_queue = queue, |
---|
800 | | - .action = SCD_CFG_DISABLE_QUEUE, |
---|
801 | | - }; |
---|
802 | | - bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; |
---|
803 | | - int ret; |
---|
804 | | - |
---|
805 | | - if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) |
---|
806 | | - return -EINVAL; |
---|
807 | | - |
---|
808 | | - if (iwl_mvm_has_new_tx_api(mvm)) { |
---|
809 | | - spin_lock_bh(&mvm->queue_info_lock); |
---|
810 | | - |
---|
811 | | - if (remove_mac_queue) |
---|
812 | | - mvm->hw_queue_to_mac80211[queue] &= |
---|
813 | | - ~BIT(mac80211_queue); |
---|
814 | | - |
---|
815 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
816 | | - |
---|
817 | | - iwl_trans_txq_free(mvm->trans, queue); |
---|
818 | | - |
---|
819 | | - return 0; |
---|
820 | | - } |
---|
821 | | - |
---|
822 | | - spin_lock_bh(&mvm->queue_info_lock); |
---|
823 | | - |
---|
824 | | - if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { |
---|
825 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
826 | | - return 0; |
---|
827 | | - } |
---|
828 | | - |
---|
829 | | - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); |
---|
830 | | - |
---|
831 | | - /* |
---|
832 | | - * If there is another TID with the same AC - don't remove the MAC queue |
---|
833 | | - * from the mapping |
---|
834 | | - */ |
---|
835 | | - if (tid < IWL_MAX_TID_COUNT) { |
---|
836 | | - unsigned long tid_bitmap = |
---|
837 | | - mvm->queue_info[queue].tid_bitmap; |
---|
838 | | - int ac = tid_to_mac80211_ac[tid]; |
---|
839 | | - int i; |
---|
840 | | - |
---|
841 | | - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { |
---|
842 | | - if (tid_to_mac80211_ac[i] == ac) |
---|
843 | | - remove_mac_queue = false; |
---|
844 | | - } |
---|
845 | | - } |
---|
846 | | - |
---|
847 | | - if (remove_mac_queue) |
---|
848 | | - mvm->hw_queue_to_mac80211[queue] &= |
---|
849 | | - ~BIT(mac80211_queue); |
---|
850 | | - mvm->queue_info[queue].hw_queue_refcount--; |
---|
851 | | - |
---|
852 | | - cmd.action = mvm->queue_info[queue].hw_queue_refcount ? |
---|
853 | | - SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; |
---|
854 | | - if (cmd.action == SCD_CFG_DISABLE_QUEUE) |
---|
855 | | - mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; |
---|
856 | | - |
---|
857 | | - IWL_DEBUG_TX_QUEUES(mvm, |
---|
858 | | - "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", |
---|
859 | | - queue, |
---|
860 | | - mvm->queue_info[queue].hw_queue_refcount, |
---|
861 | | - mvm->hw_queue_to_mac80211[queue]); |
---|
862 | | - |
---|
863 | | - /* If the queue is still enabled - nothing left to do in this func */ |
---|
864 | | - if (cmd.action == SCD_CFG_ENABLE_QUEUE) { |
---|
865 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
866 | | - return 0; |
---|
867 | | - } |
---|
868 | | - |
---|
869 | | - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; |
---|
870 | | - cmd.tid = mvm->queue_info[queue].txq_tid; |
---|
871 | | - |
---|
872 | | - /* Make sure queue info is correct even though we overwrite it */ |
---|
873 | | - WARN(mvm->queue_info[queue].hw_queue_refcount || |
---|
874 | | - mvm->queue_info[queue].tid_bitmap || |
---|
875 | | - mvm->hw_queue_to_mac80211[queue], |
---|
876 | | - "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", |
---|
877 | | - queue, mvm->queue_info[queue].hw_queue_refcount, |
---|
878 | | - mvm->hw_queue_to_mac80211[queue], |
---|
879 | | - mvm->queue_info[queue].tid_bitmap); |
---|
880 | | - |
---|
881 | | - /* If we are here - the queue is freed and we can zero out these vals */ |
---|
882 | | - mvm->queue_info[queue].hw_queue_refcount = 0; |
---|
883 | | - mvm->queue_info[queue].tid_bitmap = 0; |
---|
884 | | - mvm->hw_queue_to_mac80211[queue] = 0; |
---|
885 | | - |
---|
886 | | - /* Regardless if this is a reserved TXQ for a STA - mark it as false */ |
---|
887 | | - mvm->queue_info[queue].reserved = false; |
---|
888 | | - |
---|
889 | | - spin_unlock_bh(&mvm->queue_info_lock); |
---|
890 | | - |
---|
891 | | - iwl_trans_txq_disable(mvm->trans, queue, false); |
---|
892 | | - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, |
---|
893 | | - sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); |
---|
894 | | - |
---|
895 | | - if (ret) |
---|
896 | | - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", |
---|
897 | | - queue, ret); |
---|
898 | | - return ret; |
---|
899 | | -} |
---|
900 | | - |
---|
901 | 621 | /** |
---|
902 | 622 | * iwl_mvm_send_lq_cmd() - Send link quality command |
---|
903 | | - * @sync: This command can be sent synchronously. |
---|
| 623 | + * @mvm: Driver data. |
---|
| 624 | + * @lq: Link quality command to send. |
---|
904 | 625 | * |
---|
905 | 626 | * The link quality command is sent as the last step of station creation. |
---|
906 | 627 | * This is the special case in which init is set and we call a callback in |
---|
907 | 628 | * this case to clear the state indicating that station creation is in |
---|
908 | 629 | * progress. |
---|
909 | 630 | */ |
---|
910 | | -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) |
---|
| 631 | +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) |
---|
911 | 632 | { |
---|
912 | 633 | struct iwl_host_cmd cmd = { |
---|
913 | 634 | .id = LQ_CMD, |
---|
914 | 635 | .len = { sizeof(struct iwl_lq_cmd), }, |
---|
915 | | - .flags = sync ? 0 : CMD_ASYNC, |
---|
| 636 | + .flags = CMD_ASYNC, |
---|
916 | 637 | .data = { lq, }, |
---|
917 | 638 | }; |
---|
918 | 639 | |
---|
.. | .. |
---|
925 | 646 | |
---|
926 | 647 | /** |
---|
927 | 648 | * iwl_mvm_update_smps - Get a request to change the SMPS mode |
---|
| 649 | + * @mvm: Driver data. |
---|
| 650 | + * @vif: Pointer to the ieee80211_vif structure |
---|
928 | 651 | * @req_type: The part of the driver who call for a change. |
---|
929 | | - * @smps_requests: The request to change the SMPS mode. |
---|
| 652 | + * @smps_request: The request to change the SMPS mode. |
---|
930 | 653 | * |
---|
931 | 654 | * Get a requst to change the SMPS mode, |
---|
932 | 655 | * and change it according to all other requests in the driver. |
---|
.. | .. |
---|
1034 | 757 | return result; |
---|
1035 | 758 | } |
---|
1036 | 759 | |
---|
| 760 | +void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, |
---|
| 761 | + bool low_latency, u16 mac_id) |
---|
| 762 | +{ |
---|
| 763 | + struct iwl_mac_low_latency_cmd cmd = { |
---|
| 764 | + .mac_id = cpu_to_le32(mac_id) |
---|
| 765 | + }; |
---|
| 766 | + |
---|
| 767 | + if (!fw_has_capa(&mvm->fw->ucode_capa, |
---|
| 768 | + IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) |
---|
| 769 | + return; |
---|
| 770 | + |
---|
| 771 | + if (low_latency) { |
---|
| 772 | + /* currently we don't care about the direction */ |
---|
| 773 | + cmd.low_latency_rx = 1; |
---|
| 774 | + cmd.low_latency_tx = 1; |
---|
| 775 | + } |
---|
| 776 | + |
---|
| 777 | + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD, |
---|
| 778 | + MAC_CONF_GROUP, 0), |
---|
| 779 | + 0, sizeof(cmd), &cmd)) |
---|
| 780 | + IWL_ERR(mvm, "Failed to send low latency command\n"); |
---|
| 781 | +} |
---|
| 782 | + |
---|
1037 | 783 | int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
---|
1038 | 784 | bool low_latency, |
---|
1039 | 785 | enum iwl_mvm_low_latency_cause cause) |
---|
.. | .. |
---|
1052 | 798 | if (low_latency == prev) |
---|
1053 | 799 | return 0; |
---|
1054 | 800 | |
---|
1055 | | - if (fw_has_capa(&mvm->fw->ucode_capa, |
---|
1056 | | - IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) { |
---|
1057 | | - struct iwl_mac_low_latency_cmd cmd = { |
---|
1058 | | - .mac_id = cpu_to_le32(mvmvif->id) |
---|
1059 | | - }; |
---|
1060 | | - |
---|
1061 | | - if (low_latency) { |
---|
1062 | | - /* currently we don't care about the direction */ |
---|
1063 | | - cmd.low_latency_rx = 1; |
---|
1064 | | - cmd.low_latency_tx = 1; |
---|
1065 | | - } |
---|
1066 | | - res = iwl_mvm_send_cmd_pdu(mvm, |
---|
1067 | | - iwl_cmd_id(LOW_LATENCY_CMD, |
---|
1068 | | - MAC_CONF_GROUP, 0), |
---|
1069 | | - 0, sizeof(cmd), &cmd); |
---|
1070 | | - if (res) |
---|
1071 | | - IWL_ERR(mvm, "Failed to send low latency command\n"); |
---|
1072 | | - } |
---|
| 801 | + iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id); |
---|
1073 | 802 | |
---|
1074 | 803 | res = iwl_mvm_update_quotas(mvm, false, NULL); |
---|
1075 | 804 | if (res) |
---|
.. | .. |
---|
1196 | 925 | { |
---|
1197 | 926 | struct iwl_fw_dbg_trigger_tlv *trigger; |
---|
1198 | 927 | struct iwl_fw_dbg_trigger_txq_timer *txq_timer; |
---|
1199 | | - unsigned int default_timeout = |
---|
1200 | | - cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; |
---|
| 928 | + unsigned int default_timeout = cmd_q ? |
---|
| 929 | + IWL_DEF_WD_TIMEOUT : |
---|
| 930 | + mvm->trans->trans_cfg->base_params->wd_timeout; |
---|
1201 | 931 | |
---|
1202 | 932 | if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { |
---|
1203 | 933 | /* |
---|
.. | .. |
---|
1208 | 938 | IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && |
---|
1209 | 939 | vif && vif->type == NL80211_IFTYPE_AP) |
---|
1210 | 940 | return IWL_WATCHDOG_DISABLED; |
---|
1211 | | - return iwlmvm_mod_params.tfd_q_hang_detect ? |
---|
1212 | | - default_timeout : IWL_WATCHDOG_DISABLED; |
---|
| 941 | + return default_timeout; |
---|
1213 | 942 | } |
---|
1214 | 943 | |
---|
1215 | 944 | trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); |
---|
.. | .. |
---|
1241 | 970 | return default_timeout; |
---|
1242 | 971 | default: |
---|
1243 | 972 | WARN_ON(1); |
---|
1244 | | - return mvm->cfg->base_params->wd_timeout; |
---|
| 973 | + return mvm->trans->trans_cfg->base_params->wd_timeout; |
---|
1245 | 974 | } |
---|
1246 | 975 | } |
---|
1247 | 976 | |
---|
.. | .. |
---|
1251 | 980 | struct iwl_fw_dbg_trigger_tlv *trig; |
---|
1252 | 981 | struct iwl_fw_dbg_trigger_mlme *trig_mlme; |
---|
1253 | 982 | |
---|
1254 | | - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) |
---|
| 983 | + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), |
---|
| 984 | + FW_DBG_TRIGGER_MLME); |
---|
| 985 | + if (!trig) |
---|
1255 | 986 | goto out; |
---|
1256 | 987 | |
---|
1257 | | - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); |
---|
1258 | 988 | trig_mlme = (void *)trig->data; |
---|
1259 | | - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, |
---|
1260 | | - ieee80211_vif_to_wdev(vif), trig)) |
---|
1261 | | - goto out; |
---|
1262 | 989 | |
---|
1263 | 990 | if (trig_mlme->stop_connection_loss && |
---|
1264 | 991 | --trig_mlme->stop_connection_loss) |
---|
.. | .. |
---|
1270 | 997 | ieee80211_connection_loss(vif); |
---|
1271 | 998 | } |
---|
1272 | 999 | |
---|
/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some the queue TIDs are inactive - unmap them from the queue
 *
 * Caller must hold both mvmsta->lock and mvm->queue_info_lock (asserted
 * below); only valid for the non-new-TX-API (DQA) queue model.
 */
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	/* New TX API devices manage queues differently; this path is DQA-only */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - mark queue as inactive. */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;

		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
			mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
				    queue);
		return;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];

		/* Detach this TID from the queue and drop its refcount */
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].hw_queue_refcount--;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
		mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		/* Single user left: schedule a reconfig back to non-shared */
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
	}
}
---|
1353 | | - |
---|
/*
 * Scan all in-use TX queues for TIDs that have not seen a frame within
 * IWL_MVM_DQA_QUEUE_TIMEOUT and hand them to iwl_mvm_remove_inactive_tids().
 * No-op on devices using the new TX API, where queues are managed per-STA.
 */
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
	unsigned long timeout_queues_map = 0;
	unsigned long now = jiffies;
	int i;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/* Snapshot the set of queues that currently have any users */
	spin_lock_bh(&mvm->queue_info_lock);
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
		if (mvm->queue_info[i].hw_queue_refcount > 0)
			timeout_queues_map |= BIT(i);
	spin_unlock_bh(&mvm->queue_info_lock);

	/* RCU protects the fw_id_to_mac_id station lookups below */
	rcu_read_lock();

	/*
	 * If a queue time outs - mark it as INACTIVE (don't remove right away
	 * if we don't have to.) This is an optimization in case traffic comes
	 * later, and we don't HAVE to use a currently-inactive queue
	 */
	for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		spin_lock_bh(&mvm->queue_info_lock);
		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
			spin_unlock_bh(&mvm->queue_info_lock);
			continue;
		}

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}
		spin_unlock_bh(&mvm->queue_info_lock);

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/*
		 * Lock order: station lock first, then queue_info_lock —
		 * matching the lockdep asserts in
		 * iwl_mvm_remove_inactive_tids().
		 */
		spin_lock_bh(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
					     inactive_tid_bitmap);
		spin_unlock(&mvm->queue_info_lock);
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();
}
---|
1437 | | - |
---|
1438 | 1000 | void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, |
---|
1439 | 1001 | struct ieee80211_vif *vif, |
---|
1440 | 1002 | const struct ieee80211_sta *sta, |
---|
.. | .. |
---|
1443 | 1005 | struct iwl_fw_dbg_trigger_tlv *trig; |
---|
1444 | 1006 | struct iwl_fw_dbg_trigger_ba *ba_trig; |
---|
1445 | 1007 | |
---|
1446 | | - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) |
---|
| 1008 | + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), |
---|
| 1009 | + FW_DBG_TRIGGER_BA); |
---|
| 1010 | + if (!trig) |
---|
1447 | 1011 | return; |
---|
1448 | 1012 | |
---|
1449 | | - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); |
---|
1450 | 1013 | ba_trig = (void *)trig->data; |
---|
1451 | | - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, |
---|
1452 | | - ieee80211_vif_to_wdev(vif), trig)) |
---|
1453 | | - return; |
---|
1454 | 1014 | |
---|
1455 | 1015 | if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid))) |
---|
1456 | 1016 | return; |
---|
.. | .. |
---|
1559 | 1119 | "AP isn't using AMPDU with uAPSD enabled"); |
---|
1560 | 1120 | } |
---|
1561 | 1121 | |
---|
1562 | | -static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, |
---|
1563 | | - struct ieee80211_vif *vif) |
---|
| 1122 | +static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, |
---|
| 1123 | + struct ieee80211_vif *vif) |
---|
1564 | 1124 | { |
---|
1565 | 1125 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
---|
1566 | | - struct iwl_mvm *mvm = mvmvif->mvm; |
---|
1567 | | - int *mac_id = data; |
---|
1568 | 1126 | |
---|
1569 | 1127 | if (vif->type != NL80211_IFTYPE_STATION) |
---|
1570 | | - return; |
---|
1571 | | - |
---|
1572 | | - if (mvmvif->id != *mac_id) |
---|
1573 | 1128 | return; |
---|
1574 | 1129 | |
---|
1575 | 1130 | if (!vif->bss_conf.assoc) |
---|
.. | .. |
---|
1581 | 1136 | !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) |
---|
1582 | 1137 | return; |
---|
1583 | 1138 | |
---|
1584 | | - if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected) |
---|
| 1139 | + if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) |
---|
1585 | 1140 | return; |
---|
1586 | 1141 | |
---|
1587 | | - mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true; |
---|
| 1142 | + mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; |
---|
1588 | 1143 | IWL_INFO(mvm, |
---|
1589 | 1144 | "detected AP should do aggregation but isn't, likely due to U-APSD\n"); |
---|
1590 | 1145 | schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); |
---|
.. | .. |
---|
1597 | 1152 | u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; |
---|
1598 | 1153 | u64 tpt; |
---|
1599 | 1154 | unsigned long rate; |
---|
| 1155 | + struct ieee80211_vif *vif; |
---|
1600 | 1156 | |
---|
1601 | 1157 | rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); |
---|
1602 | 1158 | |
---|
.. | .. |
---|
1625 | 1181 | return; |
---|
1626 | 1182 | } |
---|
1627 | 1183 | |
---|
1628 | | - ieee80211_iterate_active_interfaces_atomic( |
---|
1629 | | - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, |
---|
1630 | | - iwl_mvm_uapsd_agg_disconnect_iter, &mac); |
---|
| 1184 | + rcu_read_lock(); |
---|
| 1185 | + vif = rcu_dereference(mvm->vif_id_to_mac[mac]); |
---|
| 1186 | + if (vif) |
---|
| 1187 | + iwl_mvm_uapsd_agg_disconnect(mvm, vif); |
---|
| 1188 | + rcu_read_unlock(); |
---|
1631 | 1189 | } |
---|
1632 | 1190 | |
---|
1633 | 1191 | static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, |
---|
.. | .. |
---|
1854 | 1412 | cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk); |
---|
1855 | 1413 | } |
---|
1856 | 1414 | |
---|
| 1415 | +u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) |
---|
| 1416 | +{ |
---|
| 1417 | + u32 reg_addr = DEVICE_SYSTEM_TIME_REG; |
---|
| 1418 | + |
---|
| 1419 | + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 && |
---|
| 1420 | + mvm->trans->cfg->gp2_reg_addr) |
---|
| 1421 | + reg_addr = mvm->trans->cfg->gp2_reg_addr; |
---|
| 1422 | + |
---|
| 1423 | + return iwl_read_prph(mvm->trans, reg_addr); |
---|
| 1424 | +} |
---|
1857 | 1425 | |
---|
1858 | 1426 | void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) |
---|
1859 | 1427 | { |
---|
.. | .. |
---|
1868 | 1436 | iwl_mvm_power_update_device(mvm); |
---|
1869 | 1437 | } |
---|
1870 | 1438 | |
---|
1871 | | - *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); |
---|
1872 | | - *boottime = ktime_get_boot_ns(); |
---|
| 1439 | + *gp2 = iwl_mvm_get_systime(mvm); |
---|
| 1440 | + *boottime = ktime_get_boottime_ns(); |
---|
1873 | 1441 | |
---|
1874 | 1442 | if (!ps_disabled) { |
---|
1875 | 1443 | mvm->ps_disabled = ps_disabled; |
---|