@@ -5 +5 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -19 +18 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -33 +27 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -87 +80 @@
 #include "fw/api/scan.h"
 #include "time-event.h"
 #include "fw-api.h"
-#include "fw/api/scan.h"
 #include "fw/acpi.h"

 #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_AUTHOR(DRV_AUTHOR);
 MODULE_LICENSE("GPL");

 static const struct iwl_op_mode_ops iwl_mvm_ops;
@@ -100 +92 @@

 struct iwl_mvm_mod_params iwlmvm_mod_params = {
 	.power_scheme = IWL_POWER_SCHEME_BPS,
-	.tfd_q_hang_detect = true
 	/* rest of fields are 0 by default */
 };

@@ -110 +101 @@
 module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
 MODULE_PARM_DESC(power_scheme,
 		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
-module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
-		   bool, 0444);
-MODULE_PARM_DESC(tfd_q_hang_detect,
-		 "TFD queues hang detection (default: true");

 /*
  * module init and exit functions
@@ -146 +133 @@
 static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	struct iwl_trans_debug *dbg = &mvm->trans->dbg;
 	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
 	u32 reg_val = 0;
 	u32 phy_config = iwl_mvm_get_phy_config(mvm);
@@ -179 +167 @@
 	 * unrelated errors. Need to further investigate this, but for now
 	 * we'll separate cases.
 	 */
-	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
 		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+
+	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) ||
+	    (iwl_trans_dbg_ini_valid(mvm->trans) &&
+	     dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location)
+	    )
+		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

 	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
 				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
@@ -189 +183 @@
 				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
 				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
 				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
+				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
 				reg_val);

 	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
@@ -265 +260 @@

 	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
 		   RX_HANDLER_SYNC),
+	RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+		       iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
 	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
 		   RX_HANDLER_ASYNC_LOCKED),

@@ -302 +299 @@
 		   RX_HANDLER_ASYNC_LOCKED),
 	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
 		   RX_HANDLER_SYNC),
-	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
-		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
+		       iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED),
+
+	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+		       iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF,
+		       iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
+
 	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
 		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
 	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
@@ -312 +315 @@
 		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
 	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
 		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
+	RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
+		       iwl_mvm_probe_resp_data_notif,
+		       RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF,
+		       iwl_mvm_channel_switch_noa_notif,
+		       RX_HANDLER_SYNC),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP
@@ -320 +329 @@
  * Access is done through binary search
  */
 static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
-	HCMD_NAME(MVM_ALIVE),
+	HCMD_NAME(UCODE_ALIVE_NTFY),
 	HCMD_NAME(REPLY_ERROR),
 	HCMD_NAME(ECHO_CMD),
 	HCMD_NAME(INIT_COMPLETE_NOTIF),
@@ -330 +339 @@
 	HCMD_NAME(SCAN_REQ_UMAC),
 	HCMD_NAME(SCAN_ABORT_UMAC),
 	HCMD_NAME(SCAN_COMPLETE_UMAC),
-	HCMD_NAME(TOF_CMD),
-	HCMD_NAME(TOF_NOTIFICATION),
 	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
 	HCMD_NAME(ADD_STA_KEY),
 	HCMD_NAME(ADD_STA),
@@ -387 +394 @@
 	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
 	HCMD_NAME(REPLY_RX_PHY_CMD),
 	HCMD_NAME(REPLY_RX_MPDU_CMD),
+	HCMD_NAME(BAR_FRAME_RELEASE),
 	HCMD_NAME(FRAME_RELEASE),
 	HCMD_NAME(BA_NOTIF),
 	HCMD_NAME(MCC_UPDATE_CMD),
@@ -412 +420 @@
 	HCMD_NAME(SCAN_ITERATION_COMPLETE),
 	HCMD_NAME(D0I3_END_CMD),
 	HCMD_NAME(LTR_CONFIG),
+	HCMD_NAME(LDBG_CONFIG_CMD),
 };

 /* Please keep this array *SORTED* by hex value.
@@ -420 +429 @@
 static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
 	HCMD_NAME(SHARED_MEM_CFG_CMD),
 	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+	HCMD_NAME(FW_ERROR_RECOVERY_CMD),
 };

 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
 static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+	HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
+	HCMD_NAME(SESSION_PROTECTION_CMD),
+	HCMD_NAME(SESSION_PROTECTION_NOTIF),
 	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
 };

@@ -450 +463 @@
 	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
 	HCMD_NAME(STA_HE_CTXT_CMD),
 	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+	HCMD_NAME(TLC_MNG_CONFIG_CMD),
+	HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
 	HCMD_NAME(STA_PM_NOTIF),
 	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
 	HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -458 +473 @@
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
-static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
-	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
+	HCMD_NAME(TOF_RANGE_REQ_CMD),
+	HCMD_NAME(TOF_CONFIG_CMD),
+	HCMD_NAME(TOF_RANGE_ABORT_CMD),
+	HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
+	HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
+	HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
+	HCMD_NAME(TOF_LC_NOTIF),
+	HCMD_NAME(TOF_RESPONDER_STATS),
+	HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
+	HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
 };

 /* Please keep this array *SORTED* by hex value.
@@ -475 +499 @@
 static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
 	HCMD_NAME(NVM_ACCESS_COMPLETE),
 	HCMD_NAME(NVM_GET_INFO),
+	HCMD_NAME(TAS_CONFIG),
 };

 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
@@ -484 +509 @@
 	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+	[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
 	[REGULATORY_AND_NVM_GROUP] =
 		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
@@ -491 +517 @@

 /* this forward declaration can avoid to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
-static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);

 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
 {
@@ -539 +564 @@
 static int iwl_mvm_fwrt_dump_start(void *ctx)
 {
 	struct iwl_mvm *mvm = ctx;
-	int ret;
-
-	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
-	if (ret)
-		return ret;

 	mutex_lock(&mvm->mutex);

@@ -555 +575 @@
 	struct iwl_mvm *mvm = ctx;

 	mutex_unlock(&mvm->mutex);
-
-	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
 }

 static bool iwl_mvm_fwrt_fw_running(void *ctx)
@@ -564 +582 @@
 	return iwl_mvm_firmware_running(ctx);
 }

+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, host_cmd);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static bool iwl_mvm_d3_debug_enable(void *ctx)
+{
+	return IWL_MVM_D3_DEBUG;
+}
+
 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
 	.dump_start = iwl_mvm_fwrt_dump_start,
 	.dump_end = iwl_mvm_fwrt_dump_end,
 	.fw_running = iwl_mvm_fwrt_fw_running,
+	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
+	.d3_debug_enable = iwl_mvm_d3_debug_enable,
 };

 static struct iwl_op_mode *
@@ -583 +620 @@
 	};
 	int err, scan_size;
 	u32 min_backoff;
+	enum iwl_amsdu_size rb_size_default;

 	/*
-	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
+	 * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
 	 * index all over the driver - check that its value corresponds to the
 	 * array size.
 	 */
-	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
+	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
+		     IWL_MVM_STATION_COUNT_MAX);

 	/********************************
 	 * 1. Allocating and configuring HW data
@@ -600 +639 @@
 	if (!hw)
 		return NULL;

-	if (cfg->max_rx_agg_size)
-		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+	hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

 	if (cfg->max_tx_agg_size)
 		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+	else
+		hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

 	op_mode = hw->priv;

@@ -623 +663 @@
 	if (iwl_mvm_has_new_rx_api(mvm)) {
 		op_mode->ops = &iwl_mvm_ops_mq;
 		trans->rx_mpdu_cmd_hdr_size =
-			(trans->cfg->device_family >=
-			 IWL_DEVICE_FAMILY_22560) ?
+			(trans->trans_cfg->device_family >=
+			 IWL_DEVICE_FAMILY_AX210) ?
 			sizeof(struct iwl_rx_mpdu_desc) :
 			IWL_RX_DESC_SIZE_V1;
 	} else {
@@ -638 +678 @@

 	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;

-	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
-	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
-	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
-	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		/*
+		 * If we have the new TX/queue allocation API initialize them
+		 * all to invalid numbers. We'll rewrite the ones that we need
+		 * later, but that doesn't happen for all of them all of the
+		 * time (e.g. P2P Device is optional), and if a dynamic queue
+		 * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
+		 * iwl_mvm_is_static_queue() erroneously returns true, and we
+		 * might have things getting stuck.
+		 */
+		mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
+		mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
+		mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
+		mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
+	} else {
+		mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+		mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
+		mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+		mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+	}

 	mvm->sf_state = SF_UNINIT;
 	if (iwl_mvm_has_unified_ucode(mvm))
@@ -651 +707 @@
 	mvm->drop_bcn_ap_mode = true;

 	mutex_init(&mvm->mutex);
-	mutex_init(&mvm->d0i3_suspend_mutex);
 	spin_lock_init(&mvm->async_handlers_lock);
 	INIT_LIST_HEAD(&mvm->time_event_list);
 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
 	INIT_LIST_HEAD(&mvm->async_handlers_list);
 	spin_lock_init(&mvm->time_event_lock);
-	spin_lock_init(&mvm->queue_info_lock);
+	INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
+	INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
+	INIT_LIST_HEAD(&mvm->resp_pasn_list);

 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
-	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+	INIT_LIST_HEAD(&mvm->add_stream_txqs);

-	spin_lock_init(&mvm->d0i3_tx_lock);
-	spin_lock_init(&mvm->refs_lock);
-	skb_queue_head_init(&mvm->d0i3_tx);
-	init_waitqueue_head(&mvm->d0i3_exit_waitq);
 	init_waitqueue_head(&mvm->rx_sync_waitq);

 	atomic_set(&mvm->queue_sync_counter, 0);
@@ -684 +737 @@

 	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

+	mvm->cmd_ver.d0i3_resp =
+		iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
+					0);
+	/* we only support version 1 */
+	if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
+		goto out_free;
+
+	mvm->cmd_ver.range_resp =
+		iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
+					TOF_RANGE_RESPONSE_NOTIF, 5);
+	/* we only support up to version 8 */
+	if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 8))
+		goto out_free;
+
 	/*
 	 * Populate the state variables that the transport layer needs
 	 * to know about.
@@ -691 +758 @@
 	trans_cfg.op_mode = op_mode;
 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		rb_size_default = IWL_AMSDU_2K;
+	else
+		rb_size_default = IWL_AMSDU_4K;
+
 	switch (iwlwifi_mod_params.amsdu_size) {
 	case IWL_AMSDU_DEF:
+		trans_cfg.rx_buf_size = rb_size_default;
+		break;
 	case IWL_AMSDU_4K:
 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
 		break;
@@ -705 +780 @@
 	default:
 		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
 		       iwlwifi_mod_params.amsdu_size);
-		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
-	}
-
-	/* the hardware splits the A-MSDU */
-	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-		trans_cfg.rx_buf_size = IWL_AMSDU_2K;
-		/* TODO: remove when balanced power mode is fw supported */
-		iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
-	} else if (mvm->cfg->mq_rx_supported) {
-		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+		trans_cfg.rx_buf_size = rb_size_default;
 	}

 	trans->wide_cmd_header = true;
 	trans_cfg.bc_table_dword =
-		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
+		mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;

 	trans_cfg.command_groups = iwl_mvm_groups;
 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -745 +811 @@
 	iwl_trans_configure(mvm->trans, &trans_cfg);

 	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
-	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
-	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
-	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
-	       sizeof(trans->dbg_conf_tlv));
-	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
-	trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+	trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
+	trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
+	memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
+	       sizeof(trans->dbg.conf_tlv));
+	trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;

 	trans->iml = mvm->fw->iml;
 	trans->iml_len = mvm->fw->iml_len;
@@ -766 +831 @@
 	}

 	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
-		 mvm->cfg->name, mvm->trans->hw_rev);
+		 mvm->trans->name, mvm->trans->hw_rev);

 	if (iwlwifi_mod_params.nvm_file)
 		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
@@ -779 +844 @@
 		goto out_free;

 	mutex_lock(&mvm->mutex);
-	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
 	err = iwl_run_init_mvm_ucode(mvm, true);
+	if (err && err != -ERFKILL)
+		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
 	if (!iwlmvm_mod_params.init_dbg || !err)
 		iwl_mvm_stop_device(mvm);
-	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
 	mutex_unlock(&mvm->mutex);
 	if (err < 0) {
 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
@@ -796 +861 @@
 	if (!mvm->scan_cmd)
 		goto out_free;

+	/* invalidate ids to prevent accidental removal of sta_id 0 */
+	mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+	mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
 	/* Set EBS as successful as long as not stated otherwise by the FW. */
 	mvm->last_ebs_successful = true;

@@ -807 +876 @@
 	min_backoff = iwl_mvm_min_backoff(mvm);
 	iwl_mvm_thermal_initialize(mvm, min_backoff);

-	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
-	if (err)
-		goto out_unregister;
+	iwl_mvm_dbgfs_register(mvm, dbgfs_dir);

 	if (!iwl_mvm_has_new_rx_stats_api(mvm))
 		memset(&mvm->rx_stats_v3, 0,
@@ -817 +884 @@
 	else
 		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

-	/* The transport always starts with a taken reference, we can
-	 * release it now if d0i3 is supported */
-	if (iwl_mvm_is_d0i3_supported(mvm))
-		iwl_trans_unref(mvm->trans);
-
-	iwl_mvm_tof_init(mvm);
+	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

 	return op_mode;

- out_unregister:
-	if (iwlmvm_mod_params.init_dbg)
-		return op_mode;
-
-	ieee80211_unregister_hw(mvm->hw);
-	mvm->hw_registered = false;
-	iwl_mvm_leds_exit(mvm);
-	iwl_mvm_thermal_exit(mvm);
 out_free:
-	iwl_fw_flush_dump(&mvm->fwrt);
+	iwl_fw_flush_dumps(&mvm->fwrt);
+	iwl_fw_runtime_free(&mvm->fwrt);

 	if (iwlmvm_mod_params.init_dbg)
 		return op_mode;
@@ -852 +907 @@
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	int i;

-	/* If d0i3 is supported, we have released the reference that
-	 * the transport started with, so we should take it back now
-	 * that we are leaving.
-	 */
-	if (iwl_mvm_is_d0i3_supported(mvm))
-		iwl_trans_ref(mvm->trans);
-
 	iwl_mvm_leds_exit(mvm);

 	iwl_mvm_thermal_exit(mvm);

-	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
-		ieee80211_unregister_hw(mvm->hw);
-		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
-	}
+	ieee80211_unregister_hw(mvm->hw);

 	kfree(mvm->scan_cmd);
 	kfree(mvm->mcast_filter_cmd);
 	mvm->mcast_filter_cmd = NULL;

-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
-	kfree(mvm->d3_resume_sram);
-#endif
+	kfree(mvm->error_recovery_buf);
+	mvm->error_recovery_buf = NULL;
+
 	iwl_trans_op_mode_leave(mvm->trans);

 	iwl_phy_db_free(mvm->phy_db);
@@ -886 +931 @@

 	cancel_delayed_work_sync(&mvm->tcm.work);

-	iwl_mvm_tof_clean(mvm);
-
+	iwl_fw_runtime_free(&mvm->fwrt);
 	mutex_destroy(&mvm->mutex);
-	mutex_destroy(&mvm->d0i3_suspend_mutex);

 	ieee80211_free_hw(mvm->hw);
 }
@@ -950 +993 @@
 	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
 	int i;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+				     FW_DBG_TRIGGER_FW_NOTIF);
+	if (!trig)
 		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
 	cmds_trig = (void *)trig->data;
-
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;

 	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
 		/* don't collect on CMD 0 */
@@ -980 +1021 @@
 			      struct iwl_rx_packet *pkt)
 {
 	int i;
+	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };

+	iwl_dbg_tlv_time_point(&mvm->fwrt,
+			       IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
 	iwl_mvm_rx_check_trigger(mvm, pkt);

 	/*
@@ -1048 +1092 @@
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
 					 RX_QUEUES_NOTIFICATION)))
-		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+		iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+	else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
+		iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
+	else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
+		iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
 	else
 		iwl_mvm_rx_common(mvm, rxb, pkt);
-}
-
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
-{
-	int q;
-
-	if (WARN_ON_ONCE(!mq))
-		return;
-
-	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
-		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
-			IWL_DEBUG_TX_QUEUES(mvm,
-					    "mac80211 %d already stopped\n", q);
-			continue;
-		}
-
-		ieee80211_stop_queue(mvm->hw, q);
-	}
 }

 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
@@ -1085 +1115 @@
 		iwl_trans_block_txq_ptrs(mvm->trans, false);
 }

-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
 {
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->hw_queue_to_mac80211[hw_queue];
-	spin_unlock_bh(&mvm->queue_info_lock);
-
-	iwl_mvm_stop_mac_queues(mvm, mq);
+	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
+		queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
 }

-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+				       int hw_queue, bool start)
 {
-	int q;
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	struct ieee80211_sta *sta;
+	struct ieee80211_txq *txq;
+	struct iwl_mvm_txq *mvmtxq;
+	int i;
+	unsigned long tid_bitmap;
+	struct iwl_mvm_sta *mvmsta;
+	u8 sta_id;

-	if (WARN_ON_ONCE(!mq))
+	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
+		mvm->tvqm_info[hw_queue].sta_id :
+		mvm->queue_info[hw_queue].ra_sta_id;
+
+	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
 		return;

-	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
-		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
-			IWL_DEBUG_TX_QUEUES(mvm,
-					    "mac80211 %d still stopped\n", q);
-			continue;
-		}
+	rcu_read_lock();

-		ieee80211_wake_queue(mvm->hw, q);
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+	if (IS_ERR_OR_NULL(sta))
+		goto out;
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
+		if (!start)
+			ieee80211_stop_queues(mvm->hw);
+		else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+			ieee80211_wake_queues(mvm->hw);
+
+		goto out;
 	}
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		int tid = mvm->tvqm_info[hw_queue].txq_tid;
+
+		tid_bitmap = BIT(tid);
+	} else {
+		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
+	}
+
+	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		int tid = i;
+
+		if (tid == IWL_MAX_TID_COUNT)
+			tid = IEEE80211_NUM_TIDS;
+
+		txq = sta->txq[tid];
+		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+		mvmtxq->stopped = !start;
+
+		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+	}
+
+out:
+	rcu_read_unlock();
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
 }

 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
-
-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->hw_queue_to_mac80211[hw_queue];
-	spin_unlock_bh(&mvm->queue_info_lock);
-
-	iwl_mvm_start_mac_queues(mvm, mq);
+	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
 }

 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
@@ -1150 +1215 @@
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	bool calibrating = READ_ONCE(mvm->calibrating);
+	bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
+	bool unified = iwl_mvm_has_unified_ucode(mvm);

 	if (state)
 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -1159 +1225 @@

 	iwl_mvm_set_rfkill_state(mvm);

-	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
-	if (calibrating)
+	/* iwl_run_init_mvm_ucode is waiting for results, abort it. */
+	if (rfkill_safe_init_done)
 		iwl_abort_notification_waits(&mvm->notif_wait);
+
+	/*
+	 * Don't ask the transport to stop the firmware. We'll do it
+	 * after cfg80211 takes us down.
+	 */
+	if (unified)
+		return false;

 	/*
 	 * Stop the device if we run OPERATIONAL firmware or if we are in the
 	 * middle of the calibrations.
 	 */
-	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
+	return state && rfkill_safe_init_done;
 }

 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@@ -1200 +1273 @@
 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 {
 	iwl_abort_notification_waits(&mvm->notif_wait);
+	iwl_dbg_tlv_del_timers(mvm->trans);

 	/*
 	 * This is a bit racy, but worst case we tell mac80211 about
@@ -1220 +1294 @@
 	 * can't recover this since we're already half suspended.
 	 */
 	if (!mvm->fw_restart && fw_error) {
-		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-					NULL);
+		iwl_fw_error_collect(&mvm->fwrt);
+	} else if (test_bit(IWL_MVM_STATUS_STARTING,
+			    &mvm->status)) {
+		IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
 	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;

@@ -1246 +1322 @@
 		reprobe->dev = get_device(mvm->trans->dev);
 		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
 		schedule_work(&reprobe->work);
+	} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+			    &mvm->status)) {
+		IWL_ERR(mvm, "HW restart already requested, but not started\n");
 	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
-		   mvm->hw_registered) {
-		/* don't let the transport/FW power down */
-		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+		   mvm->hw_registered &&
+		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
+		if (mvm->fw->ucode_capa.error_log_size) {
+			u32 src_size = mvm->fw->ucode_capa.error_log_size;
+			u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
+			u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);
+
+			if (recover_buf) {
+				mvm->error_recovery_buf = recover_buf;
+				iwl_trans_read_mem_bytes(mvm->trans,
+							 src_addr,
+							 recover_buf,
+							 src_size);
+			}
+		}
+
+		iwl_fw_error_collect(&mvm->fwrt);

 		if (fw_error && mvm->fw_restart > 0)
 			mvm->fw_restart--;
@@ -1262 +1355 @@
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

-	iwl_mvm_dump_nic_error_log(mvm);
+	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
+		iwl_mvm_dump_nic_error_log(mvm);

 	iwl_mvm_nic_restart(mvm, true);
 }
@@ -1275 +1369 @@
 	iwl_mvm_nic_restart(mvm, true);
 }

-struct iwl_d0i3_iter_data {
-	struct iwl_mvm *mvm;
-	struct ieee80211_vif *connected_vif;
-	u8 ap_sta_id;
-	u8 vif_count;
-	u8 offloading_tid;
-	bool disable_offloading;
-};
-
-static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
-					struct ieee80211_vif *vif,
-					struct iwl_d0i3_iter_data *iter_data)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_mvm_sta *mvmsta;
-	u32 available_tids = 0;
-	u8 tid;
-
-	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
-		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
-		return false;
-
-	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
-	if (!mvmsta)
-		return false;
-
-	spin_lock_bh(&mvmsta->lock);
-	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
-		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
-
-		/*
-		 * in case of pending tx packets, don't use this tid
-		 * for offloading in order to prevent reuse of the same
-		 * qos seq counters.
-		 */
-		if (iwl_mvm_tid_queued(mvm, tid_data))
-			continue;
-
-		if (tid_data->state != IWL_AGG_OFF)
-			continue;
-
-		available_tids |= BIT(tid);
-	}
-	spin_unlock_bh(&mvmsta->lock);
-
-	/*
-	 * disallow protocol offloading if we have no available tid
-	 * (with no pending frames and no active aggregation,
-	 * as we don't handle "holes" properly - the scheduler needs the
-	 * frame's seq number and TFD index to match)
-	 */
-	if (!available_tids)
-		return true;
-
-	/* for simplicity, just use the first available tid */
-	iter_data->offloading_tid = ffs(available_tids) - 1;
-	return false;
-}
-
-static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
-					struct ieee80211_vif *vif)
-{
-	struct iwl_d0i3_iter_data *data = _data;
-	struct iwl_mvm *mvm = data->mvm;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
-
-	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
-	if (vif->type != NL80211_IFTYPE_STATION ||
-	    !vif->bss_conf.assoc)
-		return;
-
-	/*
-	 * in case of pending tx packets or active aggregations,
-	 * avoid offloading features in order to prevent reuse of
-	 * the same qos seq counters.
-	 */
-	if (iwl_mvm_disallow_offloading(mvm, vif, data))
-		data->disable_offloading = true;
-
-	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
-	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
-				   false, flags);
-
-	/*
-	 * on init/association, mvm already configures POWER_TABLE_CMD
-	 * and REPLY_MCAST_FILTER_CMD, so currently don't
-	 * reconfigure them (we might want to use different
-	 * params later on, though).
-	 */
-	data->ap_sta_id = mvmvif->ap_sta_id;
-	data->vif_count++;
-
-	/*
-	 * no new commands can be sent at this stage, so it's safe
-	 * to save the vif pointer during d0i3 entrance.
-	 */
-	data->connected_vif = vif;
-}
-
-static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
-				    struct iwl_wowlan_config_cmd *cmd,
-				    struct iwl_d0i3_iter_data *iter_data)
-{
-	struct ieee80211_sta *ap_sta;
-	struct iwl_mvm_sta *mvm_ap_sta;
-
-	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
-		return;
-
-	rcu_read_lock();
-
-	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
-	if (IS_ERR_OR_NULL(ap_sta))
-		goto out;
-
-	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
-	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
-	cmd->offloading_tid = iter_data->offloading_tid;
-	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
-		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
-	/*
-	 * The d0i3 uCode takes care of the nonqos counters,
-	 * so configure only the qos seq ones.
-	 */
-	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
-out:
-	rcu_read_unlock();
-}
-
-int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
-{
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
-	int ret;
-	struct iwl_d0i3_iter_data d0i3_iter_data = {
-		.mvm = mvm,
-	};
-	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
-		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
-					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
-					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
-	};
-	struct iwl_d3_manager_config d3_cfg_cmd = {
-		.min_sleep_time = cpu_to_le32(1000),
-		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
-	};
-
-	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
-
-	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
-		return -EINVAL;
-
-	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-
-	/*
-	 * iwl_mvm_ref_sync takes a reference before checking the flag.
-	 * so by checking there is no held reference we prevent a state
-	 * in which iwl_mvm_ref_sync continues successfully while we
-	 * configure the firmware to enter d0i3
-	 */
-	if (iwl_mvm_ref_taken(mvm)) {
-		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
-		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-		wake_up(&mvm->d0i3_exit_waitq);
-		return 1;
-	}
-
-	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-						   IEEE80211_IFACE_ITER_NORMAL,
-						   iwl_mvm_enter_d0i3_iterator,
-						   &d0i3_iter_data);
-	if (d0i3_iter_data.vif_count == 1) {
-		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
-		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
-	} else {
-		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
-		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-		mvm->d0i3_offloading = false;
-	}
-
-	iwl_mvm_pause_tcm(mvm, true);
-	/* make sure we have no running tx while configuring the seqno */
-	synchronize_net();
-
-	/* Flush the hw queues, in case something got queued during entry */
-	/* TODO new tx api */
-	if (iwl_mvm_has_new_tx_api(mvm)) {
-		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
-	} else {
-		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
-					    flags);
-		if (ret)
-			return ret;
-	}
-
-	/* configure wowlan configuration only if needed */
-	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
-		/* wake on beacons only if beacon storing isn't supported */
-		if (!fw_has_capa(&mvm->fw->ucode_capa,
-				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
-			wowlan_config_cmd.wakeup_filter |=
-				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
-
-		iwl_mvm_wowlan_config_key_params(mvm,
-						 d0i3_iter_data.connected_vif,
-						 true, flags);
-
-		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
-					&d0i3_iter_data);
-
-		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
-					   sizeof(wowlan_config_cmd),
-					   &wowlan_config_cmd);
-		if (ret)
-			return ret;
-	}
-
-	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
-				    flags | CMD_MAKE_TRANS_IDLE,
-				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
-}
-
-static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
-				       struct ieee80211_vif *vif)
-{
-	struct iwl_mvm *mvm = _data;
-	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
-
-	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
-	if (vif->type != NL80211_IFTYPE_STATION ||
-	    !vif->bss_conf.assoc)
-		return;
-
-	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
-}
-
-struct iwl_mvm_d0i3_exit_work_iter_data {
-	struct iwl_mvm *mvm;
-	struct iwl_wowlan_status *status;
-	u32 wakeup_reasons;
-};
-
-static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
-					struct ieee80211_vif *vif)
-{
-	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 reasons = data->wakeup_reasons;
-
-	/* consider only the relevant station interface */
-	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
-	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
-		return;
-
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
-		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
-	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
-		ieee80211_beacon_loss(vif);
-	else
-		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
-}
-
-void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
-{
-	struct ieee80211_sta *sta = NULL;
-	struct iwl_mvm_sta *mvm_ap_sta;
-	int i;
-	bool wake_queues = false;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	spin_lock_bh(&mvm->d0i3_tx_lock);
-
-	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
-		goto out;
-
-	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
-
-	/* get the sta in order to update seq numbers and re-enqueue skbs */
-	sta = rcu_dereference_protected(
-			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
-			lockdep_is_held(&mvm->mutex));
-
-	if (IS_ERR_OR_NULL(sta)) {
-		sta = NULL;
-		goto out;
-	}
-
-	if (mvm->d0i3_offloading && qos_seq) {
-		/* update qos seq numbers if offloading was enabled */
-		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
-		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-			u16 seq = le16_to_cpu(qos_seq[i]);
-			/* firmware stores last-used one, we store next one */
-			seq += 0x10;
-			mvm_ap_sta->tid_data[i].seq_number = seq;
-		}
-	}
-out:
-	/* re-enqueue (or drop) all packets */
-	while (!skb_queue_empty(&mvm->d0i3_tx)) {
-		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
-
-		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
-			ieee80211_free_txskb(mvm->hw, skb);
-
-		/* if the skb_queue is not empty, we need to wake queues */
-		wake_queues = true;
-	}
-	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-	wake_up(&mvm->d0i3_exit_waitq);
-	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-	if (wake_queues)
-		ieee80211_wake_queues(mvm->hw);
-
-	spin_unlock_bh(&mvm->d0i3_tx_lock);
-}
-
-static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
-{
-	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
-	struct iwl_host_cmd get_status_cmd = {
-		.id = WOWLAN_GET_STATUSES,
-		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
-	};
-	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
-		.mvm = mvm,
-	};
-
-	struct iwl_wowlan_status *status;
-	int ret;
-	u32 wakeup_reasons = 0;
-	__le16 *qos_seq = NULL;
-
-	mutex_lock(&mvm->mutex);
-	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
-	if (ret)
-		goto out;
-
-	status = (void *)get_status_cmd.resp_pkt->data;
-	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
-	qos_seq = status->qos_seq_ctr;
-
-	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
-
-	iter_data.wakeup_reasons = wakeup_reasons;
-	iter_data.status = status;
-	ieee80211_iterate_active_interfaces(mvm->hw,
-					    IEEE80211_IFACE_ITER_NORMAL,
-					    iwl_mvm_d0i3_exit_work_iter,
-					    &iter_data);
-out:
-	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
-
-	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
-		       wakeup_reasons);
-
-	/* qos_seq might point inside resp_pkt, so free it only now */
-	if (get_status_cmd.resp_pkt)
-		iwl_free_resp(&get_status_cmd);
-
-	/* the FW might have updated the regdomain */
-	iwl_mvm_update_changed_regdom(mvm);
-
-	iwl_mvm_resume_tcm(mvm);
-	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
-	mutex_unlock(&mvm->mutex);
-}
-
-int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
-{
-	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
-		    CMD_WAKE_UP_TRANS;
-	int ret;
-
-	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
-
-	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
-		return -EINVAL;
-
-	mutex_lock(&mvm->d0i3_suspend_mutex);
-	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
-		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
-		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
-		mutex_unlock(&mvm->d0i3_suspend_mutex);
-		return 0;
-	}
-	mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
-	if (ret)
-		goto out;
-
-	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-						   IEEE80211_IFACE_ITER_NORMAL,
-						   iwl_mvm_exit_d0i3_iterator,
-						   mvm);
-out:
-	schedule_work(&mvm->d0i3_exit_work);
-	return ret;
-}
-
-int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
-{
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
-	return _iwl_mvm_exit_d0i3(mvm);
-}
-
 #define IWL_MVM_COMMON_OPS \
 	/* these could be differentiated */ \
 	.async_cb = iwl_mvm_async_cb, \
@@ -1696 +1379 @@
 	.nic_error = iwl_mvm_nic_error, \
 	.cmd_queue_full = iwl_mvm_cmd_queue_full, \
 	.nic_config = iwl_mvm_nic_config, \
-	.enter_d0i3 = iwl_mvm_enter_d0i3, \
-	.exit_d0i3 = iwl_mvm_exit_d0i3, \
 	/* as we only register one, these MUST be common! */ \
 	.start = iwl_op_mode_mvm_start, \
 	.stop = iwl_op_mode_mvm_stop
@@ -1720 +1401 @@
 		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
 					 RX_QUEUES_NOTIFICATION)))
-		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+		iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
 	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
 }
---|