| .. | .. |
|---|
| 5 | 5 | * |
|---|
| 6 | 6 | * GPL LICENSE SUMMARY |
|---|
| 7 | 7 | * |
|---|
| 8 | | - * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. |
|---|
| 8 | + * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved. |
|---|
| 9 | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
|---|
| 10 | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
|---|
| 11 | | - * Copyright(c) 2018 Intel Corporation |
|---|
| 12 | 11 | * |
|---|
| 13 | 12 | * This program is free software; you can redistribute it and/or modify |
|---|
| 14 | 13 | * it under the terms of version 2 of the GNU General Public License as |
|---|
| .. | .. |
|---|
| 19 | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|---|
| 20 | 19 | * General Public License for more details. |
|---|
| 21 | 20 | * |
|---|
| 22 | | - * You should have received a copy of the GNU General Public License |
|---|
| 23 | | - * along with this program; if not, write to the Free Software |
|---|
| 24 | | - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
|---|
| 25 | | - * USA |
|---|
| 26 | | - * |
|---|
| 27 | 21 | * The full GNU General Public License is included in this distribution |
|---|
| 28 | 22 | * in the file called COPYING. |
|---|
| 29 | 23 | * |
|---|
| .. | .. |
|---|
| 33 | 27 | * |
|---|
| 34 | 28 | * BSD LICENSE |
|---|
| 35 | 29 | * |
|---|
| 36 | | - * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. |
|---|
| 30 | + * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved. |
|---|
| 37 | 31 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
|---|
| 38 | 32 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
|---|
| 39 | | - * Copyright(c) 2018 Intel Corporation |
|---|
| 40 | 33 | * All rights reserved. |
|---|
| 41 | 34 | * |
|---|
| 42 | 35 | * Redistribution and use in source and binary forms, with or without |
|---|
| .. | .. |
|---|
| 92 | 85 | int sta_id; |
|---|
| 93 | 86 | u32 reserved_ids = 0; |
|---|
| 94 | 87 | |
|---|
| 95 | | - BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32); |
|---|
| 88 | + BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32); |
|---|
| 96 | 89 | WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); |
|---|
| 97 | 90 | |
|---|
| 98 | 91 | lockdep_assert_held(&mvm->mutex); |
|---|
| .. | .. |
|---|
| 102 | 95 | reserved_ids = BIT(0); |
|---|
| 103 | 96 | |
|---|
| 104 | 97 | /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ |
|---|
| 105 | | - for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) { |
|---|
| 98 | + for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) { |
|---|
| 106 | 99 | if (BIT(sta_id) & reserved_ids) |
|---|
| 107 | 100 | continue; |
|---|
| 108 | 101 | |
|---|
| .. | .. |
|---|
| 203 | 196 | mpdu_dens = sta->ht_cap.ampdu_density; |
|---|
| 204 | 197 | } |
|---|
| 205 | 198 | |
|---|
| 199 | + |
|---|
| 206 | 200 | if (sta->vht_cap.vht_supported) { |
|---|
| 207 | 201 | agg_size = sta->vht_cap.cap & |
|---|
| 208 | 202 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; |
|---|
| .. | .. |
|---|
| 211 | 205 | } else if (sta->ht_cap.ht_supported) { |
|---|
| 212 | 206 | agg_size = sta->ht_cap.ampdu_factor; |
|---|
| 213 | 207 | } |
|---|
| 208 | + |
|---|
| 209 | + /* D6.0 10.12.2 A-MPDU length limit rules |
|---|
| 210 | + * A STA indicates the maximum length of the A-MPDU preEOF padding |
|---|
| 211 | + * that it can receive in an HE PPDU in the Maximum A-MPDU Length |
|---|
| 212 | + * Exponent field in its HT Capabilities, VHT Capabilities, |
|---|
| 213 | + * and HE 6 GHz Band Capabilities elements (if present) and the |
|---|
| 214 | + * Maximum A-MPDU Length Exponent Extension field in its HE |
|---|
| 215 | + * Capabilities element |
|---|
| 216 | + */ |
|---|
| 217 | + if (sta->he_cap.has_he) |
|---|
| 218 | + agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3], |
|---|
| 219 | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); |
|---|
| 220 | + |
|---|
| 221 | + /* Limit to max A-MPDU supported by FW */ |
|---|
| 222 | + if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT)) |
|---|
| 223 | + agg_size = (STA_FLG_MAX_AGG_SIZE_4M >> |
|---|
| 224 | + STA_FLG_MAX_AGG_SIZE_SHIFT); |
|---|
| 214 | 225 | |
|---|
| 215 | 226 | add_sta_cmd.station_flags |= |
|---|
| 216 | 227 | cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); |
|---|
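Reviewer note on the agg_size hunk above: the summed exponent is what gets shifted in by STA_FLG_MAX_AGG_SIZE_SHIFT, and each increment doubles the A-MPDU byte limit. A minimal sketch of the mapping, assuming the standard VHT/HE formula of 2^(13 + exp) - 1 octets (hypothetical helper, not part of this patch):

```c
/* Hypothetical helper, for illustration only - not in this patch.
 * VHT/HE define the maximum A-MPDU length as 2^(13 + exp) - 1 octets,
 * so the STA_FLG_MAX_AGG_SIZE_4M cap applied above corresponds to
 * exp == 9 (2^22 - 1, roughly 4 MiB).
 */
static inline u32 iwl_ampdu_limit_bytes(u8 exp)
{
	return (1U << (13 + exp)) - 1;
}
```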
| .. | .. |
|---|
| 311 | 322 | struct iwl_mvm_sta *mvmsta; |
|---|
| 312 | 323 | u32 status; |
|---|
| 313 | 324 | u8 sta_id; |
|---|
| 314 | | - int ret; |
|---|
| 315 | 325 | |
|---|
| 316 | 326 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 317 | 327 | return -EINVAL; |
|---|
| 318 | 328 | |
|---|
| 319 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 320 | 329 | sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 321 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 322 | 330 | |
|---|
| 323 | 331 | rcu_read_lock(); |
|---|
| 324 | 332 | |
|---|
| .. | .. |
|---|
| 348 | 356 | |
|---|
| 349 | 357 | /* Notify FW of queue removal from the STA queues */ |
|---|
| 350 | 358 | status = ADD_STA_SUCCESS; |
|---|
| 351 | | - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, |
|---|
| 352 | | - iwl_mvm_add_sta_cmd_size(mvm), |
|---|
| 353 | | - &cmd, &status); |
|---|
| 359 | + return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, |
|---|
| 360 | + iwl_mvm_add_sta_cmd_size(mvm), |
|---|
| 361 | + &cmd, &status); |
|---|
| 362 | +} |
|---|
| 354 | 363 | |
|---|
| 364 | +static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
|---|
| 365 | + u16 *queueptr, u8 tid, u8 flags) |
|---|
| 366 | +{ |
|---|
| 367 | + int queue = *queueptr; |
|---|
| 368 | + struct iwl_scd_txq_cfg_cmd cmd = { |
|---|
| 369 | + .scd_queue = queue, |
|---|
| 370 | + .action = SCD_CFG_DISABLE_QUEUE, |
|---|
| 371 | + }; |
|---|
| 372 | + int ret; |
|---|
| 373 | + |
|---|
| 374 | + if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 375 | + iwl_trans_txq_free(mvm->trans, queue); |
|---|
| 376 | + *queueptr = IWL_MVM_INVALID_QUEUE; |
|---|
| 377 | + |
|---|
| 378 | + return 0; |
|---|
| 379 | + } |
|---|
| 380 | + |
|---|
| 381 | + if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) |
|---|
| 382 | + return 0; |
|---|
| 383 | + |
|---|
| 384 | + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); |
|---|
| 385 | + |
|---|
| 386 | + cmd.action = mvm->queue_info[queue].tid_bitmap ? |
|---|
| 387 | + SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; |
|---|
| 388 | + if (cmd.action == SCD_CFG_DISABLE_QUEUE) |
|---|
| 389 | + mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; |
|---|
| 390 | + |
|---|
| 391 | + IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 392 | + "Disabling TXQ #%d tids=0x%x\n", |
|---|
| 393 | + queue, |
|---|
| 394 | + mvm->queue_info[queue].tid_bitmap); |
|---|
| 395 | + |
|---|
| 396 | + /* If the queue is still enabled - nothing left to do in this func */ |
|---|
| 397 | + if (cmd.action == SCD_CFG_ENABLE_QUEUE) |
|---|
| 398 | + return 0; |
|---|
| 399 | + |
|---|
| 400 | + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 401 | + cmd.tid = mvm->queue_info[queue].txq_tid; |
|---|
| 402 | + |
|---|
| 403 | + /* Make sure queue info is correct even though we overwrite it */ |
|---|
| 404 | + WARN(mvm->queue_info[queue].tid_bitmap, |
|---|
| 405 | + "TXQ #%d info out-of-sync - tids=0x%x\n", |
|---|
| 406 | + queue, mvm->queue_info[queue].tid_bitmap); |
|---|
| 407 | + |
|---|
| 408 | + /* If we are here - the queue is freed and we can zero out these vals */ |
|---|
| 409 | + mvm->queue_info[queue].tid_bitmap = 0; |
|---|
| 410 | + |
|---|
| 411 | + if (sta) { |
|---|
| 412 | + struct iwl_mvm_txq *mvmtxq = |
|---|
| 413 | + iwl_mvm_txq_from_tid(sta, tid); |
|---|
| 414 | + |
|---|
| 415 | + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 416 | + } |
|---|
| 417 | + |
|---|
| 418 | + /* Regardless if this is a reserved TXQ for a STA - mark it as false */ |
|---|
| 419 | + mvm->queue_info[queue].reserved = false; |
|---|
| 420 | + |
|---|
| 421 | + iwl_trans_txq_disable(mvm->trans, queue, false); |
|---|
| 422 | + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, |
|---|
| 423 | + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); |
|---|
| 424 | + |
|---|
| 425 | + if (ret) |
|---|
| 426 | + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", |
|---|
| 427 | + queue, ret); |
|---|
| 355 | 428 | return ret; |
|---|
| 356 | 429 | } |
|---|
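A hedged usage sketch for the reworked helper: the queue id is passed by pointer because on the new-TX-API (TVQM) path the transport queue is freed and the caller's copy is invalidated, while on the legacy SCD path only the shared bookkeeping is updated. The surrounding context is illustrative, not a verbatim excerpt:

```c
/* Illustrative caller, assuming an mvmsta/tid context like the ones
 * later in this patch; keep a local copy so the helper may invalidate
 * it on the TVQM path.
 */
u16 queue_tmp = mvmsta->tid_data[tid].txq_id;
int ret = iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
/* on TVQM, queue_tmp is now IWL_MVM_INVALID_QUEUE */
```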
| 357 | 430 | |
|---|
| .. | .. |
|---|
| 369 | 442 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 370 | 443 | return -EINVAL; |
|---|
| 371 | 444 | |
|---|
| 372 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 373 | 445 | sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 374 | 446 | tid_bitmap = mvm->queue_info[queue].tid_bitmap; |
|---|
| 375 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 376 | 447 | |
|---|
| 377 | 448 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], |
|---|
| 378 | 449 | lockdep_is_held(&mvm->mutex)); |
|---|
| .. | .. |
|---|
| 411 | 482 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 412 | 483 | return -EINVAL; |
|---|
| 413 | 484 | |
|---|
| 414 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 415 | 485 | sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 416 | 486 | tid_bitmap = mvm->queue_info[queue].tid_bitmap; |
|---|
| 417 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 418 | 487 | |
|---|
| 419 | 488 | rcu_read_lock(); |
|---|
| 420 | 489 | |
|---|
| .. | .. |
|---|
| 430 | 499 | spin_lock_bh(&mvmsta->lock); |
|---|
| 431 | 500 | /* Unmap MAC queues and TIDs from this queue */ |
|---|
| 432 | 501 | for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { |
|---|
| 502 | + struct iwl_mvm_txq *mvmtxq = |
|---|
| 503 | + iwl_mvm_txq_from_tid(sta, tid); |
|---|
| 504 | + |
|---|
| 433 | 505 | if (mvmsta->tid_data[tid].state == IWL_AGG_ON) |
|---|
| 434 | 506 | disable_agg_tids |= BIT(tid); |
|---|
| 435 | 507 | mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 508 | + |
|---|
| 509 | + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 436 | 510 | } |
|---|
| 437 | 511 | |
|---|
| 438 | 512 | mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ |
|---|
| .. | .. |
|---|
| 454 | 528 | } |
|---|
| 455 | 529 | |
|---|
| 456 | 530 | static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, |
|---|
| 457 | | - bool same_sta) |
|---|
| 531 | + struct ieee80211_sta *old_sta, |
|---|
| 532 | + u8 new_sta_id) |
|---|
| 458 | 533 | { |
|---|
| 459 | 534 | struct iwl_mvm_sta *mvmsta; |
|---|
| 460 | | - u8 txq_curr_ac, sta_id, tid; |
|---|
| 535 | + u8 sta_id, tid; |
|---|
| 461 | 536 | unsigned long disable_agg_tids = 0; |
|---|
| 537 | + bool same_sta; |
|---|
| 538 | + u16 queue_tmp = queue; |
|---|
| 462 | 539 | int ret; |
|---|
| 463 | 540 | |
|---|
| 464 | 541 | lockdep_assert_held(&mvm->mutex); |
|---|
| .. | .. |
|---|
| 466 | 543 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 467 | 544 | return -EINVAL; |
|---|
| 468 | 545 | |
|---|
| 469 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 470 | | - txq_curr_ac = mvm->queue_info[queue].mac80211_ac; |
|---|
| 471 | 546 | sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 472 | 547 | tid = mvm->queue_info[queue].txq_tid; |
|---|
| 473 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 548 | + |
|---|
| 549 | + same_sta = sta_id == new_sta_id; |
|---|
| 474 | 550 | |
|---|
| 475 | 551 | mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); |
|---|
| 476 | 552 | if (WARN_ON(!mvmsta)) |
|---|
| .. | .. |
|---|
| 482 | 558 | iwl_mvm_invalidate_sta_queue(mvm, queue, |
|---|
| 483 | 559 | disable_agg_tids, false); |
|---|
| 484 | 560 | |
|---|
| 485 | | - ret = iwl_mvm_disable_txq(mvm, queue, |
|---|
| 486 | | - mvmsta->vif->hw_queue[txq_curr_ac], |
|---|
| 487 | | - tid, 0); |
|---|
| 561 | + ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0); |
|---|
| 488 | 562 | if (ret) { |
|---|
| 489 | | - /* Re-mark the inactive queue as inactive */ |
|---|
| 490 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 491 | | - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; |
|---|
| 492 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 493 | 563 | IWL_ERR(mvm, |
|---|
| 494 | 564 | "Failed to free inactive queue %d (ret=%d)\n", |
|---|
| 495 | 565 | queue, ret); |
|---|
| .. | .. |
|---|
| 511 | 581 | u8 ac_to_queue[IEEE80211_NUM_ACS]; |
|---|
| 512 | 582 | int i; |
|---|
| 513 | 583 | |
|---|
| 514 | | - lockdep_assert_held(&mvm->queue_info_lock); |
|---|
| 584 | + /* |
|---|
| 585 | + * This protects us against grabbing a queue that's being reconfigured |
|---|
| 586 | + * by the inactivity checker. |
|---|
| 587 | + */ |
|---|
| 588 | + lockdep_assert_held(&mvm->mutex); |
|---|
| 589 | + |
|---|
| 515 | 590 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 516 | 591 | return -EINVAL; |
|---|
| 517 | 592 | |
|---|
| .. | .. |
|---|
| 522 | 597 | /* Only DATA queues can be shared */ |
|---|
| 523 | 598 | if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && |
|---|
| 524 | 599 | i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) |
|---|
| 525 | | - continue; |
|---|
| 526 | | - |
|---|
| 527 | | - /* Don't try and take queues being reconfigured */ |
|---|
| 528 | | - if (mvm->queue_info[queue].status == |
|---|
| 529 | | - IWL_MVM_QUEUE_RECONFIGURING) |
|---|
| 530 | 600 | continue; |
|---|
| 531 | 601 | |
|---|
| 532 | 602 | ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; |
|---|
| .. | .. |
|---|
| 569 | 639 | return -ENOSPC; |
|---|
| 570 | 640 | } |
|---|
| 571 | 641 | |
|---|
| 572 | | - /* Make sure the queue isn't in the middle of being reconfigured */ |
|---|
| 573 | | - if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) { |
|---|
| 574 | | - IWL_ERR(mvm, |
|---|
| 575 | | - "TXQ %d is in the middle of re-config - try again\n", |
|---|
| 576 | | - queue); |
|---|
| 577 | | - return -EBUSY; |
|---|
| 578 | | - } |
|---|
| 579 | | - |
|---|
| 580 | 642 | return queue; |
|---|
| 581 | 643 | } |
|---|
| 582 | 644 | |
|---|
| .. | .. |
|---|
| 586 | 648 | * in such a case, otherwise - if no redirection required - it does nothing, |
|---|
| 587 | 649 | * unless the %force param is true. |
|---|
| 588 | 650 | */ |
|---|
| 589 | | -int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, |
|---|
| 590 | | - int ac, int ssn, unsigned int wdg_timeout, |
|---|
| 591 | | - bool force) |
|---|
| 651 | +static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, |
|---|
| 652 | + int ac, int ssn, unsigned int wdg_timeout, |
|---|
| 653 | + bool force, struct iwl_mvm_txq *txq) |
|---|
| 592 | 654 | { |
|---|
| 593 | 655 | struct iwl_scd_txq_cfg_cmd cmd = { |
|---|
| 594 | 656 | .scd_queue = queue, |
|---|
| 595 | 657 | .action = SCD_CFG_DISABLE_QUEUE, |
|---|
| 596 | 658 | }; |
|---|
| 597 | 659 | bool shared_queue; |
|---|
| 598 | | - unsigned long mq; |
|---|
| 599 | 660 | int ret; |
|---|
| 600 | 661 | |
|---|
| 601 | 662 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| .. | .. |
|---|
| 609 | 670 | * value 3 and VO with value 0, so to check if ac X is lower than ac Y |
|---|
| 610 | 671 | * we need to check if the numerical value of X is LARGER than of Y. |
|---|
| 611 | 672 | */ |
|---|
| 612 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 613 | 673 | if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { |
|---|
| 614 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 615 | | - |
|---|
| 616 | 674 | IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 617 | 675 | "No redirection needed on TXQ #%d\n", |
|---|
| 618 | 676 | queue); |
|---|
| .. | .. |
|---|
| 622 | 680 | cmd.sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 623 | 681 | cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; |
|---|
| 624 | 682 | cmd.tid = mvm->queue_info[queue].txq_tid; |
|---|
| 625 | | - mq = mvm->hw_queue_to_mac80211[queue]; |
|---|
| 626 | | - shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); |
|---|
| 627 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 683 | + shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; |
|---|
| 628 | 684 | |
|---|
| 629 | 685 | IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", |
|---|
| 630 | 686 | queue, iwl_mvm_ac_to_tx_fifo[ac]); |
|---|
| 631 | 687 | |
|---|
| 632 | | - /* Stop MAC queues and wait for this queue to empty */ |
|---|
| 633 | | - iwl_mvm_stop_mac_queues(mvm, mq); |
|---|
| 688 | + /* Stop the queue and wait for it to empty */ |
|---|
| 689 | + txq->stopped = true; |
|---|
| 690 | + |
|---|
| 634 | 691 | ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); |
|---|
| 635 | 692 | if (ret) { |
|---|
| 636 | 693 | IWL_ERR(mvm, "Error draining queue %d before reconfig\n", |
|---|
| .. | .. |
|---|
| 650 | 707 | iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); |
|---|
| 651 | 708 | |
|---|
| 652 | 709 | /* Update the TID "owner" of the queue */ |
|---|
| 653 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 654 | 710 | mvm->queue_info[queue].txq_tid = tid; |
|---|
| 655 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 656 | 711 | |
|---|
| 657 | 712 | /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ |
|---|
| 658 | 713 | |
|---|
| .. | .. |
|---|
| 661 | 716 | cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); |
|---|
| 662 | 717 | |
|---|
| 663 | 718 | /* Update AC marking of the queue */ |
|---|
| 664 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 665 | 719 | mvm->queue_info[queue].mac80211_ac = ac; |
|---|
| 666 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 667 | 720 | |
|---|
| 668 | 721 | /* |
|---|
| 669 | 722 | * Mark queue as shared in transport if shared |
|---|
| .. | .. |
|---|
| 675 | 728 | iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); |
|---|
| 676 | 729 | |
|---|
| 677 | 730 | out: |
|---|
| 678 | | - /* Continue using the MAC queues */ |
|---|
| 679 | | - iwl_mvm_start_mac_queues(mvm, mq); |
|---|
| 731 | + /* Continue using the queue */ |
|---|
| 732 | + txq->stopped = false; |
|---|
| 680 | 733 | |
|---|
| 681 | 734 | return ret; |
|---|
| 735 | +} |
|---|
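The early-exit in the function above relies on mac80211's inverted AC numbering, which the in-code comment spells out; a small illustrative helper encoding the same comparison (not part of the driver):

```c
/* mac80211 numbers the ACs VO = 0, VI = 1, BE = 2, BK = 3, so a
 * numerically LARGER value means LOWER priority. The "ac <= current
 * && !force" test above therefore reads: the target AC is not lower
 * priority than the queue's current AC, so no redirection is needed.
 */
static inline bool iwl_ac_is_lower_prio(int ac_x, int ac_y)
{
	return ac_x > ac_y;	/* illustration only */
}
```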
| 736 | + |
|---|
| 737 | +static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, |
|---|
| 738 | + u8 minq, u8 maxq) |
|---|
| 739 | +{ |
|---|
| 740 | + int i; |
|---|
| 741 | + |
|---|
| 742 | + lockdep_assert_held(&mvm->mutex); |
|---|
| 743 | + |
|---|
| 744 | + if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, |
|---|
| 745 | + "max queue %d >= num_of_queues (%d)", maxq, |
|---|
| 746 | + mvm->trans->trans_cfg->base_params->num_of_queues)) |
|---|
| 747 | + maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; |
|---|
| 748 | + |
|---|
| 749 | + /* This should not be hit with new TX path */ |
|---|
| 750 | + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 751 | + return -ENOSPC; |
|---|
| 752 | + |
|---|
| 753 | + /* Start by looking for a free queue */ |
|---|
| 754 | + for (i = minq; i <= maxq; i++) |
|---|
| 755 | + if (mvm->queue_info[i].tid_bitmap == 0 && |
|---|
| 756 | + mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) |
|---|
| 757 | + return i; |
|---|
| 758 | + |
|---|
| 759 | + return -ENOSPC; |
|---|
| 760 | +} |
|---|
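Usage pattern, mirroring iwl_mvm_sta_alloc_queue further down in this patch: for a management TID, probe the DQA management range first, then fall back to the data range:

```c
/* Excerpt-style sketch of the two-range lookup used below */
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
				IWL_MVM_DQA_MIN_MGMT_QUEUE,
				IWL_MVM_DQA_MAX_MGMT_QUEUE);
if (queue < 0)
	queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					IWL_MVM_DQA_MIN_DATA_QUEUE,
					IWL_MVM_DQA_MAX_DATA_QUEUE);
```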
| 761 | + |
|---|
| 762 | +static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, |
|---|
| 763 | + u8 sta_id, u8 tid, unsigned int timeout) |
|---|
| 764 | +{ |
|---|
| 765 | + int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, |
|---|
| 766 | + mvm->trans->cfg->min_256_ba_txq_size); |
|---|
| 767 | + |
|---|
| 768 | + if (tid == IWL_MAX_TID_COUNT) { |
|---|
| 769 | + tid = IWL_MGMT_TID; |
|---|
| 770 | + size = max_t(u32, IWL_MGMT_QUEUE_SIZE, |
|---|
| 771 | + mvm->trans->cfg->min_txq_size); |
|---|
| 772 | + } |
|---|
| 773 | + |
|---|
| 774 | + do { |
|---|
| 775 | + __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE); |
|---|
| 776 | + |
|---|
| 777 | + queue = iwl_trans_txq_alloc(mvm->trans, enable, |
|---|
| 778 | + sta_id, tid, SCD_QUEUE_CFG, |
|---|
| 779 | + size, timeout); |
|---|
| 780 | + |
|---|
| 781 | + if (queue < 0) |
|---|
| 782 | + IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 783 | + "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n", |
|---|
| 784 | + size, sta_id, tid, queue); |
|---|
| 785 | + size /= 2; |
|---|
| 786 | + } while (queue < 0 && size >= 16); |
|---|
| 787 | + |
|---|
| 788 | + if (queue < 0) |
|---|
| 789 | + return queue; |
|---|
| 790 | + |
|---|
| 791 | + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", |
|---|
| 792 | + queue, sta_id, tid); |
|---|
| 793 | + |
|---|
| 794 | + return queue; |
|---|
| 682 | 795 | } |
|---|
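The retry loop halves the requested ring size on each allocation failure, down to a floor of 16 entries; a hedged trace, assuming an initial size of 256 (the real starting size depends on IWL_DEFAULT_QUEUE_SIZE and the config minimums):

```c
/* Hypothetical failure trace if the transport keeps returning -ENOMEM:
 *   iwl_trans_txq_alloc(..., size = 256) -> -ENOMEM
 *   iwl_trans_txq_alloc(..., size = 128) -> -ENOMEM
 *   iwl_trans_txq_alloc(..., size =  64) -> -ENOMEM
 *   iwl_trans_txq_alloc(..., size =  32) -> -ENOMEM
 *   iwl_trans_txq_alloc(..., size =  16) -> -ENOMEM
 * size then drops below 16, the loop exits, and the last error code
 * is returned to the caller.
 */
```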
| 683 | 796 | |
|---|
| 684 | 797 | static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, |
|---|
| .. | .. |
|---|
| 686 | 799 | int tid) |
|---|
| 687 | 800 | { |
|---|
| 688 | 801 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 802 | + struct iwl_mvm_txq *mvmtxq = |
|---|
| 803 | + iwl_mvm_txq_from_tid(sta, tid); |
|---|
| 689 | 804 | unsigned int wdg_timeout = |
|---|
| 690 | 805 | iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); |
|---|
| 691 | | - u8 mac_queue = mvmsta->vif->hw_queue[ac]; |
|---|
| 692 | 806 | int queue = -1; |
|---|
| 693 | 807 | |
|---|
| 694 | 808 | lockdep_assert_held(&mvm->mutex); |
|---|
| .. | .. |
|---|
| 696 | 810 | IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 697 | 811 | "Allocating queue for sta %d on tid %d\n", |
|---|
| 698 | 812 | mvmsta->sta_id, tid); |
|---|
| 699 | | - queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid, |
|---|
| 700 | | - wdg_timeout); |
|---|
| 813 | + queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); |
|---|
| 701 | 814 | if (queue < 0) |
|---|
| 702 | 815 | return queue; |
|---|
| 816 | + |
|---|
| 817 | + mvmtxq->txq_id = queue; |
|---|
| 818 | + mvm->tvqm_info[queue].txq_tid = tid; |
|---|
| 819 | + mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; |
|---|
| 703 | 820 | |
|---|
| 704 | 821 | IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); |
|---|
| 705 | 822 | |
|---|
| 706 | 823 | spin_lock_bh(&mvmsta->lock); |
|---|
| 707 | 824 | mvmsta->tid_data[tid].txq_id = queue; |
|---|
| 708 | | - mvmsta->tid_data[tid].is_tid_active = true; |
|---|
| 709 | 825 | spin_unlock_bh(&mvmsta->lock); |
|---|
| 710 | 826 | |
|---|
| 711 | 827 | return 0; |
|---|
| 712 | 828 | } |
|---|
| 713 | 829 | |
|---|
| 714 | | -static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, |
|---|
| 715 | | - struct ieee80211_sta *sta, u8 ac, int tid, |
|---|
| 716 | | - struct ieee80211_hdr *hdr) |
|---|
| 830 | +static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, |
|---|
| 831 | + struct ieee80211_sta *sta, |
|---|
| 832 | + int queue, u8 sta_id, u8 tid) |
|---|
| 717 | 833 | { |
|---|
| 718 | | - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 719 | | - struct iwl_trans_txq_scd_cfg cfg = { |
|---|
| 720 | | - .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), |
|---|
| 721 | | - .sta_id = mvmsta->sta_id, |
|---|
| 722 | | - .tid = tid, |
|---|
| 723 | | - .frame_limit = IWL_FRAME_LIMIT, |
|---|
| 724 | | - }; |
|---|
| 725 | | - unsigned int wdg_timeout = |
|---|
| 726 | | - iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); |
|---|
| 727 | | - u8 mac_queue = mvmsta->vif->hw_queue[ac]; |
|---|
| 728 | | - int queue = -1; |
|---|
| 729 | | - bool using_inactive_queue = false, same_sta = false; |
|---|
| 730 | | - unsigned long disable_agg_tids = 0; |
|---|
| 731 | | - enum iwl_mvm_agg_state queue_state; |
|---|
| 732 | | - bool shared_queue = false, inc_ssn; |
|---|
| 733 | | - int ssn; |
|---|
| 734 | | - unsigned long tfd_queue_mask; |
|---|
| 735 | | - int ret; |
|---|
| 834 | + bool enable_queue = true; |
|---|
| 736 | 835 | |
|---|
| 737 | | - lockdep_assert_held(&mvm->mutex); |
|---|
| 738 | | - |
|---|
| 739 | | - if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 740 | | - return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); |
|---|
| 741 | | - |
|---|
| 742 | | - spin_lock_bh(&mvmsta->lock); |
|---|
| 743 | | - tfd_queue_mask = mvmsta->tfd_queue_msk; |
|---|
| 744 | | - spin_unlock_bh(&mvmsta->lock); |
|---|
| 745 | | - |
|---|
| 746 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 747 | | - |
|---|
| 748 | | - /* |
|---|
| 749 | | - * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one |
|---|
| 750 | | - * exists |
|---|
| 751 | | - */ |
|---|
| 752 | | - if (!ieee80211_is_data_qos(hdr->frame_control) || |
|---|
| 753 | | - ieee80211_is_qos_nullfunc(hdr->frame_control)) { |
|---|
| 754 | | - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, |
|---|
| 755 | | - IWL_MVM_DQA_MIN_MGMT_QUEUE, |
|---|
| 756 | | - IWL_MVM_DQA_MAX_MGMT_QUEUE); |
|---|
| 757 | | - if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) |
|---|
| 758 | | - IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", |
|---|
| 759 | | - queue); |
|---|
| 760 | | - |
|---|
| 761 | | - /* If no such queue is found, we'll use a DATA queue instead */ |
|---|
| 836 | + /* Make sure this TID isn't already enabled */ |
|---|
| 837 | + if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { |
|---|
| 838 | + IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", |
|---|
| 839 | + queue, tid); |
|---|
| 840 | + return false; |
|---|
| 762 | 841 | } |
|---|
| 763 | 842 | |
|---|
| 764 | | - if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && |
|---|
| 765 | | - (mvm->queue_info[mvmsta->reserved_queue].status == |
|---|
| 766 | | - IWL_MVM_QUEUE_RESERVED || |
|---|
| 767 | | - mvm->queue_info[mvmsta->reserved_queue].status == |
|---|
| 768 | | - IWL_MVM_QUEUE_INACTIVE)) { |
|---|
| 769 | | - queue = mvmsta->reserved_queue; |
|---|
| 770 | | - mvm->queue_info[queue].reserved = true; |
|---|
| 771 | | - IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); |
|---|
| 843 | + /* Update mappings and refcounts */ |
|---|
| 844 | + if (mvm->queue_info[queue].tid_bitmap) |
|---|
| 845 | + enable_queue = false; |
|---|
| 846 | + |
|---|
| 847 | + mvm->queue_info[queue].tid_bitmap |= BIT(tid); |
|---|
| 848 | + mvm->queue_info[queue].ra_sta_id = sta_id; |
|---|
| 849 | + |
|---|
| 850 | + if (enable_queue) { |
|---|
| 851 | + if (tid != IWL_MAX_TID_COUNT) |
|---|
| 852 | + mvm->queue_info[queue].mac80211_ac = |
|---|
| 853 | + tid_to_mac80211_ac[tid]; |
|---|
| 854 | + else |
|---|
| 855 | + mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; |
|---|
| 856 | + |
|---|
| 857 | + mvm->queue_info[queue].txq_tid = tid; |
|---|
| 772 | 858 | } |
|---|
| 773 | 859 | |
|---|
| 774 | | - if (queue < 0) |
|---|
| 775 | | - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, |
|---|
| 776 | | - IWL_MVM_DQA_MIN_DATA_QUEUE, |
|---|
| 777 | | - IWL_MVM_DQA_MAX_DATA_QUEUE); |
|---|
| 860 | + if (sta) { |
|---|
| 861 | + struct iwl_mvm_txq *mvmtxq = |
|---|
| 862 | + iwl_mvm_txq_from_tid(sta, tid); |
|---|
| 778 | 863 | |
|---|
| 779 | | - /* |
|---|
| 780 | | - * Check if this queue is already allocated but inactive. |
|---|
| 781 | | - * In such a case, we'll need to first free this queue before enabling |
|---|
| 782 | | - * it again, so we'll mark it as reserved to make sure no new traffic |
|---|
| 783 | | - * arrives on it |
|---|
| 784 | | - */ |
|---|
| 785 | | - if (queue > 0 && |
|---|
| 786 | | - mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { |
|---|
| 787 | | - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; |
|---|
| 788 | | - using_inactive_queue = true; |
|---|
| 789 | | - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; |
|---|
| 790 | | - IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 791 | | - "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", |
|---|
| 792 | | - queue, mvmsta->sta_id, tid); |
|---|
| 793 | | - } |
|---|
| 794 | | - |
|---|
| 795 | | - /* No free queue - we'll have to share */ |
|---|
| 796 | | - if (queue <= 0) { |
|---|
| 797 | | - queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); |
|---|
| 798 | | - if (queue > 0) { |
|---|
| 799 | | - shared_queue = true; |
|---|
| 800 | | - mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; |
|---|
| 801 | | - } |
|---|
| 802 | | - } |
|---|
| 803 | | - |
|---|
| 804 | | - /* |
|---|
| 805 | | - * Mark TXQ as ready, even though it hasn't been fully configured yet, |
|---|
| 806 | | - * to make sure no one else takes it. |
|---|
| 807 | | - * This will allow avoiding re-acquiring the lock at the end of the |
|---|
| 808 | | - * configuration. On error we'll mark it back as free. |
|---|
| 809 | | - */ |
|---|
| 810 | | - if ((queue > 0) && !shared_queue) |
|---|
| 811 | | - mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; |
|---|
| 812 | | - |
|---|
| 813 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 814 | | - |
|---|
| 815 | | - /* This shouldn't happen - out of queues */ |
|---|
| 816 | | - if (WARN_ON(queue <= 0)) { |
|---|
| 817 | | - IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", |
|---|
| 818 | | - tid, cfg.sta_id); |
|---|
| 819 | | - return queue; |
|---|
| 820 | | - } |
|---|
| 821 | | - |
|---|
| 822 | | - /* |
|---|
| 823 | | - * Actual en/disablement of aggregations is through the ADD_STA HCMD, |
|---|
| 824 | | - * but for configuring the SCD to send A-MPDUs we need to mark the queue |
|---|
| 825 | | - * as aggregatable. |
|---|
| 826 | | - * Mark all DATA queues as allowing to be aggregated at some point |
|---|
| 827 | | - */ |
|---|
| 828 | | - cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || |
|---|
| 829 | | - queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); |
|---|
| 830 | | - |
|---|
| 831 | | - /* |
|---|
| 832 | | - * If this queue was previously inactive (idle) - we need to free it |
|---|
| 833 | | - * first |
|---|
| 834 | | - */ |
|---|
| 835 | | - if (using_inactive_queue) { |
|---|
| 836 | | - ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); |
|---|
| 837 | | - if (ret) |
|---|
| 838 | | - return ret; |
|---|
| 864 | + mvmtxq->txq_id = queue; |
|---|
| 839 | 865 | } |
|---|
| 840 | 866 | |
|---|
| 841 | 867 | IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 842 | | - "Allocating %squeue #%d to sta %d on tid %d\n", |
|---|
| 843 | | - shared_queue ? "shared " : "", queue, |
|---|
| 844 | | - mvmsta->sta_id, tid); |
|---|
| 868 | + "Enabling TXQ #%d tids=0x%x\n", |
|---|
| 869 | + queue, mvm->queue_info[queue].tid_bitmap); |
|---|
| 845 | 870 | |
|---|
| 846 | | - if (shared_queue) { |
|---|
| 847 | | - /* Disable any open aggs on this queue */ |
|---|
| 848 | | - disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); |
|---|
| 849 | | - |
|---|
| 850 | | - if (disable_agg_tids) { |
|---|
| 851 | | - IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", |
|---|
| 852 | | - queue); |
|---|
| 853 | | - iwl_mvm_invalidate_sta_queue(mvm, queue, |
|---|
| 854 | | - disable_agg_tids, false); |
|---|
| 855 | | - } |
|---|
| 856 | | - } |
|---|
| 857 | | - |
|---|
| 858 | | - ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); |
|---|
| 859 | | - inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, |
|---|
| 860 | | - ssn, &cfg, wdg_timeout); |
|---|
| 861 | | - if (inc_ssn) { |
|---|
| 862 | | - ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; |
|---|
| 863 | | - le16_add_cpu(&hdr->seq_ctrl, 0x10); |
|---|
| 864 | | - } |
|---|
| 865 | | - |
|---|
| 866 | | - /* |
|---|
| 867 | | - * Mark queue as shared in transport if shared |
|---|
| 868 | | - * Note this has to be done after queue enablement because enablement |
|---|
| 869 | | - * can also set this value, and there is no indication there to shared |
|---|
| 870 | | - * queues |
|---|
| 871 | | - */ |
|---|
| 872 | | - if (shared_queue) |
|---|
| 873 | | - iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); |
|---|
| 874 | | - |
|---|
| 875 | | - spin_lock_bh(&mvmsta->lock); |
|---|
| 876 | | - /* |
|---|
| 877 | | - * This looks racy, but it is not. We have only one packet for |
|---|
| 878 | | - * this ra/tid in our Tx path since we stop the Qdisc when we |
|---|
| 879 | | - * need to allocate a new TFD queue. |
|---|
| 880 | | - */ |
|---|
| 881 | | - if (inc_ssn) |
|---|
| 882 | | - mvmsta->tid_data[tid].seq_number += 0x10; |
|---|
| 883 | | - mvmsta->tid_data[tid].txq_id = queue; |
|---|
| 884 | | - mvmsta->tid_data[tid].is_tid_active = true; |
|---|
| 885 | | - mvmsta->tfd_queue_msk |= BIT(queue); |
|---|
| 886 | | - queue_state = mvmsta->tid_data[tid].state; |
|---|
| 887 | | - |
|---|
| 888 | | - if (mvmsta->reserved_queue == queue) |
|---|
| 889 | | - mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; |
|---|
| 890 | | - spin_unlock_bh(&mvmsta->lock); |
|---|
| 891 | | - |
|---|
| 892 | | - if (!shared_queue) { |
|---|
| 893 | | - ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); |
|---|
| 894 | | - if (ret) |
|---|
| 895 | | - goto out_err; |
|---|
| 896 | | - |
|---|
| 897 | | - /* If we need to re-enable aggregations... */ |
|---|
| 898 | | - if (queue_state == IWL_AGG_ON) { |
|---|
| 899 | | - ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); |
|---|
| 900 | | - if (ret) |
|---|
| 901 | | - goto out_err; |
|---|
| 902 | | - } |
|---|
| 903 | | - } else { |
|---|
| 904 | | - /* Redirect queue, if needed */ |
|---|
| 905 | | - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, |
|---|
| 906 | | - wdg_timeout, false); |
|---|
| 907 | | - if (ret) |
|---|
| 908 | | - goto out_err; |
|---|
| 909 | | - } |
|---|
| 910 | | - |
|---|
| 911 | | - return 0; |
|---|
| 912 | | - |
|---|
| 913 | | -out_err: |
|---|
| 914 | | - iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); |
|---|
| 915 | | - |
|---|
| 916 | | - return ret; |
|---|
| 871 | + return enable_queue; |
|---|
| 917 | 872 | } |
|---|
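A hedged reading of the return contract for reviewers: true means this call mapped the first TID onto the queue and the hardware still needs programming; false means the queue was already live, or the TID was already mapped (which is warned as a driver bug):

```c
/* Sketch of the contract as consumed by iwl_mvm_enable_txq() below */
if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
	return false;	/* already live, or bogus double-mapping */
/* first TID on this queue - go program the transport and the SCD */
```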
| 918 | 873 | |
|---|
| 919 | | -static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) |
|---|
| 874 | +static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
|---|
| 875 | + int queue, u16 ssn, |
|---|
| 876 | + const struct iwl_trans_txq_scd_cfg *cfg, |
|---|
| 877 | + unsigned int wdg_timeout) |
|---|
| 878 | +{ |
|---|
| 879 | + struct iwl_scd_txq_cfg_cmd cmd = { |
|---|
| 880 | + .scd_queue = queue, |
|---|
| 881 | + .action = SCD_CFG_ENABLE_QUEUE, |
|---|
| 882 | + .window = cfg->frame_limit, |
|---|
| 883 | + .sta_id = cfg->sta_id, |
|---|
| 884 | + .ssn = cpu_to_le16(ssn), |
|---|
| 885 | + .tx_fifo = cfg->fifo, |
|---|
| 886 | + .aggregate = cfg->aggregate, |
|---|
| 887 | + .tid = cfg->tid, |
|---|
| 888 | + }; |
|---|
| 889 | + bool inc_ssn; |
|---|
| 890 | + |
|---|
| 891 | + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 892 | + return false; |
|---|
| 893 | + |
|---|
| 894 | + /* Send the enabling command if we need to */ |
|---|
| 895 | + if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) |
|---|
| 896 | + return false; |
|---|
| 897 | + |
|---|
| 898 | + inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, |
|---|
| 899 | + NULL, wdg_timeout); |
|---|
| 900 | + if (inc_ssn) |
|---|
| 901 | + le16_add_cpu(&cmd.ssn, 1); |
|---|
| 902 | + |
|---|
| 903 | + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), |
|---|
| 904 | + "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); |
|---|
| 905 | + |
|---|
| 906 | + return inc_ssn; |
|---|
| 907 | +} |
|---|
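When the transport reports that the SSN had to be advanced, the SCD command is patched before being sent, and the caller must advance its 802.11 sequence number to match; this pairing appears verbatim in iwl_mvm_sta_alloc_queue later in the patch:

```c
/* Caller-side pairing (mirrors iwl_mvm_sta_alloc_queue below) */
inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
if (inc_ssn) {
	mvmsta->tid_data[tid].seq_number += 0x10;
	ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
}
```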
| 908 | + |
|---|
| 909 | +static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) |
|---|
| 920 | 910 | { |
|---|
| 921 | 911 | struct iwl_scd_txq_cfg_cmd cmd = { |
|---|
| 922 | 912 | .scd_queue = queue, |
|---|
| .. | .. |
|---|
| 931 | 921 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 932 | 922 | return; |
|---|
| 933 | 923 | |
|---|
| 934 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 935 | 924 | tid_bitmap = mvm->queue_info[queue].tid_bitmap; |
|---|
| 936 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 937 | 925 | |
|---|
| 938 | 926 | if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) |
|---|
| 939 | 927 | return; |
|---|
| .. | .. |
|---|
| 950 | 938 | return; |
|---|
| 951 | 939 | } |
|---|
| 952 | 940 | |
|---|
| 953 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 954 | 941 | mvm->queue_info[queue].txq_tid = tid; |
|---|
| 955 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 956 | 942 | IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", |
|---|
| 957 | 943 | queue, tid); |
|---|
| 958 | 944 | } |
|---|
| .. | .. |
|---|
| 974 | 960 | |
|---|
| 975 | 961 | lockdep_assert_held(&mvm->mutex); |
|---|
| 976 | 962 | |
|---|
| 977 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 978 | 963 | sta_id = mvm->queue_info[queue].ra_sta_id; |
|---|
| 979 | 964 | tid_bitmap = mvm->queue_info[queue].tid_bitmap; |
|---|
| 980 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 981 | 965 | |
|---|
| 982 | 966 | /* Find TID for queue, and make sure it is the only one on the queue */ |
|---|
| 983 | 967 | tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); |
|---|
| .. | .. |
|---|
| 1001 | 985 | |
|---|
| 1002 | 986 | ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); |
|---|
| 1003 | 987 | |
|---|
| 1004 | | - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, |
|---|
| 1005 | | - tid_to_mac80211_ac[tid], ssn, |
|---|
| 1006 | | - wdg_timeout, true); |
|---|
| 988 | + ret = iwl_mvm_redirect_queue(mvm, queue, tid, |
|---|
| 989 | + tid_to_mac80211_ac[tid], ssn, |
|---|
| 990 | + wdg_timeout, true, |
|---|
| 991 | + iwl_mvm_txq_from_tid(sta, tid)); |
|---|
| 1007 | 992 | if (ret) { |
|---|
| 1008 | 993 | IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); |
|---|
| 1009 | 994 | return; |
|---|
| .. | .. |
|---|
| 1034 | 1019 | } |
|---|
| 1035 | 1020 | } |
|---|
| 1036 | 1021 | |
|---|
| 1037 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 1038 | 1022 | mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; |
|---|
| 1039 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 1040 | 1023 | } |
|---|
| 1041 | 1024 | |
|---|
| 1042 | | -static inline u8 iwl_mvm_tid_to_ac_queue(int tid) |
|---|
| 1025 | +/* |
|---|
| 1026 | + * Remove inactive TIDs of a given queue. |
|---|
| 1027 | + * If all queue TIDs are inactive - mark the queue as inactive |
|---|
| 1028 | + * If only some of the queue TIDs are inactive - unmap them from the queue |
|---|
| 1029 | + * |
|---|
| 1030 | + * Returns %true if all TIDs were removed and the queue could be reused. |
|---|
| 1031 | + */ |
|---|
| 1032 | +static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, |
|---|
| 1033 | + struct iwl_mvm_sta *mvmsta, int queue, |
|---|
| 1034 | + unsigned long tid_bitmap, |
|---|
| 1035 | + unsigned long *unshare_queues, |
|---|
| 1036 | + unsigned long *changetid_queues) |
|---|
| 1043 | 1037 | { |
|---|
| 1044 | | - if (tid == IWL_MAX_TID_COUNT) |
|---|
| 1045 | | - return IEEE80211_AC_VO; /* MGMT */ |
|---|
| 1038 | + int tid; |
|---|
| 1046 | 1039 | |
|---|
| 1047 | | - return tid_to_mac80211_ac[tid]; |
|---|
| 1048 | | -} |
|---|
| 1049 | | - |
|---|
| 1050 | | -static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, |
|---|
| 1051 | | - struct ieee80211_sta *sta, int tid) |
|---|
| 1052 | | -{ |
|---|
| 1053 | | - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 1054 | | - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; |
|---|
| 1055 | | - struct sk_buff *skb; |
|---|
| 1056 | | - struct ieee80211_hdr *hdr; |
|---|
| 1057 | | - struct sk_buff_head deferred_tx; |
|---|
| 1058 | | - u8 mac_queue; |
|---|
| 1059 | | - bool no_queue = false; /* Marks if there is a problem with the queue */ |
|---|
| 1060 | | - u8 ac; |
|---|
| 1061 | | - |
|---|
| 1040 | + lockdep_assert_held(&mvmsta->lock); |
|---|
| 1062 | 1041 | lockdep_assert_held(&mvm->mutex); |
|---|
| 1063 | 1042 | |
|---|
| 1064 | | - skb = skb_peek(&tid_data->deferred_tx_frames); |
|---|
| 1065 | | - if (!skb) |
|---|
| 1066 | | - return; |
|---|
| 1067 | | - hdr = (void *)skb->data; |
|---|
| 1043 | + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 1044 | + return false; |
|---|
| 1068 | 1045 | |
|---|
| 1069 | | - ac = iwl_mvm_tid_to_ac_queue(tid); |
|---|
| 1070 | | - mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; |
|---|
| 1046 | + /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ |
|---|
| 1047 | + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { |
|---|
| 1048 | + /* If some TFDs are still queued - don't mark TID as inactive */ |
|---|
| 1049 | + if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) |
|---|
| 1050 | + tid_bitmap &= ~BIT(tid); |
|---|
| 1071 | 1051 | |
|---|
| 1072 | | - if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE && |
|---|
| 1073 | | - iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { |
|---|
| 1074 | | - IWL_ERR(mvm, |
|---|
| 1075 | | - "Can't alloc TXQ for sta %d tid %d - dropping frame\n", |
|---|
| 1076 | | - mvmsta->sta_id, tid); |
|---|
| 1077 | | - |
|---|
| 1078 | | - /* |
|---|
| 1079 | | - * Mark queue as problematic so later the deferred traffic is |
|---|
| 1080 | | - * freed, as we can do nothing with it |
|---|
| 1081 | | - */ |
|---|
| 1082 | | - no_queue = true; |
|---|
| 1052 | + /* Don't mark as inactive any TID that has an active BA */ |
|---|
| 1053 | + if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) |
|---|
| 1054 | + tid_bitmap &= ~BIT(tid); |
|---|
| 1083 | 1055 | } |
|---|
| 1084 | 1056 | |
|---|
| 1085 | | - __skb_queue_head_init(&deferred_tx); |
|---|
| 1057 | + /* If all TIDs in the queue are inactive - return that it can be reused */ |
|---|
| 1058 | + if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { |
|---|
| 1059 | + IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); |
|---|
| 1060 | + return true; |
|---|
| 1061 | + } |
|---|
| 1086 | 1062 | |
|---|
| 1087 | | - /* Disable bottom-halves when entering TX path */ |
|---|
| 1088 | | - local_bh_disable(); |
|---|
| 1089 | | - spin_lock(&mvmsta->lock); |
|---|
| 1090 | | - skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); |
|---|
| 1091 | | - mvmsta->deferred_traffic_tid_map &= ~BIT(tid); |
|---|
| 1092 | | - spin_unlock(&mvmsta->lock); |
|---|
| 1063 | + /* |
|---|
| 1064 | + * If we are here, this is a shared queue and not all TIDs timed-out. |
|---|
| 1065 | + * Remove the ones that did. |
|---|
| 1066 | + */ |
|---|
| 1067 | + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { |
|---|
| 1068 | + u16 tid_bitmap; |
|---|
| 1093 | 1069 | |
|---|
| 1094 | | - while ((skb = __skb_dequeue(&deferred_tx))) |
|---|
| 1095 | | - if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) |
|---|
| 1096 | | - ieee80211_free_txskb(mvm->hw, skb); |
|---|
| 1097 | | - local_bh_enable(); |
|---|
| 1070 | + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 1071 | + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); |
|---|
| 1098 | 1072 | |
|---|
| 1099 | | - /* Wake queue */ |
|---|
| 1100 | | - iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); |
|---|
| 1101 | | -} |
|---|
| 1102 | | - |
|---|
| 1103 | | -void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) |
|---|
| 1104 | | -{ |
|---|
| 1105 | | - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, |
|---|
| 1106 | | - add_stream_wk); |
|---|
| 1107 | | - struct ieee80211_sta *sta; |
|---|
| 1108 | | - struct iwl_mvm_sta *mvmsta; |
|---|
| 1109 | | - unsigned long deferred_tid_traffic; |
|---|
| 1110 | | - int queue, sta_id, tid; |
|---|
| 1111 | | - |
|---|
| 1112 | | - /* Check inactivity of queues */ |
|---|
| 1113 | | - iwl_mvm_inactivity_check(mvm); |
|---|
| 1114 | | - |
|---|
| 1115 | | - mutex_lock(&mvm->mutex); |
|---|
| 1116 | | - |
|---|
| 1117 | | - /* No queue reconfiguration in TVQM mode */ |
|---|
| 1118 | | - if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1119 | | - goto alloc_queues; |
|---|
| 1120 | | - |
|---|
| 1121 | | - /* Reconfigure queues requiring reconfiguation */ |
|---|
| 1122 | | - for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) { |
|---|
| 1123 | | - bool reconfig; |
|---|
| 1124 | | - bool change_owner; |
|---|
| 1125 | | - |
|---|
| 1126 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 1127 | | - reconfig = (mvm->queue_info[queue].status == |
|---|
| 1128 | | - IWL_MVM_QUEUE_RECONFIGURING); |
|---|
| 1073 | + tid_bitmap = mvm->queue_info[queue].tid_bitmap; |
|---|
| 1129 | 1074 | |
|---|
| 1130 | 1075 | /* |
|---|
| 1131 | 1076 | * We need to take into account a situation in which a TXQ was |
|---|
| .. | .. |
|---|
| 1134 | 1079 | * ownership must be given to one of the remaining TIDs. |
|---|
| 1135 | 1080 | * This is mainly because if TID x continues - a new queue can't |
|---|
| 1136 | 1081 | * be allocated for it as long as it is an owner of another TXQ. |
|---|
| 1082 | + * |
|---|
| 1083 | + * Mark this queue in the right bitmap; we'll send the command |
|---|
| 1084 | + * to the firmware later. |
|---|
| 1137 | 1085 | */ |
|---|
| 1138 | | - change_owner = !(mvm->queue_info[queue].tid_bitmap & |
|---|
| 1139 | | - BIT(mvm->queue_info[queue].txq_tid)) && |
|---|
| 1140 | | - (mvm->queue_info[queue].status == |
|---|
| 1141 | | - IWL_MVM_QUEUE_SHARED); |
|---|
| 1142 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 1086 | + if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) |
|---|
| 1087 | + set_bit(queue, changetid_queues); |
|---|
| 1143 | 1088 | |
|---|
| 1144 | | - if (reconfig) |
|---|
| 1145 | | - iwl_mvm_unshare_queue(mvm, queue); |
|---|
| 1146 | | - else if (change_owner) |
|---|
| 1147 | | - iwl_mvm_change_queue_owner(mvm, queue); |
|---|
| 1089 | + IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 1090 | + "Removing inactive TID %d from shared Q:%d\n", |
|---|
| 1091 | + tid, queue); |
|---|
| 1148 | 1092 | } |
|---|
| 1149 | 1093 | |
|---|
| 1150 | | -alloc_queues: |
|---|
| 1151 | | - /* Go over all stations with deferred traffic */ |
|---|
| 1152 | | - for_each_set_bit(sta_id, mvm->sta_deferred_frames, |
|---|
| 1153 | | - IWL_MVM_STATION_COUNT) { |
|---|
| 1154 | | - clear_bit(sta_id, mvm->sta_deferred_frames); |
|---|
| 1155 | | - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], |
|---|
| 1156 | | - lockdep_is_held(&mvm->mutex)); |
|---|
| 1094 | + IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 1095 | + "TXQ #%d left with tid bitmap 0x%x\n", queue, |
|---|
| 1096 | + mvm->queue_info[queue].tid_bitmap); |
|---|
| 1097 | + |
|---|
| 1098 | + /* |
|---|
| 1099 | + * There may be different TIDs with the same mac queues, so make |
|---|
| 1100 | + * sure all TIDs have existing corresponding mac queues enabled |
|---|
| 1101 | + */ |
|---|
| 1102 | + tid_bitmap = mvm->queue_info[queue].tid_bitmap; |
|---|
| 1103 | + |
|---|
| 1104 | + /* If the queue is marked as shared - "unshare" it */ |
|---|
| 1105 | + if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && |
|---|
| 1106 | + mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { |
|---|
| 1107 | + IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", |
|---|
| 1108 | + queue); |
|---|
| 1109 | + set_bit(queue, unshare_queues); |
|---|
| 1110 | + } |
|---|
| 1111 | + |
|---|
| 1112 | + return false; |
|---|
| 1113 | +} |
|---|
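Note that the two out-bitmaps are only *marked* here, under the station spinlock; the firmware commands are issued later from iwl_mvm_inactivity_check() once the lock is dropped, as shown in the consumption site below:

```c
/* Consumption site - appears verbatim in iwl_mvm_inactivity_check() */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
	iwl_mvm_unshare_queue(mvm, i);
for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
	iwl_mvm_change_queue_tid(mvm, i);
```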
| 1114 | + |
|---|
| 1115 | +/* |
|---|
| 1116 | + * Check for inactivity - this includes checking if any queue |
|---|
| 1117 | + * can be unshared and finding one (and only one) that can be |
|---|
| 1118 | + * reused. |
|---|
| 1119 | + * This function is also invoked as a sort of clean-up task, |
|---|
| 1120 | + * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. |
|---|
| 1121 | + * |
|---|
| 1122 | + * Returns the queue number, or -ENOSPC. |
|---|
| 1123 | + */ |
|---|
| 1124 | +static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) |
|---|
| 1125 | +{ |
|---|
| 1126 | + unsigned long now = jiffies; |
|---|
| 1127 | + unsigned long unshare_queues = 0; |
|---|
| 1128 | + unsigned long changetid_queues = 0; |
|---|
| 1129 | + int i, ret, free_queue = -ENOSPC; |
|---|
| 1130 | + struct ieee80211_sta *queue_owner = NULL; |
|---|
| 1131 | + |
|---|
| 1132 | + lockdep_assert_held(&mvm->mutex); |
|---|
| 1133 | + |
|---|
| 1134 | + if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1135 | + return -ENOSPC; |
|---|
| 1136 | + |
|---|
| 1137 | + rcu_read_lock(); |
|---|
| 1138 | + |
|---|
| 1139 | + /* we skip the CMD queue below by starting at 1 */ |
|---|
| 1140 | + BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); |
|---|
| 1141 | + |
|---|
| 1142 | + for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { |
|---|
| 1143 | + struct ieee80211_sta *sta; |
|---|
| 1144 | + struct iwl_mvm_sta *mvmsta; |
|---|
| 1145 | + u8 sta_id; |
|---|
| 1146 | + int tid; |
|---|
| 1147 | + unsigned long inactive_tid_bitmap = 0; |
|---|
| 1148 | + unsigned long queue_tid_bitmap; |
|---|
| 1149 | + |
|---|
| 1150 | + queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; |
|---|
| 1151 | + if (!queue_tid_bitmap) |
|---|
| 1152 | + continue; |
|---|
| 1153 | + |
|---|
| 1154 | + /* If TXQ isn't in active use anyway - nothing to do here... */ |
|---|
| 1155 | + if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && |
|---|
| 1156 | + mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) |
|---|
| 1157 | + continue; |
|---|
| 1158 | + |
|---|
| 1159 | + /* Check to see if there are inactive TIDs on this queue */ |
|---|
| 1160 | + for_each_set_bit(tid, &queue_tid_bitmap, |
|---|
| 1161 | + IWL_MAX_TID_COUNT + 1) { |
|---|
| 1162 | + if (time_after(mvm->queue_info[i].last_frame_time[tid] + |
|---|
| 1163 | + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) |
|---|
| 1164 | + continue; |
|---|
| 1165 | + |
|---|
| 1166 | + inactive_tid_bitmap |= BIT(tid); |
|---|
| 1167 | + } |
|---|
| 1168 | + |
|---|
| 1169 | + /* If all TIDs are active - finish check on this queue */ |
|---|
| 1170 | + if (!inactive_tid_bitmap) |
|---|
| 1171 | + continue; |
|---|
| 1172 | + |
|---|
| 1173 | + /* |
|---|
| 1174 | + * If we are here - the queue hadn't been served recently and is |
|---|
| 1175 | + * in use |
|---|
| 1176 | + */ |
|---|
| 1177 | + |
|---|
| 1178 | + sta_id = mvm->queue_info[i].ra_sta_id; |
|---|
| 1179 | + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
|---|
| 1180 | + |
|---|
| 1181 | + /* |
|---|
| 1182 | + * If the STA doesn't exist anymore, it isn't an error. It could |
|---|
| 1183 | + * be that it was removed since getting the queues, and in this |
|---|
| 1184 | + * case it should've inactivated its queues anyway. |
|---|
| 1185 | + */ |
|---|
| 1157 | 1186 | if (IS_ERR_OR_NULL(sta)) |
|---|
| 1158 | 1187 | continue; |
|---|
| 1159 | 1188 | |
|---|
| 1160 | 1189 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 1161 | | - deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; |
|---|
| 1162 | 1190 | |
|---|
| 1163 | | - for_each_set_bit(tid, &deferred_tid_traffic, |
|---|
| 1164 | | - IWL_MAX_TID_COUNT + 1) |
|---|
| 1165 | | - iwl_mvm_tx_deferred_stream(mvm, sta, tid); |
|---|
| 1191 | + spin_lock_bh(&mvmsta->lock); |
|---|
| 1192 | + ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, |
|---|
| 1193 | + inactive_tid_bitmap, |
|---|
| 1194 | + &unshare_queues, |
|---|
| 1195 | + &changetid_queues); |
|---|
| 1196 | + if (ret && free_queue < 0) { |
|---|
| 1197 | + queue_owner = sta; |
|---|
| 1198 | + free_queue = i; |
|---|
| 1199 | + } |
|---|
| 1200 | + /* only unlock sta lock - we still need the queue info lock */ |
|---|
| 1201 | + spin_unlock_bh(&mvmsta->lock); |
|---|
| 1202 | + } |
|---|
| 1203 | + |
|---|
| 1204 | + |
|---|
| 1205 | + /* Reconfigure queues requiring reconfiguration */ |
|---|
| 1206 | + for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) |
|---|
| 1207 | + iwl_mvm_unshare_queue(mvm, i); |
|---|
| 1208 | + for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) |
|---|
| 1209 | + iwl_mvm_change_queue_tid(mvm, i); |
|---|
| 1210 | + |
|---|
| 1211 | + rcu_read_unlock(); |
|---|
| 1212 | + |
|---|
| 1213 | + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { |
|---|
| 1214 | + ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, |
|---|
| 1215 | + alloc_for_sta); |
|---|
| 1216 | + if (ret) |
|---|
| 1217 | + return ret; |
|---|
| 1218 | + } |
|---|
| 1219 | + |
|---|
| 1220 | + return free_queue; |
|---|
| 1221 | +} |
|---|
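For reference, the two call patterns this patch introduces for the function above:

```c
/* 1. Reclaim a queue for a specific station when normal allocation
 *    failed (see iwl_mvm_sta_alloc_queue below):
 */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);

/* 2. Pure housekeeping from the add-stream worker - nothing is
 *    allocated, inactive queues are just cleaned up:
 */
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
```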
| 1222 | + |
|---|
| 1223 | +static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, |
|---|
| 1224 | + struct ieee80211_sta *sta, u8 ac, int tid) |
|---|
| 1225 | +{ |
|---|
| 1226 | + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 1227 | + struct iwl_trans_txq_scd_cfg cfg = { |
|---|
| 1228 | + .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), |
|---|
| 1229 | + .sta_id = mvmsta->sta_id, |
|---|
| 1230 | + .tid = tid, |
|---|
| 1231 | + .frame_limit = IWL_FRAME_LIMIT, |
|---|
| 1232 | + }; |
|---|
| 1233 | + unsigned int wdg_timeout = |
|---|
| 1234 | + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); |
|---|
| 1235 | + int queue = -1; |
|---|
| 1236 | + u16 queue_tmp; |
|---|
| 1237 | + unsigned long disable_agg_tids = 0; |
|---|
| 1238 | + enum iwl_mvm_agg_state queue_state; |
|---|
| 1239 | + bool shared_queue = false, inc_ssn; |
|---|
| 1240 | + int ssn; |
|---|
| 1241 | + unsigned long tfd_queue_mask; |
|---|
| 1242 | + int ret; |
|---|
| 1243 | + |
|---|
| 1244 | + lockdep_assert_held(&mvm->mutex); |
|---|
| 1245 | + |
|---|
| 1246 | + if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1247 | + return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); |
|---|
| 1248 | + |
|---|
| 1249 | + spin_lock_bh(&mvmsta->lock); |
|---|
| 1250 | + tfd_queue_mask = mvmsta->tfd_queue_msk; |
|---|
| 1251 | + ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); |
|---|
| 1252 | + spin_unlock_bh(&mvmsta->lock); |
|---|
| 1253 | + |
|---|
| 1254 | + if (tid == IWL_MAX_TID_COUNT) { |
|---|
| 1255 | + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, |
|---|
| 1256 | + IWL_MVM_DQA_MIN_MGMT_QUEUE, |
|---|
| 1257 | + IWL_MVM_DQA_MAX_MGMT_QUEUE); |
|---|
| 1258 | + if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) |
|---|
| 1259 | + IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", |
|---|
| 1260 | + queue); |
|---|
| 1261 | + |
|---|
| 1262 | + /* If no such queue is found, we'll use a DATA queue instead */ |
|---|
| 1263 | + } |
|---|
| 1264 | + |
|---|
| 1265 | + if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && |
|---|
| 1266 | + (mvm->queue_info[mvmsta->reserved_queue].status == |
|---|
| 1267 | + IWL_MVM_QUEUE_RESERVED)) { |
|---|
| 1268 | + queue = mvmsta->reserved_queue; |
|---|
| 1269 | + mvm->queue_info[queue].reserved = true; |
|---|
| 1270 | + IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); |
|---|
| 1271 | + } |
|---|
| 1272 | + |
|---|
| 1273 | + if (queue < 0) |
|---|
| 1274 | + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, |
|---|
| 1275 | + IWL_MVM_DQA_MIN_DATA_QUEUE, |
|---|
| 1276 | + IWL_MVM_DQA_MAX_DATA_QUEUE); |
|---|
| 1277 | + if (queue < 0) { |
|---|
| 1278 | + /* try harder - perhaps kill an inactive queue */ |
|---|
| 1279 | + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); |
|---|
| 1280 | + } |
|---|
| 1281 | + |
|---|
| 1282 | + /* No free queue - we'll have to share */ |
|---|
| 1283 | + if (queue <= 0) { |
|---|
| 1284 | + queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); |
|---|
| 1285 | + if (queue > 0) { |
|---|
| 1286 | + shared_queue = true; |
|---|
| 1287 | + mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; |
|---|
| 1288 | + } |
|---|
| 1289 | + } |
|---|
| 1290 | + |
|---|
| 1291 | + /* |
|---|
| 1292 | + * Mark TXQ as ready, even though it hasn't been fully configured yet, |
|---|
| 1293 | + * to make sure no one else takes it. |
|---|
| 1294 | + * This will allow avoiding re-acquiring the lock at the end of the |
|---|
| 1295 | + * configuration. On error we'll mark it back as free. |
|---|
| 1296 | + */ |
|---|
| 1297 | + if (queue > 0 && !shared_queue) |
|---|
| 1298 | + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; |
|---|
| 1299 | + |
|---|
| 1300 | + /* This shouldn't happen - out of queues */ |
|---|
| 1301 | + if (WARN_ON(queue <= 0)) { |
|---|
| 1302 | + IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", |
|---|
| 1303 | + tid, cfg.sta_id); |
|---|
| 1304 | + return queue; |
|---|
| 1305 | + } |
|---|
| 1306 | + |
|---|
| 1307 | + /* |
|---|
| 1308 | + * Actual en/disablement of aggregations is through the ADD_STA HCMD, |
|---|
| 1309 | + * but for configuring the SCD to send A-MPDUs we need to mark the queue |
|---|
| 1310 | + * as aggregatable. |
|---|
| 1311 | + * Mark all DATA queues as allowing to be aggregated at some point |
|---|
| 1312 | + */ |
|---|
| 1313 | + cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || |
|---|
| 1314 | + queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); |
|---|
| 1315 | + |
|---|
| 1316 | + IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 1317 | + "Allocating %squeue #%d to sta %d on tid %d\n", |
|---|
| 1318 | + shared_queue ? "shared " : "", queue, |
|---|
| 1319 | + mvmsta->sta_id, tid); |
|---|
| 1320 | + |
|---|
| 1321 | + if (shared_queue) { |
|---|
| 1322 | + /* Disable any open aggs on this queue */ |
|---|
| 1323 | + disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); |
|---|
| 1324 | + |
|---|
| 1325 | + if (disable_agg_tids) { |
|---|
| 1326 | + IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", |
|---|
| 1327 | + queue); |
|---|
| 1328 | + iwl_mvm_invalidate_sta_queue(mvm, queue, |
|---|
| 1329 | + disable_agg_tids, false); |
|---|
| 1330 | + } |
|---|
| 1331 | + } |
|---|
| 1332 | + |
|---|
| 1333 | + inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); |
|---|
| 1334 | + |
|---|
| 1335 | + /* |
|---|
| 1336 | + * Mark queue as shared in transport if shared |
|---|
| 1337 | + * Note this has to be done after queue enablement because enablement |
|---|
| 1338 | + * can also set this value, and there is no indication of shared |
|---|
| 1339 | + * queues there. |
|---|
| 1340 | + */ |
|---|
| 1341 | + if (shared_queue) |
|---|
| 1342 | + iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); |
|---|
| 1343 | + |
|---|
| 1344 | + spin_lock_bh(&mvmsta->lock); |
|---|
| 1345 | + /* |
|---|
| 1346 | + * This looks racy, but it is not. We have only one packet for |
|---|
| 1347 | + * this ra/tid in our Tx path since we stop the Qdisc when we |
|---|
| 1348 | + * need to allocate a new TFD queue. |
|---|
| 1349 | + */ |
|---|
| 1350 | + if (inc_ssn) { |
|---|
| 1351 | + mvmsta->tid_data[tid].seq_number += 0x10; |
|---|
| 1352 | + ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; |
|---|
| 1353 | + } |
|---|
| 1354 | + mvmsta->tid_data[tid].txq_id = queue; |
|---|
| 1355 | + mvmsta->tfd_queue_msk |= BIT(queue); |
|---|
| 1356 | + queue_state = mvmsta->tid_data[tid].state; |
|---|
| 1357 | + |
|---|
| 1358 | + if (mvmsta->reserved_queue == queue) |
|---|
| 1359 | + mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; |
|---|
| 1360 | + spin_unlock_bh(&mvmsta->lock); |
|---|
| 1361 | + |
|---|
| 1362 | + if (!shared_queue) { |
|---|
| 1363 | + ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); |
|---|
| 1364 | + if (ret) |
|---|
| 1365 | + goto out_err; |
|---|
| 1366 | + |
|---|
| 1367 | + /* If we need to re-enable aggregations... */ |
|---|
| 1368 | + if (queue_state == IWL_AGG_ON) { |
|---|
| 1369 | + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); |
|---|
| 1370 | + if (ret) |
|---|
| 1371 | + goto out_err; |
|---|
| 1372 | + } |
|---|
| 1373 | + } else { |
|---|
| 1374 | + /* Redirect queue, if needed */ |
|---|
| 1375 | + ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, |
|---|
| 1376 | + wdg_timeout, false, |
|---|
| 1377 | + iwl_mvm_txq_from_tid(sta, tid)); |
|---|
| 1378 | + if (ret) |
|---|
| 1379 | + goto out_err; |
|---|
| 1380 | + } |
|---|
| 1381 | + |
|---|
| 1382 | + return 0; |
|---|
| 1383 | + |
|---|
| 1384 | +out_err: |
|---|
| 1385 | + queue_tmp = queue; |
|---|
| 1386 | + iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0); |
|---|
| 1387 | + |
|---|
| 1388 | + return ret; |
|---|
| 1389 | +} |
|---|
| 1390 | + |
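Read bottom-up, the allocation above is a strict fallback cascade: a management queue for the management TID, then the station's pre-reserved queue, then any free data queue, then a queue reclaimed by the inactivity check, and finally sharing a queue the station already owns. A minimal standalone sketch of that order follows; every name and constant in it is invented for the demo and only stands in for the driver's helpers, it is not the driver API.

```c
#include <stdio.h>
#include <stdbool.h>

enum q_status { Q_FREE, Q_RESERVED, Q_READY, Q_SHARED };

#define NUM_QUEUES 32
#define MIN_MGMT_Q  5	/* stands in for IWL_MVM_DQA_MIN_MGMT_QUEUE */
#define MAX_MGMT_Q  8
#define MIN_DATA_Q 10	/* stands in for IWL_MVM_DQA_MIN_DATA_QUEUE */
#define MAX_DATA_Q 31
#define MGMT_TID    8	/* stands in for IWL_MAX_TID_COUNT */

static enum q_status queue_status[NUM_QUEUES];

static int find_free_queue(int min, int max)
{
	for (int q = min; q <= max; q++)
		if (queue_status[q] == Q_FREE)
			return q;
	return -1;
}

/* Fallback order: mgmt range for the mgmt TID, then the station's
 * reserved queue, then any free data queue, then sharing. */
static int alloc_queue_sketch(int tid, int reserved_queue, bool *shared)
{
	int queue = -1;

	*shared = false;

	if (tid == MGMT_TID)
		queue = find_free_queue(MIN_MGMT_Q, MAX_MGMT_Q);

	if (queue < 0 && reserved_queue >= 0 &&
	    queue_status[reserved_queue] == Q_RESERVED)
		queue = reserved_queue;

	if (queue < 0)
		queue = find_free_queue(MIN_DATA_Q, MAX_DATA_Q);

	if (queue < 0) {
		/* last resort: share an existing queue (pick the first
		 * data queue here; the real driver scores candidates) */
		queue = MIN_DATA_Q;
		queue_status[queue] = Q_SHARED;
		*shared = true;
		return queue;
	}

	queue_status[queue] = Q_READY;	/* claim it immediately */
	return queue;
}

int main(void)
{
	bool shared;

	queue_status[12] = Q_RESERVED;	/* pretend queue 12 was reserved */

	int q = alloc_queue_sketch(0, 12, &shared);
	printf("got queue %d (%s)\n", q, shared ? "shared" : "exclusive");
	return 0;
}
```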
|---|
| 1391 | +void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) |
|---|
| 1392 | +{ |
|---|
| 1393 | + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, |
|---|
| 1394 | + add_stream_wk); |
|---|
| 1395 | + |
|---|
| 1396 | + mutex_lock(&mvm->mutex); |
|---|
| 1397 | + |
|---|
| 1398 | + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); |
|---|
| 1399 | + |
|---|
| 1400 | + while (!list_empty(&mvm->add_stream_txqs)) { |
|---|
| 1401 | + struct iwl_mvm_txq *mvmtxq; |
|---|
| 1402 | + struct ieee80211_txq *txq; |
|---|
| 1403 | + u8 tid; |
|---|
| 1404 | + |
|---|
| 1405 | + mvmtxq = list_first_entry(&mvm->add_stream_txqs, |
|---|
| 1406 | + struct iwl_mvm_txq, list); |
|---|
| 1407 | + |
|---|
| 1408 | + txq = container_of((void *)mvmtxq, struct ieee80211_txq, |
|---|
| 1409 | + drv_priv); |
|---|
| 1410 | + tid = txq->tid; |
|---|
| 1411 | + if (tid == IEEE80211_NUM_TIDS) |
|---|
| 1412 | + tid = IWL_MAX_TID_COUNT; |
|---|
| 1413 | + |
|---|
| 1414 | + /* |
|---|
| 1415 | + * We can't really do much here, but if this fails we can't |
|---|
| 1416 | + * transmit anyway - so just don't transmit the frame and let |
|---|
| 1417 | + * the frames back up; we've tried our best to allocate |
|---|
| 1418 | + * a queue in the function itself. |
|---|
| 1419 | + */ |
|---|
| 1420 | + if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { |
|---|
| 1421 | + list_del_init(&mvmtxq->list); |
|---|
| 1422 | + continue; |
|---|
| 1423 | + } |
|---|
| 1424 | + |
|---|
| 1425 | + list_del_init(&mvmtxq->list); |
|---|
| 1426 | + local_bh_disable(); |
|---|
| 1427 | + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); |
|---|
| 1428 | + local_bh_enable(); |
|---|
| 1166 | 1429 | } |
|---|
| 1167 | 1430 | |
|---|
| 1168 | 1431 | mutex_unlock(&mvm->mutex); |
|---|
| .. | .. |
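The worker above drains mvm->add_stream_txqs by repeatedly taking the first list entry and recovering the enclosing TXQ with container_of(); note that the entry is unlinked whether or not allocation succeeded, so a failed TXQ simply backs up until a later attempt. A self-contained illustration of that drain pattern, using a toy list in the style of the kernel's list.h (all names here are invented for the demo):

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal doubly linked list, in the style of the kernel's list.h. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}

struct txq { int tid; struct list_head list; };

int main(void)
{
	struct list_head pending;
	struct txq a = { .tid = 0 }, b = { .tid = 5 };

	list_init(&pending);
	list_init(&a.list);
	list_init(&b.list);
	list_add_tail(&a.list, &pending);
	list_add_tail(&b.list, &pending);

	/* Drain the way the worker does: first entry, then
	 * container_of() to get back to the enclosing object. */
	while (!list_empty(&pending)) {
		struct txq *t = container_of(pending.next, struct txq, list);

		list_del_init(&t->list);
		printf("servicing txq with tid %d\n", t->tid);
	}
	return 0;
}
```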
|---|
| 1174 | 1437 | { |
|---|
| 1175 | 1438 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 1176 | 1439 | int queue; |
|---|
| 1177 | | - bool using_inactive_queue = false, same_sta = false; |
|---|
| 1178 | 1440 | |
|---|
| 1179 | 1441 | /* queue reserving is disabled on new TX path */ |
|---|
| 1180 | 1442 | if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) |
|---|
| 1181 | 1443 | return 0; |
|---|
| 1182 | 1444 | |
|---|
| 1183 | | - /* |
|---|
| 1184 | | - * Check for inactive queues, so we don't reach a situation where we |
|---|
| 1185 | | - * can't add a STA due to a shortage in queues that doesn't really exist |
|---|
| 1186 | | - */ |
|---|
| 1187 | | - iwl_mvm_inactivity_check(mvm); |
|---|
| 1188 | | - |
|---|
| 1189 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 1445 | + /* run the general cleanup/unsharing of queues */ |
|---|
| 1446 | + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); |
|---|
| 1190 | 1447 | |
|---|
| 1191 | 1448 | /* Make sure we have free resources for this STA */ |
|---|
| 1192 | 1449 | if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && |
|---|
| 1193 | | - !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && |
|---|
| 1450 | + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && |
|---|
| 1194 | 1451 | (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == |
|---|
| 1195 | 1452 | IWL_MVM_QUEUE_FREE)) |
|---|
| 1196 | 1453 | queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; |
|---|
| .. | .. |
|---|
| 1199 | 1456 | IWL_MVM_DQA_MIN_DATA_QUEUE, |
|---|
| 1200 | 1457 | IWL_MVM_DQA_MAX_DATA_QUEUE); |
|---|
| 1201 | 1458 | if (queue < 0) { |
|---|
| 1202 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 1203 | | - IWL_ERR(mvm, "No available queues for new station\n"); |
|---|
| 1204 | | - return -ENOSPC; |
|---|
| 1205 | | - } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { |
|---|
| 1206 | | - /* |
|---|
| 1207 | | - * If this queue is already allocated but inactive we'll need to |
|---|
| 1208 | | - * first free this queue before enabling it again, we'll mark |
|---|
| 1209 | | - * it as reserved to make sure no new traffic arrives on it |
|---|
| 1210 | | - */ |
|---|
| 1211 | | - using_inactive_queue = true; |
|---|
| 1212 | | - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; |
|---|
| 1459 | + /* try again - this time kick out a queue if needed */ |
|---|
| 1460 | + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); |
|---|
| 1461 | + if (queue < 0) { |
|---|
| 1462 | + IWL_ERR(mvm, "No available queues for new station\n"); |
|---|
| 1463 | + return -ENOSPC; |
|---|
| 1464 | + } |
|---|
| 1213 | 1465 | } |
|---|
| 1214 | 1466 | mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; |
|---|
| 1215 | 1467 | |
|---|
| 1216 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 1217 | | - |
|---|
| 1218 | 1468 | mvmsta->reserved_queue = queue; |
|---|
| 1219 | | - |
|---|
| 1220 | | - if (using_inactive_queue) |
|---|
| 1221 | | - iwl_mvm_free_inactive_queue(mvm, queue, same_sta); |
|---|
| 1222 | 1469 | |
|---|
| 1223 | 1470 | IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", |
|---|
| 1224 | 1471 | queue, mvmsta->sta_id); |
|---|
| .. | .. |
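Queue reservation above tries the cheap path first (the dedicated BSS-client queue, or any free data queue) and only pays for the inactivity reclaim when that fails, returning -ENOSPC if even the reclaim finds nothing. A minimal sketch of that retry shape; both helpers are stand-ins invented for the demo, not the driver's functions.

```c
#include <stdio.h>

/* Stand-ins: pretend every queue is busy until a reclaim pass runs. */
static int find_free_queue(void)  { return -1; }
static int reclaim_inactive(void) { return 17; } /* pretend queue 17 freed */

/* Cheap lookup first, reclaim only on failure, -ENOSPC as last word. */
static int reserve_data_queue(void)
{
	int queue = find_free_queue();

	if (queue < 0)
		queue = reclaim_inactive();
	if (queue < 0)
		return -28;	/* -ENOSPC */
	return queue;
}

int main(void)
{
	printf("reserved data queue #%d\n", reserve_data_queue());
	return 0;
}
```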
|---|
| 1234 | 1481 | * Note that re-enabling aggregations isn't done in this function. |
|---|
| 1235 | 1482 | */ |
|---|
| 1236 | 1483 | static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, |
|---|
| 1237 | | - struct iwl_mvm_sta *mvm_sta) |
|---|
| 1484 | + struct ieee80211_sta *sta) |
|---|
| 1238 | 1485 | { |
|---|
| 1239 | | - unsigned int wdg_timeout = |
|---|
| 1240 | | - iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); |
|---|
| 1486 | + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 1487 | + unsigned int wdg = |
|---|
| 1488 | + iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); |
|---|
| 1241 | 1489 | int i; |
|---|
| 1242 | 1490 | struct iwl_trans_txq_scd_cfg cfg = { |
|---|
| 1243 | 1491 | .sta_id = mvm_sta->sta_id, |
|---|
| .. | .. |
|---|
| 1253 | 1501 | struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; |
|---|
| 1254 | 1502 | int txq_id = tid_data->txq_id; |
|---|
| 1255 | 1503 | int ac; |
|---|
| 1256 | | - u8 mac_queue; |
|---|
| 1257 | 1504 | |
|---|
| 1258 | 1505 | if (txq_id == IWL_MVM_INVALID_QUEUE) |
|---|
| 1259 | 1506 | continue; |
|---|
| 1260 | 1507 | |
|---|
| 1261 | | - skb_queue_head_init(&tid_data->deferred_tx_frames); |
|---|
| 1262 | | - |
|---|
| 1263 | 1508 | ac = tid_to_mac80211_ac[i]; |
|---|
| 1264 | | - mac_queue = mvm_sta->vif->hw_queue[ac]; |
|---|
| 1265 | 1509 | |
|---|
| 1266 | 1510 | if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 1267 | 1511 | IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 1268 | 1512 | "Re-mapping sta %d tid %d\n", |
|---|
| 1269 | 1513 | mvm_sta->sta_id, i); |
|---|
| 1270 | | - txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, |
|---|
| 1271 | | - mvm_sta->sta_id, |
|---|
| 1272 | | - i, wdg_timeout); |
|---|
| 1514 | + txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, |
|---|
| 1515 | + i, wdg); |
|---|
| 1516 | + /* |
|---|
| 1517 | + * on failure, just set it to IWL_MVM_INVALID_QUEUE |
|---|
| 1518 | + * to try again later; we have no other good way of |
|---|
| 1519 | + * failing here |
|---|
| 1520 | + */ |
|---|
| 1521 | + if (txq_id < 0) |
|---|
| 1522 | + txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 1273 | 1523 | tid_data->txq_id = txq_id; |
|---|
| 1274 | 1524 | |
|---|
| 1275 | 1525 | /* |
|---|
| .. | .. |
|---|
| 1292 | 1542 | "Re-mapping sta %d tid %d to queue %d\n", |
|---|
| 1293 | 1543 | mvm_sta->sta_id, i, txq_id); |
|---|
| 1294 | 1544 | |
|---|
| 1295 | | - iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, |
|---|
| 1296 | | - wdg_timeout); |
|---|
| 1545 | + iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); |
|---|
| 1297 | 1546 | mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; |
|---|
| 1298 | 1547 | } |
|---|
| 1299 | 1548 | } |
|---|
| .. | .. |
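On the new TX path the restart loop above cannot usefully propagate an allocation error, so it stores the IWL_MVM_INVALID_QUEUE sentinel and moves on to the next TID; traffic on that TID triggers another allocation attempt later. A compact rendering of that sentinel-and-continue pattern, with toy values and a hypothetical allocator:

```c
#include <stdio.h>

#define INVALID_QUEUE 0xFFFF	/* stands in for IWL_MVM_INVALID_QUEUE */
#define NUM_TIDS 4

/* Toy allocator: fails for odd TIDs so the sentinel path is visible. */
static int enable_txq(int tid)
{
	return (tid & 1) ? -12 /* -ENOMEM */ : 400 + tid;
}

int main(void)
{
	unsigned short txq_id[NUM_TIDS];

	for (int tid = 0; tid < NUM_TIDS; tid++) {
		int ret = enable_txq(tid);

		/* Store the sentinel on failure and keep going; the
		 * next TX on this TID can retry the allocation. */
		txq_id[tid] = ret < 0 ? INVALID_QUEUE : (unsigned short)ret;
		printf("tid %d -> txq %#x\n", tid, (unsigned)txq_id[tid]);
	}
	return 0;
}
```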
|---|
| 1312 | 1561 | |
|---|
| 1313 | 1562 | memset(&cmd, 0, sizeof(cmd)); |
|---|
| 1314 | 1563 | cmd.sta_id = sta->sta_id; |
|---|
| 1315 | | - cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, |
|---|
| 1316 | | - color)); |
|---|
| 1564 | + |
|---|
| 1565 | + if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, |
|---|
| 1566 | + 0) >= 12 && |
|---|
| 1567 | + sta->type == IWL_STA_AUX_ACTIVITY) |
|---|
| 1568 | + cmd.mac_id_n_color = cpu_to_le32(mac_id); |
|---|
| 1569 | + else |
|---|
| 1570 | + cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, |
|---|
| 1571 | + color)); |
|---|
| 1572 | + |
|---|
| 1317 | 1573 | if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) |
|---|
| 1318 | 1574 | cmd.station_type = sta->type; |
|---|
| 1319 | 1575 | |
|---|
| .. | .. |
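Before ADD_STA command version 12 the firmware always took the MAC id and color packed into one 32-bit field; from version 12 on, the aux-activity station passes the raw id instead (which, per the aux-sta comment further down, carries the lmac id on CDB NICs). A hedged sketch of the packing follows; the bit positions are assumptions modeled on the iwlwifi fw headers, not quoted from them.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: id in the low byte, color in the next byte. */
#define FW_CTXT_ID_POS     0
#define FW_CTXT_COLOR_POS  8

#define FW_CMD_ID_AND_COLOR(id, color) \
	(((uint32_t)(id) << FW_CTXT_ID_POS) | \
	 ((uint32_t)(color) << FW_CTXT_COLOR_POS))

int main(void)
{
	uint32_t old_style = FW_CMD_ID_AND_COLOR(3, 1); /* id 3, color 1 */
	uint32_t new_style = 3;	/* >= v12 aux sta: raw id only */

	printf("packed: 0x%08x  raw: 0x%08x\n",
	       (unsigned)old_style, (unsigned)new_style);
	return 0;
}
```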
|---|
| 1383 | 1639 | if (ret) |
|---|
| 1384 | 1640 | goto err; |
|---|
| 1385 | 1641 | |
|---|
| 1386 | | - iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); |
|---|
| 1642 | + iwl_mvm_realloc_queues_after_restart(mvm, sta); |
|---|
| 1387 | 1643 | sta_update = true; |
|---|
| 1388 | 1644 | sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; |
|---|
| 1389 | 1645 | goto update_fw; |
|---|
| .. | .. |
|---|
| 1393 | 1649 | mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, |
|---|
| 1394 | 1650 | mvmvif->color); |
|---|
| 1395 | 1651 | mvm_sta->vif = vif; |
|---|
| 1396 | | - if (!mvm->trans->cfg->gen2) |
|---|
| 1652 | + if (!mvm->trans->trans_cfg->gen2) |
|---|
| 1397 | 1653 | mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; |
|---|
| 1398 | 1654 | else |
|---|
| 1399 | 1655 | mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; |
|---|
| .. | .. |
|---|
| 1416 | 1672 | * frames until the queue is allocated |
|---|
| 1417 | 1673 | */ |
|---|
| 1418 | 1674 | mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 1419 | | - skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); |
|---|
| 1420 | 1675 | } |
|---|
| 1421 | | - mvm_sta->deferred_traffic_tid_map = 0; |
|---|
| 1676 | + |
|---|
| 1677 | + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { |
|---|
| 1678 | + struct iwl_mvm_txq *mvmtxq = |
|---|
| 1679 | + iwl_mvm_txq_from_mac80211(sta->txq[i]); |
|---|
| 1680 | + |
|---|
| 1681 | + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 1682 | + INIT_LIST_HEAD(&mvmtxq->list); |
|---|
| 1683 | + atomic_set(&mvmtxq->tx_request, 0); |
|---|
| 1684 | + } |
|---|
| 1685 | + |
|---|
| 1422 | 1686 | mvm_sta->agg_tids = 0; |
|---|
| 1423 | 1687 | |
|---|
| 1424 | 1688 | if (iwl_mvm_has_new_rx_api(mvm) && |
|---|
| .. | .. |
|---|
| 1457 | 1721 | */ |
|---|
| 1458 | 1722 | if (iwl_mvm_has_tlc_offload(mvm)) |
|---|
| 1459 | 1723 | iwl_mvm_rs_add_sta(mvm, mvm_sta); |
|---|
| 1724 | + else |
|---|
| 1725 | + spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); |
|---|
| 1726 | + |
|---|
| 1727 | + iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); |
|---|
| 1460 | 1728 | |
|---|
| 1461 | 1729 | update_fw: |
|---|
| 1462 | 1730 | ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); |
|---|
| .. | .. |
|---|
| 1551 | 1819 | |
|---|
| 1552 | 1820 | static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, |
|---|
| 1553 | 1821 | struct ieee80211_vif *vif, |
|---|
| 1554 | | - struct iwl_mvm_sta *mvm_sta) |
|---|
| 1822 | + struct ieee80211_sta *sta) |
|---|
| 1555 | 1823 | { |
|---|
| 1556 | | - int ac; |
|---|
| 1824 | + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
|---|
| 1557 | 1825 | int i; |
|---|
| 1558 | 1826 | |
|---|
| 1559 | 1827 | lockdep_assert_held(&mvm->mutex); |
|---|
| .. | .. |
|---|
| 1562 | 1830 | if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) |
|---|
| 1563 | 1831 | continue; |
|---|
| 1564 | 1832 | |
|---|
| 1565 | | - ac = iwl_mvm_tid_to_ac_queue(i); |
|---|
| 1566 | | - iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, |
|---|
| 1567 | | - vif->hw_queue[ac], i, 0); |
|---|
| 1833 | + iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i, |
|---|
| 1834 | + 0); |
|---|
| 1568 | 1835 | mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 1836 | + } |
|---|
| 1837 | + |
|---|
| 1838 | + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { |
|---|
| 1839 | + struct iwl_mvm_txq *mvmtxq = |
|---|
| 1840 | + iwl_mvm_txq_from_mac80211(sta->txq[i]); |
|---|
| 1841 | + |
|---|
| 1842 | + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 1843 | + list_del_init(&mvmtxq->list); |
|---|
| 1569 | 1844 | } |
|---|
| 1570 | 1845 | } |
|---|
| 1571 | 1846 | |
|---|
| .. | .. |
|---|
| 1612 | 1887 | return ret; |
|---|
| 1613 | 1888 | |
|---|
| 1614 | 1889 | /* flush its queues here since we are freeing mvm_sta */ |
|---|
| 1615 | | - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); |
|---|
| 1890 | + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); |
|---|
| 1616 | 1891 | if (ret) |
|---|
| 1617 | 1892 | return ret; |
|---|
| 1618 | 1893 | if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| .. | .. |
|---|
| 1628 | 1903 | |
|---|
| 1629 | 1904 | ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); |
|---|
| 1630 | 1905 | |
|---|
| 1631 | | - iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); |
|---|
| 1906 | + iwl_mvm_disable_sta_queues(mvm, vif, sta); |
|---|
| 1632 | 1907 | |
|---|
| 1633 | 1908 | /* If there is a TXQ still marked as reserved - free it */ |
|---|
| 1634 | 1909 | if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { |
|---|
| .. | .. |
|---|
| 1640 | 1915 | * is still marked as IWL_MVM_QUEUE_RESERVED, and |
|---|
| 1641 | 1916 | * should be manually marked as free again |
|---|
| 1642 | 1917 | */ |
|---|
| 1643 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 1644 | 1918 | status = &mvm->queue_info[reserved_txq].status; |
|---|
| 1645 | 1919 | if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && |
|---|
| 1646 | 1920 | (*status != IWL_MVM_QUEUE_FREE), |
|---|
| 1647 | 1921 | "sta_id %d reserved txq %d status %d", |
|---|
| 1648 | | - sta_id, reserved_txq, *status)) { |
|---|
| 1649 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 1922 | + sta_id, reserved_txq, *status)) |
|---|
| 1650 | 1923 | return -EINVAL; |
|---|
| 1651 | | - } |
|---|
| 1652 | 1924 | |
|---|
| 1653 | 1925 | *status = IWL_MVM_QUEUE_FREE; |
|---|
| 1654 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 1655 | 1926 | } |
|---|
| 1656 | 1927 | |
|---|
| 1657 | 1928 | if (vif->type == NL80211_IFTYPE_STATION && |
|---|
| .. | .. |
|---|
| 1662 | 1933 | |
|---|
| 1663 | 1934 | /* unassoc - go ahead - remove the AP STA now */ |
|---|
| 1664 | 1935 | mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; |
|---|
| 1665 | | - |
|---|
| 1666 | | - /* clear d0i3_ap_sta_id if no longer relevant */ |
|---|
| 1667 | | - if (mvm->d0i3_ap_sta_id == sta_id) |
|---|
| 1668 | | - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; |
|---|
| 1669 | 1936 | } |
|---|
| 1670 | 1937 | |
|---|
| 1671 | 1938 | /* |
|---|
| .. | .. |
|---|
| 1729 | 1996 | sta->sta_id = IWL_MVM_INVALID_STA; |
|---|
| 1730 | 1997 | } |
|---|
| 1731 | 1998 | |
|---|
| 1732 | | -static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, |
|---|
| 1999 | +static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, |
|---|
| 1733 | 2000 | u8 sta_id, u8 fifo) |
|---|
| 1734 | 2001 | { |
|---|
| 1735 | | - unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? |
|---|
| 1736 | | - mvm->cfg->base_params->wd_timeout : |
|---|
| 1737 | | - IWL_WATCHDOG_DISABLED; |
|---|
| 2002 | + unsigned int wdg_timeout = |
|---|
| 2003 | + mvm->trans->trans_cfg->base_params->wd_timeout; |
|---|
| 2004 | + struct iwl_trans_txq_scd_cfg cfg = { |
|---|
| 2005 | + .fifo = fifo, |
|---|
| 2006 | + .sta_id = sta_id, |
|---|
| 2007 | + .tid = IWL_MAX_TID_COUNT, |
|---|
| 2008 | + .aggregate = false, |
|---|
| 2009 | + .frame_limit = IWL_FRAME_LIMIT, |
|---|
| 2010 | + }; |
|---|
| 1738 | 2011 | |
|---|
| 1739 | | - if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 1740 | | - int tvqm_queue = |
|---|
| 1741 | | - iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, |
|---|
| 1742 | | - IWL_MAX_TID_COUNT, |
|---|
| 1743 | | - wdg_timeout); |
|---|
| 1744 | | - *queue = tvqm_queue; |
|---|
| 1745 | | - } else { |
|---|
| 1746 | | - struct iwl_trans_txq_scd_cfg cfg = { |
|---|
| 1747 | | - .fifo = fifo, |
|---|
| 1748 | | - .sta_id = sta_id, |
|---|
| 1749 | | - .tid = IWL_MAX_TID_COUNT, |
|---|
| 1750 | | - .aggregate = false, |
|---|
| 1751 | | - .frame_limit = IWL_FRAME_LIMIT, |
|---|
| 1752 | | - }; |
|---|
| 2012 | + WARN_ON(iwl_mvm_has_new_tx_api(mvm)); |
|---|
| 1753 | 2013 | |
|---|
| 1754 | | - iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); |
|---|
| 1755 | | - } |
|---|
| 2014 | + iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); |
|---|
| 1756 | 2015 | } |
|---|
| 1757 | 2016 | |
|---|
| 1758 | | -int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) |
|---|
| 2017 | +static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) |
|---|
| 2018 | +{ |
|---|
| 2019 | + unsigned int wdg_timeout = |
|---|
| 2020 | + mvm->trans->trans_cfg->base_params->wd_timeout; |
|---|
| 2021 | + |
|---|
| 2022 | + WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); |
|---|
| 2023 | + |
|---|
| 2024 | + return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, |
|---|
| 2025 | + wdg_timeout); |
|---|
| 2026 | +} |
|---|
| 2027 | + |
|---|
| 2028 | +static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, |
|---|
| 2029 | + int maccolor, u8 *addr, |
|---|
| 2030 | + struct iwl_mvm_int_sta *sta, |
|---|
| 2031 | + u16 *queue, int fifo) |
|---|
| 2032 | +{ |
|---|
| 2033 | + int ret; |
|---|
| 2034 | + |
|---|
| 2035 | + /* Map queue to fifo - needs to happen before adding station */ |
|---|
| 2036 | + if (!iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 2037 | + iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); |
|---|
| 2038 | + |
|---|
| 2039 | + ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); |
|---|
| 2040 | + if (ret) { |
|---|
| 2041 | + if (!iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 2042 | + iwl_mvm_disable_txq(mvm, NULL, queue, |
|---|
| 2043 | + IWL_MAX_TID_COUNT, 0); |
|---|
| 2044 | + return ret; |
|---|
| 2045 | + } |
|---|
| 2046 | + |
|---|
| 2047 | + /* |
|---|
| 2048 | + * For 22000 firmware and on we cannot add a queue to a station unknown |
|---|
| 2049 | + * to the firmware, so enable the queue here - after the station was added. |
|---|
| 2050 | + */ |
|---|
| 2051 | + if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 2052 | + int txq; |
|---|
| 2053 | + |
|---|
| 2054 | + txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); |
|---|
| 2055 | + if (txq < 0) { |
|---|
| 2056 | + iwl_mvm_rm_sta_common(mvm, sta->sta_id); |
|---|
| 2057 | + return txq; |
|---|
| 2058 | + } |
|---|
| 2059 | + |
|---|
| 2060 | + *queue = txq; |
|---|
| 2061 | + } |
|---|
| 2062 | + |
|---|
| 2063 | + return 0; |
|---|
| 2064 | +} |
|---|
| 2065 | + |
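iwl_mvm_add_int_sta_with_queue() centralizes an ordering difference: on the old TX path the queue-to-FIFO mapping must exist before the station is added, while on the new (22000+) path the station must exist before a queue can be attached to it, and each path unwinds its own half on failure. The control flow, schematically; the helpers below are toy stand-ins so the sketch compiles, not the driver's functions.

```c
#include <stdbool.h>
#include <stdio.h>

static bool new_tx_api = true;

static int enable_queue(void)   { puts("queue enabled");   return 0; }
static int disable_queue(void)  { puts("queue disabled");  return 0; }
static int add_station(void)    { puts("station added");   return 0; }
static int remove_station(void) { puts("station removed"); return 0; }

/* Old path maps queue->FIFO first; new path adds the station first
 * because the firmware rejects queues for unknown stations. */
static int add_int_sta_with_queue_sketch(void)
{
	int ret;

	if (!new_tx_api)
		enable_queue();		/* must precede ADD_STA */

	ret = add_station();
	if (ret) {
		if (!new_tx_api)
			disable_queue();/* unwind the early enable */
		return ret;
	}

	if (new_tx_api) {
		ret = enable_queue();	/* must follow ADD_STA */
		if (ret) {
			remove_station();
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return add_int_sta_with_queue_sketch();
}
```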
|---|
| 2066 | +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) |
|---|
| 1759 | 2067 | { |
|---|
| 1760 | 2068 | int ret; |
|---|
| 1761 | 2069 | |
|---|
| .. | .. |
|---|
| 1768 | 2076 | if (ret) |
|---|
| 1769 | 2077 | return ret; |
|---|
| 1770 | 2078 | |
|---|
| 1771 | | - /* Map Aux queue to fifo - needs to happen before adding Aux station */ |
|---|
| 1772 | | - if (!iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1773 | | - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, |
|---|
| 1774 | | - mvm->aux_sta.sta_id, |
|---|
| 1775 | | - IWL_MVM_TX_FIFO_MCAST); |
|---|
| 1776 | | - |
|---|
| 1777 | | - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, |
|---|
| 1778 | | - MAC_INDEX_AUX, 0); |
|---|
| 2079 | + /* |
|---|
| 2080 | + * In CDB NICs we need to specify which lmac to use for aux activity; |
|---|
| 2081 | + * reuse the mac_id argument slot to pass the lmac_id to the function. |
|---|
| 2082 | + */ |
|---|
| 2083 | + ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL, |
|---|
| 2084 | + &mvm->aux_sta, &mvm->aux_queue, |
|---|
| 2085 | + IWL_MVM_TX_FIFO_MCAST); |
|---|
| 1779 | 2086 | if (ret) { |
|---|
| 1780 | 2087 | iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); |
|---|
| 1781 | 2088 | return ret; |
|---|
| 1782 | 2089 | } |
|---|
| 1783 | | - |
|---|
| 1784 | | - /* |
|---|
| 1785 | | - * For 22000 firmware and on we cannot add queue to a station unknown |
|---|
| 1786 | | - * to firmware so enable queue here - after the station was added |
|---|
| 1787 | | - */ |
|---|
| 1788 | | - if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1789 | | - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, |
|---|
| 1790 | | - mvm->aux_sta.sta_id, |
|---|
| 1791 | | - IWL_MVM_TX_FIFO_MCAST); |
|---|
| 1792 | 2090 | |
|---|
| 1793 | 2091 | return 0; |
|---|
| 1794 | 2092 | } |
|---|
| .. | .. |
|---|
| 1796 | 2094 | int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) |
|---|
| 1797 | 2095 | { |
|---|
| 1798 | 2096 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
|---|
| 1799 | | - int ret; |
|---|
| 1800 | 2097 | |
|---|
| 1801 | 2098 | lockdep_assert_held(&mvm->mutex); |
|---|
| 1802 | 2099 | |
|---|
| 1803 | | - /* Map snif queue to fifo - must happen before adding snif station */ |
|---|
| 1804 | | - if (!iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1805 | | - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, |
|---|
| 1806 | | - mvm->snif_sta.sta_id, |
|---|
| 2100 | + return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, |
|---|
| 2101 | + NULL, &mvm->snif_sta, |
|---|
| 2102 | + &mvm->snif_queue, |
|---|
| 1807 | 2103 | IWL_MVM_TX_FIFO_BE); |
|---|
| 1808 | | - |
|---|
| 1809 | | - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, |
|---|
| 1810 | | - mvmvif->id, 0); |
|---|
| 1811 | | - if (ret) |
|---|
| 1812 | | - return ret; |
|---|
| 1813 | | - |
|---|
| 1814 | | - /* |
|---|
| 1815 | | - * For 22000 firmware and on we cannot add queue to a station unknown |
|---|
| 1816 | | - * to firmware so enable queue here - after the station was added |
|---|
| 1817 | | - */ |
|---|
| 1818 | | - if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1819 | | - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, |
|---|
| 1820 | | - mvm->snif_sta.sta_id, |
|---|
| 1821 | | - IWL_MVM_TX_FIFO_BE); |
|---|
| 1822 | | - |
|---|
| 1823 | | - return 0; |
|---|
| 1824 | 2104 | } |
|---|
| 1825 | 2105 | |
|---|
| 1826 | 2106 | int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) |
|---|
| .. | .. |
|---|
| 1829 | 2109 | |
|---|
| 1830 | 2110 | lockdep_assert_held(&mvm->mutex); |
|---|
| 1831 | 2111 | |
|---|
| 1832 | | - iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, |
|---|
| 1833 | | - IWL_MAX_TID_COUNT, 0); |
|---|
| 2112 | + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) |
|---|
| 2113 | + return -EINVAL; |
|---|
| 2114 | + |
|---|
| 2115 | + iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0); |
|---|
| 1834 | 2116 | ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); |
|---|
| 1835 | 2117 | if (ret) |
|---|
| 1836 | 2118 | IWL_WARN(mvm, "Failed sending remove station\n"); |
|---|
| .. | .. |
|---|
| 1838 | 2120 | return ret; |
|---|
| 1839 | 2121 | } |
|---|
| 1840 | 2122 | |
|---|
| 2123 | +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) |
|---|
| 2124 | +{ |
|---|
| 2125 | + int ret; |
|---|
| 2126 | + |
|---|
| 2127 | + lockdep_assert_held(&mvm->mutex); |
|---|
| 2128 | + |
|---|
| 2129 | + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) |
|---|
| 2130 | + return -EINVAL; |
|---|
| 2131 | + |
|---|
| 2132 | + iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0); |
|---|
| 2133 | + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); |
|---|
| 2134 | + if (ret) |
|---|
| 2135 | + IWL_WARN(mvm, "Failed sending remove station\n"); |
|---|
| 2136 | + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); |
|---|
| 2137 | + |
|---|
| 2138 | + return ret; |
|---|
| 2139 | +} |
|---|
| 2140 | + |
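The new iwl_mvm_rm_aux_sta() mirrors iwl_mvm_rm_snif_sta() exactly: bail out with a warning if the station was never allocated, then tear down in reverse order of setup (queue, firmware station, driver bookkeeping). A generic rendering of that guarded-teardown order, with invented names standing in for the driver helpers:

```c
#include <stdio.h>

#define INVALID_STA 0xFF

static void disable_queue(void) { puts("queue disabled"); }
static int  remove_fw_sta(void) { puts("fw station removed"); return 0; }
static void dealloc_sta(void)   { puts("driver state freed"); }

/* Teardown mirrors setup in reverse; the guard turns a double
 * removal into an error return rather than a crash. */
static int rm_int_sta(unsigned char sta_id)
{
	int ret;

	if (sta_id == INVALID_STA)	/* never allocated: nothing to do */
		return -22;		/* -EINVAL */

	disable_queue();
	ret = remove_fw_sta();
	dealloc_sta();
	return ret;
}

int main(void)
{
	return rm_int_sta(3);
}
```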
|---|
| 1841 | 2141 | void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) |
|---|
| 1842 | 2142 | { |
|---|
| 1843 | 2143 | iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); |
|---|
| 1844 | | -} |
|---|
| 1845 | | - |
|---|
| 1846 | | -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) |
|---|
| 1847 | | -{ |
|---|
| 1848 | | - lockdep_assert_held(&mvm->mutex); |
|---|
| 1849 | | - |
|---|
| 1850 | | - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); |
|---|
| 1851 | 2144 | } |
|---|
| 1852 | 2145 | |
|---|
| 1853 | 2146 | /* |
|---|
| .. | .. |
|---|
| 1880 | 2173 | |
|---|
| 1881 | 2174 | if (!iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 1882 | 2175 | if (vif->type == NL80211_IFTYPE_AP || |
|---|
| 1883 | | - vif->type == NL80211_IFTYPE_ADHOC) |
|---|
| 2176 | + vif->type == NL80211_IFTYPE_ADHOC) { |
|---|
| 1884 | 2177 | queue = mvm->probe_queue; |
|---|
| 1885 | | - else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) |
|---|
| 2178 | + } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { |
|---|
| 1886 | 2179 | queue = mvm->p2p_dev_queue; |
|---|
| 1887 | | - else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) |
|---|
| 2180 | + } else { |
|---|
| 2181 | + WARN(1, "Missing required TXQ for adding bcast STA\n"); |
|---|
| 1888 | 2182 | return -EINVAL; |
|---|
| 2183 | + } |
|---|
| 1889 | 2184 | |
|---|
| 1890 | 2185 | bsta->tfd_queue_msk |= BIT(queue); |
|---|
| 1891 | 2186 | |
|---|
| 1892 | | - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, |
|---|
| 1893 | | - &cfg, wdg_timeout); |
|---|
| 2187 | + iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); |
|---|
| 1894 | 2188 | } |
|---|
| 1895 | 2189 | |
|---|
| 1896 | 2190 | if (vif->type == NL80211_IFTYPE_ADHOC) |
|---|
| .. | .. |
|---|
| 1909 | 2203 | * to firmware so enable queue here - after the station was added |
|---|
| 1910 | 2204 | */ |
|---|
| 1911 | 2205 | if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 1912 | | - queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], |
|---|
| 1913 | | - bsta->sta_id, |
|---|
| 2206 | + queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, |
|---|
| 1914 | 2207 | IWL_MAX_TID_COUNT, |
|---|
| 1915 | 2208 | wdg_timeout); |
|---|
| 2209 | + if (queue < 0) { |
|---|
| 2210 | + iwl_mvm_rm_sta_common(mvm, bsta->sta_id); |
|---|
| 2211 | + return queue; |
|---|
| 2212 | + } |
|---|
| 1916 | 2213 | |
|---|
| 1917 | 2214 | if (vif->type == NL80211_IFTYPE_AP || |
|---|
| 1918 | 2215 | vif->type == NL80211_IFTYPE_ADHOC) |
|---|
| .. | .. |
|---|
| 1928 | 2225 | struct ieee80211_vif *vif) |
|---|
| 1929 | 2226 | { |
|---|
| 1930 | 2227 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
|---|
| 1931 | | - int queue; |
|---|
| 2228 | + u16 *queueptr, queue; |
|---|
| 1932 | 2229 | |
|---|
| 1933 | 2230 | lockdep_assert_held(&mvm->mutex); |
|---|
| 1934 | 2231 | |
|---|
| 1935 | | - iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0); |
|---|
| 2232 | + iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); |
|---|
| 1936 | 2233 | |
|---|
| 1937 | 2234 | switch (vif->type) { |
|---|
| 1938 | 2235 | case NL80211_IFTYPE_AP: |
|---|
| 1939 | 2236 | case NL80211_IFTYPE_ADHOC: |
|---|
| 1940 | | - queue = mvm->probe_queue; |
|---|
| 2237 | + queueptr = &mvm->probe_queue; |
|---|
| 1941 | 2238 | break; |
|---|
| 1942 | 2239 | case NL80211_IFTYPE_P2P_DEVICE: |
|---|
| 1943 | | - queue = mvm->p2p_dev_queue; |
|---|
| 2240 | + queueptr = &mvm->p2p_dev_queue; |
|---|
| 1944 | 2241 | break; |
|---|
| 1945 | 2242 | default: |
|---|
| 1946 | 2243 | WARN(1, "Can't free bcast queue on vif type %d\n", |
|---|
| .. | .. |
|---|
| 1948 | 2245 | return; |
|---|
| 1949 | 2246 | } |
|---|
| 1950 | 2247 | |
|---|
| 1951 | | - iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); |
|---|
| 2248 | + queue = *queueptr; |
|---|
| 2249 | + iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0); |
|---|
| 1952 | 2250 | if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 1953 | 2251 | return; |
|---|
| 1954 | 2252 | |
|---|
| .. | .. |
|---|
| 2050 | 2348 | static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; |
|---|
| 2051 | 2349 | const u8 *maddr = _maddr; |
|---|
| 2052 | 2350 | struct iwl_trans_txq_scd_cfg cfg = { |
|---|
| 2053 | | - .fifo = IWL_MVM_TX_FIFO_MCAST, |
|---|
| 2351 | + .fifo = vif->type == NL80211_IFTYPE_AP ? |
|---|
| 2352 | + IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, |
|---|
| 2054 | 2353 | .sta_id = msta->sta_id, |
|---|
| 2055 | 2354 | .tid = 0, |
|---|
| 2056 | 2355 | .aggregate = false, |
|---|
| .. | .. |
|---|
| 2071 | 2370 | * Note that this is done here as we want to avoid making DQA |
|---|
| 2072 | 2371 | * changes in mac80211 layer. |
|---|
| 2073 | 2372 | */ |
|---|
| 2074 | | - if (vif->type == NL80211_IFTYPE_ADHOC) { |
|---|
| 2075 | | - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; |
|---|
| 2076 | | - mvmvif->cab_queue = vif->cab_queue; |
|---|
| 2077 | | - } |
|---|
| 2373 | + if (vif->type == NL80211_IFTYPE_ADHOC) |
|---|
| 2374 | + mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; |
|---|
| 2078 | 2375 | |
|---|
| 2079 | 2376 | /* |
|---|
| 2080 | 2377 | * While in previous FWs we had to exclude cab queue from TFD queue |
|---|
| .. | .. |
|---|
| 2082 | 2379 | */ |
|---|
| 2083 | 2380 | if (!iwl_mvm_has_new_tx_api(mvm) && |
|---|
| 2084 | 2381 | fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { |
|---|
| 2085 | | - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, |
|---|
| 2086 | | - &cfg, timeout); |
|---|
| 2087 | | - msta->tfd_queue_msk |= BIT(vif->cab_queue); |
|---|
| 2382 | + iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, |
|---|
| 2383 | + timeout); |
|---|
| 2384 | + msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); |
|---|
| 2088 | 2385 | } |
|---|
| 2089 | 2386 | ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, |
|---|
| 2090 | 2387 | mvmvif->id, mvmvif->color); |
|---|
| 2091 | | - if (ret) { |
|---|
| 2092 | | - iwl_mvm_dealloc_int_sta(mvm, msta); |
|---|
| 2093 | | - return ret; |
|---|
| 2094 | | - } |
|---|
| 2388 | + if (ret) |
|---|
| 2389 | + goto err; |
|---|
| 2095 | 2390 | |
|---|
| 2096 | 2391 | /* |
|---|
| 2097 | 2392 | * Enable cab queue after the ADD_STA command is sent. |
|---|
| .. | .. |
|---|
| 2101 | 2396 | * tfd_queue_mask. |
|---|
| 2102 | 2397 | */ |
|---|
| 2103 | 2398 | if (iwl_mvm_has_new_tx_api(mvm)) { |
|---|
| 2104 | | - int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, |
|---|
| 2105 | | - msta->sta_id, |
|---|
| 2399 | + int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, |
|---|
| 2106 | 2400 | 0, |
|---|
| 2107 | 2401 | timeout); |
|---|
| 2402 | + if (queue < 0) { |
|---|
| 2403 | + ret = queue; |
|---|
| 2404 | + goto err; |
|---|
| 2405 | + } |
|---|
| 2108 | 2406 | mvmvif->cab_queue = queue; |
|---|
| 2109 | 2407 | } else if (!fw_has_api(&mvm->fw->ucode_capa, |
|---|
| 2110 | 2408 | IWL_UCODE_TLV_API_STA_TYPE)) |
|---|
| 2111 | | - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, |
|---|
| 2112 | | - &cfg, timeout); |
|---|
| 2409 | + iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, |
|---|
| 2410 | + timeout); |
|---|
| 2113 | 2411 | |
|---|
| 2114 | 2412 | return 0; |
|---|
| 2413 | +err: |
|---|
| 2414 | + iwl_mvm_dealloc_int_sta(mvm, msta); |
|---|
| 2415 | + return ret; |
|---|
| 2416 | +} |
|---|
| 2417 | + |
|---|
| 2418 | +static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, |
|---|
| 2419 | + struct ieee80211_key_conf *keyconf, |
|---|
| 2420 | + bool mcast) |
|---|
| 2421 | +{ |
|---|
| 2422 | + union { |
|---|
| 2423 | + struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; |
|---|
| 2424 | + struct iwl_mvm_add_sta_key_cmd cmd; |
|---|
| 2425 | + } u = {}; |
|---|
| 2426 | + bool new_api = fw_has_api(&mvm->fw->ucode_capa, |
|---|
| 2427 | + IWL_UCODE_TLV_API_TKIP_MIC_KEYS); |
|---|
| 2428 | + __le16 key_flags; |
|---|
| 2429 | + int ret, size; |
|---|
| 2430 | + u32 status; |
|---|
| 2431 | + |
|---|
| 2432 | + /* This is a valid situation for GTK removal */ |
|---|
| 2433 | + if (sta_id == IWL_MVM_INVALID_STA) |
|---|
| 2434 | + return 0; |
|---|
| 2435 | + |
|---|
| 2436 | + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & |
|---|
| 2437 | + STA_KEY_FLG_KEYID_MSK); |
|---|
| 2438 | + key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); |
|---|
| 2439 | + key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); |
|---|
| 2440 | + |
|---|
| 2441 | + if (mcast) |
|---|
| 2442 | + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); |
|---|
| 2443 | + |
|---|
| 2444 | + /* |
|---|
| 2445 | + * The fields assigned here are in the same location at the start |
|---|
| 2446 | + * of the command, so we can do this union trick. |
|---|
| 2447 | + */ |
|---|
| 2448 | + u.cmd.common.key_flags = key_flags; |
|---|
| 2449 | + u.cmd.common.key_offset = keyconf->hw_key_idx; |
|---|
| 2450 | + u.cmd.common.sta_id = sta_id; |
|---|
| 2451 | + |
|---|
| 2452 | + size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); |
|---|
| 2453 | + |
|---|
| 2454 | + status = ADD_STA_SUCCESS; |
|---|
| 2455 | + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, |
|---|
| 2456 | + &status); |
|---|
| 2457 | + |
|---|
| 2458 | + switch (status) { |
|---|
| 2459 | + case ADD_STA_SUCCESS: |
|---|
| 2460 | + IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); |
|---|
| 2461 | + break; |
|---|
| 2462 | + default: |
|---|
| 2463 | + ret = -EIO; |
|---|
| 2464 | + IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); |
|---|
| 2465 | + break; |
|---|
| 2466 | + } |
|---|
| 2467 | + |
|---|
| 2468 | + return ret; |
|---|
| 2115 | 2469 | } |
|---|
| 2116 | 2470 | |
|---|
| 2117 | 2471 | /* |
|---|
| .. | .. |
|---|
| 2125 | 2479 | |
|---|
| 2126 | 2480 | lockdep_assert_held(&mvm->mutex); |
|---|
| 2127 | 2481 | |
|---|
| 2128 | | - iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); |
|---|
| 2482 | + iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); |
|---|
| 2129 | 2483 | |
|---|
| 2130 | | - iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, |
|---|
| 2131 | | - 0, 0); |
|---|
| 2484 | + iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0); |
|---|
| 2132 | 2485 | |
|---|
| 2133 | 2486 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); |
|---|
| 2134 | 2487 | if (ret) |
|---|
| .. | .. |
|---|
| 2141 | 2494 | |
|---|
| 2142 | 2495 | static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) |
|---|
| 2143 | 2496 | { |
|---|
| 2144 | | - struct iwl_mvm_delba_notif notif = { |
|---|
| 2497 | + struct iwl_mvm_rss_sync_notif notif = { |
|---|
| 2145 | 2498 | .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, |
|---|
| 2146 | 2499 | .metadata.sync = 1, |
|---|
| 2147 | 2500 | .delba.baid = baid, |
|---|
| .. | .. |
|---|
| 2237 | 2590 | } |
|---|
| 2238 | 2591 | |
|---|
| 2239 | 2592 | if (iwl_mvm_has_new_rx_api(mvm) && start) { |
|---|
| 2240 | | - u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); |
|---|
| 2593 | + u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); |
|---|
| 2241 | 2594 | |
|---|
| 2242 | 2595 | /* sparse doesn't like the __align() so don't check */ |
|---|
| 2243 | 2596 | #ifndef __CHECKER__ |
|---|
| .. | .. |
|---|
| 2489 | 2842 | |
|---|
| 2490 | 2843 | spin_lock_bh(&mvmsta->lock); |
|---|
| 2491 | 2844 | |
|---|
| 2492 | | - /* possible race condition - we entered D0i3 while starting agg */ |
|---|
| 2493 | | - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { |
|---|
| 2494 | | - spin_unlock_bh(&mvmsta->lock); |
|---|
| 2495 | | - IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); |
|---|
| 2496 | | - return -EIO; |
|---|
| 2497 | | - } |
|---|
| 2498 | | - |
|---|
| 2499 | | - spin_lock(&mvm->queue_info_lock); |
|---|
| 2500 | | - |
|---|
| 2501 | 2845 | /* |
|---|
| 2502 | 2846 | * Note the possible cases: |
|---|
| 2503 | 2847 | * 1. An enabled TXQ - TXQ needs to become agg'ed |
|---|
| .. | .. |
|---|
| 2511 | 2855 | IWL_MVM_DQA_MAX_DATA_QUEUE); |
|---|
| 2512 | 2856 | if (ret < 0) { |
|---|
| 2513 | 2857 | IWL_ERR(mvm, "Failed to allocate agg queue\n"); |
|---|
| 2514 | | - goto release_locks; |
|---|
| 2858 | + goto out; |
|---|
| 2515 | 2859 | } |
|---|
| 2516 | 2860 | |
|---|
| 2517 | 2861 | txq_id = ret; |
|---|
| .. | .. |
|---|
| 2530 | 2874 | IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 2531 | 2875 | "Can't start tid %d agg on shared queue!\n", |
|---|
| 2532 | 2876 | tid); |
|---|
| 2533 | | - goto release_locks; |
|---|
| 2877 | + goto out; |
|---|
| 2534 | 2878 | } |
|---|
| 2535 | | - |
|---|
| 2536 | | - spin_unlock(&mvm->queue_info_lock); |
|---|
| 2537 | 2879 | |
|---|
| 2538 | 2880 | IWL_DEBUG_TX_QUEUES(mvm, |
|---|
| 2539 | 2881 | "AGG for tid %d will be on queue #%d\n", |
|---|
| .. | .. |
|---|
| 2554 | 2896 | * to align the wrap around of ssn so we compare relevant values. |
|---|
| 2555 | 2897 | */ |
|---|
| 2556 | 2898 | normalized_ssn = tid_data->ssn; |
|---|
| 2557 | | - if (mvm->trans->cfg->gen2) |
|---|
| 2899 | + if (mvm->trans->trans_cfg->gen2) |
|---|
| 2558 | 2900 | normalized_ssn &= 0xff; |
|---|
| 2559 | 2901 | |
|---|
| 2560 | 2902 | if (normalized_ssn == tid_data->next_reclaimed) { |
|---|
| 2561 | 2903 | tid_data->state = IWL_AGG_STARTING; |
|---|
| 2562 | | - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
|---|
| 2904 | + ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; |
|---|
| 2563 | 2905 | } else { |
|---|
| 2564 | 2906 | tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; |
|---|
| 2907 | + ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA; |
|---|
| 2565 | 2908 | } |
|---|
| 2566 | 2909 | |
|---|
| 2567 | | - ret = 0; |
|---|
| 2568 | | - goto out; |
|---|
| 2569 | | - |
|---|
| 2570 | | -release_locks: |
|---|
| 2571 | | - spin_unlock(&mvm->queue_info_lock); |
|---|
| 2572 | 2910 | out: |
|---|
| 2573 | 2911 | spin_unlock_bh(&mvmsta->lock); |
|---|
| 2574 | 2912 | |
|---|
| .. | .. |
|---|
| 2637 | 2975 | |
|---|
| 2638 | 2976 | cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; |
|---|
| 2639 | 2977 | |
|---|
| 2640 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 2641 | 2978 | queue_status = mvm->queue_info[queue].status; |
|---|
| 2642 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 2643 | 2979 | |
|---|
| 2644 | 2980 | /* Maybe there is no need to even alloc a queue... */ |
|---|
| 2645 | 2981 | if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) |
|---|
| .. | .. |
|---|
| 2673 | 3009 | } |
|---|
| 2674 | 3010 | |
|---|
| 2675 | 3011 | if (alloc_queue) |
|---|
| 2676 | | - iwl_mvm_enable_txq(mvm, queue, |
|---|
| 2677 | | - vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, |
|---|
| 3012 | + iwl_mvm_enable_txq(mvm, sta, queue, ssn, |
|---|
| 2678 | 3013 | &cfg, wdg_timeout); |
|---|
| 2679 | 3014 | |
|---|
| 2680 | 3015 | /* Send ADD_STA command to enable aggs only if the queue isn't shared */ |
|---|
| .. | .. |
|---|
| 2685 | 3020 | } |
|---|
| 2686 | 3021 | |
|---|
| 2687 | 3022 | /* No need to mark as reserved */ |
|---|
| 2688 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 2689 | 3023 | mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; |
|---|
| 2690 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 2691 | 3024 | |
|---|
| 2692 | 3025 | out: |
|---|
| 2693 | 3026 | /* |
|---|
| .. | .. |
|---|
| 2704 | 3037 | IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", |
|---|
| 2705 | 3038 | sta->addr, tid); |
|---|
| 2706 | 3039 | |
|---|
| 2707 | | - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false); |
|---|
| 3040 | + return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); |
|---|
| 2708 | 3041 | } |
|---|
| 2709 | 3042 | |
|---|
| 2710 | 3043 | static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, |
|---|
| .. | .. |
|---|
| 2713 | 3046 | { |
|---|
| 2714 | 3047 | u16 txq_id = tid_data->txq_id; |
|---|
| 2715 | 3048 | |
|---|
| 3049 | + lockdep_assert_held(&mvm->mutex); |
|---|
| 3050 | + |
|---|
| 2716 | 3051 | if (iwl_mvm_has_new_tx_api(mvm)) |
|---|
| 2717 | 3052 | return; |
|---|
| 2718 | 3053 | |
|---|
| 2719 | | - spin_lock_bh(&mvm->queue_info_lock); |
|---|
| 2720 | 3054 | /* |
|---|
| 2721 | 3055 | * The TXQ is marked as reserved only if no traffic came through yet |
|---|
| 2722 | 3056 | * This means no traffic has been sent on this TID (agg'd or not), so |
|---|
| .. | .. |
|---|
| 2728 | 3062 | mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; |
|---|
| 2729 | 3063 | tid_data->txq_id = IWL_MVM_INVALID_QUEUE; |
|---|
| 2730 | 3064 | } |
|---|
| 2731 | | - |
|---|
| 2732 | | - spin_unlock_bh(&mvm->queue_info_lock); |
|---|
| 2733 | 3065 | } |
|---|
| 2734 | 3066 | |
|---|
| 2735 | 3067 | int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
|---|
| .. | .. |
|---|
| 3178 | 3510 | return ret; |
|---|
| 3179 | 3511 | } |
|---|
| 3180 | 3512 | |
|---|
| 3181 | | -static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, |
|---|
| 3182 | | - struct ieee80211_key_conf *keyconf, |
|---|
| 3183 | | - bool mcast) |
|---|
| 3184 | | -{ |
|---|
| 3185 | | - union { |
|---|
| 3186 | | - struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; |
|---|
| 3187 | | - struct iwl_mvm_add_sta_key_cmd cmd; |
|---|
| 3188 | | - } u = {}; |
|---|
| 3189 | | - bool new_api = fw_has_api(&mvm->fw->ucode_capa, |
|---|
| 3190 | | - IWL_UCODE_TLV_API_TKIP_MIC_KEYS); |
|---|
| 3191 | | - __le16 key_flags; |
|---|
| 3192 | | - int ret, size; |
|---|
| 3193 | | - u32 status; |
|---|
| 3194 | | - |
|---|
| 3195 | | - /* This is a valid situation for GTK removal */ |
|---|
| 3196 | | - if (sta_id == IWL_MVM_INVALID_STA) |
|---|
| 3197 | | - return 0; |
|---|
| 3198 | | - |
|---|
| 3199 | | - key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & |
|---|
| 3200 | | - STA_KEY_FLG_KEYID_MSK); |
|---|
| 3201 | | - key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); |
|---|
| 3202 | | - key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); |
|---|
| 3203 | | - |
|---|
| 3204 | | - if (mcast) |
|---|
| 3205 | | - key_flags |= cpu_to_le16(STA_KEY_MULTICAST); |
|---|
| 3206 | | - |
|---|
| 3207 | | - /* |
|---|
| 3208 | | - * The fields assigned here are in the same location at the start |
|---|
| 3209 | | - * of the command, so we can do this union trick. |
|---|
| 3210 | | - */ |
|---|
| 3211 | | - u.cmd.common.key_flags = key_flags; |
|---|
| 3212 | | - u.cmd.common.key_offset = keyconf->hw_key_idx; |
|---|
| 3213 | | - u.cmd.common.sta_id = sta_id; |
|---|
| 3214 | | - |
|---|
| 3215 | | - size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); |
|---|
| 3216 | | - |
|---|
| 3217 | | - status = ADD_STA_SUCCESS; |
|---|
| 3218 | | - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, |
|---|
| 3219 | | - &status); |
|---|
| 3220 | | - |
|---|
| 3221 | | - switch (status) { |
|---|
| 3222 | | - case ADD_STA_SUCCESS: |
|---|
| 3223 | | - IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); |
|---|
| 3224 | | - break; |
|---|
| 3225 | | - default: |
|---|
| 3226 | | - ret = -EIO; |
|---|
| 3227 | | - IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); |
|---|
| 3228 | | - break; |
|---|
| 3229 | | - } |
|---|
| 3230 | | - |
|---|
| 3231 | | - return ret; |
|---|
| 3232 | | -} |
|---|
| 3233 | | - |
|---|
| 3234 | 3513 | int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, |
|---|
| 3235 | 3514 | struct ieee80211_vif *vif, |
|---|
| 3236 | 3515 | struct ieee80211_sta *sta, |
|---|
| .. | .. |
|---|
| 3523 | 3802 | struct ieee80211_sta *sta; |
|---|
| 3524 | 3803 | u32 sta_id = le32_to_cpu(notif->sta_id); |
|---|
| 3525 | 3804 | |
|---|
| 3526 | | - if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) |
|---|
| 3805 | + if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) |
|---|
| 3527 | 3806 | return; |
|---|
| 3528 | 3807 | |
|---|
| 3529 | 3808 | rcu_read_lock(); |
|---|
| .. | .. |
|---|
| 3606 | 3885 | lockdep_assert_held(&mvm->mutex); |
|---|
| 3607 | 3886 | |
|---|
| 3608 | 3887 | /* Block/unblock all the stations of the given mvmvif */ |
|---|
| 3609 | | - for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { |
|---|
| 3888 | + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { |
|---|
| 3610 | 3889 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], |
|---|
| 3611 | 3890 | lockdep_is_held(&mvm->mutex)); |
|---|
| 3612 | 3891 | if (IS_ERR_OR_NULL(sta)) |
|---|
| .. | .. |
|---|
| 3660 | 3939 | * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need |
|---|
| 3661 | 3940 | * to align the wrap around of ssn so we compare relevant values. |
|---|
| 3662 | 3941 | */ |
|---|
| 3663 | | - if (mvm->trans->cfg->gen2) |
|---|
| 3942 | + if (mvm->trans->trans_cfg->gen2) |
|---|
| 3664 | 3943 | sn &= 0xff; |
|---|
| 3665 | 3944 | |
|---|
| 3666 | 3945 | return ieee80211_sn_sub(sn, tid_data->next_reclaimed); |
|---|
| 3667 | 3946 | } |
|---|
| 3947 | + |
|---|
| 3948 | +int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
|---|
| 3949 | + struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, |
|---|
| 3950 | + u8 *key, u32 key_len) |
|---|
| 3951 | +{ |
|---|
| 3952 | + int ret; |
|---|
| 3953 | + u16 queue; |
|---|
| 3954 | + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
|---|
| 3955 | + struct ieee80211_key_conf *keyconf; |
|---|
| 3956 | + |
|---|
| 3957 | + ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, |
|---|
| 3958 | + NL80211_IFTYPE_UNSPECIFIED, |
|---|
| 3959 | + IWL_STA_LINK); |
|---|
| 3960 | + if (ret) |
|---|
| 3961 | + return ret; |
|---|
| 3962 | + |
|---|
| 3963 | + ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, |
|---|
| 3964 | + addr, sta, &queue, |
|---|
| 3965 | + IWL_MVM_TX_FIFO_BE); |
|---|
| 3966 | + if (ret) |
|---|
| 3967 | + goto out; |
|---|
| 3968 | + |
|---|
| 3969 | + keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL); |
|---|
| 3970 | + if (!keyconf) { |
|---|
| 3971 | + ret = -ENOBUFS; |
|---|
| 3972 | + goto out; |
|---|
| 3973 | + } |
|---|
| 3974 | + |
|---|
| 3975 | + keyconf->cipher = cipher; |
|---|
| 3976 | + memcpy(keyconf->key, key, key_len); |
|---|
| 3977 | + keyconf->keylen = key_len; |
|---|
| 3978 | + |
|---|
| 3979 | + ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, |
|---|
| 3980 | + 0, NULL, 0, 0, true); |
|---|
| 3981 | + kfree(keyconf); |
|---|
| 3982 | + return 0; |
|---|
| 3983 | +out: |
|---|
| 3984 | + iwl_mvm_dealloc_int_sta(mvm, sta); |
|---|
| 3985 | + return ret; |
|---|
| 3986 | +} |
|---|