```diff
@@ -1 +1 @@
 /******************************************************************************
  *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
```
```diff
@@ -17 +19 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
  * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
  *
  * Contact Information:
  *  Intel Linux Wireless <linuxwifi@intel.com>
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
 #include <linux/sched.h>
```
```diff
@@ -167 +200 @@
  */
 int iwl_pcie_rx_stop(struct iwl_trans *trans)
 {
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-                /* TODO: remove this for 22560 once fw does it */
-                iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
-                return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
-                                         RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
-        } else if (trans->cfg->mq_rx_supported) {
+        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+                /* TODO: remove this once fw does it */
+                iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+                return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+                                              RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+        } else if (trans->trans_cfg->mq_rx_supported) {
                 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
                 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
                                          RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
```
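The stop path follows a common quiesce pattern: zero the RX DMA configuration register, then poll a status register until the idle bit latches or a timeout expires. A minimal userspace sketch of that poll-with-timeout shape (the helper names, the fake register, and the 10-microsecond step are assumptions for illustration, not the driver's actual `iwl_poll_umac_prph_bit()` internals):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status_reg; /* stands in for a device status register */

static uint32_t read_status_reg(void)
{
        return fake_status_reg;
}

/*
 * Poll (reg & mask) until it equals `expected` or `timeout_us` elapses.
 * Mirrors the general shape of the driver's poll helpers: returns the
 * elapsed time on success, -1 on timeout.
 */
static int poll_bit(uint32_t mask, uint32_t expected, int timeout_us)
{
        int elapsed = 0;

        do {
                if ((read_status_reg() & mask) == expected)
                        return elapsed;
                /* in the kernel a udelay() would sit between reads */
                elapsed += 10;
        } while (elapsed < timeout_us);

        return -1;
}

int main(void)
{
        fake_status_reg = 0x1; /* pretend the DMA-idle bit is already set */
        printf("poll result: %d\n", poll_bit(0x1, 0x1, 1000));
        return 0;
}
```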
```diff
@@ -199 +232 @@
          * 1. shadow registers aren't enabled
          * 2. there is a chance that the NIC is asleep
          */
-        if (!trans->cfg->base_params->shadow_reg_enable &&
+        if (!trans->trans_cfg->base_params->shadow_reg_enable &&
             test_bit(STATUS_TPOWER_PMI, &trans->status)) {
                 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
 
@@ -207 +240 @@
                         IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                        reg);
                         iwl_set_bit(trans, CSR_GP_CNTRL,
-                                    BIT(trans->cfg->csr->flag_mac_access_req));
+                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                         rxq->need_update = true;
                         return;
                 }
         }
 
         rxq->write_actual = round_down(rxq->write, 8);
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                iwl_write32(trans, HBUS_TARG_WRPTR,
-                            (rxq->write_actual |
-                             ((FIRST_RX_QUEUE + rxq->id) << 16)));
-        else if (trans->cfg->mq_rx_supported)
+        if (trans->trans_cfg->mq_rx_supported)
                 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
                             rxq->write_actual);
         else
```
```diff
@@ -246 +275 @@
                                  struct iwl_rxq *rxq,
                                  struct iwl_rx_mem_buffer *rxb)
 {
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                 struct iwl_rx_transfer_desc *bd = rxq->bd;
 
-                bd[rxq->write].type_n_size =
-                        cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
-                        ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+                BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
+
                 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
                 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
         } else {
@@ -259 +287 @@
 
                 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
         }
+
+        IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
+                     (u32)rxb->vid, rxq->id, rxq->write);
 }
 
 /*
```
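In the pre-AX210 MQ path each RBD is a single `__le64` that ORs the virtual buffer ID into the DMA address. That only works while the address is page aligned so its low 12 bits are free, which is exactly what the `WARN_ON` in the restock loop of the next hunk guards (on newer hardware the checked mask comes from `supported_dma_mask` instead, since 2K buffers change the alignment). A hypothetical standalone sketch of the packing, not the driver's code:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a page-aligned DMA address and a 12-bit virtual buffer ID
 * into one 64-bit RBD, as the __le64 descriptor layout does. */
static uint64_t pack_rbd(uint64_t page_dma, uint16_t vid)
{
        assert((page_dma & 0xFFF) == 0); /* low 12 bits must be unset */
        assert(vid < 0x1000);            /* the VID must fit in them  */
        return page_dma | vid;
}

static void unpack_rbd(uint64_t bd, uint64_t *page_dma, uint16_t *vid)
{
        *vid = bd & 0xFFF;
        *page_dma = bd & ~0xFFFULL;
}

int main(void)
{
        uint64_t dma, bd = pack_rbd(0xABCDE000, 42);
        uint16_t vid;

        unpack_rbd(bd, &dma, &vid);
        printf("dma=0x%llx vid=%u\n", (unsigned long long)dma, vid);
        return 0;
}
```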
```diff
@@ -267 +298 @@
 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
                                   struct iwl_rxq *rxq)
 {
+        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_rx_mem_buffer *rxb;
 
         /*
@@ -287 +319 @@
                                        list);
                 list_del(&rxb->list);
                 rxb->invalid = false;
-                /* 12 first bits are expected to be empty */
-                WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+                /* some low bits are expected to be unset (depending on hw) */
+                WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
                 /* Point to Rx buffer via next RBD in circular buffer */
                 iwl_pcie_restock_bd(trans, rxq, rxb);
-                rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+                rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
                 rxq->free_count--;
         }
         spin_unlock(&rxq->lock);
```
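The write index now wraps with `queue_size - 1` instead of the fixed `MQ_RX_TABLE_MASK`, because the ring size is per device (`trans->cfg->num_rbds`). The masking trick requires the size to be a power of two, since only then does `(i + 1) & (size - 1)` equal `(i + 1) % size`. A tiny demonstration (the 512 is an assumed example size):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t queue_size = 512; /* must be a power of two */
        uint32_t write = 510;

        /* advance a ring index the way the restock loop advances rxq->write */
        for (int i = 0; i < 4; i++) {
                write = (write + 1) & (queue_size - 1);
                printf("write -> %u\n", write); /* 511, 0, 1, 2 */
        }
        return 0;
}
```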
```diff
@@ -370 +402 @@
 static
 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
-        if (trans->cfg->mq_rx_supported)
+        if (trans->trans_cfg->mq_rx_supported)
                 iwl_pcie_rxmq_restock(trans, rxq);
         else
                 iwl_pcie_rxsq_restock(trans, rxq);
```
```diff
@@ -381 +413 @@
  *
  */
 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
-                                           gfp_t priority)
+                                           u32 *offset, gfp_t priority)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+        unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+        unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
         struct page *page;
         gfp_t gfp_mask = priority;
 
         if (trans_pcie->rx_page_order > 0)
                 gfp_mask |= __GFP_COMP;
+
+        if (trans_pcie->alloc_page) {
+                spin_lock_bh(&trans_pcie->alloc_page_lock);
+                /* recheck */
+                if (trans_pcie->alloc_page) {
+                        *offset = trans_pcie->alloc_page_used;
+                        page = trans_pcie->alloc_page;
+                        trans_pcie->alloc_page_used += rbsize;
+                        if (trans_pcie->alloc_page_used >= allocsize)
+                                trans_pcie->alloc_page = NULL;
+                        else
+                                get_page(page);
+                        spin_unlock_bh(&trans_pcie->alloc_page_lock);
+                        return page;
+                }
+                spin_unlock_bh(&trans_pcie->alloc_page_lock);
+        }
 
         /* Alloc a new receive buffer */
         page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
@@ -399 +450 @@
                 /*
                  * Issue an error if we don't have enough pre-allocated
                  * buffers.
-`                */
+                 */
                 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
                         IWL_CRIT(trans,
                                  "Failed to alloc_pages\n");
                 return NULL;
         }
+
+        if (2 * rbsize <= allocsize) {
+                spin_lock_bh(&trans_pcie->alloc_page_lock);
+                if (!trans_pcie->alloc_page) {
+                        get_page(page);
+                        trans_pcie->alloc_page = page;
+                        trans_pcie->alloc_page_used = rbsize;
+                }
+                spin_unlock_bh(&trans_pcie->alloc_page_lock);
+        }
+
+        *offset = 0;
         return page;
 }
 
```
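`iwl_pcie_rx_alloc_page()` now carves several receive buffers out of one allocated page whenever the RB size is at most half the allocation size, caching the partially-used page and taking an extra page reference per handed-out slice so the page survives until every slice is returned. A single-threaded userspace sketch of that refcounting scheme (`fake_page` and the helpers are illustrative stand-ins for `struct page`, `get_page()` and `put_page()`; the real code additionally takes `alloc_page_lock` around the cache):

```c
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_SIZE 4096u /* PAGE_SIZE << rx_page_order */
#define RB_SIZE    2048u /* one 2K receive buffer */

struct fake_page {
        int refcount;                /* struct page refcount analogue */
        unsigned char data[ALLOC_SIZE];
};

static struct fake_page *cached;     /* trans_pcie->alloc_page analogue */
static unsigned int cached_used;     /* trans_pcie->alloc_page_used analogue */

/* Hand out one RB_SIZE slice; allocate a new page when the cache is empty. */
static struct fake_page *alloc_rb(unsigned int *offset)
{
        struct fake_page *page;

        if (cached) {
                *offset = cached_used;
                page = cached;
                cached_used += RB_SIZE;
                if (cached_used >= ALLOC_SIZE)
                        cached = NULL;       /* page fully carved up */
                else
                        page->refcount++;    /* get_page() analogue */
                return page;
        }

        page = calloc(1, sizeof(*page));
        page->refcount = 1;
        if (2 * RB_SIZE <= ALLOC_SIZE) {     /* room for another slice? */
                page->refcount++;            /* keep a cache reference */
                cached = page;
                cached_used = RB_SIZE;
        }
        *offset = 0;
        return page;
}

/* put_page() analogue: free only when the last slice is returned. */
static void free_rb(struct fake_page *page)
{
        if (--page->refcount == 0)
                free(page);
}

int main(void)
{
        unsigned int off1, off2;
        struct fake_page *a = alloc_rb(&off1); /* off1 == 0 */
        struct fake_page *b = alloc_rb(&off2); /* off2 == 2048, same page */

        printf("same page: %d, offsets %u/%u\n", a == b, off1, off2);
        free_rb(a);
        free_rb(b); /* the page is freed here */
        return 0;
}
```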
```diff
@@ -425 +488 @@
         struct page *page;
 
         while (1) {
+                unsigned int offset;
+
                 spin_lock(&rxq->lock);
                 if (list_empty(&rxq->rx_used)) {
                         spin_unlock(&rxq->lock);
@@ -432 +497 @@
                 }
                 spin_unlock(&rxq->lock);
 
-                /* Alloc a new receive buffer */
-                page = iwl_pcie_rx_alloc_page(trans, priority);
+                page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
                 if (!page)
                         return;
 
@@ -451 +515 @@
 
                 BUG_ON(rxb->page);
                 rxb->page = page;
+                rxb->offset = offset;
                 /* Get physical address of the RB */
                 rxb->page_dma =
-                        dma_map_page(trans->dev, page, 0,
-                                     PAGE_SIZE << trans_pcie->rx_page_order,
+                        dma_map_page(trans->dev, page, rxb->offset,
+                                     trans_pcie->rx_buf_bytes,
                                      DMA_FROM_DEVICE);
                 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                         rxb->page = NULL;
```
```diff
@@ -479 +544 @@
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         int i;
 
-        for (i = 0; i < RX_POOL_SIZE; i++) {
+        if (!trans_pcie->rx_pool)
+                return;
+
+        for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
                 if (!trans_pcie->rx_pool[i].page)
                         continue;
                 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
-                               PAGE_SIZE << trans_pcie->rx_page_order,
-                               DMA_FROM_DEVICE);
+                               trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
                 __free_pages(trans_pcie->rx_pool[i].page,
                              trans_pcie->rx_page_order);
                 trans_pcie->rx_pool[i].page = NULL;
```
```diff
@@ -504 +571 @@
         struct list_head local_empty;
         int pending = atomic_read(&rba->req_pending);
 
-        IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+        IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
 
         /* If we were scheduled - there is at least one request */
         spin_lock(&rba->lock);
```
```diff
@@ -537 +604 @@
                         BUG_ON(rxb->page);
 
                         /* Alloc a new receive buffer */
-                        page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+                        page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+                                                      gfp_mask);
                         if (!page)
                                 continue;
                         rxb->page = page;
 
                         /* Get physical address of the RB */
-                        rxb->page_dma = dma_map_page(trans->dev, page, 0,
-                                                     PAGE_SIZE << trans_pcie->rx_page_order,
-                                                     DMA_FROM_DEVICE);
+                        rxb->page_dma = dma_map_page(trans->dev, page,
+                                                     rxb->offset,
+                                                     trans_pcie->rx_buf_bytes,
+                                                     DMA_FROM_DEVICE);
                         if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                                 rxb->page = NULL;
                                 __free_pages(page, trans_pcie->rx_page_order);
@@ -562 +631 @@
 
                 if (!pending) {
                         pending = atomic_read(&rba->req_pending);
-                        IWL_DEBUG_RX(trans,
-                                     "Got more pending allocation requests = %d\n",
-                                     pending);
+                        if (pending)
+                                IWL_DEBUG_TPT(trans,
+                                              "Got more pending allocation requests = %d\n",
+                                              pending);
                 }
 
                 spin_lock(&rba->lock);
```
```diff
@@ -583 +653 @@
         list_splice_tail(&local_empty, &rba->rbd_empty);
         spin_unlock(&rba->lock);
 
-        IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
+        IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
 }
 
 /*
```
```diff
@@ -646 +716 @@
         if (use_rx_td)
                 return sizeof(*rx_td);
         else
-                return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+                return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
                         sizeof(__le32);
 }
 
```
```diff
@@ -654 +724 @@
                                   struct iwl_rxq *rxq)
 {
         struct device *dev = trans->dev;
-        bool use_rx_td = (trans->cfg->device_family >=
-                          IWL_DEVICE_FAMILY_22560);
+        bool use_rx_td = (trans->trans_cfg->device_family >=
+                          IWL_DEVICE_FAMILY_AX210);
         int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
         if (rxq->bd)
@@ -665 +735 @@
         rxq->bd_dma = 0;
         rxq->bd = NULL;
 
-        if (rxq->rb_stts)
-                dma_free_coherent(trans->dev,
-                                  use_rx_td ? sizeof(__le16) :
-                                  sizeof(struct iwl_rb_status),
-                                  rxq->rb_stts, rxq->rb_stts_dma);
         rxq->rb_stts_dma = 0;
         rxq->rb_stts = NULL;
 
```
```diff
@@ -681 +746 @@
         rxq->used_bd_dma = 0;
         rxq->used_bd = NULL;
 
-        if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                 return;
 
         if (rxq->tr_tail)
```
```diff
@@ -704 +769 @@
         struct device *dev = trans->dev;
         int i;
         int free_size;
-        bool use_rx_td = (trans->cfg->device_family >=
-                          IWL_DEVICE_FAMILY_22560);
+        bool use_rx_td = (trans->trans_cfg->device_family >=
+                          IWL_DEVICE_FAMILY_AX210);
+        size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
+                              sizeof(struct iwl_rb_status);
 
         spin_lock_init(&rxq->lock);
-        if (trans->cfg->mq_rx_supported)
-                rxq->queue_size = MQ_RX_TABLE_SIZE;
+        if (trans->trans_cfg->mq_rx_supported)
+                rxq->queue_size = trans->cfg->num_rbds;
         else
                 rxq->queue_size = RX_QUEUE_SIZE;
 
@@ -719 +786 @@
          * Allocate the circular buffer of Read Buffer Descriptors
          * (RBDs)
          */
-        rxq->bd = dma_zalloc_coherent(dev,
-                                      free_size * rxq->queue_size,
-                                      &rxq->bd_dma, GFP_KERNEL);
+        rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
+                                     &rxq->bd_dma, GFP_KERNEL);
         if (!rxq->bd)
                 goto err;
 
-        if (trans->cfg->mq_rx_supported) {
-                rxq->used_bd = dma_zalloc_coherent(dev,
-                                                   (use_rx_td ?
-                                                   sizeof(*rxq->cd) :
-                                                   sizeof(__le32)) *
-                                                   rxq->queue_size,
-                                                   &rxq->used_bd_dma,
-                                                   GFP_KERNEL);
+        if (trans->trans_cfg->mq_rx_supported) {
+                rxq->used_bd = dma_alloc_coherent(dev,
+                                                  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+                                                  &rxq->used_bd_dma,
+                                                  GFP_KERNEL);
                 if (!rxq->used_bd)
                         goto err;
         }
 
-        /* Allocate the driver's pointer to receive buffer status */
-        rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
-                                           sizeof(__le16) :
-                                           sizeof(struct iwl_rb_status),
-                                           &rxq->rb_stts_dma,
-                                           GFP_KERNEL);
-        if (!rxq->rb_stts)
-                goto err;
+        rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+        rxq->rb_stts_dma =
+                trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
 
         if (!use_rx_td)
                 return 0;
 
         /* Allocate the driver's pointer to TR tail */
-        rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
-                                           &rxq->tr_tail_dma,
-                                           GFP_KERNEL);
+        rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+                                          &rxq->tr_tail_dma, GFP_KERNEL);
         if (!rxq->tr_tail)
                 goto err;
 
         /* Allocate the driver's pointer to CR tail */
-        rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
-                                           &rxq->cr_tail_dma,
-                                           GFP_KERNEL);
+        rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+                                          &rxq->cr_tail_dma, GFP_KERNEL);
         if (!rxq->cr_tail)
                 goto err;
-        /*
-         * W/A 22560 device step Z0 must be non zero bug
-         * TODO: remove this when stop supporting Z0
-         */
-        *rxq->cr_tail = cpu_to_le16(500);
 
         return 0;
 
@@ -776 +827 @@
 
                 iwl_pcie_free_rxq_dma(trans, rxq);
         }
-        kfree(trans_pcie->rxq);
 
         return -ENOMEM;
 }
```
```diff
@@ -786 +836 @@
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_rb_allocator *rba = &trans_pcie->rba;
         int i, ret;
+        size_t rb_stts_size = trans->trans_cfg->device_family >=
+                                      IWL_DEVICE_FAMILY_AX210 ?
+                              sizeof(__le16) : sizeof(struct iwl_rb_status);
 
         if (WARN_ON(trans_pcie->rxq))
                 return -EINVAL;
 
         trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
                                   GFP_KERNEL);
-        if (!trans_pcie->rxq)
-                return -EINVAL;
+        trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+                                      sizeof(trans_pcie->rx_pool[0]),
+                                      GFP_KERNEL);
+        trans_pcie->global_table =
+                kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+                        sizeof(trans_pcie->global_table[0]),
+                        GFP_KERNEL);
+        if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+            !trans_pcie->global_table) {
+                ret = -ENOMEM;
+                goto err;
+        }
 
         spin_lock_init(&rba->lock);
+
+        /*
+         * Allocate the driver's pointer to receive buffer status.
+         * Allocate for all queues continuously (HW requirement).
+         */
+        trans_pcie->base_rb_stts =
+                dma_alloc_coherent(trans->dev,
+                                   rb_stts_size * trans->num_rx_queues,
+                                   &trans_pcie->base_rb_stts_dma,
+                                   GFP_KERNEL);
+        if (!trans_pcie->base_rb_stts) {
+                ret = -ENOMEM;
+                goto err;
+        }
 
         for (i = 0; i < trans->num_rx_queues; i++) {
                 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
+                rxq->id = i;
                 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
                 if (ret)
-                        return ret;
+                        goto err;
         }
         return 0;
+
+err:
+        if (trans_pcie->base_rb_stts) {
+                dma_free_coherent(trans->dev,
+                                  rb_stts_size * trans->num_rx_queues,
+                                  trans_pcie->base_rb_stts,
+                                  trans_pcie->base_rb_stts_dma);
+                trans_pcie->base_rb_stts = NULL;
+                trans_pcie->base_rb_stts_dma = 0;
+        }
+        kfree(trans_pcie->rx_pool);
+        kfree(trans_pcie->global_table);
+        kfree(trans_pcie->rxq);
+
+        return ret;
 }
```
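This hunk replaces per-queue `dma_alloc_coherent()` calls for the receive-buffer status with one contiguous block shared by all queues (the comment notes the contiguity is a hardware requirement); `iwl_pcie_alloc_rxq_dma()` then slices it per queue at `id * rb_stts_size`. A small sketch of that slicing arithmetic, with plain `calloc()` standing in for `dma_alloc_coherent()` and a made-up bus address:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_RX_QUEUES 8
#define RB_STTS_SIZE  sizeof(uint16_t) /* sizeof(__le16) on AX210+ */

int main(void)
{
        /* one contiguous block for all queues; base_dma stands in for
         * the DMA address dma_alloc_coherent() returns alongside it */
        uint8_t *base = calloc(NUM_RX_QUEUES, RB_STTS_SIZE);
        uint64_t base_dma = 0x1000; /* hypothetical bus address */

        for (int id = 0; id < NUM_RX_QUEUES; id++) {
                uint8_t *rb_stts = base + id * RB_STTS_SIZE;
                uint64_t rb_stts_dma = base_dma + id * RB_STTS_SIZE;

                printf("queue %d: cpu %p, dma 0x%llx\n",
                       id, (void *)rb_stts, (unsigned long long)rb_stts_dma);
        }
        free(base); /* freed once for all queues, not per queue */
        return 0;
}
```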
```diff
@@ -809 +902 @@
 
 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
@@ -874 +967 @@
         /* W/A for interrupt coalescing bug in 7260 and 3160 */
         if (trans->cfg->host_interrupt_operation_mode)
                 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
-}
-
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
-{
-        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
-                return;
-
-        if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
-                return;
-
-        if (!trans->cfg->integrated)
-                return;
-
-        /*
-         * Turn on the chicken-bits that cause MAC wakeup for RX-related
-         * values.
-         * This costs some power, but needed for W/A 9000 integrated A-step
-         * bug where shadow registers are not in the retention list and their
-         * value is lost when NIC powers down
-         */
-        iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
-                    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
-        iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
-                    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
 }
 
 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
```
```diff
@@ -977 +1046 @@
                                RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
                                RFH_GEN_CFG_SERVICE_DMA_SNOOP |
                                RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
-                                               trans->cfg->integrated ?
+                                               trans->trans_cfg->integrated ?
                                                RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
                                                RFH_GEN_CFG_RB_CHUNK_SIZE_128));
         /* Enable the relevant rx queues */
```
```diff
@@ -987 +1056 @@
 
         /* Set interrupt coalescing timer to default (2048 usecs) */
         iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
-        iwl_pcie_enable_rx_wake(trans, true);
 }
 
 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
```
| 1030 | 1097 | INIT_LIST_HEAD(&rba->rbd_empty); |
|---|
| 1031 | 1098 | spin_unlock(&rba->lock); |
|---|
| 1032 | 1099 | |
|---|
| 1033 | | - /* free all first - we might be reconfigured for a different size */ |
|---|
| 1100 | + /* free all first - we overwrite everything here */ |
|---|
| 1034 | 1101 | iwl_pcie_free_rbs_pool(trans); |
|---|
| 1035 | 1102 | |
|---|
| 1036 | 1103 | for (i = 0; i < RX_QUEUE_SIZE; i++) |
|---|
```diff
@@ -1038 +1105 @@
 
         for (i = 0; i < trans->num_rx_queues; i++) {
                 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
-                rxq->id = i;
 
                 spin_lock(&rxq->lock);
                 /*
@@ -1051 +1116 @@
                 rxq->write = 0;
                 rxq->write_actual = 0;
                 memset(rxq->rb_stts, 0,
-                       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+                       (trans->trans_cfg->device_family >=
+                        IWL_DEVICE_FAMILY_AX210) ?
                        sizeof(__le16) : sizeof(struct iwl_rb_status));
 
                 iwl_pcie_rx_init_rxb_lists(rxq);
```
```diff
@@ -1064 +1130 @@
         }
 
         /* move the pool to the default queue and allocator ownerships */
-        queue_size = trans->cfg->mq_rx_supported ?
-                     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+        queue_size = trans->trans_cfg->mq_rx_supported ?
+                     trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
         allocator_pool_size = trans->num_rx_queues *
                 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
         num_alloc = queue_size + allocator_pool_size;
-        BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
-                     ARRAY_SIZE(trans_pcie->rx_pool));
+
         for (i = 0; i < num_alloc; i++) {
                 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
 
@@ -1096 +1161 @@
         if (ret)
                 return ret;
 
-        if (trans->cfg->mq_rx_supported)
+        if (trans->trans_cfg->mq_rx_supported)
                 iwl_pcie_rx_mq_hw_init(trans);
         else
                 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
```
```diff
@@ -1112 +1177 @@
 
 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
 {
+        /* Set interrupt coalescing timer to default (2048 usecs) */
+        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
         /*
          * We don't configure the RFH.
          * Restock will be done at alive, after firmware configured the RFH.
@@ -1124 +1192 @@
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_rb_allocator *rba = &trans_pcie->rba;
         int i;
+        size_t rb_stts_size = trans->trans_cfg->device_family >=
+                                      IWL_DEVICE_FAMILY_AX210 ?
+                              sizeof(__le16) : sizeof(struct iwl_rb_status);
 
         /*
          * if rxq is NULL, it means that nothing has been allocated,
```
| 1129 | 1200 | * if rxq is NULL, it means that nothing has been allocated, |
|---|
| .. | .. |
|---|
| 1138 | 1209 | |
|---|
| 1139 | 1210 | iwl_pcie_free_rbs_pool(trans); |
|---|
| 1140 | 1211 | |
|---|
| 1212 | + if (trans_pcie->base_rb_stts) { |
|---|
| 1213 | + dma_free_coherent(trans->dev, |
|---|
| 1214 | + rb_stts_size * trans->num_rx_queues, |
|---|
| 1215 | + trans_pcie->base_rb_stts, |
|---|
| 1216 | + trans_pcie->base_rb_stts_dma); |
|---|
| 1217 | + trans_pcie->base_rb_stts = NULL; |
|---|
| 1218 | + trans_pcie->base_rb_stts_dma = 0; |
|---|
| 1219 | + } |
|---|
| 1220 | + |
|---|
| 1141 | 1221 | for (i = 0; i < trans->num_rx_queues; i++) { |
|---|
| 1142 | 1222 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; |
|---|
| 1143 | 1223 | |
|---|
| .. | .. |
|---|
| 1146 | 1226 | if (rxq->napi.poll) |
|---|
| 1147 | 1227 | netif_napi_del(&rxq->napi); |
|---|
| 1148 | 1228 | } |
|---|
| 1229 | + kfree(trans_pcie->rx_pool); |
|---|
| 1230 | + kfree(trans_pcie->global_table); |
|---|
| 1149 | 1231 | kfree(trans_pcie->rxq); |
|---|
| 1232 | + |
|---|
| 1233 | + if (trans_pcie->alloc_page) |
|---|
| 1234 | + __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); |
|---|
| 1150 | 1235 | } |
|---|
| 1151 | 1236 | |
|---|
| 1152 | 1237 | static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, |
|---|
| .. | .. |
|---|
| 1202 | 1287 | int i) |
|---|
| 1203 | 1288 | { |
|---|
| 1204 | 1289 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
|---|
| 1205 | | - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; |
|---|
| 1290 | + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; |
|---|
| 1206 | 1291 | bool page_stolen = false; |
|---|
| 1207 | | - int max_len = PAGE_SIZE << trans_pcie->rx_page_order; |
|---|
| 1292 | + int max_len = trans_pcie->rx_buf_bytes; |
|---|
| 1208 | 1293 | u32 offset = 0; |
|---|
| 1209 | 1294 | |
|---|
| 1210 | 1295 | if (WARN_ON(!rxb)) |
|---|
| .. | .. |
|---|
| 1218 | 1303 | bool reclaim; |
|---|
| 1219 | 1304 | int index, cmd_index, len; |
|---|
| 1220 | 1305 | struct iwl_rx_cmd_buffer rxcb = { |
|---|
| 1221 | | - ._offset = offset, |
|---|
| 1306 | + ._offset = rxb->offset + offset, |
|---|
| 1222 | 1307 | ._rx_page_order = trans_pcie->rx_page_order, |
|---|
| 1223 | 1308 | ._page = rxb->page, |
|---|
| 1224 | 1309 | ._page_stolen = false, |
|---|
| 1225 | 1310 | .truesize = max_len, |
|---|
| 1226 | 1311 | }; |
|---|
| 1227 | | - |
|---|
| 1228 | | - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) |
|---|
| 1229 | | - rxcb.status = rxq->cd[i].status; |
|---|
| 1230 | 1312 | |
|---|
| 1231 | 1313 | pkt = rxb_addr(&rxcb); |
|---|
| 1232 | 1314 | |
|---|
```diff
@@ -1280 +1362 @@
 
                         sequence = le16_to_cpu(pkt->hdr.sequence);
                         index = SEQ_TO_INDEX(sequence);
-                        cmd_index = iwl_pcie_get_cmd_index(txq, index);
+                        cmd_index = iwl_txq_get_cmd_index(txq, index);
 
-                        if (rxq->id == 0)
+                        if (rxq->id == trans_pcie->def_rx_queue)
                                 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
                                                &rxcb);
                         else
@@ -1290 +1372 @@
                                                   &rxcb, rxq->id);
 
                 if (reclaim) {
-                        kzfree(txq->entries[cmd_index].free_buf);
+                        kfree_sensitive(txq->entries[cmd_index].free_buf);
                         txq->entries[cmd_index].free_buf = NULL;
                 }
 
```
| 1296 | 1378 | |
|---|
| .. | .. |
|---|
| 1311 | 1393 | } |
|---|
| 1312 | 1394 | |
|---|
| 1313 | 1395 | page_stolen |= rxcb._page_stolen; |
|---|
| 1314 | | - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) |
|---|
| 1396 | + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) |
|---|
| 1315 | 1397 | break; |
|---|
| 1316 | 1398 | offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); |
|---|
| 1317 | 1399 | } |
|---|
```diff
@@ -1327 +1409 @@
          * rx_free list for reuse later. */
         if (rxb->page != NULL) {
                 rxb->page_dma =
-                        dma_map_page(trans->dev, rxb->page, 0,
-                                     PAGE_SIZE << trans_pcie->rx_page_order,
+                        dma_map_page(trans->dev, rxb->page, rxb->offset,
+                                     trans_pcie->rx_buf_bytes,
                                      DMA_FROM_DEVICE);
                 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                         /*
@@ -1348 +1430 @@
 }
 
 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
-                                                  struct iwl_rxq *rxq, int i)
+                                                  struct iwl_rxq *rxq, int i,
+                                                  bool *join)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_rx_mem_buffer *rxb;
         u16 vid;
 
-        if (!trans->cfg->mq_rx_supported) {
+        BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+
+        if (!trans->trans_cfg->mq_rx_supported) {
                 rxb = rxq->queue[i];
                 rxq->queue[i] = NULL;
                 return rxb;
         }
 
-        /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
-        else
-                vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+                vid = le16_to_cpu(rxq->cd[i].rbid);
+                *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+        } else {
+                vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+        }
 
-        if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+        if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
                 goto out_err;
 
         rxb = trans_pcie->global_table[vid - 1];
         if (rxb->invalid)
                 goto out_err;
 
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+        IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
 
         rxb->invalid = true;
 
```
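On MQ hardware the completion descriptor carries only a virtual buffer ID, which `iwl_pcie_get_rxb()` maps back to its `iwl_rx_mem_buffer` through `global_table`: VIDs are 1-based (0 is rejected) and the `invalid` flag catches stale or duplicated completions. A userspace sketch of that lookup (the structure and names are simplified stand-ins, not the driver's types):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 16 /* RX_POOL_SIZE(num_rx_bufs) analogue */

struct rx_buf {
        uint16_t vid;  /* 1-based virtual buffer ID */
        bool invalid;  /* true while the driver owns the buffer */
};

static struct rx_buf pool[POOL_SIZE];
static struct rx_buf *global_table[POOL_SIZE];

/* Look a completion VID back up, rejecting out-of-range or stale IDs. */
static struct rx_buf *get_rxb(uint16_t vid)
{
        struct rx_buf *rxb;

        if (!vid || vid > POOL_SIZE)   /* VID 0 is reserved/invalid */
                return NULL;
        rxb = global_table[vid - 1];   /* table is 0-based, VID 1-based */
        if (rxb->invalid)              /* already claimed: stale completion */
                return NULL;
        rxb->invalid = true;           /* claimed until it is restocked */
        return rxb;
}

int main(void)
{
        for (uint16_t i = 0; i < POOL_SIZE; i++) {
                pool[i].vid = i + 1;
                global_table[i] = &pool[i];
        }
        printf("vid 3: %p\n", (void *)get_rxb(3));
        printf("vid 3 again (stale): %p\n", (void *)get_rxb(3));
        printf("vid 0 (reserved): %p\n", (void *)get_rxb(0));
        return 0;
}
```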
```diff
@@ -1392 +1477 @@
 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+        struct napi_struct *napi;
         struct iwl_rxq *rxq;
         u32 r, i, count = 0;
         bool emergency = false;
```
```diff
@@ -1422 +1508 @@
                 u32 rb_pending_alloc =
                         atomic_read(&trans_pcie->rba.req_pending) *
                         RX_CLAIM_REQ_ALLOC;
+                bool join = false;
 
                 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
                              !emergency)) {
                         iwl_pcie_rx_move_to_allocator(rxq, rba);
                         emergency = true;
+                        IWL_DEBUG_TPT(trans,
+                                      "RX path is in emergency. Pending allocations %d\n",
+                                      rb_pending_alloc);
                 }
 
-                rxb = iwl_pcie_get_rxb(trans, rxq, i);
+                IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+
+                rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
                 if (!rxb)
                         goto out;
 
-                IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
-                iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+                if (unlikely(join || rxq->next_rb_is_fragment)) {
+                        rxq->next_rb_is_fragment = join;
+                        /*
+                         * We can only get a multi-RB in the following cases:
+                         *  - firmware issue, sending a too big notification
+                         *  - sniffer mode with a large A-MSDU
+                         *  - large MTU frames (>2k)
+                         * since the multi-RB functionality is limited to newer
+                         * hardware that cannot put multiple entries into a
+                         * single RB.
+                         *
+                         * Right now, the higher layers aren't set up to deal
+                         * with that, so discard all of these.
+                         */
+                        list_add_tail(&rxb->list, &rxq->rx_free);
+                        rxq->free_count++;
+                } else {
+                        iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+                }
 
                 i = (i + 1) & (rxq->queue_size - 1);
 
```
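The `IWL_RX_CD_FLAGS_FRAGMENTED` flag marks an RB whose frame continues into the next one; the handler keeps a single `next_rb_is_fragment` bit so that both the flagged RB and every continuation RB are recycled to `rx_free` rather than parsed. A sketch of that skip logic under the same assumptions:

```c
#include <stdbool.h>
#include <stdio.h>

static bool next_rb_is_fragment; /* rxq->next_rb_is_fragment analogue */

/* Return true if this RB should be discarded (part of a multi-RB frame).
 * `join` is the FRAGMENTED flag from the completion descriptor. */
static bool discard_rb(bool join)
{
        if (join || next_rb_is_fragment) {
                /* remember whether the *next* RB continues this frame */
                next_rb_is_fragment = join;
                return true;
        }
        return false;
}

int main(void)
{
        /* A 3-RB frame: the first two RBs are flagged, the last is not,
         * followed by one normal single-RB frame. */
        bool flags[] = { true, true, false, false };

        for (int i = 0; i < 4; i++)
                printf("RB %d: %s\n", i,
                       discard_rb(flags[i]) ? "discard" : "handle");
        /* RBs 0..2 are discarded, RB 3 is handled normally. */
        return 0;
}
```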
```diff
@@ -1455 +1564 @@
                 count++;
                 if (count == 8) {
                         count = 0;
-                        if (rb_pending_alloc < rxq->queue_size / 3)
+                        if (rb_pending_alloc < rxq->queue_size / 3) {
+                                IWL_DEBUG_TPT(trans,
+                                              "RX path exited emergency. Pending allocations %d\n",
+                                              rb_pending_alloc);
                                 emergency = false;
+                        }
 
                         rxq->read = i;
                         spin_unlock(&rxq->lock);
```
```diff
@@ -1470 +1583 @@
         /* Backtrack one entry */
         rxq->read = i;
         /* update cr tail with the rxq read pointer */
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
                 *rxq->cr_tail = cpu_to_le16(r);
         spin_unlock(&rxq->lock);
 
```
```diff
@@ -1489 +1602 @@
         if (unlikely(emergency && count))
                 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
 
-        if (rxq->napi.poll)
-                napi_gro_flush(&rxq->napi, false);
+        napi = &rxq->napi;
+        if (napi->poll) {
+                napi_gro_flush(napi, false);
+
+                if (napi->rx_count) {
+                        netif_receive_skb_list(&napi->rx_list);
+                        INIT_LIST_HEAD(&napi->rx_list);
+                        napi->rx_count = 0;
+                }
+        }
 
         iwl_pcie_rxq_restock(trans, rxq);
 }
```
```diff
@@ -1552 +1673 @@
                 return;
         }
 
-        for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-                if (!trans_pcie->txq[i])
+        for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+                if (!trans->txqs.txq[i])
                         continue;
-                del_timer(&trans_pcie->txq[i]->stuck_timer);
+                del_timer(&trans->txqs.txq[i]->stuck_timer);
         }
 
         /* The STATUS_FW_ERROR bit is set in this function. This must happen
```
```diff
@@ -1793 +1914 @@
         if (inta & CSR_INT_BIT_ALIVE) {
                 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                 isr_stats->alive++;
-                if (trans->cfg->gen2) {
+                if (trans->trans_cfg->gen2) {
                         /*
                          * We can restock, since firmware configured
                          * the RFH
```
```diff
@@ -1960 +2081 @@
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
         trans_pcie->ict_tbl =
-                dma_zalloc_coherent(trans->dev, ICT_SIZE,
-                                    &trans_pcie->ict_tbl_dma,
-                                    GFP_KERNEL);
+                dma_alloc_coherent(trans->dev, ICT_SIZE,
+                                   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
         if (!trans_pcie->ict_tbl)
                 return -ENOMEM;
 
```
```diff
@@ -2108 +2228 @@
 
         /* Error detected by uCode */
         if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
-            (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
-            (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+            (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
                 IWL_ERR(trans,
                         "Microcode SW error detected. Restarting 0x%X.\n",
                         inta_fh);
```
```diff
@@ -2135 +2254 @@
         if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
                 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                 isr_stats->alive++;
-                if (trans->cfg->gen2) {
+                if (trans->trans_cfg->gen2) {
                         /* We can restock, since firmware configured the RFH */
                         iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
                 }
         }
 
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
-            inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+        /*
+         * In some rare cases when the HW is in a bad state, we may
+         * get this interrupt too early, when prph_info is still NULL.
+         * So make sure that it's not NULL to prevent crashing.
+         */
+        if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
+                u32 sleep_notif =
+                        le32_to_cpu(trans_pcie->prph_info->sleep_notif);
+                if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
+                    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
+                        IWL_DEBUG_ISR(trans,
+                                      "Sx interrupt: sleep notification = 0x%x\n",
+                                      sleep_notif);
+                        trans_pcie->sx_complete = true;
+                        wake_up(&trans_pcie->sx_waitq);
+                } else {
+                        /* uCode wakes up after power-down sleep */
+                        IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+                        iwl_pcie_rxq_check_wrptr(trans);
+                        iwl_pcie_txq_check_wrptrs(trans);
+
+                        isr_stats->wakeup++;
+                }
+        }
+
+        if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
                 /* Reflect IML transfer status */
                 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
 
@@ -2151 +2294 @@
                         isr_stats->sw++;
                         iwl_pcie_irq_handle_error(trans);
                 }
-        } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
-                /* uCode wakes up after power-down sleep */
-                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-                iwl_pcie_rxq_check_wrptr(trans);
-                iwl_pcie_txq_check_wrptrs(trans);
-
-                isr_stats->wakeup++;
         }
 
         /* Chip got too hot and stopped itself */
```
```diff
@@ -2175 +2311 @@
                         "Hardware error detected. Restarting.\n");
 
                 isr_stats->hw++;
+                trans->dbg.hw_error = true;
                 iwl_pcie_irq_handle_error(trans);
         }
 
```
|---|