hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,12 +1,14 @@
 /******************************************************************************
  *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -17,15 +19,46 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
  * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <linuxwifi@intel.com>
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
 #include <linux/sched.h>
@@ -167,12 +200,12 @@
  */
 int iwl_pcie_rx_stop(struct iwl_trans *trans)
 {
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* TODO: remove this for 22560 once fw does it */
- iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
- return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
- RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else if (trans->cfg->mq_rx_supported) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO: remove this once fw does it */
+ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+ return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else if (trans->trans_cfg->mq_rx_supported) {
 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -199,7 +232,7 @@
  * 1. shadow registers aren't enabled
  * 2. there is a chance that the NIC is asleep
  */
- if (!trans->cfg->base_params->shadow_reg_enable &&
+ if (!trans->trans_cfg->base_params->shadow_reg_enable &&
 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

@@ -207,18 +240,14 @@
 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
 reg);
 iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 rxq->need_update = true;
 return;
 }
 }

 rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- iwl_write32(trans, HBUS_TARG_WRPTR,
- (rxq->write_actual |
- ((FIRST_RX_QUEUE + rxq->id) << 16)));
- else if (trans->cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
 rxq->write_actual);
 else
@@ -246,12 +275,11 @@
 struct iwl_rxq *rxq,
 struct iwl_rx_mem_buffer *rxb)
 {
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
 struct iwl_rx_transfer_desc *bd = rxq->bd;

- bd[rxq->write].type_n_size =
- cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
- ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
+
 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
 } else {
@@ -259,6 +287,9 @@

 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
 }
+
+ IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
+ (u32)rxb->vid, rxq->id, rxq->write);
 }

 /*
@@ -267,6 +298,7 @@
 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 struct iwl_rxq *rxq)
 {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 struct iwl_rx_mem_buffer *rxb;

 /*
@@ -287,11 +319,11 @@
 list);
 list_del(&rxb->list);
 rxb->invalid = false;
- /* 12 first bits are expected to be empty */
- WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
 /* Point to Rx buffer via next RBD in circular buffer */
 iwl_pcie_restock_bd(trans, rxq, rxb);
- rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+ rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
 rxq->free_count--;
 }
 spin_unlock(&rxq->lock);
@@ -370,7 +402,7 @@
 static
 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
- if (trans->cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
 iwl_pcie_rxmq_restock(trans, rxq);
 else
 iwl_pcie_rxsq_restock(trans, rxq);
@@ -381,14 +413,33 @@
  *
  */
 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- gfp_t priority)
+ u32 *offset, gfp_t priority)
 {
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
 struct page *page;
 gfp_t gfp_mask = priority;

 if (trans_pcie->rx_page_order > 0)
 gfp_mask |= __GFP_COMP;
+
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }

 /* Alloc a new receive buffer */
 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
@@ -399,12 +450,24 @@
 /*
 * Issue an error if we don't have enough pre-allocated
 * buffers.
-` */
+ */
 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
 IWL_CRIT(trans,
 "Failed to alloc_pages\n");
 return NULL;
 }
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
 return page;
 }

@@ -425,6 +488,8 @@
 struct page *page;

 while (1) {
+ unsigned int offset;
+
 spin_lock(&rxq->lock);
 if (list_empty(&rxq->rx_used)) {
 spin_unlock(&rxq->lock);
@@ -432,8 +497,7 @@
 }
 spin_unlock(&rxq->lock);

- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, priority);
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
 if (!page)
 return;

@@ -451,10 +515,11 @@

 BUG_ON(rxb->page);
 rxb->page = page;
+ rxb->offset = offset;
 /* Get physical address of the RB */
 rxb->page_dma =
- dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
 DMA_FROM_DEVICE);
 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 rxb->page = NULL;
@@ -479,12 +544,14 @@
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 int i;

- for (i = 0; i < RX_POOL_SIZE; i++) {
+ if (!trans_pcie->rx_pool)
+ return;
+
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
 if (!trans_pcie->rx_pool[i].page)
 continue;
 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
 __free_pages(trans_pcie->rx_pool[i].page,
 trans_pcie->rx_page_order);
 trans_pcie->rx_pool[i].page = NULL;
@@ -504,7 +571,7 @@
 struct list_head local_empty;
 int pending = atomic_read(&rba->req_pending);

- IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+ IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

 /* If we were scheduled - there is at least one request */
 spin_lock(&rba->lock);
@@ -537,15 +604,17 @@
 BUG_ON(rxb->page);

 /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
 if (!page)
 continue;
 rxb->page = page;

 /* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 rxb->page = NULL;
 __free_pages(page, trans_pcie->rx_page_order);
@@ -562,9 +631,10 @@

 if (!pending) {
 pending = atomic_read(&rba->req_pending);
- IWL_DEBUG_RX(trans,
- "Got more pending allocation requests = %d\n",
- pending);
+ if (pending)
+ IWL_DEBUG_TPT(trans,
+ "Got more pending allocation requests = %d\n",
+ pending);
 }

 spin_lock(&rba->lock);
@@ -583,7 +653,7 @@
 list_splice_tail(&local_empty, &rba->rbd_empty);
 spin_unlock(&rba->lock);

- IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
+ IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
 }

 /*
@@ -646,7 +716,7 @@
 if (use_rx_td)
 return sizeof(*rx_td);
 else
- return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
 sizeof(__le32);
 }

@@ -654,8 +724,8 @@
 struct iwl_rxq *rxq)
 {
 struct device *dev = trans->dev;
- bool use_rx_td = (trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ bool use_rx_td = (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210);
 int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

 if (rxq->bd)
@@ -665,11 +735,6 @@
 rxq->bd_dma = 0;
 rxq->bd = NULL;

- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- use_rx_td ? sizeof(__le16) :
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
 rxq->rb_stts_dma = 0;
 rxq->rb_stts = NULL;

@@ -681,7 +746,7 @@
 rxq->used_bd_dma = 0;
 rxq->used_bd = NULL;

- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 return;

 if (rxq->tr_tail)
@@ -704,12 +769,14 @@
 struct device *dev = trans->dev;
 int i;
 int free_size;
- bool use_rx_td = (trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ bool use_rx_td = (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210);
+ size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
+ sizeof(struct iwl_rb_status);

 spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
+ if (trans->trans_cfg->mq_rx_supported)
+ rxq->queue_size = trans->cfg->num_rbds;
 else
 rxq->queue_size = RX_QUEUE_SIZE;

@@ -719,54 +786,38 @@
  * Allocate the circular buffer of Read Buffer Descriptors
  * (RBDs)
  */
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
 if (!rxq->bd)
 goto err;

- if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- (use_rx_td ?
- sizeof(*rxq->cd) :
- sizeof(__le32)) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
+ if (trans->trans_cfg->mq_rx_supported) {
+ rxq->used_bd = dma_alloc_coherent(dev,
+ (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ &rxq->used_bd_dma,
+ GFP_KERNEL);
 if (!rxq->used_bd)
 goto err;
 }

- /* Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
- sizeof(__le16) :
- sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma,
- GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err;
+ rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+ rxq->rb_stts_dma =
+ trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

 if (!use_rx_td)
 return 0;

 /* Allocate the driver's pointer to TR tail */
- rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->tr_tail_dma,
- GFP_KERNEL);
+ rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma, GFP_KERNEL);
 if (!rxq->tr_tail)
 goto err;

 /* Allocate the driver's pointer to CR tail */
- rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->cr_tail_dma,
- GFP_KERNEL);
+ rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma, GFP_KERNEL);
 if (!rxq->cr_tail)
 goto err;
- /*
- * W/A 22560 device step Z0 must be non zero bug
- * TODO: remove this when stop supporting Z0
- */
- *rxq->cr_tail = cpu_to_le16(500);

 return 0;

@@ -776,7 +827,6 @@

 iwl_pcie_free_rxq_dma(trans, rxq);
 }
- kfree(trans_pcie->rxq);

 return -ENOMEM;
 }
@@ -786,25 +836,68 @@
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 struct iwl_rb_allocator *rba = &trans_pcie->rba;
 int i, ret;
+ size_t rb_stts_size = trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210 ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status);

 if (WARN_ON(trans_pcie->rxq))
 return -EINVAL;

 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
 GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -EINVAL;
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }

 spin_lock_init(&rba->lock);
+
+ /*
+ * Allocate the driver's pointer to receive buffer status.
+ * Allocate for all queues continuously (HW requirement).
+ */
+ trans_pcie->base_rb_stts =
+ dma_alloc_coherent(trans->dev,
+ rb_stts_size * trans->num_rx_queues,
+ &trans_pcie->base_rb_stts_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->base_rb_stts) {
+ ret = -ENOMEM;
+ goto err;
+ }

 for (i = 0; i < trans->num_rx_queues; i++) {
 struct iwl_rxq *rxq = &trans_pcie->rxq[i];

+ rxq->id = i;
 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
 if (ret)
- return ret;
+ goto err;
 }
 return 0;
+
+err:
+ if (trans_pcie->base_rb_stts) {
+ dma_free_coherent(trans->dev,
+ rb_stts_size * trans->num_rx_queues,
+ trans_pcie->base_rb_stts,
+ trans_pcie->base_rb_stts_dma);
+ trans_pcie->base_rb_stts = NULL;
+ trans_pcie->base_rb_stts_dma = 0;
+ }
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
+ kfree(trans_pcie->rxq);
+
+ return ret;
 }

 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
@@ -874,30 +967,6 @@
 /* W/A for interrupt coalescing bug in 7260 and 3160 */
 if (trans->cfg->host_interrupt_operation_mode)
 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
-}
-
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
-{
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
- return;
-
- if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
- return;
-
- if (!trans->cfg->integrated)
- return;
-
- /*
- * Turn on the chicken-bits that cause MAC wakeup for RX-related
- * values.
- * This costs some power, but needed for W/A 9000 integrated A-step
- * bug where shadow registers are not in the retention list and their
- * value is lost when NIC powers down
- */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
- CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
- CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
 }

 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
@@ -977,7 +1046,7 @@
 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
 RFH_GEN_CFG_SERVICE_DMA_SNOOP |
 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
- trans->cfg->integrated ?
+ trans->trans_cfg->integrated ?
 RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
 RFH_GEN_CFG_RB_CHUNK_SIZE_128));
 /* Enable the relevant rx queues */
@@ -987,8 +1056,6 @@

 /* Set interrupt coalescing timer to default (2048 usecs) */
 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- iwl_pcie_enable_rx_wake(trans, true);
 }

 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -1030,7 +1097,7 @@
 INIT_LIST_HEAD(&rba->rbd_empty);
 spin_unlock(&rba->lock);

- /* free all first - we might be reconfigured for a different size */
+ /* free all first - we overwrite everything here */
 iwl_pcie_free_rbs_pool(trans);

 for (i = 0; i < RX_QUEUE_SIZE; i++)
@@ -1038,8 +1105,6 @@

 for (i = 0; i < trans->num_rx_queues; i++) {
 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- rxq->id = i;

 spin_lock(&rxq->lock);
 /*
@@ -1051,7 +1116,8 @@
 rxq->write = 0;
 rxq->write_actual = 0;
 memset(rxq->rb_stts, 0,
- (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
 sizeof(__le16) : sizeof(struct iwl_rb_status));

 iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1064,13 +1130,12 @@
 }

 /* move the pool to the default queue and allocator ownerships */
- queue_size = trans->cfg->mq_rx_supported ?
- MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ queue_size = trans->trans_cfg->mq_rx_supported ?
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
 allocator_pool_size = trans->num_rx_queues *
 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
 num_alloc = queue_size + allocator_pool_size;
- BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
- ARRAY_SIZE(trans_pcie->rx_pool));
+
 for (i = 0; i < num_alloc; i++) {
 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

@@ -1096,7 +1161,7 @@
 if (ret)
 return ret;

- if (trans->cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
 iwl_pcie_rx_mq_hw_init(trans);
 else
 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
@@ -1112,6 +1177,9 @@

 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
 {
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
 /*
 * We don't configure the RFH.
 * Restock will be done at alive, after firmware configured the RFH.
@@ -1124,6 +1192,9 @@
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 struct iwl_rb_allocator *rba = &trans_pcie->rba;
 int i;
+ size_t rb_stts_size = trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210 ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status);

 /*
 * if rxq is NULL, it means that nothing has been allocated,
@@ -1138,6 +1209,15 @@

 iwl_pcie_free_rbs_pool(trans);

+ if (trans_pcie->base_rb_stts) {
+ dma_free_coherent(trans->dev,
+ rb_stts_size * trans->num_rx_queues,
+ trans_pcie->base_rb_stts,
+ trans_pcie->base_rb_stts_dma);
+ trans_pcie->base_rb_stts = NULL;
+ trans_pcie->base_rb_stts_dma = 0;
+ }
+
 for (i = 0; i < trans->num_rx_queues; i++) {
 struct iwl_rxq *rxq = &trans_pcie->rxq[i];

@@ -1146,7 +1226,12 @@
 if (rxq->napi.poll)
 netif_napi_del(&rxq->napi);
 }
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
 kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
 }

 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1202,9 +1287,9 @@
 int i)
 {
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
 bool page_stolen = false;
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
 u32 offset = 0;

 if (WARN_ON(!rxb))
@@ -1218,15 +1303,12 @@
 bool reclaim;
 int index, cmd_index, len;
 struct iwl_rx_cmd_buffer rxcb = {
- ._offset = offset,
+ ._offset = rxb->offset + offset,
 ._rx_page_order = trans_pcie->rx_page_order,
 ._page = rxb->page,
 ._page_stolen = false,
 .truesize = max_len,
 };
-
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- rxcb.status = rxq->cd[i].status;

 pkt = rxb_addr(&rxcb);

@@ -1280,9 +1362,9 @@

 sequence = le16_to_cpu(pkt->hdr.sequence);
 index = SEQ_TO_INDEX(sequence);
- cmd_index = iwl_pcie_get_cmd_index(txq, index);
+ cmd_index = iwl_txq_get_cmd_index(txq, index);

- if (rxq->id == 0)
+ if (rxq->id == trans_pcie->def_rx_queue)
 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
 &rxcb);
 else
@@ -1290,7 +1372,7 @@
 &rxcb, rxq->id);

 if (reclaim) {
- kzfree(txq->entries[cmd_index].free_buf);
+ kfree_sensitive(txq->entries[cmd_index].free_buf);
 txq->entries[cmd_index].free_buf = NULL;
 }

@@ -1311,7 +1393,7 @@
 }

 page_stolen |= rxcb._page_stolen;
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
 break;
 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 }
@@ -1327,8 +1409,8 @@
  * rx_free list for reuse later. */
 if (rxb->page != NULL) {
 rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
 DMA_FROM_DEVICE);
 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 /*
@@ -1348,33 +1430,36 @@
 }

 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
- struct iwl_rxq *rxq, int i)
+ struct iwl_rxq *rxq, int i,
+ bool *join)
 {
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 struct iwl_rx_mem_buffer *rxb;
 u16 vid;

- if (!trans->cfg->mq_rx_supported) {
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+
+ if (!trans->trans_cfg->mq_rx_supported) {
 rxb = rxq->queue[i];
 rxq->queue[i] = NULL;
 return rxb;
 }

- /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
- else
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ vid = le16_to_cpu(rxq->cd[i].rbid);
+ *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else {
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ }

- if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
 goto out_err;

 rxb = trans_pcie->global_table[vid - 1];
 if (rxb->invalid)
 goto out_err;

- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+ IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);

 rxb->invalid = true;

@@ -1392,6 +1477,7 @@
 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct napi_struct *napi;
 struct iwl_rxq *rxq;
 u32 r, i, count = 0;
 bool emergency = false;
@@ -1422,19 +1508,42 @@
 u32 rb_pending_alloc =
 atomic_read(&trans_pcie->rba.req_pending) *
 RX_CLAIM_REQ_ALLOC;
+ bool join = false;

 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
 !emergency)) {
 iwl_pcie_rx_move_to_allocator(rxq, rba);
 emergency = true;
+ IWL_DEBUG_TPT(trans,
+ "RX path is in emergency. Pending allocations %d\n",
+ rb_pending_alloc);
 }

- rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+
+ rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
 if (!rxb)
 goto out;

- IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
- iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ if (unlikely(join || rxq->next_rb_is_fragment)) {
+ rxq->next_rb_is_fragment = join;
+ /*
+ * We can only get a multi-RB in the following cases:
+ * - firmware issue, sending a too big notification
+ * - sniffer mode with a large A-MSDU
+ * - large MTU frames (>2k)
+ * since the multi-RB functionality is limited to newer
+ * hardware that cannot put multiple entries into a
+ * single RB.
+ *
+ * Right now, the higher layers aren't set up to deal
+ * with that, so discard all of these.
+ */
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else {
+ iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ }

 i = (i + 1) & (rxq->queue_size - 1);

@@ -1455,8 +1564,12 @@
 count++;
 if (count == 8) {
 count = 0;
- if (rb_pending_alloc < rxq->queue_size / 3)
+ if (rb_pending_alloc < rxq->queue_size / 3) {
+ IWL_DEBUG_TPT(trans,
+ "RX path exited emergency. Pending allocations %d\n",
+ rb_pending_alloc);
 emergency = false;
+ }

 rxq->read = i;
 spin_unlock(&rxq->lock);
@@ -1470,7 +1583,7 @@
 /* Backtrack one entry */
 rxq->read = i;
 /* update cr tail with the rxq read pointer */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
 *rxq->cr_tail = cpu_to_le16(r);
 spin_unlock(&rxq->lock);

@@ -1489,8 +1602,16 @@
 if (unlikely(emergency && count))
 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

- if (rxq->napi.poll)
- napi_gro_flush(&rxq->napi, false);
+ napi = &rxq->napi;
+ if (napi->poll) {
+ napi_gro_flush(napi, false);
+
+ if (napi->rx_count) {
+ netif_receive_skb_list(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+ }
+ }

 iwl_pcie_rxq_restock(trans, rxq);
 }
@@ -1552,10 +1673,10 @@
 return;
 }

- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- if (!trans_pcie->txq[i])
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ if (!trans->txqs.txq[i])
 continue;
- del_timer(&trans_pcie->txq[i]->stuck_timer);
+ del_timer(&trans->txqs.txq[i]->stuck_timer);
 }

 /* The STATUS_FW_ERROR bit is set in this function. This must happen
@@ -1793,7 +1914,7 @@
 if (inta & CSR_INT_BIT_ALIVE) {
 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
 isr_stats->alive++;
- if (trans->cfg->gen2) {
+ if (trans->trans_cfg->gen2) {
 /*
 * We can restock, since firmware configured
 * the RFH
@@ -1960,9 +2081,8 @@
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 trans_pcie->ict_tbl =
- dma_zalloc_coherent(trans->dev, ICT_SIZE,
- &trans_pcie->ict_tbl_dma,
- GFP_KERNEL);
+ dma_alloc_coherent(trans->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
 if (!trans_pcie->ict_tbl)
 return -ENOMEM;

@@ -2108,8 +2228,7 @@

 /* Error detected by uCode */
 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
 IWL_ERR(trans,
 "Microcode SW error detected. Restarting 0x%X.\n",
 inta_fh);
@@ -2135,14 +2254,38 @@
 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
 isr_stats->alive++;
- if (trans->cfg->gen2) {
+ if (trans->trans_cfg->gen2) {
 /* We can restock, since firmware configured the RFH */
 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
 }
 }

- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
- inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+ /*
+ * In some rare cases when the HW is in a bad state, we may
+ * get this interrupt too early, when prph_info is still NULL.
+ * So make sure that it's not NULL to prevent crashing.
+ */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
+ u32 sleep_notif =
+ le32_to_cpu(trans_pcie->prph_info->sleep_notif);
+ if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
+ sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
+ IWL_DEBUG_ISR(trans,
+ "Sx interrupt: sleep notification = 0x%x\n",
+ sleep_notif);
+ trans_pcie->sx_complete = true;
+ wake_up(&trans_pcie->sx_waitq);
+ } else {
+ /* uCode wakes up after power-down sleep */
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
+
+ isr_stats->wakeup++;
+ }
+ }
+
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
 /* Reflect IML transfer status */
 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

@@ -2151,13 +2294,6 @@
 isr_stats->sw++;
 iwl_pcie_irq_handle_error(trans);
 }
- } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
- /* uCode wakes up after power-down sleep */
- IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_check_wrptr(trans);
- iwl_pcie_txq_check_wrptrs(trans);
-
- isr_stats->wakeup++;
 }

 /* Chip got too hot and stopped itself */
@@ -2175,6 +2311,7 @@
 "Hardware error detected. Restarting.\n");

 isr_stats->hw++;
+ trans->dbg.hw_error = true;
 iwl_pcie_irq_handle_error(trans);
 }
