forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -239,8 +239,10 @@
 	unsigned int num;	/* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+	struct sge *sge;
 };
-static void restart_sched(unsigned long);
+
+static void restart_sched(struct tasklet_struct *t);
 
 
 /*
@@ -378,7 +380,8 @@
 		return -ENOMEM;
 
 	pr_debug("tx_sched_init\n");
-	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+	tasklet_setup(&s->sched_tsk, restart_sched);
+	s->sge = sge;
 	sge->tx_sched = s;
 
 	for (i = 0; i < MAX_NPORTS; i++) {
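The two added lines are the registration half of the new-style tasklet API: tasklet_setup() takes a callback of type void (*)(struct tasklet_struct *), so the context that tasklet_init() used to smuggle through the unsigned long cookie (here the sge pointer) has to live somewhere the callback can reach it, hence the new s->sge assignment. A minimal sketch of the pattern, with my_sched/my_restart as hypothetical stand-ins:

#include <linux/interrupt.h>

struct my_sched {
	struct tasklet_struct sched_tsk;
	void *owner;		/* context formerly passed as the unsigned long arg */
};

static void my_restart(struct tasklet_struct *t)
{
	/* State is recovered from 't' via from_tasklet(); see the
	 * restart_sched() hunk further down for the driver's version. */
}

static void my_sched_init(struct my_sched *s, void *owner)
{
	/* Old style: tasklet_init(&s->sched_tsk, fn, (unsigned long)owner); */
	tasklet_setup(&s->sched_tsk, my_restart);
	s->owner = owner;
}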
@@ -509,9 +512,8 @@
 	while (q->credits--) {
 		struct freelQ_ce *ce = &q->centries[cidx];
 
-		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
-				 dma_unmap_len(ce, dma_len),
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+				 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 		dev_kfree_skb(ce->skb);
 		ce->skb = NULL;
 		if (++cidx == q->size)
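This hunk, like the other unmap/sync/free conversions below, is mechanical: the legacy pci_* DMA helpers were thin wrappers that forwarded to the generic DMA API using &pdev->dev and the matching direction constants. A sketch of the correspondence being relied on, shaped roughly like the removed compatibility wrapper (the helper name here is made up):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Approximate shape of the removed wrapper: pci_unmap_single(pdev, addr,
 * len, PCI_DMA_FROMDEVICE) was dma_unmap_single(&pdev->dev, addr, len,
 * DMA_FROM_DEVICE); the PCI_DMA_* and DMA_* constants line up one-to-one. */
static inline void compat_pci_unmap_single(struct pci_dev *pdev,
					   dma_addr_t addr, size_t len,
					   int direction)
{
	dma_unmap_single(&pdev->dev, addr, len,
			 (enum dma_data_direction)direction);
}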
@@ -529,8 +531,8 @@
 
 	if (sge->respQ.entries) {
 		size = sizeof(struct respQ_e) * sge->respQ.size;
-		pci_free_consistent(pdev, size, sge->respQ.entries,
-				    sge->respQ.dma_addr);
+		dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
+				  sge->respQ.dma_addr);
 	}
 
 	for (i = 0; i < SGE_FREELQ_N; i++) {
@@ -542,8 +544,8 @@
 		}
 		if (q->entries) {
 			size = sizeof(struct freelQ_e) * q->size;
-			pci_free_consistent(pdev, size, q->entries,
-					    q->dma_addr);
+			dma_free_coherent(&pdev->dev, size, q->entries,
+					  q->dma_addr);
 		}
 	}
 }
@@ -564,7 +566,8 @@
 		q->size = p->freelQ_size[i];
 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 		size = sizeof(struct freelQ_e) * q->size;
-		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = dma_alloc_coherent(&pdev->dev, size,
+						&q->dma_addr, GFP_KERNEL);
 		if (!q->entries)
 			goto err_no_mem;
 
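The allocation hunks are the one place the conversion is not strictly one-to-one: pci_alloc_consistent() always allocated with GFP_ATOMIC, whereas the replacement passes GFP_KERNEL, which is acceptable here because the rings are set up from process context where sleeping is allowed. A small illustrative helper (hypothetical name) showing the converted shape:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper showing the converted allocation pattern. */
static void *alloc_ring(struct pci_dev *pdev, size_t size,
			dma_addr_t *dma_addr)
{
	/* pci_alloc_consistent() forwarded to dma_alloc_coherent() with an
	 * implied GFP_ATOMIC; the explicit GFP_KERNEL below may sleep and is
	 * therefore only valid in process context, as during ring setup. */
	return dma_alloc_coherent(&pdev->dev, size, dma_addr, GFP_KERNEL);
}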
@@ -585,8 +588,7 @@
 		sizeof(struct cpl_rx_data) +
 		sge->freelQ[!sge->jumbo_fl].dma_offset;
 
-	size = (16 * 1024) -
-	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
 
@@ -602,7 +604,8 @@
 	sge->respQ.credits = 0;
 	size = sizeof(struct respQ_e) * sge->respQ.size;
 	sge->respQ.entries =
-		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+		dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
+				   GFP_KERNEL);
 	if (!sge->respQ.entries)
 		goto err_no_mem;
 	return 0;
@@ -625,9 +628,10 @@
 	ce = &q->centries[cidx];
 	while (n--) {
 		if (likely(dma_unmap_len(ce, dma_len))) {
-			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(ce, dma_addr),
 					 dma_unmap_len(ce, dma_len),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 			if (q->sop)
 				q->sop = 0;
 		}
@@ -664,8 +668,8 @@
 		}
 		if (q->entries) {
 			size = sizeof(struct cmdQ_e) * q->size;
-			pci_free_consistent(pdev, size, q->entries,
-					    q->dma_addr);
+			dma_free_coherent(&pdev->dev, size, q->entries,
+					  q->dma_addr);
 		}
 	}
 }
@@ -690,7 +694,8 @@
 		q->stop_thres = 0;
 		spin_lock_init(&q->lock);
 		size = sizeof(struct cmdQ_e) * q->size;
-		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = dma_alloc_coherent(&pdev->dev, size,
+						&q->dma_addr, GFP_KERNEL);
 		if (!q->entries)
 			goto err_no_mem;
 
@@ -838,8 +843,8 @@
 			break;
 
 		skb_reserve(skb, q->dma_offset);
-		mapping = pci_map_single(pdev, skb->data, dma_len,
-					 PCI_DMA_FROMDEVICE);
+		mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
+					 DMA_FROM_DEVICE);
 		skb_reserve(skb, sge->rx_pkt_pad);
 
 		ce->skb = skb;
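The RX-buffer mapping keeps the driver's existing behaviour of not checking the mapping result; with the generic API, the usual idiom for new code is to test it with dma_mapping_error(). A hypothetical helper, not part of this patch, showing that idiom:

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical RX-buffer mapping with the error check that new code
 * using dma_map_single() would normally add (not added by this diff). */
static int map_rx_buf(struct device *dev, struct sk_buff *skb,
		      unsigned int dma_len, dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, skb->data, dma_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;
	return 0;
}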
@@ -935,10 +940,11 @@
 /*
  * SGE 'Error' interrupt handler
  */
-int t1_sge_intr_error_handler(struct sge *sge)
+bool t1_sge_intr_error_handler(struct sge *sge)
 {
 	struct adapter *adapter = sge->adapter;
 	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+	bool wake = false;
 
 	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 		cause &= ~F_PACKET_TOO_BIG;
@@ -962,11 +968,14 @@
 		sge->stats.pkt_mismatch++;
 		pr_alert("%s: SGE packet mismatch\n", adapter->name);
 	}
-	if (cause & SGE_INT_FATAL)
-		t1_fatal_err(adapter);
+	if (cause & SGE_INT_FATAL) {
+		t1_interrupts_disable(adapter);
+		adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+		wake = true;
+	}
 
 	writel(cause, adapter->regs + A_SG_INT_CAUSE);
-	return 0;
+	return wake;
 }
 
 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
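The fatal-error path no longer calls t1_fatal_err() from hard-IRQ context: it masks the chip's interrupts, latches F_PL_INTR_SGE_ERR into adapter->pending_thread_intr, and asks its caller to wake the IRQ thread. That caller lives outside this file, so the following is only a sketch of how a slow-path dispatcher might turn the returned bool into IRQ_WAKE_THREAD (everything except t1_sge_intr_error_handler() and the F_PL_INTR_SGE_ERR bit is assumed):

#include <linux/interrupt.h>

/* Sketch only: assumes the driver's internal struct adapter; this is
 * not the driver's real t1_slow_intr_handler(). */
static irqreturn_t slow_intr_dispatch(struct adapter *adapter, u32 cause)
{
	irqreturn_t ret = IRQ_HANDLED;

	if (cause & F_PL_INTR_SGE_ERR) {
		/* The handler masked the chip and queued work for the thread. */
		if (t1_sge_intr_error_handler(adapter->sge))
			ret = IRQ_WAKE_THREAD;
	}

	return ret;
}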
@@ -1050,15 +1059,15 @@
 			goto use_orig_buf;
 
 		skb_put(skb, len);
-		pci_dma_sync_single_for_cpu(pdev,
-					    dma_unmap_addr(ce, dma_addr),
-					    dma_unmap_len(ce, dma_len),
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&pdev->dev,
+					dma_unmap_addr(ce, dma_addr),
+					dma_unmap_len(ce, dma_len),
+					DMA_FROM_DEVICE);
 		skb_copy_from_linear_data(ce->skb, skb->data, len);
-		pci_dma_sync_single_for_device(pdev,
-					       dma_unmap_addr(ce, dma_addr),
-					       dma_unmap_len(ce, dma_len),
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&pdev->dev,
+					   dma_unmap_addr(ce, dma_addr),
+					   dma_unmap_len(ce, dma_len),
+					   DMA_FROM_DEVICE);
 		recycle_fl_buf(fl, fl->cidx);
 		return skb;
 	}
@@ -1069,8 +1078,8 @@
 		return NULL;
 	}
 
-	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
-			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 	skb = ce->skb;
 	prefetch(skb->data);
 
@@ -1092,8 +1101,9 @@
 	struct freelQ_ce *ce = &fl->centries[fl->cidx];
 	struct sk_buff *skb = ce->skb;
 
-	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
-			    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&adapter->pdev->dev,
+				dma_unmap_addr(ce, dma_addr),
+				dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 	pr_err("%s: unexpected offload packet, cmd %u\n",
 	       adapter->name, *skb->data);
 	recycle_fl_buf(fl, fl->cidx);
@@ -1210,8 +1220,8 @@
 	e = e1 = &q->entries[pidx];
 	ce = &q->centries[pidx];
 
-	mapping = pci_map_single(adapter->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&adapter->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	desc_mapping = mapping;
 	desc_len = skb_headlen(skb);
@@ -1302,9 +1312,10 @@
  * Called from tasklet. Checks the scheduler for any
  * pending skbs that can be sent.
  */
-static void restart_sched(unsigned long arg)
+static void restart_sched(struct tasklet_struct *t)
 {
-	struct sge *sge = (struct sge *) arg;
+	struct sched *s = from_tasklet(s, t, sched_tsk);
+	struct sge *sge = s->sge;
 	struct adapter *adapter = sge->adapter;
 	struct cmdQ *q = &sge->cmdQ[0];
 	struct sk_buff *skb;
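On the callback side, from_tasklet() replaces the old cast of the unsigned long argument; it is a thin wrapper around container_of(), mirroring its definition in <linux/interrupt.h>, which is why struct sched gained the sge back-pointer in the first hunk:

/* Roughly how the helper is defined: recover the structure that embeds
 * the tasklet from the tasklet pointer handed to the callback. */
#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

So restart_sched() first recovers its struct sched from the embedded sched_tsk member, then follows s->sge to reach the SGE state that the old code received directly as arg.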
@@ -1612,11 +1623,46 @@
 	return work_done;
 }
 
+irqreturn_t t1_interrupt_thread(int irq, void *data)
+{
+	struct adapter *adapter = data;
+	u32 pending_thread_intr;
+
+	spin_lock_irq(&adapter->async_lock);
+	pending_thread_intr = adapter->pending_thread_intr;
+	adapter->pending_thread_intr = 0;
+	spin_unlock_irq(&adapter->async_lock);
+
+	if (!pending_thread_intr)
+		return IRQ_NONE;
+
+	if (pending_thread_intr & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr_handler(adapter);
+
+	/* This error is fatal, interrupts remain off */
+	if (pending_thread_intr & F_PL_INTR_SGE_ERR) {
+		pr_alert("%s: encountered fatal error, operation suspended\n",
+			 adapter->name);
+		t1_sge_stop(adapter->sge);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
 	struct sge *sge = adapter->sge;
-	int handled;
+	irqreturn_t handled;
 
 	if (likely(responses_pending(adapter))) {
 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
@@ -1638,10 +1684,10 @@
 	handled = t1_slow_intr_handler(adapter);
 	spin_unlock(&adapter->async_lock);
 
-	if (!handled)
+	if (handled == IRQ_NONE)
 		sge->stats.unhandled_irqs++;
 
-	return IRQ_RETVAL(handled != 0);
+	return handled;
 }
 
 /*
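t1_interrupt_thread() only takes effect once it is registered as the threaded half of the device interrupt, and that registration is not part of this file. The switch of t1_interrupt() to returning `handled` directly suggests t1_slow_intr_handler() can now report IRQ_WAKE_THREAD, which is what schedules the threaded handler. A hedged sketch of the likely wiring in the probe/open path (the helper name and the IRQF_SHARED flag are assumptions):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: register t1_interrupt as the hard handler and
 * t1_interrupt_thread as the threaded handler; when the hard handler
 * returns IRQ_WAKE_THREAD, the thread runs in process context where it
 * may sleep and take the slow paths moved out of hard-IRQ context above. */
static int register_sge_irq(struct adapter *adapter, struct pci_dev *pdev)
{
	return request_threaded_irq(pdev->irq, t1_interrupt,
				    t1_interrupt_thread, IRQF_SHARED,
				    adapter->name, adapter);
}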