forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-10-22
Commit: 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -239,8 +239,10 @@
 	unsigned int	num;		/* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+	struct sge *sge;
 };
-static void restart_sched(unsigned long);
+
+static void restart_sched(struct tasklet_struct *t);
 
 
 /*
@@ -378,7 +380,8 @@
 		return -ENOMEM;
 
 	pr_debug("tx_sched_init\n");
-	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+	tasklet_setup(&s->sched_tsk, restart_sched);
+	s->sge = sge;
 	sge->tx_sched = s;
 
 	for (i = 0; i < MAX_NPORTS; i++) {
@@ -509,9 +512,8 @@
 	while (q->credits--) {
 		struct freelQ_ce *ce = &q->centries[cidx];
 
-		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
-				 dma_unmap_len(ce, dma_len),
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+				 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 		dev_kfree_skb(ce->skb);
 		ce->skb = NULL;
 		if (++cidx == q->size)
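
Note: the pci_* DMA calls removed above were thin wrappers around the generic DMA API, so the conversion is mechanical: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...) and PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE become DMA_FROM_DEVICE/DMA_TO_DEVICE. A minimal sketch of the converted streaming-DMA pattern; rx_map_buf/rx_unmap_buf are illustrative names, not part of this driver:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Map a receive buffer for device writes; check for mapping failure. */
static int rx_map_buf(struct pci_dev *pdev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;
	*out = addr;
	return 0;
}

/* Undo the mapping once the CPU owns the buffer again. */
static void rx_unmap_buf(struct pci_dev *pdev, dma_addr_t addr, size_t len)
{
	dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);
}
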
@@ -529,8 +531,8 @@
 
 	if (sge->respQ.entries) {
 		size = sizeof(struct respQ_e) * sge->respQ.size;
-		pci_free_consistent(pdev, size, sge->respQ.entries,
-				    sge->respQ.dma_addr);
+		dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
+				  sge->respQ.dma_addr);
 	}
 
 	for (i = 0; i < SGE_FREELQ_N; i++) {
@@ -542,8 +544,8 @@
 		}
 		if (q->entries) {
 			size = sizeof(struct freelQ_e) * q->size;
-			pci_free_consistent(pdev, size, q->entries,
-					    q->dma_addr);
+			dma_free_coherent(&pdev->dev, size, q->entries,
+					  q->dma_addr);
 		}
 	}
 }
@@ -564,7 +566,8 @@
 		q->size = p->freelQ_size[i];
 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 		size = sizeof(struct freelQ_e) * q->size;
-		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = dma_alloc_coherent(&pdev->dev, size,
+						&q->dma_addr, GFP_KERNEL);
 		if (!q->entries)
 			goto err_no_mem;
 
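
Note: one subtlety in the coherent-allocation hunks is the gfp flag. pci_alloc_consistent() always allocated with GFP_ATOMIC, while dma_alloc_coherent() takes an explicit gfp_t; GFP_KERNEL is appropriate here because these rings are set up from sleepable process context. A hedged sketch of the converted alloc/free pair (alloc_ring/free_ring are illustrative names):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Allocate a coherent (consistent) ring visible to both CPU and NIC. */
static void *alloc_ring(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

/* Release it with the same size/handle pair the allocation returned. */
static void free_ring(struct pci_dev *pdev, size_t size, void *cpu_addr,
		      dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma);
}
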
@@ -585,8 +588,7 @@
 		sizeof(struct cpl_rx_data) +
 		sge->freelQ[!sge->jumbo_fl].dma_offset;
 
-	size = (16 * 1024) -
-	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
 
@@ -602,7 +604,8 @@
 	sge->respQ.credits = 0;
 	size = sizeof(struct respQ_e) * sge->respQ.size;
 	sge->respQ.entries =
-		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+		dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
+				   GFP_KERNEL);
 	if (!sge->respQ.entries)
 		goto err_no_mem;
 	return 0;
@@ -625,9 +628,10 @@
 	ce = &q->centries[cidx];
 	while (n--) {
 		if (likely(dma_unmap_len(ce, dma_len))) {
-			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(ce, dma_addr),
 					 dma_unmap_len(ce, dma_len),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 			if (q->sop)
 				q->sop = 0;
 		}
@@ -664,8 +668,8 @@
 		}
 		if (q->entries) {
 			size = sizeof(struct cmdQ_e) * q->size;
-			pci_free_consistent(pdev, size, q->entries,
-					    q->dma_addr);
+			dma_free_coherent(&pdev->dev, size, q->entries,
+					  q->dma_addr);
 		}
 	}
 }
@@ -690,7 +694,8 @@
 		q->stop_thres = 0;
 		spin_lock_init(&q->lock);
 		size = sizeof(struct cmdQ_e) * q->size;
-		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = dma_alloc_coherent(&pdev->dev, size,
+						&q->dma_addr, GFP_KERNEL);
 		if (!q->entries)
 			goto err_no_mem;
 
@@ -838,8 +843,8 @@
 			break;
 
 		skb_reserve(skb, q->dma_offset);
-		mapping = pci_map_single(pdev, skb->data, dma_len,
-					 PCI_DMA_FROMDEVICE);
+		mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
+					 DMA_FROM_DEVICE);
 		skb_reserve(skb, sge->rx_pkt_pad);
 
 		ce->skb = skb;
@@ -1050,15 +1055,15 @@
 			goto use_orig_buf;
 
 		skb_put(skb, len);
-		pci_dma_sync_single_for_cpu(pdev,
-					    dma_unmap_addr(ce, dma_addr),
-					    dma_unmap_len(ce, dma_len),
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&pdev->dev,
+					dma_unmap_addr(ce, dma_addr),
+					dma_unmap_len(ce, dma_len),
+					DMA_FROM_DEVICE);
 		skb_copy_from_linear_data(ce->skb, skb->data, len);
-		pci_dma_sync_single_for_device(pdev,
-					       dma_unmap_addr(ce, dma_addr),
-					       dma_unmap_len(ce, dma_len),
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&pdev->dev,
+					   dma_unmap_addr(ce, dma_addr),
+					   dma_unmap_len(ce, dma_len),
+					   DMA_FROM_DEVICE);
 		recycle_fl_buf(fl, fl->cidx);
 		return skb;
 	}
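
Note: the sync pair above implements a copy-break: small packets are copied into a fresh skb so the original DMA buffer can be recycled. Ownership has to bounce correctly: sync to the CPU before reading the buffer, then back to the device before reuse. A condensed sketch of that pattern, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Copy `len` bytes out of a device-owned RX buffer, then hand it back. */
static void copy_break(struct device *dev, dma_addr_t addr, size_t len,
		       void *dst, const void *src)
{
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);	/* CPU owns buffer */
	memcpy(dst, src, len);
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);	/* device owns it again */
}
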
@@ -1069,8 +1074,8 @@
 		return NULL;
 	}
 
-	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
-			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 	skb = ce->skb;
 	prefetch(skb->data);
 
@@ -1092,8 +1097,9 @@
 	struct freelQ_ce *ce = &fl->centries[fl->cidx];
 	struct sk_buff *skb = ce->skb;
 
-	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
-				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&adapter->pdev->dev,
+				dma_unmap_addr(ce, dma_addr),
+				dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 	pr_err("%s: unexpected offload packet, cmd %u\n",
 	       adapter->name, *skb->data);
 	recycle_fl_buf(fl, fl->cidx);
@@ -1210,8 +1216,8 @@
 	e = e1 = &q->entries[pidx];
 	ce = &q->centries[pidx];
 
-	mapping = pci_map_single(adapter->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&adapter->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	desc_mapping = mapping;
 	desc_len = skb_headlen(skb);
@@ -1302,9 +1308,10 @@
  *	Called from tasklet. Checks the scheduler for any
  *	pending skbs that can be sent.
  */
-static void restart_sched(unsigned long arg)
+static void restart_sched(struct tasklet_struct *t)
 {
-	struct sge *sge = (struct sge *) arg;
+	struct sched *s = from_tasklet(s, t, sched_tsk);
+	struct sge *sge = s->sge;
 	struct adapter *adapter = sge->adapter;
 	struct cmdQ *q = &sge->cmdQ[0];
 	struct sk_buff *skb;
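
Note: the restart_sched() hunk completes the tasklet conversion: the callback now receives the tasklet pointer and recovers its container with from_tasklet() (a container_of() wrapper keyed on the tasklet member), which is why the patch adds the sge back-pointer to struct sched. A generic sketch of the whole pattern, using made-up names rather than the driver's:

#include <linux/interrupt.h>

struct worker {
	struct tasklet_struct tsk;
	void *owner;			/* back-pointer, like the new s->sge */
};

/* New-style callback: container recovered from the tasklet member itself. */
static void worker_run(struct tasklet_struct *t)
{
	struct worker *w = from_tasklet(w, t, tsk);

	/* ... use w->owner where the old callback cast its unsigned long arg ... */
}

static void worker_init(struct worker *w, void *owner)
{
	w->owner = owner;
	tasklet_setup(&w->tsk, worker_run);	/* was: tasklet_init(&w->tsk, fn, (unsigned long)owner) */
}
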