2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/amd/pcnet32.c
@@ -24,12 +24,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define DRV_NAME	"pcnet32"
-#define DRV_VERSION	"1.35"
 #define DRV_RELDATE	"21.Apr.2008"
 #define PFX		DRV_NAME ": "
-
-static const char *const version =
-    DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -254,7 +250,7 @@
 
 /*
  * The first field of pcnet32_private is read by the ethernet device
- * so the structure should be allocated using pci_alloc_consistent().
+ * so the structure should be allocated using dma_alloc_coherent().
  */
 struct pcnet32_private {
 	struct pcnet32_init_block *init_block;
@@ -262,7 +258,7 @@
 	struct pcnet32_rx_head	*rx_ring;
 	struct pcnet32_tx_head	*tx_ring;
 	dma_addr_t		init_dma_addr;/* DMA address of beginning of the init block,
-				   returned by pci_alloc_consistent */
+				   returned by dma_alloc_coherent */
 	struct pci_dev		*pci_dev;
 	const char		*name;
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
@@ -314,7 +310,7 @@
 static int pcnet32_init_ring(struct net_device *);
 static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
 				      struct net_device *);
-static void pcnet32_tx_timeout(struct net_device *dev);
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
 static irqreturn_t pcnet32_interrupt(int, void *);
 static int pcnet32_close(struct net_device *);
 static struct net_device_stats *pcnet32_get_stats(struct net_device *);
@@ -489,9 +485,9 @@
 	pcnet32_purge_tx_ring(dev);
 
 	new_tx_ring =
-	    pci_zalloc_consistent(lp->pci_dev,
-				  sizeof(struct pcnet32_tx_head) * entries,
-				  &new_ring_dma_addr);
+		dma_alloc_coherent(&lp->pci_dev->dev,
+				   sizeof(struct pcnet32_tx_head) * entries,
+				   &new_ring_dma_addr, GFP_ATOMIC);
 	if (new_tx_ring == NULL)
 		return;
 
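Note: this hunk and the ones that follow apply one recurring conversion: the legacy pci_*_consistent() wrappers are replaced by the generic DMA API operating on &pci_dev->dev. A minimal sketch of the correspondence, using hypothetical example_* helper names that are not part of the driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Sketch only, not part of the patch.  pci_zalloc_consistent(pdev, size, &dma)
 * maps to dma_alloc_coherent(&pdev->dev, size, &dma, gfp): the coherent
 * allocation is already zeroed, and the GFP flag becomes explicit
 * (GFP_ATOMIC in the hunk above, presumably because the ring is
 * reallocated while the driver's spinlock is held).
 */
static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_ATOMIC);
}

static void example_free_ring(struct pci_dev *pdev, size_t size,
			      void *ring, dma_addr_t dma)
{
	/* counterpart of the old pci_free_consistent() */
	dma_free_coherent(&pdev->dev, size, ring, dma);
}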
@@ -505,9 +501,9 @@
 
 	kfree(lp->tx_skbuff);
 	kfree(lp->tx_dma_addr);
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
-			    lp->tx_ring, lp->tx_ring_dma_addr);
+	dma_free_coherent(&lp->pci_dev->dev,
+			  sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+			  lp->tx_ring, lp->tx_ring_dma_addr);
 
 	lp->tx_ring_size = entries;
 	lp->tx_mod_mask = lp->tx_ring_size - 1;
@@ -521,10 +517,9 @@
 free_new_lists:
 	kfree(new_dma_addr_list);
 free_new_tx_ring:
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_tx_head) * entries,
-			    new_tx_ring,
-			    new_ring_dma_addr);
+	dma_free_coherent(&lp->pci_dev->dev,
+			  sizeof(struct pcnet32_tx_head) * entries,
+			  new_tx_ring, new_ring_dma_addr);
 }
 
 /*
@@ -549,9 +544,9 @@
 	unsigned int entries = BIT(size);
 
 	new_rx_ring =
-	    pci_zalloc_consistent(lp->pci_dev,
-				  sizeof(struct pcnet32_rx_head) * entries,
-				  &new_ring_dma_addr);
+		dma_alloc_coherent(&lp->pci_dev->dev,
+				   sizeof(struct pcnet32_rx_head) * entries,
+				   &new_ring_dma_addr, GFP_ATOMIC);
 	if (new_rx_ring == NULL)
 		return;
 
@@ -584,10 +579,9 @@
 		skb_reserve(rx_skbuff, NET_IP_ALIGN);
 
 		new_dma_addr_list[new] =
-		    pci_map_single(lp->pci_dev, rx_skbuff->data,
-				   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(lp->pci_dev,
-					  new_dma_addr_list[new])) {
+			dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
+				       PKT_BUF_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
 			netif_err(lp, drv, dev, "%s dma mapping failed\n",
 				  __func__);
 			dev_kfree_skb(new_skb_list[new]);
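Note: the streaming-DMA calls follow the same substitution pattern throughout the rest of the patch. A minimal sketch with hypothetical helper names, assuming a PCI device and an already-allocated skb:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/*
 * Sketch only, not part of the patch.  pci_map_single()/PCI_DMA_FROMDEVICE/
 * pci_dma_mapping_error()/pci_unmap_single() become dma_map_single()/
 * DMA_FROM_DEVICE/dma_mapping_error()/dma_unmap_single() on &pdev->dev.
 */
static int example_map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
				 size_t len, dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

static void example_unmap_rx_buffer(struct pci_dev *pdev, dma_addr_t dma,
				    size_t len)
{
	dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
}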
@@ -600,22 +594,20 @@
 	/* and free any unneeded buffers */
 	for (; new < lp->rx_ring_size; new++) {
 		if (lp->rx_skbuff[new]) {
-			if (!pci_dma_mapping_error(lp->pci_dev,
-						   lp->rx_dma_addr[new]))
-				pci_unmap_single(lp->pci_dev,
+			if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
+				dma_unmap_single(&lp->pci_dev->dev,
 						 lp->rx_dma_addr[new],
 						 PKT_BUF_SIZE,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			dev_kfree_skb(lp->rx_skbuff[new]);
 		}
 	}
 
 	kfree(lp->rx_skbuff);
 	kfree(lp->rx_dma_addr);
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) *
-			    lp->rx_ring_size, lp->rx_ring,
-			    lp->rx_ring_dma_addr);
+	dma_free_coherent(&lp->pci_dev->dev,
+			  sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+			  lp->rx_ring, lp->rx_ring_dma_addr);
 
 	lp->rx_ring_size = entries;
 	lp->rx_mod_mask = lp->rx_ring_size - 1;
@@ -629,12 +621,11 @@
 free_all_new:
 	while (--new >= lp->rx_ring_size) {
 		if (new_skb_list[new]) {
-			if (!pci_dma_mapping_error(lp->pci_dev,
-						   new_dma_addr_list[new]))
-				pci_unmap_single(lp->pci_dev,
+			if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
+				dma_unmap_single(&lp->pci_dev->dev,
 						 new_dma_addr_list[new],
 						 PKT_BUF_SIZE,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			dev_kfree_skb(new_skb_list[new]);
 		}
 	}
@@ -642,10 +633,9 @@
 free_new_lists:
 	kfree(new_dma_addr_list);
 free_new_rx_ring:
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) * entries,
-			    new_rx_ring,
-			    new_ring_dma_addr);
+	dma_free_coherent(&lp->pci_dev->dev,
+			  sizeof(struct pcnet32_rx_head) * entries,
+			  new_rx_ring, new_ring_dma_addr);
 }
 
 static void pcnet32_purge_rx_ring(struct net_device *dev)
@@ -658,12 +648,11 @@
 		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->rx_skbuff[i]) {
-			if (!pci_dma_mapping_error(lp->pci_dev,
-						   lp->rx_dma_addr[i]))
-				pci_unmap_single(lp->pci_dev,
+			if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
+				dma_unmap_single(&lp->pci_dev->dev,
 						 lp->rx_dma_addr[i],
 						 PKT_BUF_SIZE,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			dev_kfree_skb_any(lp->rx_skbuff[i]);
 		}
 		lp->rx_skbuff[i] = NULL;
@@ -809,7 +798,6 @@
 	struct pcnet32_private *lp = netdev_priv(dev);
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	if (lp->pci_dev)
 		strlcpy(info->bus_info, pci_name(lp->pci_dev),
 			sizeof(info->bus_info));
@@ -1041,9 +1029,9 @@
 			*packet++ = i;
 
 		lp->tx_dma_addr[x] =
-		    pci_map_single(lp->pci_dev, skb->data, skb->len,
-				   PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+			dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
 			netif_printk(lp, hw, KERN_DEBUG, dev,
 				     "DMA mapping error at line: %d!\n",
 				     __LINE__);
@@ -1231,21 +1219,21 @@
 		 */
 		if (newskb) {
 			skb_reserve(newskb, NET_IP_ALIGN);
-			new_dma_addr = pci_map_single(lp->pci_dev,
+			new_dma_addr = dma_map_single(&lp->pci_dev->dev,
 						      newskb->data,
 						      PKT_BUF_SIZE,
-						      PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+						      DMA_FROM_DEVICE);
+			if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
 				netif_err(lp, rx_err, dev,
 					  "DMA mapping error.\n");
 				dev_kfree_skb(newskb);
 				skb = NULL;
 			} else {
 				skb = lp->rx_skbuff[entry];
-				pci_unmap_single(lp->pci_dev,
+				dma_unmap_single(&lp->pci_dev->dev,
 						 lp->rx_dma_addr[entry],
 						 PKT_BUF_SIZE,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 				skb_put(skb, pkt_len);
 				lp->rx_skbuff[entry] = newskb;
 				lp->rx_dma_addr[entry] = new_dma_addr;
@@ -1264,17 +1252,15 @@
 	if (!rx_in_place) {
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, pkt_len);	/* Make room */
-		pci_dma_sync_single_for_cpu(lp->pci_dev,
-					    lp->rx_dma_addr[entry],
-					    pkt_len,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&lp->pci_dev->dev,
+					lp->rx_dma_addr[entry], pkt_len,
+					DMA_FROM_DEVICE);
 		skb_copy_to_linear_data(skb,
 					(unsigned char *)(lp->rx_skbuff[entry]->data),
 					pkt_len);
-		pci_dma_sync_single_for_device(lp->pci_dev,
-					       lp->rx_dma_addr[entry],
-					       pkt_len,
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&lp->pci_dev->dev,
+					   lp->rx_dma_addr[entry], pkt_len,
+					   DMA_FROM_DEVICE);
 	}
 	dev->stats.rx_bytes += skb->len;
 	skb->protocol = eth_type_trans(skb, dev);
@@ -1363,10 +1349,10 @@
 
 		/* We must free the original skb */
 		if (lp->tx_skbuff[entry]) {
-			pci_unmap_single(lp->pci_dev,
+			dma_unmap_single(&lp->pci_dev->dev,
 					 lp->tx_dma_addr[entry],
-					 lp->tx_skbuff[entry]->
-					 len, PCI_DMA_TODEVICE);
+					 lp->tx_skbuff[entry]->len,
+					 DMA_TO_DEVICE);
 			dev_kfree_skb_any(lp->tx_skbuff[entry]);
 			lp->tx_skbuff[entry] = NULL;
 			lp->tx_dma_addr[entry] = 0;
@@ -1555,7 +1541,7 @@
 		goto err_disable_dev;
 	}
 
-	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+	err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
 	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
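Note: pci_set_dma_mask() was a thin inline wrapper around the generic call, so this is a direct substitution. A minimal sketch with a hypothetical helper name; DMA_BIT_MASK(32) stands in for the driver's PCNET32_DMA_MASK:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only, not part of the patch. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
}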
@@ -1840,12 +1826,13 @@
 
 	dev->base_addr = ioaddr;
 	lp = netdev_priv(dev);
-	/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
-	lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
-					      &lp->init_dma_addr);
+	/* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */
+	lp->init_block = dma_alloc_coherent(&pdev->dev,
+					    sizeof(*lp->init_block),
+					    &lp->init_dma_addr, GFP_KERNEL);
 	if (!lp->init_block) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
-			pr_err("Consistent memory allocation failed\n");
+			pr_err("Coherent memory allocation failed\n");
 		ret = -ENOMEM;
 		goto err_free_netdev;
 	}
@@ -2004,8 +1991,8 @@
 
 err_free_ring:
 	pcnet32_free_ring(dev);
-	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
-			    lp->init_block, lp->init_dma_addr);
+	dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+			  lp->init_block, lp->init_dma_addr);
 err_free_netdev:
 	free_netdev(dev);
 err_release_region:
@@ -2018,21 +2005,19 @@
 {
 	struct pcnet32_private *lp = netdev_priv(dev);
 
-	lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
-					   sizeof(struct pcnet32_tx_head) *
-					   lp->tx_ring_size,
-					   &lp->tx_ring_dma_addr);
+	lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+					 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+					 &lp->tx_ring_dma_addr, GFP_KERNEL);
 	if (lp->tx_ring == NULL) {
-		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+		netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
 		return -ENOMEM;
 	}
 
-	lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
-					   sizeof(struct pcnet32_rx_head) *
-					   lp->rx_ring_size,
-					   &lp->rx_ring_dma_addr);
+	lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+					 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+					 &lp->rx_ring_dma_addr, GFP_KERNEL);
 	if (lp->rx_ring == NULL) {
-		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+		netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
 		return -ENOMEM;
 	}
 
@@ -2076,18 +2061,16 @@
 	lp->rx_dma_addr = NULL;
 
 	if (lp->tx_ring) {
-		pci_free_consistent(lp->pci_dev,
-				    sizeof(struct pcnet32_tx_head) *
-				    lp->tx_ring_size, lp->tx_ring,
-				    lp->tx_ring_dma_addr);
+		dma_free_coherent(&lp->pci_dev->dev,
+				  sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+				  lp->tx_ring, lp->tx_ring_dma_addr);
 		lp->tx_ring = NULL;
 	}
 
 	if (lp->rx_ring) {
-		pci_free_consistent(lp->pci_dev,
-				    sizeof(struct pcnet32_rx_head) *
-				    lp->rx_ring_size, lp->rx_ring,
-				    lp->rx_ring_dma_addr);
+		dma_free_coherent(&lp->pci_dev->dev,
+				  sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+				  lp->rx_ring, lp->rx_ring_dma_addr);
 		lp->rx_ring = NULL;
 	}
 }
@@ -2348,12 +2331,11 @@
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->tx_skbuff[i]) {
-			if (!pci_dma_mapping_error(lp->pci_dev,
-						   lp->tx_dma_addr[i]))
-				pci_unmap_single(lp->pci_dev,
+			if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
+				dma_unmap_single(&lp->pci_dev->dev,
 						 lp->tx_dma_addr[i],
 						 lp->tx_skbuff[i]->len,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 			dev_kfree_skb_any(lp->tx_skbuff[i]);
 		}
 		lp->tx_skbuff[i] = NULL;
@@ -2388,10 +2370,9 @@
 		rmb();
 		if (lp->rx_dma_addr[i] == 0) {
 			lp->rx_dma_addr[i] =
-			    pci_map_single(lp->pci_dev, rx_skbuff->data,
-					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(lp->pci_dev,
-						  lp->rx_dma_addr[i])) {
+				dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
+					       PKT_BUF_SIZE, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
 				/* there is not much we can do at this point */
 				netif_err(lp, drv, dev,
 					  "%s pci dma mapping error\n",
@@ -2456,7 +2437,7 @@
 	lp->a->write_csr(ioaddr, CSR0, csr0_bits);
 }
 
-static void pcnet32_tx_timeout(struct net_device *dev)
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct pcnet32_private *lp = netdev_priv(dev);
 	unsigned long ioaddr = dev->base_addr, flags;
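Note: the added txqueue argument matches the two-parameter .ndo_tx_timeout prototype used by newer kernels; the way the handler is wired into net_device_ops is unchanged. A minimal sketch with hypothetical example_* names:

#include <linux/netdevice.h>

/* Sketch only, not part of the patch: a handler with the new prototype. */
static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit queue %u timed out\n", txqueue);
	/* device-specific reset/restart logic would go here */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_tx_timeout	= example_tx_timeout,
};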
@@ -2529,8 +2510,9 @@
 	lp->tx_ring[entry].misc = 0x00000000;
 
 	lp->tx_dma_addr[entry] =
-	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+		dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+			       DMA_TO_DEVICE);
+	if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
 		dev_kfree_skb_any(skb);
 		dev->stats.tx_dropped++;
 		goto drop_packet;
@@ -2919,30 +2901,27 @@
 	mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
 }
 
-static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pcnet32_pm_suspend(struct device *device_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device_d);
 
 	if (netif_running(dev)) {
 		netif_device_detach(dev);
 		pcnet32_close(dev);
 	}
-	pci_save_state(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
 	return 0;
 }
 
-static int pcnet32_pm_resume(struct pci_dev *pdev)
+static int __maybe_unused pcnet32_pm_resume(struct device *device_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
+	struct net_device *dev = dev_get_drvdata(device_d);
 
 	if (netif_running(dev)) {
 		pcnet32_open(dev);
 		netif_device_attach(dev);
 	}
+
 	return 0;
 }
 
@@ -2956,20 +2935,23 @@
 		unregister_netdev(dev);
 		pcnet32_free_ring(dev);
 		release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
-		pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
-				    lp->init_block, lp->init_dma_addr);
+		dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+				  lp->init_block, lp->init_dma_addr);
 		free_netdev(dev);
 		pci_disable_device(pdev);
 	}
 }
+
+static SIMPLE_DEV_PM_OPS(pcnet32_pm_ops, pcnet32_pm_suspend, pcnet32_pm_resume);
 
 static struct pci_driver pcnet32_driver = {
 	.name = DRV_NAME,
 	.probe = pcnet32_probe_pci,
 	.remove = pcnet32_remove_one,
 	.id_table = pcnet32_pci_tbl,
-	.suspend = pcnet32_pm_suspend,
-	.resume = pcnet32_pm_resume,
+	.driver = {
+		.pm = &pcnet32_pm_ops,
+	},
 };
 
 /* An additional parameter that may be passed in... */
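Note: with dev_pm_ops, the PCI core saves/restores PCI config space and performs the power-state transitions, which is why the explicit pci_save_state()/pci_set_power_state()/pci_restore_state() calls are dropped above. A minimal sketch of the wiring, with hypothetical example_* names:

#include <linux/pci.h>
#include <linux/pm.h>

/* Sketch only, not part of the patch. */
static int __maybe_unused example_suspend(struct device *dev)
{
	/* quiesce the device; bus-level PCI state handling is done by the core */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* reinitialise the device as needed */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
	.name	= "example",
	.driver	= {
		.pm = &example_pm_ops,
	},
};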
@@ -3007,8 +2989,6 @@
 
 static int __init pcnet32_init_module(void)
 {
-	pr_info("%s", version);
-
 	pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
 
 	if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
@@ -3038,8 +3018,8 @@
 		unregister_netdev(pcnet32_dev);
 		pcnet32_free_ring(pcnet32_dev);
 		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
-		pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
-				    lp->init_block, lp->init_dma_addr);
+		dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+				  lp->init_block, lp->init_dma_addr);
 		free_netdev(pcnet32_dev);
 		pcnet32_dev = next_dev;
 	}