.. | ..
24 | 24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 | 25 |
26 | 26 | #define DRV_NAME "pcnet32"
27 | | -#define DRV_VERSION "1.35"
28 | 27 | #define DRV_RELDATE "21.Apr.2008"
29 | 28 | #define PFX DRV_NAME ": "
30 | | -
31 | | -static const char *const version =
32 | | - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
33 | 29 |
34 | 30 | #include <linux/module.h>
35 | 31 | #include <linux/kernel.h>
.. | ..
254 | 250 |
255 | 251 | /*
256 | 252 | * The first field of pcnet32_private is read by the ethernet device
257 | | - * so the structure should be allocated using pci_alloc_consistent().
| 253 | + * so the structure should be allocated using dma_alloc_coherent().
258 | 254 | */
259 | 255 | struct pcnet32_private {
260 | 256 | struct pcnet32_init_block *init_block;
.. | ..
262 | 258 | struct pcnet32_rx_head *rx_ring;
263 | 259 | struct pcnet32_tx_head *tx_ring;
264 | 260 | dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
265 | | - returned by pci_alloc_consistent */
| 261 | + returned by dma_alloc_coherent */
266 | 262 | struct pci_dev *pci_dev;
267 | 263 | const char *name;
268 | 264 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */
.. | ..
314 | 310 | static int pcnet32_init_ring(struct net_device *);
315 | 311 | static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
316 | 312 | struct net_device *);
317 | | -static void pcnet32_tx_timeout(struct net_device *dev);
| 313 | +static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
318 | 314 | static irqreturn_t pcnet32_interrupt(int, void *);
319 | 315 | static int pcnet32_close(struct net_device *);
320 | 316 | static struct net_device_stats *pcnet32_get_stats(struct net_device *);
.. | ..
489 | 485 | pcnet32_purge_tx_ring(dev);
490 | 486 |
491 | 487 | new_tx_ring =
492 | | - pci_zalloc_consistent(lp->pci_dev,
493 | | - sizeof(struct pcnet32_tx_head) * entries,
494 | | - &new_ring_dma_addr);
| 488 | + dma_alloc_coherent(&lp->pci_dev->dev,
| 489 | + sizeof(struct pcnet32_tx_head) * entries,
| 490 | + &new_ring_dma_addr, GFP_ATOMIC);
495 | 491 | if (new_tx_ring == NULL)
496 | 492 | return;
497 | 493 |
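The hunk above is the template for every allocation change in this patch: the deprecated PCI wrappers took a struct pci_dev and always allocated with GFP_ATOMIC, while the generic DMA API takes the underlying struct device and makes the allocation context explicit (GFP_ATOMIC here, presumably because the ring-resize paths cannot sleep; GFP_KERNEL later in the probe path). A minimal sketch of the equivalence, using illustrative names that are not part of the driver:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Illustrative descriptor type and names; not part of pcnet32. */
struct demo_desc {
	__le32 base;
	__le16 length;
	__le16 status;
};

static struct demo_desc *demo_alloc_ring(struct pci_dev *pdev,
					 unsigned int entries,
					 dma_addr_t *ring_dma, gfp_t gfp)
{
	/*
	 * Old:  ring = pci_zalloc_consistent(pdev, size, ring_dma);
	 * New:  use the struct device embedded in the pci_dev and pass the
	 *       GFP flags explicitly; the old wrapper always used GFP_ATOMIC.
	 *       dma_alloc_coherent() hands back zeroed memory on current
	 *       kernels, so the "zalloc" behaviour is kept without a memset.
	 */
	return dma_alloc_coherent(&pdev->dev,
				  sizeof(struct demo_desc) * entries,
				  ring_dma, gfp);
}

static void demo_free_ring(struct pci_dev *pdev, unsigned int entries,
			   struct demo_desc *ring, dma_addr_t ring_dma)
{
	/* Old: pci_free_consistent(pdev, size, ring, ring_dma); */
	dma_free_coherent(&pdev->dev, sizeof(struct demo_desc) * entries,
			  ring, ring_dma);
}
```

The GFP_ATOMIC sites in the resize hunks and the GFP_KERNEL sites in the probe and ring-init hunks further down both map onto this one helper shape.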
.. | ..
505 | 501 |
506 | 502 | kfree(lp->tx_skbuff);
507 | 503 | kfree(lp->tx_dma_addr);
508 | | - pci_free_consistent(lp->pci_dev,
509 | | - sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
510 | | - lp->tx_ring, lp->tx_ring_dma_addr);
| 504 | + dma_free_coherent(&lp->pci_dev->dev,
| 505 | + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
| 506 | + lp->tx_ring, lp->tx_ring_dma_addr);
511 | 507 |
512 | 508 | lp->tx_ring_size = entries;
513 | 509 | lp->tx_mod_mask = lp->tx_ring_size - 1;
.. | ..
521 | 517 | free_new_lists:
522 | 518 | kfree(new_dma_addr_list);
523 | 519 | free_new_tx_ring:
524 | | - pci_free_consistent(lp->pci_dev,
525 | | - sizeof(struct pcnet32_tx_head) * entries,
526 | | - new_tx_ring,
527 | | - new_ring_dma_addr);
| 520 | + dma_free_coherent(&lp->pci_dev->dev,
| 521 | + sizeof(struct pcnet32_tx_head) * entries,
| 522 | + new_tx_ring, new_ring_dma_addr);
528 | 523 | }
529 | 524 |
530 | 525 | /*
.. | ..
549 | 544 | unsigned int entries = BIT(size);
550 | 545 |
551 | 546 | new_rx_ring =
552 | | - pci_zalloc_consistent(lp->pci_dev,
553 | | - sizeof(struct pcnet32_rx_head) * entries,
554 | | - &new_ring_dma_addr);
| 547 | + dma_alloc_coherent(&lp->pci_dev->dev,
| 548 | + sizeof(struct pcnet32_rx_head) * entries,
| 549 | + &new_ring_dma_addr, GFP_ATOMIC);
555 | 550 | if (new_rx_ring == NULL)
556 | 551 | return;
557 | 552 |
.. | ..
584 | 579 | skb_reserve(rx_skbuff, NET_IP_ALIGN);
585 | 580 |
586 | 581 | new_dma_addr_list[new] =
587 | | - pci_map_single(lp->pci_dev, rx_skbuff->data,
588 | | - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
589 | | - if (pci_dma_mapping_error(lp->pci_dev,
590 | | - new_dma_addr_list[new])) {
| 582 | + dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
| 583 | + PKT_BUF_SIZE, DMA_FROM_DEVICE);
| 584 | + if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
591 | 585 | netif_err(lp, drv, dev, "%s dma mapping failed\n",
592 | 586 | __func__);
593 | 587 | dev_kfree_skb(new_skb_list[new]);
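Streaming mappings follow the same substitution: pci_map_single()/pci_unmap_single() with PCI_DMA_FROMDEVICE become dma_map_single()/dma_unmap_single() with DMA_FROM_DEVICE, and every new mapping is still checked with dma_mapping_error() before the descriptor is handed to the hardware. A sketch of the receive-buffer pattern used above, with illustrative names (demo_* and DEMO_PKT_BUF_SIZE are not from the driver):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#define DEMO_PKT_BUF_SIZE 1544	/* stand-in for the driver's PKT_BUF_SIZE */

/* Map a freshly allocated RX skb so the device can DMA into it. */
static int demo_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			   dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, skb->data, DEMO_PKT_BUF_SIZE,
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;	/* never hand an unchecked handle to the NIC */
	return 0;
}

/* Undo the mapping before the skb is freed or passed up the stack. */
static void demo_unmap_rx_skb(struct pci_dev *pdev, dma_addr_t dma)
{
	dma_unmap_single(&pdev->dev, dma, DEMO_PKT_BUF_SIZE, DMA_FROM_DEVICE);
}
```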
.. | ..
600 | 594 | /* and free any unneeded buffers */
601 | 595 | for (; new < lp->rx_ring_size; new++) {
602 | 596 | if (lp->rx_skbuff[new]) {
603 | | - if (!pci_dma_mapping_error(lp->pci_dev,
604 | | - lp->rx_dma_addr[new]))
605 | | - pci_unmap_single(lp->pci_dev,
| 597 | + if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
| 598 | + dma_unmap_single(&lp->pci_dev->dev,
606 | 599 | lp->rx_dma_addr[new],
607 | 600 | PKT_BUF_SIZE,
608 | | - PCI_DMA_FROMDEVICE);
| 601 | + DMA_FROM_DEVICE);
609 | 602 | dev_kfree_skb(lp->rx_skbuff[new]);
610 | 603 | }
611 | 604 | }
612 | 605 |
613 | 606 | kfree(lp->rx_skbuff);
614 | 607 | kfree(lp->rx_dma_addr);
615 | | - pci_free_consistent(lp->pci_dev,
616 | | - sizeof(struct pcnet32_rx_head) *
617 | | - lp->rx_ring_size, lp->rx_ring,
618 | | - lp->rx_ring_dma_addr);
| 608 | + dma_free_coherent(&lp->pci_dev->dev,
| 609 | + sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
| 610 | + lp->rx_ring, lp->rx_ring_dma_addr);
619 | 611 |
620 | 612 | lp->rx_ring_size = entries;
621 | 613 | lp->rx_mod_mask = lp->rx_ring_size - 1;
.. | ..
629 | 621 | free_all_new:
630 | 622 | while (--new >= lp->rx_ring_size) {
631 | 623 | if (new_skb_list[new]) {
632 | | - if (!pci_dma_mapping_error(lp->pci_dev,
633 | | - new_dma_addr_list[new]))
634 | | - pci_unmap_single(lp->pci_dev,
| 624 | + if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
| 625 | + dma_unmap_single(&lp->pci_dev->dev,
635 | 626 | new_dma_addr_list[new],
636 | 627 | PKT_BUF_SIZE,
637 | | - PCI_DMA_FROMDEVICE);
| 628 | + DMA_FROM_DEVICE);
638 | 629 | dev_kfree_skb(new_skb_list[new]);
639 | 630 | }
640 | 631 | }
.. | ..
642 | 633 | free_new_lists:
643 | 634 | kfree(new_dma_addr_list);
644 | 635 | free_new_rx_ring:
645 | | - pci_free_consistent(lp->pci_dev,
646 | | - sizeof(struct pcnet32_rx_head) * entries,
647 | | - new_rx_ring,
648 | | - new_ring_dma_addr);
| 636 | + dma_free_coherent(&lp->pci_dev->dev,
| 637 | + sizeof(struct pcnet32_rx_head) * entries,
| 638 | + new_rx_ring, new_ring_dma_addr);
649 | 639 | }
650 | 640 |
651 | 641 | static void pcnet32_purge_rx_ring(struct net_device *dev)
.. | ..
658 | 648 | lp->rx_ring[i].status = 0; /* CPU owns buffer */
659 | 649 | wmb(); /* Make sure adapter sees owner change */
660 | 650 | if (lp->rx_skbuff[i]) {
661 | | - if (!pci_dma_mapping_error(lp->pci_dev,
662 | | - lp->rx_dma_addr[i]))
663 | | - pci_unmap_single(lp->pci_dev,
| 651 | + if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
| 652 | + dma_unmap_single(&lp->pci_dev->dev,
664 | 653 | lp->rx_dma_addr[i],
665 | 654 | PKT_BUF_SIZE,
666 | | - PCI_DMA_FROMDEVICE);
| 655 | + DMA_FROM_DEVICE);
667 | 656 | dev_kfree_skb_any(lp->rx_skbuff[i]);
668 | 657 | }
669 | 658 | lp->rx_skbuff[i] = NULL;
.. | ..
809 | 798 | struct pcnet32_private *lp = netdev_priv(dev);
810 | 799 |
811 | 800 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
812 | | - strlcpy(info->version, DRV_VERSION, sizeof(info->version));
813 | 801 | if (lp->pci_dev)
814 | 802 | strlcpy(info->bus_info, pci_name(lp->pci_dev),
815 | 803 | sizeof(info->bus_info));
.. | ..
1041 | 1029 | *packet++ = i;
1042 | 1030 |
1043 | 1031 | lp->tx_dma_addr[x] =
1044 | | - pci_map_single(lp->pci_dev, skb->data, skb->len,
1045 | | - PCI_DMA_TODEVICE);
1046 | | - if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
| 1032 | + dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
| 1033 | + DMA_TO_DEVICE);
| 1034 | + if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
1047 | 1035 | netif_printk(lp, hw, KERN_DEBUG, dev,
1048 | 1036 | "DMA mapping error at line: %d!\n",
1049 | 1037 | __LINE__);
.. | ..
1231 | 1219 | */
1232 | 1220 | if (newskb) {
1233 | 1221 | skb_reserve(newskb, NET_IP_ALIGN);
1234 | | - new_dma_addr = pci_map_single(lp->pci_dev,
| 1222 | + new_dma_addr = dma_map_single(&lp->pci_dev->dev,
1235 | 1223 | newskb->data,
1236 | 1224 | PKT_BUF_SIZE,
1237 | | - PCI_DMA_FROMDEVICE);
1238 | | - if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
| 1225 | + DMA_FROM_DEVICE);
| 1226 | + if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
1239 | 1227 | netif_err(lp, rx_err, dev,
1240 | 1228 | "DMA mapping error.\n");
1241 | 1229 | dev_kfree_skb(newskb);
1242 | 1230 | skb = NULL;
1243 | 1231 | } else {
1244 | 1232 | skb = lp->rx_skbuff[entry];
1245 | | - pci_unmap_single(lp->pci_dev,
| 1233 | + dma_unmap_single(&lp->pci_dev->dev,
1246 | 1234 | lp->rx_dma_addr[entry],
1247 | 1235 | PKT_BUF_SIZE,
1248 | | - PCI_DMA_FROMDEVICE);
| 1236 | + DMA_FROM_DEVICE);
1249 | 1237 | skb_put(skb, pkt_len);
1250 | 1238 | lp->rx_skbuff[entry] = newskb;
1251 | 1239 | lp->rx_dma_addr[entry] = new_dma_addr;
.. | ..
1264 | 1252 | if (!rx_in_place) {
1265 | 1253 | skb_reserve(skb, NET_IP_ALIGN);
1266 | 1254 | skb_put(skb, pkt_len); /* Make room */
1267 | | - pci_dma_sync_single_for_cpu(lp->pci_dev,
1268 | | - lp->rx_dma_addr[entry],
1269 | | - pkt_len,
1270 | | - PCI_DMA_FROMDEVICE);
| 1255 | + dma_sync_single_for_cpu(&lp->pci_dev->dev,
| 1256 | + lp->rx_dma_addr[entry], pkt_len,
| 1257 | + DMA_FROM_DEVICE);
1271 | 1258 | skb_copy_to_linear_data(skb,
1272 | 1259 | (unsigned char *)(lp->rx_skbuff[entry]->data),
1273 | 1260 | pkt_len);
1274 | | - pci_dma_sync_single_for_device(lp->pci_dev,
1275 | | - lp->rx_dma_addr[entry],
1276 | | - pkt_len,
1277 | | - PCI_DMA_FROMDEVICE);
| 1261 | + dma_sync_single_for_device(&lp->pci_dev->dev,
| 1262 | + lp->rx_dma_addr[entry], pkt_len,
| 1263 | + DMA_FROM_DEVICE);
1278 | 1264 | }
1279 | 1265 | dev->stats.rx_bytes += skb->len;
1280 | 1266 | skb->protocol = eth_type_trans(skb, dev);
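In this copy-break path the original buffer stays mapped for the device; ownership is only bounced to the CPU for the duration of the copy and then handed back, now via dma_sync_single_for_cpu()/dma_sync_single_for_device(). A reduced sketch of that idea, with illustrative names:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/*
 * Copy a small received frame into a fresh skb while leaving the original
 * RX buffer mapped so the device can reuse it immediately afterwards.
 */
static void demo_copybreak(struct pci_dev *pdev, struct sk_buff *copy,
			   const void *rx_buf, dma_addr_t rx_dma,
			   unsigned int pkt_len)
{
	/* Make the device's writes visible to the CPU before copying. */
	dma_sync_single_for_cpu(&pdev->dev, rx_dma, pkt_len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(copy, rx_buf, pkt_len);
	/* Return ownership to the device; the buffer stays on the ring. */
	dma_sync_single_for_device(&pdev->dev, rx_dma, pkt_len,
				   DMA_FROM_DEVICE);
}
```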
.. | ..
1363 | 1349 |
1364 | 1350 | /* We must free the original skb */
1365 | 1351 | if (lp->tx_skbuff[entry]) {
1366 | | - pci_unmap_single(lp->pci_dev,
| 1352 | + dma_unmap_single(&lp->pci_dev->dev,
1367 | 1353 | lp->tx_dma_addr[entry],
1368 | | - lp->tx_skbuff[entry]->
1369 | | - len, PCI_DMA_TODEVICE);
| 1354 | + lp->tx_skbuff[entry]->len,
| 1355 | + DMA_TO_DEVICE);
1370 | 1356 | dev_kfree_skb_any(lp->tx_skbuff[entry]);
1371 | 1357 | lp->tx_skbuff[entry] = NULL;
1372 | 1358 | lp->tx_dma_addr[entry] = 0;
.. | ..
1555 | 1541 | goto err_disable_dev;
1556 | 1542 | }
1557 | 1543 |
1558 | | - err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
| 1544 | + err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
1559 | 1545 | if (err) {
1560 | 1546 | if (pcnet32_debug & NETIF_MSG_PROBE)
1561 | 1547 | pr_err("architecture does not support 32bit PCI busmaster DMA\n");
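The DMA mask is set the same way: pci_set_dma_mask(pdev, mask) was only a wrapper around the generic call on &pdev->dev. A probe-time sketch for a 32-bit-only device such as this one (the helper name and error string are illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_set_dma_mask(struct pci_dev *pdev)
{
	/* Old: err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); */
	int err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (err)
		dev_err(&pdev->dev, "no usable 32-bit DMA configuration\n");
	return err;
}
```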
.. | ..
1840 | 1826 |
1841 | 1827 | dev->base_addr = ioaddr;
1842 | 1828 | lp = netdev_priv(dev);
1843 | | - /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1844 | | - lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
1845 | | - &lp->init_dma_addr);
| 1829 | + /* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */
| 1830 | + lp->init_block = dma_alloc_coherent(&pdev->dev,
| 1831 | + sizeof(*lp->init_block),
| 1832 | + &lp->init_dma_addr, GFP_KERNEL);
1846 | 1833 | if (!lp->init_block) {
1847 | 1834 | if (pcnet32_debug & NETIF_MSG_PROBE)
1848 | | - pr_err("Consistent memory allocation failed\n");
| 1835 | + pr_err("Coherent memory allocation failed\n");
1849 | 1836 | ret = -ENOMEM;
1850 | 1837 | goto err_free_netdev;
1851 | 1838 | }
.. | ..
2004 | 1991 |
2005 | 1992 | err_free_ring:
2006 | 1993 | pcnet32_free_ring(dev);
2007 | | - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
2008 | | - lp->init_block, lp->init_dma_addr);
| 1994 | + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
| 1995 | + lp->init_block, lp->init_dma_addr);
2009 | 1996 | err_free_netdev:
2010 | 1997 | free_netdev(dev);
2011 | 1998 | err_release_region:
.. | ..
2018 | 2005 | {
2019 | 2006 | struct pcnet32_private *lp = netdev_priv(dev);
2020 | 2007 |
2021 | | - lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
2022 | | - sizeof(struct pcnet32_tx_head) *
2023 | | - lp->tx_ring_size,
2024 | | - &lp->tx_ring_dma_addr);
| 2008 | + lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
| 2009 | + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
| 2010 | + &lp->tx_ring_dma_addr, GFP_KERNEL);
2025 | 2011 | if (lp->tx_ring == NULL) {
2026 | | - netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
| 2012 | + netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
2027 | 2013 | return -ENOMEM;
2028 | 2014 | }
2029 | 2015 |
2030 | | - lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
2031 | | - sizeof(struct pcnet32_rx_head) *
2032 | | - lp->rx_ring_size,
2033 | | - &lp->rx_ring_dma_addr);
| 2016 | + lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
| 2017 | + sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
| 2018 | + &lp->rx_ring_dma_addr, GFP_KERNEL);
2034 | 2019 | if (lp->rx_ring == NULL) {
2035 | | - netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
| 2020 | + netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
2036 | 2021 | return -ENOMEM;
2037 | 2022 | }
2038 | 2023 |
.. | ..
2076 | 2061 | lp->rx_dma_addr = NULL;
2077 | 2062 |
2078 | 2063 | if (lp->tx_ring) {
2079 | | - pci_free_consistent(lp->pci_dev,
2080 | | - sizeof(struct pcnet32_tx_head) *
2081 | | - lp->tx_ring_size, lp->tx_ring,
2082 | | - lp->tx_ring_dma_addr);
| 2064 | + dma_free_coherent(&lp->pci_dev->dev,
| 2065 | + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
| 2066 | + lp->tx_ring, lp->tx_ring_dma_addr);
2083 | 2067 | lp->tx_ring = NULL;
2084 | 2068 | }
2085 | 2069 |
2086 | 2070 | if (lp->rx_ring) {
2087 | | - pci_free_consistent(lp->pci_dev,
2088 | | - sizeof(struct pcnet32_rx_head) *
2089 | | - lp->rx_ring_size, lp->rx_ring,
2090 | | - lp->rx_ring_dma_addr);
| 2071 | + dma_free_coherent(&lp->pci_dev->dev,
| 2072 | + sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
| 2073 | + lp->rx_ring, lp->rx_ring_dma_addr);
2091 | 2074 | lp->rx_ring = NULL;
2092 | 2075 | }
2093 | 2076 | }
.. | ..
2348 | 2331 | lp->tx_ring[i].status = 0; /* CPU owns buffer */
2349 | 2332 | wmb(); /* Make sure adapter sees owner change */
2350 | 2333 | if (lp->tx_skbuff[i]) {
2351 | | - if (!pci_dma_mapping_error(lp->pci_dev,
2352 | | - lp->tx_dma_addr[i]))
2353 | | - pci_unmap_single(lp->pci_dev,
| 2334 | + if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
| 2335 | + dma_unmap_single(&lp->pci_dev->dev,
2354 | 2336 | lp->tx_dma_addr[i],
2355 | 2337 | lp->tx_skbuff[i]->len,
2356 | | - PCI_DMA_TODEVICE);
| 2338 | + DMA_TO_DEVICE);
2357 | 2339 | dev_kfree_skb_any(lp->tx_skbuff[i]);
2358 | 2340 | }
2359 | 2341 | lp->tx_skbuff[i] = NULL;
.. | ..
2388 | 2370 | rmb();
2389 | 2371 | if (lp->rx_dma_addr[i] == 0) {
2390 | 2372 | lp->rx_dma_addr[i] =
2391 | | - pci_map_single(lp->pci_dev, rx_skbuff->data,
2392 | | - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
2393 | | - if (pci_dma_mapping_error(lp->pci_dev,
2394 | | - lp->rx_dma_addr[i])) {
| 2373 | + dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
| 2374 | + PKT_BUF_SIZE, DMA_FROM_DEVICE);
| 2375 | + if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
2395 | 2376 | /* there is not much we can do at this point */
2396 | 2377 | netif_err(lp, drv, dev,
2397 | 2378 | "%s pci dma mapping error\n",
.. | ..
2456 | 2437 | lp->a->write_csr(ioaddr, CSR0, csr0_bits);
2457 | 2438 | }
2458 | 2439 |
2459 | | -static void pcnet32_tx_timeout(struct net_device *dev)
| 2440 | +static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
2460 | 2441 | {
2461 | 2442 | struct pcnet32_private *lp = netdev_priv(dev);
2462 | 2443 | unsigned long ioaddr = dev->base_addr, flags;
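The added txqueue parameter matches the current ndo_tx_timeout prototype in struct net_device_ops: the watchdog now reports which transmit queue stalled, and a single-queue driver like this one can simply ignore the value. A minimal sketch of how the callback is declared and hooked up (the demo_* names are illustrative, not the driver's):

```c
#include <linux/netdevice.h>

/* The core passes the index of the queue whose watchdog fired. */
static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timed out on queue %u, resetting\n",
		    txqueue);
	/* ...reinitialise the ring and restart the transmitter here... */
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_tx_timeout	= demo_tx_timeout,
	/* remaining callbacks omitted from this sketch */
};
```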
.. | ..
2529 | 2510 | lp->tx_ring[entry].misc = 0x00000000;
2530 | 2511 |
2531 | 2512 | lp->tx_dma_addr[entry] =
2532 | | - pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2533 | | - if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
| 2513 | + dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
| 2514 | + DMA_TO_DEVICE);
| 2515 | + if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
2534 | 2516 | dev_kfree_skb_any(skb);
2535 | 2517 | dev->stats.tx_dropped++;
2536 | 2518 | goto drop_packet;
.. | ..
2919 | 2901 | mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
2920 | 2902 | }
2921 | 2903 |
2922 | | -static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
| 2904 | +static int __maybe_unused pcnet32_pm_suspend(struct device *device_d)
2923 | 2905 | {
2924 | | - struct net_device *dev = pci_get_drvdata(pdev);
| 2906 | + struct net_device *dev = dev_get_drvdata(device_d);
2925 | 2907 |
2926 | 2908 | if (netif_running(dev)) {
2927 | 2909 | netif_device_detach(dev);
2928 | 2910 | pcnet32_close(dev);
2929 | 2911 | }
2930 | | - pci_save_state(pdev);
2931 | | - pci_set_power_state(pdev, pci_choose_state(pdev, state));
| 2912 | +
2932 | 2913 | return 0;
2933 | 2914 | }
2934 | 2915 |
2935 | | -static int pcnet32_pm_resume(struct pci_dev *pdev)
| 2916 | +static int __maybe_unused pcnet32_pm_resume(struct device *device_d)
2936 | 2917 | {
2937 | | - struct net_device *dev = pci_get_drvdata(pdev);
2938 | | -
2939 | | - pci_set_power_state(pdev, PCI_D0);
2940 | | - pci_restore_state(pdev);
| 2918 | + struct net_device *dev = dev_get_drvdata(device_d);
2941 | 2919 |
2942 | 2920 | if (netif_running(dev)) {
2943 | 2921 | pcnet32_open(dev);
2944 | 2922 | netif_device_attach(dev);
2945 | 2923 | }
| 2924 | +
2946 | 2925 | return 0;
2947 | 2926 | }
2948 | 2927 |
.. | ..
2956 | 2935 | unregister_netdev(dev);
2957 | 2936 | pcnet32_free_ring(dev);
2958 | 2937 | release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2959 | | - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
2960 | | - lp->init_block, lp->init_dma_addr);
| 2938 | + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
| 2939 | + lp->init_block, lp->init_dma_addr);
2961 | 2940 | free_netdev(dev);
2962 | 2941 | pci_disable_device(pdev);
2963 | 2942 | }
2964 | 2943 | }
| 2944 | +
| 2945 | +static SIMPLE_DEV_PM_OPS(pcnet32_pm_ops, pcnet32_pm_suspend, pcnet32_pm_resume);
2965 | 2946 |
2966 | 2947 | static struct pci_driver pcnet32_driver = {
2967 | 2948 | .name = DRV_NAME,
2968 | 2949 | .probe = pcnet32_probe_pci,
2969 | 2950 | .remove = pcnet32_remove_one,
2970 | 2951 | .id_table = pcnet32_pci_tbl,
2971 | | - .suspend = pcnet32_pm_suspend,
2972 | | - .resume = pcnet32_pm_resume,
| 2952 | + .driver = {
| 2953 | + .pm = &pcnet32_pm_ops,
| 2954 | + },
2973 | 2955 | };
2974 | 2956 |
2975 | 2957 | /* An additional parameter that may be passed in... */
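With the legacy pci_driver .suspend/.resume hooks the driver had to save config space and change the power state itself, which is why pci_save_state(), pci_set_power_state() and pci_restore_state() simply disappear in the hunks above: for drivers that publish dev_pm_ops, the PCI core performs those steps around the callbacks. A reduced sketch of the resulting shape, with illustrative names and the probe/remove/id_table members left out:

```c
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	if (netif_running(dev))
		netif_device_detach(dev);
	/* No pci_save_state()/pci_set_power_state(): the PCI core does it. */
	return 0;
}

static int __maybe_unused demo_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	if (netif_running(dev))
		netif_device_attach(dev);
	return 0;
}

/* Expands to a struct dev_pm_ops covering the system-sleep callbacks. */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name	= "demo",
	.driver	= {
		.pm = &demo_pm_ops,
	},
	/* .probe, .remove and .id_table omitted from this sketch */
};
```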
.. | ..
3007 | 2989 |
3008 | 2990 | static int __init pcnet32_init_module(void)
3009 | 2991 | {
3010 | | - pr_info("%s", version);
3011 | | -
3012 | 2992 | pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
3013 | 2993 |
3014 | 2994 | if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
.. | ..
3038 | 3018 | unregister_netdev(pcnet32_dev);
3039 | 3019 | pcnet32_free_ring(pcnet32_dev);
3040 | 3020 | release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
3041 | | - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
3042 | | - lp->init_block, lp->init_dma_addr);
| 3021 | + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
| 3022 | + lp->init_block, lp->init_dma_addr);
3043 | 3023 | free_netdev(pcnet32_dev);
3044 | 3024 | pcnet32_dev = next_dev;
3045 | 3025 | }
---|