@@ -612,7 +612,7 @@
 static void check_link(struct net_device *dev);
 static void netdev_timer(struct timer_list *t);
 static void dump_ring(struct net_device *dev);
-static void ns_tx_timeout(struct net_device *dev);
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
 static int alloc_ring(struct net_device *dev);
 static void refill_rx(struct net_device *dev);
 static void init_ring(struct net_device *dev);
@@ -1878,7 +1878,7 @@
 	}
 }

-static void ns_tx_timeout(struct net_device *dev)
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
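
The two hunks above track the ndo_tx_timeout prototype change introduced in v5.6: the networking core now passes the index of the stalled TX queue to the handler, so the driver's prototype and definition gain a second argument even if the value goes unused. A minimal sketch of the new hook shape, using hypothetical my_* names rather than this driver's:

    static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
    	/* txqueue identifies which TX queue the watchdog found stalled */
    	netdev_warn(dev, "transmit timeout on queue %u\n", txqueue);
    }

    static const struct net_device_ops my_netdev_ops = {
    	.ndo_tx_timeout = my_tx_timeout,
    	/* other callbacks elided */
    };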
@@ -1913,9 +1913,9 @@
 static int alloc_ring(struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	np->rx_ring = pci_alloc_consistent(np->pci_dev,
-		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
-		&np->ring_dma);
+	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+					 sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+					 &np->ring_dma, GFP_KERNEL);
 	if (!np->rx_ring)
 		return -ENOMEM;
 	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
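
The coherent-allocation hunks (here and in free_ring() further down) swap the legacy PCI wrappers for the generic DMA API. pci_alloc_consistent() always allocated with GFP_ATOMIC; dma_alloc_coherent() takes the underlying struct device plus an explicit gfp mask, and the allocation here can pass GFP_KERNEL because it runs in a sleepable (device-open) context. A sketch of the paired calls, with hypothetical pdev/ring names:

    	dma_addr_t ring_dma;
    	size_t ring_bytes = sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE);
    	void *ring;

    	ring = dma_alloc_coherent(&pdev->dev, ring_bytes, &ring_dma, GFP_KERNEL);
    	if (!ring)
    		return -ENOMEM;
    	/* ... use the ring; ring_dma is what the hardware gets programmed with ... */
    	dma_free_coherent(&pdev->dev, ring_bytes, ring, ring_dma);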
@@ -1936,10 +1936,10 @@
 		np->rx_skbuff[entry] = skb;
 		if (skb == NULL)
 			break; /* Better luck next round. */
-		np->rx_dma[entry] = pci_map_single(np->pci_dev,
-			skb->data, buflen, PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(np->pci_dev,
-					  np->rx_dma[entry])) {
+		np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
+						   skb->data, buflen,
+						   DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
 			dev_kfree_skb_any(skb);
 			np->rx_skbuff[entry] = NULL;
 			break; /* Better luck next round. */
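
Every streaming-DMA hunk in this patch follows the same substitution: pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single() against &pci_dev->dev, pci_dma_mapping_error() becomes dma_mapping_error(), and PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE become DMA_FROM_DEVICE/DMA_TO_DEVICE. The RX-buffer shape, as a sketch with hypothetical names:

    	dma_addr_t mapping;

    	mapping = dma_map_single(&pdev->dev, skb->data, buflen, DMA_FROM_DEVICE);
    	if (dma_mapping_error(&pdev->dev, mapping)) {
    		dev_kfree_skb_any(skb);	/* mapping failed: drop and retry later */
    		return;
    	}
    	/* ... the device owns the buffer until it is unmapped or synced ... */
    	dma_unmap_single(&pdev->dev, mapping, buflen, DMA_FROM_DEVICE);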
@@ -2010,9 +2010,8 @@

 	for (i = 0; i < TX_RING_SIZE; i++) {
 		if (np->tx_skbuff[i]) {
-			pci_unmap_single(np->pci_dev,
-				np->tx_dma[i], np->tx_skbuff[i]->len,
-				PCI_DMA_TODEVICE);
+			dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
+					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
 			dev_kfree_skb(np->tx_skbuff[i]);
 			dev->stats.tx_dropped++;
 		}
@@ -2031,9 +2030,9 @@
 		np->rx_ring[i].cmd_status = 0;
 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 		if (np->rx_skbuff[i]) {
-			pci_unmap_single(np->pci_dev, np->rx_dma[i],
-				buflen + NATSEMI_PADDING,
-				PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
+					 buflen + NATSEMI_PADDING,
+					 DMA_FROM_DEVICE);
 			dev_kfree_skb(np->rx_skbuff[i]);
 		}
 		np->rx_skbuff[i] = NULL;
@@ -2049,9 +2048,9 @@
 static void free_ring(struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	pci_free_consistent(np->pci_dev,
-		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
-		np->rx_ring, np->ring_dma);
+	dma_free_coherent(&np->pci_dev->dev,
+			  sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+			  np->rx_ring, np->ring_dma);
 }

 static void reinit_rx(struct net_device *dev)
@@ -2098,9 +2097,9 @@
 	entry = np->cur_tx % TX_RING_SIZE;

 	np->tx_skbuff[entry] = skb;
-	np->tx_dma[entry] = pci_map_single(np->pci_dev,
-				skb->data,skb->len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
+	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+					   skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
 		np->tx_skbuff[entry] = NULL;
 		dev_kfree_skb_irq(skb);
 		dev->stats.tx_dropped++;
@@ -2166,11 +2165,10 @@
 			dev->stats.tx_window_errors++;
 			dev->stats.tx_errors++;
 		}
-		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
-					np->tx_skbuff[entry]->len,
-					PCI_DMA_TODEVICE);
+		dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
+				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
 		/* Free the original skb. */
-		dev_kfree_skb_irq(np->tx_skbuff[entry]);
+		dev_consume_skb_irq(np->tx_skbuff[entry]);
 		np->tx_skbuff[entry] = NULL;
 	}
 	if (netif_queue_stopped(dev) &&
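
The dev_kfree_skb_irq() → dev_consume_skb_irq() change on the TX-completion path is cosmetic for memory management but meaningful for tracing: both free the skb from hard-irq context, but consume marks a successfully transmitted packet while kfree is accounted as a drop. Roughly the intended split (tx_completed_ok is a stand-in for the driver's own status check):

    	if (tx_completed_ok)
    		dev_consume_skb_irq(skb);	/* normal completion, not a drop */
    	else
    		dev_kfree_skb_irq(skb);		/* error path, shows up in drop tracing */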
@@ -2356,21 +2354,22 @@
 		    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
 			/* 16 byte align the IP header */
 			skb_reserve(skb, RX_OFFSET);
-			pci_dma_sync_single_for_cpu(np->pci_dev,
-				np->rx_dma[entry],
-				buflen,
-				PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&np->pci_dev->dev,
+						np->rx_dma[entry],
+						buflen,
+						DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb,
 				np->rx_skbuff[entry]->data, pkt_len);
 			skb_put(skb, pkt_len);
-			pci_dma_sync_single_for_device(np->pci_dev,
-				np->rx_dma[entry],
-				buflen,
-				PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&np->pci_dev->dev,
+						   np->rx_dma[entry],
+						   buflen,
+						   DMA_FROM_DEVICE);
 		} else {
-			pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+			dma_unmap_single(&np->pci_dev->dev,
+					 np->rx_dma[entry],
 				buflen + NATSEMI_PADDING,
-				PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			skb_put(skb = np->rx_skbuff[entry], pkt_len);
 			np->rx_skbuff[entry] = NULL;
 		}
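
On this copybreak RX path the original buffer stays mapped and is recycled, so instead of unmapping, ownership is bounced to the CPU and back: dma_sync_single_for_cpu() before the CPU reads the data, dma_sync_single_for_device() after the copy, both with the same size and direction as the original mapping. Only the large-packet branch, which hands the original skb up the stack, tears the mapping down. A compressed sketch of the copy branch, names hypothetical:

    	dma_sync_single_for_cpu(&pdev->dev, rx_dma, buflen, DMA_FROM_DEVICE);
    	skb_copy_to_linear_data(copy_skb, rx_buf, pkt_len);	/* CPU may read the buffer now */
    	skb_put(copy_skb, pkt_len);
    	dma_sync_single_for_device(&pdev->dev, rx_dma, buflen, DMA_FROM_DEVICE);	/* hand it back to the NIC */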
@@ -3078,7 +3077,7 @@
 	switch(cmd) {
 	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
 		data->phy_id = np->phy_addr_external;
-		/* Fall Through */
+		fallthrough;

 	case SIOCGMIIREG:	/* Read MII PHY register. */
 		/* The phy_id is not enough to uniquely identify
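
The /* Fall Through */ comment is replaced by the fallthrough pseudo-keyword (defined in <linux/compiler_attributes.h>), which expands to __attribute__((__fallthrough__)) on compilers that support it and allows -Wimplicit-fallthrough to be enforced tree-wide. The usage pattern, with hypothetical data/phy_addr variables:

    	switch (cmd) {
    	case SIOCGMIIPHY:
    		data->phy_id = phy_addr;
    		fallthrough;	/* deliberately continue into the register read */
    	case SIOCGMIIREG:
    		/* read the MII register here */
    		break;
    	}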
@@ -3243,8 +3242,6 @@
 	free_netdev (dev);
 }

-#ifdef CONFIG_PM
-
 /*
  * The ns83815 chip doesn't have explicit RxStop bits.
  * Kicking the Rx or Tx process for a new packet reenables the Rx process
@@ -3271,9 +3268,9 @@
  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
  */

-static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused natsemi_suspend(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata (pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
@@ -3322,11 +3319,10 @@
 }


-static int natsemi_resume (struct pci_dev *pdev)
+static int __maybe_unused natsemi_resume(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata (pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct netdev_private *np = netdev_priv(dev);
-	int ret = 0;

 	rtnl_lock();
 	if (netif_device_present(dev))
@@ -3335,12 +3331,6 @@
 		const int irq = np->pci_dev->irq;

 		BUG_ON(!np->hands_off);
-		ret = pci_enable_device(pdev);
-		if (ret < 0) {
-			dev_err(&pdev->dev,
-				"pci_enable_device() failed: %d\n", ret);
-			goto out;
-		}
 	/*	pci_power_on(pdev); */

 		napi_enable(&np->napi);
@@ -3360,20 +3350,17 @@
 	netif_device_attach(dev);
 out:
 	rtnl_unlock();
-	return ret;
+	return 0;
 }

-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);

 static struct pci_driver natsemi_driver = {
 	.name = DRV_NAME,
 	.id_table = natsemi_pci_tbl,
 	.probe = natsemi_probe1,
 	.remove = natsemi_remove1,
-#ifdef CONFIG_PM
-	.suspend = natsemi_suspend,
-	.resume = natsemi_resume,
-#endif
+	.driver.pm = &natsemi_pm_ops,
 };

 static int __init natsemi_init_mod (void)
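
The power-management hunks are one logical change: the legacy .suspend/.resume members of struct pci_driver, together with their CONFIG_PM guards, are replaced by a struct dev_pm_ops generated with SIMPLE_DEV_PM_OPS() and attached through .driver.pm. Under the generic framework the PCI core takes care of re-enabling the device and handling power-state transitions, so the resume path drops its pci_enable_device() call, the callbacks take a struct device instead of (pci_dev, pm_message_t), and __maybe_unused keeps them warning-free when CONFIG_PM is disabled. A sketch of the resulting skeleton, with hypothetical my_* names:

    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/pm.h>

    static int __maybe_unused my_suspend(struct device *dev_d)
    {
    	struct net_device *ndev = dev_get_drvdata(dev_d);

    	netif_device_detach(ndev);	/* quiesce the interface */
    	return 0;
    }

    static int __maybe_unused my_resume(struct device *dev_d)
    {
    	struct net_device *ndev = dev_get_drvdata(dev_d);

    	netif_device_attach(ndev);	/* PCI core has already powered the device back up */
    	return 0;
    }

    static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

    static struct pci_driver my_driver = {
    	.name		= "my_driver",
    	/* id_table/probe/remove elided */
    	.driver.pm	= &my_pm_ops,
    };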