@@ -545,37 +545,25 @@
 	}
 
 	if (pci_estat & GREG_PCIESTAT_OTHER) {
-		u16 pci_cfg_stat;
+		int pci_errs;
 
 		/* Interrogate PCI config space for the
 		 * true cause.
 		 */
-		pci_read_config_word(gp->pdev, PCI_STATUS,
-				     &pci_cfg_stat);
-		netdev_err(dev, "Read PCI cfg space status [%04x]\n",
-			   pci_cfg_stat);
-		if (pci_cfg_stat & PCI_STATUS_PARITY)
+		pci_errs = pci_status_get_and_clear_errors(gp->pdev);
+		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+		if (pci_errs & PCI_STATUS_PARITY)
 			netdev_err(dev, "PCI parity error detected\n");
-		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
 			netdev_err(dev, "PCI target abort\n");
-		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
 			netdev_err(dev, "PCI master acks target abort\n");
-		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
 			netdev_err(dev, "PCI master abort\n");
-		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
 			netdev_err(dev, "PCI system error SERR#\n");
-		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
 			netdev_err(dev, "PCI parity error\n");
-
-		/* Write the error bits back to clear them. */
-		pci_cfg_stat &= (PCI_STATUS_PARITY |
-				 PCI_STATUS_SIG_TARGET_ABORT |
-				 PCI_STATUS_REC_TARGET_ABORT |
-				 PCI_STATUS_REC_MASTER_ABORT |
-				 PCI_STATUS_SIG_SYSTEM_ERROR |
-				 PCI_STATUS_DETECTED_PARITY);
-		pci_write_config_word(gp->pdev,
-				      PCI_STATUS, pci_cfg_stat);
 	}
 
 	/* For all PCI errors, we should reset the chip. */
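The helper consolidates the old read/log/mask/write-back dance into one call: it reads PCI_STATUS, writes back any set error bits (they are write-one-to-clear), and returns them. A minimal sketch of roughly what pci_status_get_and_clear_errors() does internally, assuming the PCI_STATUS_ERROR_BITS mask covers the same six flags tested above (illustration only, not the driver's code):

#include <linux/pci.h>

/* Illustrative only: why the per-bit mask-and-write-back block above
 * could be dropped.
 */
static int sketch_status_get_and_clear(struct pci_dev *pdev)
{
	u16 status;

	if (pci_read_config_word(pdev, PCI_STATUS, &status))
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;	/* parity/abort/SERR flags */
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}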
---|
@@ -682,7 +670,8 @@
 		dma_addr = le64_to_cpu(txd->buffer);
 		dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
 
-		pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+		dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
+			       DMA_TO_DEVICE);
 		entry = NEXT_TX(entry);
 	}
 
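The legacy pci_map_page()/pci_unmap_page() wrappers were thin shims over the generic DMA API, so each conversion in this patch is mechanical: pass &pdev->dev instead of the pci_dev, and swap PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE for DMA_TO_DEVICE/DMA_FROM_DEVICE. A self-contained sketch of one map/unmap cycle (hypothetical helper, not from the driver):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical TX buffer cycle showing the converted calls. */
static int sketch_tx_dma_cycle(struct pci_dev *pdev, struct page *page,
			       unsigned long offset, size_t len)
{
	dma_addr_t addr;

	/* was: pci_map_page(pdev, page, offset, len, PCI_DMA_TODEVICE) */
	addr = dma_map_page(&pdev->dev, page, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;

	/* ... point a descriptor at addr, kick the hardware, wait ... */

	/* was: pci_unmap_page(pdev, addr, len, PCI_DMA_TODEVICE) */
	dma_unmap_page(&pdev->dev, addr, len, DMA_TO_DEVICE);
	return 0;
}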
---|
@@ -821,16 +810,15 @@
 				drops++;
 				goto drop_it;
 			}
-			pci_unmap_page(gp->pdev, dma_addr,
-				       RX_BUF_ALLOC_SIZE(gp),
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&gp->pdev->dev, dma_addr,
+				       RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
 			gp->rx_skbs[entry] = new_skb;
 			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
-			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+			rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
 							       virt_to_page(new_skb->data),
 							       offset_in_page(new_skb->data),
 							       RX_BUF_ALLOC_SIZE(gp),
-							       PCI_DMA_FROMDEVICE));
+							       DMA_FROM_DEVICE));
 			skb_reserve(new_skb, RX_OFFSET);
 
 			/* Trim the original skb for the netif. */
---|
@@ -845,9 +833,11 @@
 
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
-			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
+						DMA_FROM_DEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
-			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
+						   len, DMA_FROM_DEVICE);
 
 			/* We'll reuse the original ring buffer. */
 			skb = copy_skb;
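This copybreak path follows the streaming-DMA ownership rule: the CPU may only read the buffer between the for_cpu and for_device syncs, after which the page goes back on the ring for the device to reuse. A sketch of the pattern with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical copybreak: claim the buffer for the CPU, copy the small
 * frame out, then return ownership to the device.
 */
static void sketch_copybreak(struct device *dev, dma_addr_t addr,
			     const void *rx_buf, void *copy, size_t len)
{
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	memcpy(copy, rx_buf, len);
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}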
---|
@@ -970,7 +960,7 @@
 }
 #endif
 
-static void gem_tx_timeout(struct net_device *dev)
+static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct gem *gp = netdev_priv(dev);
 
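The extra parameter comes from the ndo_tx_timeout() prototype change (v5.6), which passes the index of the stalled TX queue so multi-queue drivers need not scan every ring; a single-queue driver like sungem simply accepts and ignores it. Sketch of a conforming handler (hypothetical, not this driver's body):

#include <linux/netdevice.h>

/* Hypothetical handler matching the new ndo_tx_timeout() signature. */
static void sketch_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timed out on queue %u\n", txqueue);
	/* typically: schedule a reset work item here */
}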
---|
@@ -1032,10 +1022,10 @@
 		u32 len;
 
 		len = skb->len;
-		mapping = pci_map_page(gp->pdev,
+		mapping = dma_map_page(&gp->pdev->dev,
 				       virt_to_page(skb->data),
 				       offset_in_page(skb->data),
-				       len, PCI_DMA_TODEVICE);
+				       len, DMA_TO_DEVICE);
 		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
 		if (gem_intme(entry))
 			ctrl |= TXDCTRL_INTME;
---|
@@ -1058,9 +1048,10 @@
 		 * Otherwise we could race with the device.
 		 */
 		first_len = skb_headlen(skb);
-		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+		first_mapping = dma_map_page(&gp->pdev->dev,
+					     virt_to_page(skb->data),
 					     offset_in_page(skb->data),
-					     first_len, PCI_DMA_TODEVICE);
+					     first_len, DMA_TO_DEVICE);
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
---|
@@ -1586,9 +1577,9 @@
 		if (gp->rx_skbs[i] != NULL) {
 			skb = gp->rx_skbs[i];
 			dma_addr = le64_to_cpu(rxd->buffer);
-			pci_unmap_page(gp->pdev, dma_addr,
+			dma_unmap_page(&gp->pdev->dev, dma_addr,
 				       RX_BUF_ALLOC_SIZE(gp),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			gp->rx_skbs[i] = NULL;
 		}
---|
@@ -1610,9 +1601,9 @@
 
 			txd = &gb->txd[ent];
 			dma_addr = le64_to_cpu(txd->buffer);
-			pci_unmap_page(gp->pdev, dma_addr,
+			dma_unmap_page(&gp->pdev->dev, dma_addr,
 				       le64_to_cpu(txd->control_word) &
-				       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+				       TXDCTRL_BUFSZ, DMA_TO_DEVICE);
 
 			if (frag != skb_shinfo(skb)->nr_frags)
 				i++;
---|
@@ -1649,11 +1640,11 @@
 
 		gp->rx_skbs[i] = skb;
 		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
-		dma_addr = pci_map_page(gp->pdev,
+		dma_addr = dma_map_page(&gp->pdev->dev,
 					virt_to_page(skb->data),
 					offset_in_page(skb->data),
 					RX_BUF_ALLOC_SIZE(gp),
-					PCI_DMA_FROMDEVICE);
+					DMA_FROM_DEVICE);
 		rxd->buffer = cpu_to_le64(dma_addr);
 		dma_wmb();
 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
---|
@@ -2151,20 +2142,6 @@
 	struct gem *gp = netdev_priv(dev);
 	int rc;
 
-	/* Enable the cell */
-	gem_get_cell(gp);
-
-	/* Make sure PCI access and bus master are enabled */
-	rc = pci_enable_device(gp->pdev);
-	if (rc) {
-		netdev_err(dev, "Failed to enable chip on PCI bus !\n");
-
-		/* Put cell and forget it for now, it will be considered as
-		 * still asleep, a new sleep cycle may bring it back
-		 */
-		gem_put_cell(gp);
-		return -ENXIO;
-	}
 	pci_set_master(gp->pdev);
 
 	/* Init & setup chip hardware */
---|
@@ -2242,13 +2219,6 @@
 
 	/* Shut the PHY down eventually and setup WOL */
 	gem_stop_phy(gp, wol);
-
-	/* Make sure bus master is disabled */
-	pci_disable_device(gp->pdev);
-
-	/* Cell not needed neither if no WOL */
-	if (!wol)
-		gem_put_cell(gp);
 }
 
 static void gem_reset_task(struct work_struct *work)
---|
@@ -2300,26 +2270,53 @@
 
 static int gem_open(struct net_device *dev)
 {
+	struct gem *gp = netdev_priv(dev);
+	int rc;
+
 	/* We allow open while suspended, we just do nothing,
 	 * the chip will be initialized in resume()
 	 */
-	if (netif_device_present(dev))
+	if (netif_device_present(dev)) {
+		/* Enable the cell */
+		gem_get_cell(gp);
+
+		/* Make sure PCI access and bus master are enabled */
+		rc = pci_enable_device(gp->pdev);
+		if (rc) {
+			netdev_err(dev, "Failed to enable chip on PCI bus !\n");
+
+			/* Put cell and forget it for now, it will be considered
+			 * as still asleep, a new sleep cycle may bring it back
+			 */
+			gem_put_cell(gp);
+			return -ENXIO;
+		}
 		return gem_do_start(dev);
+	}
+
 	return 0;
 }
 
 static int gem_close(struct net_device *dev)
 {
-	if (netif_device_present(dev))
+	struct gem *gp = netdev_priv(dev);
+
+	if (netif_device_present(dev)) {
 		gem_do_stop(dev, 0);
 
+		/* Make sure bus master is disabled */
+		pci_disable_device(gp->pdev);
+
+		/* Cell not needed either if no WOL */
+		if (!gp->asleep_wol)
+			gem_put_cell(gp);
+	}
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused gem_suspend(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct gem *gp = netdev_priv(dev);
 
 	/* Lock the network stack first to avoid racing with open/close,
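Net effect of the hunks above: pci_enable_device()/gem_get_cell() now pair with pci_disable_device()/gem_put_cell() across open()/close(), instead of being buried in gem_do_start()/gem_do_stop(), which also run from the reset path. A skeleton of the balanced shape (hypothetical functions, not the driver's exact code):

#include <linux/netdevice.h>
#include <linux/pci.h>

/* Hypothetical skeleton: enable in open(), disable in close(), so the
 * internal start/stop helpers can run (e.g. from a reset task) without
 * toggling PCI device state.
 */
static int sketch_open(struct net_device *ndev, struct pci_dev *pdev)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	/* ... bring the hardware up ... */
	return 0;
}

static int sketch_close(struct net_device *ndev, struct pci_dev *pdev)
{
	/* ... take the hardware down ... */
	pci_disable_device(pdev);
	return 0;
}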
---|
@@ -2348,15 +2345,19 @@
 	gp->asleep_wol = !!gp->wake_on_lan;
 	gem_do_stop(dev, gp->asleep_wol);
 
+	/* Cell not needed either if no WOL */
+	if (!gp->asleep_wol)
+		gem_put_cell(gp);
+
 	/* Unlock the network stack */
 	rtnl_unlock();
 
 	return 0;
 }
 
-static int gem_resume(struct pci_dev *pdev)
+static int __maybe_unused gem_resume(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct gem *gp = netdev_priv(dev);
 
 	/* See locking comment in gem_suspend */
---|
@@ -2370,6 +2371,9 @@
 		rtnl_unlock();
 		return 0;
 	}
+
+	/* Enable the cell */
+	gem_get_cell(gp);
 
 	/* Restart chip. If that fails there isn't much we can do, we
 	 * leave things stopped.
---|
@@ -2387,7 +2391,6 @@
 
 	return 0;
 }
-#endif /* CONFIG_PM */
 
 static struct net_device_stats *gem_get_stats(struct net_device *dev)
 {
---|
@@ -2709,7 +2712,7 @@
 	switch (cmd) {
 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
 		data->phy_id = gp->mii_phy_addr;
-		/* Fallthrough... */
+		fallthrough;
 
 	case SIOCGMIIREG:		/* Read MII PHY register. */
 		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
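fallthrough; (from <linux/compiler_attributes.h>) maps to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can verify the intent that a bare comment could not. Minimal usage sketch (hypothetical function):

#include <linux/compiler_attributes.h>

/* Hypothetical example: the annotated fall-through is now checkable. */
static int sketch_count_cases(int cmd)
{
	int n = 0;

	switch (cmd) {
	case 0:
		n++;
		fallthrough;	/* replaces the old "Fallthrough..." comment */
	case 1:
		n++;
		break;
	}
	return n;
}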
---|
@@ -2760,7 +2763,7 @@
 	void __iomem *p = pci_map_rom(pdev, &size);
 
 	if (p) {
-		int found;
+		int found;
 
 		found = readb(p) == 0x55 &&
 			readb(p + 1) == 0xaa &&
---|
@@ -2814,10 +2817,8 @@
 	cancel_work_sync(&gp->reset_task);
 
 	/* Free resources */
-	pci_free_consistent(pdev,
-			    sizeof(struct gem_init_block),
-			    gp->init_block,
-			    gp->gblock_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
+			  gp->init_block, gp->gblock_dvma);
 	iounmap(gp->regs);
 	pci_release_regions(pdev);
 	free_netdev(dev);
---|
@@ -2873,10 +2874,10 @@
 	 */
 	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
 	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			pr_err("No usable DMA configuration, aborting\n");
 			goto err_disable_device;
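dma_set_mask(&pdev->dev, ...) is the direct replacement for pci_set_dma_mask(pdev, ...): try 64-bit addressing first, then fall back to 32-bit. A sketch under the assumption that the streaming and coherent masks should match; dma_set_mask_and_coherent() is the common combined spelling, not what this driver uses here:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical mask negotiation: prefer 64-bit DMA, else 32-bit. */
static int sketch_set_dma_masks(struct pci_dev *pdev)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit addressing available */

	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}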
---|
@@ -2964,9 +2965,8 @@
 	/* It is guaranteed that the returned buffer will be at least
 	 * PAGE_SIZE aligned.
 	 */
-	gp->init_block = (struct gem_init_block *)
-		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
-				     &gp->gblock_dvma);
+	gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+					    &gp->gblock_dvma, GFP_KERNEL);
 	if (!gp->init_block) {
 		pr_err("Cannot allocate init block, aborting\n");
 		err = -ENOMEM;
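pci_alloc_consistent() hard-coded GFP_ATOMIC; dma_alloc_coherent() takes the gfp flags explicitly, and GFP_KERNEL is safe here because probe context may sleep. It also returns void *, so the old cast disappears. Sketch with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical descriptor-block allocation mirroring the change above. */
static void *sketch_alloc_init_block(struct pci_dev *pdev, size_t size,
				     dma_addr_t *handle)
{
	/* was: pci_alloc_consistent(pdev, size, handle), i.e. GFP_ATOMIC */
	return dma_alloc_coherent(&pdev->dev, size, handle, GFP_KERNEL);
}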
---|
.. | .. |
---|
3031 | 3031 | |
---|
3032 | 3032 | } |
---|
3033 | 3033 | |
---|
| 3034 | +static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume); |
---|
3034 | 3035 | |
---|
3035 | 3036 | static struct pci_driver gem_driver = { |
---|
3036 | 3037 | .name = GEM_MODULE_NAME, |
---|
3037 | 3038 | .id_table = gem_pci_tbl, |
---|
3038 | 3039 | .probe = gem_init_one, |
---|
3039 | 3040 | .remove = gem_remove_one, |
---|
3040 | | -#ifdef CONFIG_PM |
---|
3041 | | - .suspend = gem_suspend, |
---|
3042 | | - .resume = gem_resume, |
---|
3043 | | -#endif /* CONFIG_PM */ |
---|
| 3041 | + .driver.pm = &gem_pm_ops, |
---|
3044 | 3042 | }; |
---|
3045 | 3043 | |
---|
3046 | 3044 | module_pci_driver(gem_driver); |
---|
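SIMPLE_DEV_PM_OPS() builds a dev_pm_ops that wires the two callbacks to all the system-sleep hooks, replacing the legacy pci_driver .suspend/.resume pair and the CONFIG_PM ifdefs; __maybe_unused on the callbacks silences unused-function warnings when CONFIG_PM_SLEEP is off, since the macro then compiles the references out. Roughly equivalent expansion (a sketch, not the macro's exact text):

#include <linux/pm.h>

/* What "static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume);"
 * amounts to: the same callbacks bound to suspend/resume, freeze/thaw
 * and poweroff/restore via SET_SYSTEM_SLEEP_PM_OPS().
 */
static const struct dev_pm_ops sketch_gem_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gem_suspend, gem_resume)
};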