.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3 | 4 | * Ethernet adapters. Based on earlier sk98lin, e100 and
.. | ..
8 | 9 | * those should be done at higher levels.
9 | 10 | *
10 | 11 | * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11 | | - *
12 | | - * This program is free software; you can redistribute it and/or modify
13 | | - * it under the terms of the GNU General Public License as published by
14 | | - * the Free Software Foundation; either version 2 of the License.
15 | | - *
16 | | - * This program is distributed in the hope that it will be useful,
17 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 | | - * GNU General Public License for more details.
20 | | - *
21 | | - * You should have received a copy of the GNU General Public License
22 | | - * along with this program; if not, write to the Free Software
23 | | - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 | 12 | */
25 | 13 |
26 | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
.. | ..
888 | 876 | }
889 | 877 |
890 | 878 | static const struct ethtool_ops skge_ethtool_ops = {
| 879 | + .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
891 | 880 | .get_drvinfo = skge_get_drvinfo,
892 | 881 | .get_regs_len = skge_get_regs_len,
893 | 882 | .get_regs = skge_get_regs,
.. | ..
950 | 939 | struct skge_rx_desc *rd = e->desc;
951 | 940 | dma_addr_t map;
952 | 941 |
953 | | - map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
954 | | - PCI_DMA_FROMDEVICE);
| 942 | + map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize,
| 943 | + DMA_FROM_DEVICE);
955 | 944 |
956 | | - if (pci_dma_mapping_error(skge->hw->pdev, map))
| 945 | + if (dma_mapping_error(&skge->hw->pdev->dev, map))
957 | 946 | return -1;
958 | 947 |
959 | 948 | rd->dma_lo = lower_32_bits(map);
.. | ..
1001 | 990 | struct skge_rx_desc *rd = e->desc;
1002 | 991 | rd->control = 0;
1003 | 992 | if (e->skb) {
1004 | | - pci_unmap_single(hw->pdev,
| 993 | + dma_unmap_single(&hw->pdev->dev,
1005 | 994 | dma_unmap_addr(e, mapaddr),
1006 | 995 | dma_unmap_len(e, maplen),
1007 | | - PCI_DMA_FROMDEVICE);
| 996 | + DMA_FROM_DEVICE);
1008 | 997 | dev_kfree_skb(e->skb);
1009 | 998 | e->skb = NULL;
1010 | 999 | }
.. | ..
2459 | 2448 | case SIOCGMIIPHY:
2460 | 2449 | data->phy_id = hw->phy_addr;
2461 | 2450 |
2462 | | - /* fallthru */
| 2451 | + fallthrough;
2463 | 2452 | case SIOCGMIIREG: {
2464 | 2453 | u16 val = 0;
2465 | 2454 | spin_lock_bh(&hw->phy_lock);
.. | ..
2558 | 2547 | rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2559 | 2548 | tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2560 | 2549 | skge->mem_size = tx_size + rx_size;
2561 | | - skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
| 2550 | + skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size,
| 2551 | + &skge->dma, GFP_KERNEL);
2562 | 2552 | if (!skge->mem)
2563 | 2553 | return -ENOMEM;
2564 | 2554 |
2565 | 2555 | BUG_ON(skge->dma & 7);
2566 | 2556 |
2567 | 2557 | if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
2568 | | - dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
| 2558 | + dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n");
2569 | 2559 | err = -EINVAL;
2570 | 2560 | goto free_pci_mem;
2571 | 2561 | }
2572 | | -
2573 | | - memset(skge->mem, 0, skge->mem_size);
2574 | 2562 |
2575 | 2563 | err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2576 | 2564 | if (err)
.. | ..
2638 | 2626 | skge_rx_clean(skge);
2639 | 2627 | kfree(skge->rx_ring.start);
2640 | 2628 | free_pci_mem:
2641 | | - pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
| 2629 | + dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
| 2630 | + skge->dma);
2642 | 2631 | skge->mem = NULL;
2643 | 2632 |
2644 | 2633 | return err;
.. | ..
2728 | 2717 |
2729 | 2718 | kfree(skge->rx_ring.start);
2730 | 2719 | kfree(skge->tx_ring.start);
2731 | | - pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
| 2720 | + dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
| 2721 | + skge->dma);
2732 | 2722 | skge->mem = NULL;
2733 | 2723 | return 0;
2734 | 2724 | }
.. | ..
2762 | 2752 | BUG_ON(td->control & BMU_OWN);
2763 | 2753 | e->skb = skb;
2764 | 2754 | len = skb_headlen(skb);
2765 | | - map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2766 | | - if (pci_dma_mapping_error(hw->pdev, map))
| 2755 | + map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE);
| 2756 | + if (dma_mapping_error(&hw->pdev->dev, map))
2767 | 2757 | goto mapping_error;
2768 | 2758 |
2769 | 2759 | dma_unmap_addr_set(e, mapaddr, map);
.. | ..
2843 | 2833 |
2844 | 2834 | mapping_unwind:
2845 | 2835 | e = skge->tx_ring.to_use;
2846 | | - pci_unmap_single(hw->pdev,
2847 | | - dma_unmap_addr(e, mapaddr),
2848 | | - dma_unmap_len(e, maplen),
2849 | | - PCI_DMA_TODEVICE);
| 2836 | + dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
| 2837 | + dma_unmap_len(e, maplen), DMA_TO_DEVICE);
2850 | 2838 | while (i-- > 0) {
2851 | 2839 | e = e->next;
2852 | | - pci_unmap_page(hw->pdev,
2853 | | - dma_unmap_addr(e, mapaddr),
2854 | | - dma_unmap_len(e, maplen),
2855 | | - PCI_DMA_TODEVICE);
| 2840 | + dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
| 2841 | + dma_unmap_len(e, maplen), DMA_TO_DEVICE);
2856 | 2842 | }
2857 | 2843 |
2858 | 2844 | mapping_error:
.. | ..
2869 | 2855 | {
2870 | 2856 | /* skb header vs. fragment */
2871 | 2857 | if (control & BMU_STF)
2872 | | - pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
2873 | | - dma_unmap_len(e, maplen),
2874 | | - PCI_DMA_TODEVICE);
| 2858 | + dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr),
| 2859 | + dma_unmap_len(e, maplen), DMA_TO_DEVICE);
2875 | 2860 | else
2876 | | - pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
2877 | | - dma_unmap_len(e, maplen),
2878 | | - PCI_DMA_TODEVICE);
| 2861 | + dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr),
| 2862 | + dma_unmap_len(e, maplen), DMA_TO_DEVICE);
2879 | 2863 | }
2880 | 2864 |
2881 | 2865 | /* Free all buffers in transmit ring */
.. | ..
2898 | 2882 | skge->tx_ring.to_clean = e;
2899 | 2883 | }
2900 | 2884 |
2901 | | -static void skge_tx_timeout(struct net_device *dev)
| 2885 | +static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue)
2902 | 2886 | {
2903 | 2887 | struct skge_port *skge = netdev_priv(dev);
2904 | 2888 |
.. | ..
3085 | 3069 | if (!skb)
3086 | 3070 | goto resubmit;
3087 | 3071 |
3088 | | - pci_dma_sync_single_for_cpu(skge->hw->pdev,
3089 | | - dma_unmap_addr(e, mapaddr),
3090 | | - dma_unmap_len(e, maplen),
3091 | | - PCI_DMA_FROMDEVICE);
| 3072 | + dma_sync_single_for_cpu(&skge->hw->pdev->dev,
| 3073 | + dma_unmap_addr(e, mapaddr),
| 3074 | + dma_unmap_len(e, maplen),
| 3075 | + DMA_FROM_DEVICE);
3092 | 3076 | skb_copy_from_linear_data(e->skb, skb->data, len);
3093 | | - pci_dma_sync_single_for_device(skge->hw->pdev,
3094 | | - dma_unmap_addr(e, mapaddr),
3095 | | - dma_unmap_len(e, maplen),
3096 | | - PCI_DMA_FROMDEVICE);
| 3077 | + dma_sync_single_for_device(&skge->hw->pdev->dev,
| 3078 | + dma_unmap_addr(e, mapaddr),
| 3079 | + dma_unmap_len(e, maplen),
| 3080 | + DMA_FROM_DEVICE);
3097 | 3081 | skge_rx_reuse(e, skge->rx_buf_size);
3098 | 3082 | } else {
3099 | 3083 | struct skge_element ee;
.. | ..
3113 | 3097 | goto resubmit;
3114 | 3098 | }
3115 | 3099 |
3116 | | - pci_unmap_single(skge->hw->pdev,
| 3100 | + dma_unmap_single(&skge->hw->pdev->dev,
3117 | 3101 | dma_unmap_addr(&ee, mapaddr),
3118 | | - dma_unmap_len(&ee, maplen),
3119 | | - PCI_DMA_FROMDEVICE);
| 3102 | + dma_unmap_len(&ee, maplen), DMA_FROM_DEVICE);
3120 | 3103 | }
3121 | 3104 |
3122 | 3105 | skb_put(skb, len);
.. | ..
3355 | 3338 | * because accessing phy registers requires spin wait which might
3356 | 3339 | * cause excess interrupt latency.
3357 | 3340 | */
3358 | | -static void skge_extirq(unsigned long arg)
| 3341 | +static void skge_extirq(struct tasklet_struct *t)
3359 | 3342 | {
3360 | | - struct skge_hw *hw = (struct skge_hw *) arg;
| 3343 | + struct skge_hw *hw = from_tasklet(hw, t, phy_task);
3361 | 3344 | int port;
3362 | 3345 |
3363 | 3346 | for (port = 0; port < hw->ports; port++) {
.. | ..
3734 | 3717 |
3735 | 3718 | return 0;
3736 | 3719 | }
3737 | | -
3738 | | -static int skge_debug_open(struct inode *inode, struct file *file)
3739 | | -{
3740 | | - return single_open(file, skge_debug_show, inode->i_private);
3741 | | -}
3742 | | -
3743 | | -static const struct file_operations skge_debug_fops = {
3744 | | - .owner = THIS_MODULE,
3745 | | - .open = skge_debug_open,
3746 | | - .read = seq_read,
3747 | | - .llseek = seq_lseek,
3748 | | - .release = single_release,
3749 | | -};
| 3720 | +DEFINE_SHOW_ATTRIBUTE(skge_debug);
3750 | 3721 |
3751 | 3722 | /*
3752 | 3723 | * Use network device events to create/remove/rename
.. | ..
3757 | 3728 | {
3758 | 3729 | struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3759 | 3730 | struct skge_port *skge;
3760 | | - struct dentry *d;
3761 | 3731 |
3762 | 3732 | if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
3763 | 3733 | goto done;
.. | ..
3765 | 3735 | skge = netdev_priv(dev);
3766 | 3736 | switch (event) {
3767 | 3737 | case NETDEV_CHANGENAME:
3768 | | - if (skge->debugfs) {
3769 | | - d = debugfs_rename(skge_debug, skge->debugfs,
3770 | | - skge_debug, dev->name);
3771 | | - if (d)
3772 | | - skge->debugfs = d;
3773 | | - else {
3774 | | - netdev_info(dev, "rename failed\n");
3775 | | - debugfs_remove(skge->debugfs);
3776 | | - }
3777 | | - }
| 3738 | + if (skge->debugfs)
| 3739 | + skge->debugfs = debugfs_rename(skge_debug,
| 3740 | + skge->debugfs,
| 3741 | + skge_debug, dev->name);
3778 | 3742 | break;
3779 | 3743 |
3780 | 3744 | case NETDEV_GOING_DOWN:
3781 | | - if (skge->debugfs) {
3782 | | - debugfs_remove(skge->debugfs);
3783 | | - skge->debugfs = NULL;
3784 | | - }
| 3745 | + debugfs_remove(skge->debugfs);
| 3746 | + skge->debugfs = NULL;
3785 | 3747 | break;
3786 | 3748 |
3787 | 3749 | case NETDEV_UP:
3788 | | - d = debugfs_create_file(dev->name, 0444,
3789 | | - skge_debug, dev,
3790 | | - &skge_debug_fops);
3791 | | - if (!d || IS_ERR(d))
3792 | | - netdev_info(dev, "debugfs create failed\n");
3793 | | - else
3794 | | - skge->debugfs = d;
| 3750 | + skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
| 3751 | + dev, &skge_debug_fops);
3795 | 3752 | break;
3796 | 3753 | }
3797 | 3754 |
.. | ..
3806 | 3763 |
3807 | 3764 | static __init void skge_debug_init(void)
3808 | 3765 | {
3809 | | - struct dentry *ent;
| 3766 | + skge_debug = debugfs_create_dir("skge", NULL);
3810 | 3767 |
3811 | | - ent = debugfs_create_dir("skge", NULL);
3812 | | - if (!ent || IS_ERR(ent)) {
3813 | | - pr_info("debugfs create directory failed\n");
3814 | | - return;
3815 | | - }
3816 | | -
3817 | | - skge_debug = ent;
3818 | 3768 | register_netdevice_notifier(&skge_notifier);
3819 | 3769 | }
3820 | 3770 |
.. | ..
3941 | 3891 |
3942 | 3892 | pci_set_master(pdev);
3943 | 3893 |
3944 | | - if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
| 3894 | + if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3945 | 3895 | using_dac = 1;
3946 | | - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3947 | | - } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
| 3896 | + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
| 3897 | + } else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
3948 | 3898 | using_dac = 0;
3949 | | - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
| 3899 | + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
3950 | 3900 | }
3951 | 3901 |
3952 | 3902 | if (err) {
.. | ..
3977 | 3927 | hw->pdev = pdev;
3978 | 3928 | spin_lock_init(&hw->hw_lock);
3979 | 3929 | spin_lock_init(&hw->phy_lock);
3980 | | - tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
| 3930 | + tasklet_setup(&hw->phy_task, skge_extirq);
3981 | 3931 |
3982 | | - hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
| 3932 | + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
3983 | 3933 | if (!hw->regs) {
3984 | 3934 | dev_err(&pdev->dev, "cannot map device registers\n");
3985 | 3935 | goto err_out_free_hw;
.. | ..
4104 | 4054 | #ifdef CONFIG_PM_SLEEP
4105 | 4055 | static int skge_suspend(struct device *dev)
4106 | 4056 | {
4107 | | - struct pci_dev *pdev = to_pci_dev(dev);
4108 | | - struct skge_hw *hw = pci_get_drvdata(pdev);
| 4057 | + struct skge_hw *hw = dev_get_drvdata(dev);
4109 | 4058 | int i;
4110 | 4059 |
4111 | 4060 | if (!hw)
.. | ..
4129 | 4078 |
4130 | 4079 | static int skge_resume(struct device *dev)
4131 | 4080 | {
4132 | | - struct pci_dev *pdev = to_pci_dev(dev);
4133 | | - struct skge_hw *hw = pci_get_drvdata(pdev);
| 4081 | + struct skge_hw *hw = dev_get_drvdata(dev);
4134 | 4082 | int i, err;
4135 | 4083 |
4136 | 4084 | if (!hw)
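
For reference, the DMA hunks above all apply one conversion pattern: the legacy pci_map_single()/pci_unmap_single() wrappers with PCI_DMA_* direction flags become dma_map_single()/dma_unmap_single() on &pdev->dev with enum dma_data_direction values, and mapping failures are checked with dma_mapping_error(). A minimal sketch of that calling pattern is shown below; the helper names are hypothetical and are not part of skge.c, they only illustrate the generic DMA API usage this patch converts to.

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helper: map a buffer the device will read (TX direction).
 * Legacy equivalent: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE).
 */
static int example_map_tx_buffer(struct pci_dev *pdev, void *buf, size_t len,
				 dma_addr_t *mapping)
{
	dma_addr_t map;

	map = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, map))
		return -ENOMEM;	/* mapping failed, nothing to unmap */

	*mapping = map;
	return 0;
}

/* Hypothetical helper: release the mapping created above.
 * Legacy equivalent: pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE).
 */
static void example_unmap_tx_buffer(struct pci_dev *pdev, dma_addr_t mapping,
				    size_t len)
{
	dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);
}
```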
---|