From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] net: xilinx_axienet: backport upstream Axi Ethernet driver
 updates

Backport a batch of upstream xilinx_axienet changes into the vendor
kernel tree:

- Convert the driver from the legacy PHY library to phylink, including
  the pauseparam and link_ksettings ethtool operations.
- Support 64-bit DMA descriptor pointers, autodetected from the
  hardware, with helpers for programming the paired address registers.
- Make the Tx/Rx ring sizes configurable through ethtool ringparam.
- Handle DMA mapping errors on both the Tx and Rx paths.
- Replace the DMA error tasklet with a work queue, return errors from
  the reset paths, and hold the MDIO bus lock across core resets.
- Add an optional Ethernet core IRQ handler, an optional device clock,
  and a shutdown handler.
---
kernel/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1273 ++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 883 insertions(+), 390 deletions(-)
diff --git a/kernel/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/kernel/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 5190402..9d36228 100644
--- a/kernel/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/kernel/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Xilinx Axi Ethernet device driver
*
@@ -6,6 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -20,6 +22,7 @@
 * - Add extended VLAN support.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -37,16 +40,19 @@
#include "xilinx_axienet.h"
-/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 128
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION "1.00a"
-#define AXIENET_REGS_N 32
+#define AXIENET_REGS_N 40
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
@@ -124,7 +130,7 @@
*/
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
- return in_be32(lp->dma_regs + reg);
+ return ioread32(lp->dma_regs + reg);
}
/**
@@ -139,7 +145,35 @@
static inline void axienet_dma_out32(struct axienet_local *lp,
off_t reg, u32 value)
{
- out_be32((lp->dma_regs + reg), value);
+ iowrite32(value, lp->dma_regs + reg);
+}
+
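+/* Write a dma_addr_t to a DMA pointer register pair: the low 32 bits go to
+ * @reg, and when the core uses 64-bit pointers (XAE_FEATURE_DMA_64BIT) the
+ * upper 32 bits must also be written, to @reg + 4.
+ */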
+static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
+}
+
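+/* Store a DMA address into a buffer descriptor, splitting it into the
+ * 32-bit phys field and, on 64-bit capable cores, the phys_msb field.
+ */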
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+ struct axidma_bd *desc)
+{
+ desc->phys = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ desc->phys_msb = upper_32_bits(addr);
+}
+
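+/* Read a DMA address back out of a buffer descriptor. The MSB half is
+ * shifted in two 16-bit steps so the expression is still valid when
+ * dma_addr_t is only 32 bits wide.
+ */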
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+ struct axidma_bd *desc)
+{
+ dma_addr_t ret = desc->phys;
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+ return ret;
}
/**
@@ -155,25 +189,41 @@
int i;
struct axienet_local *lp = netdev_priv(ndev);
- for (i = 0; i < RX_BD_NUM; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
- dev_kfree_skb((struct sk_buff *)
- (lp->rx_bd_v[i].sw_id_offset));
+ /* If we end up here, tx_bd_v must have been DMA allocated. */
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+
+ if (!lp->rx_bd_v)
+ return;
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_addr_t phys;
+
+ /* A NULL skb means this descriptor has not been initialised
+ * at all.
+ */
+ if (!lp->rx_bd_v[i].skb)
+ break;
+
+ dev_kfree_skb(lp->rx_bd_v[i].skb);
+
+ /* For each descriptor, we programmed cntrl with the (non-zero)
+ * descriptor size, after it had been successfully allocated.
+ * So a non-zero value in there means we need to unmap it.
+ */
+ if (lp->rx_bd_v[i].cntrl) {
+ phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+ dma_unmap_single(ndev->dev.parent, phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ }
}
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v,
- lp->tx_bd_p);
- }
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
}
/**
@@ -199,38 +249,50 @@
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
- goto out;
+ return -ENOMEM;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % TX_BD_NUM);
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ dma_addr_t addr = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+
+ lp->tx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}
- for (i = 0; i < RX_BD_NUM; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % RX_BD_NUM);
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_addr_t addr;
+
+ addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+ lp->rx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
- lp->rx_bd_v[i].sw_id_offset = (u32) skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ lp->rx_bd_v[i].skb = skb;
+ addr = dma_map_single(ndev->dev.parent, skb->data,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out;
+ }
+ desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
@@ -263,31 +325,21 @@
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
 * Tx channel is now ready to run, but it will only start transmitting
 * once we write to the tail pointer register.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
-
- /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
- ret = read_poll_timeout(axienet_ior, value,
- value & XAE_INT_PHYRSTCMPLT_MASK,
- DELAY_OF_ONE_MILLISEC, 50000, false, lp,
- XAE_IS_OFFSET);
- if (ret) {
- dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
- return ret;
- }
return 0;
out:
@@ -443,24 +495,39 @@
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
* reset process.
+ * Note that even though both TX and RX have their own reset register,
+ * they both reset the entire DMA core, so only one needs to be used.
*/
- axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- break;
- }
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
}
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
+ }
+
+ return 0;
}
/**
@@ -473,14 +540,17 @@
 * are connected to Axi Ethernet reset lines; this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
+ * Returns 0 on success or a negative error number otherwise.
*/
-static void axienet_device_reset(struct net_device *ndev)
+static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -495,9 +565,11 @@
lp->options |= XAE_OPTION_JUMBO;
}
- if (axienet_dma_bd_init(ndev)) {
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
+ return ret;
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -507,6 +579,8 @@
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
@@ -520,115 +594,70 @@
axienet_setoptions(ndev, lp->options);
netif_trans_update(ndev);
+
+ return 0;
}
/**
- * axienet_adjust_link - Adjust the PHY link speed/duplex.
+ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
* @ndev: Pointer to the net_device structure
+ * @first_bd: Index of first descriptor to clean up
+ * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @sizep: Pointer to a u32 filled with the total sum of all bytes
+ * in all cleaned-up descriptors. Ignored if NULL.
*
- * This function is called to change the speed and duplex setting after
- * auto negotiation is done by the PHY. This is the function that gets
- * registered with the PHY interface through the "of_phy_connect" call.
+ * Called either after a successful transmit operation, or after an error
+ * when setting up the chain.
+ * Returns the number of descriptors handled.
*/
-static void axienet_adjust_link(struct net_device *ndev)
+static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+ int nr_bds, u32 *sizep)
{
- u32 emmc_reg;
- u32 link_state;
- u32 setspeed = 1;
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phy = ndev->phydev;
-
- link_state = phy->speed | (phy->duplex << 1) | phy->link;
- if (lp->last_link != link_state) {
- if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
- if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
- setspeed = 0;
- } else {
- if ((phy->speed == SPEED_1000) &&
- (lp->phy_mode == PHY_INTERFACE_MODE_MII))
- setspeed = 0;
- }
-
- if (setspeed == 1) {
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
-
- switch (phy->speed) {
- case SPEED_1000:
- emmc_reg |= XAE_EMMC_LINKSPD_1000;
- break;
- case SPEED_100:
- emmc_reg |= XAE_EMMC_LINKSPD_100;
- break;
- case SPEED_10:
- emmc_reg |= XAE_EMMC_LINKSPD_10;
- break;
- default:
- dev_err(&ndev->dev, "Speed other than 10, 100 "
- "or 1Gbps is not supported\n");
- break;
- }
-
- axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
- lp->last_link = link_state;
- phy_print_status(phy);
- } else {
- netdev_err(ndev,
- "Error setting Axi Ethernet mac speed\n");
- }
- }
-}
-
-/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
- * @ndev: Pointer to the net_device structure
- *
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
- */
-static void axienet_start_xmit_done(struct net_device *ndev)
-{
- u32 size = 0;
- u32 packets = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- unsigned int status = 0;
+ int max_bds = nr_bds;
+ unsigned int status;
+ dma_addr_t phys;
+ int i;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
- while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
- /*cur_p->phys = 0;*/
+ if (max_bds == -1)
+ max_bds = lp->tx_bd_num;
+
+ for (i = 0; i < max_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+ status = cur_p->status;
+
+ /* If no number is given, clean up *all* descriptors that have
+ * been completed by the MAC.
+ */
+ if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ break;
+
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ dev_consume_skb_irq(cur_p->skb);
+
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
+ cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
cur_p->status = 0;
- size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
- packets++;
-
- ++lp->tx_bd_ci;
- lp->tx_bd_ci %= TX_BD_NUM;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
+ if (sizep)
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
-
- /* Matches barrier in axienet_start_xmit */
- smp_mb();
-
- netif_wake_queue(ndev);
+ return i;
}
/**
@@ -648,10 +677,46 @@
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+
+ /* Ensure we see all descriptor updates from device or TX IRQ path */
+ rmb();
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ if (cur_p->cntrl)
return NETDEV_TX_BUSY;
return 0;
+}
+
+/**
+ * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static void axienet_start_xmit_done(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 packets = 0;
+ u32 size = 0;
+
+ packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci -= lp->tx_bd_num;
+
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += size;
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
}
/**
@@ -675,27 +740,23 @@
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, phys;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
+ u32 orig_tail_ptr = lp->tx_bd_tail;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
-
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag + 1))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -713,30 +774,60 @@
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
+ phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+ NULL);
+ lp->tx_bd_tail = orig_tail_ptr;
+
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
+ cur_p->skb = skb;
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
+
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
return NETDEV_TX_OK;
}
@@ -764,52 +855,78 @@
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- skb = (struct sk_buff *) (cur_p->sw_id_offset);
- length = cur_p->app4 & 0x0000FFFF;
+ dma_addr_t phys;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb = cur_p->skb;
+ cur_p->skb = NULL;
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
+
+ netif_rx(skb);
+
+ size += length;
+ packets++;
}
-
- netif_rx(skb);
-
- size += length;
- packets++;
new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "RX DMA mapping error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
- cur_p->sw_id_offset = (u32) new_skb;
+ cur_p->skb = new_skb;
- ++lp->rx_bd_ci;
- lp->rx_bd_ci %= RX_BD_NUM;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
+ lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
@@ -817,7 +934,7 @@
ndev->stats.rx_bytes += size;
if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -825,7 +942,7 @@
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -844,10 +961,11 @@
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
(lp->tx_bd_v[lp->tx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -862,7 +980,7 @@
/* Write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
@@ -874,7 +992,7 @@
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -893,10 +1011,11 @@
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
(lp->rx_bd_v[lp->rx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -911,14 +1030,43 @@
/* write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(unsigned long data);
+/**
+ * axienet_eth_irq - Ethernet core Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
+ *
+ * Handle miscellaneous conditions indicated by Ethernet core IRQ.
+ */
+static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned int pending;
+
+ pending = axienet_ior(lp, XAE_IP_OFFSET);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & XAE_INT_RXFIFOOVR_MASK)
+ ndev->stats.rx_missed_errors++;
+
+ if (pending & XAE_INT_RXRJECT_MASK)
+ ndev->stats.rx_frame_errors++;
+
+ axienet_iow(lp, XAE_IS_OFFSET, pending);
+ return IRQ_HANDLED;
+}
+
+static void axienet_dma_err_handler(struct work_struct *work);
/**
* axienet_open - Driver open routine.
@@ -927,68 +1075,73 @@
* Return: 0, on success.
* non-zero error value on failure
*
- * This is the driver open routine. It calls phy_start to start the PHY device.
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
* It also allocates interrupt service routines, enables the interrupt lines
* and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
* descriptors are initialized.
*/
static int axienet_open(struct net_device *ndev)
{
- int ret, mdio_mcreg;
+ int ret;
struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
- return ret;
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
- axienet_device_reset(ndev);
- /* Enable the MDIO */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- ret = axienet_mdio_wait_until_ready(lp);
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ ret = axienet_device_reset(ndev);
+ if (ret == 0)
+ ret = axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
- if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0, lp->phy_mode);
-
- if (!phydev)
- dev_err(lp->dev, "of_phy_connect() failed\n");
- else
- phy_start(phydev);
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ if (ret) {
+ dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+ return ret;
}
- /* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ phylink_start(lp->phylink);
+
+ /* Enable worker thread for Axi DMA error handling */
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
/* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_tx_irq;
/* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_rx_irq;
+ /* Enable interrupts for Axi Ethernet core (if defined) */
+ if (lp->eth_irq > 0) {
+ ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret)
+ goto err_eth_irq;
+ }
return 0;
+err_eth_irq:
+ free_irq(lp->rx_irq, ndev);
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (phydev)
- phy_disconnect(phydev);
- tasklet_kill(&lp->dma_err_tasklet);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+ cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -999,33 +1152,60 @@
*
* Return: 0, on success.
*
- * This is the driver stop routine. It calls phy_disconnect to stop the PHY
+ * This is the driver stop routine. It calls phylink_disconnect_phy to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
* The Axi DMA Tx/Rx BDs are released.
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr;
+ u32 cr, sr;
+ int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- tasklet_kill(&lp->dma_err_tasklet);
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ axienet_iow(lp, XAE_IE_OFFSET, 0);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+
+ cancel_work_sync(&lp->dma_err_task);
+
+ if (lp->eth_irq > 0)
+ free_irq(lp->eth_irq, ndev);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
-
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
axienet_dma_bd_release(ndev);
return 0;
@@ -1078,6 +1258,16 @@
}
#endif
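+/* Forward MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) to phylink;
+ * they are only valid while the interface is running.
+ */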
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phylink_mii_ioctl(lp->phylink, rq, cmd);
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1085,6 +1275,7 @@
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
@@ -1165,15 +1356,54 @@
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
+ data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+}
+
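+/* Report the ring size limits and the currently configured ring sizes */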
+static void axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
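+/* Validate and store new ring sizes. The rings are only reallocated on
+ * the next open, so this is refused while the interface is running.
+ */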
+static int axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
}
/**
@@ -1189,12 +1419,9 @@
axienet_ethtools_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval;
struct axienet_local *lp = netdev_priv(ndev);
- epauseparm->autoneg = 0;
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
- epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
+
+ phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1213,27 +1440,9 @@
axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netdev_err(ndev,
- "Please stop netif before applying configuration\n");
- return -EFAULT;
- }
-
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- if (epauseparm->tx_pause)
- regval |= XAE_FCC_FCTX_MASK;
- else
- regval &= ~XAE_FCC_FCTX_MASK;
- if (epauseparm->rx_pause)
- regval |= XAE_FCC_FCRX_MASK;
- else
- regval &= ~XAE_FCC_FCRX_MASK;
- axienet_iow(lp, XAE_FCC_OFFSET, regval);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1283,27 +1492,6 @@
return -EFAULT;
}
- if ((ecoalesce->rx_coalesce_usecs) ||
- (ecoalesce->rx_coalesce_usecs_irq) ||
- (ecoalesce->rx_max_coalesced_frames_irq) ||
- (ecoalesce->tx_coalesce_usecs) ||
- (ecoalesce->tx_coalesce_usecs_irq) ||
- (ecoalesce->tx_max_coalesced_frames_irq) ||
- (ecoalesce->stats_block_coalesce_usecs) ||
- (ecoalesce->use_adaptive_rx_coalesce) ||
- (ecoalesce->use_adaptive_tx_coalesce) ||
- (ecoalesce->pkt_rate_low) ||
- (ecoalesce->rx_coalesce_usecs_low) ||
- (ecoalesce->rx_max_coalesced_frames_low) ||
- (ecoalesce->tx_coalesce_usecs_low) ||
- (ecoalesce->tx_max_coalesced_frames_low) ||
- (ecoalesce->pkt_rate_high) ||
- (ecoalesce->rx_coalesce_usecs_high) ||
- (ecoalesce->rx_max_coalesced_frames_high) ||
- (ecoalesce->tx_coalesce_usecs_high) ||
- (ecoalesce->tx_max_coalesced_frames_high) ||
- (ecoalesce->rate_sample_interval))
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
@@ -1312,63 +1500,241 @@
return 0;
}
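+/* Link settings are owned by phylink, so both link_ksettings operations
+ * simply delegate to it.
+ */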
+static int
+axienet_ethtools_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(lp->phylink, cmd);
+}
+
+static int
+axienet_ethtools_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(lp->phylink, cmd);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
.get_link = ethtool_op_get_link,
+ .get_ringparam = axienet_ethtools_get_ringparam,
+ .set_ringparam = axienet_ethtools_set_ringparam,
.get_pauseparam = axienet_ethtools_get_pauseparam,
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = axienet_ethtools_get_link_ksettings,
+ .set_link_ksettings = axienet_ethtools_set_link_ksettings,
+};
+
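+/* Restrict the advertised link modes to the single phy-mode the core was
+ * synthesized for, plus the speeds that mode can carry.
+ */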
+static void axienet_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* Only support the mode we are configured for */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != lp->phy_mode) {
+ netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+ phy_modes(state->interface),
+ phy_modes(lp->phy_mode));
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 1000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ break;
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10baseT_Full);
+ default:
+ break;
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
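+/* Read the link state from the internal PCS/PMA PHY; only meaningful for
+ * the SGMII and 1000BaseX interface modes.
+ */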
+static void axienet_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
+ break;
+ default:
+ break;
+ }
+}
+
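+/* Restart autonegotiation on the internal PCS/PMA PHY */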
+static void axienet_mac_an_restart(struct phylink_config *config)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
+}
+
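+/* Program the PCS for SGMII/1000BaseX; other interface modes need no
+ * MAC-side configuration here.
+ */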
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
+ state->interface,
+ state->advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n",
+ ret);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
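+/* Apply the negotiated link parameters: program the link speed into the
+ * EMMC register and the pause settings into the FCC register.
+ */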
+static void axienet_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+ switch (speed) {
+ case SPEED_1000:
+ emmc_reg |= XAE_EMMC_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ emmc_reg |= XAE_EMMC_LINKSPD_100;
+ break;
+ case SPEED_10:
+ emmc_reg |= XAE_EMMC_LINKSPD_10;
+ break;
+ default:
+ dev_err(&ndev->dev,
+ "Speed other than 10, 100 or 1Gbps is not supported\n");
+ break;
+ }
+
+ axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (tx_pause)
+ fcc_reg |= XAE_FCC_FCTX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCTX_MASK;
+ if (rx_pause)
+ fcc_reg |= XAE_FCC_FCRX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCRX_MASK;
+ axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
+}
+
+static const struct phylink_mac_ops axienet_phylink_ops = {
+ .validate = axienet_validate,
+ .mac_pcs_get_state = axienet_mac_pcs_get_state,
+ .mac_an_restart = axienet_mac_an_restart,
+ .mac_config = axienet_mac_config,
+ .mac_link_down = axienet_mac_link_down,
+ .mac_link_up = axienet_mac_link_up,
};
/**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data: Data passed
+ * axienet_dma_err_handler - Work queue task for Axi DMA Error
+ * @work: pointer to work_struct
*
* Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
* Tx/Rx BDs.
*/
-static void axienet_dma_err_handler(unsigned long data)
+static void axienet_dma_err_handler(struct work_struct *work)
{
u32 axienet_status;
u32 cr, i;
- int mdio_mcreg;
- struct axienet_local *lp = (struct axienet_local *) data;
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ dma_err_task);
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- axienet_mdio_wait_until_ready(lp);
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
- ~XAE_MDIO_MC_MDIOEN_MASK));
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
-
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- axienet_mdio_wait_until_ready(lp);
-
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->cntrl) {
+ dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+ dma_unmap_single(ndev->dev.parent, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
+ }
+ if (cur_p->skb)
+ dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
+ cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1376,10 +1742,10 @@
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
- cur_p->sw_id_offset = 0;
+ cur_p->skb = NULL;
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1422,18 +1788,18 @@
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
 * Tx channel is now ready to run, but it will only start transmitting
 * once we write to the tail pointer register.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -1445,6 +1811,8 @@
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
@@ -1476,7 +1844,8 @@
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
- struct resource *ethres, dmares;
+ struct resource *ethres;
+ int addr_width = 32;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1499,14 +1868,29 @@
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+
+ lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(lp->clk)) {
+ ret = PTR_ERR(lp->clk);
+ goto free_netdev;
+ }
+ ret = clk_prepare_enable(lp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
+ goto free_netdev;
+ }
+
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = PTR_ERR(lp->regs);
- goto free_netdev;
+ goto cleanup_clk;
}
+ lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
@@ -1579,70 +1963,156 @@
break;
default:
ret = -EINVAL;
- goto free_netdev;
+ goto cleanup_clk;
}
} else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)lp->phy_mode < 0) {
- ret = -EINVAL;
- goto free_netdev;
- }
+ ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ if (ret)
+ goto cleanup_clk;
}
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto free_netdev;
- }
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev, "unable to get DMA resource\n");
+ if (np) {
+ struct resource dmares;
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get DMA resource\n");
+ of_node_put(np);
+ goto cleanup_clk;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- goto free_netdev;
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
+ } else {
+ /* Check for these resources directly on the Ethernet node. */
+ struct resource *res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->rx_irq = platform_get_irq(pdev, 1);
+ lp->tx_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
- of_node_put(np);
- goto free_netdev;
+ goto cleanup_clk;
}
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
- goto free_netdev;
+ goto cleanup_clk;
}
+
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+ * and seeing if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
+
+ iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto cleanup_clk;
+ }
+
+ /* Check for Ethernet core IRQ (optional) */
+ if (lp->eth_irq <= 0)
+ dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!mac_addr) {
- dev_err(&pdev->dev, "could not find MAC address\n");
- goto free_netdev;
+ if (IS_ERR(mac_addr)) {
+ dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
+ PTR_ERR(mac_addr));
+ mac_addr = NULL;
}
axienet_set_mac_address(ndev, mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (lp->phy_node) {
- ret = axienet_mdio_setup(lp, pdev->dev.of_node);
- if (ret)
- dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+
+ if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!lp->phy_node) {
+ dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ ret = -EINVAL;
+ goto cleanup_mdio;
+ }
+ lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ if (!lp->pcs_phy) {
+ ret = -EPROBE_DEFER;
+ goto cleanup_mdio;
+ }
+ lp->phylink_config.pcs_poll = true;
+ }
+
+ lp->phylink_config.dev = &ndev->dev;
+ lp->phylink_config.type = PHYLINK_NETDEV;
+
+ lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
+ lp->phy_mode,
+ &axienet_phylink_ops);
+ if (IS_ERR(lp->phylink)) {
+ ret = PTR_ERR(lp->phylink);
+ dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
+ goto cleanup_mdio;
}
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto free_netdev;
+ goto cleanup_phylink;
}
return 0;
+
+cleanup_phylink:
+ phylink_destroy(lp->phylink);
+
+cleanup_mdio:
+ if (lp->pcs_phy)
+ put_device(&lp->pcs_phy->dev);
+ if (lp->mii_bus)
+ axienet_mdio_teardown(lp);
+ of_node_put(lp->phy_node);
+
+cleanup_clk:
+ clk_disable_unprepare(lp->clk);
free_netdev:
free_netdev(ndev);
@@ -1655,8 +2125,17 @@
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
- axienet_mdio_teardown(lp);
unregister_netdev(ndev);
+
+ if (lp->phylink)
+ phylink_destroy(lp->phylink);
+
+ if (lp->pcs_phy)
+ put_device(&lp->pcs_phy->dev);
+
+ axienet_mdio_teardown(lp);
+
+ clk_disable_unprepare(lp->clk);
of_node_put(lp->phy_node);
lp->phy_node = NULL;
@@ -1666,9 +2145,23 @@
return 0;
}
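+/* Quiesce the interface on shutdown/reboot so DMA is halted cleanly */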
+static void axienet_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+ netif_device_detach(ndev);
+
+ if (netif_running(ndev))
+ dev_close(ndev);
+
+ rtnl_unlock();
+}
+
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
.remove = axienet_remove,
+ .shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
--
Gitblit v1.6.2