From 297b60346df8beafee954a0fd7c2d64f33f3b9bc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 11 May 2024 01:44:05 +0000
Subject: [PATCH] rtl8211F_led_control

---
 kernel/drivers/net/ethernet/renesas/ravb_main.c | 335 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 196 insertions(+), 139 deletions(-)

diff --git a/kernel/drivers/net/ethernet/renesas/ravb_main.c b/kernel/drivers/net/ethernet/renesas/ravb_main.c
index c24b7ea..f218bac 100644
--- a/kernel/drivers/net/ethernet/renesas/ravb_main.c
+++ b/kernel/drivers/net/ethernet/renesas/ravb_main.c
@@ -111,7 +111,7 @@
  */
 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
 {
-	if (mac) {
+	if (!IS_ERR(mac)) {
 		ether_addr_copy(ndev->dev_addr, mac);
 	} else {
 		u32 mahr = ravb_read(ndev, MAHR);
@@ -162,7 +162,7 @@
 }
 
 /* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
 	.owner = THIS_MODULE,
 	.set_mdc = ravb_set_mdc,
 	.set_mdio_dir = ravb_set_mdio_dir,
@@ -175,6 +175,7 @@
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &priv->stats[q];
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_tx_desc *desc;
 	int free_num = 0;
 	int entry;
@@ -184,7 +185,7 @@
 		bool txed;
 
 		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
+					     num_tx_desc);
 		desc = &priv->tx_ring[q][entry];
 		txed = desc->die_dt == DT_FEMPTY;
 		if (free_txed_only && !txed)
@@ -193,12 +194,12 @@
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+		if (priv->tx_skb[q][entry / num_tx_desc]) {
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
+			if (entry % num_tx_desc == num_tx_desc - 1) {
+				entry /= num_tx_desc;
 				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 				priv->tx_skb[q][entry] = NULL;
 				if (txed)
@@ -217,6 +218,7 @@
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	int ring_size;
 	int i;
 
@@ -228,7 +230,7 @@
 				      le32_to_cpu(desc->dptr)))
 				dma_unmap_single(ndev->dev.parent,
 						 le32_to_cpu(desc->dptr),
-						 priv->rx_buf_sz,
+						 RX_BUF_SZ,
 						 DMA_FROM_DEVICE);
 		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -242,7 +244,7 @@
 		ravb_tx_free(ndev, q, false);
 
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -271,12 +273,13 @@
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_ex_rx_desc *rx_desc;
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
-			   NUM_TX_DESC;
+			   num_tx_desc;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -290,9 +293,9 @@
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  priv->rx_buf_sz,
+					  RX_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -311,8 +314,10 @@
 	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 	     i++, tx_desc++) {
 		tx_desc->die_dt = DT_EEMPTY;
-		tx_desc++;
-		tx_desc->die_dt = DT_EEMPTY;
+		if (num_tx_desc > 1) {
+			tx_desc++;
+			tx_desc->die_dt = DT_EEMPTY;
+		}
 	}
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -332,12 +337,10 @@
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct sk_buff *skb;
 	int ring_size;
 	int i;
-
-	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -348,18 +351,20 @@
 		goto error;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
 		priv->rx_skb[q][i] = skb;
 	}
 
-	/* Allocate rings for the aligned buffers */
-	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
-				    DPTR_ALIGN - 1, GFP_KERNEL);
-	if (!priv->tx_align[q])
-		goto error;
+	if (num_tx_desc > 1) {
+		/* Allocate rings for the aligned buffers */
+		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+					    DPTR_ALIGN - 1, GFP_KERNEL);
+		if (!priv->tx_align[q])
+			goto error;
+	}
 
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
@@ -373,7 +378,7 @@
 
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
-		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -438,12 +443,6 @@
 	/* Descriptor format */
 	ravb_ring_format(ndev, RAVB_BE);
 	ravb_ring_format(ndev, RAVB_NC);
-
-#if defined(__LITTLE_ENDIAN)
-	ravb_modify(ndev, CCC, CCC_BOC, 0);
-#else
-	ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
-#endif
 
 	/* Set AVB RX */
 	ravb_write(ndev,
@@ -582,7 +581,7 @@
 		skb = priv->rx_skb[q][entry];
 		priv->rx_skb[q][entry] = NULL;
 		dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-				 priv->rx_buf_sz,
+				 RX_BUF_SZ,
 				 DMA_FROM_DEVICE);
 		get_ts &= (q == RAVB_NC) ?
 			RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -615,11 +614,11 @@
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       priv->rx_buf_sz +
+					       RX_BUF_SZ +
 					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
@@ -723,7 +722,6 @@
 
 	spin_lock(&priv->lock);
 	ravb_emac_interrupt_unlocked(ndev);
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return IRQ_HANDLED;
 }
@@ -738,14 +736,14 @@
 	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
 	if (eis & EIS_QFS) {
 		ris2 = ravb_read(ndev, RIS2);
-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);
 
 		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF0)
 			priv->stats[RAVB_BE].rx_over_errors++;
 
-		/* Receive Descriptor Empty int */
+		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF1)
 			priv->stats[RAVB_NC].rx_over_errors++;
 
@@ -843,7 +841,6 @@
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -876,7 +873,6 @@
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -893,7 +889,6 @@
 	if (ravb_queue_interrupt(ndev, q))
 		result = IRQ_HANDLED;
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -916,32 +911,20 @@
 	int q = napi - priv->napi;
 	int mask = BIT(q);
 	int quota = budget;
-	u32 ris0, tis;
 
-	for (;;) {
-		tis = ravb_read(ndev, TIS);
-		ris0 = ravb_read(ndev, RIS0);
-		if (!((ris0 & mask) || (tis & mask)))
-			break;
+	/* Processing RX Descriptor Ring */
+	/* Clear RX interrupt */
+	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+	if (ravb_rx(ndev, &quota, q))
+		goto out;
 
-		/* Processing RX Descriptor Ring */
-		if (ris0 & mask) {
-			/* Clear RX interrupt */
-			ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
-			if (ravb_rx(ndev, &quota, q))
-				goto out;
-		}
-		/* Processing TX Descriptor Ring */
-		if (tis & mask) {
-			spin_lock_irqsave(&priv->lock, flags);
-			/* Clear TX interrupt */
-			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
-			ravb_tx_free(ndev, q, true);
-			netif_wake_subqueue(ndev, q);
-			mmiowb();
-			spin_unlock_irqrestore(&priv->lock, flags);
-		}
-	}
+	/* Processing TX Descriptor Ring */
+	spin_lock_irqsave(&priv->lock, flags);
+	/* Clear TX interrupt */
+	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+	ravb_tx_free(ndev, q, true);
+	netif_wake_subqueue(ndev, q);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	napi_complete(napi);
@@ -954,7 +937,6 @@
 			ravb_write(ndev, mask, RIE0);
 			ravb_write(ndev, mask, TIE);
 		}
-		mmiowb();
 		spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Receive error message handling */
@@ -1003,7 +985,6 @@
 		if (priv->no_avb_link && phydev->link)
 			ravb_rcv_snd_enable(ndev);
 
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (new_state && netif_msg_link(priv))
@@ -1022,6 +1003,7 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct phy_device *phydev;
 	struct device_node *pn;
+	phy_interface_t iface;
 	int err;
 
 	priv->link = 0;
@@ -1040,8 +1022,10 @@
 		}
 		pn = of_node_get(np);
 	}
-	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
-				priv->phy_interface);
+
+	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
+				     : priv->phy_interface;
+	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
 	of_node_put(pn);
 	if (!phydev) {
 		netdev_err(ndev, "failed to connect PHY\n");
@@ -1062,8 +1046,15 @@
 		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
 	}
 
-	/* 10BASE is not supported */
-	phydev->supported &= ~PHY_10BT_FEATURES;
+	/* 10BASE, Pause and Asym Pause are not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+	/* Half Duplex is not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
 
 	phy_attached_info(phydev);
 
@@ -1426,7 +1417,7 @@
 }
 
 /* Timeout function for Ethernet AVB */
-static void ravb_tx_timeout(struct net_device *ndev)
+static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 
@@ -1495,6 +1486,7 @@
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	u16 q = skb_get_queue_mapping(skb);
 	struct ravb_tstamp_skb *ts_skb;
 	struct ravb_tx_desc *desc;
@@ -1506,7 +1498,7 @@
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
-	    NUM_TX_DESC) {
+	    num_tx_desc) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
@@ -1517,41 +1509,55 @@
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto exit;
 
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
+	priv->tx_skb[q][entry / num_tx_desc] = skb;
 
-	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
-		 entry / NUM_TX_DESC * DPTR_ALIGN;
-	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
-	/* Zero length DMA descriptors are problematic as they seem to
-	 * terminate DMA transfers. Avoid them by simply using a length of
-	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
-	 *
-	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
-	 * data by the call to skb_put_padto() above this is safe with
-	 * respect to both the length of the first DMA descriptor (len)
-	 * overflowing the available data and the length of the second DMA
-	 * descriptor (skb->len - len) being negative.
-	 */
-	if (len == 0)
-		len = DPTR_ALIGN;
+	if (num_tx_desc > 1) {
+		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+			 entry / num_tx_desc * DPTR_ALIGN;
+		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
 
-	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto drop;
+		/* Zero length DMA descriptors are problematic as they seem
+		 * to terminate DMA transfers. Avoid them by simply using a
+		 * length of DPTR_ALIGN (4) when skb data is aligned to
+		 * DPTR_ALIGN.
+		 *
+		 * As skb is guaranteed to have at least ETH_ZLEN (60)
+		 * bytes of data by the call to skb_put_padto() above this
+		 * is safe with respect to both the length of the first DMA
+		 * descriptor (len) overflowing the available data and the
+		 * length of the second DMA descriptor (skb->len - len)
+		 * being negative.
+		 */
+		if (len == 0)
+			len = DPTR_ALIGN;
 
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(len);
-	desc->dptr = cpu_to_le32(dma_addr);
+		memcpy(buffer, skb->data, len);
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
 
-	buffer = skb->data + len;
-	len = skb->len - len;
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto unmap;
+		desc = &priv->tx_ring[q][entry];
+		desc->ds_tagl = cpu_to_le16(len);
+		desc->dptr = cpu_to_le32(dma_addr);
 
-	desc++;
+		buffer = skb->data + len;
+		len = skb->len - len;
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto unmap;
+
+		desc++;
+	} else {
+		desc = &priv->tx_ring[q][entry];
+		len = skb->len;
+		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
+	}
 	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
@@ -1559,9 +1565,11 @@
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			desc--;
-			dma_unmap_single(ndev->dev.parent, dma_addr, len,
-					 DMA_TO_DEVICE);
+			if (num_tx_desc > 1) {
+				desc--;
+				dma_unmap_single(ndev->dev.parent, dma_addr,
+						 len, DMA_TO_DEVICE);
+			}
 			goto unmap;
 		}
 		ts_skb->skb = skb_get(skb);
@@ -1578,20 +1586,22 @@
 	skb_tx_timestamp(skb);
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FEND;
-	desc--;
-	desc->die_dt = DT_FSTART;
-
+	if (num_tx_desc > 1) {
+		desc->die_dt = DT_FEND;
+		desc--;
+		desc->die_dt = DT_FSTART;
+	} else {
+		desc->die_dt = DT_FSINGLE;
+	}
 	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
 
-	priv->cur_tx[q] += NUM_TX_DESC;
+	priv->cur_tx[q] += num_tx_desc;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
 	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
@@ -1600,13 +1610,12 @@
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+	priv->tx_skb[q][entry / num_tx_desc] = NULL;
 	goto exit;
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	/* If skb needs TX timestamp, it is handled in network control queue */
 	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
@@ -1623,17 +1632,10 @@
 	stats0 = &priv->stats[RAVB_BE];
 	stats1 = &priv->stats[RAVB_NC];
 
-	nstats->tx_dropped += ravb_read(ndev, TROCR);
-	ravb_write(ndev, 0, TROCR);	/* (write clear) */
-	nstats->collisions += ravb_read(ndev, CDCR);
-	ravb_write(ndev, 0, CDCR);	/* (write clear) */
-	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
-	ravb_write(ndev, 0, LCCR);	/* (write clear) */
-
-	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
-	ravb_write(ndev, 0, CERCR);	/* (write clear) */
-	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
-	ravb_write(ndev, 0, CEECR);	/* (write clear) */
+	if (priv->chip_id == RCAR_GEN3) {
+		nstats->tx_dropped += ravb_read(ndev, TROCR);
+		ravb_write(ndev, 0, TROCR);	/* (write clear) */
+	}
 
 	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
 	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
@@ -1663,7 +1665,6 @@
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_modify(ndev, ECMR, ECMR_PRM,
 		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
@@ -1704,6 +1705,8 @@
 		if (of_phy_is_fixed_link(np))
 			of_phy_deregister_fixed_link(np);
 	}
+
+	cancel_work_sync(&priv->work);
 
 	if (priv->chip_id != RCAR_GEN2) {
 		free_irq(priv->tx_irqs[RAVB_NC], ndev);
@@ -1815,10 +1818,15 @@
 
 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
 {
-	if (netif_running(ndev))
-		return -EBUSY;
+	struct ravb_private *priv = netdev_priv(ndev);
 
 	ndev->mtu = new_mtu;
+
+	if (netif_running(ndev)) {
+		synchronize_irq(priv->emac_irq);
+		ravb_emac_init(ndev);
+	}
+
 	netdev_update_features(ndev);
 
 	return 0;
@@ -1965,20 +1973,61 @@
 	}
 }
 
+static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
+	{ .soc_id = "r8a774c0" },
+	{ .soc_id = "r8a77990" },
+	{ .soc_id = "r8a77995" },
+	{ /* sentinel */ }
+};
+
 /* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	bool explicit_delay = false;
+	u32 delay;
+
+	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+		/* Valid values are 0 and 1800, according to DT bindings */
+		priv->rxcidm = !!delay;
+		explicit_delay = true;
+	}
+	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+		/* Valid values are 0 and 2000, according to DT bindings */
+		priv->txcidm = !!delay;
+		explicit_delay = true;
+	}
+
+	if (explicit_delay)
+		return;
+
+	/* Fall back to legacy rgmii-*id behavior */
+	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+		priv->rxcidm = 1;
+		priv->rgmii_override = 1;
+	}
+
+	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+		if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
+			  "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
+			  phy_modes(priv->phy_interface))) {
+			priv->txcidm = 1;
+			priv->rgmii_override = 1;
+		}
+	}
+}
+
 static void ravb_set_delay_mode(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	int set = 0;
+	u32 set = 0;
 
-	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
+	if (priv->rxcidm)
 		set |= APSR_DM_RDM;
-
-	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+	if (priv->txcidm)
 		set |= APSR_DM_TDM;
-
 	ravb_modify(ndev, APSR, APSR_DM, set);
 }
 
@@ -2049,7 +2098,9 @@
 	spin_lock_init(&priv->lock);
 	INIT_WORK(&priv->work, ravb_tx_timeout_work);
 
-	priv->phy_interface = of_get_phy_mode(np);
+	error = of_get_phy_mode(np, &priv->phy_interface);
+	if (error && error != -ENODEV)
+		goto out_release;
 
 	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
 	priv->avb_link_active_low =
@@ -2091,6 +2142,9 @@
 	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
 	ndev->min_mtu = ETH_MIN_MTU;
 
+	priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+		NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -2106,8 +2160,10 @@
 	/* Request GTI loading */
 	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
-	if (priv->chip_id != RCAR_GEN2)
+	if (priv->chip_id != RCAR_GEN2) {
+		ravb_parse_delay_mode(np, ndev);
 		ravb_set_delay_mode(ndev);
+	}
 
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2195,15 +2251,15 @@
 	if (priv->chip_id != RCAR_GEN2)
 		ravb_ptp_stop(ndev);
 
-	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
-			  priv->desc_bat_dma);
 	/* Set reset mode */
 	ravb_write(ndev, CCC_OPC_RESET, CCC);
-	pm_runtime_put_sync(&pdev->dev);
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
 	ravb_mdio_release(priv);
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+			  priv->desc_bat_dma);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);
@@ -2310,6 +2366,7 @@
 		ret = ravb_open(ndev);
 		if (ret < 0)
 			return ret;
+		ravb_set_rx_mode(ndev);
 		netif_device_attach(ndev);
 	}
 
-- 
Gitblit v1.6.2
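
Note on the num_tx_desc rework in the hunks above: R-Car Gen2 keeps using two TX descriptors per packet (a DPTR_ALIGN bounce buffer for the unaligned head of the frame, then the remainder), while Gen3 posts a single DT_FSINGLE descriptor. The ring-index arithmetic is shared by both paths. The following is a standalone user-space sketch of that arithmetic only; the ring size and the demo traffic are invented for illustration, and nothing here is kernel code.

/* Standalone sketch (not kernel code) of the ravb TX ring indexing.
 * NUM_TX_DESC_GEN2/GEN3 mirror the driver constants; the ring size and
 * the demo loop below are made up for illustration.
 */
#include <stdio.h>

#define NUM_TX_DESC_GEN2 2	/* DT_FSTART + DT_FEND per packet */
#define NUM_TX_DESC_GEN3 1	/* one DT_FSINGLE descriptor per packet */

static void show(int num_tx_desc, unsigned int cur_tx, unsigned int num_tx_ring)
{
	/* Same math as ravb_start_xmit()/ravb_tx_free(): the descriptor
	 * ring holds num_tx_ring * num_tx_desc entries, and one tx_skb
	 * slot covers num_tx_desc consecutive descriptors.
	 */
	unsigned int entry = cur_tx % (num_tx_ring * num_tx_desc);

	printf("num_tx_desc=%d cur_tx=%3u -> descriptor %3u, skb slot %3u\n",
	       num_tx_desc, cur_tx, entry, entry / num_tx_desc);
}

int main(void)
{
	unsigned int cur_tx;

	for (cur_tx = 0; cur_tx < 8; cur_tx += NUM_TX_DESC_GEN2)
		show(NUM_TX_DESC_GEN2, cur_tx, 64);	/* Gen2: two per packet */
	for (cur_tx = 0; cur_tx < 4; cur_tx += NUM_TX_DESC_GEN3)
		show(NUM_TX_DESC_GEN3, cur_tx, 64);	/* Gen3: one per packet */
	return 0;
}

Running it shows why the skb bookkeeping arrays stay the same size in both modes: consecutive descriptors collapse onto one skb slot when num_tx_desc is 2.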
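Similarly, for the delay-mode hunks: ravb_parse_delay_mode() gives the explicit rx-internal-delay-ps / tx-internal-delay-ps device-tree properties precedence over the legacy rgmii-*id phy-mode fallback, and only the fallback path sets rgmii_override. Below is a compilable sketch of just that precedence logic; struct dt_node and the test vectors are hypothetical stand-ins for the OF API, only the decision rules come from the patch (the SoC quirk WARN is omitted).

/* Standalone sketch of the ravb_parse_delay_mode() precedence rules:
 * explicit *-internal-delay-ps properties win; otherwise fall back to
 * the legacy rgmii-*id phy-mode behavior.  The DT lookup stub and the
 * demo values in main() are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum phy_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

struct dt_node {		/* stand-in for device_node + of_property_read_u32() */
	bool has_rx_ps, has_tx_ps;
	unsigned int rx_ps, tx_ps;
};

static void parse_delay_mode(const struct dt_node *np, enum phy_mode mode,
			     bool *rxcidm, bool *txcidm)
{
	bool explicit_delay = false;

	if (np->has_rx_ps) {		/* "rx-internal-delay-ps": 0 or 1800 */
		*rxcidm = np->rx_ps != 0;
		explicit_delay = true;
	}
	if (np->has_tx_ps) {		/* "tx-internal-delay-ps": 0 or 2000 */
		*txcidm = np->tx_ps != 0;
		explicit_delay = true;
	}
	if (explicit_delay)
		return;			/* explicit properties suppress fallback */

	/* Legacy fallback keyed off the phy-mode */
	if (mode == RGMII_ID || mode == RGMII_RXID)
		*rxcidm = true;
	if (mode == RGMII_ID || mode == RGMII_TXID)
		*txcidm = true;
}

int main(void)
{
	struct dt_node with_prop = { .has_rx_ps = true, .rx_ps = 1800 };
	struct dt_node without_prop = { 0 };
	bool rx = false, tx = false;

	parse_delay_mode(&with_prop, RGMII_TXID, &rx, &tx);
	printf("explicit rx-internal-delay-ps wins: rx=%d tx=%d\n", rx, tx);

	rx = tx = false;
	parse_delay_mode(&without_prop, RGMII_ID, &rx, &tx);
	printf("legacy rgmii-id fallback:           rx=%d tx=%d\n", rx, tx);
	return 0;
}

The design point the patch is after: once a board's device tree states its delays explicitly, the driver stops second-guessing them from the phy-mode string, which is what makes the quirky SoCs (no TX delay support) detectable with a WARN instead of silent misconfiguration.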