2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/net/ethernet/renesas/ravb_main.c
+++ b/kernel/drivers/net/ethernet/renesas/ravb_main.c
@@ -111,7 +111,7 @@
  */
 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
 {
-	if (mac) {
+	if (!IS_ERR(mac)) {
 		ether_addr_copy(ndev->dev_addr, mac);
 	} else {
 		u32 mahr = ravb_read(ndev, MAHR);
@@ -162,7 +162,7 @@
 }
 
 /* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
 	.owner = THIS_MODULE,
 	.set_mdc = ravb_set_mdc,
 	.set_mdio_dir = ravb_set_mdio_dir,
@@ -175,6 +175,7 @@
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &priv->stats[q];
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_tx_desc *desc;
 	int free_num = 0;
 	int entry;
@@ -184,7 +185,7 @@
 		bool txed;
 
 		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
+					     num_tx_desc);
 		desc = &priv->tx_ring[q][entry];
 		txed = desc->die_dt == DT_FEMPTY;
 		if (free_txed_only && !txed)
@@ -193,12 +194,12 @@
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+		if (priv->tx_skb[q][entry / num_tx_desc]) {
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
+			if (entry % num_tx_desc == num_tx_desc - 1) {
+				entry /= num_tx_desc;
 				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 				priv->tx_skb[q][entry] = NULL;
 				if (txed)
@@ -217,6 +218,7 @@
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	int ring_size;
 	int i;
 
@@ -228,7 +230,7 @@
 					       le32_to_cpu(desc->dptr)))
 				dma_unmap_single(ndev->dev.parent,
 						 le32_to_cpu(desc->dptr),
-						 priv->rx_buf_sz,
+						 RX_BUF_SZ,
 						 DMA_FROM_DEVICE);
 		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -242,7 +244,7 @@
 		ravb_tx_free(ndev, q, false);
 
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -271,12 +273,13 @@
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_ex_rx_desc *rx_desc;
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
-			   NUM_TX_DESC;
+			   num_tx_desc;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -290,9 +293,9 @@
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  priv->rx_buf_sz,
+					  RX_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -311,8 +314,10 @@
 	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 	     i++, tx_desc++) {
 		tx_desc->die_dt = DT_EEMPTY;
-		tx_desc++;
-		tx_desc->die_dt = DT_EEMPTY;
+		if (num_tx_desc > 1) {
+			tx_desc++;
+			tx_desc->die_dt = DT_EEMPTY;
+		}
 	}
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -332,12 +337,10 @@
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct sk_buff *skb;
 	int ring_size;
 	int i;
-
-	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -348,18 +351,20 @@
 		goto error;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
 		priv->rx_skb[q][i] = skb;
 	}
 
-	/* Allocate rings for the aligned buffers */
-	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
-				    DPTR_ALIGN - 1, GFP_KERNEL);
-	if (!priv->tx_align[q])
-		goto error;
+	if (num_tx_desc > 1) {
+		/* Allocate rings for the aligned buffers */
+		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+					    DPTR_ALIGN - 1, GFP_KERNEL);
+		if (!priv->tx_align[q])
+			goto error;
+	}
 
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
@@ -373,7 +378,7 @@
 
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
-		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -438,12 +443,6 @@
 	/* Descriptor format */
 	ravb_ring_format(ndev, RAVB_BE);
 	ravb_ring_format(ndev, RAVB_NC);
-
-#if defined(__LITTLE_ENDIAN)
-	ravb_modify(ndev, CCC, CCC_BOC, 0);
-#else
-	ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
-#endif
 
 	/* Set AVB RX */
 	ravb_write(ndev,
@@ -582,7 +581,7 @@
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 priv->rx_buf_sz,
+					 RX_BUF_SZ,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -615,11 +614,11 @@
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       priv->rx_buf_sz +
+					       RX_BUF_SZ +
 					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
@@ -723,7 +722,6 @@
 
 	spin_lock(&priv->lock);
 	ravb_emac_interrupt_unlocked(ndev);
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return IRQ_HANDLED;
 }
@@ -738,14 +736,14 @@
 	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
 	if (eis & EIS_QFS) {
 		ris2 = ravb_read(ndev, RIS2);
-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
 			   RIS2);
 
 		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF0)
 			priv->stats[RAVB_BE].rx_over_errors++;
 
-		 /* Receive Descriptor Empty int */
+		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF1)
 			priv->stats[RAVB_NC].rx_over_errors++;
 
@@ -843,7 +841,6 @@
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -876,7 +873,6 @@
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -893,7 +889,6 @@
 	if (ravb_queue_interrupt(ndev, q))
 		result = IRQ_HANDLED;
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -916,32 +911,20 @@
 	int q = napi - priv->napi;
 	int mask = BIT(q);
 	int quota = budget;
-	u32 ris0, tis;
 
-	for (;;) {
-		tis = ravb_read(ndev, TIS);
-		ris0 = ravb_read(ndev, RIS0);
-		if (!((ris0 & mask) || (tis & mask)))
-			break;
+	/* Processing RX Descriptor Ring */
+	/* Clear RX interrupt */
+	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+	if (ravb_rx(ndev, &quota, q))
+		goto out;
 
-		/* Processing RX Descriptor Ring */
-		if (ris0 & mask) {
-			/* Clear RX interrupt */
-			ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
-			if (ravb_rx(ndev, &quota, q))
-				goto out;
-		}
-		/* Processing TX Descriptor Ring */
-		if (tis & mask) {
-			spin_lock_irqsave(&priv->lock, flags);
-			/* Clear TX interrupt */
-			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
-			ravb_tx_free(ndev, q, true);
-			netif_wake_subqueue(ndev, q);
-			mmiowb();
-			spin_unlock_irqrestore(&priv->lock, flags);
-		}
-	}
+	/* Processing RX Descriptor Ring */
+	spin_lock_irqsave(&priv->lock, flags);
+	/* Clear TX interrupt */
+	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+	ravb_tx_free(ndev, q, true);
+	netif_wake_subqueue(ndev, q);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	napi_complete(napi);
 
@@ -954,7 +937,6 @@
 		ravb_write(ndev, mask, RIE0);
 		ravb_write(ndev, mask, TIE);
 	}
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Receive error message handling */
@@ -1003,7 +985,6 @@
 	if (priv->no_avb_link && phydev->link)
 		ravb_rcv_snd_enable(ndev);
 
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (new_state && netif_msg_link(priv))
@@ -1022,6 +1003,7 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct phy_device *phydev;
 	struct device_node *pn;
+	phy_interface_t iface;
 	int err;
 
 	priv->link = 0;
@@ -1040,8 +1022,10 @@
 		}
 		pn = of_node_get(np);
 	}
-	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
-				priv->phy_interface);
+
+	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
+				     : priv->phy_interface;
+	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
 	of_node_put(pn);
 	if (!phydev) {
 		netdev_err(ndev, "failed to connect PHY\n");
@@ -1062,8 +1046,15 @@
 		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
 	}
 
-	/* 10BASE is not supported */
-	phydev->supported &= ~PHY_10BT_FEATURES;
+	/* 10BASE, Pause and Asym Pause is not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+	/* Half Duplex is not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
 
 	phy_attached_info(phydev);
 
@@ -1426,7 +1417,7 @@
 }
 
 /* Timeout function for Ethernet AVB */
-static void ravb_tx_timeout(struct net_device *ndev)
+static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 
@@ -1495,6 +1486,7 @@
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	u16 q = skb_get_queue_mapping(skb);
 	struct ravb_tstamp_skb *ts_skb;
 	struct ravb_tx_desc *desc;
@@ -1506,7 +1498,7 @@
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
-	    NUM_TX_DESC) {
+	    num_tx_desc) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
@@ -1517,41 +1509,55 @@
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto exit;
 
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
+	priv->tx_skb[q][entry / num_tx_desc] = skb;
 
-	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
-		 entry / NUM_TX_DESC * DPTR_ALIGN;
-	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
-	/* Zero length DMA descriptors are problematic as they seem to
-	 * terminate DMA transfers. Avoid them by simply using a length of
-	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
-	 *
-	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
-	 * data by the call to skb_put_padto() above this is safe with
-	 * respect to both the length of the first DMA descriptor (len)
-	 * overflowing the available data and the length of the second DMA
-	 * descriptor (skb->len - len) being negative.
-	 */
-	if (len == 0)
-		len = DPTR_ALIGN;
+	if (num_tx_desc > 1) {
+		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+			 entry / num_tx_desc * DPTR_ALIGN;
+		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
 
-	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto drop;
+		/* Zero length DMA descriptors are problematic as they seem
+		 * to terminate DMA transfers. Avoid them by simply using a
+		 * length of DPTR_ALIGN (4) when skb data is aligned to
+		 * DPTR_ALIGN.
+		 *
+		 * As skb is guaranteed to have at least ETH_ZLEN (60)
+		 * bytes of data by the call to skb_put_padto() above this
+		 * is safe with respect to both the length of the first DMA
+		 * descriptor (len) overflowing the available data and the
+		 * length of the second DMA descriptor (skb->len - len)
		 * being negative.
+		 */
+		if (len == 0)
+			len = DPTR_ALIGN;
 
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(len);
-	desc->dptr = cpu_to_le32(dma_addr);
+		memcpy(buffer, skb->data, len);
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
 
-	buffer = skb->data + len;
-	len = skb->len - len;
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto unmap;
+		desc = &priv->tx_ring[q][entry];
+		desc->ds_tagl = cpu_to_le16(len);
+		desc->dptr = cpu_to_le32(dma_addr);
 
-	desc++;
+		buffer = skb->data + len;
+		len = skb->len - len;
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto unmap;
+
+		desc++;
+	} else {
+		desc = &priv->tx_ring[q][entry];
+		len = skb->len;
+		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
+	}
 	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
 
@@ -1559,9 +1565,11 @@
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			desc--;
-			dma_unmap_single(ndev->dev.parent, dma_addr, len,
-					 DMA_TO_DEVICE);
+			if (num_tx_desc > 1) {
+				desc--;
+				dma_unmap_single(ndev->dev.parent, dma_addr,
+						 len, DMA_TO_DEVICE);
+			}
 			goto unmap;
 		}
 		ts_skb->skb = skb_get(skb);
@@ -1578,20 +1586,22 @@
 	skb_tx_timestamp(skb);
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FEND;
-	desc--;
-	desc->die_dt = DT_FSTART;
-
+	if (num_tx_desc > 1) {
+		desc->die_dt = DT_FEND;
+		desc--;
+		desc->die_dt = DT_FSTART;
+	} else {
+		desc->die_dt = DT_FSINGLE;
+	}
 	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
 
-	priv->cur_tx[q] += NUM_TX_DESC;
+	priv->cur_tx[q] += num_tx_desc;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
 	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
@@ -1600,13 +1610,12 @@
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+	priv->tx_skb[q][entry / num_tx_desc] = NULL;
 	goto exit;
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	/* If skb needs TX timestamp, it is handled in network control queue */
 	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
@@ -1623,17 +1632,10 @@
 	stats0 = &priv->stats[RAVB_BE];
 	stats1 = &priv->stats[RAVB_NC];
 
-	nstats->tx_dropped += ravb_read(ndev, TROCR);
-	ravb_write(ndev, 0, TROCR);	/* (write clear) */
-	nstats->collisions += ravb_read(ndev, CDCR);
-	ravb_write(ndev, 0, CDCR);	/* (write clear) */
-	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
-	ravb_write(ndev, 0, LCCR);	/* (write clear) */
-
-	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
-	ravb_write(ndev, 0, CERCR);	/* (write clear) */
-	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
-	ravb_write(ndev, 0, CEECR);	/* (write clear) */
+	if (priv->chip_id == RCAR_GEN3) {
+		nstats->tx_dropped += ravb_read(ndev, TROCR);
+		ravb_write(ndev, 0, TROCR);	/* (write clear) */
+	}
 
 	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
 	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
@@ -1663,7 +1665,6 @@
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_modify(ndev, ECMR, ECMR_PRM,
 		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
@@ -1704,6 +1705,8 @@
 		if (of_phy_is_fixed_link(np))
 			of_phy_deregister_fixed_link(np);
 	}
+
+	cancel_work_sync(&priv->work);
 
 	if (priv->chip_id != RCAR_GEN2) {
 		free_irq(priv->tx_irqs[RAVB_NC], ndev);
@@ -1815,10 +1818,15 @@
 
 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
 {
-	if (netif_running(ndev))
-		return -EBUSY;
+	struct ravb_private *priv = netdev_priv(ndev);
 
 	ndev->mtu = new_mtu;
+
+	if (netif_running(ndev)) {
+		synchronize_irq(priv->emac_irq);
+		ravb_emac_init(ndev);
+	}
+
 	netdev_update_features(ndev);
 
 	return 0;
@@ -1965,20 +1973,61 @@
 	}
 }
 
+static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
+	{ .soc_id = "r8a774c0" },
+	{ .soc_id = "r8a77990" },
+	{ .soc_id = "r8a77995" },
+	{ /* sentinel */ }
+};
+
 /* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	bool explicit_delay = false;
+	u32 delay;
+
+	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+		/* Valid values are 0 and 1800, according to DT bindings */
+		priv->rxcidm = !!delay;
+		explicit_delay = true;
+	}
+	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+		/* Valid values are 0 and 2000, according to DT bindings */
+		priv->txcidm = !!delay;
+		explicit_delay = true;
+	}
+
+	if (explicit_delay)
+		return;
+
+	/* Fall back to legacy rgmii-*id behavior */
+	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+		priv->rxcidm = 1;
+		priv->rgmii_override = 1;
+	}
+
+	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+		if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
+			  "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
+			  phy_modes(priv->phy_interface))) {
+			priv->txcidm = 1;
+			priv->rgmii_override = 1;
+		}
+	}
+}
+
 static void ravb_set_delay_mode(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	int set = 0;
+	u32 set = 0;
 
-	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
+	if (priv->rxcidm)
 		set |= APSR_DM_RDM;
-
-	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+	if (priv->txcidm)
 		set |= APSR_DM_TDM;
-
 	ravb_modify(ndev, APSR, APSR_DM, set);
 }
 
@@ -2049,7 +2098,9 @@
 	spin_lock_init(&priv->lock);
 	INIT_WORK(&priv->work, ravb_tx_timeout_work);
 
-	priv->phy_interface = of_get_phy_mode(np);
+	error = of_get_phy_mode(np, &priv->phy_interface);
+	if (error && error != -ENODEV)
+		goto out_release;
 
 	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
 	priv->avb_link_active_low =
@@ -2091,6 +2142,9 @@
 	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
 	ndev->min_mtu = ETH_MIN_MTU;
 
+	priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+		NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -2106,8 +2160,10 @@
 	/* Request GTI loading */
 	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
-	if (priv->chip_id != RCAR_GEN2)
+	if (priv->chip_id != RCAR_GEN2) {
+		ravb_parse_delay_mode(np, ndev);
 		ravb_set_delay_mode(ndev);
+	}
 
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2195,15 +2251,15 @@
 	if (priv->chip_id != RCAR_GEN2)
 		ravb_ptp_stop(ndev);
 
-	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
-			  priv->desc_bat_dma);
 	/* Set reset mode */
 	ravb_write(ndev, CCC_OPC_RESET, CCC);
-	pm_runtime_put_sync(&pdev->dev);
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
 	ravb_mdio_release(priv);
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+			  priv->desc_bat_dma);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);
@@ -2310,6 +2366,7 @@
 		ret = ravb_open(ndev);
 		if (ret < 0)
 			return ret;
+		ravb_set_rx_mode(ndev);
 		netif_device_attach(ndev);
 