@@ -111,7 +111,7 @@
  */
 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
 {
-	if (mac) {
+	if (!IS_ERR(mac)) {
 		ether_addr_copy(ndev->dev_addr, mac);
 	} else {
 		u32 mahr = ravb_read(ndev, MAHR);
@@ -162,7 +162,7 @@
 }
 
 /* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
 	.owner = THIS_MODULE,
 	.set_mdc = ravb_set_mdc,
 	.set_mdio_dir = ravb_set_mdio_dir,
@@ -175,6 +175,7 @@
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &priv->stats[q];
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_tx_desc *desc;
 	int free_num = 0;
 	int entry;
@@ -184,7 +185,7 @@
 		bool txed;
 
 		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
+					     num_tx_desc);
 		desc = &priv->tx_ring[q][entry];
 		txed = desc->die_dt == DT_FEMPTY;
 		if (free_txed_only && !txed)
@@ -193,12 +194,12 @@
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+		if (priv->tx_skb[q][entry / num_tx_desc]) {
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
+			if (entry % num_tx_desc == num_tx_desc - 1) {
+				entry /= num_tx_desc;
 				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 				priv->tx_skb[q][entry] = NULL;
 				if (txed)
@@ -217,6 +218,7 @@
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	int ring_size;
 	int i;
 
@@ -228,7 +230,7 @@
 				       le32_to_cpu(desc->dptr)))
 			dma_unmap_single(ndev->dev.parent,
 					 le32_to_cpu(desc->dptr),
-					 priv->rx_buf_sz,
+					 RX_BUF_SZ,
 					 DMA_FROM_DEVICE);
 	}
 	ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -242,7 +244,7 @@
 	ravb_tx_free(ndev, q, false);
 
 	ring_size = sizeof(struct ravb_tx_desc) *
-		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 	dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 			  priv->tx_desc_dma[q]);
 	priv->tx_ring[q] = NULL;
@@ -271,12 +273,13 @@
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_ex_rx_desc *rx_desc;
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
-			   NUM_TX_DESC;
+			   num_tx_desc;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -290,9 +293,9 @@
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  priv->rx_buf_sz,
+					  RX_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -311,8 +314,10 @@
 	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 	     i++, tx_desc++) {
 		tx_desc->die_dt = DT_EEMPTY;
-		tx_desc++;
-		tx_desc->die_dt = DT_EEMPTY;
+		if (num_tx_desc > 1) {
+			tx_desc++;
+			tx_desc->die_dt = DT_EEMPTY;
+		}
 	}
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -332,12 +337,10 @@
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct sk_buff *skb;
 	int ring_size;
 	int i;
-
-	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -348,18 +351,20 @@
 		goto error;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
 		priv->rx_skb[q][i] = skb;
 	}
 
-	/* Allocate rings for the aligned buffers */
-	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
-				    DPTR_ALIGN - 1, GFP_KERNEL);
-	if (!priv->tx_align[q])
-		goto error;
+	if (num_tx_desc > 1) {
+		/* Allocate rings for the aligned buffers */
+		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+					    DPTR_ALIGN - 1, GFP_KERNEL);
+		if (!priv->tx_align[q])
+			goto error;
+	}
 
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
@@ -373,7 +378,7 @@
 
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
-		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -438,12 +443,6 @@
 	/* Descriptor format */
 	ravb_ring_format(ndev, RAVB_BE);
 	ravb_ring_format(ndev, RAVB_NC);
-
-#if defined(__LITTLE_ENDIAN)
-	ravb_modify(ndev, CCC, CCC_BOC, 0);
-#else
-	ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
-#endif
 
 	/* Set AVB RX */
 	ravb_write(ndev,
@@ -582,7 +581,7 @@
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 priv->rx_buf_sz,
+					 RX_BUF_SZ,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -615,11 +614,11 @@
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       priv->rx_buf_sz +
+					       RX_BUF_SZ +
 					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
@@ -723,7 +722,6 @@
 
 	spin_lock(&priv->lock);
 	ravb_emac_interrupt_unlocked(ndev);
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return IRQ_HANDLED;
 }
@@ -738,14 +736,14 @@
 	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
 	if (eis & EIS_QFS) {
 		ris2 = ravb_read(ndev, RIS2);
-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
 			   RIS2);
 
 		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF0)
 			priv->stats[RAVB_BE].rx_over_errors++;
 
-		/* Receive Descriptor Empty int */
+		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF1)
 			priv->stats[RAVB_NC].rx_over_errors++;
 
@@ -843,7 +841,6 @@
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -876,7 +873,6 @@
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -893,7 +889,6 @@
 	if (ravb_queue_interrupt(ndev, q))
 		result = IRQ_HANDLED;
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -916,32 +911,20 @@
 	int q = napi - priv->napi;
 	int mask = BIT(q);
 	int quota = budget;
-	u32 ris0, tis;
 
-	for (;;) {
-		tis = ravb_read(ndev, TIS);
-		ris0 = ravb_read(ndev, RIS0);
-		if (!((ris0 & mask) || (tis & mask)))
-			break;
+	/* Processing RX Descriptor Ring */
+	/* Clear RX interrupt */
+	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+	if (ravb_rx(ndev, &quota, q))
+		goto out;
 
-		/* Processing RX Descriptor Ring */
-		if (ris0 & mask) {
-			/* Clear RX interrupt */
-			ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
-			if (ravb_rx(ndev, &quota, q))
-				goto out;
-		}
-		/* Processing TX Descriptor Ring */
-		if (tis & mask) {
-			spin_lock_irqsave(&priv->lock, flags);
-			/* Clear TX interrupt */
-			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
-			ravb_tx_free(ndev, q, true);
-			netif_wake_subqueue(ndev, q);
-			mmiowb();
-			spin_unlock_irqrestore(&priv->lock, flags);
-		}
-	}
+	/* Processing TX Descriptor Ring */
+	spin_lock_irqsave(&priv->lock, flags);
+	/* Clear TX interrupt */
+	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+	ravb_tx_free(ndev, q, true);
+	netif_wake_subqueue(ndev, q);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	napi_complete(napi);
 
@@ -954,7 +937,6 @@
 		ravb_write(ndev, mask, RIE0);
 		ravb_write(ndev, mask, TIE);
 	}
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Receive error message handling */
@@ -1003,7 +985,6 @@
 	if (priv->no_avb_link && phydev->link)
 		ravb_rcv_snd_enable(ndev);
 
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (new_state && netif_msg_link(priv))
@@ -1022,6 +1003,7 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct phy_device *phydev;
 	struct device_node *pn;
+	phy_interface_t iface;
 	int err;
 
 	priv->link = 0;
@@ -1040,8 +1022,10 @@
 		}
 		pn = of_node_get(np);
 	}
-	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
-				priv->phy_interface);
+
+	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
+				     : priv->phy_interface;
+	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
 	of_node_put(pn);
 	if (!phydev) {
 		netdev_err(ndev, "failed to connect PHY\n");
@@ -1062,8 +1046,15 @@
 		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
 	}
 
-	/* 10BASE is not supported */
-	phydev->supported &= ~PHY_10BT_FEATURES;
+	/* 10BASE, Pause and Asym Pause is not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+	/* Half Duplex is not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
 
 	phy_attached_info(phydev);
 
@@ -1426,7 +1417,7 @@
 }
 
 /* Timeout function for Ethernet AVB */
-static void ravb_tx_timeout(struct net_device *ndev)
+static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 
@@ -1495,6 +1486,7 @@
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	u16 q = skb_get_queue_mapping(skb);
 	struct ravb_tstamp_skb *ts_skb;
 	struct ravb_tx_desc *desc;
@@ -1506,7 +1498,7 @@
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
-	    NUM_TX_DESC) {
+	    num_tx_desc) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
@@ -1517,41 +1509,55 @@
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto exit;
 
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
+	priv->tx_skb[q][entry / num_tx_desc] = skb;
 
-	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
-		 entry / NUM_TX_DESC * DPTR_ALIGN;
-	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
-	/* Zero length DMA descriptors are problematic as they seem to
-	 * terminate DMA transfers. Avoid them by simply using a length of
-	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
-	 *
-	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
-	 * data by the call to skb_put_padto() above this is safe with
-	 * respect to both the length of the first DMA descriptor (len)
-	 * overflowing the available data and the length of the second DMA
-	 * descriptor (skb->len - len) being negative.
-	 */
-	if (len == 0)
-		len = DPTR_ALIGN;
+	if (num_tx_desc > 1) {
+		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+			 entry / num_tx_desc * DPTR_ALIGN;
+		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
 
-	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto drop;
+		/* Zero length DMA descriptors are problematic as they seem
+		 * to terminate DMA transfers. Avoid them by simply using a
+		 * length of DPTR_ALIGN (4) when skb data is aligned to
+		 * DPTR_ALIGN.
+		 *
+		 * As skb is guaranteed to have at least ETH_ZLEN (60)
+		 * bytes of data by the call to skb_put_padto() above this
+		 * is safe with respect to both the length of the first DMA
+		 * descriptor (len) overflowing the available data and the
+		 * length of the second DMA descriptor (skb->len - len)
+		 * being negative.
+		 */
+		if (len == 0)
+			len = DPTR_ALIGN;
 
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(len);
-	desc->dptr = cpu_to_le32(dma_addr);
+		memcpy(buffer, skb->data, len);
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
 
-	buffer = skb->data + len;
-	len = skb->len - len;
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto unmap;
+		desc = &priv->tx_ring[q][entry];
+		desc->ds_tagl = cpu_to_le16(len);
+		desc->dptr = cpu_to_le32(dma_addr);
 
-	desc++;
+		buffer = skb->data + len;
+		len = skb->len - len;
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto unmap;
+
+		desc++;
+	} else {
+		desc = &priv->tx_ring[q][entry];
+		len = skb->len;
+		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
+	}
 	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
 
@@ -1559,9 +1565,11 @@
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			desc--;
-			dma_unmap_single(ndev->dev.parent, dma_addr, len,
-					 DMA_TO_DEVICE);
+			if (num_tx_desc > 1) {
+				desc--;
+				dma_unmap_single(ndev->dev.parent, dma_addr,
+						 len, DMA_TO_DEVICE);
+			}
 			goto unmap;
 		}
 		ts_skb->skb = skb_get(skb);
@@ -1578,20 +1586,22 @@
 	skb_tx_timestamp(skb);
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FEND;
-	desc--;
-	desc->die_dt = DT_FSTART;
-
+	if (num_tx_desc > 1) {
+		desc->die_dt = DT_FEND;
+		desc--;
+		desc->die_dt = DT_FSTART;
+	} else {
+		desc->die_dt = DT_FSINGLE;
+	}
 	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
 
-	priv->cur_tx[q] += NUM_TX_DESC;
+	priv->cur_tx[q] += num_tx_desc;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
 	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
@@ -1600,13 +1610,12 @@
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+	priv->tx_skb[q][entry / num_tx_desc] = NULL;
 	goto exit;
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	/* If skb needs TX timestamp, it is handled in network control queue */
 	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
@@ -1623,17 +1632,10 @@
 	stats0 = &priv->stats[RAVB_BE];
 	stats1 = &priv->stats[RAVB_NC];
 
-	nstats->tx_dropped += ravb_read(ndev, TROCR);
-	ravb_write(ndev, 0, TROCR);	/* (write clear) */
-	nstats->collisions += ravb_read(ndev, CDCR);
-	ravb_write(ndev, 0, CDCR);	/* (write clear) */
-	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
-	ravb_write(ndev, 0, LCCR);	/* (write clear) */
-
-	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
-	ravb_write(ndev, 0, CERCR);	/* (write clear) */
-	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
-	ravb_write(ndev, 0, CEECR);	/* (write clear) */
+	if (priv->chip_id == RCAR_GEN3) {
+		nstats->tx_dropped += ravb_read(ndev, TROCR);
+		ravb_write(ndev, 0, TROCR);	/* (write clear) */
+	}
 
 	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
 	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
@@ -1663,7 +1665,6 @@
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_modify(ndev, ECMR, ECMR_PRM,
 		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
@@ -1704,6 +1705,8 @@
 		if (of_phy_is_fixed_link(np))
 			of_phy_deregister_fixed_link(np);
 	}
+
+	cancel_work_sync(&priv->work);
 
 	if (priv->chip_id != RCAR_GEN2) {
 		free_irq(priv->tx_irqs[RAVB_NC], ndev);
@@ -1815,10 +1818,15 @@
 
 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
 {
-	if (netif_running(ndev))
-		return -EBUSY;
+	struct ravb_private *priv = netdev_priv(ndev);
 
 	ndev->mtu = new_mtu;
+
+	if (netif_running(ndev)) {
+		synchronize_irq(priv->emac_irq);
+		ravb_emac_init(ndev);
+	}
+
 	netdev_update_features(ndev);
 
 	return 0;
@@ -1965,20 +1973,61 @@
 	}
 }
 
+static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
+	{ .soc_id = "r8a774c0" },
+	{ .soc_id = "r8a77990" },
+	{ .soc_id = "r8a77995" },
+	{ /* sentinel */ }
+};
+
 /* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	bool explicit_delay = false;
+	u32 delay;
+
+	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+		/* Valid values are 0 and 1800, according to DT bindings */
+		priv->rxcidm = !!delay;
+		explicit_delay = true;
+	}
+	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+		/* Valid values are 0 and 2000, according to DT bindings */
+		priv->txcidm = !!delay;
+		explicit_delay = true;
+	}
+
+	if (explicit_delay)
+		return;
+
+	/* Fall back to legacy rgmii-*id behavior */
+	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+		priv->rxcidm = 1;
+		priv->rgmii_override = 1;
+	}
+
+	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+		if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
+			  "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
+			  phy_modes(priv->phy_interface))) {
+			priv->txcidm = 1;
+			priv->rgmii_override = 1;
+		}
+	}
+}
+
 static void ravb_set_delay_mode(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	int set = 0;
+	u32 set = 0;
 
-	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
+	if (priv->rxcidm)
 		set |= APSR_DM_RDM;
-
-	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+	if (priv->txcidm)
 		set |= APSR_DM_TDM;
-
 	ravb_modify(ndev, APSR, APSR_DM, set);
 }
 
@@ -2049,7 +2098,9 @@
 	spin_lock_init(&priv->lock);
 	INIT_WORK(&priv->work, ravb_tx_timeout_work);
 
-	priv->phy_interface = of_get_phy_mode(np);
+	error = of_get_phy_mode(np, &priv->phy_interface);
+	if (error && error != -ENODEV)
+		goto out_release;
 
 	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
 	priv->avb_link_active_low =
@@ -2091,6 +2142,9 @@
 	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
 	ndev->min_mtu = ETH_MIN_MTU;
 
+	priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+		NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -2106,8 +2160,10 @@
 	/* Request GTI loading */
 	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
-	if (priv->chip_id != RCAR_GEN2)
+	if (priv->chip_id != RCAR_GEN2) {
+		ravb_parse_delay_mode(np, ndev);
 		ravb_set_delay_mode(ndev);
+	}
 
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2195,15 +2251,15 @@
 	if (priv->chip_id != RCAR_GEN2)
 		ravb_ptp_stop(ndev);
 
-	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
-			  priv->desc_bat_dma);
 	/* Set reset mode */
 	ravb_write(ndev, CCC_OPC_RESET, CCC);
-	pm_runtime_put_sync(&pdev->dev);
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
 	ravb_mdio_release(priv);
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+			  priv->desc_bat_dma);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);
@@ -2310,6 +2366,7 @@
 		ret = ravb_open(ndev);
 		if (ret < 0)
 			return ret;
+		ravb_set_rx_mode(ndev);
 		netif_device_attach(ndev);
 	}
 
---|