...
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* drivers/net/ethernet/freescale/gianfar.c
  *
  * Gianfar Ethernet Driver
...
  *
  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  * Copyright 2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
  *
  * Gianfar: AKA Lambda Draconis, "Dragon"
  * RA 11 31 24.2
...
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
 
 #include "gianfar.h"
 
 #define TX_TIMEOUT (5*HZ)
-
-const char gfar_driver_version[] = "2.0";
-
-static int gfar_enet_open(struct net_device *dev);
-static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void gfar_reset_task(struct work_struct *work);
-static void gfar_timeout(struct net_device *dev);
-static int gfar_close(struct net_device *dev);
-static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
-				int alloc_cnt);
-static int gfar_set_mac_address(struct net_device *dev);
-static int gfar_change_mtu(struct net_device *dev, int new_mtu);
-static irqreturn_t gfar_error(int irq, void *dev_id);
-static irqreturn_t gfar_transmit(int irq, void *dev_id);
-static irqreturn_t gfar_interrupt(int irq, void *dev_id);
-static void adjust_link(struct net_device *dev);
-static noinline void gfar_update_link_state(struct gfar_private *priv);
-static int init_phy(struct net_device *dev);
-static int gfar_probe(struct platform_device *ofdev);
-static int gfar_remove(struct platform_device *ofdev);
-static void free_skb_resources(struct gfar_private *priv);
-static void gfar_set_multi(struct net_device *dev);
-static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
-static void gfar_configure_serdes(struct net_device *dev);
-static int gfar_poll_rx(struct napi_struct *napi, int budget);
-static int gfar_poll_tx(struct napi_struct *napi, int budget);
-static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
-static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gfar_netpoll(struct net_device *dev);
-#endif
-int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
-static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
-static void gfar_halt_nodisable(struct gfar_private *priv);
-static void gfar_clear_exact_match(struct net_device *dev);
-static void gfar_set_mac_for_addr(struct net_device *dev, int num,
-				  const u8 *addr);
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
...
 	gfar_wmb();
 
 	bdp->lstatus = cpu_to_be32(lstatus);
-}
-
-static void gfar_init_bds(struct net_device *ndev)
-{
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	struct txbd8 *txbdp;
-	u32 __iomem *rfbptr;
-	int i, j;
-
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		tx_queue = priv->tx_queue[i];
-		/* Initialize some variables in our dev structure */
-		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
-		tx_queue->dirty_tx = tx_queue->tx_bd_base;
-		tx_queue->cur_tx = tx_queue->tx_bd_base;
-		tx_queue->skb_curtx = 0;
-		tx_queue->skb_dirtytx = 0;
-
-		/* Initialize Transmit Descriptor Ring */
-		txbdp = tx_queue->tx_bd_base;
-		for (j = 0; j < tx_queue->tx_ring_size; j++) {
-			txbdp->lstatus = 0;
-			txbdp->bufPtr = 0;
-			txbdp++;
-		}
-
-		/* Set the last descriptor in the ring to indicate wrap */
-		txbdp--;
-		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
-					    TXBD_WRAP);
-	}
-
-	rfbptr = &regs->rfbptr0;
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-
-		rx_queue->next_to_clean = 0;
-		rx_queue->next_to_use = 0;
-		rx_queue->next_to_alloc = 0;
-
-		/* make sure next_to_clean != next_to_use after this
-		 * by leaving at least 1 unused descriptor
-		 */
-		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
-
-		rx_queue->rfbptr = rfbptr;
-		rfbptr += 2;
-	}
-}
-
-static int gfar_alloc_skb_resources(struct net_device *ndev)
-{
-	void *vaddr;
-	dma_addr_t addr;
-	int i, j;
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct device *dev = priv->dev;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-
-	priv->total_tx_ring_size = 0;
-	for (i = 0; i < priv->num_tx_queues; i++)
-		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
-
-	priv->total_rx_ring_size = 0;
-	for (i = 0; i < priv->num_rx_queues; i++)
-		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
-
-	/* Allocate memory for the buffer descriptors */
-	vaddr = dma_alloc_coherent(dev,
-				   (priv->total_tx_ring_size *
-				    sizeof(struct txbd8)) +
-				   (priv->total_rx_ring_size *
-				    sizeof(struct rxbd8)),
-				   &addr, GFP_KERNEL);
-	if (!vaddr)
-		return -ENOMEM;
-
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		tx_queue = priv->tx_queue[i];
-		tx_queue->tx_bd_base = vaddr;
-		tx_queue->tx_bd_dma_base = addr;
-		tx_queue->dev = ndev;
-		/* enet DMA only understands physical addresses */
-		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
-		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
-	}
-
-	/* Start the rx descriptor ring where the tx ring leaves off */
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_bd_base = vaddr;
-		rx_queue->rx_bd_dma_base = addr;
-		rx_queue->ndev = ndev;
-		rx_queue->dev = dev;
-		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
-		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
-	}
-
-	/* Setup the skbuff rings */
-	for (i = 0; i < priv->num_tx_queues; i++) {
-		tx_queue = priv->tx_queue[i];
-		tx_queue->tx_skbuff =
-			kmalloc_array(tx_queue->tx_ring_size,
-				      sizeof(*tx_queue->tx_skbuff),
-				      GFP_KERNEL);
-		if (!tx_queue->tx_skbuff)
-			goto cleanup;
-
-		for (j = 0; j < tx_queue->tx_ring_size; j++)
-			tx_queue->tx_skbuff[j] = NULL;
-	}
-
-	for (i = 0; i < priv->num_rx_queues; i++) {
-		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
-					    sizeof(*rx_queue->rx_buff),
-					    GFP_KERNEL);
-		if (!rx_queue->rx_buff)
-			goto cleanup;
-	}
-
-	gfar_init_bds(ndev);
-
-	return 0;
-
-cleanup:
-	free_skb_resources(priv);
-	return -ENOMEM;
 }
 
 static void gfar_init_tx_rx_base(struct gfar_private *priv)
...
 	}
 }
 
-void gfar_configure_coalescing_all(struct gfar_private *priv)
+static void gfar_configure_coalescing_all(struct gfar_private *priv)
 {
 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 }
...
 	return &dev->stats;
 }
 
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ *    do a CRC on it (little endian), and reverse the bits of the
+ *    result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ *    table. The table is controlled through 8 32-bit registers:
+ *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
+ *    entry 255. This means that the 3 most significant bits of the
+ *    hash index select which gaddr register to use, and the other
+ *    5 bits indicate which bit (assuming an IBM numbering scheme,
+ *    which for PowerPC (tm) is usually the case) in the register
+ *    holds the entry.
+ */
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+	u32 tempval;
+	struct gfar_private *priv = netdev_priv(dev);
+	u32 result = ether_crc(ETH_ALEN, addr);
+	int width = priv->hash_width;
+	u8 whichbit = (result >> (32 - width)) & 0x1f;
+	u8 whichreg = result >> (32 - width + 5);
+	u32 value = (1 << (31 - whichbit));
+
+	tempval = gfar_read(priv->hash_regs[whichreg]);
+	tempval |= value;
+	gfar_write(priv->hash_regs[whichreg], tempval);
+}
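Aside (not part of the patch): a minimal userspace sketch of the register/bit selection that gfar_set_hash_for_addr() performs, assuming the non-extended case where hash_width is 8 and the table lives in gaddr0-7. The CRC value is made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t result = 0xd4000000;	/* stand-in for ether_crc(ETH_ALEN, addr) */
	int width = 8;			/* non-extended hash: 256 entries in gaddr0-7 */
	uint8_t whichbit = (result >> (32 - width)) & 0x1f;	/* low 5 bits of index */
	uint8_t whichreg = result >> (32 - width + 5);		/* top 3 bits of index */
	uint32_t value = 1u << (31 - whichbit);			/* IBM bit numbering */

	/* prints: gaddr6 |= 0x00000800 for hash index 0xd4 */
	printf("gaddr%d |= 0x%08x for hash index 0x%02x\n",
	       whichreg, value, result >> (32 - width));
	return 0;
}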
+
+/* There are multiple MAC Address register pairs on some controllers
+ * This function sets the numth pair to a given address
+ */
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+				  const u8 *addr)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	u32 __iomem *macptr = &regs->macstnaddr1;
+
+	macptr += num*2;
+
+	/* For a station address of 0x12345678ABCD in transmission
+	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+	 * MACnADDR2 is set to 0x34120000.
+	 */
+	tempval = (addr[5] << 24) | (addr[4] << 16) |
+		  (addr[3] << 8) | addr[2];
+
+	gfar_write(macptr, tempval);
+
+	tempval = (addr[1] << 24) | (addr[0] << 16);
+
+	gfar_write(macptr+1, tempval);
+}
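The byte swizzling above is easy to sanity-check in isolation. A small sketch, not from the patch, using the station address quoted in the comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 0x12345678ABCD in transmission order, as in the comment above */
	const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t macstnaddr1 = ((uint32_t)addr[5] << 24) | (addr[4] << 16) |
			       (addr[3] << 8) | addr[2];
	uint32_t macstnaddr2 = ((uint32_t)addr[1] << 24) | (addr[0] << 16);

	printf("MACnADDR1 = 0x%08X\n", macstnaddr1);	/* 0xCDAB7856 */
	printf("MACnADDR2 = 0x%08X\n", macstnaddr2);	/* 0x34120000 */
	return 0;
}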
+
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
 	int ret;
...
 
 	return 0;
 }
-
-static const struct net_device_ops gfar_netdev_ops = {
-	.ndo_open = gfar_enet_open,
-	.ndo_start_xmit = gfar_start_xmit,
-	.ndo_stop = gfar_close,
-	.ndo_change_mtu = gfar_change_mtu,
-	.ndo_set_features = gfar_set_features,
-	.ndo_set_rx_mode = gfar_set_multi,
-	.ndo_tx_timeout = gfar_timeout,
-	.ndo_do_ioctl = gfar_ioctl,
-	.ndo_get_stats = gfar_get_stats,
-	.ndo_set_mac_address = gfar_set_mac_addr,
-	.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = gfar_netpoll,
-#endif
-};
 
 static void gfar_ints_disable(struct gfar_private *priv)
 {
...
 	int num = 0;
 
 	for_each_available_child_of_node(np, child)
-		if (!of_node_cmp(child->name, "queue-group"))
+		if (of_node_name_eq(child, "queue-group"))
 			num++;
 
 	return num;
 }
 
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 ecntrl;
+
+	ecntrl = gfar_read(&regs->ecntrl);
+
+	if (ecntrl & ECNTRL_SGMII_MODE)
+		return PHY_INTERFACE_MODE_SGMII;
+
+	if (ecntrl & ECNTRL_TBI_MODE) {
+		if (ecntrl & ECNTRL_REDUCED_MODE)
+			return PHY_INTERFACE_MODE_RTBI;
+		else
+			return PHY_INTERFACE_MODE_TBI;
+	}
+
+	if (ecntrl & ECNTRL_REDUCED_MODE) {
+		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
+			return PHY_INTERFACE_MODE_RMII;
+		} else {
+			phy_interface_t interface = priv->interface;
+
+			/* This isn't autodetected right now, so it must
+			 * be set by the device tree or platform code.
+			 */
+			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+				return PHY_INTERFACE_MODE_RGMII_ID;
+
+			return PHY_INTERFACE_MODE_RGMII;
+		}
+	}
+
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+		return PHY_INTERFACE_MODE_GMII;
+
+	return PHY_INTERFACE_MODE_MII;
+}
+
 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 {
 	const char *model;
-	const char *ctype;
 	const void *mac_addr;
 	int err = 0, i;
+	phy_interface_t interface;
 	struct net_device *dev = NULL;
 	struct gfar_private *priv = NULL;
 	struct device_node *np = ofdev->dev.of_node;
...
 	/* Parse and initialize group specific information */
 	if (priv->mode == MQ_MG_MODE) {
 		for_each_available_child_of_node(np, child) {
-			if (of_node_cmp(child->name, "queue-group"))
+			if (!of_node_name_eq(child, "queue-group"))
 				continue;
 
 			err = gfar_parse_group(child, priv, model);
...
 
 	mac_addr = of_get_mac_address(np);
 
-	if (mac_addr)
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+	if (!IS_ERR(mac_addr)) {
+		ether_addr_copy(dev->dev_addr, mac_addr);
+	} else {
+		eth_hw_addr_random(dev);
+		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
+	}
 
 	if (model && !strcasecmp(model, "TSEC"))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
...
			     FSL_GIANFAR_DEV_HAS_TIMER |
			     FSL_GIANFAR_DEV_HAS_RX_FILER;
 
-	err = of_property_read_string(np, "phy-connection-type", &ctype);
-
-	/* We only care about rgmii-id. The rest are autodetected */
-	if (err == 0 && !strcmp(ctype, "rgmii-id"))
-		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
+	/* Use PHY connection type from the DT node if one is specified there.
+	 * rgmii-id really needs to be specified. Other types can be
+	 * detected by hardware
+	 */
+	err = of_get_phy_mode(np, &interface);
+	if (!err)
+		priv->interface = interface;
 	else
-		priv->interface = PHY_INTERFACE_MODE_MII;
+		priv->interface = gfar_get_interface(dev);
 
 	if (of_find_property(np, "fsl,magic-packet", NULL))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
...
 	gfar_free_tx_queues(priv);
 	free_gfar_dev(priv);
 	return err;
-}
-
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct hwtstamp_config config;
-	struct gfar_private *priv = netdev_priv(netdev);
-
-	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-		return -EFAULT;
-
-	/* reserved for future extensions */
-	if (config.flags)
-		return -EINVAL;
-
-	switch (config.tx_type) {
-	case HWTSTAMP_TX_OFF:
-		priv->hwts_tx_en = 0;
-		break;
-	case HWTSTAMP_TX_ON:
-		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
-			return -ERANGE;
-		priv->hwts_tx_en = 1;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	switch (config.rx_filter) {
-	case HWTSTAMP_FILTER_NONE:
-		if (priv->hwts_rx_en) {
-			priv->hwts_rx_en = 0;
-			reset_gfar(netdev);
-		}
-		break;
-	default:
-		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
-			return -ERANGE;
-		if (!priv->hwts_rx_en) {
-			priv->hwts_rx_en = 1;
-			reset_gfar(netdev);
-		}
-		config.rx_filter = HWTSTAMP_FILTER_ALL;
-		break;
-	}
-
-	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-		-EFAULT : 0;
-}
-
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct hwtstamp_config config;
-	struct gfar_private *priv = netdev_priv(netdev);
-
-	config.flags = 0;
-	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-	config.rx_filter = (priv->hwts_rx_en ?
-			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
-	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-		-EFAULT : 0;
-}
-
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct phy_device *phydev = dev->phydev;
-
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	if (cmd == SIOCSHWTSTAMP)
-		return gfar_hwtstamp_set(dev, rq);
-	if (cmd == SIOCGHWTSTAMP)
-		return gfar_hwtstamp_get(dev, rq);
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
...
 	if (priv->errata)
 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
 			 priv->errata);
+}
+
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+		priv->extended_hash = 1;
+		priv->hash_width = 9;
+
+		priv->hash_regs[0] = &regs->igaddr0;
+		priv->hash_regs[1] = &regs->igaddr1;
+		priv->hash_regs[2] = &regs->igaddr2;
+		priv->hash_regs[3] = &regs->igaddr3;
+		priv->hash_regs[4] = &regs->igaddr4;
+		priv->hash_regs[5] = &regs->igaddr5;
+		priv->hash_regs[6] = &regs->igaddr6;
+		priv->hash_regs[7] = &regs->igaddr7;
+		priv->hash_regs[8] = &regs->gaddr0;
+		priv->hash_regs[9] = &regs->gaddr1;
+		priv->hash_regs[10] = &regs->gaddr2;
+		priv->hash_regs[11] = &regs->gaddr3;
+		priv->hash_regs[12] = &regs->gaddr4;
+		priv->hash_regs[13] = &regs->gaddr5;
+		priv->hash_regs[14] = &regs->gaddr6;
+		priv->hash_regs[15] = &regs->gaddr7;
+
+	} else {
+		priv->extended_hash = 0;
+		priv->hash_width = 8;
+
+		priv->hash_regs[0] = &regs->gaddr0;
+		priv->hash_regs[1] = &regs->gaddr1;
+		priv->hash_regs[2] = &regs->gaddr2;
+		priv->hash_regs[3] = &regs->gaddr3;
+		priv->hash_regs[4] = &regs->gaddr4;
+		priv->hash_regs[5] = &regs->gaddr5;
+		priv->hash_regs[6] = &regs->gaddr6;
+		priv->hash_regs[7] = &regs->gaddr7;
+	}
+}
+
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+	u32 res;
+
+	/* Normally TSEC should not hang on GRS commands, so we should
+	 * actually wait for IEVENT_GRSC flag.
+	 */
+	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
+		return 0;
+
+	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+	 * and the Rx can be safely reset.
+	 */
+	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+	res &= 0x7f807f80;
+	if ((res & 0xffff) == (res >> 16))
+		return 1;
+
+	return 0;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt_nodisable(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	unsigned int timeout;
+	int stopped;
+
+	gfar_ints_disable(priv);
+
+	if (gfar_is_dma_stopped(priv))
+		return;
+
+	/* Stop the DMA, and wait for it to stop */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&regs->dmactrl, tempval);
+
+retry:
+	timeout = 1000;
+	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+		cpu_relax();
+		timeout--;
+	}
+
+	if (!timeout)
+		stopped = gfar_is_dma_stopped(priv);
+
+	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+	    !__gfar_is_rx_idle(priv))
+		goto retry;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+
+	/* Disable the Rx/Tx hw queues */
+	gfar_write(&regs->rqueue, 0);
+	gfar_write(&regs->tqueue, 0);
+
+	mdelay(10);
+
+	gfar_halt_nodisable(priv);
+
+	/* Disable Rx/Tx DMA */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+}
+
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
+{
+	struct txbd8 *txbdp;
+	struct gfar_private *priv = netdev_priv(tx_queue->dev);
+	int i, j;
+
+	txbdp = tx_queue->tx_bd_base;
+
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
+			continue;
+
+		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
+		txbdp->lstatus = 0;
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+		     j++) {
+			txbdp++;
+			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				       be16_to_cpu(txbdp->length),
+				       DMA_TO_DEVICE);
+		}
+		txbdp++;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
+	}
+	kfree(tx_queue->tx_skbuff);
+	tx_queue->tx_skbuff = NULL;
+}
+
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+	int i;
+
+	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+	dev_kfree_skb(rx_queue->skb);
+
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
+		rxbdp->lstatus = 0;
+		rxbdp->bufPtr = 0;
+		rxbdp++;
+
+		if (!rxb->page)
+			continue;
+
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rxb->page);
+
+		rxb->page = NULL;
+	}
+
+	kfree(rx_queue->rx_buff);
+	rx_queue->rx_buff = NULL;
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff
+ */
+static void free_skb_resources(struct gfar_private *priv)
+{
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
+
+	/* Go through all the buffer descriptors and free their data buffers */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		struct netdev_queue *txq;
+
+		tx_queue = priv->tx_queue[i];
+		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
+		if (tx_queue->tx_skbuff)
+			free_skb_tx_queue(tx_queue);
+		netdev_tx_reset_queue(txq);
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		if (rx_queue->rx_buff)
+			free_skb_rx_queue(rx_queue);
+	}
+
+	dma_free_coherent(priv->dev,
+			  sizeof(struct txbd8) * priv->total_tx_ring_size +
+			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			  priv->tx_queue[0]->tx_bd_base,
+			  priv->tx_queue[0]->tx_bd_dma_base);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	netif_tx_stop_all_queues(dev);
+
+	smp_mb__before_atomic();
+	set_bit(GFAR_DOWN, &priv->state);
+	smp_mb__after_atomic();
+
+	disable_napi(priv);
+
+	/* disable ints and gracefully shut down Rx/Tx DMA */
+	gfar_halt(priv);
+
+	phy_stop(dev->phydev);
+
+	free_skb_resources(priv);
+}
+
+static void gfar_start(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	int i = 0;
+
+	/* Enable Rx/Tx hw queues */
+	gfar_write(&regs->rqueue, priv->rqueue);
+	gfar_write(&regs->tqueue, priv->tqueue);
+
+	/* Initialize DMACTRL to have WWR and WOP */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval |= DMACTRL_INIT_SETTINGS;
+	gfar_write(&regs->dmactrl, tempval);
+
+	/* Make sure we aren't stopped */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&regs->dmactrl, tempval);
+
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear THLT/RHLT, so that the DMA starts polling now */
+		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+	}
+
+	/* Enable Rx/Tx DMA */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+
+	gfar_ints_enable(priv);
+
+	netif_trans_update(priv->ndev); /* prevent tx timeout */
+}
+
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
+{
+	struct page *page;
+	dma_addr_t addr;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
+
+	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+		__free_page(page);
+
+		return false;
+	}
+
+	rxb->dma = addr;
+	rxb->page = page;
+	rxb->page_offset = 0;
+
+	return true;
+}
+
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
+{
+	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+	atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt)
+{
+	struct rxbd8 *bdp;
+	struct gfar_rx_buff *rxb;
+	int i;
+
+	i = rx_queue->next_to_use;
+	bdp = &rx_queue->rx_bd_base[i];
+	rxb = &rx_queue->rx_buff[i];
+
+	while (alloc_cnt--) {
+		/* try reuse page */
+		if (unlikely(!rxb->page)) {
+			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+				gfar_rx_alloc_err(rx_queue);
+				break;
+			}
+		}
+
+		/* Setup the new RxBD */
+		gfar_init_rxbdp(rx_queue, bdp,
+				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+		/* Update to the next pointer */
+		bdp++;
+		rxb++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size)) {
+			i = 0;
+			bdp = rx_queue->rx_bd_base;
+			rxb = rx_queue->rx_buff;
+		}
+	}
+
+	rx_queue->next_to_use = i;
+	rx_queue->next_to_alloc = i;
+}
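The refill loop above walks the ring with a plain index and wraps it by hand instead of taking a modulo on every pass. The same pattern in miniature (sketch only, sizes made up, not part of the patch):

#include <stdio.h>

int main(void)
{
	int ring_size = 4;	/* pretend rx_ring_size */
	int i = 2;		/* pretend next_to_use */
	int alloc_cnt = 5;

	while (alloc_cnt--) {
		printf("refill bd[%d]\n", i);	/* visits 2, 3, 0, 1, 2 */
		if (++i == ring_size)
			i = 0;	/* wrap back to the base of the ring */
	}
	return 0;
}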
+
+static void gfar_init_bds(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct txbd8 *txbdp;
+	u32 __iomem *rfbptr;
+	int i, j;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		/* Initialize some variables in our dev structure */
+		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+		tx_queue->dirty_tx = tx_queue->tx_bd_base;
+		tx_queue->cur_tx = tx_queue->tx_bd_base;
+		tx_queue->skb_curtx = 0;
+		tx_queue->skb_dirtytx = 0;
+
+		/* Initialize Transmit Descriptor Ring */
+		txbdp = tx_queue->tx_bd_base;
+		for (j = 0; j < tx_queue->tx_ring_size; j++) {
+			txbdp->lstatus = 0;
+			txbdp->bufPtr = 0;
+			txbdp++;
+		}
+
+		/* Set the last descriptor in the ring to indicate wrap */
+		txbdp--;
+		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+					    TXBD_WRAP);
+	}
+
+	rfbptr = &regs->rfbptr0;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+
+		rx_queue->next_to_clean = 0;
+		rx_queue->next_to_use = 0;
+		rx_queue->next_to_alloc = 0;
+
+		/* make sure next_to_clean != next_to_use after this
+		 * by leaving at least 1 unused descriptor
+		 */
+		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
+
+		rx_queue->rfbptr = rfbptr;
+		rfbptr += 2;
+	}
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+	void *vaddr;
+	dma_addr_t addr;
+	int i, j;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct device *dev = priv->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	priv->total_tx_ring_size = 0;
+	for (i = 0; i < priv->num_tx_queues; i++)
+		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+	priv->total_rx_ring_size = 0;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+	/* Allocate memory for the buffer descriptors */
+	vaddr = dma_alloc_coherent(dev,
+				   (priv->total_tx_ring_size *
+				    sizeof(struct txbd8)) +
+				   (priv->total_rx_ring_size *
+				    sizeof(struct rxbd8)),
+				   &addr, GFP_KERNEL);
+	if (!vaddr)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_bd_base = vaddr;
+		tx_queue->tx_bd_dma_base = addr;
+		tx_queue->dev = ndev;
+		/* enet DMA only understands physical addresses */
+		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+	}
+
+	/* Start the rx descriptor ring where the tx ring leaves off */
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_bd_base = vaddr;
+		rx_queue->rx_bd_dma_base = addr;
+		rx_queue->ndev = ndev;
+		rx_queue->dev = dev;
+		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+	}
+
+	/* Setup the skbuff rings */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_skbuff =
+			kmalloc_array(tx_queue->tx_ring_size,
+				      sizeof(*tx_queue->tx_skbuff),
+				      GFP_KERNEL);
+		if (!tx_queue->tx_skbuff)
+			goto cleanup;
+
+		for (j = 0; j < tx_queue->tx_ring_size; j++)
+			tx_queue->tx_skbuff[j] = NULL;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+					    sizeof(*rx_queue->rx_buff),
+					    GFP_KERNEL);
+		if (!rx_queue->rx_buff)
+			goto cleanup;
+	}
+
+	gfar_init_bds(ndev);
+
+	return 0;
+
+cleanup:
+	free_skb_resources(priv);
+	return -ENOMEM;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	int err;
+
+	gfar_mac_reset(priv);
+
+	err = gfar_alloc_skb_resources(ndev);
+	if (err)
+		return err;
+
+	gfar_init_tx_rx_base(priv);
+
+	smp_mb__before_atomic();
+	clear_bit(GFAR_DOWN, &priv->state);
+	smp_mb__after_atomic();
+
+	/* Start Rx/Tx DMA and enable the interrupts */
+	gfar_start(priv);
+
+	/* force link state update after mac reset */
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	phy_start(ndev->phydev);
+
+	enable_napi(priv);
+
+	netif_tx_wake_all_queues(ndev);
+
+	return 0;
+}
+
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
+	u32 val = 0;
+
+	if (!phydev->duplex)
+		return val;
+
+	if (!priv->pause_aneg_en) {
+		if (priv->tx_pause_en)
+			val |= MACCFG1_TX_FLOW;
+		if (priv->rx_pause_en)
+			val |= MACCFG1_RX_FLOW;
+	} else {
+		u16 lcl_adv, rmt_adv;
+		u8 flowctrl;
+		/* get link partner capabilities */
+		rmt_adv = 0;
+		if (phydev->pause)
+			rmt_adv = LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+
+		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (flowctrl & FLOW_CTRL_TX)
+			val |= MACCFG1_TX_FLOW;
+		if (flowctrl & FLOW_CTRL_RX)
+			val |= MACCFG1_RX_FLOW;
+	}
+
+	return val;
+}
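When pause autonegotiation is on, the outcome depends on both link partners' advertisements. The sketch below paraphrases the 802.3 resolution that mii_resolve_flowctrl_fdx() applies, using stand-in flag values rather than the kernel helper itself (assumed behaviour, not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define PAUSE_CAP	0x1	/* stand-in for ADVERTISE/LPA_PAUSE_CAP */
#define PAUSE_ASYM	0x2	/* stand-in for ADVERTISE/LPA_PAUSE_ASYM */
#define FC_TX		0x1	/* stand-in for FLOW_CTRL_TX */
#define FC_RX		0x2	/* stand-in for FLOW_CTRL_RX */

static uint8_t resolve_pause(uint16_t lcl, uint16_t rmt)
{
	if (lcl & rmt & PAUSE_CAP)	/* both ends symmetric-capable */
		return FC_TX | FC_RX;
	if (lcl & rmt & PAUSE_ASYM) {	/* asymmetric negotiation */
		if (lcl & PAUSE_CAP)
			return FC_RX;	/* resolve to Rx flow control only */
		if (rmt & PAUSE_CAP)
			return FC_TX;	/* resolve to Tx flow control only */
	}
	return 0;
}

int main(void)
{
	/* both ends advertise symmetric pause -> enable Tx and Rx flow control */
	printf("flowctrl = 0x%x\n",
	       resolve_pause(PAUSE_CAP, PAUSE_CAP | PAUSE_ASYM));
	return 0;
}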
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
+
+	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+		return;
+
+	if (phydev->link) {
+		u32 tempval1 = gfar_read(&regs->maccfg1);
+		u32 tempval = gfar_read(&regs->maccfg2);
+		u32 ecntrl = gfar_read(&regs->ecntrl);
+		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
+
+		if (phydev->duplex != priv->oldduplex) {
+			if (!(phydev->duplex))
+				tempval &= ~(MACCFG2_FULL_DUPLEX);
+			else
+				tempval |= MACCFG2_FULL_DUPLEX;
+
+			priv->oldduplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->oldspeed) {
+			switch (phydev->speed) {
+			case 1000:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
+				break;
+			case 100:
+			case 10:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+				/* Reduced mode distinguishes
+				 * between 10 and 100
+				 */
+				if (phydev->speed == SPEED_100)
+					ecntrl |= ECNTRL_R100;
+				else
+					ecntrl &= ~(ECNTRL_R100);
+				break;
+			default:
+				netif_warn(priv, link, priv->ndev,
+					   "Ack! Speed (%d) is not 10/100/1000!\n",
+					   phydev->speed);
+				break;
+			}
+
+			priv->oldspeed = phydev->speed;
+		}
+
+		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+		tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+		/* Turn last free buffer recording on */
+		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+			for (i = 0; i < priv->num_rx_queues; i++) {
+				u32 bdp_dma;
+
+				rx_queue = priv->rx_queue[i];
+				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, bdp_dma);
+			}
+
+			priv->tx_actual_en = 1;
+		}
+
+		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+			priv->tx_actual_en = 0;
+
+		gfar_write(&regs->maccfg1, tempval1);
+		gfar_write(&regs->maccfg2, tempval);
+		gfar_write(&regs->ecntrl, ecntrl);
+
+		if (!priv->oldlink)
+			priv->oldlink = 1;
+
+	} else if (priv->oldlink) {
+		priv->oldlink = 0;
+		priv->oldspeed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (netif_msg_link(priv))
+		phy_print_status(phydev);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
+
+	if (unlikely(phydev->link != priv->oldlink ||
+		     (phydev->link && (phydev->duplex != priv->oldduplex ||
+				       phydev->speed != priv->oldspeed))))
+		gfar_update_link_state(priv);
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register. We assume
+ * that the TBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void gfar_configure_serdes(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_device *tbiphy;
+
+	if (!priv->tbi_node) {
+		dev_warn(&dev->dev, "error: SGMII mode requires that the "
+			 "device tree specify a tbi-handle\n");
+		return;
+	}
+
+	tbiphy = of_phy_find_device(priv->tbi_node);
+	if (!tbiphy) {
+		dev_err(&dev->dev, "error: Could not get TBI device\n");
+		return;
+	}
+
+	/* If the link is already up, we must already be ok, and don't need to
+	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+	 * everything for us? Resetting it takes the link down and requires
+	 * several seconds for it to come back.
+	 */
+	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+		put_device(&tbiphy->mdio.dev);
+		return;
+	}
+
+	/* Single clk mode, mii mode off (for serdes communication) */
+	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+
+	phy_write(tbiphy, MII_ADVERTISE,
+		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+		  ADVERTISE_1000XPSE_ASYM);
+
+	phy_write(tbiphy, MII_BMCR,
+		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+		  BMCR_SPEED1000);
+
+	put_device(&tbiphy->mdio.dev);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *dev)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+	struct gfar_private *priv = netdev_priv(dev);
+	phy_interface_t interface = priv->interface;
+	struct phy_device *phydev;
+	struct ethtool_eee edata;
+
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+				interface);
+	if (!phydev) {
+		dev_err(&dev->dev, "could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	if (interface == PHY_INTERFACE_MODE_SGMII)
+		gfar_configure_serdes(dev);
+
+	/* Remove any features not supported by the controller */
+	linkmode_and(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
+
+	/* Add support for flow control */
+	phy_support_asym_pause(phydev);
+
+	/* disable EEE autoneg, EEE not supported by eTSEC */
+	memset(&edata, 0, sizeof(struct ethtool_eee));
+	phy_ethtool_set_eee(phydev, &edata);
+
+	return 0;
+}
+
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
+{
+	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
+
+	memset(fcb, 0, GMAC_FCB_LEN);
+
+	return fcb;
+}
+
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+				    int fcb_length)
+{
+	/* If we're here, it's an IP packet with a TCP or UDP
+	 * payload. We set it to checksum, using a pseudo-header
+	 * we provide
+	 */
+	u8 flags = TXFCB_DEFAULT;
+
+	/* Tell the controller what the protocol is
+	 * And provide the already calculated phcs
+	 */
+	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+		flags |= TXFCB_UDP;
+		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
+	} else
+		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
+
+	/* l3os is the distance between the start of the
+	 * frame (skb->data) and the start of the IP hdr.
+	 * l4os is the distance between the start of the
+	 * l3 hdr and the l4 hdr
+	 */
+	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
+	fcb->l4os = skb_network_header_len(skb);
+
+	fcb->flags = flags;
+}
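For the common case of an untagged IPv4 frame with no IP options, the two offsets above work out to constants; a sketch with those assumed sizes (GMAC_FCB_LEN taken as 8 bytes, no timestamp padding — not taken from the patch):

#include <stdio.h>

int main(void)
{
	int fcb_length = 8;	/* assumed GMAC_FCB_LEN, no TxPAL */
	int eth_hlen = 14;	/* untagged Ethernet header */

	/* After gfar_add_fcb() pushes the FCB, skb->data points at the
	 * FCB, so skb_network_offset() becomes fcb_length + eth_hlen. */
	int network_offset = fcb_length + eth_hlen;
	int network_hdr_len = 20;	/* IPv4 header without options */

	printf("l3os = %d\n", network_offset - fcb_length);	/* 14 */
	printf("l4os = %d\n", network_hdr_len);			/* 20 */
	return 0;
}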
+
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+{
+	fcb->flags |= TXFCB_VLN;
+	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
+}
+
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+				      struct txbd8 *base, int ring_size)
+{
+	struct txbd8 *new_bd = bdp + stride;
+
+	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+				      int ring_size)
+{
+	return skip_txbd(bdp, 1, base, ring_size);
+}
+
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+				       unsigned long fcb_addr)
+{
+	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+		(fcb_addr % 0x20) > 0x18);
+}
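Concretely, the check above fires only when the FCB starts in the last seven bytes of a 32-byte window. A quick demonstration (addresses are made up, not from the patch):

#include <stdio.h>

int main(void)
{
	/* fcb_addr % 0x20 > 0x18 <=> offset 0x19..0x1f within a 32-byte block */
	unsigned long addrs[] = { 0x1000, 0x1018, 0x1019, 0x101f };
	int i;

	for (i = 0; i < 4; i++)
		printf("fcb at 0x%lx -> %s\n", addrs[i],
		       (addrs[i] % 0x20) > 0x18 ? "errata path" : "ok");
	return 0;
}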
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+				       unsigned int len)
+{
+	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+		(len > 2500));
+}
+
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
+static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct netdev_queue *txq;
+	struct gfar __iomem *regs = NULL;
+	struct txfcb *fcb = NULL;
+	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
+	u32 lstatus;
+	skb_frag_t *frag;
+	int i, rq = 0;
+	int do_tstamp, do_csum, do_vlan;
+	u32 bufaddr;
+	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
+
+	rq = skb->queue_mapping;
+	tx_queue = priv->tx_queue[rq];
+	txq = netdev_get_tx_queue(dev, rq);
+	base = tx_queue->tx_bd_base;
+	regs = tx_queue->grp->regs;
+
+	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+	do_vlan = skb_vlan_tag_present(skb);
+	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		    priv->hwts_tx_en;
+
+	if (do_csum || do_vlan)
+		fcb_len = GMAC_FCB_LEN;
+
+	/* check if time stamp should be generated */
+	if (unlikely(do_tstamp))
+		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+
+	/* make space for additional header when fcb is needed */
+	if (fcb_len) {
+		if (unlikely(skb_cow_head(skb, fcb_len))) {
+			dev->stats.tx_errors++;
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
+
+	/* total number of fragments in the SKB */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	/* calculate the required number of TxBDs for this skb */
+	if (unlikely(do_tstamp))
+		nr_txbds = nr_frags + 2;
+	else
+		nr_txbds = nr_frags + 1;
+
+	/* check if there is space to queue this packet */
+	if (nr_txbds > tx_queue->num_txbdfree) {
+		/* no space, stop the queue */
+		netif_tx_stop_queue(txq);
+		dev->stats.tx_fifo_errors++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Update transmit stats */
+	bytes_sent = skb->len;
+	tx_queue->stats.tx_bytes += bytes_sent;
+	/* keep Tx bytes on wire for BQL accounting */
+	GFAR_CB(skb)->bytes_sent = bytes_sent;
+	tx_queue->stats.tx_packets++;
+
+	txbdp = txbdp_start = tx_queue->cur_tx;
+	lstatus = be32_to_cpu(txbdp->lstatus);
+
+	/* Add TxPAL between FCB and frame if required */
+	if (unlikely(do_tstamp)) {
+		skb_push(skb, GMAC_TXPAL_LEN);
+		memset(skb->data, 0, GMAC_TXPAL_LEN);
+	}
+
+	/* Add TxFCB if required */
+	if (fcb_len) {
+		fcb = gfar_add_fcb(skb);
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
+
+	/* Set up checksumming */
+	if (do_csum) {
+		gfar_tx_checksum(skb, fcb, fcb_len);
+
+		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
---|
| 1888 | + unlikely(gfar_csum_errata_76(priv, skb->len))) { |
---|
| 1889 | + __skb_pull(skb, GMAC_FCB_LEN); |
---|
| 1890 | + skb_checksum_help(skb); |
---|
| 1891 | + if (do_vlan || do_tstamp) { |
---|
| 1892 | + /* put back a new fcb for vlan/tstamp TOE */ |
---|
| 1893 | + fcb = gfar_add_fcb(skb); |
---|
| 1894 | + } else { |
---|
| 1895 | + /* Tx TOE not used */ |
---|
| 1896 | + lstatus &= ~(BD_LFLAG(TXBD_TOE)); |
---|
| 1897 | + fcb = NULL; |
---|
| 1898 | + } |
---|
| 1899 | + } |
---|
| 1900 | + } |
---|
| 1901 | + |
---|
| 1902 | + if (do_vlan) |
---|
| 1903 | + gfar_tx_vlan(skb, fcb); |
---|
| 1904 | + |
---|
| 1905 | + bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), |
---|
| 1906 | + DMA_TO_DEVICE); |
---|
| 1907 | + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) |
---|
| 1908 | + goto dma_map_err; |
---|
| 1909 | + |
---|
| 1910 | + txbdp_start->bufPtr = cpu_to_be32(bufaddr); |
---|
| 1911 | + |
---|
| 1912 | + /* Time stamp insertion requires one additional TxBD */ |
---|
| 1913 | + if (unlikely(do_tstamp)) |
---|
| 1914 | + txbdp_tstamp = txbdp = next_txbd(txbdp, base, |
---|
| 1915 | + tx_queue->tx_ring_size); |
---|
| 1916 | + |
---|
| 1917 | + if (likely(!nr_frags)) { |
---|
| 1918 | + if (likely(!do_tstamp)) |
---|
| 1919 | + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
---|
| 1920 | + } else { |
---|
| 1921 | + u32 lstatus_start = lstatus; |
---|
| 1922 | + |
---|
| 1923 | + /* Place the fragment addresses and lengths into the TxBDs */ |
---|
| 1924 | + frag = &skb_shinfo(skb)->frags[0]; |
---|
| 1925 | + for (i = 0; i < nr_frags; i++, frag++) { |
---|
| 1926 | + unsigned int size; |
---|
| 1927 | + |
---|
| 1928 | + /* Point at the next BD, wrapping as needed */ |
---|
| 1929 | + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
| 1930 | + |
---|
| 1931 | + size = skb_frag_size(frag); |
---|
| 1932 | + |
---|
| 1933 | + lstatus = be32_to_cpu(txbdp->lstatus) | size | |
---|
| 1934 | + BD_LFLAG(TXBD_READY); |
---|
| 1935 | + |
---|
| 1936 | + /* Handle the last BD specially */ |
---|
| 1937 | + if (i == nr_frags - 1) |
---|
| 1938 | + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
---|
| 1939 | + |
---|
| 1940 | + bufaddr = skb_frag_dma_map(priv->dev, frag, 0, |
---|
| 1941 | + size, DMA_TO_DEVICE); |
---|
| 1942 | + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) |
---|
| 1943 | + goto dma_map_err; |
---|
| 1944 | + |
---|
| 1945 | + /* set the TxBD length and buffer pointer */ |
---|
| 1946 | + txbdp->bufPtr = cpu_to_be32(bufaddr); |
---|
| 1947 | + txbdp->lstatus = cpu_to_be32(lstatus); |
---|
| 1948 | + } |
---|
| 1949 | + |
---|
| 1950 | + lstatus = lstatus_start; |
---|
| 1951 | + } |
---|
| 1952 | + |
---|
| 1953 | + /* If time stamping is requested, one additional TxBD must be set up. The
---|
| 1954 | + * first TxBD points to the FCB and must have a data length of |
---|
| 1955 | + * GMAC_FCB_LEN. The second TxBD points to the actual frame data with |
---|
| 1956 | + * the full frame length. |
---|
| 1957 | + */ |
---|
| 1958 | + if (unlikely(do_tstamp)) { |
---|
| 1959 | + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); |
---|
| 1960 | + |
---|
| 1961 | + bufaddr = be32_to_cpu(txbdp_start->bufPtr); |
---|
| 1962 | + bufaddr += fcb_len; |
---|
| 1963 | + |
---|
| 1964 | + lstatus_ts |= BD_LFLAG(TXBD_READY) | |
---|
| 1965 | + (skb_headlen(skb) - fcb_len); |
---|
| 1966 | + if (!nr_frags) |
---|
| 1967 | + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
---|
| 1968 | + |
---|
| 1969 | + txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); |
---|
| 1970 | + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); |
---|
| 1971 | + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
---|
| 1972 | + |
---|
| 1973 | + /* Setup tx hardware time stamping */ |
---|
| 1974 | + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
---|
| 1975 | + fcb->ptp = 1; |
---|
| 1976 | + } else { |
---|
| 1977 | + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
---|
| 1978 | + } |
---|
| 1979 | + |
---|
| 1980 | + netdev_tx_sent_queue(txq, bytes_sent); |
---|
| 1981 | + |
---|
| 1982 | + gfar_wmb(); |
---|
| 1983 | + |
---|
| 1984 | + txbdp_start->lstatus = cpu_to_be32(lstatus); |
---|
| 1985 | + |
---|
| 1986 | + gfar_wmb(); /* force lstatus write before tx_skbuff */ |
---|
| 1987 | + |
---|
| 1988 | + tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
---|
| 1989 | + |
---|
| 1990 | + /* Update the current skb pointer to the next entry we will use |
---|
| 1991 | + * (wrapping if necessary) |
---|
| 1992 | + */ |
---|
| 1993 | + tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
---|
| 1994 | + TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
---|
| 1995 | + |
---|
| 1996 | + tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
| 1997 | + |
---|
| 1998 | + /* We can work in parallel with gfar_clean_tx_ring(), except |
---|
| 1999 | + * when modifying num_txbdfree. Note that we didn't grab the lock |
---|
| 2000 | + * when we were reading the num_txbdfree and checking for available |
---|
| 2001 | + * space; that's safe because outside of this function it can only grow.
---|
| 2002 | + */ |
---|
| 2003 | + spin_lock_bh(&tx_queue->txlock); |
---|
| 2004 | + /* reduce TxBD free count */ |
---|
| 2005 | + tx_queue->num_txbdfree -= (nr_txbds); |
---|
| 2006 | + spin_unlock_bh(&tx_queue->txlock); |
---|
| 2007 | + |
---|
| 2008 | + /* If the next BD still needs to be cleaned up, then the BDs
---|
| 2009 | + * are full. We need to tell the kernel to stop sending us stuff. |
---|
| 2010 | + */ |
---|
| 2011 | + if (!tx_queue->num_txbdfree) { |
---|
| 2012 | + netif_tx_stop_queue(txq); |
---|
| 2013 | + |
---|
| 2014 | + dev->stats.tx_fifo_errors++; |
---|
| 2015 | + } |
---|
| 2016 | + |
---|
| 2017 | + /* Tell the DMA to go go go */ |
---|
| 2018 | + gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
---|
| 2019 | + |
---|
| 2020 | + return NETDEV_TX_OK; |
---|
| 2021 | + |
---|
| 2022 | +dma_map_err: |
---|
| 2023 | + txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); |
---|
| 2024 | + if (do_tstamp) |
---|
| 2025 | + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
| 2026 | + for (i = 0; i < nr_frags; i++) { |
---|
| 2027 | + lstatus = be32_to_cpu(txbdp->lstatus); |
---|
| 2028 | + if (!(lstatus & BD_LFLAG(TXBD_READY))) |
---|
| 2029 | + break; |
---|
| 2030 | + |
---|
| 2031 | + lstatus &= ~BD_LFLAG(TXBD_READY); |
---|
| 2032 | + txbdp->lstatus = cpu_to_be32(lstatus); |
---|
| 2033 | + bufaddr = be32_to_cpu(txbdp->bufPtr); |
---|
| 2034 | + dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), |
---|
| 2035 | + DMA_TO_DEVICE); |
---|
| 2036 | + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
| 2037 | + } |
---|
| 2038 | + gfar_wmb(); |
---|
| 2039 | + dev_kfree_skb_any(skb); |
---|
| 2040 | + return NETDEV_TX_OK; |
---|
| 2041 | +} |
---|
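/* Illustrative sketch, not part of the driver: the descriptor budget
 * computed near the top of gfar_start_xmit(). The fragment count and
 * timestamp flag are assumptions.
 */
#include <stdio.h>

int main(void)
{
        int nr_frags = 3;       /* paged fragments in the skb */
        int do_tstamp = 1;      /* HW Tx timestamping requested */

        /* one BD for the linear part, one per fragment, and one more
         * when the FCB needs its own descriptor for time stamping
         */
        int nr_txbds = nr_frags + (do_tstamp ? 2 : 1);

        printf("need %d TxBDs\n", nr_txbds);    /* prints 5 */
        return 0;
}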
| 2042 | + |
---|
| 2043 | +/* Changes the MAC address if the controller is not running. */
---|
| 2044 | +static int gfar_set_mac_address(struct net_device *dev) |
---|
| 2045 | +{ |
---|
| 2046 | + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
---|
| 2047 | + |
---|
| 2048 | + return 0; |
---|
| 2049 | +} |
---|
| 2050 | + |
---|
| 2051 | +static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
---|
| 2052 | +{ |
---|
| 2053 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 2054 | + |
---|
| 2055 | + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
---|
| 2056 | + cpu_relax(); |
---|
| 2057 | + |
---|
| 2058 | + if (dev->flags & IFF_UP) |
---|
| 2059 | + stop_gfar(dev); |
---|
| 2060 | + |
---|
| 2061 | + dev->mtu = new_mtu; |
---|
| 2062 | + |
---|
| 2063 | + if (dev->flags & IFF_UP) |
---|
| 2064 | + startup_gfar(dev); |
---|
| 2065 | + |
---|
| 2066 | + clear_bit_unlock(GFAR_RESETTING, &priv->state); |
---|
| 2067 | + |
---|
| 2068 | + return 0; |
---|
| 2069 | +} |
---|
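/* Illustrative sketch, not part of the driver: GFAR_RESETTING above is
 * used as a one-bit busy-wait mutex. The same shape in portable C11
 * atomics; the reconfigure callback is a placeholder.
 */
#include <stdatomic.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void guarded_reconfigure(void (*reconfigure)(void))
{
        while (atomic_flag_test_and_set_explicit(&resetting,
                                                 memory_order_acquire))
                ;       /* spin, like cpu_relax() */

        reconfigure();

        atomic_flag_clear_explicit(&resetting, memory_order_release);
}

static void stop_then_start(void) { /* stop + restart hardware here */ }

int main(void)
{
        guarded_reconfigure(stop_then_start);
        return 0;
}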
| 2070 | + |
---|
| 2071 | +static void reset_gfar(struct net_device *ndev) |
---|
| 2072 | +{ |
---|
| 2073 | + struct gfar_private *priv = netdev_priv(ndev); |
---|
| 2074 | + |
---|
| 2075 | + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
---|
| 2076 | + cpu_relax(); |
---|
| 2077 | + |
---|
| 2078 | + stop_gfar(ndev); |
---|
| 2079 | + startup_gfar(ndev); |
---|
| 2080 | + |
---|
| 2081 | + clear_bit_unlock(GFAR_RESETTING, &priv->state); |
---|
| 2082 | +} |
---|
| 2083 | + |
---|
| 2084 | +/* gfar_reset_task gets scheduled when a packet has not been |
---|
| 2085 | + * transmitted after a set amount of time. |
---|
| 2086 | + * For now, assume that clearing out all the structures and
---|
| 2087 | + * starting over will fix the problem. |
---|
| 2088 | + */ |
---|
| 2089 | +static void gfar_reset_task(struct work_struct *work) |
---|
| 2090 | +{ |
---|
| 2091 | + struct gfar_private *priv = container_of(work, struct gfar_private, |
---|
| 2092 | + reset_task); |
---|
| 2093 | + reset_gfar(priv->ndev); |
---|
| 2094 | +} |
---|
| 2095 | + |
---|
| 2096 | +static void gfar_timeout(struct net_device *dev, unsigned int txqueue) |
---|
| 2097 | +{ |
---|
| 2098 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 2099 | + |
---|
| 2100 | + dev->stats.tx_errors++; |
---|
| 2101 | + schedule_work(&priv->reset_task); |
---|
| 2102 | +} |
---|
| 2103 | + |
---|
| 2104 | +static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) |
---|
| 2105 | +{ |
---|
| 2106 | + struct hwtstamp_config config; |
---|
| 2107 | + struct gfar_private *priv = netdev_priv(netdev); |
---|
| 2108 | + |
---|
| 2109 | + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) |
---|
| 2110 | + return -EFAULT; |
---|
| 2111 | + |
---|
| 2112 | + /* reserved for future extensions */ |
---|
| 2113 | + if (config.flags) |
---|
| 2114 | + return -EINVAL; |
---|
| 2115 | + |
---|
| 2116 | + switch (config.tx_type) { |
---|
| 2117 | + case HWTSTAMP_TX_OFF: |
---|
| 2118 | + priv->hwts_tx_en = 0; |
---|
| 2119 | + break; |
---|
| 2120 | + case HWTSTAMP_TX_ON: |
---|
| 2121 | + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) |
---|
| 2122 | + return -ERANGE; |
---|
| 2123 | + priv->hwts_tx_en = 1; |
---|
| 2124 | + break; |
---|
| 2125 | + default: |
---|
| 2126 | + return -ERANGE; |
---|
| 2127 | + } |
---|
| 2128 | + |
---|
| 2129 | + switch (config.rx_filter) { |
---|
| 2130 | + case HWTSTAMP_FILTER_NONE: |
---|
| 2131 | + if (priv->hwts_rx_en) { |
---|
| 2132 | + priv->hwts_rx_en = 0; |
---|
| 2133 | + reset_gfar(netdev); |
---|
| 2134 | + } |
---|
| 2135 | + break; |
---|
| 2136 | + default: |
---|
| 2137 | + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) |
---|
| 2138 | + return -ERANGE; |
---|
| 2139 | + if (!priv->hwts_rx_en) { |
---|
| 2140 | + priv->hwts_rx_en = 1; |
---|
| 2141 | + reset_gfar(netdev); |
---|
| 2142 | + } |
---|
| 2143 | + config.rx_filter = HWTSTAMP_FILTER_ALL; |
---|
| 2144 | + break; |
---|
| 2145 | + } |
---|
| 2146 | + |
---|
| 2147 | + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? |
---|
| 2148 | + -EFAULT : 0; |
---|
| 2149 | +} |
---|
| 2150 | + |
---|
| 2151 | +static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) |
---|
| 2152 | +{ |
---|
| 2153 | + struct hwtstamp_config config; |
---|
| 2154 | + struct gfar_private *priv = netdev_priv(netdev); |
---|
| 2155 | + |
---|
| 2156 | + config.flags = 0; |
---|
| 2157 | + config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; |
---|
| 2158 | + config.rx_filter = (priv->hwts_rx_en ? |
---|
| 2159 | + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); |
---|
| 2160 | + |
---|
| 2161 | + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? |
---|
| 2162 | + -EFAULT : 0; |
---|
| 2163 | +} |
---|
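/* Illustrative sketch, not part of the driver: the userspace side of the
 * SIOCSHWTSTAMP handshake handled above. The interface name and the lack
 * of error handling are assumptions made for brevity.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tstamps(int sock)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}

int main(void)
{
        return enable_hw_tstamps(socket(AF_INET, SOCK_DGRAM, 0));
}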
| 2164 | + |
---|
| 2165 | +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
---|
| 2166 | +{ |
---|
| 2167 | + struct phy_device *phydev = dev->phydev; |
---|
| 2168 | + |
---|
| 2169 | + if (!netif_running(dev)) |
---|
| 2170 | + return -EINVAL; |
---|
| 2171 | + |
---|
| 2172 | + if (cmd == SIOCSHWTSTAMP) |
---|
| 2173 | + return gfar_hwtstamp_set(dev, rq); |
---|
| 2174 | + if (cmd == SIOCGHWTSTAMP) |
---|
| 2175 | + return gfar_hwtstamp_get(dev, rq); |
---|
| 2176 | + |
---|
| 2177 | + if (!phydev) |
---|
| 2178 | + return -ENODEV; |
---|
| 2179 | + |
---|
| 2180 | + return phy_mii_ioctl(phydev, rq, cmd); |
---|
| 2181 | +} |
---|
| 2182 | + |
---|
| 2183 | +/* Reclaim completed Tx descriptors (runs in NAPI context) */
---|
| 2184 | +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
---|
| 2185 | +{ |
---|
| 2186 | + struct net_device *dev = tx_queue->dev; |
---|
| 2187 | + struct netdev_queue *txq; |
---|
| 2188 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 2189 | + struct txbd8 *bdp, *next = NULL; |
---|
| 2190 | + struct txbd8 *lbdp = NULL; |
---|
| 2191 | + struct txbd8 *base = tx_queue->tx_bd_base; |
---|
| 2192 | + struct sk_buff *skb; |
---|
| 2193 | + int skb_dirtytx; |
---|
| 2194 | + int tx_ring_size = tx_queue->tx_ring_size; |
---|
| 2195 | + int frags = 0, nr_txbds = 0; |
---|
| 2196 | + int i; |
---|
| 2197 | + int howmany = 0; |
---|
| 2198 | + int tqi = tx_queue->qindex; |
---|
| 2199 | + unsigned int bytes_sent = 0; |
---|
| 2200 | + u32 lstatus; |
---|
| 2201 | + size_t buflen; |
---|
| 2202 | + |
---|
| 2203 | + txq = netdev_get_tx_queue(dev, tqi); |
---|
| 2204 | + bdp = tx_queue->dirty_tx; |
---|
| 2205 | + skb_dirtytx = tx_queue->skb_dirtytx; |
---|
| 2206 | + |
---|
| 2207 | + while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
---|
| 2208 | + bool do_tstamp; |
---|
| 2209 | + |
---|
| 2210 | + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
---|
| 2211 | + priv->hwts_tx_en; |
---|
| 2212 | + |
---|
| 2213 | + frags = skb_shinfo(skb)->nr_frags; |
---|
| 2214 | + |
---|
| 2215 | + /* When time stamping, one additional TxBD must be freed. |
---|
| 2216 | + * Also, we need to dma_unmap_single() the TxPAL. |
---|
| 2217 | + */ |
---|
| 2218 | + if (unlikely(do_tstamp)) |
---|
| 2219 | + nr_txbds = frags + 2; |
---|
| 2220 | + else |
---|
| 2221 | + nr_txbds = frags + 1; |
---|
| 2222 | + |
---|
| 2223 | + lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); |
---|
| 2224 | + |
---|
| 2225 | + lstatus = be32_to_cpu(lbdp->lstatus); |
---|
| 2226 | + |
---|
| 2227 | + /* Only clean completed frames */ |
---|
| 2228 | + if ((lstatus & BD_LFLAG(TXBD_READY)) && |
---|
| 2229 | + (lstatus & BD_LENGTH_MASK)) |
---|
| 2230 | + break; |
---|
| 2231 | + |
---|
| 2232 | + if (unlikely(do_tstamp)) { |
---|
| 2233 | + next = next_txbd(bdp, base, tx_ring_size); |
---|
| 2234 | + buflen = be16_to_cpu(next->length) + |
---|
| 2235 | + GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
---|
| 2236 | + } else |
---|
| 2237 | + buflen = be16_to_cpu(bdp->length); |
---|
| 2238 | + |
---|
| 2239 | + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), |
---|
| 2240 | + buflen, DMA_TO_DEVICE); |
---|
| 2241 | + |
---|
| 2242 | + if (unlikely(do_tstamp)) { |
---|
| 2243 | + struct skb_shared_hwtstamps shhwtstamps; |
---|
| 2244 | + u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & |
---|
| 2245 | + ~0x7UL); |
---|
| 2246 | + |
---|
| 2247 | + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
---|
| 2248 | + shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); |
---|
| 2249 | + skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
---|
| 2250 | + skb_tstamp_tx(skb, &shhwtstamps); |
---|
| 2251 | + gfar_clear_txbd_status(bdp); |
---|
| 2252 | + bdp = next; |
---|
| 2253 | + } |
---|
| 2254 | + |
---|
| 2255 | + gfar_clear_txbd_status(bdp); |
---|
| 2256 | + bdp = next_txbd(bdp, base, tx_ring_size); |
---|
| 2257 | + |
---|
| 2258 | + for (i = 0; i < frags; i++) { |
---|
| 2259 | + dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), |
---|
| 2260 | + be16_to_cpu(bdp->length), |
---|
| 2261 | + DMA_TO_DEVICE); |
---|
| 2262 | + gfar_clear_txbd_status(bdp); |
---|
| 2263 | + bdp = next_txbd(bdp, base, tx_ring_size); |
---|
| 2264 | + } |
---|
| 2265 | + |
---|
| 2266 | + bytes_sent += GFAR_CB(skb)->bytes_sent; |
---|
| 2267 | + |
---|
| 2268 | + dev_kfree_skb_any(skb); |
---|
| 2269 | + |
---|
| 2270 | + tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
---|
| 2271 | + |
---|
| 2272 | + skb_dirtytx = (skb_dirtytx + 1) & |
---|
| 2273 | + TX_RING_MOD_MASK(tx_ring_size); |
---|
| 2274 | + |
---|
| 2275 | + howmany++; |
---|
| 2276 | + spin_lock(&tx_queue->txlock); |
---|
| 2277 | + tx_queue->num_txbdfree += nr_txbds; |
---|
| 2278 | + spin_unlock(&tx_queue->txlock); |
---|
| 2279 | + } |
---|
| 2280 | + |
---|
| 2281 | + /* If we freed a buffer, we can restart transmission, if necessary */ |
---|
| 2282 | + if (tx_queue->num_txbdfree && |
---|
| 2283 | + netif_tx_queue_stopped(txq) && |
---|
| 2284 | + !(test_bit(GFAR_DOWN, &priv->state))) |
---|
| 2285 | + netif_wake_subqueue(priv->ndev, tqi); |
---|
| 2286 | + |
---|
| 2287 | + /* Update dirty indicators */ |
---|
| 2288 | + tx_queue->skb_dirtytx = skb_dirtytx; |
---|
| 2289 | + tx_queue->dirty_tx = bdp; |
---|
| 2290 | + |
---|
| 2291 | + netdev_tx_completed_queue(txq, howmany, bytes_sent); |
---|
| 2292 | +} |
---|
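/* Illustrative sketch, not part of the driver: the "& TX_RING_MOD_MASK"
 * stepping above relies on the ring size being a power of two, so the
 * mask is equivalent to a modulo. The ring size is an assumption.
 */
#include <assert.h>

int main(void)
{
        int ring_size = 256;            /* must be a power of two */
        int mask = ring_size - 1;       /* TX_RING_MOD_MASK(ring_size) */

        assert(((255 + 1) & mask) == 0);        /* wraps to the start */
        assert(((17 + 1) & mask) == 18);        /* normal increment */
        return 0;
}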
| 2293 | + |
---|
| 2294 | +static void count_errors(u32 lstatus, struct net_device *ndev) |
---|
| 2295 | +{ |
---|
| 2296 | + struct gfar_private *priv = netdev_priv(ndev); |
---|
| 2297 | + struct net_device_stats *stats = &ndev->stats; |
---|
| 2298 | + struct gfar_extra_stats *estats = &priv->extra_stats; |
---|
| 2299 | + |
---|
| 2300 | + /* If the packet was truncated, none of the other errors matter */ |
---|
| 2301 | + if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { |
---|
| 2302 | + stats->rx_length_errors++; |
---|
| 2303 | + |
---|
| 2304 | + atomic64_inc(&estats->rx_trunc); |
---|
| 2305 | + |
---|
| 2306 | + return; |
---|
| 2307 | + } |
---|
| 2308 | + /* Count the errors, if there were any */ |
---|
| 2309 | + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { |
---|
| 2310 | + stats->rx_length_errors++; |
---|
| 2311 | + |
---|
| 2312 | + if (lstatus & BD_LFLAG(RXBD_LARGE)) |
---|
| 2313 | + atomic64_inc(&estats->rx_large); |
---|
| 2314 | + else |
---|
| 2315 | + atomic64_inc(&estats->rx_short); |
---|
| 2316 | + } |
---|
| 2317 | + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { |
---|
| 2318 | + stats->rx_frame_errors++; |
---|
| 2319 | + atomic64_inc(&estats->rx_nonoctet); |
---|
| 2320 | + } |
---|
| 2321 | + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { |
---|
| 2322 | + atomic64_inc(&estats->rx_crcerr); |
---|
| 2323 | + stats->rx_crc_errors++; |
---|
| 2324 | + } |
---|
| 2325 | + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { |
---|
| 2326 | + atomic64_inc(&estats->rx_overrun); |
---|
| 2327 | + stats->rx_over_errors++; |
---|
| 2328 | + } |
---|
| 2329 | +} |
---|
| 2330 | + |
---|
| 2331 | +static irqreturn_t gfar_receive(int irq, void *grp_id) |
---|
| 2332 | +{ |
---|
| 2333 | + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
---|
| 2334 | + unsigned long flags; |
---|
| 2335 | + u32 imask, ievent; |
---|
| 2336 | + |
---|
| 2337 | + ievent = gfar_read(&grp->regs->ievent); |
---|
| 2338 | + |
---|
| 2339 | + if (unlikely(ievent & IEVENT_FGPI)) { |
---|
| 2340 | + gfar_write(&grp->regs->ievent, IEVENT_FGPI); |
---|
| 2341 | + return IRQ_HANDLED; |
---|
| 2342 | + } |
---|
| 2343 | + |
---|
| 2344 | + if (likely(napi_schedule_prep(&grp->napi_rx))) { |
---|
| 2345 | + spin_lock_irqsave(&grp->grplock, flags); |
---|
| 2346 | + imask = gfar_read(&grp->regs->imask); |
---|
| 2347 | + imask &= IMASK_RX_DISABLED; |
---|
| 2348 | + gfar_write(&grp->regs->imask, imask); |
---|
| 2349 | + spin_unlock_irqrestore(&grp->grplock, flags); |
---|
| 2350 | + __napi_schedule(&grp->napi_rx); |
---|
| 2351 | + } else { |
---|
| 2352 | + /* Clear IEVENT, so interrupts aren't called again |
---|
| 2353 | + * because of the packets that have already arrived. |
---|
| 2354 | + */ |
---|
| 2355 | + gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); |
---|
| 2356 | + } |
---|
| 2357 | + |
---|
| 2358 | + return IRQ_HANDLED; |
---|
| 2359 | +} |
---|
| 2360 | + |
---|
| 2361 | +/* Interrupt Handler for Transmit complete */ |
---|
| 2362 | +static irqreturn_t gfar_transmit(int irq, void *grp_id) |
---|
| 2363 | +{ |
---|
| 2364 | + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
---|
| 2365 | + unsigned long flags; |
---|
| 2366 | + u32 imask; |
---|
| 2367 | + |
---|
| 2368 | + if (likely(napi_schedule_prep(&grp->napi_tx))) { |
---|
| 2369 | + spin_lock_irqsave(&grp->grplock, flags); |
---|
| 2370 | + imask = gfar_read(&grp->regs->imask); |
---|
| 2371 | + imask &= IMASK_TX_DISABLED; |
---|
| 2372 | + gfar_write(&grp->regs->imask, imask); |
---|
| 2373 | + spin_unlock_irqrestore(&grp->grplock, flags); |
---|
| 2374 | + __napi_schedule(&grp->napi_tx); |
---|
| 2375 | + } else { |
---|
| 2376 | + /* Clear IEVENT, so interrupts aren't called again |
---|
| 2377 | + * because of the packets that have already arrived. |
---|
| 2378 | + */ |
---|
| 2379 | + gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); |
---|
| 2380 | + } |
---|
| 2381 | + |
---|
| 2382 | + return IRQ_HANDLED; |
---|
| 2383 | +} |
---|
| 2384 | + |
---|
| 2385 | +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, |
---|
| 2386 | + struct sk_buff *skb, bool first) |
---|
| 2387 | +{ |
---|
| 2388 | + int size = lstatus & BD_LENGTH_MASK; |
---|
| 2389 | + struct page *page = rxb->page; |
---|
| 2390 | + |
---|
| 2391 | + if (likely(first)) { |
---|
| 2392 | + skb_put(skb, size); |
---|
| 2393 | + } else { |
---|
| 2394 | + /* the last fragment's length contains the full frame length */
---|
| 2395 | + if (lstatus & BD_LFLAG(RXBD_LAST)) |
---|
| 2396 | + size -= skb->len; |
---|
| 2397 | + |
---|
| 2398 | + WARN(size < 0, "gianfar: rx fragment size underflow"); |
---|
| 2399 | + if (size < 0) |
---|
| 2400 | + return false; |
---|
| 2401 | + |
---|
| 2402 | + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, |
---|
| 2403 | + rxb->page_offset + RXBUF_ALIGNMENT, |
---|
| 2404 | + size, GFAR_RXB_TRUESIZE); |
---|
| 2405 | + } |
---|
| 2406 | + |
---|
| 2407 | + /* try reuse page */ |
---|
| 2408 | + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) |
---|
| 2409 | + return false; |
---|
| 2410 | + |
---|
| 2411 | + /* change offset to the other half */ |
---|
| 2412 | + rxb->page_offset ^= GFAR_RXB_TRUESIZE; |
---|
| 2413 | + |
---|
| 2414 | + page_ref_inc(page); |
---|
| 2415 | + |
---|
| 2416 | + return true; |
---|
| 2417 | +} |
---|
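/* Illustrative sketch, not part of the driver: the recycling trick above
 * splits one page into two receive buffers and flips between the halves
 * with an XOR. The sizes assume a 4 KiB page.
 */
#include <assert.h>

int main(void)
{
        unsigned int truesize = 2048;   /* GFAR_RXB_TRUESIZE, half a page */
        unsigned int offset = 0;

        offset ^= truesize;             /* first half -> second half */
        assert(offset == 2048);
        offset ^= truesize;             /* and back again */
        assert(offset == 0);
        return 0;
}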
| 2418 | + |
---|
| 2419 | +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, |
---|
| 2420 | + struct gfar_rx_buff *old_rxb) |
---|
| 2421 | +{ |
---|
| 2422 | + struct gfar_rx_buff *new_rxb; |
---|
| 2423 | + u16 nta = rxq->next_to_alloc; |
---|
| 2424 | + |
---|
| 2425 | + new_rxb = &rxq->rx_buff[nta]; |
---|
| 2426 | + |
---|
| 2427 | + /* find next buf that can reuse a page */ |
---|
| 2428 | + nta++; |
---|
| 2429 | + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; |
---|
| 2430 | + |
---|
| 2431 | + /* copy page reference */ |
---|
| 2432 | + *new_rxb = *old_rxb; |
---|
| 2433 | + |
---|
| 2434 | + /* sync for use by the device */ |
---|
| 2435 | + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, |
---|
| 2436 | + old_rxb->page_offset, |
---|
| 2437 | + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); |
---|
| 2438 | +} |
---|
| 2439 | + |
---|
| 2440 | +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, |
---|
| 2441 | + u32 lstatus, struct sk_buff *skb) |
---|
| 2442 | +{ |
---|
| 2443 | + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; |
---|
| 2444 | + struct page *page = rxb->page; |
---|
| 2445 | + bool first = false; |
---|
| 2446 | + |
---|
| 2447 | + if (likely(!skb)) { |
---|
| 2448 | + void *buff_addr = page_address(page) + rxb->page_offset; |
---|
| 2449 | + |
---|
| 2450 | + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); |
---|
| 2451 | + if (unlikely(!skb)) { |
---|
| 2452 | + gfar_rx_alloc_err(rx_queue); |
---|
| 2453 | + return NULL; |
---|
| 2454 | + } |
---|
| 2455 | + skb_reserve(skb, RXBUF_ALIGNMENT); |
---|
| 2456 | + first = true; |
---|
| 2457 | + } |
---|
| 2458 | + |
---|
| 2459 | + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, |
---|
| 2460 | + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); |
---|
| 2461 | + |
---|
| 2462 | + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { |
---|
| 2463 | + /* reuse the free half of the page */ |
---|
| 2464 | + gfar_reuse_rx_page(rx_queue, rxb); |
---|
| 2465 | + } else { |
---|
| 2466 | + /* page cannot be reused, unmap it */ |
---|
| 2467 | + dma_unmap_page(rx_queue->dev, rxb->dma, |
---|
| 2468 | + PAGE_SIZE, DMA_FROM_DEVICE); |
---|
| 2469 | + } |
---|
| 2470 | + |
---|
| 2471 | + /* clear rxb content */ |
---|
| 2472 | + rxb->page = NULL; |
---|
| 2473 | + |
---|
| 2474 | + return skb; |
---|
| 2475 | +} |
---|
| 2476 | + |
---|
| 2477 | +static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
---|
| 2478 | +{ |
---|
| 2479 | + /* If valid headers were found, and valid sums |
---|
| 2480 | + * were verified, then we tell the kernel that no |
---|
| 2481 | + * checksumming is necessary. Otherwise, the stack must verify it
---|
| 2482 | + */ |
---|
| 2483 | + if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == |
---|
| 2484 | + (RXFCB_CIP | RXFCB_CTU)) |
---|
| 2485 | + skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
| 2486 | + else |
---|
| 2487 | + skb_checksum_none_assert(skb); |
---|
| 2488 | +} |
---|
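/* Illustrative sketch, not part of the driver: the mask-and-compare
 * above accepts a frame only when both "IP header checked" (CIP) and
 * "TCP/UDP checksum verified" (CTU) bits are set. The bit values here
 * are hypothetical stand-ins, not the hardware's.
 */
#include <assert.h>

int main(void)
{
        unsigned int cip = 0x4000, ctu = 0x1000;        /* hypothetical */
        unsigned int csum_mask = cip | ctu;
        unsigned int flags = cip | ctu;                 /* both verified */

        assert((flags & csum_mask) == (cip | ctu));     /* accepted */
        assert((cip & csum_mask) != (cip | ctu));       /* one bit: rejected */
        return 0;
}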
| 2489 | + |
---|
| 2490 | +/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
---|
| 2491 | +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) |
---|
| 2492 | +{ |
---|
| 2493 | + struct gfar_private *priv = netdev_priv(ndev); |
---|
| 2494 | + struct rxfcb *fcb = NULL; |
---|
| 2495 | + |
---|
| 2496 | + /* the fcb sits at the beginning of the frame, if it exists */
---|
| 2497 | + fcb = (struct rxfcb *)skb->data; |
---|
| 2498 | + |
---|
| 2499 | + /* Remove the FCB from the skb |
---|
| 2500 | + * Remove the padded bytes, if there are any |
---|
| 2501 | + */ |
---|
| 2502 | + if (priv->uses_rxfcb) |
---|
| 2503 | + skb_pull(skb, GMAC_FCB_LEN); |
---|
| 2504 | + |
---|
| 2505 | + /* Get receive timestamp from the skb */ |
---|
| 2506 | + if (priv->hwts_rx_en) { |
---|
| 2507 | + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
---|
| 2508 | + u64 *ns = (u64 *) skb->data; |
---|
| 2509 | + |
---|
| 2510 | + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
---|
| 2511 | + shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); |
---|
| 2512 | + } |
---|
| 2513 | + |
---|
| 2514 | + if (priv->padding) |
---|
| 2515 | + skb_pull(skb, priv->padding); |
---|
| 2516 | + |
---|
| 2517 | + /* Trim off the FCS */ |
---|
| 2518 | + pskb_trim(skb, skb->len - ETH_FCS_LEN); |
---|
| 2519 | + |
---|
| 2520 | + if (ndev->features & NETIF_F_RXCSUM) |
---|
| 2521 | + gfar_rx_checksum(skb, fcb); |
---|
| 2522 | + |
---|
| 2523 | + /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
---|
| 2524 | + * Even if vlan rx accel is disabled, on some chips
---|
| 2525 | + * RXFCB_VLN is pseudo-randomly set.
---|
| 2526 | + */ |
---|
| 2527 | + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && |
---|
| 2528 | + be16_to_cpu(fcb->flags) & RXFCB_VLN) |
---|
| 2529 | + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
---|
| 2530 | + be16_to_cpu(fcb->vlctl)); |
---|
| 2531 | +} |
---|
| 2532 | + |
---|
| 2533 | +/* gfar_clean_rx_ring() -- Processes each frame in the rx ring |
---|
| 2534 | + * until the budget/quota has been reached. Returns the number |
---|
| 2535 | + * of frames handled |
---|
| 2536 | + */ |
---|
| 2537 | +static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, |
---|
| 2538 | + int rx_work_limit) |
---|
| 2539 | +{ |
---|
| 2540 | + struct net_device *ndev = rx_queue->ndev; |
---|
| 2541 | + struct gfar_private *priv = netdev_priv(ndev); |
---|
| 2542 | + struct rxbd8 *bdp; |
---|
| 2543 | + int i, howmany = 0; |
---|
| 2544 | + struct sk_buff *skb = rx_queue->skb; |
---|
| 2545 | + int cleaned_cnt = gfar_rxbd_unused(rx_queue); |
---|
| 2546 | + unsigned int total_bytes = 0, total_pkts = 0; |
---|
| 2547 | + |
---|
| 2548 | + /* Get the first full descriptor */ |
---|
| 2549 | + i = rx_queue->next_to_clean; |
---|
| 2550 | + |
---|
| 2551 | + while (rx_work_limit--) { |
---|
| 2552 | + u32 lstatus; |
---|
| 2553 | + |
---|
| 2554 | + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { |
---|
| 2555 | + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); |
---|
| 2556 | + cleaned_cnt = 0; |
---|
| 2557 | + } |
---|
| 2558 | + |
---|
| 2559 | + bdp = &rx_queue->rx_bd_base[i]; |
---|
| 2560 | + lstatus = be32_to_cpu(bdp->lstatus); |
---|
| 2561 | + if (lstatus & BD_LFLAG(RXBD_EMPTY)) |
---|
| 2562 | + break; |
---|
| 2563 | + |
---|
| 2564 | + /* lost RXBD_LAST descriptor due to overrun */ |
---|
| 2565 | + if (skb && |
---|
| 2566 | + (lstatus & BD_LFLAG(RXBD_FIRST))) { |
---|
| 2567 | + /* discard faulty buffer */ |
---|
| 2568 | + dev_kfree_skb(skb); |
---|
| 2569 | + skb = NULL; |
---|
| 2570 | + rx_queue->stats.rx_dropped++; |
---|
| 2571 | + |
---|
| 2572 | + /* can continue normally */ |
---|
| 2573 | + } |
---|
| 2574 | + |
---|
| 2575 | + /* order rx buffer descriptor reads */ |
---|
| 2576 | + rmb(); |
---|
| 2577 | + |
---|
| 2578 | + /* fetch next to clean buffer from the ring */ |
---|
| 2579 | + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); |
---|
| 2580 | + if (unlikely(!skb)) |
---|
| 2581 | + break; |
---|
| 2582 | + |
---|
| 2583 | + cleaned_cnt++; |
---|
| 2584 | + howmany++; |
---|
| 2585 | + |
---|
| 2586 | + if (unlikely(++i == rx_queue->rx_ring_size)) |
---|
| 2587 | + i = 0; |
---|
| 2588 | + |
---|
| 2589 | + rx_queue->next_to_clean = i; |
---|
| 2590 | + |
---|
| 2591 | + /* fetch next buffer if not the last in frame */ |
---|
| 2592 | + if (!(lstatus & BD_LFLAG(RXBD_LAST))) |
---|
| 2593 | + continue; |
---|
| 2594 | + |
---|
| 2595 | + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { |
---|
| 2596 | + count_errors(lstatus, ndev); |
---|
| 2597 | + |
---|
| 2598 | + /* discard faulty buffer */ |
---|
| 2599 | + dev_kfree_skb(skb); |
---|
| 2600 | + skb = NULL; |
---|
| 2601 | + rx_queue->stats.rx_dropped++; |
---|
| 2602 | + continue; |
---|
| 2603 | + } |
---|
| 2604 | + |
---|
| 2605 | + gfar_process_frame(ndev, skb); |
---|
| 2606 | + |
---|
| 2607 | + /* Increment the number of packets */ |
---|
| 2608 | + total_pkts++; |
---|
| 2609 | + total_bytes += skb->len; |
---|
| 2610 | + |
---|
| 2611 | + skb_record_rx_queue(skb, rx_queue->qindex); |
---|
| 2612 | + |
---|
| 2613 | + skb->protocol = eth_type_trans(skb, ndev); |
---|
| 2614 | + |
---|
| 2615 | + /* Send the packet up the stack */ |
---|
| 2616 | + napi_gro_receive(&rx_queue->grp->napi_rx, skb); |
---|
| 2617 | + |
---|
| 2618 | + skb = NULL; |
---|
| 2619 | + } |
---|
| 2620 | + |
---|
| 2621 | + /* Store incomplete frames for completion */ |
---|
| 2622 | + rx_queue->skb = skb; |
---|
| 2623 | + |
---|
| 2624 | + rx_queue->stats.rx_packets += total_pkts; |
---|
| 2625 | + rx_queue->stats.rx_bytes += total_bytes; |
---|
| 2626 | + |
---|
| 2627 | + if (cleaned_cnt) |
---|
| 2628 | + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); |
---|
| 2629 | + |
---|
| 2630 | + /* Update Last Free RxBD pointer for LFC */ |
---|
| 2631 | + if (unlikely(priv->tx_actual_en)) { |
---|
| 2632 | + u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); |
---|
| 2633 | + |
---|
| 2634 | + gfar_write(rx_queue->rfbptr, bdp_dma); |
---|
| 2635 | + } |
---|
| 2636 | + |
---|
| 2637 | + return howmany; |
---|
| 2638 | +} |
---|
| 2639 | + |
---|
| 2640 | +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
---|
| 2641 | +{ |
---|
| 2642 | + struct gfar_priv_grp *gfargrp = |
---|
| 2643 | + container_of(napi, struct gfar_priv_grp, napi_rx); |
---|
| 2644 | + struct gfar __iomem *regs = gfargrp->regs; |
---|
| 2645 | + struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; |
---|
| 2646 | + int work_done = 0; |
---|
| 2647 | + |
---|
| 2648 | + /* Clear IEVENT, so interrupts aren't called again |
---|
| 2649 | + * because of the packets that have already arrived |
---|
| 2650 | + */ |
---|
| 2651 | + gfar_write(®s->ievent, IEVENT_RX_MASK); |
---|
| 2652 | + |
---|
| 2653 | + work_done = gfar_clean_rx_ring(rx_queue, budget); |
---|
| 2654 | + |
---|
| 2655 | + if (work_done < budget) { |
---|
| 2656 | + u32 imask; |
---|
| 2657 | + napi_complete_done(napi, work_done); |
---|
| 2658 | + /* Clear the halt bit in RSTAT */ |
---|
| 2659 | + gfar_write(®s->rstat, gfargrp->rstat); |
---|
| 2660 | + |
---|
| 2661 | + spin_lock_irq(&gfargrp->grplock); |
---|
| 2662 | + imask = gfar_read(®s->imask); |
---|
| 2663 | + imask |= IMASK_RX_DEFAULT; |
---|
| 2664 | + gfar_write(®s->imask, imask); |
---|
| 2665 | + spin_unlock_irq(&gfargrp->grplock); |
---|
| 2666 | + } |
---|
| 2667 | + |
---|
| 2668 | + return work_done; |
---|
| 2669 | +} |
---|
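/* Illustrative sketch, not part of the driver: the canonical NAPI shape
 * followed above -- process up to "budget" packets and re-arm interrupts
 * only once the ring goes idle. The backlog below is a stand-in.
 */
#include <stdio.h>

static int ring_pkts = 10;      /* pretend hardware backlog */

static int clean_ring(int budget)
{
        int n = ring_pkts < budget ? ring_pkts : budget;

        ring_pkts -= n;
        return n;
}

static int example_poll(int budget)
{
        int work_done = clean_ring(budget);

        if (work_done < budget)
                printf("ring idle: complete NAPI, unmask RX interrupts\n");
        return work_done;
}

int main(void)
{
        while (example_poll(8) == 8)
                ;       /* keep polling while the budget was exhausted */
        return 0;
}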
| 2670 | + |
---|
| 2671 | +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
---|
| 2672 | +{ |
---|
| 2673 | + struct gfar_priv_grp *gfargrp = |
---|
| 2674 | + container_of(napi, struct gfar_priv_grp, napi_tx); |
---|
| 2675 | + struct gfar __iomem *regs = gfargrp->regs; |
---|
| 2676 | + struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; |
---|
| 2677 | + u32 imask; |
---|
| 2678 | + |
---|
| 2679 | + /* Clear IEVENT, so interrupts aren't called again |
---|
| 2680 | + * because of the packets that have already arrived |
---|
| 2681 | + */ |
---|
| 2682 | + gfar_write(®s->ievent, IEVENT_TX_MASK); |
---|
| 2683 | + |
---|
| 2684 | + /* run Tx cleanup to completion */ |
---|
| 2685 | + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) |
---|
| 2686 | + gfar_clean_tx_ring(tx_queue); |
---|
| 2687 | + |
---|
| 2688 | + napi_complete(napi); |
---|
| 2689 | + |
---|
| 2690 | + spin_lock_irq(&gfargrp->grplock); |
---|
| 2691 | + imask = gfar_read(®s->imask); |
---|
| 2692 | + imask |= IMASK_TX_DEFAULT; |
---|
| 2693 | + gfar_write(®s->imask, imask); |
---|
| 2694 | + spin_unlock_irq(&gfargrp->grplock); |
---|
| 2695 | + |
---|
| 2696 | + return 0; |
---|
| 2697 | +} |
---|
| 2698 | + |
---|
| 2699 | +static int gfar_poll_rx(struct napi_struct *napi, int budget) |
---|
| 2700 | +{ |
---|
| 2701 | + struct gfar_priv_grp *gfargrp = |
---|
| 2702 | + container_of(napi, struct gfar_priv_grp, napi_rx); |
---|
| 2703 | + struct gfar_private *priv = gfargrp->priv; |
---|
| 2704 | + struct gfar __iomem *regs = gfargrp->regs; |
---|
| 2705 | + struct gfar_priv_rx_q *rx_queue = NULL; |
---|
| 2706 | + int work_done = 0, work_done_per_q = 0; |
---|
| 2707 | + int i, budget_per_q = 0; |
---|
| 2708 | + unsigned long rstat_rxf; |
---|
| 2709 | + int num_act_queues; |
---|
| 2710 | + |
---|
| 2711 | + /* Clear IEVENT, so interrupts aren't called again |
---|
| 2712 | + * because of the packets that have already arrived |
---|
| 2713 | + */ |
---|
| 2714 | + gfar_write(®s->ievent, IEVENT_RX_MASK); |
---|
| 2715 | + |
---|
| 2716 | + rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; |
---|
| 2717 | + |
---|
| 2718 | + num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); |
---|
| 2719 | + if (num_act_queues) |
---|
| 2720 | + budget_per_q = budget / num_act_queues;
---|
| 2721 | + |
---|
| 2722 | + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
---|
| 2723 | + /* skip queue if not active */ |
---|
| 2724 | + if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) |
---|
| 2725 | + continue; |
---|
| 2726 | + |
---|
| 2727 | + rx_queue = priv->rx_queue[i]; |
---|
| 2728 | + work_done_per_q = |
---|
| 2729 | + gfar_clean_rx_ring(rx_queue, budget_per_q); |
---|
| 2730 | + work_done += work_done_per_q; |
---|
| 2731 | + |
---|
| 2732 | + /* finished processing this queue */ |
---|
| 2733 | + if (work_done_per_q < budget_per_q) { |
---|
| 2734 | + /* clear active queue hw indication */ |
---|
| 2735 | + gfar_write(®s->rstat, |
---|
| 2736 | + RSTAT_CLEAR_RXF0 >> i); |
---|
| 2737 | + num_act_queues--; |
---|
| 2738 | + |
---|
| 2739 | + if (!num_act_queues) |
---|
| 2740 | + break; |
---|
| 2741 | + } |
---|
| 2742 | + } |
---|
| 2743 | + |
---|
| 2744 | + if (!num_act_queues) { |
---|
| 2745 | + u32 imask; |
---|
| 2746 | + napi_complete_done(napi, work_done); |
---|
| 2747 | + |
---|
| 2748 | + /* Clear the halt bit in RSTAT */ |
---|
| 2749 | + gfar_write(®s->rstat, gfargrp->rstat); |
---|
| 2750 | + |
---|
| 2751 | + spin_lock_irq(&gfargrp->grplock); |
---|
| 2752 | + imask = gfar_read(®s->imask); |
---|
| 2753 | + imask |= IMASK_RX_DEFAULT; |
---|
| 2754 | + gfar_write(®s->imask, imask); |
---|
| 2755 | + spin_unlock_irq(&gfargrp->grplock); |
---|
| 2756 | + } |
---|
| 2757 | + |
---|
| 2758 | + return work_done; |
---|
| 2759 | +} |
---|
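/* Illustrative sketch, not part of the driver: the budget split above
 * divides the NAPI budget evenly across the queues whose RSTAT bit is
 * set; __builtin_popcountl stands in for bitmap_weight(). The bitmap
 * value is an assumption.
 */
#include <stdio.h>

int main(void)
{
        unsigned long rstat_rxf = 0x5;  /* queues 0 and 2 active */
        int budget = 64;

        int num_act_queues = __builtin_popcountl(rstat_rxf);
        int budget_per_q = num_act_queues ? budget / num_act_queues : 0;

        printf("%d active queues, %d budget each\n",
               num_act_queues, budget_per_q);
        return 0;
}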
| 2760 | + |
---|
| 2761 | +static int gfar_poll_tx(struct napi_struct *napi, int budget) |
---|
| 2762 | +{ |
---|
| 2763 | + struct gfar_priv_grp *gfargrp = |
---|
| 2764 | + container_of(napi, struct gfar_priv_grp, napi_tx); |
---|
| 2765 | + struct gfar_private *priv = gfargrp->priv; |
---|
| 2766 | + struct gfar __iomem *regs = gfargrp->regs; |
---|
| 2767 | + struct gfar_priv_tx_q *tx_queue = NULL; |
---|
| 2768 | + int has_tx_work = 0; |
---|
| 2769 | + int i; |
---|
| 2770 | + |
---|
| 2771 | + /* Clear IEVENT, so interrupts aren't called again |
---|
| 2772 | + * because of the packets that have already arrived |
---|
| 2773 | + */ |
---|
| 2774 | + gfar_write(®s->ievent, IEVENT_TX_MASK); |
---|
| 2775 | + |
---|
| 2776 | + for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { |
---|
| 2777 | + tx_queue = priv->tx_queue[i]; |
---|
| 2778 | + /* run Tx cleanup to completion */ |
---|
| 2779 | + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { |
---|
| 2780 | + gfar_clean_tx_ring(tx_queue); |
---|
| 2781 | + has_tx_work = 1; |
---|
| 2782 | + } |
---|
| 2783 | + } |
---|
| 2784 | + |
---|
| 2785 | + if (!has_tx_work) { |
---|
| 2786 | + u32 imask; |
---|
| 2787 | + napi_complete(napi); |
---|
| 2788 | + |
---|
| 2789 | + spin_lock_irq(&gfargrp->grplock); |
---|
| 2790 | + imask = gfar_read(®s->imask); |
---|
| 2791 | + imask |= IMASK_TX_DEFAULT; |
---|
| 2792 | + gfar_write(®s->imask, imask); |
---|
| 2793 | + spin_unlock_irq(&gfargrp->grplock); |
---|
| 2794 | + } |
---|
| 2795 | + |
---|
| 2796 | + return 0; |
---|
| 2797 | +} |
---|
| 2798 | + |
---|
| 2799 | +/* GFAR error interrupt handler */ |
---|
| 2800 | +static irqreturn_t gfar_error(int irq, void *grp_id) |
---|
| 2801 | +{ |
---|
| 2802 | + struct gfar_priv_grp *gfargrp = grp_id; |
---|
| 2803 | + struct gfar __iomem *regs = gfargrp->regs; |
---|
| 2804 | + struct gfar_private *priv = gfargrp->priv;
---|
| 2805 | + struct net_device *dev = priv->ndev; |
---|
| 2806 | + |
---|
| 2807 | + /* Save ievent for future reference */ |
---|
| 2808 | + u32 events = gfar_read(®s->ievent); |
---|
| 2809 | + |
---|
| 2810 | + /* Clear IEVENT */ |
---|
| 2811 | + gfar_write(®s->ievent, events & IEVENT_ERR_MASK); |
---|
| 2812 | + |
---|
| 2813 | + /* Magic Packet is not an error. */ |
---|
| 2814 | + if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
---|
| 2815 | + (events & IEVENT_MAG)) |
---|
| 2816 | + events &= ~IEVENT_MAG; |
---|
| 2817 | + |
---|
| 2818 | + /* Log the raw event bits when error debugging is enabled */
---|
| 2819 | + if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
---|
| 2820 | + netdev_dbg(dev, |
---|
| 2821 | + "error interrupt (ievent=0x%08x imask=0x%08x)\n", |
---|
| 2822 | + events, gfar_read(®s->imask)); |
---|
| 2823 | + |
---|
| 2824 | + /* Update the error counters */ |
---|
| 2825 | + if (events & IEVENT_TXE) { |
---|
| 2826 | + dev->stats.tx_errors++; |
---|
| 2827 | + |
---|
| 2828 | + if (events & IEVENT_LC) |
---|
| 2829 | + dev->stats.tx_window_errors++; |
---|
| 2830 | + if (events & IEVENT_CRL) |
---|
| 2831 | + dev->stats.tx_aborted_errors++; |
---|
| 2832 | + if (events & IEVENT_XFUN) { |
---|
| 2833 | + netif_dbg(priv, tx_err, dev, |
---|
| 2834 | + "TX FIFO underrun, packet dropped\n"); |
---|
| 2835 | + dev->stats.tx_dropped++; |
---|
| 2836 | + atomic64_inc(&priv->extra_stats.tx_underrun); |
---|
| 2837 | + |
---|
| 2838 | + schedule_work(&priv->reset_task); |
---|
| 2839 | + } |
---|
| 2840 | + netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
---|
| 2841 | + } |
---|
| 2842 | + if (events & IEVENT_BSY) { |
---|
| 2843 | + dev->stats.rx_over_errors++; |
---|
| 2844 | + atomic64_inc(&priv->extra_stats.rx_bsy); |
---|
| 2845 | + |
---|
| 2846 | + netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
---|
| 2847 | + gfar_read(®s->rstat)); |
---|
| 2848 | + } |
---|
| 2849 | + if (events & IEVENT_BABR) { |
---|
| 2850 | + dev->stats.rx_errors++; |
---|
| 2851 | + atomic64_inc(&priv->extra_stats.rx_babr); |
---|
| 2852 | + |
---|
| 2853 | + netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
---|
| 2854 | + } |
---|
| 2855 | + if (events & IEVENT_EBERR) { |
---|
| 2856 | + atomic64_inc(&priv->extra_stats.eberr); |
---|
| 2857 | + netif_dbg(priv, rx_err, dev, "bus error\n"); |
---|
| 2858 | + } |
---|
| 2859 | + if (events & IEVENT_RXC) |
---|
| 2860 | + netif_dbg(priv, rx_status, dev, "control frame\n"); |
---|
| 2861 | + |
---|
| 2862 | + if (events & IEVENT_BABT) { |
---|
| 2863 | + atomic64_inc(&priv->extra_stats.tx_babt); |
---|
| 2864 | + netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
---|
| 2865 | + } |
---|
| 2866 | + return IRQ_HANDLED; |
---|
| 2867 | +} |
---|
| 2868 | + |
---|
| 2869 | +/* The interrupt handler for devices with one interrupt */ |
---|
| 2870 | +static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
---|
| 2871 | +{ |
---|
| 2872 | + struct gfar_priv_grp *gfargrp = grp_id; |
---|
| 2873 | + |
---|
| 2874 | + /* Save ievent for future reference */ |
---|
| 2875 | + u32 events = gfar_read(&gfargrp->regs->ievent); |
---|
| 2876 | + |
---|
| 2877 | + /* Check for reception */ |
---|
| 2878 | + if (events & IEVENT_RX_MASK) |
---|
| 2879 | + gfar_receive(irq, grp_id); |
---|
| 2880 | + |
---|
| 2881 | + /* Check for transmit completion */ |
---|
| 2882 | + if (events & IEVENT_TX_MASK) |
---|
| 2883 | + gfar_transmit(irq, grp_id); |
---|
| 2884 | + |
---|
| 2885 | + /* Check for errors */ |
---|
| 2886 | + if (events & IEVENT_ERR_MASK) |
---|
| 2887 | + gfar_error(irq, grp_id); |
---|
| 2888 | + |
---|
| 2889 | + return IRQ_HANDLED; |
---|
| 2890 | +} |
---|
| 2891 | + |
---|
| 2892 | +#ifdef CONFIG_NET_POLL_CONTROLLER |
---|
| 2893 | +/* Polling 'interrupt' - used by things like netconsole to send skbs |
---|
| 2894 | + * without having to re-enable interrupts. It's not called while |
---|
| 2895 | + * the interrupt routine is executing. |
---|
| 2896 | + */ |
---|
| 2897 | +static void gfar_netpoll(struct net_device *dev) |
---|
| 2898 | +{ |
---|
| 2899 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 2900 | + int i; |
---|
| 2901 | + |
---|
| 2902 | + /* If the device has multiple interrupts, run tx/rx */ |
---|
| 2903 | + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
---|
| 2904 | + for (i = 0; i < priv->num_grps; i++) { |
---|
| 2905 | + struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
---|
| 2906 | + |
---|
| 2907 | + disable_irq(gfar_irq(grp, TX)->irq); |
---|
| 2908 | + disable_irq(gfar_irq(grp, RX)->irq); |
---|
| 2909 | + disable_irq(gfar_irq(grp, ER)->irq); |
---|
| 2910 | + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
---|
| 2911 | + enable_irq(gfar_irq(grp, ER)->irq); |
---|
| 2912 | + enable_irq(gfar_irq(grp, RX)->irq); |
---|
| 2913 | + enable_irq(gfar_irq(grp, TX)->irq); |
---|
| 2914 | + } |
---|
| 2915 | + } else { |
---|
| 2916 | + for (i = 0; i < priv->num_grps; i++) { |
---|
| 2917 | + struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
---|
| 2918 | + |
---|
| 2919 | + disable_irq(gfar_irq(grp, TX)->irq); |
---|
| 2920 | + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
---|
| 2921 | + enable_irq(gfar_irq(grp, TX)->irq); |
---|
| 2922 | + } |
---|
| 2923 | + } |
---|
| 2924 | +} |
---|
| 2925 | +#endif |
---|
| 2926 | + |
---|
| 2927 | +static void free_grp_irqs(struct gfar_priv_grp *grp) |
---|
| 2928 | +{ |
---|
| 2929 | + free_irq(gfar_irq(grp, TX)->irq, grp); |
---|
| 2930 | + free_irq(gfar_irq(grp, RX)->irq, grp); |
---|
| 2931 | + free_irq(gfar_irq(grp, ER)->irq, grp); |
---|
| 2932 | +} |
---|
| 2933 | + |
---|
| 2934 | +static int register_grp_irqs(struct gfar_priv_grp *grp) |
---|
| 2935 | +{ |
---|
| 2936 | + struct gfar_private *priv = grp->priv; |
---|
| 2937 | + struct net_device *dev = priv->ndev; |
---|
| 2938 | + int err; |
---|
| 2939 | + |
---|
| 2940 | + /* If the device has multiple interrupts, register for |
---|
| 2941 | + * them. Otherwise, only register for the one |
---|
| 2942 | + */ |
---|
| 2943 | + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
---|
| 2944 | + /* Install our interrupt handlers for Error, |
---|
| 2945 | + * Transmit, and Receive |
---|
| 2946 | + */ |
---|
| 2947 | + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
---|
| 2948 | + gfar_irq(grp, ER)->name, grp); |
---|
| 2949 | + if (err < 0) { |
---|
| 2950 | + netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
| 2951 | + gfar_irq(grp, ER)->irq); |
---|
| 2952 | + |
---|
| 2953 | + goto err_irq_fail; |
---|
| 2954 | + } |
---|
| 2955 | + enable_irq_wake(gfar_irq(grp, ER)->irq); |
---|
| 2956 | + |
---|
| 2957 | + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
---|
| 2958 | + gfar_irq(grp, TX)->name, grp); |
---|
| 2959 | + if (err < 0) { |
---|
| 2960 | + netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
| 2961 | + gfar_irq(grp, TX)->irq); |
---|
| 2962 | + goto tx_irq_fail; |
---|
| 2963 | + } |
---|
| 2964 | + err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
---|
| 2965 | + gfar_irq(grp, RX)->name, grp); |
---|
| 2966 | + if (err < 0) { |
---|
| 2967 | + netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
| 2968 | + gfar_irq(grp, RX)->irq); |
---|
| 2969 | + goto rx_irq_fail; |
---|
| 2970 | + } |
---|
| 2971 | + enable_irq_wake(gfar_irq(grp, RX)->irq); |
---|
| 2972 | + |
---|
| 2973 | + } else { |
---|
| 2974 | + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
---|
| 2975 | + gfar_irq(grp, TX)->name, grp); |
---|
| 2976 | + if (err < 0) { |
---|
| 2977 | + netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
| 2978 | + gfar_irq(grp, TX)->irq); |
---|
| 2979 | + goto err_irq_fail; |
---|
| 2980 | + } |
---|
| 2981 | + enable_irq_wake(gfar_irq(grp, TX)->irq); |
---|
| 2982 | + } |
---|
| 2983 | + |
---|
| 2984 | + return 0; |
---|
| 2985 | + |
---|
| 2986 | +rx_irq_fail: |
---|
| 2987 | + free_irq(gfar_irq(grp, TX)->irq, grp); |
---|
| 2988 | +tx_irq_fail: |
---|
| 2989 | + free_irq(gfar_irq(grp, ER)->irq, grp); |
---|
| 2990 | +err_irq_fail: |
---|
| 2991 | + return err; |
---|
| 2992 | + |
---|
| 2993 | +} |
---|
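/* Illustrative sketch, not part of the driver: the goto ladder above is
 * the standard kernel unwind idiom -- each failure label releases exactly
 * what was acquired before it. Stubbed out so it runs standalone.
 */
#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int setup_irqs(void)
{
        int err;

        err = acquire("ER irq");
        if (err)
                goto err_out;
        err = acquire("TX irq");
        if (err)
                goto err_free_er;
        err = acquire("RX irq");
        if (err)
                goto err_free_tx;
        return 0;

err_free_tx:
        release("TX irq");
err_free_er:
        release("ER irq");
err_out:
        return err;
}

int main(void)
{
        return setup_irqs();
}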
| 2994 | + |
---|
| 2995 | +static void gfar_free_irq(struct gfar_private *priv) |
---|
| 2996 | +{ |
---|
| 2997 | + int i; |
---|
| 2998 | + |
---|
| 2999 | + /* Free the IRQs */ |
---|
| 3000 | + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
---|
| 3001 | + for (i = 0; i < priv->num_grps; i++) |
---|
| 3002 | + free_grp_irqs(&priv->gfargrp[i]); |
---|
| 3003 | + } else { |
---|
| 3004 | + for (i = 0; i < priv->num_grps; i++) |
---|
| 3005 | + free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, |
---|
| 3006 | + &priv->gfargrp[i]); |
---|
| 3007 | + } |
---|
| 3008 | +} |
---|
| 3009 | + |
---|
| 3010 | +static int gfar_request_irq(struct gfar_private *priv) |
---|
| 3011 | +{ |
---|
| 3012 | + int err, i, j; |
---|
| 3013 | + |
---|
| 3014 | + for (i = 0; i < priv->num_grps; i++) { |
---|
| 3015 | + err = register_grp_irqs(&priv->gfargrp[i]); |
---|
| 3016 | + if (err) { |
---|
| 3017 | + for (j = 0; j < i; j++) |
---|
| 3018 | + free_grp_irqs(&priv->gfargrp[j]); |
---|
| 3019 | + return err; |
---|
| 3020 | + } |
---|
| 3021 | + } |
---|
| 3022 | + |
---|
| 3023 | + return 0; |
---|
| 3024 | +} |
---|
| 3025 | + |
---|
| 3026 | +/* Called when something needs to use the Ethernet device.
---|
| 3027 | + * Returns 0 for success. |
---|
| 3028 | + */ |
---|
| 3029 | +static int gfar_enet_open(struct net_device *dev) |
---|
| 3030 | +{ |
---|
| 3031 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 3032 | + int err; |
---|
| 3033 | + |
---|
| 3034 | + err = init_phy(dev); |
---|
| 3035 | + if (err) |
---|
| 3036 | + return err; |
---|
| 3037 | + |
---|
| 3038 | + err = gfar_request_irq(priv); |
---|
| 3039 | + if (err) |
---|
| 3040 | + return err; |
---|
| 3041 | + |
---|
| 3042 | + err = startup_gfar(dev); |
---|
| 3043 | + if (err) |
---|
| 3044 | + return err; |
---|
| 3045 | + |
---|
| 3046 | + return err; |
---|
| 3047 | +} |
---|
| 3048 | + |
---|
| 3049 | +/* Stops the kernel queue, and halts the controller */ |
---|
| 3050 | +static int gfar_close(struct net_device *dev) |
---|
| 3051 | +{ |
---|
| 3052 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 3053 | + |
---|
| 3054 | + cancel_work_sync(&priv->reset_task); |
---|
| 3055 | + stop_gfar(dev); |
---|
| 3056 | + |
---|
| 3057 | + /* Disconnect from the PHY */ |
---|
| 3058 | + phy_disconnect(dev->phydev); |
---|
| 3059 | + |
---|
| 3060 | + gfar_free_irq(priv); |
---|
| 3061 | + |
---|
| 3062 | + return 0; |
---|
| 3063 | +} |
---|
| 3064 | + |
---|
| 3065 | +/* Clears each of the exact match registers to zero, so they |
---|
| 3066 | + * don't interfere with normal reception |
---|
| 3067 | + */ |
---|
| 3068 | +static void gfar_clear_exact_match(struct net_device *dev) |
---|
| 3069 | +{ |
---|
| 3070 | + int idx; |
---|
| 3071 | + static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
---|
| 3072 | + |
---|
| 3073 | + for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
---|
| 3074 | + gfar_set_mac_for_addr(dev, idx, zero_arr); |
---|
| 3075 | +} |
---|
| 3076 | + |
---|
| 3077 | +/* Update the hash table based on the current list of multicast |
---|
| 3078 | + * addresses we subscribe to. Also, change the promiscuity of |
---|
| 3079 | + * the device based on the flags (this function is called |
---|
| 3080 | + * whenever dev->flags is changed)
---|
| 3081 | + */ |
---|
| 3082 | +static void gfar_set_multi(struct net_device *dev) |
---|
| 3083 | +{ |
---|
| 3084 | + struct netdev_hw_addr *ha; |
---|
| 3085 | + struct gfar_private *priv = netdev_priv(dev); |
---|
| 3086 | + struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
| 3087 | + u32 tempval; |
---|
| 3088 | + |
---|
| 3089 | + if (dev->flags & IFF_PROMISC) { |
---|
| 3090 | + /* Set RCTRL to PROM */ |
---|
| 3091 | + tempval = gfar_read(®s->rctrl); |
---|
| 3092 | + tempval |= RCTRL_PROM; |
---|
| 3093 | + gfar_write(®s->rctrl, tempval); |
---|
| 3094 | + } else { |
---|
| 3095 | + /* Set RCTRL to not PROM */ |
---|
| 3096 | + tempval = gfar_read(®s->rctrl); |
---|
| 3097 | + tempval &= ~(RCTRL_PROM); |
---|
| 3098 | + gfar_write(®s->rctrl, tempval); |
---|
| 3099 | + } |
---|
| 3100 | + |
---|
| 3101 | + if (dev->flags & IFF_ALLMULTI) { |
---|
| 3102 | + /* Set the hash to rx all multicast frames */ |
---|
| 3103 | + gfar_write(®s->igaddr0, 0xffffffff); |
---|
| 3104 | + gfar_write(®s->igaddr1, 0xffffffff); |
---|
| 3105 | + gfar_write(®s->igaddr2, 0xffffffff); |
---|
| 3106 | + gfar_write(®s->igaddr3, 0xffffffff); |
---|
| 3107 | + gfar_write(®s->igaddr4, 0xffffffff); |
---|
| 3108 | + gfar_write(®s->igaddr5, 0xffffffff); |
---|
| 3109 | + gfar_write(®s->igaddr6, 0xffffffff); |
---|
| 3110 | + gfar_write(®s->igaddr7, 0xffffffff); |
---|
| 3111 | + gfar_write(®s->gaddr0, 0xffffffff); |
---|
| 3112 | + gfar_write(®s->gaddr1, 0xffffffff); |
---|
| 3113 | + gfar_write(®s->gaddr2, 0xffffffff); |
---|
| 3114 | + gfar_write(®s->gaddr3, 0xffffffff); |
---|
| 3115 | + gfar_write(®s->gaddr4, 0xffffffff); |
---|
| 3116 | + gfar_write(®s->gaddr5, 0xffffffff); |
---|
| 3117 | + gfar_write(®s->gaddr6, 0xffffffff); |
---|
| 3118 | + gfar_write(®s->gaddr7, 0xffffffff); |
---|
| 3119 | + } else { |
---|
| 3120 | + int em_num; |
---|
| 3121 | + int idx; |
---|
| 3122 | + |
---|
| 3123 | + /* zero out the hash */ |
---|
| 3124 | + gfar_write(®s->igaddr0, 0x0); |
---|
| 3125 | + gfar_write(®s->igaddr1, 0x0); |
---|
| 3126 | + gfar_write(®s->igaddr2, 0x0); |
---|
| 3127 | + gfar_write(®s->igaddr3, 0x0); |
---|
| 3128 | + gfar_write(®s->igaddr4, 0x0); |
---|
| 3129 | + gfar_write(®s->igaddr5, 0x0); |
---|
| 3130 | + gfar_write(®s->igaddr6, 0x0); |
---|
| 3131 | + gfar_write(®s->igaddr7, 0x0); |
---|
| 3132 | + gfar_write(®s->gaddr0, 0x0); |
---|
| 3133 | + gfar_write(®s->gaddr1, 0x0); |
---|
| 3134 | + gfar_write(®s->gaddr2, 0x0); |
---|
| 3135 | + gfar_write(®s->gaddr3, 0x0); |
---|
| 3136 | + gfar_write(®s->gaddr4, 0x0); |
---|
| 3137 | + gfar_write(®s->gaddr5, 0x0); |
---|
| 3138 | + gfar_write(®s->gaddr6, 0x0); |
---|
| 3139 | + gfar_write(®s->gaddr7, 0x0); |
---|
| 3140 | + |
---|
| 3141 | + /* If we have extended hash tables, we need to |
---|
| 3142 | + * clear the exact match registers to prepare for |
---|
| 3143 | + * setting them |
---|
| 3144 | + */ |
---|
| 3145 | + if (priv->extended_hash) { |
---|
| 3146 | + em_num = GFAR_EM_NUM + 1; |
---|
| 3147 | + gfar_clear_exact_match(dev); |
---|
| 3148 | + idx = 1; |
---|
| 3149 | + } else { |
---|
| 3150 | + idx = 0; |
---|
| 3151 | + em_num = 0; |
---|
| 3152 | + } |
---|
| 3153 | + |
---|
| 3154 | + if (netdev_mc_empty(dev)) |
---|
| 3155 | + return; |
---|
| 3156 | + |
---|
| 3157 | + /* Parse the list, and set the appropriate bits */ |
---|
| 3158 | + netdev_for_each_mc_addr(ha, dev) { |
---|
| 3159 | + if (idx < em_num) { |
---|
| 3160 | + gfar_set_mac_for_addr(dev, idx, ha->addr); |
---|
| 3161 | + idx++; |
---|
| 3162 | + } else |
---|
| 3163 | + gfar_set_hash_for_addr(dev, ha->addr); |
---|
| 3164 | + } |
---|
| 3165 | + } |
---|
1145 | 3166 | } |
---|
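Addresses that don't win an exact-match slot fall through to
gfar_set_hash_for_addr(). A sketch of the CRC-to-bit mapping it performs
(assuming the ether_crc() helper from <linux/crc32.h>; hash_width is the
8- or 9-bit value chosen at init time): the top hash_width bits of the
Ethernet CRC select a register via their upper bits and a bit within that
register via their low five bits.

static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;		/* 8 or 9 */
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = 1 << (31 - whichbit);

	gfar_write(priv->hash_regs[whichreg],
		   gfar_read(priv->hash_regs[whichreg]) | value);
}

With width == 9, whichreg spans 0..15 and indexes all sixteen IGADDR/GADDR
registers; with width == 8 only the eight GADDRs are used.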
1146 | 3167 | |
---|
1147 | 3168 | void gfar_mac_reset(struct gfar_private *priv) |
---|
.. | .. |
---|
1273 | 3294 | gfar_write_isrg(priv); |
---|
1274 | 3295 | } |
---|
1275 | 3296 | |
---|
1276 | | -static void gfar_init_addr_hash_table(struct gfar_private *priv) |
---|
1277 | | -{ |
---|
1278 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
1279 | | - |
---|
1280 | | - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
---|
1281 | | - priv->extended_hash = 1; |
---|
1282 | | - priv->hash_width = 9; |
---|
1283 | | - |
---|
1284 | | - priv->hash_regs[0] = &regs->igaddr0;
---|
1285 | | - priv->hash_regs[1] = &regs->igaddr1;
---|
1286 | | - priv->hash_regs[2] = &regs->igaddr2;
---|
1287 | | - priv->hash_regs[3] = &regs->igaddr3;
---|
1288 | | - priv->hash_regs[4] = &regs->igaddr4;
---|
1289 | | - priv->hash_regs[5] = &regs->igaddr5;
---|
1290 | | - priv->hash_regs[6] = &regs->igaddr6;
---|
1291 | | - priv->hash_regs[7] = &regs->igaddr7;
---|
1292 | | - priv->hash_regs[8] = &regs->gaddr0;
---|
1293 | | - priv->hash_regs[9] = &regs->gaddr1;
---|
1294 | | - priv->hash_regs[10] = &regs->gaddr2;
---|
1295 | | - priv->hash_regs[11] = &regs->gaddr3;
---|
1296 | | - priv->hash_regs[12] = &regs->gaddr4;
---|
1297 | | - priv->hash_regs[13] = &regs->gaddr5;
---|
1298 | | - priv->hash_regs[14] = &regs->gaddr6;
---|
1299 | | - priv->hash_regs[15] = &regs->gaddr7;
---|
1300 | | - |
---|
1301 | | - } else { |
---|
1302 | | - priv->extended_hash = 0; |
---|
1303 | | - priv->hash_width = 8; |
---|
1304 | | - |
---|
1305 | | - priv->hash_regs[0] = &regs->gaddr0;
---|
1306 | | - priv->hash_regs[1] = &regs->gaddr1;
---|
1307 | | - priv->hash_regs[2] = &regs->gaddr2;
---|
1308 | | - priv->hash_regs[3] = &regs->gaddr3;
---|
1309 | | - priv->hash_regs[4] = &regs->gaddr4;
---|
1310 | | - priv->hash_regs[5] = &regs->gaddr5;
---|
1311 | | - priv->hash_regs[6] = &regs->gaddr6;
---|
1312 | | - priv->hash_regs[7] = &regs->gaddr7;
---|
1313 | | - } |
---|
1314 | | -} |
---|
| 3297 | +static const struct net_device_ops gfar_netdev_ops = { |
---|
| 3298 | + .ndo_open = gfar_enet_open, |
---|
| 3299 | + .ndo_start_xmit = gfar_start_xmit, |
---|
| 3300 | + .ndo_stop = gfar_close, |
---|
| 3301 | + .ndo_change_mtu = gfar_change_mtu, |
---|
| 3302 | + .ndo_set_features = gfar_set_features, |
---|
| 3303 | + .ndo_set_rx_mode = gfar_set_multi, |
---|
| 3304 | + .ndo_tx_timeout = gfar_timeout, |
---|
| 3305 | + .ndo_do_ioctl = gfar_ioctl, |
---|
| 3306 | + .ndo_get_stats = gfar_get_stats, |
---|
| 3307 | + .ndo_change_carrier = fixed_phy_change_carrier, |
---|
| 3308 | + .ndo_set_mac_address = gfar_set_mac_addr, |
---|
| 3309 | + .ndo_validate_addr = eth_validate_addr, |
---|
| 3310 | +#ifdef CONFIG_NET_POLL_CONTROLLER |
---|
| 3311 | + .ndo_poll_controller = gfar_netpoll, |
---|
| 3312 | +#endif |
---|
| 3313 | +}; |
---|
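These ops are attached to the net_device during probe. A representative
hookup (a sketch of the relevant gfar_probe() lines, assuming the
gfar_ethtool_ops table from gianfar_ethtool.c):

	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;	/* arms .ndo_tx_timeout */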
1315 | 3314 | |
---|
1316 | 3315 | /* Set up the ethernet device structure, private data, |
---|
1317 | 3316 | * and anything else we need before we start |
---|
.. | .. |
---|
1741 | 3740 | #define GFAR_PM_OPS NULL |
---|
1742 | 3741 | |
---|
1743 | 3742 | #endif |
---|
1744 | | - |
---|
1745 | | -/* Reads the controller's registers to determine what interface |
---|
1746 | | - * connects it to the PHY. |
---|
1747 | | - */ |
---|
1748 | | -static phy_interface_t gfar_get_interface(struct net_device *dev) |
---|
1749 | | -{ |
---|
1750 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
1751 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
1752 | | - u32 ecntrl; |
---|
1753 | | - |
---|
1754 | | - ecntrl = gfar_read(&regs->ecntrl);
---|
1755 | | - |
---|
1756 | | - if (ecntrl & ECNTRL_SGMII_MODE) |
---|
1757 | | - return PHY_INTERFACE_MODE_SGMII; |
---|
1758 | | - |
---|
1759 | | - if (ecntrl & ECNTRL_TBI_MODE) { |
---|
1760 | | - if (ecntrl & ECNTRL_REDUCED_MODE) |
---|
1761 | | - return PHY_INTERFACE_MODE_RTBI; |
---|
1762 | | - else |
---|
1763 | | - return PHY_INTERFACE_MODE_TBI; |
---|
1764 | | - } |
---|
1765 | | - |
---|
1766 | | - if (ecntrl & ECNTRL_REDUCED_MODE) { |
---|
1767 | | - if (ecntrl & ECNTRL_REDUCED_MII_MODE) { |
---|
1768 | | - return PHY_INTERFACE_MODE_RMII; |
---|
1769 | | - } |
---|
1770 | | - else { |
---|
1771 | | - phy_interface_t interface = priv->interface; |
---|
1772 | | - |
---|
1773 | | - /* This isn't autodetected right now, so it must |
---|
1774 | | - * be set by the device tree or platform code. |
---|
1775 | | - */ |
---|
1776 | | - if (interface == PHY_INTERFACE_MODE_RGMII_ID) |
---|
1777 | | - return PHY_INTERFACE_MODE_RGMII_ID; |
---|
1778 | | - |
---|
1779 | | - return PHY_INTERFACE_MODE_RGMII; |
---|
1780 | | - } |
---|
1781 | | - } |
---|
1782 | | - |
---|
1783 | | - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
---|
1784 | | - return PHY_INTERFACE_MODE_GMII; |
---|
1785 | | - |
---|
1786 | | - return PHY_INTERFACE_MODE_MII; |
---|
1787 | | -} |
---|
1788 | | - |
---|
1789 | | - |
---|
1790 | | -/* Initializes driver's PHY state, and attaches to the PHY. |
---|
1791 | | - * Returns 0 on success. |
---|
1792 | | - */ |
---|
1793 | | -static int init_phy(struct net_device *dev) |
---|
1794 | | -{ |
---|
1795 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
1796 | | - uint gigabit_support = |
---|
1797 | | - priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
---|
1798 | | - GFAR_SUPPORTED_GBIT : 0; |
---|
1799 | | - phy_interface_t interface; |
---|
1800 | | - struct phy_device *phydev; |
---|
1801 | | - struct ethtool_eee edata; |
---|
1802 | | - |
---|
1803 | | - priv->oldlink = 0; |
---|
1804 | | - priv->oldspeed = 0; |
---|
1805 | | - priv->oldduplex = -1; |
---|
1806 | | - |
---|
1807 | | - interface = gfar_get_interface(dev); |
---|
1808 | | - |
---|
1809 | | - phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
---|
1810 | | - interface); |
---|
1811 | | - if (!phydev) { |
---|
1812 | | - dev_err(&dev->dev, "could not attach to PHY\n"); |
---|
1813 | | - return -ENODEV; |
---|
1814 | | - } |
---|
1815 | | - |
---|
1816 | | - if (interface == PHY_INTERFACE_MODE_SGMII) |
---|
1817 | | - gfar_configure_serdes(dev); |
---|
1818 | | - |
---|
1819 | | - /* Remove any features not supported by the controller */ |
---|
1820 | | - phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
---|
1821 | | - phydev->advertising = phydev->supported; |
---|
1822 | | - |
---|
1823 | | - /* Add support for flow control, but don't advertise it by default */ |
---|
1824 | | - phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
---|
1825 | | - |
---|
1826 | | - /* disable EEE autoneg, EEE not supported by eTSEC */ |
---|
1827 | | - memset(&edata, 0, sizeof(struct ethtool_eee)); |
---|
1828 | | - phy_ethtool_set_eee(phydev, &edata); |
---|
1829 | | - |
---|
1830 | | - return 0; |
---|
1831 | | -} |
---|
1832 | | - |
---|
1833 | | -/* Initialize TBI PHY interface for communicating with the |
---|
1834 | | - * SERDES lynx PHY on the chip. We communicate with this PHY |
---|
1835 | | - * through the MDIO bus on each controller, treating it as a |
---|
1836 | | - * "normal" PHY at the address found in the TBIPA register. We assume |
---|
1837 | | - * that the TBIPA register is valid. Either the MDIO bus code will set |
---|
1838 | | - * it to a value that doesn't conflict with other PHYs on the bus, or the |
---|
1839 | | - * value doesn't matter, as there are no other PHYs on the bus. |
---|
1840 | | - */ |
---|
1841 | | -static void gfar_configure_serdes(struct net_device *dev) |
---|
1842 | | -{ |
---|
1843 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
1844 | | - struct phy_device *tbiphy; |
---|
1845 | | - |
---|
1846 | | - if (!priv->tbi_node) { |
---|
1847 | | - dev_warn(&dev->dev, "error: SGMII mode requires that the " |
---|
1848 | | - "device tree specify a tbi-handle\n"); |
---|
1849 | | - return; |
---|
1850 | | - } |
---|
1851 | | - |
---|
1852 | | - tbiphy = of_phy_find_device(priv->tbi_node); |
---|
1853 | | - if (!tbiphy) { |
---|
1854 | | - dev_err(&dev->dev, "error: Could not get TBI device\n"); |
---|
1855 | | - return; |
---|
1856 | | - } |
---|
1857 | | - |
---|
1858 | | - /* If the link is already up, we must already be ok, and don't need to |
---|
1859 | | - * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
---|
1860 | | - * everything for us? Resetting it takes the link down and requires |
---|
1861 | | - * several seconds for it to come back. |
---|
1862 | | - */ |
---|
1863 | | - if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { |
---|
1864 | | - put_device(&tbiphy->mdio.dev); |
---|
1865 | | - return; |
---|
1866 | | - } |
---|
1867 | | - |
---|
1868 | | - /* Single clk mode, mii mode off (for serdes communication) */
---|
1869 | | - phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
---|
1870 | | - |
---|
1871 | | - phy_write(tbiphy, MII_ADVERTISE, |
---|
1872 | | - ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
---|
1873 | | - ADVERTISE_1000XPSE_ASYM); |
---|
1874 | | - |
---|
1875 | | - phy_write(tbiphy, MII_BMCR, |
---|
1876 | | - BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | |
---|
1877 | | - BMCR_SPEED1000); |
---|
1878 | | - |
---|
1879 | | - put_device(&tbiphy->mdio.dev); |
---|
1880 | | -} |
---|
1881 | | - |
---|
1882 | | -static int __gfar_is_rx_idle(struct gfar_private *priv) |
---|
1883 | | -{ |
---|
1884 | | - u32 res; |
---|
1885 | | - |
---|
1886 | | - /* Normally TSEC should not hang on GRS commands, so we should
---|
1887 | | - * actually wait for IEVENT_GRSC flag. |
---|
1888 | | - */ |
---|
1889 | | - if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) |
---|
1890 | | - return 0; |
---|
1891 | | - |
---|
1892 | | - /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
---|
1893 | | - * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
---|
1894 | | - * and the Rx can be safely reset. |
---|
1895 | | - */ |
---|
1896 | | - res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); |
---|
1897 | | - res &= 0x7f807f80; |
---|
1898 | | - if ((res & 0xffff) == (res >> 16)) |
---|
1899 | | - return 1; |
---|
1900 | | - |
---|
1901 | | - return 0; |
---|
1902 | | -} |
---|
1903 | | - |
---|
1904 | | -/* Halt the receive and transmit queues */ |
---|
1905 | | -static void gfar_halt_nodisable(struct gfar_private *priv) |
---|
1906 | | -{ |
---|
1907 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
1908 | | - u32 tempval; |
---|
1909 | | - unsigned int timeout; |
---|
1910 | | - int stopped; |
---|
1911 | | - |
---|
1912 | | - gfar_ints_disable(priv); |
---|
1913 | | - |
---|
1914 | | - if (gfar_is_dma_stopped(priv)) |
---|
1915 | | - return; |
---|
1916 | | - |
---|
1917 | | - /* Stop the DMA, and wait for it to stop */ |
---|
1918 | | - tempval = gfar_read(&regs->dmactrl);
---|
1919 | | - tempval |= (DMACTRL_GRS | DMACTRL_GTS);
---|
1920 | | - gfar_write(&regs->dmactrl, tempval);
---|
1921 | | - |
---|
1922 | | -retry: |
---|
1923 | | - timeout = 1000; |
---|
1924 | | - while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { |
---|
1925 | | - cpu_relax(); |
---|
1926 | | - timeout--; |
---|
1927 | | - } |
---|
1928 | | - |
---|
1929 | | - if (!timeout) |
---|
1930 | | - stopped = gfar_is_dma_stopped(priv); |
---|
1931 | | - |
---|
1932 | | - if (!stopped && !gfar_is_rx_dma_stopped(priv) && |
---|
1933 | | - !__gfar_is_rx_idle(priv)) |
---|
1934 | | - goto retry; |
---|
1935 | | -} |
---|
1936 | | - |
---|
1937 | | -/* Halt the receive and transmit queues */ |
---|
1938 | | -void gfar_halt(struct gfar_private *priv) |
---|
1939 | | -{ |
---|
1940 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
1941 | | - u32 tempval; |
---|
1942 | | - |
---|
1943 | | - /* Disable the Rx/Tx hw queues */
---|
1944 | | - gfar_write(&regs->rqueue, 0);
---|
1945 | | - gfar_write(&regs->tqueue, 0);
---|
1946 | | - |
---|
1947 | | - mdelay(10); |
---|
1948 | | - |
---|
1949 | | - gfar_halt_nodisable(priv); |
---|
1950 | | - |
---|
1951 | | - /* Disable Rx/Tx DMA */ |
---|
1952 | | - tempval = gfar_read(&regs->maccfg1);
---|
1953 | | - tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
---|
1954 | | - gfar_write(&regs->maccfg1, tempval);
---|
1955 | | -} |
---|
1956 | | - |
---|
1957 | | -void stop_gfar(struct net_device *dev) |
---|
1958 | | -{ |
---|
1959 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
1960 | | - |
---|
1961 | | - netif_tx_stop_all_queues(dev); |
---|
1962 | | - |
---|
1963 | | - smp_mb__before_atomic(); |
---|
1964 | | - set_bit(GFAR_DOWN, &priv->state); |
---|
1965 | | - smp_mb__after_atomic(); |
---|
1966 | | - |
---|
1967 | | - disable_napi(priv); |
---|
1968 | | - |
---|
1969 | | - /* disable ints and gracefully shut down Rx/Tx DMA */ |
---|
1970 | | - gfar_halt(priv); |
---|
1971 | | - |
---|
1972 | | - phy_stop(dev->phydev); |
---|
1973 | | - |
---|
1974 | | - free_skb_resources(priv); |
---|
1975 | | -} |
---|
1976 | | - |
---|
1977 | | -static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
---|
1978 | | -{ |
---|
1979 | | - struct txbd8 *txbdp; |
---|
1980 | | - struct gfar_private *priv = netdev_priv(tx_queue->dev); |
---|
1981 | | - int i, j; |
---|
1982 | | - |
---|
1983 | | - txbdp = tx_queue->tx_bd_base; |
---|
1984 | | - |
---|
1985 | | - for (i = 0; i < tx_queue->tx_ring_size; i++) { |
---|
1986 | | - if (!tx_queue->tx_skbuff[i]) |
---|
1987 | | - continue; |
---|
1988 | | - |
---|
1989 | | - dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), |
---|
1990 | | - be16_to_cpu(txbdp->length), DMA_TO_DEVICE); |
---|
1991 | | - txbdp->lstatus = 0; |
---|
1992 | | - for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
---|
1993 | | - j++) { |
---|
1994 | | - txbdp++; |
---|
1995 | | - dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), |
---|
1996 | | - be16_to_cpu(txbdp->length), |
---|
1997 | | - DMA_TO_DEVICE); |
---|
1998 | | - } |
---|
1999 | | - txbdp++; |
---|
2000 | | - dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
---|
2001 | | - tx_queue->tx_skbuff[i] = NULL; |
---|
2002 | | - } |
---|
2003 | | - kfree(tx_queue->tx_skbuff); |
---|
2004 | | - tx_queue->tx_skbuff = NULL; |
---|
2005 | | -} |
---|
2006 | | - |
---|
2007 | | -static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
---|
2008 | | -{ |
---|
2009 | | - int i; |
---|
2010 | | - |
---|
2011 | | - struct rxbd8 *rxbdp = rx_queue->rx_bd_base; |
---|
2012 | | - |
---|
2013 | | - if (rx_queue->skb) |
---|
2014 | | - dev_kfree_skb(rx_queue->skb); |
---|
2015 | | - |
---|
2016 | | - for (i = 0; i < rx_queue->rx_ring_size; i++) { |
---|
2017 | | - struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; |
---|
2018 | | - |
---|
2019 | | - rxbdp->lstatus = 0; |
---|
2020 | | - rxbdp->bufPtr = 0; |
---|
2021 | | - rxbdp++; |
---|
2022 | | - |
---|
2023 | | - if (!rxb->page) |
---|
2024 | | - continue; |
---|
2025 | | - |
---|
2026 | | - dma_unmap_page(rx_queue->dev, rxb->dma, |
---|
2027 | | - PAGE_SIZE, DMA_FROM_DEVICE); |
---|
2028 | | - __free_page(rxb->page); |
---|
2029 | | - |
---|
2030 | | - rxb->page = NULL; |
---|
2031 | | - } |
---|
2032 | | - |
---|
2033 | | - kfree(rx_queue->rx_buff); |
---|
2034 | | - rx_queue->rx_buff = NULL; |
---|
2035 | | -} |
---|
2036 | | - |
---|
2037 | | -/* If there are any tx skbs or rx skbs still around, free them. |
---|
2038 | | - * Then free tx_skbuff and rx_skbuff |
---|
2039 | | - */ |
---|
2040 | | -static void free_skb_resources(struct gfar_private *priv) |
---|
2041 | | -{ |
---|
2042 | | - struct gfar_priv_tx_q *tx_queue = NULL; |
---|
2043 | | - struct gfar_priv_rx_q *rx_queue = NULL; |
---|
2044 | | - int i; |
---|
2045 | | - |
---|
2046 | | - /* Go through all the buffer descriptors and free their data buffers */ |
---|
2047 | | - for (i = 0; i < priv->num_tx_queues; i++) { |
---|
2048 | | - struct netdev_queue *txq; |
---|
2049 | | - |
---|
2050 | | - tx_queue = priv->tx_queue[i]; |
---|
2051 | | - txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); |
---|
2052 | | - if (tx_queue->tx_skbuff) |
---|
2053 | | - free_skb_tx_queue(tx_queue); |
---|
2054 | | - netdev_tx_reset_queue(txq); |
---|
2055 | | - } |
---|
2056 | | - |
---|
2057 | | - for (i = 0; i < priv->num_rx_queues; i++) { |
---|
2058 | | - rx_queue = priv->rx_queue[i]; |
---|
2059 | | - if (rx_queue->rx_buff) |
---|
2060 | | - free_skb_rx_queue(rx_queue); |
---|
2061 | | - } |
---|
2062 | | - |
---|
2063 | | - dma_free_coherent(priv->dev, |
---|
2064 | | - sizeof(struct txbd8) * priv->total_tx_ring_size + |
---|
2065 | | - sizeof(struct rxbd8) * priv->total_rx_ring_size, |
---|
2066 | | - priv->tx_queue[0]->tx_bd_base, |
---|
2067 | | - priv->tx_queue[0]->tx_bd_dma_base); |
---|
2068 | | -} |
---|
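The single dma_free_coherent() above works because the allocation side
places every Tx ring and every Rx ring in one coherent block. A minimal
sketch of that counterpart (the helper name gfar_alloc_bd_block is
hypothetical; in the driver this logic lives in gfar_alloc_skb_resources()):

static int gfar_alloc_bd_block(struct gfar_private *priv)
{
	dma_addr_t addr;
	void *vaddr;

	/* One block: all TxBD rings first, all RxBD rings after them */
	vaddr = dma_alloc_coherent(priv->dev,
				   sizeof(struct txbd8) * priv->total_tx_ring_size +
				   sizeof(struct rxbd8) * priv->total_rx_ring_size,
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	priv->tx_queue[0]->tx_bd_base = vaddr;
	priv->tx_queue[0]->tx_bd_dma_base = addr;
	/* the remaining Tx/Rx rings are carved from the same block */

	return 0;
}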
2069 | | - |
---|
2070 | | -void gfar_start(struct gfar_private *priv) |
---|
2071 | | -{ |
---|
2072 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
2073 | | - u32 tempval; |
---|
2074 | | - int i = 0; |
---|
2075 | | - |
---|
2076 | | - /* Enable Rx/Tx hw queues */ |
---|
2077 | | - gfar_write(&regs->rqueue, priv->rqueue);
---|
2078 | | - gfar_write(&regs->tqueue, priv->tqueue);
---|
2079 | | - |
---|
2080 | | - /* Initialize DMACTRL to have WWR and WOP */ |
---|
2081 | | - tempval = gfar_read(&regs->dmactrl);
---|
2082 | | - tempval |= DMACTRL_INIT_SETTINGS;
---|
2083 | | - gfar_write(&regs->dmactrl, tempval);
---|
2084 | | - 
---|
2085 | | - /* Make sure we aren't stopped */
---|
2086 | | - tempval = gfar_read(&regs->dmactrl);
---|
2087 | | - tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
---|
2088 | | - gfar_write(&regs->dmactrl, tempval);
---|
2089 | | - |
---|
2090 | | - for (i = 0; i < priv->num_grps; i++) { |
---|
2091 | | - regs = priv->gfargrp[i].regs; |
---|
2092 | | - /* Clear THLT/RHLT, so that the DMA starts polling now */ |
---|
2093 | | - gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
---|
2094 | | - gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
---|
2095 | | - } |
---|
2096 | | - |
---|
2097 | | - /* Enable Rx/Tx DMA */ |
---|
2098 | | - tempval = gfar_read(&regs->maccfg1);
---|
2099 | | - tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
---|
2100 | | - gfar_write(&regs->maccfg1, tempval);
---|
2101 | | - |
---|
2102 | | - gfar_ints_enable(priv); |
---|
2103 | | - |
---|
2104 | | - netif_trans_update(priv->ndev); /* prevent tx timeout */ |
---|
2105 | | -} |
---|
2106 | | - |
---|
2107 | | -static void free_grp_irqs(struct gfar_priv_grp *grp) |
---|
2108 | | -{ |
---|
2109 | | - free_irq(gfar_irq(grp, TX)->irq, grp); |
---|
2110 | | - free_irq(gfar_irq(grp, RX)->irq, grp); |
---|
2111 | | - free_irq(gfar_irq(grp, ER)->irq, grp); |
---|
2112 | | -} |
---|
2113 | | - |
---|
2114 | | -static int register_grp_irqs(struct gfar_priv_grp *grp) |
---|
2115 | | -{ |
---|
2116 | | - struct gfar_private *priv = grp->priv; |
---|
2117 | | - struct net_device *dev = priv->ndev; |
---|
2118 | | - int err; |
---|
2119 | | - |
---|
2120 | | - /* If the device has multiple interrupts, register for |
---|
2121 | | - * them. Otherwise, only register for the one |
---|
2122 | | - */ |
---|
2123 | | - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
---|
2124 | | - /* Install our interrupt handlers for Error, |
---|
2125 | | - * Transmit, and Receive |
---|
2126 | | - */ |
---|
2127 | | - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
---|
2128 | | - gfar_irq(grp, ER)->name, grp); |
---|
2129 | | - if (err < 0) { |
---|
2130 | | - netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
2131 | | - gfar_irq(grp, ER)->irq); |
---|
2132 | | - |
---|
2133 | | - goto err_irq_fail; |
---|
2134 | | - } |
---|
2135 | | - enable_irq_wake(gfar_irq(grp, ER)->irq); |
---|
2136 | | - |
---|
2137 | | - err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
---|
2138 | | - gfar_irq(grp, TX)->name, grp); |
---|
2139 | | - if (err < 0) { |
---|
2140 | | - netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
2141 | | - gfar_irq(grp, TX)->irq); |
---|
2142 | | - goto tx_irq_fail; |
---|
2143 | | - } |
---|
2144 | | - err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
---|
2145 | | - gfar_irq(grp, RX)->name, grp); |
---|
2146 | | - if (err < 0) { |
---|
2147 | | - netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
2148 | | - gfar_irq(grp, RX)->irq); |
---|
2149 | | - goto rx_irq_fail; |
---|
2150 | | - } |
---|
2151 | | - enable_irq_wake(gfar_irq(grp, RX)->irq); |
---|
2152 | | - |
---|
2153 | | - } else { |
---|
2154 | | - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
---|
2155 | | - gfar_irq(grp, TX)->name, grp); |
---|
2156 | | - if (err < 0) { |
---|
2157 | | - netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
---|
2158 | | - gfar_irq(grp, TX)->irq); |
---|
2159 | | - goto err_irq_fail; |
---|
2160 | | - } |
---|
2161 | | - enable_irq_wake(gfar_irq(grp, TX)->irq); |
---|
2162 | | - } |
---|
2163 | | - |
---|
2164 | | - return 0; |
---|
2165 | | - |
---|
2166 | | -rx_irq_fail: |
---|
2167 | | - free_irq(gfar_irq(grp, TX)->irq, grp); |
---|
2168 | | -tx_irq_fail: |
---|
2169 | | - free_irq(gfar_irq(grp, ER)->irq, grp); |
---|
2170 | | -err_irq_fail: |
---|
2171 | | - return err; |
---|
2172 | | - |
---|
2173 | | -} |
---|
2174 | | - |
---|
2175 | | -static void gfar_free_irq(struct gfar_private *priv) |
---|
2176 | | -{ |
---|
2177 | | - int i; |
---|
2178 | | - |
---|
2179 | | - /* Free the IRQs */ |
---|
2180 | | - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
---|
2181 | | - for (i = 0; i < priv->num_grps; i++) |
---|
2182 | | - free_grp_irqs(&priv->gfargrp[i]); |
---|
2183 | | - } else { |
---|
2184 | | - for (i = 0; i < priv->num_grps; i++) |
---|
2185 | | - free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, |
---|
2186 | | - &priv->gfargrp[i]); |
---|
2187 | | - } |
---|
2188 | | -} |
---|
2189 | | - |
---|
2190 | | -static int gfar_request_irq(struct gfar_private *priv) |
---|
2191 | | -{ |
---|
2192 | | - int err, i, j; |
---|
2193 | | - |
---|
2194 | | - for (i = 0; i < priv->num_grps; i++) { |
---|
2195 | | - err = register_grp_irqs(&priv->gfargrp[i]); |
---|
2196 | | - if (err) { |
---|
2197 | | - for (j = 0; j < i; j++) |
---|
2198 | | - free_grp_irqs(&priv->gfargrp[j]); |
---|
2199 | | - return err; |
---|
2200 | | - } |
---|
2201 | | - } |
---|
2202 | | - |
---|
2203 | | - return 0; |
---|
2204 | | -} |
---|
2205 | | - |
---|
2206 | | -/* Bring the controller up and running */ |
---|
2207 | | -int startup_gfar(struct net_device *ndev) |
---|
2208 | | -{ |
---|
2209 | | - struct gfar_private *priv = netdev_priv(ndev); |
---|
2210 | | - int err; |
---|
2211 | | - |
---|
2212 | | - gfar_mac_reset(priv); |
---|
2213 | | - |
---|
2214 | | - err = gfar_alloc_skb_resources(ndev); |
---|
2215 | | - if (err) |
---|
2216 | | - return err; |
---|
2217 | | - |
---|
2218 | | - gfar_init_tx_rx_base(priv); |
---|
2219 | | - |
---|
2220 | | - smp_mb__before_atomic(); |
---|
2221 | | - clear_bit(GFAR_DOWN, &priv->state); |
---|
2222 | | - smp_mb__after_atomic(); |
---|
2223 | | - |
---|
2224 | | - /* Start Rx/Tx DMA and enable the interrupts */ |
---|
2225 | | - gfar_start(priv); |
---|
2226 | | - |
---|
2227 | | - /* force link state update after mac reset */ |
---|
2228 | | - priv->oldlink = 0; |
---|
2229 | | - priv->oldspeed = 0; |
---|
2230 | | - priv->oldduplex = -1; |
---|
2231 | | - |
---|
2232 | | - phy_start(ndev->phydev); |
---|
2233 | | - |
---|
2234 | | - enable_napi(priv); |
---|
2235 | | - |
---|
2236 | | - netif_tx_wake_all_queues(ndev); |
---|
2237 | | - |
---|
2238 | | - return 0; |
---|
2239 | | -} |
---|
2240 | | - |
---|
2241 | | -/* Called when something needs to use the ethernet device |
---|
2242 | | - * Returns 0 for success. |
---|
2243 | | - */ |
---|
2244 | | -static int gfar_enet_open(struct net_device *dev) |
---|
2245 | | -{ |
---|
2246 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
2247 | | - int err; |
---|
2248 | | - |
---|
2249 | | - err = init_phy(dev); |
---|
2250 | | - if (err) |
---|
2251 | | - return err; |
---|
2252 | | - |
---|
2253 | | - err = gfar_request_irq(priv); |
---|
2254 | | - if (err) |
---|
2255 | | - return err; |
---|
2256 | | - |
---|
2257 | | - err = startup_gfar(dev); |
---|
2258 | | - if (err) |
---|
2259 | | - return err; |
---|
2260 | | - |
---|
2261 | | - return err; |
---|
2262 | | -} |
---|
2263 | | - |
---|
2264 | | -static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
---|
2265 | | -{ |
---|
2266 | | - struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); |
---|
2267 | | - |
---|
2268 | | - memset(fcb, 0, GMAC_FCB_LEN); |
---|
2269 | | - |
---|
2270 | | - return fcb; |
---|
2271 | | -} |
---|
2272 | | - |
---|
2273 | | -static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
---|
2274 | | - int fcb_length) |
---|
2275 | | -{ |
---|
2276 | | - /* If we're here, it's an IP packet with a TCP or UDP
---|
2277 | | - * payload. We set it to checksum, using a pseudo-header |
---|
2278 | | - * we provide |
---|
2279 | | - */ |
---|
2280 | | - u8 flags = TXFCB_DEFAULT; |
---|
2281 | | - |
---|
2282 | | - /* Tell the controller what the protocol is |
---|
2283 | | - * And provide the already calculated phcs |
---|
2284 | | - */ |
---|
2285 | | - if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
---|
2286 | | - flags |= TXFCB_UDP; |
---|
2287 | | - fcb->phcs = (__force __be16)(udp_hdr(skb)->check); |
---|
2288 | | - } else |
---|
2289 | | - fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); |
---|
2290 | | - |
---|
2291 | | - /* l3os is the distance between the start of the |
---|
2292 | | - * frame (skb->data) and the start of the IP hdr. |
---|
2293 | | - * l4os is the distance between the start of the |
---|
2294 | | - * l3 hdr and the l4 hdr |
---|
2295 | | - */ |
---|
2296 | | - fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); |
---|
2297 | | - fcb->l4os = skb_network_header_len(skb); |
---|
2298 | | - |
---|
2299 | | - fcb->flags = flags; |
---|
2300 | | -} |
---|
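A worked example of the two offsets (hypothetical untagged IPv4/TCP frame,
no timestamping, so fcb_length == GMAC_FCB_LEN == 8):

/* After gfar_add_fcb(), skb->data points at the 8-byte FCB:
 *   skb_network_offset(skb) = GMAC_FCB_LEN + ETH_HLEN = 8 + 14 = 22
 *   fcb->l3os = 22 - 8 = 14   (frame start to IP header)
 *   fcb->l4os = 20            (IPv4 header with no options)
 */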
2301 | | - |
---|
2302 | | -static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) |
---|
2303 | | -{ |
---|
2304 | | - fcb->flags |= TXFCB_VLN; |
---|
2305 | | - fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); |
---|
2306 | | -} |
---|
2307 | | - |
---|
2308 | | -static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
---|
2309 | | - struct txbd8 *base, int ring_size) |
---|
2310 | | -{ |
---|
2311 | | - struct txbd8 *new_bd = bdp + stride; |
---|
2312 | | - |
---|
2313 | | - return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; |
---|
2314 | | -} |
---|
2315 | | - |
---|
2316 | | -static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, |
---|
2317 | | - int ring_size) |
---|
2318 | | -{ |
---|
2319 | | - return skip_txbd(bdp, 1, base, ring_size); |
---|
2320 | | -} |
---|
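An example of the wrap arithmetic: with a ring of 256 descriptors,
skip_txbd(base + 254, 3, base, 256) computes base + 257, sees that it is
past the end, and returns base + 1; the ring wraps without a modulo
operation.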
2321 | | - |
---|
2322 | | -/* eTSEC12: csum generation not supported for some fcb offsets */ |
---|
2323 | | -static inline bool gfar_csum_errata_12(struct gfar_private *priv, |
---|
2324 | | - unsigned long fcb_addr) |
---|
2325 | | -{ |
---|
2326 | | - return (gfar_has_errata(priv, GFAR_ERRATA_12) && |
---|
2327 | | - (fcb_addr % 0x20) > 0x18); |
---|
2328 | | -} |
---|
2329 | | - |
---|
2330 | | -/* eTSEC76: csum generation for frames larger than 2500 may |
---|
2331 | | - * cause excess delays before start of transmission |
---|
2332 | | - */ |
---|
2333 | | -static inline bool gfar_csum_errata_76(struct gfar_private *priv, |
---|
2334 | | - unsigned int len) |
---|
2335 | | -{ |
---|
2336 | | - return (gfar_has_errata(priv, GFAR_ERRATA_76) && |
---|
2337 | | - (len > 2500)); |
---|
2338 | | -} |
---|
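One way to read the eTSEC12 test above (an interpretation, not wording from
the erratum itself): GMAC_FCB_LEN is 8, so an FCB starting at offset
0x19..0x1f within a 32-byte line straddles the line boundary;
(fcb_addr % 0x20) > 0x18 flags exactly those placements, for which the
hardware must not be asked to generate the checksum.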
2339 | | - |
---|
2340 | | -/* This is called by the kernel when a frame is ready for transmission. |
---|
2341 | | - * It is pointed to by the dev->hard_start_xmit function pointer |
---|
2342 | | - */ |
---|
2343 | | -static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
---|
2344 | | -{ |
---|
2345 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
2346 | | - struct gfar_priv_tx_q *tx_queue = NULL; |
---|
2347 | | - struct netdev_queue *txq; |
---|
2348 | | - struct gfar __iomem *regs = NULL; |
---|
2349 | | - struct txfcb *fcb = NULL; |
---|
2350 | | - struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
---|
2351 | | - u32 lstatus; |
---|
2352 | | - skb_frag_t *frag; |
---|
2353 | | - int i, rq = 0; |
---|
2354 | | - int do_tstamp, do_csum, do_vlan; |
---|
2355 | | - u32 bufaddr; |
---|
2356 | | - unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; |
---|
2357 | | - |
---|
2358 | | - rq = skb->queue_mapping; |
---|
2359 | | - tx_queue = priv->tx_queue[rq]; |
---|
2360 | | - txq = netdev_get_tx_queue(dev, rq); |
---|
2361 | | - base = tx_queue->tx_bd_base; |
---|
2362 | | - regs = tx_queue->grp->regs; |
---|
2363 | | - |
---|
2364 | | - do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); |
---|
2365 | | - do_vlan = skb_vlan_tag_present(skb); |
---|
2366 | | - do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
---|
2367 | | - priv->hwts_tx_en; |
---|
2368 | | - |
---|
2369 | | - if (do_csum || do_vlan) |
---|
2370 | | - fcb_len = GMAC_FCB_LEN; |
---|
2371 | | - |
---|
2372 | | - /* check if time stamp should be generated */ |
---|
2373 | | - if (unlikely(do_tstamp)) |
---|
2374 | | - fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
---|
2375 | | - |
---|
2376 | | - /* make space for additional header when fcb is needed */ |
---|
2377 | | - if (fcb_len) { |
---|
2378 | | - if (unlikely(skb_cow_head(skb, fcb_len))) { |
---|
2379 | | - dev->stats.tx_errors++; |
---|
2380 | | - dev_kfree_skb_any(skb); |
---|
2381 | | - return NETDEV_TX_OK; |
---|
2382 | | - } |
---|
2383 | | - } |
---|
2384 | | - |
---|
2385 | | - /* total number of fragments in the SKB */ |
---|
2386 | | - nr_frags = skb_shinfo(skb)->nr_frags; |
---|
2387 | | - |
---|
2388 | | - /* calculate the required number of TxBDs for this skb */ |
---|
2389 | | - if (unlikely(do_tstamp)) |
---|
2390 | | - nr_txbds = nr_frags + 2; |
---|
2391 | | - else |
---|
2392 | | - nr_txbds = nr_frags + 1; |
---|
2393 | | - |
---|
2394 | | - /* check if there is space to queue this packet */ |
---|
2395 | | - if (nr_txbds > tx_queue->num_txbdfree) { |
---|
2396 | | - /* no space, stop the queue */ |
---|
2397 | | - netif_tx_stop_queue(txq); |
---|
2398 | | - dev->stats.tx_fifo_errors++; |
---|
2399 | | - return NETDEV_TX_BUSY; |
---|
2400 | | - } |
---|
2401 | | - |
---|
2402 | | - /* Update transmit stats */ |
---|
2403 | | - bytes_sent = skb->len; |
---|
2404 | | - tx_queue->stats.tx_bytes += bytes_sent; |
---|
2405 | | - /* keep Tx bytes on wire for BQL accounting */ |
---|
2406 | | - GFAR_CB(skb)->bytes_sent = bytes_sent; |
---|
2407 | | - tx_queue->stats.tx_packets++; |
---|
2408 | | - |
---|
2409 | | - txbdp = txbdp_start = tx_queue->cur_tx; |
---|
2410 | | - lstatus = be32_to_cpu(txbdp->lstatus); |
---|
2411 | | - |
---|
2412 | | - /* Add TxPAL between FCB and frame if required */ |
---|
2413 | | - if (unlikely(do_tstamp)) { |
---|
2414 | | - skb_push(skb, GMAC_TXPAL_LEN); |
---|
2415 | | - memset(skb->data, 0, GMAC_TXPAL_LEN); |
---|
2416 | | - } |
---|
2417 | | - |
---|
2418 | | - /* Add TxFCB if required */ |
---|
2419 | | - if (fcb_len) { |
---|
2420 | | - fcb = gfar_add_fcb(skb); |
---|
2421 | | - lstatus |= BD_LFLAG(TXBD_TOE); |
---|
2422 | | - } |
---|
2423 | | - |
---|
2424 | | - /* Set up checksumming */ |
---|
2425 | | - if (do_csum) { |
---|
2426 | | - gfar_tx_checksum(skb, fcb, fcb_len); |
---|
2427 | | - |
---|
2428 | | - if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || |
---|
2429 | | - unlikely(gfar_csum_errata_76(priv, skb->len))) { |
---|
2430 | | - __skb_pull(skb, GMAC_FCB_LEN); |
---|
2431 | | - skb_checksum_help(skb); |
---|
2432 | | - if (do_vlan || do_tstamp) { |
---|
2433 | | - /* put back a new fcb for vlan/tstamp TOE */ |
---|
2434 | | - fcb = gfar_add_fcb(skb); |
---|
2435 | | - } else { |
---|
2436 | | - /* Tx TOE not used */ |
---|
2437 | | - lstatus &= ~(BD_LFLAG(TXBD_TOE)); |
---|
2438 | | - fcb = NULL; |
---|
2439 | | - } |
---|
2440 | | - } |
---|
2441 | | - } |
---|
2442 | | - |
---|
2443 | | - if (do_vlan) |
---|
2444 | | - gfar_tx_vlan(skb, fcb); |
---|
2445 | | - |
---|
2446 | | - bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), |
---|
2447 | | - DMA_TO_DEVICE); |
---|
2448 | | - if (unlikely(dma_mapping_error(priv->dev, bufaddr))) |
---|
2449 | | - goto dma_map_err; |
---|
2450 | | - |
---|
2451 | | - txbdp_start->bufPtr = cpu_to_be32(bufaddr); |
---|
2452 | | - |
---|
2453 | | - /* Time stamp insertion requires one additional TxBD */ |
---|
2454 | | - if (unlikely(do_tstamp)) |
---|
2455 | | - txbdp_tstamp = txbdp = next_txbd(txbdp, base, |
---|
2456 | | - tx_queue->tx_ring_size); |
---|
2457 | | - |
---|
2458 | | - if (likely(!nr_frags)) { |
---|
2459 | | - if (likely(!do_tstamp)) |
---|
2460 | | - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
---|
2461 | | - } else { |
---|
2462 | | - u32 lstatus_start = lstatus; |
---|
2463 | | - |
---|
2464 | | - /* Place the fragment addresses and lengths into the TxBDs */ |
---|
2465 | | - frag = &skb_shinfo(skb)->frags[0]; |
---|
2466 | | - for (i = 0; i < nr_frags; i++, frag++) { |
---|
2467 | | - unsigned int size; |
---|
2468 | | - |
---|
2469 | | - /* Point at the next BD, wrapping as needed */ |
---|
2470 | | - txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
2471 | | - |
---|
2472 | | - size = skb_frag_size(frag); |
---|
2473 | | - |
---|
2474 | | - lstatus = be32_to_cpu(txbdp->lstatus) | size | |
---|
2475 | | - BD_LFLAG(TXBD_READY); |
---|
2476 | | - |
---|
2477 | | - /* Handle the last BD specially */ |
---|
2478 | | - if (i == nr_frags - 1) |
---|
2479 | | - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
---|
2480 | | - |
---|
2481 | | - bufaddr = skb_frag_dma_map(priv->dev, frag, 0, |
---|
2482 | | - size, DMA_TO_DEVICE); |
---|
2483 | | - if (unlikely(dma_mapping_error(priv->dev, bufaddr))) |
---|
2484 | | - goto dma_map_err; |
---|
2485 | | - |
---|
2486 | | - /* set the TxBD length and buffer pointer */ |
---|
2487 | | - txbdp->bufPtr = cpu_to_be32(bufaddr); |
---|
2488 | | - txbdp->lstatus = cpu_to_be32(lstatus); |
---|
2489 | | - } |
---|
2490 | | - |
---|
2491 | | - lstatus = lstatus_start; |
---|
2492 | | - } |
---|
2493 | | - |
---|
2494 | | - /* If time stamping is requested, one additional TxBD must be set up. The
---|
2495 | | - * first TxBD points to the FCB and must have a data length of |
---|
2496 | | - * GMAC_FCB_LEN. The second TxBD points to the actual frame data with |
---|
2497 | | - * the full frame length. |
---|
2498 | | - */ |
---|
2499 | | - if (unlikely(do_tstamp)) { |
---|
2500 | | - u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); |
---|
2501 | | - |
---|
2502 | | - bufaddr = be32_to_cpu(txbdp_start->bufPtr); |
---|
2503 | | - bufaddr += fcb_len; |
---|
2504 | | - |
---|
2505 | | - lstatus_ts |= BD_LFLAG(TXBD_READY) | |
---|
2506 | | - (skb_headlen(skb) - fcb_len); |
---|
2507 | | - if (!nr_frags) |
---|
2508 | | - lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
---|
2509 | | - |
---|
2510 | | - txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); |
---|
2511 | | - txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); |
---|
2512 | | - lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
---|
2513 | | - |
---|
2514 | | - /* Setup tx hardware time stamping */ |
---|
2515 | | - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
---|
2516 | | - fcb->ptp = 1; |
---|
2517 | | - } else { |
---|
2518 | | - lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
---|
2519 | | - } |
---|
2520 | | - |
---|
2521 | | - netdev_tx_sent_queue(txq, bytes_sent); |
---|
2522 | | - |
---|
2523 | | - gfar_wmb(); |
---|
2524 | | - |
---|
2525 | | - txbdp_start->lstatus = cpu_to_be32(lstatus); |
---|
2526 | | - |
---|
2527 | | - gfar_wmb(); /* force lstatus write before tx_skbuff */ |
---|
2528 | | - |
---|
2529 | | - tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
---|
2530 | | - |
---|
2531 | | - /* Update the current skb pointer to the next entry we will use |
---|
2532 | | - * (wrapping if necessary) |
---|
2533 | | - */ |
---|
2534 | | - tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
---|
2535 | | - TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
---|
2536 | | - |
---|
2537 | | - tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
2538 | | - |
---|
2539 | | - /* We can work in parallel with gfar_clean_tx_ring(), except |
---|
2540 | | - * when modifying num_txbdfree. Note that we didn't grab the lock |
---|
2541 | | - * when we were reading the num_txbdfree and checking for available |
---|
2542 | | - * space; that's because outside of this function it can only grow.
---|
2543 | | - */ |
---|
2544 | | - spin_lock_bh(&tx_queue->txlock); |
---|
2545 | | - /* reduce TxBD free count */ |
---|
2546 | | - tx_queue->num_txbdfree -= (nr_txbds); |
---|
2547 | | - spin_unlock_bh(&tx_queue->txlock); |
---|
2548 | | - |
---|
2549 | | - /* If the next BD still needs to be cleaned up, then the bds |
---|
2550 | | - * are full. We need to tell the kernel to stop sending us stuff. |
---|
2551 | | - */ |
---|
2552 | | - if (!tx_queue->num_txbdfree) { |
---|
2553 | | - netif_tx_stop_queue(txq); |
---|
2554 | | - |
---|
2555 | | - dev->stats.tx_fifo_errors++; |
---|
2556 | | - } |
---|
2557 | | - |
---|
2558 | | - /* Tell the DMA to go go go */ |
---|
2559 | | - gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
---|
2560 | | - |
---|
2561 | | - return NETDEV_TX_OK; |
---|
2562 | | - |
---|
2563 | | -dma_map_err: |
---|
2564 | | - txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); |
---|
2565 | | - if (do_tstamp) |
---|
2566 | | - txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
2567 | | - for (i = 0; i < nr_frags; i++) { |
---|
2568 | | - lstatus = be32_to_cpu(txbdp->lstatus); |
---|
2569 | | - if (!(lstatus & BD_LFLAG(TXBD_READY))) |
---|
2570 | | - break; |
---|
2571 | | - |
---|
2572 | | - lstatus &= ~BD_LFLAG(TXBD_READY); |
---|
2573 | | - txbdp->lstatus = cpu_to_be32(lstatus); |
---|
2574 | | - bufaddr = be32_to_cpu(txbdp->bufPtr); |
---|
2575 | | - dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), |
---|
2576 | | - DMA_TO_DEVICE); |
---|
2577 | | - txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
---|
2578 | | - } |
---|
2579 | | - gfar_wmb(); |
---|
2580 | | - dev_kfree_skb_any(skb); |
---|
2581 | | - return NETDEV_TX_OK; |
---|
2582 | | -} |
---|
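The gfar_wmb() calls above order the descriptor field writes against each
other and against the TSTAT doorbell. A sketch of the helper as defined in
gianfar.h (eieio() on PowerPC, a generic write barrier elsewhere):

static inline void gfar_wmb(void)
{
#if defined(CONFIG_PPC)
	eieio();	/* lighter than wmb() for this ordering on PPC */
#else
	wmb();		/* order BD/FCB field writes */
#endif
}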
2583 | | - |
---|
2584 | | -/* Stops the kernel queue, and halts the controller */ |
---|
2585 | | -static int gfar_close(struct net_device *dev) |
---|
2586 | | -{ |
---|
2587 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
2588 | | - |
---|
2589 | | - cancel_work_sync(&priv->reset_task); |
---|
2590 | | - stop_gfar(dev); |
---|
2591 | | - |
---|
2592 | | - /* Disconnect from the PHY */ |
---|
2593 | | - phy_disconnect(dev->phydev); |
---|
2594 | | - |
---|
2595 | | - gfar_free_irq(priv); |
---|
2596 | | - |
---|
2597 | | - return 0; |
---|
2598 | | -} |
---|
2599 | | - |
---|
2600 | | -/* Changes the mac address if the controller is not running. */ |
---|
2601 | | -static int gfar_set_mac_address(struct net_device *dev) |
---|
2602 | | -{ |
---|
2603 | | - gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
---|
2604 | | - |
---|
2605 | | - return 0; |
---|
2606 | | -} |
---|
2607 | | - |
---|
2608 | | -static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
---|
2609 | | -{ |
---|
2610 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
2611 | | - |
---|
2612 | | - while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
---|
2613 | | - cpu_relax(); |
---|
2614 | | - |
---|
2615 | | - if (dev->flags & IFF_UP) |
---|
2616 | | - stop_gfar(dev); |
---|
2617 | | - |
---|
2618 | | - dev->mtu = new_mtu; |
---|
2619 | | - |
---|
2620 | | - if (dev->flags & IFF_UP) |
---|
2621 | | - startup_gfar(dev); |
---|
2622 | | - |
---|
2623 | | - clear_bit_unlock(GFAR_RESETTING, &priv->state); |
---|
2624 | | - |
---|
2625 | | - return 0; |
---|
2626 | | -} |
---|
2627 | | - |
---|
2628 | | -void reset_gfar(struct net_device *ndev) |
---|
2629 | | -{ |
---|
2630 | | - struct gfar_private *priv = netdev_priv(ndev); |
---|
2631 | | - |
---|
2632 | | - while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
---|
2633 | | - cpu_relax(); |
---|
2634 | | - |
---|
2635 | | - stop_gfar(ndev); |
---|
2636 | | - startup_gfar(ndev); |
---|
2637 | | - |
---|
2638 | | - clear_bit_unlock(GFAR_RESETTING, &priv->state); |
---|
2639 | | -} |
---|
2640 | | - |
---|
2641 | | -/* gfar_reset_task gets scheduled when a packet has not been |
---|
2642 | | - * transmitted after a set amount of time. |
---|
2643 | | - * For now, assume that clearing out all the structures and
---|
2644 | | - * starting over will fix the problem. |
---|
2645 | | - */ |
---|
2646 | | -static void gfar_reset_task(struct work_struct *work) |
---|
2647 | | -{ |
---|
2648 | | - struct gfar_private *priv = container_of(work, struct gfar_private, |
---|
2649 | | - reset_task); |
---|
2650 | | - reset_gfar(priv->ndev); |
---|
2651 | | -} |
---|
2652 | | - |
---|
2653 | | -static void gfar_timeout(struct net_device *dev) |
---|
2654 | | -{ |
---|
2655 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
2656 | | - |
---|
2657 | | - dev->stats.tx_errors++; |
---|
2658 | | - schedule_work(&priv->reset_task); |
---|
2659 | | -} |
---|
2660 | | - |
---|
2661 | | -/* Reclaim Tx descriptors for completed frames (runs from the Tx NAPI poll) */
---|
2662 | | -static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
---|
2663 | | -{ |
---|
2664 | | - struct net_device *dev = tx_queue->dev; |
---|
2665 | | - struct netdev_queue *txq; |
---|
2666 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
2667 | | - struct txbd8 *bdp, *next = NULL; |
---|
2668 | | - struct txbd8 *lbdp = NULL; |
---|
2669 | | - struct txbd8 *base = tx_queue->tx_bd_base; |
---|
2670 | | - struct sk_buff *skb; |
---|
2671 | | - int skb_dirtytx; |
---|
2672 | | - int tx_ring_size = tx_queue->tx_ring_size; |
---|
2673 | | - int frags = 0, nr_txbds = 0; |
---|
2674 | | - int i; |
---|
2675 | | - int howmany = 0; |
---|
2676 | | - int tqi = tx_queue->qindex; |
---|
2677 | | - unsigned int bytes_sent = 0; |
---|
2678 | | - u32 lstatus; |
---|
2679 | | - size_t buflen; |
---|
2680 | | - |
---|
2681 | | - txq = netdev_get_tx_queue(dev, tqi); |
---|
2682 | | - bdp = tx_queue->dirty_tx; |
---|
2683 | | - skb_dirtytx = tx_queue->skb_dirtytx; |
---|
2684 | | - |
---|
2685 | | - while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
---|
2686 | | - bool do_tstamp; |
---|
2687 | | - |
---|
2688 | | - do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
---|
2689 | | - priv->hwts_tx_en; |
---|
2690 | | - |
---|
2691 | | - frags = skb_shinfo(skb)->nr_frags; |
---|
2692 | | - |
---|
2693 | | - /* When time stamping, one additional TxBD must be freed. |
---|
2694 | | - * Also, we need to dma_unmap_single() the TxPAL. |
---|
2695 | | - */ |
---|
2696 | | - if (unlikely(do_tstamp)) |
---|
2697 | | - nr_txbds = frags + 2; |
---|
2698 | | - else |
---|
2699 | | - nr_txbds = frags + 1; |
---|
2700 | | - |
---|
2701 | | - lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); |
---|
2702 | | - |
---|
2703 | | - lstatus = be32_to_cpu(lbdp->lstatus); |
---|
2704 | | - |
---|
2705 | | - /* Only clean completed frames */ |
---|
2706 | | - if ((lstatus & BD_LFLAG(TXBD_READY)) && |
---|
2707 | | - (lstatus & BD_LENGTH_MASK)) |
---|
2708 | | - break; |
---|
2709 | | - |
---|
2710 | | - if (unlikely(do_tstamp)) { |
---|
2711 | | - next = next_txbd(bdp, base, tx_ring_size); |
---|
2712 | | - buflen = be16_to_cpu(next->length) + |
---|
2713 | | - GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
---|
2714 | | - } else |
---|
2715 | | - buflen = be16_to_cpu(bdp->length); |
---|
2716 | | - |
---|
2717 | | - dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), |
---|
2718 | | - buflen, DMA_TO_DEVICE); |
---|
2719 | | - |
---|
2720 | | - if (unlikely(do_tstamp)) { |
---|
2721 | | - struct skb_shared_hwtstamps shhwtstamps; |
---|
2722 | | - u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & |
---|
2723 | | - ~0x7UL); |
---|
2724 | | - |
---|
2725 | | - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
---|
2726 | | - shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); |
---|
2727 | | - skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
---|
2728 | | - skb_tstamp_tx(skb, &shhwtstamps); |
---|
2729 | | - gfar_clear_txbd_status(bdp); |
---|
2730 | | - bdp = next; |
---|
2731 | | - } |
---|
2732 | | - |
---|
2733 | | - gfar_clear_txbd_status(bdp); |
---|
2734 | | - bdp = next_txbd(bdp, base, tx_ring_size); |
---|
2735 | | - |
---|
2736 | | - for (i = 0; i < frags; i++) { |
---|
2737 | | - dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), |
---|
2738 | | - be16_to_cpu(bdp->length), |
---|
2739 | | - DMA_TO_DEVICE); |
---|
2740 | | - gfar_clear_txbd_status(bdp); |
---|
2741 | | - bdp = next_txbd(bdp, base, tx_ring_size); |
---|
2742 | | - } |
---|
2743 | | - |
---|
2744 | | - bytes_sent += GFAR_CB(skb)->bytes_sent; |
---|
2745 | | - |
---|
2746 | | - dev_kfree_skb_any(skb); |
---|
2747 | | - |
---|
2748 | | - tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
---|
2749 | | - |
---|
2750 | | - skb_dirtytx = (skb_dirtytx + 1) & |
---|
2751 | | - TX_RING_MOD_MASK(tx_ring_size); |
---|
2752 | | - |
---|
2753 | | - howmany++; |
---|
2754 | | - spin_lock(&tx_queue->txlock); |
---|
2755 | | - tx_queue->num_txbdfree += nr_txbds; |
---|
2756 | | - spin_unlock(&tx_queue->txlock); |
---|
2757 | | - } |
---|
2758 | | - |
---|
2759 | | - /* If we freed a buffer, we can restart transmission, if necessary */ |
---|
2760 | | - if (tx_queue->num_txbdfree && |
---|
2761 | | - netif_tx_queue_stopped(txq) && |
---|
2762 | | - !(test_bit(GFAR_DOWN, &priv->state))) |
---|
2763 | | - netif_wake_subqueue(priv->ndev, tqi); |
---|
2764 | | - |
---|
2765 | | - /* Update dirty indicators */ |
---|
2766 | | - tx_queue->skb_dirtytx = skb_dirtytx; |
---|
2767 | | - tx_queue->dirty_tx = bdp; |
---|
2768 | | - |
---|
2769 | | - netdev_tx_completed_queue(txq, howmany, bytes_sent); |
---|
2770 | | -} |
---|
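gfar_clear_txbd_status(), used throughout the loop above, must preserve the
WRAP flag because WRAP describes the ring geometry rather than the state of
the completed frame. A sketch of the helper from gianfar.h:

static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
{
	u32 lstatus = be32_to_cpu(bdp->lstatus);

	lstatus &= BD_LFLAG(TXBD_WRAP);	/* keep only the wrap marker */
	bdp->lstatus = cpu_to_be32(lstatus);
}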
2771 | | - |
---|
2772 | | -static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) |
---|
2773 | | -{ |
---|
2774 | | - struct page *page; |
---|
2775 | | - dma_addr_t addr; |
---|
2776 | | - |
---|
2777 | | - page = dev_alloc_page(); |
---|
2778 | | - if (unlikely(!page)) |
---|
2779 | | - return false; |
---|
2780 | | - |
---|
2781 | | - addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
---|
2782 | | - if (unlikely(dma_mapping_error(rxq->dev, addr))) { |
---|
2783 | | - __free_page(page); |
---|
2784 | | - |
---|
2785 | | - return false; |
---|
2786 | | - } |
---|
2787 | | - |
---|
2788 | | - rxb->dma = addr; |
---|
2789 | | - rxb->page = page; |
---|
2790 | | - rxb->page_offset = 0; |
---|
2791 | | - |
---|
2792 | | - return true; |
---|
2793 | | -} |
---|
2794 | | - |
---|
2795 | | -static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) |
---|
2796 | | -{ |
---|
2797 | | - struct gfar_private *priv = netdev_priv(rx_queue->ndev); |
---|
2798 | | - struct gfar_extra_stats *estats = &priv->extra_stats; |
---|
2799 | | - |
---|
2800 | | - netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); |
---|
2801 | | - atomic64_inc(&estats->rx_alloc_err); |
---|
2802 | | -} |
---|
2803 | | - |
---|
2804 | | -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, |
---|
2805 | | - int alloc_cnt) |
---|
2806 | | -{ |
---|
2807 | | - struct rxbd8 *bdp; |
---|
2808 | | - struct gfar_rx_buff *rxb; |
---|
2809 | | - int i; |
---|
2810 | | - |
---|
2811 | | - i = rx_queue->next_to_use; |
---|
2812 | | - bdp = &rx_queue->rx_bd_base[i]; |
---|
2813 | | - rxb = &rx_queue->rx_buff[i]; |
---|
2814 | | - |
---|
2815 | | - while (alloc_cnt--) { |
---|
2816 | | - /* try reuse page */ |
---|
2817 | | - if (unlikely(!rxb->page)) { |
---|
2818 | | - if (unlikely(!gfar_new_page(rx_queue, rxb))) { |
---|
2819 | | - gfar_rx_alloc_err(rx_queue); |
---|
2820 | | - break; |
---|
2821 | | - } |
---|
2822 | | - } |
---|
2823 | | - |
---|
2824 | | - /* Setup the new RxBD */ |
---|
2825 | | - gfar_init_rxbdp(rx_queue, bdp, |
---|
2826 | | - rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); |
---|
2827 | | - |
---|
2828 | | - /* Update to the next pointer */ |
---|
2829 | | - bdp++; |
---|
2830 | | - rxb++; |
---|
2831 | | - |
---|
2832 | | - if (unlikely(++i == rx_queue->rx_ring_size)) { |
---|
2833 | | - i = 0; |
---|
2834 | | - bdp = rx_queue->rx_bd_base; |
---|
2835 | | - rxb = rx_queue->rx_buff; |
---|
2836 | | - } |
---|
2837 | | - } |
---|
2838 | | - |
---|
2839 | | - rx_queue->next_to_use = i; |
---|
2840 | | - rx_queue->next_to_alloc = i; |
---|
2841 | | -} |
---|
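gfar_init_rxbdp(), called above, hands a buffer back to the hardware. A
sketch of it (matching the driver's definition elsewhere in this file):
the descriptor is marked EMPTY (and INTERRUPT), WRAP is set only on the
ring's last entry, and a barrier keeps bufPtr visible before EMPTY:

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue,
			    struct rxbd8 *bdp, dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();	/* bufPtr must land before EMPTY is set */

	bdp->lstatus = cpu_to_be32(lstatus);
}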
2842 | | - |
---|
2843 | | -static void count_errors(u32 lstatus, struct net_device *ndev) |
---|
2844 | | -{ |
---|
2845 | | - struct gfar_private *priv = netdev_priv(ndev); |
---|
2846 | | - struct net_device_stats *stats = &ndev->stats; |
---|
2847 | | - struct gfar_extra_stats *estats = &priv->extra_stats; |
---|
2848 | | - |
---|
2849 | | - /* If the packet was truncated, none of the other errors matter */ |
---|
2850 | | - if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { |
---|
2851 | | - stats->rx_length_errors++; |
---|
2852 | | - |
---|
2853 | | - atomic64_inc(&estats->rx_trunc); |
---|
2854 | | - |
---|
2855 | | - return; |
---|
2856 | | - } |
---|
2857 | | - /* Count the errors, if there were any */ |
---|
2858 | | - if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { |
---|
2859 | | - stats->rx_length_errors++; |
---|
2860 | | - |
---|
2861 | | - if (lstatus & BD_LFLAG(RXBD_LARGE)) |
---|
2862 | | - atomic64_inc(&estats->rx_large); |
---|
2863 | | - else |
---|
2864 | | - atomic64_inc(&estats->rx_short); |
---|
2865 | | - } |
---|
2866 | | - if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { |
---|
2867 | | - stats->rx_frame_errors++; |
---|
2868 | | - atomic64_inc(&estats->rx_nonoctet); |
---|
2869 | | - } |
---|
2870 | | - if (lstatus & BD_LFLAG(RXBD_CRCERR)) { |
---|
2871 | | - atomic64_inc(&estats->rx_crcerr); |
---|
2872 | | - stats->rx_crc_errors++; |
---|
2873 | | - } |
---|
2874 | | - if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { |
---|
2875 | | - atomic64_inc(&estats->rx_overrun); |
---|
2876 | | - stats->rx_over_errors++; |
---|
2877 | | - } |
---|
2878 | | -} |
---|
2879 | | - |
---|
2880 | | -irqreturn_t gfar_receive(int irq, void *grp_id) |
---|
2881 | | -{ |
---|
2882 | | - struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
---|
2883 | | - unsigned long flags; |
---|
2884 | | - u32 imask, ievent; |
---|
2885 | | - |
---|
2886 | | - ievent = gfar_read(&grp->regs->ievent); |
---|
2887 | | - |
---|
2888 | | - if (unlikely(ievent & IEVENT_FGPI)) { |
---|
2889 | | - gfar_write(&grp->regs->ievent, IEVENT_FGPI); |
---|
2890 | | - return IRQ_HANDLED; |
---|
2891 | | - } |
---|
2892 | | - |
---|
2893 | | - if (likely(napi_schedule_prep(&grp->napi_rx))) { |
---|
2894 | | - spin_lock_irqsave(&grp->grplock, flags); |
---|
2895 | | - imask = gfar_read(&grp->regs->imask); |
---|
2896 | | - imask &= IMASK_RX_DISABLED; |
---|
2897 | | - gfar_write(&grp->regs->imask, imask); |
---|
2898 | | - spin_unlock_irqrestore(&grp->grplock, flags); |
---|
2899 | | - __napi_schedule(&grp->napi_rx); |
---|
2900 | | - } else { |
---|
2901 | | - /* Clear IEVENT, so interrupts aren't called again |
---|
2902 | | - * because of the packets that have already arrived. |
---|
2903 | | - */ |
---|
2904 | | - gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); |
---|
2905 | | - } |
---|
2906 | | - |
---|
2907 | | - return IRQ_HANDLED; |
---|
2908 | | -} |
---|
2909 | | - |
---|
2910 | | -/* Interrupt Handler for Transmit complete */ |
---|
2911 | | -static irqreturn_t gfar_transmit(int irq, void *grp_id) |
---|
2912 | | -{ |
---|
2913 | | - struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
---|
2914 | | - unsigned long flags; |
---|
2915 | | - u32 imask; |
---|
2916 | | - |
---|
2917 | | - if (likely(napi_schedule_prep(&grp->napi_tx))) { |
---|
2918 | | - spin_lock_irqsave(&grp->grplock, flags); |
---|
2919 | | - imask = gfar_read(&grp->regs->imask); |
---|
2920 | | - imask &= IMASK_TX_DISABLED; |
---|
2921 | | - gfar_write(&grp->regs->imask, imask); |
---|
2922 | | - spin_unlock_irqrestore(&grp->grplock, flags); |
---|
2923 | | - __napi_schedule(&grp->napi_tx); |
---|
2924 | | - } else { |
---|
2925 | | - /* Clear IEVENT, so interrupts aren't called again |
---|
2926 | | - * because of the packets that have already arrived. |
---|
2927 | | - */ |
---|
2928 | | - gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); |
---|
2929 | | - } |
---|
2930 | | - |
---|
2931 | | - return IRQ_HANDLED; |
---|
2932 | | -} |
---|
2933 | | - |
---|
2934 | | -static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, |
---|
2935 | | - struct sk_buff *skb, bool first) |
---|
2936 | | -{ |
---|
2937 | | - int size = lstatus & BD_LENGTH_MASK; |
---|
2938 | | - struct page *page = rxb->page; |
---|
2939 | | - |
---|
2940 | | - if (likely(first)) { |
---|
2941 | | - skb_put(skb, size); |
---|
2942 | | - } else { |
---|
2943 | | - /* the last fragment's length contains the full frame length */
---|
2944 | | - if (lstatus & BD_LFLAG(RXBD_LAST)) |
---|
2945 | | - size -= skb->len; |
---|
2946 | | - |
---|
2947 | | - WARN(size < 0, "gianfar: rx fragment size underflow"); |
---|
2948 | | - if (size < 0) |
---|
2949 | | - return false; |
---|
2950 | | - |
---|
2951 | | - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, |
---|
2952 | | - rxb->page_offset + RXBUF_ALIGNMENT, |
---|
2953 | | - size, GFAR_RXB_TRUESIZE); |
---|
2954 | | - } |
---|
2955 | | - |
---|
2956 | | - /* try reuse page */ |
---|
2957 | | - if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) |
---|
2958 | | - return false; |
---|
2959 | | - |
---|
2960 | | - /* change offset to the other half */ |
---|
2961 | | - rxb->page_offset ^= GFAR_RXB_TRUESIZE; |
---|
2962 | | - |
---|
2963 | | - page_ref_inc(page); |
---|
2964 | | - |
---|
2965 | | - return true; |
---|
2966 | | -} |
---|
2967 | | - |
---|
2968 | | -static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, |
---|
2969 | | - struct gfar_rx_buff *old_rxb) |
---|
2970 | | -{ |
---|
2971 | | - struct gfar_rx_buff *new_rxb; |
---|
2972 | | - u16 nta = rxq->next_to_alloc; |
---|
2973 | | - |
---|
2974 | | - new_rxb = &rxq->rx_buff[nta]; |
---|
2975 | | - |
---|
2976 | | - /* find next buf that can reuse a page */ |
---|
2977 | | - nta++; |
---|
2978 | | - rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; |
---|
2979 | | - |
---|
2980 | | - /* copy page reference */ |
---|
2981 | | - *new_rxb = *old_rxb; |
---|
2982 | | - |
---|
2983 | | - /* sync for use by the device */ |
---|
2984 | | - dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, |
---|
2985 | | - old_rxb->page_offset, |
---|
2986 | | - GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); |
---|
2987 | | -} |
---|
2988 | | - |
---|
2989 | | -static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, |
---|
2990 | | - u32 lstatus, struct sk_buff *skb) |
---|
2991 | | -{ |
---|
2992 | | - struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; |
---|
2993 | | - struct page *page = rxb->page; |
---|
2994 | | - bool first = false; |
---|
2995 | | - |
---|
2996 | | - if (likely(!skb)) { |
---|
2997 | | - void *buff_addr = page_address(page) + rxb->page_offset; |
---|
2998 | | - |
---|
2999 | | - skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); |
---|
3000 | | - if (unlikely(!skb)) { |
---|
3001 | | - gfar_rx_alloc_err(rx_queue); |
---|
3002 | | - return NULL; |
---|
3003 | | - } |
---|
3004 | | - skb_reserve(skb, RXBUF_ALIGNMENT); |
---|
3005 | | - first = true; |
---|
3006 | | - } |
---|
3007 | | - |
---|
3008 | | - dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, |
---|
3009 | | - GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); |
---|
3010 | | - |
---|
3011 | | - if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { |
---|
3012 | | - /* reuse the free half of the page */ |
---|
3013 | | - gfar_reuse_rx_page(rx_queue, rxb); |
---|
3014 | | - } else { |
---|
3015 | | - /* page cannot be reused, unmap it */ |
---|
3016 | | - dma_unmap_page(rx_queue->dev, rxb->dma, |
---|
3017 | | - PAGE_SIZE, DMA_FROM_DEVICE); |
---|
3018 | | - } |
---|
3019 | | - |
---|
3020 | | - /* clear rxb content */ |
---|
3021 | | - rxb->page = NULL; |
---|
3022 | | - |
---|
3023 | | - return skb; |
---|
3024 | | -} |
---|
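
gfar_get_next_rxbuff() builds the skb zero-copy: build_skb() wraps the DMA buffer that was just synced for the CPU, and skb_reserve() skips the alignment headroom so skb->data lands on the frame. A hedged sketch of that idiom in isolation (kernel context assumed; everything except build_skb/skb_reserve/skb_put is a placeholder):

    #include <linux/skbuff.h>

    static struct sk_buff *rx_buf_to_skb(void *buf, unsigned int frag_size,
                                         unsigned int headroom,
                                         unsigned int frame_len)
    {
        /* wrap the existing buffer in an skb; no data is copied */
        struct sk_buff *skb = build_skb(buf, frag_size);

        if (unlikely(!skb))
            return NULL;

        skb_reserve(skb, headroom); /* skip alignment padding */
        skb_put(skb, frame_len);    /* expose the received bytes */
        return skb;
    }
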
3025 | | - |
---|
3026 | | -static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
---|
3027 | | -{ |
---|
3028 | | - /* If valid headers were found, and valid sums |
---|
3029 | | - * were verified, then we tell the kernel that no |
---|
3030 | | - * checksumming is necessary. Otherwise, the stack will verify it.
---|
3031 | | - */ |
---|
3032 | | - if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == |
---|
3033 | | - (RXFCB_CIP | RXFCB_CTU)) |
---|
3034 | | - skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
3035 | | - else |
---|
3036 | | - skb_checksum_none_assert(skb); |
---|
3037 | | -} |
---|
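
Note the mask comparison above: RXFCB_CSUM_MASK covers both bits, so the test succeeds only when the hardware verified the IP header checksum (RXFCB_CIP) and the TCP/UDP checksum (RXFCB_CTU) together. A tiny demonstration of the both-bits-required test (the flag values here are hypothetical, for illustration only):

    #include <stdio.h>

    /* hypothetical bit values, not the driver's real constants */
    #define RXFCB_CIP 0x4000 /* IP header checksum verified */
    #define RXFCB_CTU 0x2000 /* TCP/UDP checksum verified */
    #define RXFCB_CSUM_MASK (RXFCB_CIP | RXFCB_CTU)

    static int csum_unnecessary(unsigned short flags)
    {
        /* both bits must be set; one alone is not enough */
        return (flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               csum_unnecessary(RXFCB_CIP | RXFCB_CTU), /* 1 */
               csum_unnecessary(RXFCB_CIP),             /* 0 */
               csum_unnecessary(0));                    /* 0 */
        return 0;
    }
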
3038 | | - |
---|
3039 | | -/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
---|
3040 | | -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) |
---|
3041 | | -{ |
---|
3042 | | - struct gfar_private *priv = netdev_priv(ndev); |
---|
3043 | | - struct rxfcb *fcb = NULL; |
---|
3044 | | - |
---|
3045 | | - /* the FCB, if present, is at the beginning of the frame */
---|
3046 | | - fcb = (struct rxfcb *)skb->data; |
---|
3047 | | - |
---|
3048 | | - /* Remove the FCB from the skb |
---|
3049 | | - * Remove the padded bytes, if there are any |
---|
3050 | | - */ |
---|
3051 | | - if (priv->uses_rxfcb) |
---|
3052 | | - skb_pull(skb, GMAC_FCB_LEN); |
---|
3053 | | - |
---|
3054 | | - /* Get receive timestamp from the skb */ |
---|
3055 | | - if (priv->hwts_rx_en) { |
---|
3056 | | - struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
---|
3057 | | - u64 *ns = (u64 *) skb->data; |
---|
3058 | | - |
---|
3059 | | - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
---|
3060 | | - shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); |
---|
3061 | | - } |
---|
3062 | | - |
---|
3063 | | - if (priv->padding) |
---|
3064 | | - skb_pull(skb, priv->padding); |
---|
3065 | | - |
---|
3066 | | - /* Trim off the FCS */ |
---|
3067 | | - pskb_trim(skb, skb->len - ETH_FCS_LEN); |
---|
3068 | | - |
---|
3069 | | - if (ndev->features & NETIF_F_RXCSUM) |
---|
3070 | | - gfar_rx_checksum(skb, fcb); |
---|
3071 | | - |
---|
3072 | | - /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here:
---|
3073 | | - * even if VLAN rx acceleration is disabled, on some chips
---|
3074 | | - * RXFCB_VLN is set pseudo-randomly.
---|
3075 | | - */ |
---|
3076 | | - if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && |
---|
3077 | | - be16_to_cpu(fcb->flags) & RXFCB_VLN) |
---|
3078 | | - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
---|
3079 | | - be16_to_cpu(fcb->vlctl)); |
---|
3080 | | -} |
---|
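
When hwts_rx_en is set, the controller prepends an 8-byte big-endian nanosecond timestamp to the frame data, which the code above converts with be64_to_cpu() and stores via ns_to_ktime(). A standalone sketch of that byte-order conversion (the buffer layout is an assumption carried over from the code above):

    #include <stdint.h>
    #include <stdio.h>

    /* convert the 8 prepended big-endian bytes to a host nanosecond value */
    static uint64_t hwts_from_frame(const uint8_t *data)
    {
        uint64_t ns = 0;
        int i;

        for (i = 0; i < 8; i++) /* big-endian: most significant byte first */
            ns = (ns << 8) | data[i];
        return ns;
    }

    int main(void)
    {
        const uint8_t hdr[8] = { 0, 0, 0, 0, 0, 0, 0x03, 0xe8 };

        printf("timestamp: %llu ns\n",
               (unsigned long long)hwts_from_frame(hdr)); /* 1000 ns */
        return 0;
    }
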
3081 | | - |
---|
3082 | | -/* gfar_clean_rx_ring() -- Processes each frame in the rx ring |
---|
3083 | | - * until the budget/quota has been reached. Returns the number |
---|
3084 | | - * of frames handled |
---|
3085 | | - */ |
---|
3086 | | -int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
---|
3087 | | -{ |
---|
3088 | | - struct net_device *ndev = rx_queue->ndev; |
---|
3089 | | - struct gfar_private *priv = netdev_priv(ndev); |
---|
3090 | | - struct rxbd8 *bdp; |
---|
3091 | | - int i, howmany = 0; |
---|
3092 | | - struct sk_buff *skb = rx_queue->skb; |
---|
3093 | | - int cleaned_cnt = gfar_rxbd_unused(rx_queue); |
---|
3094 | | - unsigned int total_bytes = 0, total_pkts = 0; |
---|
3095 | | - |
---|
3096 | | - /* Get the first full descriptor */ |
---|
3097 | | - i = rx_queue->next_to_clean; |
---|
3098 | | - |
---|
3099 | | - while (rx_work_limit--) { |
---|
3100 | | - u32 lstatus; |
---|
3101 | | - |
---|
3102 | | - if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { |
---|
3103 | | - gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); |
---|
3104 | | - cleaned_cnt = 0; |
---|
3105 | | - } |
---|
3106 | | - |
---|
3107 | | - bdp = &rx_queue->rx_bd_base[i]; |
---|
3108 | | - lstatus = be32_to_cpu(bdp->lstatus); |
---|
3109 | | - if (lstatus & BD_LFLAG(RXBD_EMPTY)) |
---|
3110 | | - break; |
---|
3111 | | - |
---|
3112 | | - /* lost RXBD_LAST descriptor due to overrun */ |
---|
3113 | | - if (skb && |
---|
3114 | | - (lstatus & BD_LFLAG(RXBD_FIRST))) { |
---|
3115 | | - /* discard faulty buffer */ |
---|
3116 | | - dev_kfree_skb(skb); |
---|
3117 | | - skb = NULL; |
---|
3118 | | - rx_queue->stats.rx_dropped++; |
---|
3119 | | - |
---|
3120 | | - /* can continue normally */ |
---|
3121 | | - } |
---|
3122 | | - |
---|
3123 | | - /* order rx buffer descriptor reads */ |
---|
3124 | | - rmb(); |
---|
3125 | | - |
---|
3126 | | - /* fetch next to clean buffer from the ring */ |
---|
3127 | | - skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); |
---|
3128 | | - if (unlikely(!skb)) |
---|
3129 | | - break; |
---|
3130 | | - |
---|
3131 | | - cleaned_cnt++; |
---|
3132 | | - howmany++; |
---|
3133 | | - |
---|
3134 | | - if (unlikely(++i == rx_queue->rx_ring_size)) |
---|
3135 | | - i = 0; |
---|
3136 | | - |
---|
3137 | | - rx_queue->next_to_clean = i; |
---|
3138 | | - |
---|
3139 | | - /* fetch next buffer if not the last in frame */ |
---|
3140 | | - if (!(lstatus & BD_LFLAG(RXBD_LAST))) |
---|
3141 | | - continue; |
---|
3142 | | - |
---|
3143 | | - if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { |
---|
3144 | | - count_errors(lstatus, ndev); |
---|
3145 | | - |
---|
3146 | | - /* discard faulty buffer */ |
---|
3147 | | - dev_kfree_skb(skb); |
---|
3148 | | - skb = NULL; |
---|
3149 | | - rx_queue->stats.rx_dropped++; |
---|
3150 | | - continue; |
---|
3151 | | - } |
---|
3152 | | - |
---|
3153 | | - gfar_process_frame(ndev, skb); |
---|
3154 | | - |
---|
3155 | | - /* Increment the number of packets */ |
---|
3156 | | - total_pkts++; |
---|
3157 | | - total_bytes += skb->len; |
---|
3158 | | - |
---|
3159 | | - skb_record_rx_queue(skb, rx_queue->qindex); |
---|
3160 | | - |
---|
3161 | | - skb->protocol = eth_type_trans(skb, ndev); |
---|
3162 | | - |
---|
3163 | | - /* Send the packet up the stack */ |
---|
3164 | | - napi_gro_receive(&rx_queue->grp->napi_rx, skb); |
---|
3165 | | - |
---|
3166 | | - skb = NULL; |
---|
3167 | | - } |
---|
3168 | | - |
---|
3169 | | - /* Store incomplete frames for completion */ |
---|
3170 | | - rx_queue->skb = skb; |
---|
3171 | | - |
---|
3172 | | - rx_queue->stats.rx_packets += total_pkts; |
---|
3173 | | - rx_queue->stats.rx_bytes += total_bytes; |
---|
3174 | | - |
---|
3175 | | - if (cleaned_cnt) |
---|
3176 | | - gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); |
---|
3177 | | - |
---|
3178 | | - /* Update Last Free RxBD pointer for LFC */ |
---|
3179 | | - if (unlikely(priv->tx_actual_en)) { |
---|
3180 | | - u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); |
---|
3181 | | - |
---|
3182 | | - gfar_write(rx_queue->rfbptr, bdp_dma); |
---|
3183 | | - } |
---|
3184 | | - |
---|
3185 | | - return howmany; |
---|
3186 | | -} |
---|
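
Two idioms in the loop above are worth calling out: the ring index wraps with a compare-and-reset rather than a modulo, and RX buffers are replenished in batches once enough descriptors have been cleaned, instead of one refill per frame. A minimal sketch of both (the batch threshold below is hypothetical; the driver uses GFAR_RX_BUFF_ALLOC):

    #include <stdio.h>

    #define RING_SIZE 256
    #define BATCH     16 /* hypothetical refill threshold */

    int main(void)
    {
        unsigned int next_to_clean = 250, cleaned = 0;
        int frames;

        for (frames = 0; frames < 40; frames++) {
            if (++next_to_clean == RING_SIZE) /* wrap without '%' */
                next_to_clean = 0;
            if (++cleaned >= BATCH) {         /* batched refill */
                printf("refill %u buffers, index now %u\n",
                       cleaned, next_to_clean);
                cleaned = 0;
            }
        }
        if (cleaned) /* top up whatever is left at the end */
            printf("final refill of %u buffers\n", cleaned);
        return 0;
    }
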
3187 | | - |
---|
3188 | | -static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
---|
3189 | | -{ |
---|
3190 | | - struct gfar_priv_grp *gfargrp = |
---|
3191 | | - container_of(napi, struct gfar_priv_grp, napi_rx); |
---|
3192 | | - struct gfar __iomem *regs = gfargrp->regs; |
---|
3193 | | - struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; |
---|
3194 | | - int work_done = 0; |
---|
3195 | | - |
---|
3196 | | - /* Clear IEVENT, so interrupts aren't called again |
---|
3197 | | - * because of the packets that have already arrived |
---|
3198 | | - */ |
---|
3199 | | - gfar_write(&regs->ievent, IEVENT_RX_MASK);
---|
3200 | | - |
---|
3201 | | - work_done = gfar_clean_rx_ring(rx_queue, budget); |
---|
3202 | | - |
---|
3203 | | - if (work_done < budget) { |
---|
3204 | | - u32 imask; |
---|
3205 | | - napi_complete_done(napi, work_done); |
---|
3206 | | - /* Clear the halt bit in RSTAT */ |
---|
3207 | | - gfar_write(&regs->rstat, gfargrp->rstat);
---|
3208 | | - |
---|
3209 | | - spin_lock_irq(&gfargrp->grplock); |
---|
3210 | | - imask = gfar_read(&regs->imask);
---|
3211 | | - imask |= IMASK_RX_DEFAULT;
---|
3212 | | - gfar_write(&regs->imask, imask);
---|
3213 | | - spin_unlock_irq(&gfargrp->grplock); |
---|
3214 | | - } |
---|
3215 | | - |
---|
3216 | | - return work_done; |
---|
3217 | | -} |
---|
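
This is the canonical NAPI contract: when the poll consumes less than its budget it calls napi_complete_done() and re-enables the device interrupt; when the budget is exhausted it returns work_done == budget and leaves the interrupt masked so the core schedules another poll. A hedged skeleton of the contract (my_clean_ring(), my_enable_rx_irq() and struct my_priv are placeholders, not driver API):

    #include <linux/netdevice.h>

    struct my_priv {
        struct napi_struct napi;
    };

    static int my_clean_ring(struct my_priv *priv, int budget);
    static void my_enable_rx_irq(struct my_priv *priv);

    static int my_poll(struct napi_struct *napi, int budget)
    {
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done = my_clean_ring(priv, budget);

        if (work_done < budget) {
            /* ring drained: stop polling and unmask the device IRQ */
            napi_complete_done(napi, work_done);
            my_enable_rx_irq(priv);
        }
        /* returning work_done == budget keeps the poll scheduled */
        return work_done;
    }
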
3218 | | - |
---|
3219 | | -static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
---|
3220 | | -{ |
---|
3221 | | - struct gfar_priv_grp *gfargrp = |
---|
3222 | | - container_of(napi, struct gfar_priv_grp, napi_tx); |
---|
3223 | | - struct gfar __iomem *regs = gfargrp->regs; |
---|
3224 | | - struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; |
---|
3225 | | - u32 imask; |
---|
3226 | | - |
---|
3227 | | - /* Clear IEVENT, so interrupts aren't called again |
---|
3228 | | - * because of the packets that have already arrived |
---|
3229 | | - */ |
---|
3230 | | - gfar_write(&regs->ievent, IEVENT_TX_MASK);
---|
3231 | | - |
---|
3232 | | - /* run Tx cleanup to completion */ |
---|
3233 | | - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) |
---|
3234 | | - gfar_clean_tx_ring(tx_queue); |
---|
3235 | | - |
---|
3236 | | - napi_complete(napi); |
---|
3237 | | - |
---|
3238 | | - spin_lock_irq(&gfargrp->grplock); |
---|
3239 | | - imask = gfar_read(&regs->imask);
---|
3240 | | - imask |= IMASK_TX_DEFAULT;
---|
3241 | | - gfar_write(&regs->imask, imask);
---|
3242 | | - spin_unlock_irq(&gfargrp->grplock); |
---|
3243 | | - |
---|
3244 | | - return 0; |
---|
3245 | | -} |
---|
3246 | | - |
---|
3247 | | -static int gfar_poll_rx(struct napi_struct *napi, int budget) |
---|
3248 | | -{ |
---|
3249 | | - struct gfar_priv_grp *gfargrp = |
---|
3250 | | - container_of(napi, struct gfar_priv_grp, napi_rx); |
---|
3251 | | - struct gfar_private *priv = gfargrp->priv; |
---|
3252 | | - struct gfar __iomem *regs = gfargrp->regs; |
---|
3253 | | - struct gfar_priv_rx_q *rx_queue = NULL; |
---|
3254 | | - int work_done = 0, work_done_per_q = 0; |
---|
3255 | | - int i, budget_per_q = 0; |
---|
3256 | | - unsigned long rstat_rxf; |
---|
3257 | | - int num_act_queues; |
---|
3258 | | - |
---|
3259 | | - /* Clear IEVENT, so interrupts aren't called again |
---|
3260 | | - * because of the packets that have already arrived |
---|
3261 | | - */ |
---|
3262 | | - gfar_write(&regs->ievent, IEVENT_RX_MASK);
---|
3263 | | - |
---|
3264 | | - rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
---|
3265 | | - |
---|
3266 | | - num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); |
---|
3267 | | - if (num_act_queues) |
---|
3268 | | - budget_per_q = budget/num_act_queues; |
---|
3269 | | - |
---|
3270 | | - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
---|
3271 | | - /* skip queue if not active */ |
---|
3272 | | - if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) |
---|
3273 | | - continue; |
---|
3274 | | - |
---|
3275 | | - rx_queue = priv->rx_queue[i]; |
---|
3276 | | - work_done_per_q = |
---|
3277 | | - gfar_clean_rx_ring(rx_queue, budget_per_q); |
---|
3278 | | - work_done += work_done_per_q; |
---|
3279 | | - |
---|
3280 | | - /* finished processing this queue */ |
---|
3281 | | - if (work_done_per_q < budget_per_q) { |
---|
3282 | | - /* clear active queue hw indication */ |
---|
3283 | | - gfar_write(&regs->rstat,
---|
3284 | | - RSTAT_CLEAR_RXF0 >> i); |
---|
3285 | | - num_act_queues--; |
---|
3286 | | - |
---|
3287 | | - if (!num_act_queues) |
---|
3288 | | - break; |
---|
3289 | | - } |
---|
3290 | | - } |
---|
3291 | | - |
---|
3292 | | - if (!num_act_queues) { |
---|
3293 | | - u32 imask; |
---|
3294 | | - napi_complete_done(napi, work_done); |
---|
3295 | | - |
---|
3296 | | - /* Clear the halt bit in RSTAT */ |
---|
3297 | | - gfar_write(&regs->rstat, gfargrp->rstat);
---|
3298 | | - |
---|
3299 | | - spin_lock_irq(&gfargrp->grplock); |
---|
3300 | | - imask = gfar_read(&regs->imask);
---|
3301 | | - imask |= IMASK_RX_DEFAULT;
---|
3302 | | - gfar_write(&regs->imask, imask);
---|
3303 | | - spin_unlock_irq(&gfargrp->grplock); |
---|
3304 | | - } |
---|
3305 | | - |
---|
3306 | | - return work_done; |
---|
3307 | | -} |
---|
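
The multi-queue poll divides its budget evenly over the queues the hardware marks active, with bitmap_weight() counting the set RXF bits, so one busy queue cannot starve the others. A small arithmetic sketch (the compiler's popcount stands in for bitmap_weight):

    #include <stdio.h>

    int main(void)
    {
        unsigned long rstat_rxf = 0xB; /* queues 0, 1 and 3 active */
        int budget = 64;
        int num_act_queues = __builtin_popcountl(rstat_rxf);
        int budget_per_q = num_act_queues ? budget / num_act_queues : 0;

        /* 3 active queues -> 21 frames of budget each */
        printf("%d active queues, %d budget each\n",
               num_act_queues, budget_per_q);
        return 0;
    }
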
3308 | | - |
---|
3309 | | -static int gfar_poll_tx(struct napi_struct *napi, int budget) |
---|
3310 | | -{ |
---|
3311 | | - struct gfar_priv_grp *gfargrp = |
---|
3312 | | - container_of(napi, struct gfar_priv_grp, napi_tx); |
---|
3313 | | - struct gfar_private *priv = gfargrp->priv; |
---|
3314 | | - struct gfar __iomem *regs = gfargrp->regs; |
---|
3315 | | - struct gfar_priv_tx_q *tx_queue = NULL; |
---|
3316 | | - int has_tx_work = 0; |
---|
3317 | | - int i; |
---|
3318 | | - |
---|
3319 | | - /* Clear IEVENT, so interrupts aren't called again |
---|
3320 | | - * because of the packets that have already arrived |
---|
3321 | | - */ |
---|
3322 | | - gfar_write(&regs->ievent, IEVENT_TX_MASK);
---|
3323 | | - |
---|
3324 | | - for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { |
---|
3325 | | - tx_queue = priv->tx_queue[i]; |
---|
3326 | | - /* run Tx cleanup to completion */ |
---|
3327 | | - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { |
---|
3328 | | - gfar_clean_tx_ring(tx_queue); |
---|
3329 | | - has_tx_work = 1; |
---|
3330 | | - } |
---|
3331 | | - } |
---|
3332 | | - |
---|
3333 | | - if (!has_tx_work) { |
---|
3334 | | - u32 imask; |
---|
3335 | | - napi_complete(napi); |
---|
3336 | | - |
---|
3337 | | - spin_lock_irq(&gfargrp->grplock); |
---|
3338 | | - imask = gfar_read(&regs->imask);
---|
3339 | | - imask |= IMASK_TX_DEFAULT;
---|
3340 | | - gfar_write(&regs->imask, imask);
---|
3341 | | - spin_unlock_irq(&gfargrp->grplock); |
---|
3342 | | - } |
---|
3343 | | - |
---|
3344 | | - return 0; |
---|
3345 | | -} |
---|
3346 | | - |
---|
3347 | | - |
---|
3348 | | -#ifdef CONFIG_NET_POLL_CONTROLLER |
---|
3349 | | -/* Polling 'interrupt' - used by things like netconsole to send skbs |
---|
3350 | | - * without having to re-enable interrupts. It's not called while |
---|
3351 | | - * the interrupt routine is executing. |
---|
3352 | | - */ |
---|
3353 | | -static void gfar_netpoll(struct net_device *dev) |
---|
3354 | | -{ |
---|
3355 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
3356 | | - int i; |
---|
3357 | | - |
---|
3358 | | - /* If the device has multiple interrupts, run tx/rx */ |
---|
3359 | | - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
---|
3360 | | - for (i = 0; i < priv->num_grps; i++) { |
---|
3361 | | - struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
---|
3362 | | - |
---|
3363 | | - disable_irq(gfar_irq(grp, TX)->irq); |
---|
3364 | | - disable_irq(gfar_irq(grp, RX)->irq); |
---|
3365 | | - disable_irq(gfar_irq(grp, ER)->irq); |
---|
3366 | | - gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
---|
3367 | | - enable_irq(gfar_irq(grp, ER)->irq); |
---|
3368 | | - enable_irq(gfar_irq(grp, RX)->irq); |
---|
3369 | | - enable_irq(gfar_irq(grp, TX)->irq); |
---|
3370 | | - } |
---|
3371 | | - } else { |
---|
3372 | | - for (i = 0; i < priv->num_grps; i++) { |
---|
3373 | | - struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
---|
3374 | | - |
---|
3375 | | - disable_irq(gfar_irq(grp, TX)->irq); |
---|
3376 | | - gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
---|
3377 | | - enable_irq(gfar_irq(grp, TX)->irq); |
---|
3378 | | - } |
---|
3379 | | - } |
---|
3380 | | -} |
---|
3381 | | -#endif |
---|
3382 | | - |
---|
3383 | | -/* The interrupt handler for devices with one interrupt */ |
---|
3384 | | -static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
---|
3385 | | -{ |
---|
3386 | | - struct gfar_priv_grp *gfargrp = grp_id; |
---|
3387 | | - |
---|
3388 | | - /* Save ievent for future reference */ |
---|
3389 | | - u32 events = gfar_read(&gfargrp->regs->ievent); |
---|
3390 | | - |
---|
3391 | | - /* Check for reception */ |
---|
3392 | | - if (events & IEVENT_RX_MASK) |
---|
3393 | | - gfar_receive(irq, grp_id); |
---|
3394 | | - |
---|
3395 | | - /* Check for transmit completion */ |
---|
3396 | | - if (events & IEVENT_TX_MASK) |
---|
3397 | | - gfar_transmit(irq, grp_id); |
---|
3398 | | - |
---|
3399 | | - /* Check for errors */ |
---|
3400 | | - if (events & IEVENT_ERR_MASK) |
---|
3401 | | - gfar_error(irq, grp_id); |
---|
3402 | | - |
---|
3403 | | - return IRQ_HANDLED; |
---|
3404 | | -} |
---|
3405 | | - |
---|
3406 | | -/* Called every time the controller might need to be made |
---|
3407 | | - * aware of new link state. The PHY code conveys this |
---|
3408 | | - * information through variables in the phydev structure, and this |
---|
3409 | | - * function converts those variables into the appropriate |
---|
3410 | | - * register values, and can bring down the device if needed. |
---|
3411 | | - */ |
---|
3412 | | -static void adjust_link(struct net_device *dev) |
---|
3413 | | -{ |
---|
3414 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
3415 | | - struct phy_device *phydev = dev->phydev; |
---|
3416 | | - |
---|
3417 | | - if (unlikely(phydev->link != priv->oldlink || |
---|
3418 | | - (phydev->link && (phydev->duplex != priv->oldduplex || |
---|
3419 | | - phydev->speed != priv->oldspeed)))) |
---|
3420 | | - gfar_update_link_state(priv); |
---|
3421 | | -} |
---|
3422 | | - |
---|
3423 | | -/* Update the hash table based on the current list of multicast |
---|
3424 | | - * addresses we subscribe to. Also, change the promiscuity of |
---|
3425 | | - * the device based on the flags (this function is called |
---|
3426 | | - * whenever dev->flags is changed)
---|
3427 | | - */ |
---|
3428 | | -static void gfar_set_multi(struct net_device *dev) |
---|
3429 | | -{ |
---|
3430 | | - struct netdev_hw_addr *ha; |
---|
3431 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
3432 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
3433 | | - u32 tempval; |
---|
3434 | | - |
---|
3435 | | - if (dev->flags & IFF_PROMISC) { |
---|
3436 | | - /* Set RCTRL to PROM */ |
---|
3437 | | - tempval = gfar_read(&regs->rctrl);
---|
3438 | | - tempval |= RCTRL_PROM;
---|
3439 | | - gfar_write(&regs->rctrl, tempval);
---|
3440 | | - } else { |
---|
3441 | | - /* Set RCTRL to not PROM */ |
---|
3442 | | - tempval = gfar_read(&regs->rctrl);
---|
3443 | | - tempval &= ~(RCTRL_PROM);
---|
3444 | | - gfar_write(&regs->rctrl, tempval);
---|
3445 | | - } |
---|
3446 | | - |
---|
3447 | | - if (dev->flags & IFF_ALLMULTI) { |
---|
3448 | | - /* Set the hash to rx all multicast frames */ |
---|
3449 | | - gfar_write(&regs->igaddr0, 0xffffffff);
---|
3450 | | - gfar_write(&regs->igaddr1, 0xffffffff);
---|
3451 | | - gfar_write(&regs->igaddr2, 0xffffffff);
---|
3452 | | - gfar_write(&regs->igaddr3, 0xffffffff);
---|
3453 | | - gfar_write(&regs->igaddr4, 0xffffffff);
---|
3454 | | - gfar_write(&regs->igaddr5, 0xffffffff);
---|
3455 | | - gfar_write(&regs->igaddr6, 0xffffffff);
---|
3456 | | - gfar_write(&regs->igaddr7, 0xffffffff);
---|
3457 | | - gfar_write(&regs->gaddr0, 0xffffffff);
---|
3458 | | - gfar_write(&regs->gaddr1, 0xffffffff);
---|
3459 | | - gfar_write(&regs->gaddr2, 0xffffffff);
---|
3460 | | - gfar_write(&regs->gaddr3, 0xffffffff);
---|
3461 | | - gfar_write(&regs->gaddr4, 0xffffffff);
---|
3462 | | - gfar_write(&regs->gaddr5, 0xffffffff);
---|
3463 | | - gfar_write(&regs->gaddr6, 0xffffffff);
---|
3464 | | - gfar_write(&regs->gaddr7, 0xffffffff);
---|
3465 | | - } else { |
---|
3466 | | - int em_num; |
---|
3467 | | - int idx; |
---|
3468 | | - |
---|
3469 | | - /* zero out the hash */ |
---|
3470 | | - gfar_write(&regs->igaddr0, 0x0);
---|
3471 | | - gfar_write(&regs->igaddr1, 0x0);
---|
3472 | | - gfar_write(&regs->igaddr2, 0x0);
---|
3473 | | - gfar_write(&regs->igaddr3, 0x0);
---|
3474 | | - gfar_write(&regs->igaddr4, 0x0);
---|
3475 | | - gfar_write(&regs->igaddr5, 0x0);
---|
3476 | | - gfar_write(&regs->igaddr6, 0x0);
---|
3477 | | - gfar_write(&regs->igaddr7, 0x0);
---|
3478 | | - gfar_write(&regs->gaddr0, 0x0);
---|
3479 | | - gfar_write(&regs->gaddr1, 0x0);
---|
3480 | | - gfar_write(&regs->gaddr2, 0x0);
---|
3481 | | - gfar_write(&regs->gaddr3, 0x0);
---|
3482 | | - gfar_write(&regs->gaddr4, 0x0);
---|
3483 | | - gfar_write(&regs->gaddr5, 0x0);
---|
3484 | | - gfar_write(&regs->gaddr6, 0x0);
---|
3485 | | - gfar_write(&regs->gaddr7, 0x0);
---|
3486 | | - |
---|
3487 | | - /* If we have extended hash tables, we need to |
---|
3488 | | - * clear the exact match registers to prepare for |
---|
3489 | | - * setting them |
---|
3490 | | - */ |
---|
3491 | | - if (priv->extended_hash) { |
---|
3492 | | - em_num = GFAR_EM_NUM + 1; |
---|
3493 | | - gfar_clear_exact_match(dev); |
---|
3494 | | - idx = 1; |
---|
3495 | | - } else { |
---|
3496 | | - idx = 0; |
---|
3497 | | - em_num = 0; |
---|
3498 | | - } |
---|
3499 | | - |
---|
3500 | | - if (netdev_mc_empty(dev)) |
---|
3501 | | - return; |
---|
3502 | | - |
---|
3503 | | - /* Parse the list, and set the appropriate bits */ |
---|
3504 | | - netdev_for_each_mc_addr(ha, dev) { |
---|
3505 | | - if (idx < em_num) { |
---|
3506 | | - gfar_set_mac_for_addr(dev, idx, ha->addr); |
---|
3507 | | - idx++; |
---|
3508 | | - } else |
---|
3509 | | - gfar_set_hash_for_addr(dev, ha->addr); |
---|
3510 | | - } |
---|
3511 | | - } |
---|
3512 | | -} |
---|
3513 | | - |
---|
3514 | | - |
---|
3515 | | -/* Clears each of the exact match registers to zero, so they |
---|
3516 | | - * don't interfere with normal reception |
---|
3517 | | - */ |
---|
3518 | | -static void gfar_clear_exact_match(struct net_device *dev) |
---|
3519 | | -{ |
---|
3520 | | - int idx; |
---|
3521 | | - static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
---|
3522 | | - |
---|
3523 | | - for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
---|
3524 | | - gfar_set_mac_for_addr(dev, idx, zero_arr); |
---|
3525 | | -} |
---|
3526 | | - |
---|
3527 | | -/* Set the appropriate hash bit for the given addr */ |
---|
3528 | | -/* The algorithm works like so: |
---|
3529 | | - * 1) Take the Destination Address (ie the multicast address), and |
---|
3530 | | - * do a CRC on it (little endian), and reverse the bits of the |
---|
3531 | | - * result. |
---|
3532 | | - * 2) Use the 8 most significant bits as a hash into a 256-entry |
---|
3533 | | - * table. The table is controlled through 8 32-bit registers: |
---|
3534 | | - * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is |
---|
3535 | | - * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
---|
3536 | | - * entry 255. This means that the 3 most significant bits of the
---|
3537 | | - * hash index select which gaddr register to use, and the 5 other bits
---|
3538 | | - * for PowerPC (tm) is usually the case) in the register holds |
---|
3539 | | - * the entry. |
---|
3540 | | - */ |
---|
3541 | | -static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) |
---|
3542 | | -{ |
---|
3543 | | - u32 tempval; |
---|
3544 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
3545 | | - u32 result = ether_crc(ETH_ALEN, addr); |
---|
3546 | | - int width = priv->hash_width; |
---|
3547 | | - u8 whichbit = (result >> (32 - width)) & 0x1f; |
---|
3548 | | - u8 whichreg = result >> (32 - width + 5); |
---|
3549 | | - u32 value = (1 << (31-whichbit)); |
---|
3550 | | - |
---|
3551 | | - tempval = gfar_read(priv->hash_regs[whichreg]); |
---|
3552 | | - tempval |= value; |
---|
3553 | | - gfar_write(priv->hash_regs[whichreg], tempval); |
---|
3554 | | -} |
---|
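
A worked example of the split described above, for an 8-bit hash width (8 registers x 32 bits = 256 entries): the CRC's top 8 bits form the table index, the top 3 of those select the register, and the next 5 select the bit within it. The CRC value below is invented purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int result = 0xD3000000; /* made-up CRC, top byte 0xD3 */
        int width = 8;                    /* 8 registers x 32 bits = 256 */
        unsigned char whichbit = (result >> (32 - width)) & 0x1f;
        unsigned char whichreg = result >> (32 - width + 5);

        /* 0xD3 = 0b110'10011 -> register 6, bit 19 (MSB is bit 0) */
        printf("gaddr%u, bit %u, mask 0x%08x\n",
               whichreg, whichbit, 1u << (31 - whichbit));
        return 0;
    }
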
3555 | | - |
---|
3556 | | - |
---|
3557 | | -/* There are multiple MAC Address register pairs on some controllers |
---|
3558 | | - * This function sets the num'th pair to a given address
---|
3559 | | - */ |
---|
3560 | | -static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
---|
3561 | | - const u8 *addr) |
---|
3562 | | -{ |
---|
3563 | | - struct gfar_private *priv = netdev_priv(dev); |
---|
3564 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
3565 | | - u32 tempval; |
---|
3566 | | - u32 __iomem *macptr = &regs->macstnaddr1;
---|
3567 | | - |
---|
3568 | | - macptr += num*2; |
---|
3569 | | - |
---|
3570 | | - /* For a station address of 0x12345678ABCD in transmission |
---|
3571 | | - * order (BE), MACnADDR1 is set to 0xCDAB7856 and |
---|
3572 | | - * MACnADDR2 is set to 0x34120000. |
---|
3573 | | - */ |
---|
3574 | | - tempval = (addr[5] << 24) | (addr[4] << 16) | |
---|
3575 | | - (addr[3] << 8) | addr[2]; |
---|
3576 | | - |
---|
3577 | | - gfar_write(macptr, tempval); |
---|
3578 | | - |
---|
3579 | | - tempval = (addr[1] << 24) | (addr[0] << 16); |
---|
3580 | | - |
---|
3581 | | - gfar_write(macptr+1, tempval); |
---|
3582 | | -} |
---|
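
The register layout in the comment above can be checked with a few shifts: the low four octets are packed into MACnADDR1 last-octet-first, and the first two octets into the top of MACnADDR2. A standalone sketch reproducing the 0x12345678ABCD example:

    #include <stdio.h>

    int main(void)
    {
        /* station address 12:34:56:78:AB:CD in transmission order */
        unsigned char addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
        unsigned int macaddr1 = (addr[5] << 24) | (addr[4] << 16) |
                                (addr[3] << 8) | addr[2];
        unsigned int macaddr2 = (addr[1] << 24) | (addr[0] << 16);

        /* prints CDAB7856 and 34120000, matching the comment */
        printf("MACnADDR1=%08X MACnADDR2=%08X\n", macaddr1, macaddr2);
        return 0;
    }
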
3583 | | - |
---|
3584 | | -/* GFAR error interrupt handler */ |
---|
3585 | | -static irqreturn_t gfar_error(int irq, void *grp_id) |
---|
3586 | | -{ |
---|
3587 | | - struct gfar_priv_grp *gfargrp = grp_id; |
---|
3588 | | - struct gfar __iomem *regs = gfargrp->regs; |
---|
3589 | | - struct gfar_private *priv = gfargrp->priv;
---|
3590 | | - struct net_device *dev = priv->ndev; |
---|
3591 | | - |
---|
3592 | | - /* Save ievent for future reference */ |
---|
3593 | | - u32 events = gfar_read(&regs->ievent);
---|
3594 | | - |
---|
3595 | | - /* Clear IEVENT */ |
---|
3596 | | - gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
---|
3597 | | - |
---|
3598 | | - /* Magic Packet is not an error. */ |
---|
3599 | | - if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
---|
3600 | | - (events & IEVENT_MAG)) |
---|
3601 | | - events &= ~IEVENT_MAG; |
---|
3602 | | - |
---|
3603 | | - /* Log the interrupt cause if error debugging is enabled */
---|
3604 | | - if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
---|
3605 | | - netdev_dbg(dev, |
---|
3606 | | - "error interrupt (ievent=0x%08x imask=0x%08x)\n", |
---|
3607 | | - events, gfar_read(&regs->imask));
---|
3608 | | - |
---|
3609 | | - /* Update the error counters */ |
---|
3610 | | - if (events & IEVENT_TXE) { |
---|
3611 | | - dev->stats.tx_errors++; |
---|
3612 | | - |
---|
3613 | | - if (events & IEVENT_LC) |
---|
3614 | | - dev->stats.tx_window_errors++; |
---|
3615 | | - if (events & IEVENT_CRL) |
---|
3616 | | - dev->stats.tx_aborted_errors++; |
---|
3617 | | - if (events & IEVENT_XFUN) { |
---|
3618 | | - netif_dbg(priv, tx_err, dev, |
---|
3619 | | - "TX FIFO underrun, packet dropped\n"); |
---|
3620 | | - dev->stats.tx_dropped++; |
---|
3621 | | - atomic64_inc(&priv->extra_stats.tx_underrun); |
---|
3622 | | - |
---|
3623 | | - schedule_work(&priv->reset_task); |
---|
3624 | | - } |
---|
3625 | | - netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
---|
3626 | | - } |
---|
3627 | | - if (events & IEVENT_BSY) { |
---|
3628 | | - dev->stats.rx_over_errors++; |
---|
3629 | | - atomic64_inc(&priv->extra_stats.rx_bsy); |
---|
3630 | | - |
---|
3631 | | - netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
---|
3632 | | - gfar_read(&regs->rstat));
---|
3633 | | - } |
---|
3634 | | - if (events & IEVENT_BABR) { |
---|
3635 | | - dev->stats.rx_errors++; |
---|
3636 | | - atomic64_inc(&priv->extra_stats.rx_babr); |
---|
3637 | | - |
---|
3638 | | - netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
---|
3639 | | - } |
---|
3640 | | - if (events & IEVENT_EBERR) { |
---|
3641 | | - atomic64_inc(&priv->extra_stats.eberr); |
---|
3642 | | - netif_dbg(priv, rx_err, dev, "bus error\n"); |
---|
3643 | | - } |
---|
3644 | | - if (events & IEVENT_RXC) |
---|
3645 | | - netif_dbg(priv, rx_status, dev, "control frame\n"); |
---|
3646 | | - |
---|
3647 | | - if (events & IEVENT_BABT) { |
---|
3648 | | - atomic64_inc(&priv->extra_stats.tx_babt); |
---|
3649 | | - netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
---|
3650 | | - } |
---|
3651 | | - return IRQ_HANDLED; |
---|
3652 | | -} |
---|
3653 | | - |
---|
3654 | | -static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) |
---|
3655 | | -{ |
---|
3656 | | - struct net_device *ndev = priv->ndev; |
---|
3657 | | - struct phy_device *phydev = ndev->phydev; |
---|
3658 | | - u32 val = 0; |
---|
3659 | | - |
---|
3660 | | - if (!phydev->duplex) |
---|
3661 | | - return val; |
---|
3662 | | - |
---|
3663 | | - if (!priv->pause_aneg_en) { |
---|
3664 | | - if (priv->tx_pause_en) |
---|
3665 | | - val |= MACCFG1_TX_FLOW; |
---|
3666 | | - if (priv->rx_pause_en) |
---|
3667 | | - val |= MACCFG1_RX_FLOW; |
---|
3668 | | - } else { |
---|
3669 | | - u16 lcl_adv, rmt_adv; |
---|
3670 | | - u8 flowctrl; |
---|
3671 | | - /* get link partner capabilities */ |
---|
3672 | | - rmt_adv = 0; |
---|
3673 | | - if (phydev->pause) |
---|
3674 | | - rmt_adv = LPA_PAUSE_CAP; |
---|
3675 | | - if (phydev->asym_pause) |
---|
3676 | | - rmt_adv |= LPA_PAUSE_ASYM; |
---|
3677 | | - |
---|
3678 | | - lcl_adv = 0; |
---|
3679 | | - if (phydev->advertising & ADVERTISED_Pause) |
---|
3680 | | - lcl_adv |= ADVERTISE_PAUSE_CAP; |
---|
3681 | | - if (phydev->advertising & ADVERTISED_Asym_Pause) |
---|
3682 | | - lcl_adv |= ADVERTISE_PAUSE_ASYM; |
---|
3683 | | - |
---|
3684 | | - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); |
---|
3685 | | - if (flowctrl & FLOW_CTRL_TX) |
---|
3686 | | - val |= MACCFG1_TX_FLOW; |
---|
3687 | | - if (flowctrl & FLOW_CTRL_RX) |
---|
3688 | | - val |= MACCFG1_RX_FLOW; |
---|
3689 | | - } |
---|
3690 | | - |
---|
3691 | | - return val; |
---|
3692 | | -} |
---|
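
With pause autonegotiation enabled, the local and partner pause/asym-pause advertisements are resolved per IEEE 802.3 Annex 28B, which is what mii_resolve_flowctrl_fdx() implements. A sketch of the resolution rule in plain C (the bit values are local to the sketch, not the kernel's ADVERTISE_*/FLOW_CTRL_* constants):

    #include <stdio.h>

    #define PAUSE_CAP  0x1 /* symmetric pause advertised */
    #define PAUSE_ASYM 0x2 /* asymmetric pause advertised */
    #define FC_TX      0x1 /* send pause frames */
    #define FC_RX      0x2 /* honour received pause frames */

    /* resolution rule per IEEE 802.3 Annex 28B (sketch, not kernel code) */
    static int resolve_fc(int lcl, int rmt)
    {
        if (lcl & rmt & PAUSE_CAP)
            return FC_TX | FC_RX; /* both sides symmetric */
        if (lcl & rmt & PAUSE_ASYM) {
            if (lcl & PAUSE_CAP)
                return FC_RX;     /* we pause, the partner doesn't */
            if (rmt & PAUSE_CAP)
                return FC_TX;     /* the partner pauses, we don't */
        }
        return 0;
    }

    int main(void)
    {
        /* local sym+asym vs. partner asym-only resolves to RX pause (2) */
        printf("%d\n", resolve_fc(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM));
        return 0;
    }
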
3693 | | - |
---|
3694 | | -static noinline void gfar_update_link_state(struct gfar_private *priv) |
---|
3695 | | -{ |
---|
3696 | | - struct gfar __iomem *regs = priv->gfargrp[0].regs; |
---|
3697 | | - struct net_device *ndev = priv->ndev; |
---|
3698 | | - struct phy_device *phydev = ndev->phydev; |
---|
3699 | | - struct gfar_priv_rx_q *rx_queue = NULL; |
---|
3700 | | - int i; |
---|
3701 | | - |
---|
3702 | | - if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) |
---|
3703 | | - return; |
---|
3704 | | - |
---|
3705 | | - if (phydev->link) { |
---|
3706 | | - u32 tempval1 = gfar_read(&regs->maccfg1);
---|
3707 | | - u32 tempval = gfar_read(&regs->maccfg2);
---|
3708 | | - u32 ecntrl = gfar_read(&regs->ecntrl);
---|
3709 | | - u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); |
---|
3710 | | - |
---|
3711 | | - if (phydev->duplex != priv->oldduplex) { |
---|
3712 | | - if (!(phydev->duplex)) |
---|
3713 | | - tempval &= ~(MACCFG2_FULL_DUPLEX); |
---|
3714 | | - else |
---|
3715 | | - tempval |= MACCFG2_FULL_DUPLEX; |
---|
3716 | | - |
---|
3717 | | - priv->oldduplex = phydev->duplex; |
---|
3718 | | - } |
---|
3719 | | - |
---|
3720 | | - if (phydev->speed != priv->oldspeed) { |
---|
3721 | | - switch (phydev->speed) { |
---|
3722 | | - case 1000: |
---|
3723 | | - tempval = |
---|
3724 | | - ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); |
---|
3725 | | - |
---|
3726 | | - ecntrl &= ~(ECNTRL_R100); |
---|
3727 | | - break; |
---|
3728 | | - case 100: |
---|
3729 | | - case 10: |
---|
3730 | | - tempval = |
---|
3731 | | - ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); |
---|
3732 | | - |
---|
3733 | | - /* Reduced mode distinguishes |
---|
3734 | | - * between 10 and 100 |
---|
3735 | | - */ |
---|
3736 | | - if (phydev->speed == SPEED_100) |
---|
3737 | | - ecntrl |= ECNTRL_R100; |
---|
3738 | | - else |
---|
3739 | | - ecntrl &= ~(ECNTRL_R100); |
---|
3740 | | - break; |
---|
3741 | | - default: |
---|
3742 | | - netif_warn(priv, link, priv->ndev, |
---|
3743 | | - "Ack! Speed (%d) is not 10/100/1000!\n", |
---|
3744 | | - phydev->speed); |
---|
3745 | | - break; |
---|
3746 | | - } |
---|
3747 | | - |
---|
3748 | | - priv->oldspeed = phydev->speed; |
---|
3749 | | - } |
---|
3750 | | - |
---|
3751 | | - tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
---|
3752 | | - tempval1 |= gfar_get_flowctrl_cfg(priv); |
---|
3753 | | - |
---|
3754 | | - /* Turn last free buffer recording on */ |
---|
3755 | | - if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { |
---|
3756 | | - for (i = 0; i < priv->num_rx_queues; i++) { |
---|
3757 | | - u32 bdp_dma; |
---|
3758 | | - |
---|
3759 | | - rx_queue = priv->rx_queue[i]; |
---|
3760 | | - bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); |
---|
3761 | | - gfar_write(rx_queue->rfbptr, bdp_dma); |
---|
3762 | | - } |
---|
3763 | | - |
---|
3764 | | - priv->tx_actual_en = 1; |
---|
3765 | | - } |
---|
3766 | | - |
---|
3767 | | - if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) |
---|
3768 | | - priv->tx_actual_en = 0; |
---|
3769 | | - |
---|
3770 | | - gfar_write(&regs->maccfg1, tempval1);
---|
3771 | | - gfar_write(&regs->maccfg2, tempval);
---|
3772 | | - gfar_write(&regs->ecntrl, ecntrl);
---|
3773 | | - |
---|
3774 | | - if (!priv->oldlink) |
---|
3775 | | - priv->oldlink = 1; |
---|
3776 | | - |
---|
3777 | | - } else if (priv->oldlink) { |
---|
3778 | | - priv->oldlink = 0; |
---|
3779 | | - priv->oldspeed = 0; |
---|
3780 | | - priv->oldduplex = -1; |
---|
3781 | | - } |
---|
3782 | | - |
---|
3783 | | - if (netif_msg_link(priv)) |
---|
3784 | | - phy_print_status(phydev); |
---|
3785 | | -} |
---|
3786 | 3743 | |
---|
3787 | 3744 | static const struct of_device_id gfar_match[] = |
---|
3788 | 3745 | { |
---|