.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Xilinx Axi Ethernet device driver |
---|
3 | 4 | * |
---|
.. | .. |
---|
6 | 7 | * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. |
---|
7 | 8 | * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> |
---|
8 | 9 | * Copyright (c) 2010 - 2011 PetaLogix |
---|
| 10 | + * Copyright (c) 2019 SED Systems, a division of Calian Ltd. |
---|
9 | 11 | * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. |
---|
10 | 12 | * |
---|
11 | 13 | * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 |
---|
.. | .. |
---|
20 | 22 | * - Add extended VLAN support. |
---|
21 | 23 | */ |
---|
22 | 24 | |
---|
| 25 | +#include <linux/clk.h> |
---|
23 | 26 | #include <linux/delay.h> |
---|
24 | 27 | #include <linux/etherdevice.h> |
---|
25 | 28 | #include <linux/module.h> |
---|
.. | .. |
---|
37 | 40 | |
---|
38 | 41 | #include "xilinx_axienet.h" |
---|
39 | 42 | |
---|
40 | | -/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */ |
---|
41 | | -#define TX_BD_NUM 64 |
---|
42 | | -#define RX_BD_NUM 128 |
---|
| 43 | +/* Descriptors defines for Tx and Rx DMA */ |
---|
| 44 | +#define TX_BD_NUM_DEFAULT 128 |
---|
| 45 | +#define RX_BD_NUM_DEFAULT 1024 |
---|
| 46 | +#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) |
---|
| 47 | +#define TX_BD_NUM_MAX 4096 |
---|
| 48 | +#define RX_BD_NUM_MAX 4096 |
---|
43 | 49 | |
---|
44 | 50 | /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ |
---|
45 | 51 | #define DRIVER_NAME "xaxienet" |
---|
46 | 52 | #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" |
---|
47 | 53 | #define DRIVER_VERSION "1.00a" |
---|
48 | 54 | |
---|
49 | | -#define AXIENET_REGS_N 32 |
---|
| 55 | +#define AXIENET_REGS_N 40 |
---|
50 | 56 | |
---|
51 | 57 | /* Match table for of_platform binding */ |
---|
52 | 58 | static const struct of_device_id axienet_of_match[] = { |
---|
.. | .. |
---|
124 | 130 | */ |
---|
125 | 131 | static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) |
---|
126 | 132 | { |
---|
127 | | - return in_be32(lp->dma_regs + reg); |
---|
| 133 | + return ioread32(lp->dma_regs + reg); |
---|
128 | 134 | } |
---|
129 | 135 | |
---|
130 | 136 | /** |
---|
.. | .. |
---|
139 | 145 | static inline void axienet_dma_out32(struct axienet_local *lp, |
---|
140 | 146 | off_t reg, u32 value) |
---|
141 | 147 | { |
---|
142 | | - out_be32((lp->dma_regs + reg), value); |
---|
| 148 | + iowrite32(value, lp->dma_regs + reg); |
---|
| 149 | +} |
---|
| 150 | + |
---|
| 151 | +static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, |
---|
| 152 | + dma_addr_t addr) |
---|
| 153 | +{ |
---|
| 154 | + axienet_dma_out32(lp, reg, lower_32_bits(addr)); |
---|
| 155 | + |
---|
| 156 | + if (lp->features & XAE_FEATURE_DMA_64BIT) |
---|
| 157 | + axienet_dma_out32(lp, reg + 4, upper_32_bits(addr)); |
---|
| 158 | +} |
---|
| 159 | + |
---|
| 160 | +static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, |
---|
| 161 | + struct axidma_bd *desc) |
---|
| 162 | +{ |
---|
| 163 | + desc->phys = lower_32_bits(addr); |
---|
| 164 | + if (lp->features & XAE_FEATURE_DMA_64BIT) |
---|
| 165 | + desc->phys_msb = upper_32_bits(addr); |
---|
| 166 | +} |
---|
| 167 | + |
---|
| 168 | +static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, |
---|
| 169 | + struct axidma_bd *desc) |
---|
| 170 | +{ |
---|
| 171 | + dma_addr_t ret = desc->phys; |
---|
| 172 | + |
---|
| 173 | + if (lp->features & XAE_FEATURE_DMA_64BIT) |
---|
| 174 | + ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; |
---|
| 175 | + |
---|
| 176 | + return ret; |
---|
143 | 177 | } |
---|
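For context, and not part of the patch itself: the new helpers above split a DMA address across the 32-bit phys/phys_msb descriptor words and reassemble it with two 16-bit shifts rather than one 32-bit shift, so the expression stays well-defined even when dma_addr_t is only 32 bits wide. A minimal stand-alone sketch of that round trip (names are illustrative, not driver API):

    #include <stdint.h>

    /* Hypothetical descriptor with split address words, mirroring axidma_bd. */
    struct example_desc {
            uint32_t phys;          /* low 32 bits  */
            uint32_t phys_msb;      /* high 32 bits */
    };

    static void example_desc_set(uint64_t addr, struct example_desc *d)
    {
            d->phys     = (uint32_t)addr;           /* lower_32_bits(addr) */
            d->phys_msb = (uint32_t)(addr >> 32);   /* upper_32_bits(addr) */
    }

    static uint64_t example_desc_get(const struct example_desc *d)
    {
            /* Two 16-bit shifts: no shift-by-32 even if this were a 32-bit type. */
            return (((uint64_t)d->phys_msb << 16) << 16) | d->phys;
    }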
144 | 178 | |
---|
145 | 179 | /** |
---|
.. | .. |
---|
155 | 189 | int i; |
---|
156 | 190 | struct axienet_local *lp = netdev_priv(ndev); |
---|
157 | 191 | |
---|
158 | | - for (i = 0; i < RX_BD_NUM; i++) { |
---|
159 | | - dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, |
---|
160 | | - lp->max_frm_size, DMA_FROM_DEVICE); |
---|
161 | | - dev_kfree_skb((struct sk_buff *) |
---|
162 | | - (lp->rx_bd_v[i].sw_id_offset)); |
---|
| 192 | + /* If we end up here, tx_bd_v must have been DMA allocated. */ |
---|
| 193 | + dma_free_coherent(ndev->dev.parent, |
---|
| 194 | + sizeof(*lp->tx_bd_v) * lp->tx_bd_num, |
---|
| 195 | + lp->tx_bd_v, |
---|
| 196 | + lp->tx_bd_p); |
---|
| 197 | + |
---|
| 198 | + if (!lp->rx_bd_v) |
---|
| 199 | + return; |
---|
| 200 | + |
---|
| 201 | + for (i = 0; i < lp->rx_bd_num; i++) { |
---|
| 202 | + dma_addr_t phys; |
---|
| 203 | + |
---|
| 204 | + /* A NULL skb means this descriptor has not been initialised |
---|
| 205 | + * at all. |
---|
| 206 | + */ |
---|
| 207 | + if (!lp->rx_bd_v[i].skb) |
---|
| 208 | + break; |
---|
| 209 | + |
---|
| 210 | + dev_kfree_skb(lp->rx_bd_v[i].skb); |
---|
| 211 | + |
---|
| 212 | + /* For each descriptor, we programmed cntrl with the (non-zero) |
---|
| 213 | + * descriptor size, after it had been successfully allocated. |
---|
| 214 | + * So a non-zero value in there means we need to unmap it. |
---|
| 215 | + */ |
---|
| 216 | + if (lp->rx_bd_v[i].cntrl) { |
---|
| 217 | + phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); |
---|
| 218 | + dma_unmap_single(ndev->dev.parent, phys, |
---|
| 219 | + lp->max_frm_size, DMA_FROM_DEVICE); |
---|
| 220 | + } |
---|
163 | 221 | } |
---|
164 | 222 | |
---|
165 | | - if (lp->rx_bd_v) { |
---|
166 | | - dma_free_coherent(ndev->dev.parent, |
---|
167 | | - sizeof(*lp->rx_bd_v) * RX_BD_NUM, |
---|
168 | | - lp->rx_bd_v, |
---|
169 | | - lp->rx_bd_p); |
---|
170 | | - } |
---|
171 | | - if (lp->tx_bd_v) { |
---|
172 | | - dma_free_coherent(ndev->dev.parent, |
---|
173 | | - sizeof(*lp->tx_bd_v) * TX_BD_NUM, |
---|
174 | | - lp->tx_bd_v, |
---|
175 | | - lp->tx_bd_p); |
---|
176 | | - } |
---|
| 223 | + dma_free_coherent(ndev->dev.parent, |
---|
| 224 | + sizeof(*lp->rx_bd_v) * lp->rx_bd_num, |
---|
| 225 | + lp->rx_bd_v, |
---|
| 226 | + lp->rx_bd_p); |
---|
177 | 227 | } |
---|
178 | 228 | |
---|
179 | 229 | /** |
---|
.. | .. |
---|
199 | 249 | lp->rx_bd_ci = 0; |
---|
200 | 250 | |
---|
201 | 251 | /* Allocate the Tx and Rx buffer descriptors. */ |
---|
202 | | - lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, |
---|
203 | | - sizeof(*lp->tx_bd_v) * TX_BD_NUM, |
---|
204 | | - &lp->tx_bd_p, GFP_KERNEL); |
---|
| 252 | + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
---|
| 253 | + sizeof(*lp->tx_bd_v) * lp->tx_bd_num, |
---|
| 254 | + &lp->tx_bd_p, GFP_KERNEL); |
---|
205 | 255 | if (!lp->tx_bd_v) |
---|
206 | | - goto out; |
---|
| 256 | + return -ENOMEM; |
---|
207 | 257 | |
---|
208 | | - lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, |
---|
209 | | - sizeof(*lp->rx_bd_v) * RX_BD_NUM, |
---|
210 | | - &lp->rx_bd_p, GFP_KERNEL); |
---|
| 258 | + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
---|
| 259 | + sizeof(*lp->rx_bd_v) * lp->rx_bd_num, |
---|
| 260 | + &lp->rx_bd_p, GFP_KERNEL); |
---|
211 | 261 | if (!lp->rx_bd_v) |
---|
212 | 262 | goto out; |
---|
213 | 263 | |
---|
214 | | - for (i = 0; i < TX_BD_NUM; i++) { |
---|
215 | | - lp->tx_bd_v[i].next = lp->tx_bd_p + |
---|
216 | | - sizeof(*lp->tx_bd_v) * |
---|
217 | | - ((i + 1) % TX_BD_NUM); |
---|
| 264 | + for (i = 0; i < lp->tx_bd_num; i++) { |
---|
| 265 | + dma_addr_t addr = lp->tx_bd_p + |
---|
| 266 | + sizeof(*lp->tx_bd_v) * |
---|
| 267 | + ((i + 1) % lp->tx_bd_num); |
---|
| 268 | + |
---|
| 269 | + lp->tx_bd_v[i].next = lower_32_bits(addr); |
---|
| 270 | + if (lp->features & XAE_FEATURE_DMA_64BIT) |
---|
| 271 | + lp->tx_bd_v[i].next_msb = upper_32_bits(addr); |
---|
218 | 272 | } |
---|
219 | 273 | |
---|
220 | | - for (i = 0; i < RX_BD_NUM; i++) { |
---|
221 | | - lp->rx_bd_v[i].next = lp->rx_bd_p + |
---|
222 | | - sizeof(*lp->rx_bd_v) * |
---|
223 | | - ((i + 1) % RX_BD_NUM); |
---|
| 274 | + for (i = 0; i < lp->rx_bd_num; i++) { |
---|
| 275 | + dma_addr_t addr; |
---|
| 276 | + |
---|
| 277 | + addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * |
---|
| 278 | + ((i + 1) % lp->rx_bd_num); |
---|
| 279 | + lp->rx_bd_v[i].next = lower_32_bits(addr); |
---|
| 280 | + if (lp->features & XAE_FEATURE_DMA_64BIT) |
---|
| 281 | + lp->rx_bd_v[i].next_msb = upper_32_bits(addr); |
---|
224 | 282 | |
---|
225 | 283 | skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); |
---|
226 | 284 | if (!skb) |
---|
227 | 285 | goto out; |
---|
228 | 286 | |
---|
229 | | - lp->rx_bd_v[i].sw_id_offset = (u32) skb; |
---|
230 | | - lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, |
---|
231 | | - skb->data, |
---|
232 | | - lp->max_frm_size, |
---|
233 | | - DMA_FROM_DEVICE); |
---|
| 287 | + lp->rx_bd_v[i].skb = skb; |
---|
| 288 | + addr = dma_map_single(ndev->dev.parent, skb->data, |
---|
| 289 | + lp->max_frm_size, DMA_FROM_DEVICE); |
---|
| 290 | + if (dma_mapping_error(ndev->dev.parent, addr)) { |
---|
| 291 | + netdev_err(ndev, "DMA mapping error\n"); |
---|
| 292 | + goto out; |
---|
| 293 | + } |
---|
| 294 | + desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); |
---|
| 295 | + |
---|
234 | 296 | lp->rx_bd_v[i].cntrl = lp->max_frm_size; |
---|
235 | 297 | } |
---|
236 | 298 | |
---|
.. | .. |
---|
263 | 325 | /* Populate the tail pointer and bring the Rx Axi DMA engine out of |
---|
264 | 326 | * halted state. This will make the Rx side ready for reception. |
---|
265 | 327 | */ |
---|
266 | | - axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); |
---|
| 328 | + axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); |
---|
267 | 329 | cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
---|
268 | 330 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, |
---|
269 | 331 | cr | XAXIDMA_CR_RUNSTOP_MASK); |
---|
270 | | - axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + |
---|
271 | | - (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); |
---|
| 332 | + axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + |
---|
| 333 | + (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); |
---|
272 | 334 | |
---|
273 | 335 | /* Write to the RS (Run-stop) bit in the Tx channel control register. |
---|
274 | 336 | * Tx channel is now ready to run. But only after we write to the |
---|
275 | 337 | * tail pointer register that the Tx channel will start transmitting. |
---|
276 | 338 | */ |
---|
277 | | - axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); |
---|
| 339 | + axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); |
---|
278 | 340 | cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
279 | 341 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, |
---|
280 | 342 | cr | XAXIDMA_CR_RUNSTOP_MASK); |
---|
281 | | - |
---|
282 | | - /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ |
---|
283 | | - ret = read_poll_timeout(axienet_ior, value, |
---|
284 | | - value & XAE_INT_PHYRSTCMPLT_MASK, |
---|
285 | | - DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
---|
286 | | - XAE_IS_OFFSET); |
---|
287 | | - if (ret) { |
---|
288 | | - dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); |
---|
289 | | - return ret; |
---|
290 | | - } |
---|
291 | 343 | |
---|
292 | 344 | return 0; |
---|
293 | 345 | out: |
---|
.. | .. |
---|
443 | 495 | lp->options |= options; |
---|
444 | 496 | } |
---|
445 | 497 | |
---|
446 | | -static void __axienet_device_reset(struct axienet_local *lp, off_t offset) |
---|
| 498 | +static int __axienet_device_reset(struct axienet_local *lp) |
---|
447 | 499 | { |
---|
448 | | - u32 timeout; |
---|
| 500 | + u32 value; |
---|
| 501 | + int ret; |
---|
| 502 | + |
---|
449 | 503 | /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset |
---|
450 | 504 | * process of Axi DMA takes a while to complete as all pending |
---|
451 | 505 | * commands/transfers will be flushed or completed during this |
---|
452 | 506 | * reset process. |
---|
| 507 | + * Note that even though both TX and RX have their own reset register, |
---|
| 508 | + * they both reset the entire DMA core, so only one needs to be used. |
---|
453 | 509 | */ |
---|
454 | | - axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK); |
---|
455 | | - timeout = DELAY_OF_ONE_MILLISEC; |
---|
456 | | - while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) { |
---|
457 | | - udelay(1); |
---|
458 | | - if (--timeout == 0) { |
---|
459 | | - netdev_err(lp->ndev, "%s: DMA reset timeout!\n", |
---|
460 | | - __func__); |
---|
461 | | - break; |
---|
462 | | - } |
---|
| 510 | + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); |
---|
| 511 | + ret = read_poll_timeout(axienet_dma_in32, value, |
---|
| 512 | + !(value & XAXIDMA_CR_RESET_MASK), |
---|
| 513 | + DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
---|
| 514 | + XAXIDMA_TX_CR_OFFSET); |
---|
| 515 | + if (ret) { |
---|
| 516 | + dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); |
---|
| 517 | + return ret; |
---|
463 | 518 | } |
---|
| 519 | + |
---|
| 520 | + /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ |
---|
| 521 | + ret = read_poll_timeout(axienet_ior, value, |
---|
| 522 | + value & XAE_INT_PHYRSTCMPLT_MASK, |
---|
| 523 | + DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
---|
| 524 | + XAE_IS_OFFSET); |
---|
| 525 | + if (ret) { |
---|
| 526 | + dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); |
---|
| 527 | + return ret; |
---|
| 528 | + } |
---|
| 529 | + |
---|
| 530 | + return 0; |
---|
464 | 531 | } |
---|
465 | 532 | |
---|
466 | 533 | /** |
---|
.. | .. |
---|
473 | 540 | * are connected to Axi Ethernet reset lines, this in turn resets the Axi |
---|
474 | 541 | * Ethernet core. No separate hardware reset is done for the Axi Ethernet |
---|
475 | 542 | * core. |
---|
| 543 | + * Returns 0 on success or a negative error number otherwise. |
---|
476 | 544 | */ |
---|
477 | | -static void axienet_device_reset(struct net_device *ndev) |
---|
| 545 | +static int axienet_device_reset(struct net_device *ndev) |
---|
478 | 546 | { |
---|
479 | 547 | u32 axienet_status; |
---|
480 | 548 | struct axienet_local *lp = netdev_priv(ndev); |
---|
| 549 | + int ret; |
---|
481 | 550 | |
---|
482 | | - __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET); |
---|
483 | | - __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET); |
---|
| 551 | + ret = __axienet_device_reset(lp); |
---|
| 552 | + if (ret) |
---|
| 553 | + return ret; |
---|
484 | 554 | |
---|
485 | 555 | lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; |
---|
486 | 556 | lp->options |= XAE_OPTION_VLAN; |
---|
.. | .. |
---|
495 | 565 | lp->options |= XAE_OPTION_JUMBO; |
---|
496 | 566 | } |
---|
497 | 567 | |
---|
498 | | - if (axienet_dma_bd_init(ndev)) { |
---|
| 568 | + ret = axienet_dma_bd_init(ndev); |
---|
| 569 | + if (ret) { |
---|
499 | 570 | netdev_err(ndev, "%s: descriptor allocation failed\n", |
---|
500 | 571 | __func__); |
---|
| 572 | + return ret; |
---|
501 | 573 | } |
---|
502 | 574 | |
---|
503 | 575 | axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); |
---|
.. | .. |
---|
507 | 579 | axienet_status = axienet_ior(lp, XAE_IP_OFFSET); |
---|
508 | 580 | if (axienet_status & XAE_INT_RXRJECT_MASK) |
---|
509 | 581 | axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); |
---|
| 582 | + axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? |
---|
| 583 | + XAE_INT_RECV_ERROR_MASK : 0); |
---|
510 | 584 | |
---|
511 | 585 | axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); |
---|
512 | 586 | |
---|
.. | .. |
---|
520 | 594 | axienet_setoptions(ndev, lp->options); |
---|
521 | 595 | |
---|
522 | 596 | netif_trans_update(ndev); |
---|
| 597 | + |
---|
| 598 | + return 0; |
---|
523 | 599 | } |
---|
524 | 600 | |
---|
525 | 601 | /** |
---|
526 | | - * axienet_adjust_link - Adjust the PHY link speed/duplex. |
---|
| 602 | + * axienet_free_tx_chain - Clean up a series of linked TX descriptors. |
---|
527 | 603 | * @ndev: Pointer to the net_device structure |
---|
| 604 | + * @first_bd: Index of first descriptor to clean up |
---|
| 605 | + * @nr_bds: Number of descriptors to clean up, can be -1 if unknown. |
---|
| 606 | + * @sizep: Pointer to a u32 filled with the total sum of all bytes |
---|
| 607 | + * in all cleaned-up descriptors. Ignored if NULL. |
---|
528 | 608 | * |
---|
529 | | - * This function is called to change the speed and duplex setting after |
---|
530 | | - * auto negotiation is done by the PHY. This is the function that gets |
---|
531 | | - * registered with the PHY interface through the "of_phy_connect" call. |
---|
| 609 | + * Would either be called after a successful transmit operation, or after |
---|
| 610 | + * there was an error when setting up the chain. |
---|
| 611 | + * Returns the number of descriptors handled. |
---|
532 | 612 | */ |
---|
533 | | -static void axienet_adjust_link(struct net_device *ndev) |
---|
| 613 | +static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, |
---|
| 614 | + int nr_bds, u32 *sizep) |
---|
534 | 615 | { |
---|
535 | | - u32 emmc_reg; |
---|
536 | | - u32 link_state; |
---|
537 | | - u32 setspeed = 1; |
---|
538 | | - struct axienet_local *lp = netdev_priv(ndev); |
---|
539 | | - struct phy_device *phy = ndev->phydev; |
---|
540 | | - |
---|
541 | | - link_state = phy->speed | (phy->duplex << 1) | phy->link; |
---|
542 | | - if (lp->last_link != link_state) { |
---|
543 | | - if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { |
---|
544 | | - if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) |
---|
545 | | - setspeed = 0; |
---|
546 | | - } else { |
---|
547 | | - if ((phy->speed == SPEED_1000) && |
---|
548 | | - (lp->phy_mode == PHY_INTERFACE_MODE_MII)) |
---|
549 | | - setspeed = 0; |
---|
550 | | - } |
---|
551 | | - |
---|
552 | | - if (setspeed == 1) { |
---|
553 | | - emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); |
---|
554 | | - emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; |
---|
555 | | - |
---|
556 | | - switch (phy->speed) { |
---|
557 | | - case SPEED_1000: |
---|
558 | | - emmc_reg |= XAE_EMMC_LINKSPD_1000; |
---|
559 | | - break; |
---|
560 | | - case SPEED_100: |
---|
561 | | - emmc_reg |= XAE_EMMC_LINKSPD_100; |
---|
562 | | - break; |
---|
563 | | - case SPEED_10: |
---|
564 | | - emmc_reg |= XAE_EMMC_LINKSPD_10; |
---|
565 | | - break; |
---|
566 | | - default: |
---|
567 | | - dev_err(&ndev->dev, "Speed other than 10, 100 " |
---|
568 | | - "or 1Gbps is not supported\n"); |
---|
569 | | - break; |
---|
570 | | - } |
---|
571 | | - |
---|
572 | | - axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); |
---|
573 | | - lp->last_link = link_state; |
---|
574 | | - phy_print_status(phy); |
---|
575 | | - } else { |
---|
576 | | - netdev_err(ndev, |
---|
577 | | - "Error setting Axi Ethernet mac speed\n"); |
---|
578 | | - } |
---|
579 | | - } |
---|
580 | | -} |
---|
581 | | - |
---|
582 | | -/** |
---|
583 | | - * axienet_start_xmit_done - Invoked once a transmit is completed by the |
---|
584 | | - * Axi DMA Tx channel. |
---|
585 | | - * @ndev: Pointer to the net_device structure |
---|
586 | | - * |
---|
587 | | - * This function is invoked from the Axi DMA Tx isr to notify the completion |
---|
588 | | - * of transmit operation. It clears fields in the corresponding Tx BDs and |
---|
589 | | - * unmaps the corresponding buffer so that CPU can regain ownership of the |
---|
590 | | - * buffer. It finally invokes "netif_wake_queue" to restart transmission if |
---|
591 | | - * required. |
---|
592 | | - */ |
---|
593 | | -static void axienet_start_xmit_done(struct net_device *ndev) |
---|
594 | | -{ |
---|
595 | | - u32 size = 0; |
---|
596 | | - u32 packets = 0; |
---|
597 | 616 | struct axienet_local *lp = netdev_priv(ndev); |
---|
598 | 617 | struct axidma_bd *cur_p; |
---|
599 | | - unsigned int status = 0; |
---|
| 618 | + int max_bds = nr_bds; |
---|
| 619 | + unsigned int status; |
---|
| 620 | + dma_addr_t phys; |
---|
| 621 | + int i; |
---|
600 | 622 | |
---|
601 | | - cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; |
---|
602 | | - status = cur_p->status; |
---|
603 | | - while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { |
---|
604 | | - dma_unmap_single(ndev->dev.parent, cur_p->phys, |
---|
605 | | - (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), |
---|
606 | | - DMA_TO_DEVICE); |
---|
607 | | - if (cur_p->app4) |
---|
608 | | - dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); |
---|
609 | | - /*cur_p->phys = 0;*/ |
---|
| 623 | + if (max_bds == -1) |
---|
| 624 | + max_bds = lp->tx_bd_num; |
---|
| 625 | + |
---|
| 626 | + for (i = 0; i < max_bds; i++) { |
---|
| 627 | + cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; |
---|
| 628 | + status = cur_p->status; |
---|
| 629 | + |
---|
| 630 | + /* If no number is given, clean up *all* descriptors that have |
---|
| 631 | + * been completed by the MAC. |
---|
| 632 | + */ |
---|
| 633 | + if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) |
---|
| 634 | + break; |
---|
| 635 | + |
---|
| 636 | + /* Ensure we see complete descriptor update */ |
---|
| 637 | + dma_rmb(); |
---|
| 638 | + phys = desc_get_phys_addr(lp, cur_p); |
---|
| 639 | + dma_unmap_single(ndev->dev.parent, phys, |
---|
| 640 | + (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), |
---|
| 641 | + DMA_TO_DEVICE); |
---|
| 642 | + |
---|
| 643 | + if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) |
---|
| 644 | + dev_consume_skb_irq(cur_p->skb); |
---|
| 645 | + |
---|
610 | 646 | cur_p->app0 = 0; |
---|
611 | 647 | cur_p->app1 = 0; |
---|
612 | 648 | cur_p->app2 = 0; |
---|
613 | 649 | cur_p->app4 = 0; |
---|
| 650 | + cur_p->skb = NULL; |
---|
| 651 | + /* ensure our transmit path and device don't prematurely see status cleared */ |
---|
| 652 | + wmb(); |
---|
| 653 | + cur_p->cntrl = 0; |
---|
614 | 654 | cur_p->status = 0; |
---|
615 | 655 | |
---|
616 | | - size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; |
---|
617 | | - packets++; |
---|
618 | | - |
---|
619 | | - ++lp->tx_bd_ci; |
---|
620 | | - lp->tx_bd_ci %= TX_BD_NUM; |
---|
621 | | - cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; |
---|
622 | | - status = cur_p->status; |
---|
| 656 | + if (sizep) |
---|
| 657 | + *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; |
---|
623 | 658 | } |
---|
624 | 659 | |
---|
625 | | - ndev->stats.tx_packets += packets; |
---|
626 | | - ndev->stats.tx_bytes += size; |
---|
627 | | - |
---|
628 | | - /* Matches barrier in axienet_start_xmit */ |
---|
629 | | - smp_mb(); |
---|
630 | | - |
---|
631 | | - netif_wake_queue(ndev); |
---|
| 660 | + return i; |
---|
632 | 661 | } |
---|
633 | 662 | |
---|
634 | 663 | /** |
---|
.. | .. |
---|
648 | 677 | int num_frag) |
---|
649 | 678 | { |
---|
650 | 679 | struct axidma_bd *cur_p; |
---|
651 | | - cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM]; |
---|
652 | | - if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) |
---|
| 680 | + |
---|
| 681 | + /* Ensure we see all descriptor updates from device or TX IRQ path */ |
---|
| 682 | + rmb(); |
---|
| 683 | + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; |
---|
| 684 | + if (cur_p->cntrl) |
---|
653 | 685 | return NETDEV_TX_BUSY; |
---|
654 | 686 | return 0; |
---|
| 687 | +} |
---|
| 688 | + |
---|
| 689 | +/** |
---|
| 690 | + * axienet_start_xmit_done - Invoked once a transmit is completed by the |
---|
| 691 | + * Axi DMA Tx channel. |
---|
| 692 | + * @ndev: Pointer to the net_device structure |
---|
| 693 | + * |
---|
| 694 | + * This function is invoked from the Axi DMA Tx isr to notify the completion |
---|
| 695 | + * of transmit operation. It clears fields in the corresponding Tx BDs and |
---|
| 696 | + * unmaps the corresponding buffer so that CPU can regain ownership of the |
---|
| 697 | + * buffer. It finally invokes "netif_wake_queue" to restart transmission if |
---|
| 698 | + * required. |
---|
| 699 | + */ |
---|
| 700 | +static void axienet_start_xmit_done(struct net_device *ndev) |
---|
| 701 | +{ |
---|
| 702 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 703 | + u32 packets = 0; |
---|
| 704 | + u32 size = 0; |
---|
| 705 | + |
---|
| 706 | + packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size); |
---|
| 707 | + |
---|
| 708 | + lp->tx_bd_ci += packets; |
---|
| 709 | + if (lp->tx_bd_ci >= lp->tx_bd_num) |
---|
| 710 | + lp->tx_bd_ci -= lp->tx_bd_num; |
---|
| 711 | + |
---|
| 712 | + ndev->stats.tx_packets += packets; |
---|
| 713 | + ndev->stats.tx_bytes += size; |
---|
| 714 | + |
---|
| 715 | + /* Matches barrier in axienet_start_xmit */ |
---|
| 716 | + smp_mb(); |
---|
| 717 | + |
---|
| 718 | + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
---|
| 719 | + netif_wake_queue(ndev); |
---|
655 | 720 | } |
---|
656 | 721 | |
---|
657 | 722 | /** |
---|
.. | .. |
---|
675 | 740 | u32 csum_start_off; |
---|
676 | 741 | u32 csum_index_off; |
---|
677 | 742 | skb_frag_t *frag; |
---|
678 | | - dma_addr_t tail_p; |
---|
| 743 | + dma_addr_t tail_p, phys; |
---|
679 | 744 | struct axienet_local *lp = netdev_priv(ndev); |
---|
680 | 745 | struct axidma_bd *cur_p; |
---|
| 746 | + u32 orig_tail_ptr = lp->tx_bd_tail; |
---|
681 | 747 | |
---|
682 | 748 | num_frag = skb_shinfo(skb)->nr_frags; |
---|
683 | 749 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
---|
684 | 750 | |
---|
685 | 751 | if (axienet_check_tx_bd_space(lp, num_frag + 1)) { |
---|
686 | | - if (netif_queue_stopped(ndev)) |
---|
687 | | - return NETDEV_TX_BUSY; |
---|
688 | | - |
---|
| 752 | + /* Should not happen as last start_xmit call should have |
---|
| 753 | + * checked for sufficient space and queue should only be |
---|
| 754 | + * woken when sufficient space is available. |
---|
| 755 | + */ |
---|
689 | 756 | netif_stop_queue(ndev); |
---|
690 | | - |
---|
691 | | - /* Matches barrier in axienet_start_xmit_done */ |
---|
692 | | - smp_mb(); |
---|
693 | | - |
---|
694 | | - /* Space might have just been freed - check again */ |
---|
695 | | - if (axienet_check_tx_bd_space(lp, num_frag + 1)) |
---|
696 | | - return NETDEV_TX_BUSY; |
---|
697 | | - |
---|
698 | | - netif_wake_queue(ndev); |
---|
| 757 | + if (net_ratelimit()) |
---|
| 758 | + netdev_warn(ndev, "TX ring unexpectedly full\n"); |
---|
| 759 | + return NETDEV_TX_BUSY; |
---|
699 | 760 | } |
---|
700 | 761 | |
---|
701 | 762 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
---|
.. | .. |
---|
713 | 774 | cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ |
---|
714 | 775 | } |
---|
715 | 776 | |
---|
| 777 | + phys = dma_map_single(ndev->dev.parent, skb->data, |
---|
| 778 | + skb_headlen(skb), DMA_TO_DEVICE); |
---|
| 779 | + if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { |
---|
| 780 | + if (net_ratelimit()) |
---|
| 781 | + netdev_err(ndev, "TX DMA mapping error\n"); |
---|
| 782 | + ndev->stats.tx_dropped++; |
---|
| 783 | + return NETDEV_TX_OK; |
---|
| 784 | + } |
---|
| 785 | + desc_set_phys_addr(lp, phys, cur_p); |
---|
716 | 786 | cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; |
---|
717 | | - cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, |
---|
718 | | - skb_headlen(skb), DMA_TO_DEVICE); |
---|
719 | 787 | |
---|
720 | 788 | for (ii = 0; ii < num_frag; ii++) { |
---|
721 | | - ++lp->tx_bd_tail; |
---|
722 | | - lp->tx_bd_tail %= TX_BD_NUM; |
---|
| 789 | + if (++lp->tx_bd_tail >= lp->tx_bd_num) |
---|
| 790 | + lp->tx_bd_tail = 0; |
---|
723 | 791 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
---|
724 | 792 | frag = &skb_shinfo(skb)->frags[ii]; |
---|
725 | | - cur_p->phys = dma_map_single(ndev->dev.parent, |
---|
726 | | - skb_frag_address(frag), |
---|
727 | | - skb_frag_size(frag), |
---|
728 | | - DMA_TO_DEVICE); |
---|
| 793 | + phys = dma_map_single(ndev->dev.parent, |
---|
| 794 | + skb_frag_address(frag), |
---|
| 795 | + skb_frag_size(frag), |
---|
| 796 | + DMA_TO_DEVICE); |
---|
| 797 | + if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { |
---|
| 798 | + if (net_ratelimit()) |
---|
| 799 | + netdev_err(ndev, "TX DMA mapping error\n"); |
---|
| 800 | + ndev->stats.tx_dropped++; |
---|
| 801 | + axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1, |
---|
| 802 | + NULL); |
---|
| 803 | + lp->tx_bd_tail = orig_tail_ptr; |
---|
| 804 | + |
---|
| 805 | + return NETDEV_TX_OK; |
---|
| 806 | + } |
---|
| 807 | + desc_set_phys_addr(lp, phys, cur_p); |
---|
729 | 808 | cur_p->cntrl = skb_frag_size(frag); |
---|
730 | 809 | } |
---|
731 | 810 | |
---|
732 | 811 | cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; |
---|
733 | | - cur_p->app4 = (unsigned long)skb; |
---|
| 812 | + cur_p->skb = skb; |
---|
734 | 813 | |
---|
735 | 814 | tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; |
---|
736 | 815 | /* Start the transfer */ |
---|
737 | | - axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); |
---|
738 | | - ++lp->tx_bd_tail; |
---|
739 | | - lp->tx_bd_tail %= TX_BD_NUM; |
---|
| 816 | + axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); |
---|
| 817 | + if (++lp->tx_bd_tail >= lp->tx_bd_num) |
---|
| 818 | + lp->tx_bd_tail = 0; |
---|
| 819 | + |
---|
| 820 | + /* Stop queue if next transmit may not have space */ |
---|
| 821 | + if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { |
---|
| 822 | + netif_stop_queue(ndev); |
---|
| 823 | + |
---|
| 824 | + /* Matches barrier in axienet_start_xmit_done */ |
---|
| 825 | + smp_mb(); |
---|
| 826 | + |
---|
| 827 | + /* Space might have just been freed - check again */ |
---|
| 828 | + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
---|
| 829 | + netif_wake_queue(ndev); |
---|
| 830 | + } |
---|
740 | 831 | |
---|
741 | 832 | return NETDEV_TX_OK; |
---|
742 | 833 | } |
---|
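Aside, not part of the patch: the descriptor rings are now sized at runtime (lp->tx_bd_num / lp->rx_bd_num) instead of by the old compile-time constants (whose comment noted "2^n for the best performance"), so several of the hot paths above advance ring indices with a compare-and-reset rather than the old "% TX_BD_NUM", presumably because a modulo with a runtime, not-necessarily-power-of-two divisor would cost a division. A minimal sketch of the wrap pattern (hypothetical helper, not driver API):

    /* Advance a ring index with wraparound for an arbitrary ring size. */
    static unsigned int ring_advance(unsigned int idx, unsigned int ring_size)
    {
            if (++idx >= ring_size)
                    idx = 0;
            return idx;
    }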
.. | .. |
---|
764 | 855 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
---|
765 | 856 | |
---|
766 | 857 | while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { |
---|
767 | | - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; |
---|
768 | | - skb = (struct sk_buff *) (cur_p->sw_id_offset); |
---|
769 | | - length = cur_p->app4 & 0x0000FFFF; |
---|
| 858 | + dma_addr_t phys; |
---|
770 | 859 | |
---|
771 | | - dma_unmap_single(ndev->dev.parent, cur_p->phys, |
---|
772 | | - lp->max_frm_size, |
---|
773 | | - DMA_FROM_DEVICE); |
---|
| 860 | + /* Ensure we see complete descriptor update */ |
---|
| 861 | + dma_rmb(); |
---|
774 | 862 | |
---|
775 | | - skb_put(skb, length); |
---|
776 | | - skb->protocol = eth_type_trans(skb, ndev); |
---|
777 | | - /*skb_checksum_none_assert(skb);*/ |
---|
778 | | - skb->ip_summed = CHECKSUM_NONE; |
---|
| 863 | + skb = cur_p->skb; |
---|
| 864 | + cur_p->skb = NULL; |
---|
779 | 865 | |
---|
780 | | - /* if we're doing Rx csum offload, set it up */ |
---|
781 | | - if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { |
---|
782 | | - csumstatus = (cur_p->app2 & |
---|
783 | | - XAE_FULL_CSUM_STATUS_MASK) >> 3; |
---|
784 | | - if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || |
---|
785 | | - (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { |
---|
786 | | - skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
| 866 | + /* skb could be NULL if a previous pass already received the |
---|
| 867 | + * packet for this slot in the ring, but failed to refill it |
---|
| 868 | + * with a newly allocated buffer. In this case, don't try to |
---|
| 869 | + * receive it again. |
---|
| 870 | + */ |
---|
| 871 | + if (likely(skb)) { |
---|
| 872 | + length = cur_p->app4 & 0x0000FFFF; |
---|
| 873 | + |
---|
| 874 | + phys = desc_get_phys_addr(lp, cur_p); |
---|
| 875 | + dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, |
---|
| 876 | + DMA_FROM_DEVICE); |
---|
| 877 | + |
---|
| 878 | + skb_put(skb, length); |
---|
| 879 | + skb->protocol = eth_type_trans(skb, ndev); |
---|
| 880 | + /*skb_checksum_none_assert(skb);*/ |
---|
| 881 | + skb->ip_summed = CHECKSUM_NONE; |
---|
| 882 | + |
---|
| 883 | + /* if we're doing Rx csum offload, set it up */ |
---|
| 884 | + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { |
---|
| 885 | + csumstatus = (cur_p->app2 & |
---|
| 886 | + XAE_FULL_CSUM_STATUS_MASK) >> 3; |
---|
| 887 | + if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || |
---|
| 888 | + csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { |
---|
| 889 | + skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
| 890 | + } |
---|
| 891 | + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && |
---|
| 892 | + skb->protocol == htons(ETH_P_IP) && |
---|
| 893 | + skb->len > 64) { |
---|
| 894 | + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); |
---|
| 895 | + skb->ip_summed = CHECKSUM_COMPLETE; |
---|
787 | 896 | } |
---|
788 | | - } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && |
---|
789 | | - skb->protocol == htons(ETH_P_IP) && |
---|
790 | | - skb->len > 64) { |
---|
791 | | - skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); |
---|
792 | | - skb->ip_summed = CHECKSUM_COMPLETE; |
---|
| 897 | + |
---|
| 898 | + netif_rx(skb); |
---|
| 899 | + |
---|
| 900 | + size += length; |
---|
| 901 | + packets++; |
---|
793 | 902 | } |
---|
794 | | - |
---|
795 | | - netif_rx(skb); |
---|
796 | | - |
---|
797 | | - size += length; |
---|
798 | | - packets++; |
---|
799 | 903 | |
---|
800 | 904 | new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); |
---|
801 | 905 | if (!new_skb) |
---|
802 | | - return; |
---|
| 906 | + break; |
---|
803 | 907 | |
---|
804 | | - cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, |
---|
805 | | - lp->max_frm_size, |
---|
806 | | - DMA_FROM_DEVICE); |
---|
| 908 | + phys = dma_map_single(ndev->dev.parent, new_skb->data, |
---|
| 909 | + lp->max_frm_size, |
---|
| 910 | + DMA_FROM_DEVICE); |
---|
| 911 | + if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { |
---|
| 912 | + if (net_ratelimit()) |
---|
| 913 | + netdev_err(ndev, "RX DMA mapping error\n"); |
---|
| 914 | + dev_kfree_skb(new_skb); |
---|
| 915 | + break; |
---|
| 916 | + } |
---|
| 917 | + desc_set_phys_addr(lp, phys, cur_p); |
---|
| 918 | + |
---|
807 | 919 | cur_p->cntrl = lp->max_frm_size; |
---|
808 | 920 | cur_p->status = 0; |
---|
809 | | - cur_p->sw_id_offset = (u32) new_skb; |
---|
| 921 | + cur_p->skb = new_skb; |
---|
810 | 922 | |
---|
811 | | - ++lp->rx_bd_ci; |
---|
812 | | - lp->rx_bd_ci %= RX_BD_NUM; |
---|
| 923 | + /* Only update tail_p to mark this slot as usable after it has |
---|
| 924 | + * been successfully refilled. |
---|
| 925 | + */ |
---|
| 926 | + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; |
---|
| 927 | + |
---|
| 928 | + if (++lp->rx_bd_ci >= lp->rx_bd_num) |
---|
| 929 | + lp->rx_bd_ci = 0; |
---|
813 | 930 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
---|
814 | 931 | } |
---|
815 | 932 | |
---|
.. | .. |
---|
817 | 934 | ndev->stats.rx_bytes += size; |
---|
818 | 935 | |
---|
819 | 936 | if (tail_p) |
---|
820 | | - axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); |
---|
| 937 | + axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); |
---|
821 | 938 | } |
---|
822 | 939 | |
---|
823 | 940 | /** |
---|
.. | .. |
---|
825 | 942 | * @irq: irq number |
---|
826 | 943 | * @_ndev: net_device pointer |
---|
827 | 944 | * |
---|
828 | | - * Return: IRQ_HANDLED for all cases. |
---|
| 945 | + * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. |
---|
829 | 946 | * |
---|
830 | 947 | * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done" |
---|
831 | 948 | * to complete the BD processing. |
---|
.. | .. |
---|
844 | 961 | goto out; |
---|
845 | 962 | } |
---|
846 | 963 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
---|
847 | | - dev_err(&ndev->dev, "No interrupts asserted in Tx path\n"); |
---|
| 964 | + return IRQ_NONE; |
---|
848 | 965 | if (status & XAXIDMA_IRQ_ERROR_MASK) { |
---|
849 | 966 | dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); |
---|
850 | | - dev_err(&ndev->dev, "Current BD is at: 0x%x\n", |
---|
| 967 | + dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n", |
---|
| 968 | + (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, |
---|
851 | 969 | (lp->tx_bd_v[lp->tx_bd_ci]).phys); |
---|
852 | 970 | |
---|
853 | 971 | cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
.. | .. |
---|
862 | 980 | /* Write to the Rx channel control register */ |
---|
863 | 981 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); |
---|
864 | 982 | |
---|
865 | | - tasklet_schedule(&lp->dma_err_tasklet); |
---|
| 983 | + schedule_work(&lp->dma_err_task); |
---|
866 | 984 | axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); |
---|
867 | 985 | } |
---|
868 | 986 | out: |
---|
.. | .. |
---|
874 | 992 | * @irq: irq number |
---|
875 | 993 | * @_ndev: net_device pointer |
---|
876 | 994 | * |
---|
877 | | - * Return: IRQ_HANDLED for all cases. |
---|
| 995 | + * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. |
---|
878 | 996 | * |
---|
879 | 997 | * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD |
---|
880 | 998 | * processing. |
---|
.. | .. |
---|
893 | 1011 | goto out; |
---|
894 | 1012 | } |
---|
895 | 1013 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
---|
896 | | - dev_err(&ndev->dev, "No interrupts asserted in Rx path\n"); |
---|
| 1014 | + return IRQ_NONE; |
---|
897 | 1015 | if (status & XAXIDMA_IRQ_ERROR_MASK) { |
---|
898 | 1016 | dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status); |
---|
899 | | - dev_err(&ndev->dev, "Current BD is at: 0x%x\n", |
---|
| 1017 | + dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n", |
---|
| 1018 | + (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, |
---|
900 | 1019 | (lp->rx_bd_v[lp->rx_bd_ci]).phys); |
---|
901 | 1020 | |
---|
902 | 1021 | cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
.. | .. |
---|
911 | 1030 | /* write to the Rx channel control register */ |
---|
912 | 1031 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); |
---|
913 | 1032 | |
---|
914 | | - tasklet_schedule(&lp->dma_err_tasklet); |
---|
| 1033 | + schedule_work(&lp->dma_err_task); |
---|
915 | 1034 | axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); |
---|
916 | 1035 | } |
---|
917 | 1036 | out: |
---|
918 | 1037 | return IRQ_HANDLED; |
---|
919 | 1038 | } |
---|
920 | 1039 | |
---|
921 | | -static void axienet_dma_err_handler(unsigned long data); |
---|
| 1040 | +/** |
---|
| 1041 | + * axienet_eth_irq - Ethernet core Isr. |
---|
| 1042 | + * @irq: irq number |
---|
| 1043 | + * @_ndev: net_device pointer |
---|
| 1044 | + * |
---|
| 1045 | + * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. |
---|
| 1046 | + * |
---|
| 1047 | + * Handle miscellaneous conditions indicated by Ethernet core IRQ. |
---|
| 1048 | + */ |
---|
| 1049 | +static irqreturn_t axienet_eth_irq(int irq, void *_ndev) |
---|
| 1050 | +{ |
---|
| 1051 | + struct net_device *ndev = _ndev; |
---|
| 1052 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1053 | + unsigned int pending; |
---|
| 1054 | + |
---|
| 1055 | + pending = axienet_ior(lp, XAE_IP_OFFSET); |
---|
| 1056 | + if (!pending) |
---|
| 1057 | + return IRQ_NONE; |
---|
| 1058 | + |
---|
| 1059 | + if (pending & XAE_INT_RXFIFOOVR_MASK) |
---|
| 1060 | + ndev->stats.rx_missed_errors++; |
---|
| 1061 | + |
---|
| 1062 | + if (pending & XAE_INT_RXRJECT_MASK) |
---|
| 1063 | + ndev->stats.rx_frame_errors++; |
---|
| 1064 | + |
---|
| 1065 | + axienet_iow(lp, XAE_IS_OFFSET, pending); |
---|
| 1066 | + return IRQ_HANDLED; |
---|
| 1067 | +} |
---|
| 1068 | + |
---|
| 1069 | +static void axienet_dma_err_handler(struct work_struct *work); |
---|
922 | 1070 | |
---|
923 | 1071 | /** |
---|
924 | 1072 | * axienet_open - Driver open routine. |
---|
.. | .. |
---|
927 | 1075 | * Return: 0, on success. |
---|
928 | 1076 | * non-zero error value on failure |
---|
929 | 1077 | * |
---|
930 | | - * This is the driver open routine. It calls phy_start to start the PHY device. |
---|
| 1078 | + * This is the driver open routine. It calls phylink_start to start the |
---|
| 1079 | + * PHY device. |
---|
931 | 1080 | * It also allocates interrupt service routines, enables the interrupt lines |
---|
932 | 1081 | * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer |
---|
933 | 1082 | * descriptors are initialized. |
---|
934 | 1083 | */ |
---|
935 | 1084 | static int axienet_open(struct net_device *ndev) |
---|
936 | 1085 | { |
---|
937 | | - int ret, mdio_mcreg; |
---|
| 1086 | + int ret; |
---|
938 | 1087 | struct axienet_local *lp = netdev_priv(ndev); |
---|
939 | | - struct phy_device *phydev = NULL; |
---|
940 | 1088 | |
---|
941 | 1089 | dev_dbg(&ndev->dev, "axienet_open()\n"); |
---|
942 | 1090 | |
---|
943 | | - mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); |
---|
944 | | - ret = axienet_mdio_wait_until_ready(lp); |
---|
945 | | - if (ret < 0) |
---|
946 | | - return ret; |
---|
947 | 1091 | /* Disable the MDIO interface till Axi Ethernet Reset is completed. |
---|
948 | 1092 | * When we do an Axi Ethernet reset, it resets the complete core |
---|
949 | | - * including the MDIO. If MDIO is not disabled when the reset |
---|
950 | | - * process is started, MDIO will be broken afterwards. |
---|
| 1093 | + * including the MDIO. MDIO must be disabled before resetting |
---|
| 1094 | + * and re-enabled afterwards. |
---|
| 1095 | + * Hold MDIO bus lock to avoid MDIO accesses during the reset. |
---|
951 | 1096 | */ |
---|
952 | | - axienet_iow(lp, XAE_MDIO_MC_OFFSET, |
---|
953 | | - (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK))); |
---|
954 | | - axienet_device_reset(ndev); |
---|
955 | | - /* Enable the MDIO */ |
---|
956 | | - axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); |
---|
957 | | - ret = axienet_mdio_wait_until_ready(lp); |
---|
| 1097 | + mutex_lock(&lp->mii_bus->mdio_lock); |
---|
| 1098 | + axienet_mdio_disable(lp); |
---|
| 1099 | + ret = axienet_device_reset(ndev); |
---|
| 1100 | + if (ret == 0) |
---|
| 1101 | + ret = axienet_mdio_enable(lp); |
---|
| 1102 | + mutex_unlock(&lp->mii_bus->mdio_lock); |
---|
958 | 1103 | if (ret < 0) |
---|
959 | 1104 | return ret; |
---|
960 | 1105 | |
---|
961 | | - if (lp->phy_node) { |
---|
962 | | - phydev = of_phy_connect(lp->ndev, lp->phy_node, |
---|
963 | | - axienet_adjust_link, 0, lp->phy_mode); |
---|
964 | | - |
---|
965 | | - if (!phydev) |
---|
966 | | - dev_err(lp->dev, "of_phy_connect() failed\n"); |
---|
967 | | - else |
---|
968 | | - phy_start(phydev); |
---|
| 1106 | + ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); |
---|
| 1107 | + if (ret) { |
---|
| 1108 | + dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); |
---|
| 1109 | + return ret; |
---|
969 | 1110 | } |
---|
970 | 1111 | |
---|
971 | | - /* Enable tasklets for Axi DMA error handling */ |
---|
972 | | - tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, |
---|
973 | | - (unsigned long) lp); |
---|
| 1112 | + phylink_start(lp->phylink); |
---|
| 1113 | + |
---|
| 1114 | + /* Enable worker thread for Axi DMA error handling */ |
---|
| 1115 | + INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); |
---|
974 | 1116 | |
---|
975 | 1117 | /* Enable interrupts for Axi DMA Tx */ |
---|
976 | | - ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); |
---|
| 1118 | + ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, |
---|
| 1119 | + ndev->name, ndev); |
---|
977 | 1120 | if (ret) |
---|
978 | 1121 | goto err_tx_irq; |
---|
979 | 1122 | /* Enable interrupts for Axi DMA Rx */ |
---|
980 | | - ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); |
---|
| 1123 | + ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, |
---|
| 1124 | + ndev->name, ndev); |
---|
981 | 1125 | if (ret) |
---|
982 | 1126 | goto err_rx_irq; |
---|
| 1127 | + /* Enable interrupts for Axi Ethernet core (if defined) */ |
---|
| 1128 | + if (lp->eth_irq > 0) { |
---|
| 1129 | + ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, |
---|
| 1130 | + ndev->name, ndev); |
---|
| 1131 | + if (ret) |
---|
| 1132 | + goto err_eth_irq; |
---|
| 1133 | + } |
---|
983 | 1134 | |
---|
984 | 1135 | return 0; |
---|
985 | 1136 | |
---|
| 1137 | +err_eth_irq: |
---|
| 1138 | + free_irq(lp->rx_irq, ndev); |
---|
986 | 1139 | err_rx_irq: |
---|
987 | 1140 | free_irq(lp->tx_irq, ndev); |
---|
988 | 1141 | err_tx_irq: |
---|
989 | | - if (phydev) |
---|
990 | | - phy_disconnect(phydev); |
---|
991 | | - tasklet_kill(&lp->dma_err_tasklet); |
---|
| 1142 | + phylink_stop(lp->phylink); |
---|
| 1143 | + phylink_disconnect_phy(lp->phylink); |
---|
| 1144 | + cancel_work_sync(&lp->dma_err_task); |
---|
992 | 1145 | dev_err(lp->dev, "request_irq() failed\n"); |
---|
993 | 1146 | return ret; |
---|
994 | 1147 | } |
---|
.. | .. |
---|
999 | 1152 | * |
---|
1000 | 1153 | * Return: 0, on success. |
---|
1001 | 1154 | * |
---|
1002 | | - * This is the driver stop routine. It calls phy_disconnect to stop the PHY |
---|
| 1155 | + * This is the driver stop routine. It calls phylink_disconnect to stop the PHY |
---|
1003 | 1156 | * device. It also removes the interrupt handlers and disables the interrupts. |
---|
1004 | 1157 | * The Axi DMA Tx/Rx BDs are released. |
---|
1005 | 1158 | */ |
---|
1006 | 1159 | static int axienet_stop(struct net_device *ndev) |
---|
1007 | 1160 | { |
---|
1008 | | - u32 cr; |
---|
| 1161 | + u32 cr, sr; |
---|
| 1162 | + int count; |
---|
1009 | 1163 | struct axienet_local *lp = netdev_priv(ndev); |
---|
1010 | 1164 | |
---|
1011 | 1165 | dev_dbg(&ndev->dev, "axienet_close()\n"); |
---|
1012 | 1166 | |
---|
1013 | | - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
---|
1014 | | - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, |
---|
1015 | | - cr & (~XAXIDMA_CR_RUNSTOP_MASK)); |
---|
1016 | | - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
1017 | | - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, |
---|
1018 | | - cr & (~XAXIDMA_CR_RUNSTOP_MASK)); |
---|
| 1167 | + phylink_stop(lp->phylink); |
---|
| 1168 | + phylink_disconnect_phy(lp->phylink); |
---|
| 1169 | + |
---|
1019 | 1170 | axienet_setoptions(ndev, lp->options & |
---|
1020 | 1171 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
---|
1021 | 1172 | |
---|
1022 | | - tasklet_kill(&lp->dma_err_tasklet); |
---|
| 1173 | + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
---|
| 1174 | + cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); |
---|
| 1175 | + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); |
---|
1023 | 1176 | |
---|
| 1177 | + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
| 1178 | + cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); |
---|
| 1179 | + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); |
---|
| 1180 | + |
---|
| 1181 | + axienet_iow(lp, XAE_IE_OFFSET, 0); |
---|
| 1182 | + |
---|
| 1183 | + /* Give DMAs a chance to halt gracefully */ |
---|
| 1184 | + sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
---|
| 1185 | + for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { |
---|
| 1186 | + msleep(20); |
---|
| 1187 | + sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
---|
| 1188 | + } |
---|
| 1189 | + |
---|
| 1190 | + sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
---|
| 1191 | + for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { |
---|
| 1192 | + msleep(20); |
---|
| 1193 | + sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
---|
| 1194 | + } |
---|
| 1195 | + |
---|
| 1196 | + /* Do a reset to ensure DMA is really stopped */ |
---|
| 1197 | + mutex_lock(&lp->mii_bus->mdio_lock); |
---|
| 1198 | + axienet_mdio_disable(lp); |
---|
| 1199 | + __axienet_device_reset(lp); |
---|
| 1200 | + axienet_mdio_enable(lp); |
---|
| 1201 | + mutex_unlock(&lp->mii_bus->mdio_lock); |
---|
| 1202 | + |
---|
| 1203 | + cancel_work_sync(&lp->dma_err_task); |
---|
| 1204 | + |
---|
| 1205 | + if (lp->eth_irq > 0) |
---|
| 1206 | + free_irq(lp->eth_irq, ndev); |
---|
1024 | 1207 | free_irq(lp->tx_irq, ndev); |
---|
1025 | 1208 | free_irq(lp->rx_irq, ndev); |
---|
1026 | | - |
---|
1027 | | - if (ndev->phydev) |
---|
1028 | | - phy_disconnect(ndev->phydev); |
---|
1029 | 1209 | |
---|
1030 | 1210 | axienet_dma_bd_release(ndev); |
---|
1031 | 1211 | return 0; |
---|
.. | .. |
---|
1078 | 1258 | } |
---|
1079 | 1259 | #endif |
---|
1080 | 1260 | |
---|
| 1261 | +static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
---|
| 1262 | +{ |
---|
| 1263 | + struct axienet_local *lp = netdev_priv(dev); |
---|
| 1264 | + |
---|
| 1265 | + if (!netif_running(dev)) |
---|
| 1266 | + return -EINVAL; |
---|
| 1267 | + |
---|
| 1268 | + return phylink_mii_ioctl(lp->phylink, rq, cmd); |
---|
| 1269 | +} |
---|
| 1270 | + |
---|
1081 | 1271 | static const struct net_device_ops axienet_netdev_ops = { |
---|
1082 | 1272 | .ndo_open = axienet_open, |
---|
1083 | 1273 | .ndo_stop = axienet_stop, |
---|
.. | .. |
---|
1085 | 1275 | .ndo_change_mtu = axienet_change_mtu, |
---|
1086 | 1276 | .ndo_set_mac_address = netdev_set_mac_address, |
---|
1087 | 1277 | .ndo_validate_addr = eth_validate_addr, |
---|
| 1278 | + .ndo_do_ioctl = axienet_ioctl, |
---|
1088 | 1279 | .ndo_set_rx_mode = axienet_set_multicast_list, |
---|
1089 | 1280 | #ifdef CONFIG_NET_POLL_CONTROLLER |
---|
1090 | 1281 | .ndo_poll_controller = axienet_poll_controller, |
---|
.. | .. |
---|
1165 | 1356 | data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); |
---|
1166 | 1357 | data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); |
---|
1167 | 1358 | data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); |
---|
1168 | | - data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET); |
---|
1169 | | - data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET); |
---|
1170 | | - data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET); |
---|
1171 | | - data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET); |
---|
1172 | 1359 | data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); |
---|
1173 | 1360 | data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); |
---|
1174 | 1361 | data[29] = axienet_ior(lp, XAE_FMI_OFFSET); |
---|
1175 | 1362 | data[30] = axienet_ior(lp, XAE_AF0_OFFSET); |
---|
1176 | 1363 | data[31] = axienet_ior(lp, XAE_AF1_OFFSET); |
---|
| 1364 | + data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
| 1365 | + data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
---|
| 1366 | + data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); |
---|
| 1367 | + data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); |
---|
| 1368 | + data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
---|
| 1369 | + data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
---|
| 1370 | + data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); |
---|
| 1371 | + data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); |
---|
| 1372 | +} |
---|
| 1373 | + |
---|
| 1374 | +static void axienet_ethtools_get_ringparam(struct net_device *ndev, |
---|
| 1375 | + struct ethtool_ringparam *ering) |
---|
| 1376 | +{ |
---|
| 1377 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1378 | + |
---|
| 1379 | + ering->rx_max_pending = RX_BD_NUM_MAX; |
---|
| 1380 | + ering->rx_mini_max_pending = 0; |
---|
| 1381 | + ering->rx_jumbo_max_pending = 0; |
---|
| 1382 | + ering->tx_max_pending = TX_BD_NUM_MAX; |
---|
| 1383 | + ering->rx_pending = lp->rx_bd_num; |
---|
| 1384 | + ering->rx_mini_pending = 0; |
---|
| 1385 | + ering->rx_jumbo_pending = 0; |
---|
| 1386 | + ering->tx_pending = lp->tx_bd_num; |
---|
| 1387 | +} |
---|
| 1388 | + |
---|
| 1389 | +static int axienet_ethtools_set_ringparam(struct net_device *ndev, |
---|
| 1390 | + struct ethtool_ringparam *ering) |
---|
| 1391 | +{ |
---|
| 1392 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1393 | + |
---|
| 1394 | + if (ering->rx_pending > RX_BD_NUM_MAX || |
---|
| 1395 | + ering->rx_mini_pending || |
---|
| 1396 | + ering->rx_jumbo_pending || |
---|
| 1397 | + ering->tx_pending < TX_BD_NUM_MIN || |
---|
| 1398 | + ering->tx_pending > TX_BD_NUM_MAX) |
---|
| 1399 | + return -EINVAL; |
---|
| 1400 | + |
---|
| 1401 | + if (netif_running(ndev)) |
---|
| 1402 | + return -EBUSY; |
---|
| 1403 | + |
---|
| 1404 | + lp->rx_bd_num = ering->rx_pending; |
---|
| 1405 | + lp->tx_bd_num = ering->tx_pending; |
---|
| 1406 | + return 0; |
---|
1177 | 1407 | } |
---|
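Usage note, not part of the patch: the two handlers above back the standard ethtool ring interface, so the descriptor counts would typically be changed from userspace with something like "ethtool -G eth1 rx 512 tx 256" (the interface name is only an example). Per the -EBUSY check, the interface has to be down when the sizes are changed; the new values then take effect the next time the interface is opened and the rings are reallocated in axienet_dma_bd_init().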
1178 | 1408 | |
---|
1179 | 1409 | /** |
---|
.. | .. |
---|
1189 | 1419 | axienet_ethtools_get_pauseparam(struct net_device *ndev, |
---|
1190 | 1420 | struct ethtool_pauseparam *epauseparm) |
---|
1191 | 1421 | { |
---|
1192 | | - u32 regval; |
---|
1193 | 1422 | struct axienet_local *lp = netdev_priv(ndev); |
---|
1194 | | - epauseparm->autoneg = 0; |
---|
1195 | | - regval = axienet_ior(lp, XAE_FCC_OFFSET); |
---|
1196 | | - epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK; |
---|
1197 | | - epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK; |
---|
| 1423 | + |
---|
| 1424 | + phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); |
---|
1198 | 1425 | } |
---|
1199 | 1426 | |
---|
1200 | 1427 | /** |
---|
.. | .. |
---|
1213 | 1440 | axienet_ethtools_set_pauseparam(struct net_device *ndev, |
---|
1214 | 1441 | struct ethtool_pauseparam *epauseparm) |
---|
1215 | 1442 | { |
---|
1216 | | - u32 regval = 0; |
---|
1217 | 1443 | struct axienet_local *lp = netdev_priv(ndev); |
---|
1218 | 1444 | |
---|
1219 | | - if (netif_running(ndev)) { |
---|
1220 | | - netdev_err(ndev, |
---|
1221 | | - "Please stop netif before applying configuration\n"); |
---|
1222 | | - return -EFAULT; |
---|
1223 | | - } |
---|
1224 | | - |
---|
1225 | | - regval = axienet_ior(lp, XAE_FCC_OFFSET); |
---|
1226 | | - if (epauseparm->tx_pause) |
---|
1227 | | - regval |= XAE_FCC_FCTX_MASK; |
---|
1228 | | - else |
---|
1229 | | - regval &= ~XAE_FCC_FCTX_MASK; |
---|
1230 | | - if (epauseparm->rx_pause) |
---|
1231 | | - regval |= XAE_FCC_FCRX_MASK; |
---|
1232 | | - else |
---|
1233 | | - regval &= ~XAE_FCC_FCRX_MASK; |
---|
1234 | | - axienet_iow(lp, XAE_FCC_OFFSET, regval); |
---|
1235 | | - |
---|
1236 | | - return 0; |
---|
| 1445 | + return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); |
---|
1237 | 1446 | } |
---|
1238 | 1447 | |
---|
1239 | 1448 | /** |
---|
.. | .. |
---|
1283 | 1492 | return -EFAULT; |
---|
1284 | 1493 | } |
---|
1285 | 1494 | |
---|
1286 | | - if ((ecoalesce->rx_coalesce_usecs) || |
---|
1287 | | - (ecoalesce->rx_coalesce_usecs_irq) || |
---|
1288 | | - (ecoalesce->rx_max_coalesced_frames_irq) || |
---|
1289 | | - (ecoalesce->tx_coalesce_usecs) || |
---|
1290 | | - (ecoalesce->tx_coalesce_usecs_irq) || |
---|
1291 | | - (ecoalesce->tx_max_coalesced_frames_irq) || |
---|
1292 | | - (ecoalesce->stats_block_coalesce_usecs) || |
---|
1293 | | - (ecoalesce->use_adaptive_rx_coalesce) || |
---|
1294 | | - (ecoalesce->use_adaptive_tx_coalesce) || |
---|
1295 | | - (ecoalesce->pkt_rate_low) || |
---|
1296 | | - (ecoalesce->rx_coalesce_usecs_low) || |
---|
1297 | | - (ecoalesce->rx_max_coalesced_frames_low) || |
---|
1298 | | - (ecoalesce->tx_coalesce_usecs_low) || |
---|
1299 | | - (ecoalesce->tx_max_coalesced_frames_low) || |
---|
1300 | | - (ecoalesce->pkt_rate_high) || |
---|
1301 | | - (ecoalesce->rx_coalesce_usecs_high) || |
---|
1302 | | - (ecoalesce->rx_max_coalesced_frames_high) || |
---|
1303 | | - (ecoalesce->tx_coalesce_usecs_high) || |
---|
1304 | | - (ecoalesce->tx_max_coalesced_frames_high) || |
---|
1305 | | - (ecoalesce->rate_sample_interval)) |
---|
1306 | | - return -EOPNOTSUPP; |
---|
1307 | 1495 | if (ecoalesce->rx_max_coalesced_frames) |
---|
1308 | 1496 | lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; |
---|
1309 | 1497 | if (ecoalesce->tx_max_coalesced_frames) |
---|
.. | .. |
---|
1312 | 1500 | return 0; |
---|
1313 | 1501 | } |
---|
1314 | 1502 | |
---|
| 1503 | +static int |
---|
| 1504 | +axienet_ethtools_get_link_ksettings(struct net_device *ndev, |
---|
| 1505 | + struct ethtool_link_ksettings *cmd) |
---|
| 1506 | +{ |
---|
| 1507 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1508 | + |
---|
| 1509 | + return phylink_ethtool_ksettings_get(lp->phylink, cmd); |
---|
| 1510 | +} |
---|
| 1511 | + |
---|
| 1512 | +static int |
---|
| 1513 | +axienet_ethtools_set_link_ksettings(struct net_device *ndev, |
---|
| 1514 | + const struct ethtool_link_ksettings *cmd) |
---|
| 1515 | +{ |
---|
| 1516 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1517 | + |
---|
| 1518 | + return phylink_ethtool_ksettings_set(lp->phylink, cmd); |
---|
| 1519 | +} |
---|
| 1520 | + |
---|
1315 | 1521 | static const struct ethtool_ops axienet_ethtool_ops = { |
---|
| 1522 | + .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, |
---|
1316 | 1523 | .get_drvinfo = axienet_ethtools_get_drvinfo, |
---|
1317 | 1524 | .get_regs_len = axienet_ethtools_get_regs_len, |
---|
1318 | 1525 | .get_regs = axienet_ethtools_get_regs, |
---|
1319 | 1526 | .get_link = ethtool_op_get_link, |
---|
| 1527 | + .get_ringparam = axienet_ethtools_get_ringparam, |
---|
| 1528 | + .set_ringparam = axienet_ethtools_set_ringparam, |
---|
1320 | 1529 | .get_pauseparam = axienet_ethtools_get_pauseparam, |
---|
1321 | 1530 | .set_pauseparam = axienet_ethtools_set_pauseparam, |
---|
1322 | 1531 | .get_coalesce = axienet_ethtools_get_coalesce, |
---|
1323 | 1532 | .set_coalesce = axienet_ethtools_set_coalesce, |
---|
1324 | | - .get_link_ksettings = phy_ethtool_get_link_ksettings, |
---|
1325 | | - .set_link_ksettings = phy_ethtool_set_link_ksettings, |
---|
| 1533 | + .get_link_ksettings = axienet_ethtools_get_link_ksettings, |
---|
| 1534 | + .set_link_ksettings = axienet_ethtools_set_link_ksettings, |
---|
| 1535 | +}; |
---|
| 1536 | + |
---|
| 1537 | +static void axienet_validate(struct phylink_config *config, |
---|
| 1538 | + unsigned long *supported, |
---|
| 1539 | + struct phylink_link_state *state) |
---|
| 1540 | +{ |
---|
| 1541 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 1542 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1543 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
---|
| 1544 | + |
---|
| 1545 | + /* Only support the mode we are configured for */ |
---|
| 1546 | + if (state->interface != PHY_INTERFACE_MODE_NA && |
---|
| 1547 | + state->interface != lp->phy_mode) { |
---|
| 1548 | + netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n", |
---|
| 1549 | + phy_modes(state->interface), |
---|
| 1550 | + phy_modes(lp->phy_mode)); |
---|
| 1551 | + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 1552 | + return; |
---|
| 1553 | + } |
---|
| 1554 | + |
---|
| 1555 | + phylink_set(mask, Autoneg); |
---|
| 1556 | + phylink_set_port_modes(mask); |
---|
| 1557 | + |
---|
| 1558 | + phylink_set(mask, Asym_Pause); |
---|
| 1559 | + phylink_set(mask, Pause); |
---|
| 1560 | + |
---|
| 1561 | + switch (state->interface) { |
---|
| 1562 | + case PHY_INTERFACE_MODE_NA: |
---|
| 1563 | + case PHY_INTERFACE_MODE_1000BASEX: |
---|
| 1564 | + case PHY_INTERFACE_MODE_SGMII: |
---|
| 1565 | + case PHY_INTERFACE_MODE_GMII: |
---|
| 1566 | + case PHY_INTERFACE_MODE_RGMII: |
---|
| 1567 | + case PHY_INTERFACE_MODE_RGMII_ID: |
---|
| 1568 | + case PHY_INTERFACE_MODE_RGMII_RXID: |
---|
| 1569 | + case PHY_INTERFACE_MODE_RGMII_TXID: |
---|
| 1570 | + phylink_set(mask, 1000baseX_Full); |
---|
| 1571 | + phylink_set(mask, 1000baseT_Full); |
---|
| 1572 | + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) |
---|
| 1573 | + break; |
---|
| 1574 | + fallthrough; |
---|
| 1575 | + case PHY_INTERFACE_MODE_MII: |
---|
| 1576 | + phylink_set(mask, 100baseT_Full); |
---|
| 1577 | + phylink_set(mask, 10baseT_Full); |
---|
| 1578 | + default: |
---|
| 1579 | + break; |
---|
| 1580 | + } |
---|
| 1581 | + |
---|
| 1582 | + bitmap_and(supported, supported, mask, |
---|
| 1583 | + __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 1584 | + bitmap_and(state->advertising, state->advertising, mask, |
---|
| 1585 | + __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 1586 | +} |
---|
| 1587 | + |
---|
| 1588 | +static void axienet_mac_pcs_get_state(struct phylink_config *config, |
---|
| 1589 | + struct phylink_link_state *state) |
---|
| 1590 | +{ |
---|
| 1591 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 1592 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1593 | + |
---|
| 1594 | + switch (state->interface) { |
---|
| 1595 | + case PHY_INTERFACE_MODE_SGMII: |
---|
| 1596 | + case PHY_INTERFACE_MODE_1000BASEX: |
---|
| 1597 | + phylink_mii_c22_pcs_get_state(lp->pcs_phy, state); |
---|
| 1598 | + break; |
---|
| 1599 | + default: |
---|
| 1600 | + break; |
---|
| 1601 | + } |
---|
| 1602 | +} |
---|
| 1603 | + |
---|
| 1604 | +static void axienet_mac_an_restart(struct phylink_config *config) |
---|
| 1605 | +{ |
---|
| 1606 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 1607 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1608 | + |
---|
| 1609 | + phylink_mii_c22_pcs_an_restart(lp->pcs_phy); |
---|
| 1610 | +} |
---|
| 1611 | + |
---|
| 1612 | +static void axienet_mac_config(struct phylink_config *config, unsigned int mode, |
---|
| 1613 | + const struct phylink_link_state *state) |
---|
| 1614 | +{ |
---|
| 1615 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 1616 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1617 | + int ret; |
---|
| 1618 | + |
---|
| 1619 | + switch (state->interface) { |
---|
| 1620 | + case PHY_INTERFACE_MODE_SGMII: |
---|
| 1621 | + case PHY_INTERFACE_MODE_1000BASEX: |
---|
| 1622 | + ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode, |
---|
| 1623 | + state->interface, |
---|
| 1624 | + state->advertising); |
---|
| 1625 | + if (ret < 0) |
---|
| 1626 | + netdev_warn(ndev, "Failed to configure PCS: %d\n", |
---|
| 1627 | + ret); |
---|
| 1628 | + break; |
---|
| 1629 | + |
---|
| 1630 | + default: |
---|
| 1631 | + break; |
---|
| 1632 | + } |
---|
| 1633 | +} |
---|
| 1634 | + |
---|
| 1635 | +static void axienet_mac_link_down(struct phylink_config *config, |
---|
| 1636 | + unsigned int mode, |
---|
| 1637 | + phy_interface_t interface) |
---|
| 1638 | +{ |
---|
| 1639 | + /* nothing meaningful to do */ |
---|
| 1640 | +} |
---|
| 1641 | + |
---|
| 1642 | +static void axienet_mac_link_up(struct phylink_config *config, |
---|
| 1643 | + struct phy_device *phy, |
---|
| 1644 | + unsigned int mode, phy_interface_t interface, |
---|
| 1645 | + int speed, int duplex, |
---|
| 1646 | + bool tx_pause, bool rx_pause) |
---|
| 1647 | +{ |
---|
| 1648 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 1649 | + struct axienet_local *lp = netdev_priv(ndev); |
---|
| 1650 | + u32 emmc_reg, fcc_reg; |
---|
| 1651 | + |
---|
| 1652 | + emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); |
---|
| 1653 | + emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; |
---|
| 1654 | + |
---|
| 1655 | + switch (speed) { |
---|
| 1656 | + case SPEED_1000: |
---|
| 1657 | + emmc_reg |= XAE_EMMC_LINKSPD_1000; |
---|
| 1658 | + break; |
---|
| 1659 | + case SPEED_100: |
---|
| 1660 | + emmc_reg |= XAE_EMMC_LINKSPD_100; |
---|
| 1661 | + break; |
---|
| 1662 | + case SPEED_10: |
---|
| 1663 | + emmc_reg |= XAE_EMMC_LINKSPD_10; |
---|
| 1664 | + break; |
---|
| 1665 | + default: |
---|
| 1666 | + dev_err(&ndev->dev, |
---|
| 1667 | + "Speed other than 10, 100 or 1000 Mbps is not supported\n"); |
---|
| 1668 | + break; |
---|
| 1669 | + } |
---|
| 1670 | + |
---|
| 1671 | + axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); |
---|
| 1672 | + |
---|
| 1673 | + fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); |
---|
| 1674 | + if (tx_pause) |
---|
| 1675 | + fcc_reg |= XAE_FCC_FCTX_MASK; |
---|
| 1676 | + else |
---|
| 1677 | + fcc_reg &= ~XAE_FCC_FCTX_MASK; |
---|
| 1678 | + if (rx_pause) |
---|
| 1679 | + fcc_reg |= XAE_FCC_FCRX_MASK; |
---|
| 1680 | + else |
---|
| 1681 | + fcc_reg &= ~XAE_FCC_FCRX_MASK; |
---|
| 1682 | + axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); |
---|
| 1683 | +} |
---|
| 1684 | + |
---|
| 1685 | +static const struct phylink_mac_ops axienet_phylink_ops = { |
---|
| 1686 | + .validate = axienet_validate, |
---|
| 1687 | + .mac_pcs_get_state = axienet_mac_pcs_get_state, |
---|
| 1688 | + .mac_an_restart = axienet_mac_an_restart, |
---|
| 1689 | + .mac_config = axienet_mac_config, |
---|
| 1690 | + .mac_link_down = axienet_mac_link_down, |
---|
| 1691 | + .mac_link_up = axienet_mac_link_up, |
---|
1326 | 1692 | }; |
---|
1327 | 1693 | |
---|
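The callback table above only takes effect once it has been handed to phylink: the probe path further down creates the phylink instance with it, and the open/stop paths of the driver then attach and detach the PHY. The fragment below is a minimal, hypothetical sketch of that wiring (it assumes the lp->phylink, lp->phylink_config and lp->phy_mode fields used elsewhere in this file) and is not the exact driver code.

```c
/* Hypothetical sketch of how the ops table above is wired up. */
#include <linux/phylink.h>

static int example_phylink_setup(struct axienet_local *lp,
				 struct platform_device *pdev)
{
	lp->phylink_config.dev = &lp->ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode, &axienet_phylink_ops);
	return PTR_ERR_OR_ZERO(lp->phylink);
}

static int example_phylink_attach(struct axienet_local *lp)
{
	/* Typically called from ndo_open: bind the DT-described PHY and
	 * start the link state machine, which then invokes mac_config()
	 * and mac_link_up() above as the link resolves.
	 */
	int ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);

	if (ret)
		return ret;
	phylink_start(lp->phylink);
	return 0;
}
```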
1328 | 1694 | /** |
---|
1329 | | - * axienet_dma_err_handler - Tasklet handler for Axi DMA Error |
---|
1330 | | - * @data: Data passed |
---|
| 1695 | + * axienet_dma_err_handler - Work queue task for Axi DMA Error |
---|
| 1696 | + * @work: pointer to work_struct |
---|
1331 | 1697 | * |
---|
1332 | 1698 | * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the |
---|
1333 | 1699 | * Tx/Rx BDs. |
---|
1334 | 1700 | */ |
---|
1335 | | -static void axienet_dma_err_handler(unsigned long data) |
---|
| 1701 | +static void axienet_dma_err_handler(struct work_struct *work) |
---|
1336 | 1702 | { |
---|
1337 | 1703 | u32 axienet_status; |
---|
1338 | 1704 | u32 cr, i; |
---|
1339 | | - int mdio_mcreg; |
---|
1340 | | - struct axienet_local *lp = (struct axienet_local *) data; |
---|
| 1705 | + struct axienet_local *lp = container_of(work, struct axienet_local, |
---|
| 1706 | + dma_err_task); |
---|
1341 | 1707 | struct net_device *ndev = lp->ndev; |
---|
1342 | 1708 | struct axidma_bd *cur_p; |
---|
1343 | 1709 | |
---|
1344 | 1710 | axienet_setoptions(ndev, lp->options & |
---|
1345 | 1711 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
---|
1346 | | - mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); |
---|
1347 | | - axienet_mdio_wait_until_ready(lp); |
---|
1348 | 1712 | /* Disable the MDIO interface until the Axi Ethernet reset is completed. |
---|
1349 | 1713 | * When we do an Axi Ethernet reset, it resets the complete core |
---|
1350 | | - * including the MDIO. So if MDIO is not disabled when the reset |
---|
1351 | | - * process is started, MDIO will be broken afterwards. |
---|
| 1714 | + * including the MDIO. MDIO must be disabled before resetting |
---|
| 1715 | + * and re-enabled afterwards. |
---|
| 1716 | + * Hold MDIO bus lock to avoid MDIO accesses during the reset. |
---|
1352 | 1717 | */ |
---|
1353 | | - axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg & |
---|
1354 | | - ~XAE_MDIO_MC_MDIOEN_MASK)); |
---|
| 1718 | + mutex_lock(&lp->mii_bus->mdio_lock); |
---|
| 1719 | + axienet_mdio_disable(lp); |
---|
| 1720 | + __axienet_device_reset(lp); |
---|
| 1721 | + axienet_mdio_enable(lp); |
---|
| 1722 | + mutex_unlock(&lp->mii_bus->mdio_lock); |
---|
1355 | 1723 | |
---|
1356 | | - __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET); |
---|
1357 | | - __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET); |
---|
1358 | | - |
---|
1359 | | - axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); |
---|
1360 | | - axienet_mdio_wait_until_ready(lp); |
---|
1361 | | - |
---|
1362 | | - for (i = 0; i < TX_BD_NUM; i++) { |
---|
| 1724 | + for (i = 0; i < lp->tx_bd_num; i++) { |
---|
1363 | 1725 | cur_p = &lp->tx_bd_v[i]; |
---|
1364 | | - if (cur_p->phys) |
---|
1365 | | - dma_unmap_single(ndev->dev.parent, cur_p->phys, |
---|
| 1726 | + if (cur_p->cntrl) { |
---|
| 1727 | + dma_addr_t addr = desc_get_phys_addr(lp, cur_p); |
---|
| 1728 | + |
---|
| 1729 | + dma_unmap_single(ndev->dev.parent, addr, |
---|
1366 | 1730 | (cur_p->cntrl & |
---|
1367 | 1731 | XAXIDMA_BD_CTRL_LENGTH_MASK), |
---|
1368 | 1732 | DMA_TO_DEVICE); |
---|
1369 | | - if (cur_p->app4) |
---|
1370 | | - dev_kfree_skb_irq((struct sk_buff *) cur_p->app4); |
---|
| 1733 | + } |
---|
| 1734 | + if (cur_p->skb) |
---|
| 1735 | + dev_kfree_skb_irq(cur_p->skb); |
---|
1371 | 1736 | cur_p->phys = 0; |
---|
| 1737 | + cur_p->phys_msb = 0; |
---|
1372 | 1738 | cur_p->cntrl = 0; |
---|
1373 | 1739 | cur_p->status = 0; |
---|
1374 | 1740 | cur_p->app0 = 0; |
---|
.. | .. |
---|
1376 | 1742 | cur_p->app2 = 0; |
---|
1377 | 1743 | cur_p->app3 = 0; |
---|
1378 | 1744 | cur_p->app4 = 0; |
---|
1379 | | - cur_p->sw_id_offset = 0; |
---|
| 1745 | + cur_p->skb = NULL; |
---|
1380 | 1746 | } |
---|
1381 | 1747 | |
---|
1382 | | - for (i = 0; i < RX_BD_NUM; i++) { |
---|
| 1748 | + for (i = 0; i < lp->rx_bd_num; i++) { |
---|
1383 | 1749 | cur_p = &lp->rx_bd_v[i]; |
---|
1384 | 1750 | cur_p->status = 0; |
---|
1385 | 1751 | cur_p->app0 = 0; |
---|
.. | .. |
---|
1422 | 1788 | /* Populate the tail pointer and bring the Rx Axi DMA engine out of |
---|
1423 | 1789 |  * the halted state. This will make the Rx side ready for reception. |
---|
1424 | 1790 | */ |
---|
1425 | | - axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); |
---|
| 1791 | + axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); |
---|
1426 | 1792 | cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
---|
1427 | 1793 | axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, |
---|
1428 | 1794 | cr | XAXIDMA_CR_RUNSTOP_MASK); |
---|
1429 | | - axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + |
---|
1430 | | - (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); |
---|
| 1795 | + axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + |
---|
| 1796 | + (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); |
---|
1431 | 1797 | |
---|
1432 | 1798 | /* Write to the RS (Run-stop) bit in the Tx channel control register. |
---|
1433 | 1799 | * Tx channel is now ready to run. But only after we write to the |
---|
1434 | 1800 | * tail pointer register that the Tx channel will start transmitting |
---|
1435 | 1801 | */ |
---|
1436 | | - axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); |
---|
| 1802 | + axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); |
---|
1437 | 1803 | cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
---|
1438 | 1804 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, |
---|
1439 | 1805 | cr | XAXIDMA_CR_RUNSTOP_MASK); |
---|
.. | .. |
---|
1445 | 1811 | axienet_status = axienet_ior(lp, XAE_IP_OFFSET); |
---|
1446 | 1812 | if (axienet_status & XAE_INT_RXRJECT_MASK) |
---|
1447 | 1813 | axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); |
---|
| 1814 | + axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? |
---|
| 1815 | + XAE_INT_RECV_ERROR_MASK : 0); |
---|
1448 | 1816 | axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); |
---|
1449 | 1817 | |
---|
1450 | 1818 | /* Sync default options with HW but leave receiver and |
---|
.. | .. |
---|
1476 | 1844 | struct axienet_local *lp; |
---|
1477 | 1845 | struct net_device *ndev; |
---|
1478 | 1846 | const void *mac_addr; |
---|
1479 | | - struct resource *ethres, dmares; |
---|
| 1847 | + struct resource *ethres; |
---|
| 1848 | + int addr_width = 32; |
---|
1480 | 1849 | u32 value; |
---|
1481 | 1850 | |
---|
1482 | 1851 | ndev = alloc_etherdev(sizeof(*lp)); |
---|
.. | .. |
---|
1499 | 1868 | lp->ndev = ndev; |
---|
1500 | 1869 | lp->dev = &pdev->dev; |
---|
1501 | 1870 | lp->options = XAE_OPTION_DEFAULTS; |
---|
| 1871 | + lp->rx_bd_num = RX_BD_NUM_DEFAULT; |
---|
| 1872 | + lp->tx_bd_num = TX_BD_NUM_DEFAULT; |
---|
| 1873 | + |
---|
| 1874 | + lp->clk = devm_clk_get_optional(&pdev->dev, NULL); |
---|
| 1875 | + if (IS_ERR(lp->clk)) { |
---|
| 1876 | + ret = PTR_ERR(lp->clk); |
---|
| 1877 | + goto free_netdev; |
---|
| 1878 | + } |
---|
| 1879 | + ret = clk_prepare_enable(lp->clk); |
---|
| 1880 | + if (ret) { |
---|
| 1881 | + dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret); |
---|
| 1882 | + goto free_netdev; |
---|
| 1883 | + } |
---|
| 1884 | + |
---|
1502 | 1885 | /* Map device registers */ |
---|
1503 | 1886 | ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
---|
1504 | 1887 | lp->regs = devm_ioremap_resource(&pdev->dev, ethres); |
---|
1505 | 1888 | if (IS_ERR(lp->regs)) { |
---|
1506 | 1889 | dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); |
---|
1507 | 1890 | ret = PTR_ERR(lp->regs); |
---|
1508 | | - goto free_netdev; |
---|
| 1891 | + goto cleanup_clk; |
---|
1509 | 1892 | } |
---|
| 1893 | + lp->regs_start = ethres->start; |
---|
1510 | 1894 | |
---|
1511 | 1895 | /* Setup checksum offload, but default to off if not specified */ |
---|
1512 | 1896 | lp->features = 0; |
---|
.. | .. |
---|
1579 | 1963 | break; |
---|
1580 | 1964 | default: |
---|
1581 | 1965 | ret = -EINVAL; |
---|
1582 | | - goto free_netdev; |
---|
| 1966 | + goto cleanup_clk; |
---|
1583 | 1967 | } |
---|
1584 | 1968 | } else { |
---|
1585 | | - lp->phy_mode = of_get_phy_mode(pdev->dev.of_node); |
---|
1586 | | - if ((int)lp->phy_mode < 0) { |
---|
1587 | | - ret = -EINVAL; |
---|
1588 | | - goto free_netdev; |
---|
1589 | | - } |
---|
| 1969 | + ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); |
---|
| 1970 | + if (ret) |
---|
| 1971 | + goto cleanup_clk; |
---|
1590 | 1972 | } |
---|
1591 | 1973 | |
---|
1592 | 1974 | /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ |
---|
1593 | 1975 | np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); |
---|
1594 | | - if (!np) { |
---|
1595 | | - dev_err(&pdev->dev, "could not find DMA node\n"); |
---|
1596 | | - ret = -ENODEV; |
---|
1597 | | - goto free_netdev; |
---|
1598 | | - } |
---|
1599 | | - ret = of_address_to_resource(np, 0, &dmares); |
---|
1600 | | - if (ret) { |
---|
1601 | | - dev_err(&pdev->dev, "unable to get DMA resource\n"); |
---|
| 1976 | + if (np) { |
---|
| 1977 | + struct resource dmares; |
---|
| 1978 | + |
---|
| 1979 | + ret = of_address_to_resource(np, 0, &dmares); |
---|
| 1980 | + if (ret) { |
---|
| 1981 | + dev_err(&pdev->dev, |
---|
| 1982 | + "unable to get DMA resource\n"); |
---|
| 1983 | + of_node_put(np); |
---|
| 1984 | + goto cleanup_clk; |
---|
| 1985 | + } |
---|
| 1986 | + lp->dma_regs = devm_ioremap_resource(&pdev->dev, |
---|
| 1987 | + &dmares); |
---|
| 1988 | + lp->rx_irq = irq_of_parse_and_map(np, 1); |
---|
| 1989 | + lp->tx_irq = irq_of_parse_and_map(np, 0); |
---|
1602 | 1990 | of_node_put(np); |
---|
1603 | | - goto free_netdev; |
---|
| 1991 | + lp->eth_irq = platform_get_irq_optional(pdev, 0); |
---|
| 1992 | + } else { |
---|
| 1993 | + /* Check for these resources directly on the Ethernet node. */ |
---|
| 1994 | + struct resource *res = platform_get_resource(pdev, |
---|
| 1995 | + IORESOURCE_MEM, 1); |
---|
| 1996 | + lp->dma_regs = devm_ioremap_resource(&pdev->dev, res); |
---|
| 1997 | + lp->rx_irq = platform_get_irq(pdev, 1); |
---|
| 1998 | + lp->tx_irq = platform_get_irq(pdev, 0); |
---|
| 1999 | + lp->eth_irq = platform_get_irq_optional(pdev, 2); |
---|
1604 | 2000 | } |
---|
1605 | | - lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); |
---|
1606 | 2001 | if (IS_ERR(lp->dma_regs)) { |
---|
1607 | 2002 | dev_err(&pdev->dev, "could not map DMA regs\n"); |
---|
1608 | 2003 | ret = PTR_ERR(lp->dma_regs); |
---|
1609 | | - of_node_put(np); |
---|
1610 | | - goto free_netdev; |
---|
| 2004 | + goto cleanup_clk; |
---|
1611 | 2005 | } |
---|
1612 | | - lp->rx_irq = irq_of_parse_and_map(np, 1); |
---|
1613 | | - lp->tx_irq = irq_of_parse_and_map(np, 0); |
---|
1614 | | - of_node_put(np); |
---|
1615 | 2006 | if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { |
---|
1616 | 2007 | dev_err(&pdev->dev, "could not determine irqs\n"); |
---|
1617 | 2008 | ret = -ENOMEM; |
---|
1618 | | - goto free_netdev; |
---|
| 2009 | + goto cleanup_clk; |
---|
1619 | 2010 | } |
---|
| 2011 | + |
---|
| 2012 | + /* Reset core now that clocks are enabled, prior to accessing MDIO */ |
---|
| 2013 | + ret = __axienet_device_reset(lp); |
---|
| 2014 | + if (ret) |
---|
| 2015 | + goto cleanup_clk; |
---|
| 2016 | + |
---|
| 2017 | + /* Autodetect the need for 64-bit DMA pointers. |
---|
| 2018 | + * When the IP is configured for a bus width bigger than 32 bits, |
---|
| 2019 | + * writing the MSB registers is mandatory, even if they are all 0. |
---|
| 2020 | + * We can detect this case by writing all 1's to one such register |
---|
| 2021 | + * and seeing if that sticks: when the IP is configured for 32 bits |
---|
| 2022 | + * only, those registers are RES0. |
---|
| 2023 | + * Those MSB registers were introduced in IP v7.1, which we check first. |
---|
| 2024 | + */ |
---|
| 2025 | + if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { |
---|
| 2026 | + void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; |
---|
| 2027 | + |
---|
| 2028 | + iowrite32(0x0, desc); |
---|
| 2029 | + if (ioread32(desc) == 0) { /* sanity check */ |
---|
| 2030 | + iowrite32(0xffffffff, desc); |
---|
| 2031 | + if (ioread32(desc) > 0) { |
---|
| 2032 | + lp->features |= XAE_FEATURE_DMA_64BIT; |
---|
| 2033 | + addr_width = 64; |
---|
| 2034 | + dev_info(&pdev->dev, |
---|
| 2035 | + "autodetected 64-bit DMA range\n"); |
---|
| 2036 | + } |
---|
| 2037 | + iowrite32(0x0, desc); |
---|
| 2038 | + } |
---|
| 2039 | + } |
---|
| 2040 | + |
---|
| 2041 | + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); |
---|
| 2042 | + if (ret) { |
---|
| 2043 | + dev_err(&pdev->dev, "No suitable DMA available\n"); |
---|
| 2044 | + goto cleanup_clk; |
---|
| 2045 | + } |
---|
| 2046 | + |
---|
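The autodetection above relies on a common MMIO probing idiom: a register that is reserved-as-zero (RES0) in the narrower configuration is written with all ones and read back, and any bit that sticks proves the wider configuration is present. As a hedged illustration, the idiom in isolation might look like the hypothetical helper below (this driver does not define such a function).

```c
/* Hypothetical helper illustrating the write-ones/read-back probe used
 * above. Returns true if the register is actually implemented (writable),
 * false if it reads back as zero (RES0 on 32-bit-only configurations).
 */
static bool mmio_reg_is_implemented(void __iomem *reg)
{
	u32 saved = ioread32(reg);
	bool implemented;

	iowrite32(0xffffffff, reg);
	implemented = ioread32(reg) != 0;
	iowrite32(saved, reg);		/* restore the original contents */

	return implemented;
}
```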
| 2047 | + /* Check for Ethernet core IRQ (optional) */ |
---|
| 2048 | + if (lp->eth_irq <= 0) |
---|
| 2049 | + dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); |
---|
1620 | 2050 | |
---|
1621 | 2051 | /* Retrieve the MAC address */ |
---|
1622 | 2052 | mac_addr = of_get_mac_address(pdev->dev.of_node); |
---|
1623 | | - if (!mac_addr) { |
---|
1624 | | - dev_err(&pdev->dev, "could not find MAC address\n"); |
---|
1625 | | - goto free_netdev; |
---|
| 2053 | + if (IS_ERR(mac_addr)) { |
---|
| 2054 | + dev_warn(&pdev->dev, "could not find MAC address property: %ld\n", |
---|
| 2055 | + PTR_ERR(mac_addr)); |
---|
| 2056 | + mac_addr = NULL; |
---|
1626 | 2057 | } |
---|
1627 | 2058 | axienet_set_mac_address(ndev, mac_addr); |
---|
1628 | 2059 | |
---|
1629 | 2060 | lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; |
---|
1630 | 2061 | lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; |
---|
1631 | 2062 | |
---|
1632 | | - lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
---|
1633 | | - if (lp->phy_node) { |
---|
1634 | | - ret = axienet_mdio_setup(lp, pdev->dev.of_node); |
---|
1635 | | - if (ret) |
---|
1636 | | - dev_warn(&pdev->dev, "error registering MDIO bus\n"); |
---|
| 2063 | + ret = axienet_mdio_setup(lp); |
---|
| 2064 | + if (ret) |
---|
| 2065 | + dev_warn(&pdev->dev, |
---|
| 2066 | + "error registering MDIO bus: %d\n", ret); |
---|
| 2067 | + |
---|
| 2068 | + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || |
---|
| 2069 | + lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { |
---|
| 2070 | + lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
---|
| 2071 | + if (!lp->phy_node) { |
---|
| 2072 | + dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); |
---|
| 2073 | + ret = -EINVAL; |
---|
| 2074 | + goto cleanup_mdio; |
---|
| 2075 | + } |
---|
| 2076 | + lp->pcs_phy = of_mdio_find_device(lp->phy_node); |
---|
| 2077 | + if (!lp->pcs_phy) { |
---|
| 2078 | + ret = -EPROBE_DEFER; |
---|
| 2079 | + goto cleanup_mdio; |
---|
| 2080 | + } |
---|
| 2081 | + lp->phylink_config.pcs_poll = true; |
---|
| 2082 | + } |
---|
| 2083 | + |
---|
| 2084 | + lp->phylink_config.dev = &ndev->dev; |
---|
| 2085 | + lp->phylink_config.type = PHYLINK_NETDEV; |
---|
| 2086 | + |
---|
| 2087 | + lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, |
---|
| 2088 | + lp->phy_mode, |
---|
| 2089 | + &axienet_phylink_ops); |
---|
| 2090 | + if (IS_ERR(lp->phylink)) { |
---|
| 2091 | + ret = PTR_ERR(lp->phylink); |
---|
| 2092 | + dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); |
---|
| 2093 | + goto cleanup_mdio; |
---|
1637 | 2094 | } |
---|
1638 | 2095 | |
---|
1639 | 2096 | ret = register_netdev(lp->ndev); |
---|
1640 | 2097 | if (ret) { |
---|
1641 | 2098 | dev_err(lp->dev, "register_netdev() error (%i)\n", ret); |
---|
1642 | | - goto free_netdev; |
---|
| 2099 | + goto cleanup_phylink; |
---|
1643 | 2100 | } |
---|
1644 | 2101 | |
---|
1645 | 2102 | return 0; |
---|
| 2103 | + |
---|
| 2104 | +cleanup_phylink: |
---|
| 2105 | + phylink_destroy(lp->phylink); |
---|
| 2106 | + |
---|
| 2107 | +cleanup_mdio: |
---|
| 2108 | + if (lp->pcs_phy) |
---|
| 2109 | + put_device(&lp->pcs_phy->dev); |
---|
| 2110 | + if (lp->mii_bus) |
---|
| 2111 | + axienet_mdio_teardown(lp); |
---|
| 2112 | + of_node_put(lp->phy_node); |
---|
| 2113 | + |
---|
| 2114 | +cleanup_clk: |
---|
| 2115 | + clk_disable_unprepare(lp->clk); |
---|
1646 | 2116 | |
---|
1647 | 2117 | free_netdev: |
---|
1648 | 2118 | free_netdev(ndev); |
---|
.. | .. |
---|
1655 | 2125 | struct net_device *ndev = platform_get_drvdata(pdev); |
---|
1656 | 2126 | struct axienet_local *lp = netdev_priv(ndev); |
---|
1657 | 2127 | |
---|
1658 | | - axienet_mdio_teardown(lp); |
---|
1659 | 2128 | unregister_netdev(ndev); |
---|
| 2129 | + |
---|
| 2130 | + if (lp->phylink) |
---|
| 2131 | + phylink_destroy(lp->phylink); |
---|
| 2132 | + |
---|
| 2133 | + if (lp->pcs_phy) |
---|
| 2134 | + put_device(&lp->pcs_phy->dev); |
---|
| 2135 | + |
---|
| 2136 | + axienet_mdio_teardown(lp); |
---|
| 2137 | + |
---|
| 2138 | + clk_disable_unprepare(lp->clk); |
---|
1660 | 2139 | |
---|
1661 | 2140 | of_node_put(lp->phy_node); |
---|
1662 | 2141 | lp->phy_node = NULL; |
---|
.. | .. |
---|
1666 | 2145 | return 0; |
---|
1667 | 2146 | } |
---|
1668 | 2147 | |
---|
| 2148 | +static void axienet_shutdown(struct platform_device *pdev) |
---|
| 2149 | +{ |
---|
| 2150 | + struct net_device *ndev = platform_get_drvdata(pdev); |
---|
| 2151 | + |
---|
| 2152 | + rtnl_lock(); |
---|
| 2153 | + netif_device_detach(ndev); |
---|
| 2154 | + |
---|
| 2155 | + if (netif_running(ndev)) |
---|
| 2156 | + dev_close(ndev); |
---|
| 2157 | + |
---|
| 2158 | + rtnl_unlock(); |
---|
| 2159 | +} |
---|
| 2160 | + |
---|
1669 | 2161 | static struct platform_driver axienet_driver = { |
---|
1670 | 2162 | .probe = axienet_probe, |
---|
1671 | 2163 | .remove = axienet_remove, |
---|
| 2164 | + .shutdown = axienet_shutdown, |
---|
1672 | 2165 | .driver = { |
---|
1673 | 2166 | .name = "xilinx_axienet", |
---|
1674 | 2167 | .of_match_table = axienet_of_match, |
---|