.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Cadence MACB/GEM Ethernet Controller driver |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 2004-2006 Atmel Corporation |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or modify |
---|
7 | | - * it under the terms of the GNU General Public License version 2 as |
---|
8 | | - * published by the Free Software Foundation. |
---|
9 | 6 | */ |
---|
10 | 7 | |
---|
11 | 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
---|
12 | 9 | #include <linux/clk.h> |
---|
| 10 | +#include <linux/clk-provider.h> |
---|
13 | 11 | #include <linux/crc32.h> |
---|
14 | 12 | #include <linux/module.h> |
---|
15 | 13 | #include <linux/moduleparam.h> |
---|
.. | .. |
---|
25 | 23 | #include <linux/netdevice.h> |
---|
26 | 24 | #include <linux/etherdevice.h> |
---|
27 | 25 | #include <linux/dma-mapping.h> |
---|
28 | | -#include <linux/platform_data/macb.h> |
---|
29 | 26 | #include <linux/platform_device.h> |
---|
30 | | -#include <linux/phy.h> |
---|
| 27 | +#include <linux/phylink.h> |
---|
31 | 28 | #include <linux/of.h> |
---|
32 | 29 | #include <linux/of_device.h> |
---|
33 | 30 | #include <linux/of_gpio.h> |
---|
.. | .. |
---|
36 | 33 | #include <linux/ip.h> |
---|
37 | 34 | #include <linux/udp.h> |
---|
38 | 35 | #include <linux/tcp.h> |
---|
| 36 | +#include <linux/iopoll.h> |
---|
| 37 | +#include <linux/pm_runtime.h> |
---|
39 | 38 | #include "macb.h" |
---|
| 39 | + |
---|
| 40 | +/* This structure is only used for MACB on SiFive FU540 devices */ |
---|
| 41 | +struct sifive_fu540_macb_mgmt { |
---|
| 42 | + void __iomem *reg; |
---|
| 43 | + unsigned long rate; |
---|
| 44 | + struct clk_hw hw; |
---|
| 45 | +}; |
---|
40 | 46 | |
---|
41 | 47 | #define MACB_RX_BUFFER_SIZE 128 |
---|
42 | 48 | #define RX_BUFFER_MULTIPLE 64 /* bytes */ |
---|
.. | .. |
---|
82 | 88 | * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) |
---|
83 | 89 | */ |
---|
84 | 90 | #define MACB_HALT_TIMEOUT 1230 |
---|
| 91 | + |
---|
| 92 | +#define MACB_PM_TIMEOUT 100 /* ms */ |
---|
| 93 | + |
---|
| 94 | +#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */ |
---|
85 | 95 | |
---|
86 | 96 | /* DMA buffer descriptor might be different size |
---|
87 | 97 | * depends on hardware configuration: |
---|
.. | .. |
---|
158 | 168 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
159 | 169 | static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) |
---|
160 | 170 | { |
---|
161 | | - if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
162 | | - return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); |
---|
163 | | - return NULL; |
---|
| 171 | + return (struct macb_dma_desc_64 *)((void *)desc |
---|
| 172 | + + sizeof(struct macb_dma_desc)); |
---|
164 | 173 | } |
---|
165 | 174 | #endif |
---|
166 | 175 | |
---|
.. | .. |
---|
283 | 292 | |
---|
284 | 293 | static void macb_get_hwaddr(struct macb *bp) |
---|
285 | 294 | { |
---|
286 | | - struct macb_platform_data *pdata; |
---|
287 | 295 | u32 bottom; |
---|
288 | 296 | u16 top; |
---|
289 | 297 | u8 addr[6]; |
---|
290 | 298 | int i; |
---|
291 | | - |
---|
292 | | - pdata = dev_get_platdata(&bp->pdev->dev); |
---|
293 | 299 | |
---|
294 | 300 | /* Check all 4 address registers for a valid address */ |
---|
295 | 301 | for (i = 0; i < 4; i++) { |
---|
296 | 302 | bottom = macb_or_gem_readl(bp, SA1B + i * 8); |
---|
297 | 303 | top = macb_or_gem_readl(bp, SA1T + i * 8); |
---|
298 | 304 | |
---|
299 | | - if (pdata && pdata->rev_eth_addr) { |
---|
300 | | - addr[5] = bottom & 0xff; |
---|
301 | | - addr[4] = (bottom >> 8) & 0xff; |
---|
302 | | - addr[3] = (bottom >> 16) & 0xff; |
---|
303 | | - addr[2] = (bottom >> 24) & 0xff; |
---|
304 | | - addr[1] = top & 0xff; |
---|
305 | | - addr[0] = (top & 0xff00) >> 8; |
---|
306 | | - } else { |
---|
307 | | - addr[0] = bottom & 0xff; |
---|
308 | | - addr[1] = (bottom >> 8) & 0xff; |
---|
309 | | - addr[2] = (bottom >> 16) & 0xff; |
---|
310 | | - addr[3] = (bottom >> 24) & 0xff; |
---|
311 | | - addr[4] = top & 0xff; |
---|
312 | | - addr[5] = (top >> 8) & 0xff; |
---|
313 | | - } |
---|
| 305 | + addr[0] = bottom & 0xff; |
---|
| 306 | + addr[1] = (bottom >> 8) & 0xff; |
---|
| 307 | + addr[2] = (bottom >> 16) & 0xff; |
---|
| 308 | + addr[3] = (bottom >> 24) & 0xff; |
---|
| 309 | + addr[4] = top & 0xff; |
---|
| 310 | + addr[5] = (top >> 8) & 0xff; |
---|
314 | 311 | |
---|
315 | 312 | if (is_valid_ether_addr(addr)) { |
---|
316 | 313 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); |
---|
.. | .. |
---|
322 | 319 | eth_hw_addr_random(bp->dev); |
---|
323 | 320 | } |
---|
324 | 321 | |
---|
| 322 | +static int macb_mdio_wait_for_idle(struct macb *bp) |
---|
| 323 | +{ |
---|
| 324 | + u32 val; |
---|
| 325 | + |
---|
| 326 | + return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), |
---|
| 327 | + 1, MACB_MDIO_TIMEOUT); |
---|
| 328 | +} |
---|
| 329 | + |
---|
325 | 330 | static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
---|
326 | 331 | { |
---|
327 | 332 | struct macb *bp = bus->priv; |
---|
328 | | - int value; |
---|
| 333 | + int status; |
---|
329 | 334 | |
---|
330 | | - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
---|
331 | | - | MACB_BF(RW, MACB_MAN_READ) |
---|
332 | | - | MACB_BF(PHYA, mii_id) |
---|
333 | | - | MACB_BF(REGA, regnum) |
---|
334 | | - | MACB_BF(CODE, MACB_MAN_CODE))); |
---|
| 335 | + status = pm_runtime_get_sync(&bp->pdev->dev); |
---|
| 336 | + if (status < 0) { |
---|
| 337 | + pm_runtime_put_noidle(&bp->pdev->dev); |
---|
| 338 | + goto mdio_pm_exit; |
---|
| 339 | + } |
---|
335 | 340 | |
---|
336 | | - /* wait for end of transfer */ |
---|
337 | | - while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) |
---|
338 | | - cpu_relax(); |
---|
| 341 | + status = macb_mdio_wait_for_idle(bp); |
---|
| 342 | + if (status < 0) |
---|
| 343 | + goto mdio_read_exit; |
---|
339 | 344 | |
---|
340 | | - value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); |
---|
| 345 | + if (regnum & MII_ADDR_C45) { |
---|
| 346 | + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) |
---|
| 347 | + | MACB_BF(RW, MACB_MAN_C45_ADDR) |
---|
| 348 | + | MACB_BF(PHYA, mii_id) |
---|
| 349 | + | MACB_BF(REGA, (regnum >> 16) & 0x1F) |
---|
| 350 | + | MACB_BF(DATA, regnum & 0xFFFF) |
---|
| 351 | + | MACB_BF(CODE, MACB_MAN_C45_CODE))); |
---|
341 | 352 | |
---|
342 | | - return value; |
---|
| 353 | + status = macb_mdio_wait_for_idle(bp); |
---|
| 354 | + if (status < 0) |
---|
| 355 | + goto mdio_read_exit; |
---|
| 356 | + |
---|
| 357 | + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) |
---|
| 358 | + | MACB_BF(RW, MACB_MAN_C45_READ) |
---|
| 359 | + | MACB_BF(PHYA, mii_id) |
---|
| 360 | + | MACB_BF(REGA, (regnum >> 16) & 0x1F) |
---|
| 361 | + | MACB_BF(CODE, MACB_MAN_C45_CODE))); |
---|
| 362 | + } else { |
---|
| 363 | + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) |
---|
| 364 | + | MACB_BF(RW, MACB_MAN_C22_READ) |
---|
| 365 | + | MACB_BF(PHYA, mii_id) |
---|
| 366 | + | MACB_BF(REGA, regnum) |
---|
| 367 | + | MACB_BF(CODE, MACB_MAN_C22_CODE))); |
---|
| 368 | + } |
---|
| 369 | + |
---|
| 370 | + status = macb_mdio_wait_for_idle(bp); |
---|
| 371 | + if (status < 0) |
---|
| 372 | + goto mdio_read_exit; |
---|
| 373 | + |
---|
| 374 | + status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); |
---|
| 375 | + |
---|
| 376 | +mdio_read_exit: |
---|
| 377 | + pm_runtime_mark_last_busy(&bp->pdev->dev); |
---|
| 378 | + pm_runtime_put_autosuspend(&bp->pdev->dev); |
---|
| 379 | +mdio_pm_exit: |
---|
| 380 | + return status; |
---|
343 | 381 | } |
---|
344 | 382 | |
---|
345 | 383 | static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
---|
346 | 384 | u16 value) |
---|
347 | 385 | { |
---|
348 | 386 | struct macb *bp = bus->priv; |
---|
| 387 | + int status; |
---|
349 | 388 | |
---|
350 | | - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
---|
351 | | - | MACB_BF(RW, MACB_MAN_WRITE) |
---|
352 | | - | MACB_BF(PHYA, mii_id) |
---|
353 | | - | MACB_BF(REGA, regnum) |
---|
354 | | - | MACB_BF(CODE, MACB_MAN_CODE) |
---|
355 | | - | MACB_BF(DATA, value))); |
---|
| 389 | + status = pm_runtime_get_sync(&bp->pdev->dev); |
---|
| 390 | + if (status < 0) { |
---|
| 391 | + pm_runtime_put_noidle(&bp->pdev->dev); |
---|
| 392 | + goto mdio_pm_exit; |
---|
| 393 | + } |
---|
356 | 394 | |
---|
357 | | - /* wait for end of transfer */ |
---|
358 | | - while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) |
---|
359 | | - cpu_relax(); |
---|
| 395 | + status = macb_mdio_wait_for_idle(bp); |
---|
| 396 | + if (status < 0) |
---|
| 397 | + goto mdio_write_exit; |
---|
360 | 398 | |
---|
361 | | - return 0; |
---|
| 399 | + if (regnum & MII_ADDR_C45) { |
---|
| 400 | + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) |
---|
| 401 | + | MACB_BF(RW, MACB_MAN_C45_ADDR) |
---|
| 402 | + | MACB_BF(PHYA, mii_id) |
---|
| 403 | + | MACB_BF(REGA, (regnum >> 16) & 0x1F) |
---|
| 404 | + | MACB_BF(DATA, regnum & 0xFFFF) |
---|
| 405 | + | MACB_BF(CODE, MACB_MAN_C45_CODE))); |
---|
| 406 | + |
---|
| 407 | + status = macb_mdio_wait_for_idle(bp); |
---|
| 408 | + if (status < 0) |
---|
| 409 | + goto mdio_write_exit; |
---|
| 410 | + |
---|
| 411 | + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) |
---|
| 412 | + | MACB_BF(RW, MACB_MAN_C45_WRITE) |
---|
| 413 | + | MACB_BF(PHYA, mii_id) |
---|
| 414 | + | MACB_BF(REGA, (regnum >> 16) & 0x1F) |
---|
| 415 | + | MACB_BF(CODE, MACB_MAN_C45_CODE) |
---|
| 416 | + | MACB_BF(DATA, value))); |
---|
| 417 | + } else { |
---|
| 418 | + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) |
---|
| 419 | + | MACB_BF(RW, MACB_MAN_C22_WRITE) |
---|
| 420 | + | MACB_BF(PHYA, mii_id) |
---|
| 421 | + | MACB_BF(REGA, regnum) |
---|
| 422 | + | MACB_BF(CODE, MACB_MAN_C22_CODE) |
---|
| 423 | + | MACB_BF(DATA, value))); |
---|
| 424 | + } |
---|
| 425 | + |
---|
| 426 | + status = macb_mdio_wait_for_idle(bp); |
---|
| 427 | + if (status < 0) |
---|
| 428 | + goto mdio_write_exit; |
---|
| 429 | + |
---|
| 430 | +mdio_write_exit: |
---|
| 431 | + pm_runtime_mark_last_busy(&bp->pdev->dev); |
---|
| 432 | + pm_runtime_put_autosuspend(&bp->pdev->dev); |
---|
| 433 | +mdio_pm_exit: |
---|
| 434 | + return status; |
---|
| 435 | +} |
---|
| 436 | + |
---|
| 437 | +static void macb_init_buffers(struct macb *bp) |
---|
| 438 | +{ |
---|
| 439 | + struct macb_queue *queue; |
---|
| 440 | + unsigned int q; |
---|
| 441 | + |
---|
| 442 | + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
---|
| 443 | + queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); |
---|
| 444 | +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
| 445 | + if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
| 446 | + queue_writel(queue, RBQPH, |
---|
| 447 | + upper_32_bits(queue->rx_ring_dma)); |
---|
| 448 | +#endif |
---|
| 449 | + queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
---|
| 450 | +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
| 451 | + if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
| 452 | + queue_writel(queue, TBQPH, |
---|
| 453 | + upper_32_bits(queue->tx_ring_dma)); |
---|
| 454 | +#endif |
---|
| 455 | + } |
---|
362 | 456 | } |
---|
363 | 457 | |
---|
364 | 458 | /** |
---|
365 | 459 | * macb_set_tx_clk() - Set a clock to a new frequency |
---|
366 | | - * @clk Pointer to the clock to change |
---|
367 | | - * @rate New frequency in Hz |
---|
368 | | - * @dev Pointer to the struct net_device |
---|
| 460 | + * @clk: Pointer to the clock to change |
---|
| 461 | + * @speed: New link speed, in Mbps |
---|
| 462 | + * @dev: Pointer to the struct net_device |
---|
369 | 463 | */ |
---|
370 | 464 | static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) |
---|
371 | 465 | { |
---|
.. | .. |
---|
405 | 499 | netdev_err(dev, "adjusting tx_clk failed.\n"); |
---|
406 | 500 | } |
---|
407 | 501 | |
---|
408 | | -static void macb_handle_link_change(struct net_device *dev) |
---|
| 502 | +static void macb_validate(struct phylink_config *config, |
---|
| 503 | + unsigned long *supported, |
---|
| 504 | + struct phylink_link_state *state) |
---|
409 | 505 | { |
---|
410 | | - struct macb *bp = netdev_priv(dev); |
---|
411 | | - struct phy_device *phydev = dev->phydev; |
---|
| 506 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 507 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
---|
| 508 | + struct macb *bp = netdev_priv(ndev); |
---|
| 509 | + |
---|
| 510 | + /* We only support MII, RMII, GMII, RGMII & SGMII. */ |
---|
| 511 | + if (state->interface != PHY_INTERFACE_MODE_NA && |
---|
| 512 | + state->interface != PHY_INTERFACE_MODE_MII && |
---|
| 513 | + state->interface != PHY_INTERFACE_MODE_RMII && |
---|
| 514 | + state->interface != PHY_INTERFACE_MODE_GMII && |
---|
| 515 | + state->interface != PHY_INTERFACE_MODE_SGMII && |
---|
| 516 | + !phy_interface_mode_is_rgmii(state->interface)) { |
---|
| 517 | + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 518 | + return; |
---|
| 519 | + } |
---|
| 520 | + |
---|
| 521 | + if (!macb_is_gem(bp) && |
---|
| 522 | + (state->interface == PHY_INTERFACE_MODE_GMII || |
---|
| 523 | + phy_interface_mode_is_rgmii(state->interface))) { |
---|
| 524 | + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 525 | + return; |
---|
| 526 | + } |
---|
| 527 | + |
---|
| 528 | + phylink_set_port_modes(mask); |
---|
| 529 | + phylink_set(mask, Autoneg); |
---|
| 530 | + phylink_set(mask, Asym_Pause); |
---|
| 531 | + |
---|
| 532 | + phylink_set(mask, 10baseT_Half); |
---|
| 533 | + phylink_set(mask, 10baseT_Full); |
---|
| 534 | + phylink_set(mask, 100baseT_Half); |
---|
| 535 | + phylink_set(mask, 100baseT_Full); |
---|
| 536 | + |
---|
| 537 | + if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && |
---|
| 538 | + (state->interface == PHY_INTERFACE_MODE_NA || |
---|
| 539 | + state->interface == PHY_INTERFACE_MODE_GMII || |
---|
| 540 | + state->interface == PHY_INTERFACE_MODE_SGMII || |
---|
| 541 | + phy_interface_mode_is_rgmii(state->interface))) { |
---|
| 542 | + phylink_set(mask, 1000baseT_Full); |
---|
| 543 | + phylink_set(mask, 1000baseX_Full); |
---|
| 544 | + |
---|
| 545 | + if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) |
---|
| 546 | + phylink_set(mask, 1000baseT_Half); |
---|
| 547 | + } |
---|
| 548 | + |
---|
| 549 | + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 550 | + bitmap_and(state->advertising, state->advertising, mask, |
---|
| 551 | + __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 552 | +} |
---|
| 553 | + |
---|
| 554 | +static void macb_mac_pcs_get_state(struct phylink_config *config, |
---|
| 555 | + struct phylink_link_state *state) |
---|
| 556 | +{ |
---|
| 557 | + state->link = 0; |
---|
| 558 | +} |
---|
| 559 | + |
---|
| 560 | +static void macb_mac_an_restart(struct phylink_config *config) |
---|
| 561 | +{ |
---|
| 562 | + /* Not supported */ |
---|
| 563 | +} |
---|
| 564 | + |
---|
| 565 | +static void macb_mac_config(struct phylink_config *config, unsigned int mode, |
---|
| 566 | + const struct phylink_link_state *state) |
---|
| 567 | +{ |
---|
| 568 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 569 | + struct macb *bp = netdev_priv(ndev); |
---|
412 | 570 | unsigned long flags; |
---|
413 | | - int status_change = 0; |
---|
| 571 | + u32 old_ctrl, ctrl; |
---|
414 | 572 | |
---|
415 | 573 | spin_lock_irqsave(&bp->lock, flags); |
---|
416 | 574 | |
---|
417 | | - if (phydev->link) { |
---|
418 | | - if ((bp->speed != phydev->speed) || |
---|
419 | | - (bp->duplex != phydev->duplex)) { |
---|
420 | | - u32 reg; |
---|
| 575 | + old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); |
---|
421 | 576 | |
---|
422 | | - reg = macb_readl(bp, NCFGR); |
---|
423 | | - reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); |
---|
424 | | - if (macb_is_gem(bp)) |
---|
425 | | - reg &= ~GEM_BIT(GBE); |
---|
| 577 | + if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { |
---|
| 578 | + if (state->interface == PHY_INTERFACE_MODE_RMII) |
---|
| 579 | + ctrl |= MACB_BIT(RM9200_RMII); |
---|
| 580 | + } else if (macb_is_gem(bp)) { |
---|
| 581 | + ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); |
---|
426 | 582 | |
---|
427 | | - if (phydev->duplex) |
---|
428 | | - reg |= MACB_BIT(FD); |
---|
429 | | - if (phydev->speed == SPEED_100) |
---|
430 | | - reg |= MACB_BIT(SPD); |
---|
431 | | - if (phydev->speed == SPEED_1000 && |
---|
432 | | - bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) |
---|
433 | | - reg |= GEM_BIT(GBE); |
---|
434 | | - |
---|
435 | | - macb_or_gem_writel(bp, NCFGR, reg); |
---|
436 | | - |
---|
437 | | - bp->speed = phydev->speed; |
---|
438 | | - bp->duplex = phydev->duplex; |
---|
439 | | - status_change = 1; |
---|
440 | | - } |
---|
| 583 | + if (state->interface == PHY_INTERFACE_MODE_SGMII) |
---|
| 584 | + ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); |
---|
441 | 585 | } |
---|
442 | 586 | |
---|
443 | | - if (phydev->link != bp->link) { |
---|
444 | | - if (!phydev->link) { |
---|
445 | | - bp->speed = 0; |
---|
446 | | - bp->duplex = -1; |
---|
447 | | - } |
---|
448 | | - bp->link = phydev->link; |
---|
| 587 | + /* Apply the new configuration, if any */ |
---|
| 588 | + if (old_ctrl ^ ctrl) |
---|
| 589 | + macb_or_gem_writel(bp, NCFGR, ctrl); |
---|
449 | 590 | |
---|
450 | | - status_change = 1; |
---|
| 591 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 592 | +} |
---|
| 593 | + |
---|
| 594 | +static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, |
---|
| 595 | + phy_interface_t interface) |
---|
| 596 | +{ |
---|
| 597 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 598 | + struct macb *bp = netdev_priv(ndev); |
---|
| 599 | + struct macb_queue *queue; |
---|
| 600 | + unsigned int q; |
---|
| 601 | + u32 ctrl; |
---|
| 602 | + |
---|
| 603 | + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) |
---|
| 604 | + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
---|
| 605 | + queue_writel(queue, IDR, |
---|
| 606 | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); |
---|
| 607 | + |
---|
| 608 | + /* Disable Rx and Tx */ |
---|
| 609 | + ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); |
---|
| 610 | + macb_writel(bp, NCR, ctrl); |
---|
| 611 | + |
---|
| 612 | + netif_tx_stop_all_queues(ndev); |
---|
| 613 | +} |
---|
| 614 | + |
---|
| 615 | +static void macb_mac_link_up(struct phylink_config *config, |
---|
| 616 | + struct phy_device *phy, |
---|
| 617 | + unsigned int mode, phy_interface_t interface, |
---|
| 618 | + int speed, int duplex, |
---|
| 619 | + bool tx_pause, bool rx_pause) |
---|
| 620 | +{ |
---|
| 621 | + struct net_device *ndev = to_net_dev(config->dev); |
---|
| 622 | + struct macb *bp = netdev_priv(ndev); |
---|
| 623 | + struct macb_queue *queue; |
---|
| 624 | + unsigned long flags; |
---|
| 625 | + unsigned int q; |
---|
| 626 | + u32 ctrl; |
---|
| 627 | + |
---|
| 628 | + spin_lock_irqsave(&bp->lock, flags); |
---|
| 629 | + |
---|
| 630 | + ctrl = macb_or_gem_readl(bp, NCFGR); |
---|
| 631 | + |
---|
| 632 | + ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); |
---|
| 633 | + |
---|
| 634 | + if (speed == SPEED_100) |
---|
| 635 | + ctrl |= MACB_BIT(SPD); |
---|
| 636 | + |
---|
| 637 | + if (duplex) |
---|
| 638 | + ctrl |= MACB_BIT(FD); |
---|
| 639 | + |
---|
| 640 | + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { |
---|
| 641 | + ctrl &= ~MACB_BIT(PAE); |
---|
| 642 | + if (macb_is_gem(bp)) { |
---|
| 643 | + ctrl &= ~GEM_BIT(GBE); |
---|
| 644 | + |
---|
| 645 | + if (speed == SPEED_1000) |
---|
| 646 | + ctrl |= GEM_BIT(GBE); |
---|
| 647 | + } |
---|
| 648 | + |
---|
| 649 | + if (rx_pause) |
---|
| 650 | + ctrl |= MACB_BIT(PAE); |
---|
| 651 | + |
---|
| 652 | + macb_set_tx_clk(bp->tx_clk, speed, ndev); |
---|
| 653 | + |
---|
| 654 | + /* Initialize rings & buffers since clearing MACB_BIT(TE) on link down |
---|
| 655 | + * cleared the pipeline and control registers. |
---|
| 656 | + */ |
---|
| 657 | + bp->macbgem_ops.mog_init_rings(bp); |
---|
| 658 | + macb_init_buffers(bp); |
---|
| 659 | + |
---|
| 660 | + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
---|
| 661 | + queue_writel(queue, IER, |
---|
| 662 | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); |
---|
451 | 663 | } |
---|
| 664 | + |
---|
| 665 | + macb_or_gem_writel(bp, NCFGR, ctrl); |
---|
452 | 666 | |
---|
453 | 667 | spin_unlock_irqrestore(&bp->lock, flags); |
---|
454 | 668 | |
---|
455 | | - if (status_change) { |
---|
456 | | - if (phydev->link) { |
---|
457 | | - /* Update the TX clock rate if and only if the link is |
---|
458 | | - * up and there has been a link change. |
---|
459 | | - */ |
---|
460 | | - macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); |
---|
| 669 | + /* Enable Rx and Tx */ |
---|
| 670 | + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); |
---|
461 | 671 | |
---|
462 | | - netif_carrier_on(dev); |
---|
463 | | - netdev_info(dev, "link up (%d/%s)\n", |
---|
464 | | - phydev->speed, |
---|
465 | | - phydev->duplex == DUPLEX_FULL ? |
---|
466 | | - "Full" : "Half"); |
---|
467 | | - } else { |
---|
468 | | - netif_carrier_off(dev); |
---|
469 | | - netdev_info(dev, "link down\n"); |
---|
470 | | - } |
---|
471 | | - } |
---|
| 672 | + netif_tx_wake_all_queues(ndev); |
---|
472 | 673 | } |
---|
473 | 674 | |
---|
474 | | -/* based on au1000_eth.c */ |
---|
475 | | -static int macb_mii_probe(struct net_device *dev) |
---|
| 675 | +static const struct phylink_mac_ops macb_phylink_ops = { |
---|
| 676 | + .validate = macb_validate, |
---|
| 677 | + .mac_pcs_get_state = macb_mac_pcs_get_state, |
---|
| 678 | + .mac_an_restart = macb_mac_an_restart, |
---|
| 679 | + .mac_config = macb_mac_config, |
---|
| 680 | + .mac_link_down = macb_mac_link_down, |
---|
| 681 | + .mac_link_up = macb_mac_link_up, |
---|
| 682 | +}; |
---|
| 683 | + |
---|
| 684 | +static bool macb_phy_handle_exists(struct device_node *dn) |
---|
476 | 685 | { |
---|
477 | | - struct macb *bp = netdev_priv(dev); |
---|
478 | | - struct macb_platform_data *pdata; |
---|
| 686 | + dn = of_parse_phandle(dn, "phy-handle", 0); |
---|
| 687 | + of_node_put(dn); |
---|
| 688 | + return dn != NULL; |
---|
| 689 | +} |
---|
| 690 | + |
---|
| 691 | +static int macb_phylink_connect(struct macb *bp) |
---|
| 692 | +{ |
---|
| 693 | + struct device_node *dn = bp->pdev->dev.of_node; |
---|
| 694 | + struct net_device *dev = bp->dev; |
---|
479 | 695 | struct phy_device *phydev; |
---|
480 | | - struct device_node *np; |
---|
481 | | - int phy_irq, ret, i; |
---|
| 696 | + int ret; |
---|
482 | 697 | |
---|
483 | | - pdata = dev_get_platdata(&bp->pdev->dev); |
---|
484 | | - np = bp->pdev->dev.of_node; |
---|
485 | | - ret = 0; |
---|
| 698 | + if (dn) |
---|
| 699 | + ret = phylink_of_phy_connect(bp->phylink, dn, 0); |
---|
486 | 700 | |
---|
487 | | - if (np) { |
---|
488 | | - if (of_phy_is_fixed_link(np)) { |
---|
489 | | - bp->phy_node = of_node_get(np); |
---|
490 | | - } else { |
---|
491 | | - bp->phy_node = of_parse_phandle(np, "phy-handle", 0); |
---|
492 | | - /* fallback to standard phy registration if no |
---|
493 | | - * phy-handle was found nor any phy found during |
---|
494 | | - * dt phy registration |
---|
495 | | - */ |
---|
496 | | - if (!bp->phy_node && !phy_find_first(bp->mii_bus)) { |
---|
497 | | - for (i = 0; i < PHY_MAX_ADDR; i++) { |
---|
498 | | - struct phy_device *phydev; |
---|
499 | | - |
---|
500 | | - phydev = mdiobus_scan(bp->mii_bus, i); |
---|
501 | | - if (IS_ERR(phydev) && |
---|
502 | | - PTR_ERR(phydev) != -ENODEV) { |
---|
503 | | - ret = PTR_ERR(phydev); |
---|
504 | | - break; |
---|
505 | | - } |
---|
506 | | - } |
---|
507 | | - |
---|
508 | | - if (ret) |
---|
509 | | - return -ENODEV; |
---|
510 | | - } |
---|
511 | | - } |
---|
512 | | - } |
---|
513 | | - |
---|
514 | | - if (bp->phy_node) { |
---|
515 | | - phydev = of_phy_connect(dev, bp->phy_node, |
---|
516 | | - &macb_handle_link_change, 0, |
---|
517 | | - bp->phy_interface); |
---|
518 | | - if (!phydev) |
---|
519 | | - return -ENODEV; |
---|
520 | | - } else { |
---|
| 701 | + if (!dn || (ret && !macb_phy_handle_exists(dn))) { |
---|
521 | 702 | phydev = phy_find_first(bp->mii_bus); |
---|
522 | 703 | if (!phydev) { |
---|
523 | 704 | netdev_err(dev, "no PHY found\n"); |
---|
524 | 705 | return -ENXIO; |
---|
525 | 706 | } |
---|
526 | 707 | |
---|
527 | | - if (pdata) { |
---|
528 | | - if (gpio_is_valid(pdata->phy_irq_pin)) { |
---|
529 | | - ret = devm_gpio_request(&bp->pdev->dev, |
---|
530 | | - pdata->phy_irq_pin, "phy int"); |
---|
531 | | - if (!ret) { |
---|
532 | | - phy_irq = gpio_to_irq(pdata->phy_irq_pin); |
---|
533 | | - phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; |
---|
534 | | - } |
---|
535 | | - } else { |
---|
536 | | - phydev->irq = PHY_POLL; |
---|
537 | | - } |
---|
538 | | - } |
---|
539 | | - |
---|
540 | 708 | /* attach the mac to the phy */ |
---|
541 | | - ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, |
---|
542 | | - bp->phy_interface); |
---|
543 | | - if (ret) { |
---|
544 | | - netdev_err(dev, "Could not attach to PHY\n"); |
---|
545 | | - return ret; |
---|
546 | | - } |
---|
| 709 | + ret = phylink_connect_phy(bp->phylink, phydev); |
---|
547 | 710 | } |
---|
548 | 711 | |
---|
549 | | - /* mask with MAC supported features */ |
---|
550 | | - if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) |
---|
551 | | - phydev->supported &= PHY_GBIT_FEATURES; |
---|
552 | | - else |
---|
553 | | - phydev->supported &= PHY_BASIC_FEATURES; |
---|
| 712 | + if (ret) { |
---|
| 713 | + netdev_err(dev, "Could not attach PHY (%d)\n", ret); |
---|
| 714 | + return ret; |
---|
| 715 | + } |
---|
554 | 716 | |
---|
555 | | - if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) |
---|
556 | | - phydev->supported &= ~SUPPORTED_1000baseT_Half; |
---|
557 | | - |
---|
558 | | - phydev->advertising = phydev->supported; |
---|
559 | | - |
---|
560 | | - bp->link = 0; |
---|
561 | | - bp->speed = 0; |
---|
562 | | - bp->duplex = -1; |
---|
| 717 | + phylink_start(bp->phylink); |
---|
563 | 718 | |
---|
564 | 719 | return 0; |
---|
565 | 720 | } |
---|
566 | 721 | |
---|
| 722 | +/* based on au1000_eth.c */ |
---|
| 723 | +static int macb_mii_probe(struct net_device *dev) |
---|
| 724 | +{ |
---|
| 725 | + struct macb *bp = netdev_priv(dev); |
---|
| 726 | + |
---|
| 727 | + bp->phylink_config.dev = &dev->dev; |
---|
| 728 | + bp->phylink_config.type = PHYLINK_NETDEV; |
---|
| 729 | + |
---|
| 730 | + bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, |
---|
| 731 | + bp->phy_interface, &macb_phylink_ops); |
---|
| 732 | + if (IS_ERR(bp->phylink)) { |
---|
| 733 | + netdev_err(dev, "Could not create a phylink instance (%ld)\n", |
---|
| 734 | + PTR_ERR(bp->phylink)); |
---|
| 735 | + return PTR_ERR(bp->phylink); |
---|
| 736 | + } |
---|
| 737 | + |
---|
| 738 | + return 0; |
---|
| 739 | +} |
---|
| 740 | + |
---|
| 741 | +static int macb_mdiobus_register(struct macb *bp) |
---|
| 742 | +{ |
---|
| 743 | + struct device_node *child, *np = bp->pdev->dev.of_node; |
---|
| 744 | + |
---|
| 745 | + if (of_phy_is_fixed_link(np)) |
---|
| 746 | + return mdiobus_register(bp->mii_bus); |
---|
| 747 | + |
---|
| 748 | + /* Only create the PHY from the device tree if at least one PHY is |
---|
| 749 | + * described. Otherwise scan the entire MDIO bus. We do this to support |
---|
| 751 | + * old device trees that did not follow best practices and did not |
---|
| 751 | + * describe their network PHYs. |
---|
| 752 | + */ |
---|
| 753 | + for_each_available_child_of_node(np, child) |
---|
| 754 | + if (of_mdiobus_child_is_phy(child)) { |
---|
| 755 | + /* The loop increments the child refcount, |
---|
| 756 | + * decrement it before returning. |
---|
| 757 | + */ |
---|
| 758 | + of_node_put(child); |
---|
| 759 | + |
---|
| 760 | + return of_mdiobus_register(bp->mii_bus, np); |
---|
| 761 | + } |
---|
| 762 | + |
---|
| 763 | + return mdiobus_register(bp->mii_bus); |
---|
| 764 | +} |
---|
| 765 | + |
---|
567 | 766 | static int macb_mii_init(struct macb *bp) |
---|
568 | 767 | { |
---|
569 | | - struct macb_platform_data *pdata; |
---|
570 | | - struct device_node *np; |
---|
571 | 768 | int err = -ENXIO; |
---|
572 | 769 | |
---|
573 | 770 | /* Enable management port */ |
---|
.. | .. |
---|
586 | 783 | bp->pdev->name, bp->pdev->id); |
---|
587 | 784 | bp->mii_bus->priv = bp; |
---|
588 | 785 | bp->mii_bus->parent = &bp->pdev->dev; |
---|
589 | | - pdata = dev_get_platdata(&bp->pdev->dev); |
---|
590 | 786 | |
---|
591 | 787 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
---|
592 | 788 | |
---|
593 | | - np = bp->pdev->dev.of_node; |
---|
594 | | - if (np && of_phy_is_fixed_link(np)) { |
---|
595 | | - if (of_phy_register_fixed_link(np) < 0) { |
---|
596 | | - dev_err(&bp->pdev->dev, |
---|
597 | | - "broken fixed-link specification %pOF\n", np); |
---|
598 | | - goto err_out_free_mdiobus; |
---|
599 | | - } |
---|
600 | | - |
---|
601 | | - err = mdiobus_register(bp->mii_bus); |
---|
602 | | - } else { |
---|
603 | | - if (pdata) |
---|
604 | | - bp->mii_bus->phy_mask = pdata->phy_mask; |
---|
605 | | - |
---|
606 | | - err = of_mdiobus_register(bp->mii_bus, np); |
---|
607 | | - } |
---|
608 | | - |
---|
| 789 | + err = macb_mdiobus_register(bp); |
---|
609 | 790 | if (err) |
---|
610 | | - goto err_out_free_fixed_link; |
---|
| 791 | + goto err_out_free_mdiobus; |
---|
611 | 792 | |
---|
612 | 793 | err = macb_mii_probe(bp->dev); |
---|
613 | 794 | if (err) |
---|
.. | .. |
---|
617 | 798 | |
---|
618 | 799 | err_out_unregister_bus: |
---|
619 | 800 | mdiobus_unregister(bp->mii_bus); |
---|
620 | | -err_out_free_fixed_link: |
---|
621 | | - if (np && of_phy_is_fixed_link(np)) |
---|
622 | | - of_phy_deregister_fixed_link(np); |
---|
623 | 801 | err_out_free_mdiobus: |
---|
624 | | - of_node_put(bp->phy_node); |
---|
625 | 802 | mdiobus_free(bp->mii_bus); |
---|
626 | 803 | err_out: |
---|
627 | 804 | return err; |
---|
.. | .. |
---|
707 | 884 | } |
---|
708 | 885 | #endif |
---|
709 | 886 | addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); |
---|
| 887 | +#ifdef CONFIG_MACB_USE_HWSTAMP |
---|
| 888 | + if (bp->hw_dma_cap & HW_DMA_CAP_PTP) |
---|
| 889 | + addr &= ~GEM_BIT(DMA_RXVALID); |
---|
| 890 | +#endif |
---|
710 | 891 | return addr; |
---|
711 | 892 | } |
---|
712 | 893 | |
---|
.. | .. |
---|
915 | 1096 | /* Make hw descriptor updates visible to CPU */ |
---|
916 | 1097 | rmb(); |
---|
917 | 1098 | |
---|
918 | | - queue->rx_prepared_head++; |
---|
919 | 1099 | desc = macb_rx_desc(queue, entry); |
---|
920 | 1100 | |
---|
921 | 1101 | if (!queue->rx_skbuff[entry]) { |
---|
.. | .. |
---|
954 | 1134 | dma_wmb(); |
---|
955 | 1135 | desc->addr &= ~MACB_BIT(RX_USED); |
---|
956 | 1136 | } |
---|
| 1137 | + queue->rx_prepared_head++; |
---|
957 | 1138 | } |
---|
958 | 1139 | |
---|
959 | 1140 | /* Make descriptor updates visible to hardware */ |
---|
.. | .. |
---|
984 | 1165 | */ |
---|
985 | 1166 | } |
---|
986 | 1167 | |
---|
987 | | -static int gem_rx(struct macb_queue *queue, int budget) |
---|
| 1168 | +static int gem_rx(struct macb_queue *queue, struct napi_struct *napi, |
---|
| 1169 | + int budget) |
---|
988 | 1170 | { |
---|
989 | 1171 | struct macb *bp = queue->bp; |
---|
990 | 1172 | unsigned int len; |
---|
.. | .. |
---|
1066 | 1248 | skb->data, 32, true); |
---|
1067 | 1249 | #endif |
---|
1068 | 1250 | |
---|
1069 | | - netif_receive_skb(skb); |
---|
| 1251 | + napi_gro_receive(napi, skb); |
---|
1070 | 1252 | } |
---|
1071 | 1253 | |
---|
1072 | 1254 | gem_rx_refill(queue); |
---|
.. | .. |
---|
1074 | 1256 | return count; |
---|
1075 | 1257 | } |
---|
1076 | 1258 | |
---|
1077 | | -static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, |
---|
1078 | | - unsigned int last_frag) |
---|
| 1259 | +static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi, |
---|
| 1260 | + unsigned int first_frag, unsigned int last_frag) |
---|
1079 | 1261 | { |
---|
1080 | 1262 | unsigned int len; |
---|
1081 | 1263 | unsigned int frag; |
---|
.. | .. |
---|
1151 | 1333 | bp->dev->stats.rx_bytes += skb->len; |
---|
1152 | 1334 | netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", |
---|
1153 | 1335 | skb->len, skb->csum); |
---|
1154 | | - netif_receive_skb(skb); |
---|
| 1336 | + napi_gro_receive(napi, skb); |
---|
1155 | 1337 | |
---|
1156 | 1338 | return 0; |
---|
1157 | 1339 | } |
---|
.. | .. |
---|
1174 | 1356 | queue->rx_tail = 0; |
---|
1175 | 1357 | } |
---|
1176 | 1358 | |
---|
1177 | | -static int macb_rx(struct macb_queue *queue, int budget) |
---|
| 1359 | +static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, |
---|
| 1360 | + int budget) |
---|
1178 | 1361 | { |
---|
1179 | 1362 | struct macb *bp = queue->bp; |
---|
1180 | 1363 | bool reset_rx_queue = false; |
---|
.. | .. |
---|
1211 | 1394 | continue; |
---|
1212 | 1395 | } |
---|
1213 | 1396 | |
---|
1214 | | - dropped = macb_rx_frame(queue, first_frag, tail); |
---|
| 1397 | + dropped = macb_rx_frame(queue, napi, first_frag, tail); |
---|
1215 | 1398 | first_frag = -1; |
---|
1216 | 1399 | if (unlikely(dropped < 0)) { |
---|
1217 | 1400 | reset_rx_queue = true; |
---|
.. | .. |
---|
1265 | 1448 | netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", |
---|
1266 | 1449 | (unsigned long)status, budget); |
---|
1267 | 1450 | |
---|
1268 | | - work_done = bp->macbgem_ops.mog_rx(queue, budget); |
---|
| 1451 | + work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); |
---|
1269 | 1452 | if (work_done < budget) { |
---|
1270 | 1453 | napi_complete_done(napi, work_done); |
---|
1271 | 1454 | |
---|
1272 | | - /* Packets received while interrupts were disabled */ |
---|
| 1455 | + /* RSR bits only seem to propagate to raise interrupts when |
---|
| 1456 | + * interrupts are enabled at the time, so if bits are already |
---|
| 1457 | + * set due to packets received while interrupts were disabled, |
---|
| 1458 | + * they will not cause another interrupt to be generated when |
---|
| 1459 | + * interrupts are re-enabled. |
---|
| 1460 | + * Check for this case here. This has been seen to happen |
---|
| 1461 | + * around 30% of the time under heavy network load. |
---|
| 1462 | + */ |
---|
1273 | 1463 | status = macb_readl(bp, RSR); |
---|
1274 | 1464 | if (status) { |
---|
1275 | 1465 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
.. | .. |
---|
1277 | 1467 | napi_reschedule(napi); |
---|
1278 | 1468 | } else { |
---|
1279 | 1469 | queue_writel(queue, IER, bp->rx_intr_mask); |
---|
| 1470 | + |
---|
| 1471 | + /* In rare cases, packets could have been received in |
---|
| 1472 | + * the window between the check above and re-enabling |
---|
| 1473 | + * interrupts. Therefore, a double-check is required |
---|
| 1474 | + * to avoid losing a wakeup. This can potentially race |
---|
| 1475 | + * with the interrupt handler doing the same actions |
---|
| 1476 | + * if an interrupt is raised just after enabling them, |
---|
| 1477 | + * but this should be harmless. |
---|
| 1478 | + */ |
---|
| 1479 | + status = macb_readl(bp, RSR); |
---|
| 1480 | + if (unlikely(status)) { |
---|
| 1481 | + queue_writel(queue, IDR, bp->rx_intr_mask); |
---|
| 1482 | + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
| 1483 | + queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
---|
| 1484 | + napi_schedule(napi); |
---|
| 1485 | + } |
---|
1280 | 1486 | } |
---|
1281 | 1487 | } |
---|
1282 | 1488 | |
---|
.. | .. |
---|
1285 | 1491 | return work_done; |
---|
1286 | 1492 | } |
---|
1287 | 1493 | |
---|
1288 | | -static void macb_hresp_error_task(unsigned long data) |
---|
| 1494 | +static void macb_hresp_error_task(struct tasklet_struct *t) |
---|
1289 | 1495 | { |
---|
1290 | | - struct macb *bp = (struct macb *)data; |
---|
| 1496 | + struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); |
---|
1291 | 1497 | struct net_device *dev = bp->dev; |
---|
1292 | | - struct macb_queue *queue = bp->queues; |
---|
| 1498 | + struct macb_queue *queue; |
---|
1293 | 1499 | unsigned int q; |
---|
1294 | 1500 | u32 ctrl; |
---|
1295 | 1501 | |
---|
.. | .. |
---|
1308 | 1514 | bp->macbgem_ops.mog_init_rings(bp); |
---|
1309 | 1515 | |
---|
1310 | 1516 | /* Initialize TX and RX buffers */ |
---|
1311 | | - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
---|
1312 | | - queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); |
---|
1313 | | -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
1314 | | - if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
1315 | | - queue_writel(queue, RBQPH, |
---|
1316 | | - upper_32_bits(queue->rx_ring_dma)); |
---|
1317 | | -#endif |
---|
1318 | | - queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
---|
1319 | | -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
1320 | | - if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
1321 | | - queue_writel(queue, TBQPH, |
---|
1322 | | - upper_32_bits(queue->tx_ring_dma)); |
---|
1323 | | -#endif |
---|
| 1517 | + macb_init_buffers(bp); |
---|
1324 | 1518 | |
---|
1325 | | - /* Enable interrupts */ |
---|
| 1519 | + /* Enable interrupts */ |
---|
| 1520 | + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
---|
1326 | 1521 | queue_writel(queue, IER, |
---|
1327 | 1522 | bp->rx_intr_mask | |
---|
1328 | 1523 | MACB_TX_INT_FLAGS | |
---|
1329 | 1524 | MACB_BIT(HRESP)); |
---|
1330 | | - } |
---|
1331 | 1525 | |
---|
1332 | 1526 | ctrl |= MACB_BIT(RE) | MACB_BIT(TE); |
---|
1333 | 1527 | macb_writel(bp, NCR, ctrl); |
---|
.. | .. |
---|
1341 | 1535 | unsigned int head = queue->tx_head; |
---|
1342 | 1536 | unsigned int tail = queue->tx_tail; |
---|
1343 | 1537 | struct macb *bp = queue->bp; |
---|
| 1538 | + unsigned int head_idx, tbqp; |
---|
1344 | 1539 | |
---|
1345 | 1540 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
1346 | 1541 | queue_writel(queue, ISR, MACB_BIT(TXUBR)); |
---|
.. | .. |
---|
1348 | 1543 | if (head == tail) |
---|
1349 | 1544 | return; |
---|
1350 | 1545 | |
---|
| 1546 | + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); |
---|
| 1547 | + tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); |
---|
| 1548 | + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); |
---|
| 1549 | + |
---|
| 1550 | + if (tbqp == head_idx) |
---|
| 1551 | + return; |
---|
| 1552 | + |
---|
1351 | 1553 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); |
---|
| 1554 | +} |
---|
| 1555 | + |
---|
| 1556 | +static irqreturn_t macb_wol_interrupt(int irq, void *dev_id) |
---|
| 1557 | +{ |
---|
| 1558 | + struct macb_queue *queue = dev_id; |
---|
| 1559 | + struct macb *bp = queue->bp; |
---|
| 1560 | + u32 status; |
---|
| 1561 | + |
---|
| 1562 | + status = queue_readl(queue, ISR); |
---|
| 1563 | + |
---|
| 1564 | + if (unlikely(!status)) |
---|
| 1565 | + return IRQ_NONE; |
---|
| 1566 | + |
---|
| 1567 | + spin_lock(&bp->lock); |
---|
| 1568 | + |
---|
| 1569 | + if (status & MACB_BIT(WOL)) { |
---|
| 1570 | + queue_writel(queue, IDR, MACB_BIT(WOL)); |
---|
| 1571 | + macb_writel(bp, WOL, 0); |
---|
| 1572 | + netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", |
---|
| 1573 | + (unsigned int)(queue - bp->queues), |
---|
| 1574 | + (unsigned long)status); |
---|
| 1575 | + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
| 1576 | + queue_writel(queue, ISR, MACB_BIT(WOL)); |
---|
| 1577 | + pm_wakeup_event(&bp->pdev->dev, 0); |
---|
| 1578 | + } |
---|
| 1579 | + |
---|
| 1580 | + spin_unlock(&bp->lock); |
---|
| 1581 | + |
---|
| 1582 | + return IRQ_HANDLED; |
---|
| 1583 | +} |
---|
| 1584 | + |
---|
| 1585 | +static irqreturn_t gem_wol_interrupt(int irq, void *dev_id) |
---|
| 1586 | +{ |
---|
| 1587 | + struct macb_queue *queue = dev_id; |
---|
| 1588 | + struct macb *bp = queue->bp; |
---|
| 1589 | + u32 status; |
---|
| 1590 | + |
---|
| 1591 | + status = queue_readl(queue, ISR); |
---|
| 1592 | + |
---|
| 1593 | + if (unlikely(!status)) |
---|
| 1594 | + return IRQ_NONE; |
---|
| 1595 | + |
---|
| 1596 | + spin_lock(&bp->lock); |
---|
| 1597 | + |
---|
| 1598 | + if (status & GEM_BIT(WOL)) { |
---|
| 1599 | + queue_writel(queue, IDR, GEM_BIT(WOL)); |
---|
| 1600 | + gem_writel(bp, WOL, 0); |
---|
| 1601 | + netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", |
---|
| 1602 | + (unsigned int)(queue - bp->queues), |
---|
| 1603 | + (unsigned long)status); |
---|
| 1604 | + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
| 1605 | + queue_writel(queue, ISR, GEM_BIT(WOL)); |
---|
| 1606 | + pm_wakeup_event(&bp->pdev->dev, 0); |
---|
| 1607 | + } |
---|
| 1608 | + |
---|
| 1609 | + spin_unlock(&bp->lock); |
---|
| 1610 | + |
---|
| 1611 | + return IRQ_HANDLED; |
---|
1352 | 1612 | } |
---|
1353 | 1613 | |
---|
1354 | 1614 | static irqreturn_t macb_interrupt(int irq, void *dev_id) |
---|
.. | .. |
---|
1707 | 1967 | bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || |
---|
1708 | 1968 | skb_is_nonlinear(*skb); |
---|
1709 | 1969 | int padlen = ETH_ZLEN - (*skb)->len; |
---|
1710 | | - int headroom = skb_headroom(*skb); |
---|
1711 | 1970 | int tailroom = skb_tailroom(*skb); |
---|
1712 | 1971 | struct sk_buff *nskb; |
---|
1713 | 1972 | u32 fcs; |
---|
.. | .. |
---|
1721 | 1980 | /* FCS could be appended to tailroom. */ |
---|
1722 | 1981 | if (tailroom >= ETH_FCS_LEN) |
---|
1723 | 1982 | goto add_fcs; |
---|
1724 | | - /* FCS could be appended by moving data to headroom. */ |
---|
1725 | | - else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) |
---|
1726 | | - padlen = 0; |
---|
1727 | 1983 | /* No room for FCS, need to reallocate skb. */ |
---|
1728 | 1984 | else |
---|
1729 | 1985 | padlen = ETH_FCS_LEN; |
---|
.. | .. |
---|
1732 | 1988 | padlen += ETH_FCS_LEN; |
---|
1733 | 1989 | } |
---|
1734 | 1990 | |
---|
1735 | | - if (!cloned && headroom + tailroom >= padlen) { |
---|
1736 | | - (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); |
---|
1737 | | - skb_set_tail_pointer(*skb, (*skb)->len); |
---|
1738 | | - } else { |
---|
| 1991 | + if (cloned || tailroom < padlen) { |
---|
1739 | 1992 | nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); |
---|
1740 | 1993 | if (!nskb) |
---|
1741 | 1994 | return -ENOMEM; |
---|
1742 | 1995 | |
---|
1743 | | - dev_kfree_skb_any(*skb); |
---|
| 1996 | + dev_consume_skb_any(*skb); |
---|
1744 | 1997 | *skb = nskb; |
---|
1745 | 1998 | } |
---|
1746 | 1999 | |
---|
1747 | | - if (padlen) { |
---|
1748 | | - if (padlen >= ETH_FCS_LEN) |
---|
1749 | | - skb_put_zero(*skb, padlen - ETH_FCS_LEN); |
---|
1750 | | - else |
---|
1751 | | - skb_trim(*skb, ETH_FCS_LEN - padlen); |
---|
1752 | | - } |
---|
| 2000 | + if (padlen > ETH_FCS_LEN) |
---|
| 2001 | + skb_put_zero(*skb, padlen - ETH_FCS_LEN); |
---|
1753 | 2002 | |
---|
1754 | 2003 | add_fcs: |
---|
1755 | 2004 | /* set FCS to packet */ |
---|
.. | .. |
---|
1772 | 2021 | unsigned long flags; |
---|
1773 | 2022 | unsigned int desc_cnt, nr_frags, frag_size, f; |
---|
1774 | 2023 | unsigned int hdrlen; |
---|
1775 | | - bool is_lso, is_udp = 0; |
---|
| 2024 | + bool is_lso; |
---|
1776 | 2025 | netdev_tx_t ret = NETDEV_TX_OK; |
---|
1777 | 2026 | |
---|
1778 | 2027 | if (macb_clear_csum(skb)) { |
---|
.. | .. |
---|
1788 | 2037 | is_lso = (skb_shinfo(skb)->gso_size != 0); |
---|
1789 | 2038 | |
---|
1790 | 2039 | if (is_lso) { |
---|
1791 | | - is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); |
---|
1792 | | - |
---|
1793 | 2040 | /* length of headers */ |
---|
1794 | | - if (is_udp) |
---|
| 2041 | + if (ip_hdr(skb)->protocol == IPPROTO_UDP) |
---|
1795 | 2042 | /* only queue eth + ip headers separately for UDP */ |
---|
1796 | 2043 | hdrlen = skb_transport_offset(skb); |
---|
1797 | 2044 | else |
---|
.. | .. |
---|
2218 | 2465 | |
---|
2219 | 2466 | static void macb_init_hw(struct macb *bp) |
---|
2220 | 2467 | { |
---|
2221 | | - struct macb_queue *queue; |
---|
2222 | | - unsigned int q; |
---|
2223 | | - |
---|
2224 | 2468 | u32 config; |
---|
2225 | 2469 | |
---|
2226 | 2470 | macb_reset_hw(bp); |
---|
2227 | 2471 | macb_set_hwaddr(bp); |
---|
2228 | 2472 | |
---|
2229 | 2473 | config = macb_mdc_clk_div(bp); |
---|
2230 | | - if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) |
---|
2231 | | - config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); |
---|
2232 | 2474 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ |
---|
2233 | | - config |= MACB_BIT(PAE); /* PAuse Enable */ |
---|
2234 | 2475 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
---|
2235 | 2476 | if (bp->caps & MACB_CAPS_JUMBO) |
---|
2236 | 2477 | config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ |
---|
.. | .. |
---|
2246 | 2487 | macb_writel(bp, NCFGR, config); |
---|
2247 | 2488 | if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) |
---|
2248 | 2489 | gem_writel(bp, JML, bp->jumbo_max_len); |
---|
2249 | | - bp->speed = SPEED_10; |
---|
2250 | | - bp->duplex = DUPLEX_HALF; |
---|
2251 | 2490 | bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; |
---|
2252 | 2491 | if (bp->caps & MACB_CAPS_JUMBO) |
---|
2253 | 2492 | bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; |
---|
2254 | 2493 | |
---|
2255 | 2494 | macb_configure_dma(bp); |
---|
2256 | | - |
---|
2257 | | - /* Initialize TX and RX buffers */ |
---|
2258 | | - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
---|
2259 | | - queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); |
---|
2260 | | -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
2261 | | - if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
2262 | | - queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); |
---|
2263 | | -#endif |
---|
2264 | | - queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
---|
2265 | | -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
---|
2266 | | - if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
---|
2267 | | - queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); |
---|
2268 | | -#endif |
---|
2269 | | - |
---|
2270 | | - /* Enable interrupts */ |
---|
2271 | | - queue_writel(queue, IER, |
---|
2272 | | - bp->rx_intr_mask | |
---|
2273 | | - MACB_TX_INT_FLAGS | |
---|
2274 | | - MACB_BIT(HRESP)); |
---|
2275 | | - } |
---|
2276 | | - |
---|
2277 | | - /* Enable TX and RX */ |
---|
2278 | | - macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); |
---|
2279 | 2495 | } |
---|
2280 | 2496 | |
---|
2281 | 2497 | /* The hash address register is 64 bits long and takes up two |
---|
.. | .. |
---|
2399 | 2615 | |
---|
2400 | 2616 | static int macb_open(struct net_device *dev) |
---|
2401 | 2617 | { |
---|
2402 | | - struct macb *bp = netdev_priv(dev); |
---|
2403 | 2618 | size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; |
---|
| 2619 | + struct macb *bp = netdev_priv(dev); |
---|
2404 | 2620 | struct macb_queue *queue; |
---|
2405 | 2621 | unsigned int q; |
---|
2406 | 2622 | int err; |
---|
2407 | 2623 | |
---|
2408 | 2624 | netdev_dbg(bp->dev, "open\n"); |
---|
2409 | 2625 | |
---|
2410 | | - /* carrier starts down */ |
---|
2411 | | - netif_carrier_off(dev); |
---|
2412 | | - |
---|
2413 | | - /* if the phy is not yet register, retry later*/ |
---|
2414 | | - if (!dev->phydev) |
---|
2415 | | - return -EAGAIN; |
---|
| 2626 | + err = pm_runtime_get_sync(&bp->pdev->dev); |
---|
| 2627 | + if (err < 0) |
---|
| 2628 | + goto pm_exit; |
---|
2416 | 2629 | |
---|
2417 | 2630 | /* RX buffers initialization */ |
---|
2418 | 2631 | macb_init_rx_buffer_size(bp, bufsz); |
---|
.. | .. |
---|
2421 | 2634 | if (err) { |
---|
2422 | 2635 | netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", |
---|
2423 | 2636 | err); |
---|
2424 | | - return err; |
---|
| 2637 | + goto pm_exit; |
---|
2425 | 2638 | } |
---|
2426 | 2639 | |
---|
2427 | 2640 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
---|
2428 | 2641 | napi_enable(&queue->napi); |
---|
2429 | 2642 | |
---|
2430 | | - bp->macbgem_ops.mog_init_rings(bp); |
---|
2431 | 2643 | macb_init_hw(bp); |
---|
2432 | 2644 | |
---|
2433 | | - /* schedule a link state check */ |
---|
2434 | | - phy_start(dev->phydev); |
---|
| 2645 | + err = macb_phylink_connect(bp); |
---|
| 2646 | + if (err) |
---|
| 2647 | + goto reset_hw; |
---|
2435 | 2648 | |
---|
2436 | 2649 | netif_tx_start_all_queues(dev); |
---|
2437 | 2650 | |
---|
.. | .. |
---|
2439 | 2652 | bp->ptp_info->ptp_init(dev); |
---|
2440 | 2653 | |
---|
2441 | 2654 | return 0; |
---|
| 2655 | + |
---|
| 2656 | +reset_hw: |
---|
| 2657 | + macb_reset_hw(bp); |
---|
| 2658 | + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
---|
| 2659 | + napi_disable(&queue->napi); |
---|
| 2660 | + macb_free_consistent(bp); |
---|
| 2661 | +pm_exit: |
---|
| 2662 | + pm_runtime_put_sync(&bp->pdev->dev); |
---|
| 2663 | + return err; |
---|
2442 | 2664 | } |
---|
2443 | 2665 | |
---|
2444 | 2666 | static int macb_close(struct net_device *dev) |
---|
.. | .. |
---|
2453 | 2675 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
---|
2454 | 2676 | napi_disable(&queue->napi); |
---|
2455 | 2677 | |
---|
2456 | | - if (dev->phydev) |
---|
2457 | | - phy_stop(dev->phydev); |
---|
| 2678 | + phylink_stop(bp->phylink); |
---|
| 2679 | + phylink_disconnect_phy(bp->phylink); |
---|
2458 | 2680 | |
---|
2459 | 2681 | spin_lock_irqsave(&bp->lock, flags); |
---|
2460 | 2682 | macb_reset_hw(bp); |
---|
.. | .. |
---|
2465 | 2687 | |
---|
2466 | 2688 | if (bp->ptp_info) |
---|
2467 | 2689 | bp->ptp_info->ptp_remove(dev); |
---|
| 2690 | + |
---|
| 2691 | + pm_runtime_put(&bp->pdev->dev); |
---|
2468 | 2692 | |
---|
2469 | 2693 | return 0; |
---|
2470 | 2694 | } |
---|
.. | .. |
---|
2686 | 2910 | { |
---|
2687 | 2911 | struct macb *bp = netdev_priv(netdev); |
---|
2688 | 2912 | |
---|
2689 | | - wol->supported = 0; |
---|
2690 | | - wol->wolopts = 0; |
---|
2691 | | - |
---|
2692 | 2913 | if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { |
---|
2693 | | - wol->supported = WAKE_MAGIC; |
---|
| 2914 | + phylink_ethtool_get_wol(bp->phylink, wol); |
---|
| 2915 | + wol->supported |= WAKE_MAGIC; |
---|
2694 | 2916 | |
---|
2695 | 2917 | if (bp->wol & MACB_WOL_ENABLED) |
---|
2696 | 2918 | wol->wolopts |= WAKE_MAGIC; |
---|
.. | .. |
---|
2700 | 2922 | static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
---|
2701 | 2923 | { |
---|
2702 | 2924 | struct macb *bp = netdev_priv(netdev); |
---|
| 2925 | + int ret; |
---|
| 2926 | + |
---|
| 2927 | + /* Pass the order to phylink layer */ |
---|
| 2928 | + ret = phylink_ethtool_set_wol(bp->phylink, wol); |
---|
| 2929 | + /* Don't manage WoL on MAC if handled by the PHY |
---|
| 2930 | + * or if there's a failure in talking to the PHY |
---|
| 2931 | + */ |
---|
| 2932 | + if (!ret || ret != -EOPNOTSUPP) |
---|
| 2933 | + return ret; |
---|
2703 | 2934 | |
---|
2704 | 2935 | if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || |
---|
2705 | 2936 | (wol->wolopts & ~WAKE_MAGIC)) |
---|
.. | .. |
---|
2713 | 2944 | device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); |
---|
2714 | 2945 | |
---|
2715 | 2946 | return 0; |
---|
| 2947 | +} |
---|
| 2948 | + |
---|
| 2949 | +static int macb_get_link_ksettings(struct net_device *netdev, |
---|
| 2950 | + struct ethtool_link_ksettings *kset) |
---|
| 2951 | +{ |
---|
| 2952 | + struct macb *bp = netdev_priv(netdev); |
---|
| 2953 | + |
---|
| 2954 | + return phylink_ethtool_ksettings_get(bp->phylink, kset); |
---|
| 2955 | +} |
---|
| 2956 | + |
---|
| 2957 | +static int macb_set_link_ksettings(struct net_device *netdev, |
---|
| 2958 | + const struct ethtool_link_ksettings *kset) |
---|
| 2959 | +{ |
---|
| 2960 | + struct macb *bp = netdev_priv(netdev); |
---|
| 2961 | + |
---|
| 2962 | + return phylink_ethtool_ksettings_set(bp->phylink, kset); |
---|
2716 | 2963 | } |
---|
2717 | 2964 | |
---|
2718 | 2965 | static void macb_get_ringparam(struct net_device *netdev, |
---|
.. | .. |
---|
2842 | 3089 | |
---|
2843 | 3090 | static void gem_enable_flow_filters(struct macb *bp, bool enable) |
---|
2844 | 3091 | { |
---|
| 3092 | + struct net_device *netdev = bp->dev; |
---|
2845 | 3093 | struct ethtool_rx_fs_item *item; |
---|
2846 | 3094 | u32 t2_scr; |
---|
2847 | 3095 | int num_t2_scr; |
---|
| 3096 | + |
---|
| 3097 | + if (!(netdev->features & NETIF_F_NTUPLE)) |
---|
| 3098 | + return; |
---|
2848 | 3099 | |
---|
2849 | 3100 | num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); |
---|
2850 | 3101 | |
---|
.. | .. |
---|
2890 | 3141 | bool cmp_a = false; |
---|
2891 | 3142 | bool cmp_b = false; |
---|
2892 | 3143 | bool cmp_c = false; |
---|
| 3144 | + |
---|
| 3145 | + if (!macb_is_gem(bp)) |
---|
| 3146 | + return; |
---|
2893 | 3147 | |
---|
2894 | 3148 | tp4sp_v = &(fs->h_u.tcp_ip4_spec); |
---|
2895 | 3149 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); |
---|
.. | .. |
---|
3005 | 3259 | gem_prog_cmp_regs(bp, fs); |
---|
3006 | 3260 | bp->rx_fs_list.count++; |
---|
3007 | 3261 | /* enable filtering if NTUPLE on */ |
---|
3008 | | - if (netdev->features & NETIF_F_NTUPLE) |
---|
3009 | | - gem_enable_flow_filters(bp, 1); |
---|
| 3262 | + gem_enable_flow_filters(bp, 1); |
---|
3010 | 3263 | |
---|
3011 | 3264 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
---|
3012 | 3265 | return 0; |
---|
.. | .. |
---|
3148 | 3401 | .get_ts_info = ethtool_op_get_ts_info, |
---|
3149 | 3402 | .get_wol = macb_get_wol, |
---|
3150 | 3403 | .set_wol = macb_set_wol, |
---|
3151 | | - .get_link_ksettings = phy_ethtool_get_link_ksettings, |
---|
3152 | | - .set_link_ksettings = phy_ethtool_set_link_ksettings, |
---|
| 3404 | + .get_link_ksettings = macb_get_link_ksettings, |
---|
| 3405 | + .set_link_ksettings = macb_set_link_ksettings, |
---|
3153 | 3406 | .get_ringparam = macb_get_ringparam, |
---|
3154 | 3407 | .set_ringparam = macb_set_ringparam, |
---|
3155 | 3408 | }; |
---|
.. | .. |
---|
3157 | 3410 | static const struct ethtool_ops gem_ethtool_ops = { |
---|
3158 | 3411 | .get_regs_len = macb_get_regs_len, |
---|
3159 | 3412 | .get_regs = macb_get_regs, |
---|
| 3413 | + .get_wol = macb_get_wol, |
---|
| 3414 | + .set_wol = macb_set_wol, |
---|
3160 | 3415 | .get_link = ethtool_op_get_link, |
---|
3161 | 3416 | .get_ts_info = macb_get_ts_info, |
---|
3162 | 3417 | .get_ethtool_stats = gem_get_ethtool_stats, |
---|
3163 | 3418 | .get_strings = gem_get_ethtool_strings, |
---|
3164 | 3419 | .get_sset_count = gem_get_sset_count, |
---|
3165 | | - .get_link_ksettings = phy_ethtool_get_link_ksettings, |
---|
3166 | | - .set_link_ksettings = phy_ethtool_set_link_ksettings, |
---|
| 3420 | + .get_link_ksettings = macb_get_link_ksettings, |
---|
| 3421 | + .set_link_ksettings = macb_set_link_ksettings, |
---|
3167 | 3422 | .get_ringparam = macb_get_ringparam, |
---|
3168 | 3423 | .set_ringparam = macb_set_ringparam, |
---|
3169 | 3424 | .get_rxnfc = gem_get_rxnfc, |
---|
.. | .. |
---|
3172 | 3427 | |
---|
3173 | 3428 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
---|
3174 | 3429 | { |
---|
3175 | | - struct phy_device *phydev = dev->phydev; |
---|
3176 | 3430 | struct macb *bp = netdev_priv(dev); |
---|
3177 | 3431 | |
---|
3178 | 3432 | if (!netif_running(dev)) |
---|
3179 | 3433 | return -EINVAL; |
---|
3180 | 3434 | |
---|
3181 | | - if (!phydev) |
---|
3182 | | - return -ENODEV; |
---|
3183 | | - |
---|
3184 | | - if (!bp->ptp_info) |
---|
3185 | | - return phy_mii_ioctl(phydev, rq, cmd); |
---|
3186 | | - |
---|
3187 | | - switch (cmd) { |
---|
3188 | | - case SIOCSHWTSTAMP: |
---|
3189 | | - return bp->ptp_info->set_hwtst(dev, rq, cmd); |
---|
3190 | | - case SIOCGHWTSTAMP: |
---|
3191 | | - return bp->ptp_info->get_hwtst(dev, rq); |
---|
3192 | | - default: |
---|
3193 | | - return phy_mii_ioctl(phydev, rq, cmd); |
---|
| 3435 | + if (bp->ptp_info) { |
---|
| 3436 | + switch (cmd) { |
---|
| 3437 | + case SIOCSHWTSTAMP: |
---|
| 3438 | + return bp->ptp_info->set_hwtst(dev, rq, cmd); |
---|
| 3439 | + case SIOCGHWTSTAMP: |
---|
| 3440 | + return bp->ptp_info->get_hwtst(dev, rq); |
---|
| 3441 | + } |
---|
3194 | 3442 | } |
---|
| 3443 | + |
---|
| 3444 | + return phylink_mii_ioctl(bp->phylink, rq, cmd); |
---|
| 3445 | +} |
---|
| 3446 | + |
---|
| 3447 | +static inline void macb_set_txcsum_feature(struct macb *bp, |
---|
| 3448 | + netdev_features_t features) |
---|
| 3449 | +{ |
---|
| 3450 | + u32 val; |
---|
| 3451 | + |
---|
| 3452 | + if (!macb_is_gem(bp)) |
---|
| 3453 | + return; |
---|
| 3454 | + |
---|
| 3455 | + val = gem_readl(bp, DMACFG); |
---|
| 3456 | + if (features & NETIF_F_HW_CSUM) |
---|
| 3457 | + val |= GEM_BIT(TXCOEN); |
---|
| 3458 | + else |
---|
| 3459 | + val &= ~GEM_BIT(TXCOEN); |
---|
| 3460 | + |
---|
| 3461 | + gem_writel(bp, DMACFG, val); |
---|
| 3462 | +} |
---|
| 3463 | + |
---|
| 3464 | +static inline void macb_set_rxcsum_feature(struct macb *bp, |
---|
| 3465 | + netdev_features_t features) |
---|
| 3466 | +{ |
---|
| 3467 | + struct net_device *netdev = bp->dev; |
---|
| 3468 | + u32 val; |
---|
| 3469 | + |
---|
| 3470 | + if (!macb_is_gem(bp)) |
---|
| 3471 | + return; |
---|
| 3472 | + |
---|
| 3473 | + val = gem_readl(bp, NCFGR); |
---|
| 3474 | + if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) |
---|
| 3475 | + val |= GEM_BIT(RXCOEN); |
---|
| 3476 | + else |
---|
| 3477 | + val &= ~GEM_BIT(RXCOEN); |
---|
| 3478 | + |
---|
| 3479 | + gem_writel(bp, NCFGR, val); |
---|
| 3480 | +} |
---|
| 3481 | + |
---|
| 3482 | +static inline void macb_set_rxflow_feature(struct macb *bp, |
---|
| 3483 | + netdev_features_t features) |
---|
| 3484 | +{ |
---|
| 3485 | + if (!macb_is_gem(bp)) |
---|
| 3486 | + return; |
---|
| 3487 | + |
---|
| 3488 | + gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); |
---|
3195 | 3489 | } |
---|
3196 | 3490 | |
---|
3197 | 3491 | static int macb_set_features(struct net_device *netdev, |
---|
.. | .. |
---|
3201 | 3495 | netdev_features_t changed = features ^ netdev->features; |
---|
3202 | 3496 | |
---|
3203 | 3497 | /* TX checksum offload */ |
---|
3204 | | - if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { |
---|
3205 | | - u32 dmacfg; |
---|
3206 | | - |
---|
3207 | | - dmacfg = gem_readl(bp, DMACFG); |
---|
3208 | | - if (features & NETIF_F_HW_CSUM) |
---|
3209 | | - dmacfg |= GEM_BIT(TXCOEN); |
---|
3210 | | - else |
---|
3211 | | - dmacfg &= ~GEM_BIT(TXCOEN); |
---|
3212 | | - gem_writel(bp, DMACFG, dmacfg); |
---|
3213 | | - } |
---|
| 3498 | + if (changed & NETIF_F_HW_CSUM) |
---|
| 3499 | + macb_set_txcsum_feature(bp, features); |
---|
3214 | 3500 | |
---|
3215 | 3501 | /* RX checksum offload */ |
---|
3216 | | - if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { |
---|
3217 | | - u32 netcfg; |
---|
3218 | | - |
---|
3219 | | - netcfg = gem_readl(bp, NCFGR); |
---|
3220 | | - if (features & NETIF_F_RXCSUM && |
---|
3221 | | - !(netdev->flags & IFF_PROMISC)) |
---|
3222 | | - netcfg |= GEM_BIT(RXCOEN); |
---|
3223 | | - else |
---|
3224 | | - netcfg &= ~GEM_BIT(RXCOEN); |
---|
3225 | | - gem_writel(bp, NCFGR, netcfg); |
---|
3226 | | - } |
---|
| 3502 | + if (changed & NETIF_F_RXCSUM) |
---|
| 3503 | + macb_set_rxcsum_feature(bp, features); |
---|
3227 | 3504 | |
---|
3228 | 3505 | /* RX Flow Filters */ |
---|
3229 | | - if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) { |
---|
3230 | | - bool turn_on = features & NETIF_F_NTUPLE; |
---|
| 3506 | + if (changed & NETIF_F_NTUPLE) |
---|
| 3507 | + macb_set_rxflow_feature(bp, features); |
---|
3231 | 3508 | |
---|
3232 | | - gem_enable_flow_filters(bp, turn_on); |
---|
3233 | | - } |
---|
3234 | 3509 | return 0; |
---|
| 3510 | +} |
---|
| 3511 | + |
---|
| 3512 | +static void macb_restore_features(struct macb *bp) |
---|
| 3513 | +{ |
---|
| 3514 | + struct net_device *netdev = bp->dev; |
---|
| 3515 | + netdev_features_t features = netdev->features; |
---|
| 3516 | + struct ethtool_rx_fs_item *item; |
---|
| 3517 | + |
---|
| 3518 | + /* TX checksum offload */ |
---|
| 3519 | + macb_set_txcsum_feature(bp, features); |
---|
| 3520 | + |
---|
| 3521 | + /* RX checksum offload */ |
---|
| 3522 | + macb_set_rxcsum_feature(bp, features); |
---|
| 3523 | + |
---|
| 3524 | + /* RX Flow Filters */ |
---|
| 3525 | + list_for_each_entry(item, &bp->rx_fs_list.list, list) |
---|
| 3526 | + gem_prog_cmp_regs(bp, &item->fs); |
---|
| 3527 | + |
---|
| 3528 | + macb_set_rxflow_feature(bp, features); |
---|
3235 | 3529 | } |
---|
3236 | 3530 | |
---|
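macb_set_features() only has to touch the hardware for offload bits that actually toggled, which is what `changed = features ^ netdev->features` computes, whereas the new macb_restore_features() reapplies everything unconditionally because the resume path has just reset and reprogrammed the controller. A small user-space model of the changed-bits test (the feature bit values below are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for netdev feature bits (values are arbitrary). */
#define F_HW_CSUM  (1ULL << 0)
#define F_RXCSUM   (1ULL << 1)
#define F_NTUPLE   (1ULL << 2)

int main(void)
{
	uint64_t current   = F_HW_CSUM | F_RXCSUM;  /* what the device runs now */
	uint64_t requested = F_HW_CSUM | F_NTUPLE;  /* what user space asked for */
	uint64_t changed   = current ^ requested;   /* bits that differ */

	/* Only toggled offloads need a register write. */
	if (changed & F_RXCSUM)
		printf("RX checksum offload: %s\n",
		       (requested & F_RXCSUM) ? "enable" : "disable");
	if (changed & F_NTUPLE)
		printf("ntuple filters: %s\n",
		       (requested & F_NTUPLE) ? "enable" : "disable");
	if (!(changed & F_HW_CSUM))
		printf("TX checksum offload unchanged, no write needed\n");
	return 0;
}
```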
3237 | 3531 | static const struct net_device_ops macb_netdev_ops = { |
---|
.. | .. |
---|
3274 | 3568 | #ifdef CONFIG_MACB_USE_HWSTAMP |
---|
3275 | 3569 | if (gem_has_ptp(bp)) { |
---|
3276 | 3570 | if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) |
---|
3277 | | - pr_err("GEM doesn't support hardware ptp.\n"); |
---|
| 3571 | + dev_err(&bp->pdev->dev, |
---|
| 3572 | + "GEM doesn't support hardware ptp.\n"); |
---|
3278 | 3573 | else { |
---|
3279 | 3574 | bp->hw_dma_cap |= HW_DMA_CAP_PTP; |
---|
3280 | 3575 | bp->ptp_info = &gem_ptp_info; |
---|
.. | .. |
---|
3291 | 3586 | unsigned int *queue_mask, |
---|
3292 | 3587 | unsigned int *num_queues) |
---|
3293 | 3588 | { |
---|
3294 | | - unsigned int hw_q; |
---|
3295 | | - |
---|
3296 | 3589 | *queue_mask = 0x1; |
---|
3297 | 3590 | *num_queues = 1; |
---|
3298 | 3591 | |
---|
.. | .. |
---|
3306 | 3599 | return; |
---|
3307 | 3600 | |
---|
3308 | 3601 | /* bit 0 is never set but queue 0 always exists */ |
---|
3309 | | - *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; |
---|
3310 | | - |
---|
3311 | | - *queue_mask |= 0x1; |
---|
3312 | | - |
---|
3313 | | - for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) |
---|
3314 | | - if (*queue_mask & (1 << hw_q)) |
---|
3315 | | - (*num_queues)++; |
---|
| 3602 | + *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; |
---|
| 3603 | + *num_queues = hweight32(*queue_mask); |
---|
3316 | 3604 | } |
---|
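The explicit per-bit counting loop is replaced by hweight32(), the kernel's 32-bit population count, applied after bit 0 is forced on (queue 0 always exists even though DCFG6 never reports it). A user-space model of the same computation, using a compiler builtin in place of hweight32():

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dcfg6 = 0x0000000e;	/* example: hardware reports queues 1-3 */
	uint32_t queue_mask = 0x1;	/* queue 0 always exists */
	unsigned int num_queues;

	queue_mask |= dcfg6 & 0xff;	/* up to 8 queues advertised */
	num_queues = __builtin_popcount(queue_mask);	/* == hweight32() */

	printf("queue_mask=0x%02x num_queues=%u\n", queue_mask, num_queues);
	return 0;	/* prints queue_mask=0x0f num_queues=4 */
}
```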
3317 | 3605 | |
---|
3318 | 3606 | static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, |
---|
3319 | 3607 | struct clk **hclk, struct clk **tx_clk, |
---|
3320 | | - struct clk **rx_clk) |
---|
| 3608 | + struct clk **rx_clk, struct clk **tsu_clk) |
---|
3321 | 3609 | { |
---|
3322 | 3610 | struct macb_platform_data *pdata; |
---|
3323 | 3611 | int err; |
---|
.. | .. |
---|
3349 | 3637 | return err; |
---|
3350 | 3638 | } |
---|
3351 | 3639 | |
---|
3352 | | - *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); |
---|
| 3640 | + *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); |
---|
3353 | 3641 | if (IS_ERR(*tx_clk)) |
---|
3354 | | - *tx_clk = NULL; |
---|
| 3642 | + return PTR_ERR(*tx_clk); |
---|
3355 | 3643 | |
---|
3356 | | - *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); |
---|
| 3644 | + *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); |
---|
3357 | 3645 | if (IS_ERR(*rx_clk)) |
---|
3358 | | - *rx_clk = NULL; |
---|
| 3646 | + return PTR_ERR(*rx_clk); |
---|
| 3647 | + |
---|
| 3648 | + *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); |
---|
| 3649 | + if (IS_ERR(*tsu_clk)) |
---|
| 3650 | + return PTR_ERR(*tsu_clk); |
---|
3359 | 3651 | |
---|
3360 | 3652 | err = clk_prepare_enable(*pclk); |
---|
3361 | 3653 | if (err) { |
---|
.. | .. |
---|
3381 | 3673 | goto err_disable_txclk; |
---|
3382 | 3674 | } |
---|
3383 | 3675 | |
---|
| 3676 | + err = clk_prepare_enable(*tsu_clk); |
---|
| 3677 | + if (err) { |
---|
| 3678 | + dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); |
---|
| 3679 | + goto err_disable_rxclk; |
---|
| 3680 | + } |
---|
| 3681 | + |
---|
3384 | 3682 | return 0; |
---|
| 3683 | + |
---|
| 3684 | +err_disable_rxclk: |
---|
| 3685 | + clk_disable_unprepare(*rx_clk); |
---|
3385 | 3686 | |
---|
3386 | 3687 | err_disable_txclk: |
---|
3387 | 3688 | clk_disable_unprepare(*tx_clk); |
---|
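devm_clk_get_optional() differs from devm_clk_get() in that a clock simply absent from the device tree yields a NULL clock rather than an error, and the clk API treats a NULL clock as a no-op; that is why the old `*tx_clk = NULL` fallbacks disappear while genuine failures (such as -EPROBE_DEFER) are now propagated. A minimal kernel-context sketch of the pattern:

```c
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_optional_clock_sketch(struct device *dev, struct clk **clk)
{
	/* NULL if "tx_clk" is absent, ERR_PTR() only on a real failure. */
	*clk = devm_clk_get_optional(dev, "tx_clk");
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);	/* e.g. -EPROBE_DEFER is passed up */

	/* clk_prepare_enable(NULL) is defined to succeed and do nothing. */
	return clk_prepare_enable(*clk);
}
```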
.. | .. |
---|
3417 | 3718 | |
---|
3418 | 3719 | queue = &bp->queues[q]; |
---|
3419 | 3720 | queue->bp = bp; |
---|
3420 | | - netif_napi_add(dev, &queue->napi, macb_poll, 64); |
---|
| 3721 | + netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); |
---|
3421 | 3722 | if (hw_q) { |
---|
3422 | 3723 | queue->ISR = GEM_ISR(hw_q - 1); |
---|
3423 | 3724 | queue->IER = GEM_IER(hw_q - 1); |
---|
.. | .. |
---|
3507 | 3808 | reg = gem_readl(bp, DCFG8); |
---|
3508 | 3809 | bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), |
---|
3509 | 3810 | GEM_BFEXT(T2SCR, reg)); |
---|
| 3811 | + INIT_LIST_HEAD(&bp->rx_fs_list.list); |
---|
3510 | 3812 | if (bp->max_tuples > 0) { |
---|
3511 | 3813 | /* also needs one ethtype match to check IPv4 */ |
---|
3512 | 3814 | if (GEM_BFEXT(SCR2ETH, reg) > 0) { |
---|
.. | .. |
---|
3517 | 3819 | /* Filtering is supported in hw but don't enable it in kernel now */ |
---|
3518 | 3820 | dev->hw_features |= NETIF_F_NTUPLE; |
---|
3519 | 3821 | /* init Rx flow definitions */ |
---|
3520 | | - INIT_LIST_HEAD(&bp->rx_fs_list.list); |
---|
3521 | 3822 | bp->rx_fs_list.count = 0; |
---|
3522 | 3823 | spin_lock_init(&bp->rx_fs_lock); |
---|
3523 | 3824 | } else |
---|
.. | .. |
---|
3526 | 3827 | |
---|
3527 | 3828 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { |
---|
3528 | 3829 | val = 0; |
---|
3529 | | - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) |
---|
| 3830 | + if (phy_interface_mode_is_rgmii(bp->phy_interface)) |
---|
3530 | 3831 | val = GEM_BIT(RGMII); |
---|
3531 | 3832 | else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && |
---|
3532 | 3833 | (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
---|
.. | .. |
---|
3556 | 3857 | /* max number of receive buffers */ |
---|
3557 | 3858 | #define AT91ETHER_MAX_RX_DESCR 9 |
---|
3558 | 3859 | |
---|
3559 | | -/* Initialize and start the Receiver and Transmit subsystems */ |
---|
3560 | | -static int at91ether_start(struct net_device *dev) |
---|
| 3860 | +static struct sifive_fu540_macb_mgmt *mgmt; |
---|
| 3861 | + |
---|
| 3862 | +static int at91ether_alloc_coherent(struct macb *lp) |
---|
3561 | 3863 | { |
---|
3562 | | - struct macb *lp = netdev_priv(dev); |
---|
3563 | 3864 | struct macb_queue *q = &lp->queues[0]; |
---|
3564 | | - struct macb_dma_desc *desc; |
---|
3565 | | - dma_addr_t addr; |
---|
3566 | | - u32 ctl; |
---|
3567 | | - int i; |
---|
3568 | 3865 | |
---|
3569 | 3866 | q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, |
---|
3570 | 3867 | (AT91ETHER_MAX_RX_DESCR * |
---|
.. | .. |
---|
3585 | 3882 | q->rx_ring = NULL; |
---|
3586 | 3883 | return -ENOMEM; |
---|
3587 | 3884 | } |
---|
| 3885 | + |
---|
| 3886 | + return 0; |
---|
| 3887 | +} |
---|
| 3888 | + |
---|
| 3889 | +static void at91ether_free_coherent(struct macb *lp) |
---|
| 3890 | +{ |
---|
| 3891 | + struct macb_queue *q = &lp->queues[0]; |
---|
| 3892 | + |
---|
| 3893 | + if (q->rx_ring) { |
---|
| 3894 | + dma_free_coherent(&lp->pdev->dev, |
---|
| 3895 | + AT91ETHER_MAX_RX_DESCR * |
---|
| 3896 | + macb_dma_desc_get_size(lp), |
---|
| 3897 | + q->rx_ring, q->rx_ring_dma); |
---|
| 3898 | + q->rx_ring = NULL; |
---|
| 3899 | + } |
---|
| 3900 | + |
---|
| 3901 | + if (q->rx_buffers) { |
---|
| 3902 | + dma_free_coherent(&lp->pdev->dev, |
---|
| 3903 | + AT91ETHER_MAX_RX_DESCR * |
---|
| 3904 | + AT91ETHER_MAX_RBUFF_SZ, |
---|
| 3905 | + q->rx_buffers, q->rx_buffers_dma); |
---|
| 3906 | + q->rx_buffers = NULL; |
---|
| 3907 | + } |
---|
| 3908 | +} |
---|
| 3909 | + |
---|
| 3910 | +/* Initialize and start the Receiver and Transmit subsystems */ |
---|
| 3911 | +static int at91ether_start(struct macb *lp) |
---|
| 3912 | +{ |
---|
| 3913 | + struct macb_queue *q = &lp->queues[0]; |
---|
| 3914 | + struct macb_dma_desc *desc; |
---|
| 3915 | + dma_addr_t addr; |
---|
| 3916 | + u32 ctl; |
---|
| 3917 | + int i, ret; |
---|
| 3918 | + |
---|
| 3919 | + ret = at91ether_alloc_coherent(lp); |
---|
| 3920 | + if (ret) |
---|
| 3921 | + return ret; |
---|
3588 | 3922 | |
---|
3589 | 3923 | addr = q->rx_buffers_dma; |
---|
3590 | 3924 | for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { |
---|
.. | .. |
---|
3607 | 3941 | ctl = macb_readl(lp, NCR); |
---|
3608 | 3942 | macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); |
---|
3609 | 3943 | |
---|
| 3944 | + /* Enable MAC interrupts */ |
---|
| 3945 | + macb_writel(lp, IER, MACB_BIT(RCOMP) | |
---|
| 3946 | + MACB_BIT(RXUBR) | |
---|
| 3947 | + MACB_BIT(ISR_TUND) | |
---|
| 3948 | + MACB_BIT(ISR_RLE) | |
---|
| 3949 | + MACB_BIT(TCOMP) | |
---|
| 3950 | + MACB_BIT(RM9200_TBRE) | |
---|
| 3951 | + MACB_BIT(ISR_ROVR) | |
---|
| 3952 | + MACB_BIT(HRESP)); |
---|
| 3953 | + |
---|
3610 | 3954 | return 0; |
---|
| 3955 | +} |
---|
| 3956 | + |
---|
| 3957 | +static void at91ether_stop(struct macb *lp) |
---|
| 3958 | +{ |
---|
| 3959 | + u32 ctl; |
---|
| 3960 | + |
---|
| 3961 | + /* Disable MAC interrupts */ |
---|
| 3962 | + macb_writel(lp, IDR, MACB_BIT(RCOMP) | |
---|
| 3963 | + MACB_BIT(RXUBR) | |
---|
| 3964 | + MACB_BIT(ISR_TUND) | |
---|
| 3965 | + MACB_BIT(ISR_RLE) | |
---|
| 3966 | + MACB_BIT(TCOMP) | |
---|
| 3967 | + MACB_BIT(RM9200_TBRE) | |
---|
| 3968 | + MACB_BIT(ISR_ROVR) | |
---|
| 3969 | + MACB_BIT(HRESP)); |
---|
| 3970 | + |
---|
| 3971 | + /* Disable Receiver and Transmitter */ |
---|
| 3972 | + ctl = macb_readl(lp, NCR); |
---|
| 3973 | + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); |
---|
| 3974 | + |
---|
| 3975 | + /* Free resources. */ |
---|
| 3976 | + at91ether_free_coherent(lp); |
---|
3611 | 3977 | } |
---|
3612 | 3978 | |
---|
3613 | 3979 | /* Open the ethernet interface */ |
---|
.. | .. |
---|
3617 | 3983 | u32 ctl; |
---|
3618 | 3984 | int ret; |
---|
3619 | 3985 | |
---|
| 3986 | + ret = pm_runtime_get_sync(&lp->pdev->dev); |
---|
| 3987 | + if (ret < 0) { |
---|
| 3988 | + pm_runtime_put_noidle(&lp->pdev->dev); |
---|
| 3989 | + return ret; |
---|
| 3990 | + } |
---|
| 3991 | + |
---|
3620 | 3992 | /* Clear internal statistics */ |
---|
3621 | 3993 | ctl = macb_readl(lp, NCR); |
---|
3622 | 3994 | macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); |
---|
3623 | 3995 | |
---|
3624 | 3996 | macb_set_hwaddr(lp); |
---|
3625 | 3997 | |
---|
3626 | | - ret = at91ether_start(dev); |
---|
| 3998 | + ret = at91ether_start(lp); |
---|
3627 | 3999 | if (ret) |
---|
3628 | | - return ret; |
---|
| 4000 | + goto pm_exit; |
---|
3629 | 4001 | |
---|
3630 | | - /* Enable MAC interrupts */ |
---|
3631 | | - macb_writel(lp, IER, MACB_BIT(RCOMP) | |
---|
3632 | | - MACB_BIT(RXUBR) | |
---|
3633 | | - MACB_BIT(ISR_TUND) | |
---|
3634 | | - MACB_BIT(ISR_RLE) | |
---|
3635 | | - MACB_BIT(TCOMP) | |
---|
3636 | | - MACB_BIT(ISR_ROVR) | |
---|
3637 | | - MACB_BIT(HRESP)); |
---|
3638 | | - |
---|
3639 | | - /* schedule a link state check */ |
---|
3640 | | - phy_start(dev->phydev); |
---|
| 4002 | + ret = macb_phylink_connect(lp); |
---|
| 4003 | + if (ret) |
---|
| 4004 | + goto stop; |
---|
3641 | 4005 | |
---|
3642 | 4006 | netif_start_queue(dev); |
---|
3643 | 4007 | |
---|
3644 | 4008 | return 0; |
---|
| 4009 | + |
---|
| 4010 | +stop: |
---|
| 4011 | + at91ether_stop(lp); |
---|
| 4012 | +pm_exit: |
---|
| 4013 | + pm_runtime_put_sync(&lp->pdev->dev); |
---|
| 4014 | + return ret; |
---|
3645 | 4015 | } |
---|
3646 | 4016 | |
---|
3647 | 4017 | /* Close the interface */ |
---|
3648 | 4018 | static int at91ether_close(struct net_device *dev) |
---|
3649 | 4019 | { |
---|
3650 | 4020 | struct macb *lp = netdev_priv(dev); |
---|
3651 | | - struct macb_queue *q = &lp->queues[0]; |
---|
3652 | | - u32 ctl; |
---|
3653 | | - |
---|
3654 | | - /* Disable Receiver and Transmitter */ |
---|
3655 | | - ctl = macb_readl(lp, NCR); |
---|
3656 | | - macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); |
---|
3657 | | - |
---|
3658 | | - /* Disable MAC interrupts */ |
---|
3659 | | - macb_writel(lp, IDR, MACB_BIT(RCOMP) | |
---|
3660 | | - MACB_BIT(RXUBR) | |
---|
3661 | | - MACB_BIT(ISR_TUND) | |
---|
3662 | | - MACB_BIT(ISR_RLE) | |
---|
3663 | | - MACB_BIT(TCOMP) | |
---|
3664 | | - MACB_BIT(ISR_ROVR) | |
---|
3665 | | - MACB_BIT(HRESP)); |
---|
3666 | 4021 | |
---|
3667 | 4022 | netif_stop_queue(dev); |
---|
3668 | 4023 | |
---|
3669 | | - dma_free_coherent(&lp->pdev->dev, |
---|
3670 | | - AT91ETHER_MAX_RX_DESCR * |
---|
3671 | | - macb_dma_desc_get_size(lp), |
---|
3672 | | - q->rx_ring, q->rx_ring_dma); |
---|
3673 | | - q->rx_ring = NULL; |
---|
| 4024 | + phylink_stop(lp->phylink); |
---|
| 4025 | + phylink_disconnect_phy(lp->phylink); |
---|
3674 | 4026 | |
---|
3675 | | - dma_free_coherent(&lp->pdev->dev, |
---|
3676 | | - AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, |
---|
3677 | | - q->rx_buffers, q->rx_buffers_dma); |
---|
3678 | | - q->rx_buffers = NULL; |
---|
| 4027 | + at91ether_stop(lp); |
---|
3679 | 4028 | |
---|
3680 | | - return 0; |
---|
| 4029 | + return pm_runtime_put(&lp->pdev->dev); |
---|
3681 | 4030 | } |
---|
3682 | 4031 | |
---|
3683 | 4032 | /* Transmit packet */ |
---|
.. | .. |
---|
3685 | 4034 | struct net_device *dev) |
---|
3686 | 4035 | { |
---|
3687 | 4036 | struct macb *lp = netdev_priv(dev); |
---|
| 4037 | + unsigned long flags; |
---|
3688 | 4038 | |
---|
3689 | | - if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { |
---|
3690 | | - netif_stop_queue(dev); |
---|
| 4039 | + if (lp->rm9200_tx_len < 2) { |
---|
| 4040 | + int desc = lp->rm9200_tx_tail; |
---|
3691 | 4041 | |
---|
3692 | 4042 | /* Store packet information (to free when Tx completed) */ |
---|
3693 | | - lp->skb = skb; |
---|
3694 | | - lp->skb_length = skb->len; |
---|
3695 | | - lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, |
---|
3696 | | - DMA_TO_DEVICE); |
---|
3697 | | - if (dma_mapping_error(NULL, lp->skb_physaddr)) { |
---|
| 4043 | + lp->rm9200_txq[desc].skb = skb; |
---|
| 4044 | + lp->rm9200_txq[desc].size = skb->len; |
---|
| 4045 | + lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, |
---|
| 4046 | + skb->len, DMA_TO_DEVICE); |
---|
| 4047 | + if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { |
---|
3698 | 4048 | dev_kfree_skb_any(skb); |
---|
3699 | 4049 | dev->stats.tx_dropped++; |
---|
3700 | 4050 | netdev_err(dev, "%s: DMA mapping error\n", __func__); |
---|
3701 | 4051 | return NETDEV_TX_OK; |
---|
3702 | 4052 | } |
---|
3703 | 4053 | |
---|
| 4054 | + spin_lock_irqsave(&lp->lock, flags); |
---|
| 4055 | + |
---|
| 4056 | + lp->rm9200_tx_tail = (desc + 1) & 1; |
---|
| 4057 | + lp->rm9200_tx_len++; |
---|
| 4058 | + if (lp->rm9200_tx_len > 1) |
---|
| 4059 | + netif_stop_queue(dev); |
---|
| 4060 | + |
---|
| 4061 | + spin_unlock_irqrestore(&lp->lock, flags); |
---|
| 4062 | + |
---|
3704 | 4063 | /* Set address of the data in the Transmit Address register */ |
---|
3705 | | - macb_writel(lp, TAR, lp->skb_physaddr); |
---|
| 4064 | + macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); |
---|
3706 | 4065 | /* Set length of the packet in the Transmit Control register */ |
---|
3707 | 4066 | macb_writel(lp, TCR, skb->len); |
---|
3708 | 4067 | |
---|
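The RM9200-style EMAC has no TX descriptor ring, only a two-deep hardware queue (the frame being sent plus one buffered via TAR/TCR), so at91ether_start_xmit() above models it with a two-entry software array: `(desc + 1) & 1` alternates between slot 0 and slot 1, and the queue is stopped once two frames are outstanding. A small user-space model of that index arithmetic (rm9200_tx_tail/rm9200_tx_len are the fields from the diff):

```c
#include <stdio.h>

int main(void)
{
	unsigned int tail = 0, len = 0;	/* rm9200_tx_tail / rm9200_tx_len */
	int frame;

	for (frame = 1; frame <= 4; frame++) {
		if (len >= 2) {		/* queue full: netif_stop_queue() */
			printf("frame %d: queue stopped, not accepted\n", frame);
			continue;
		}
		printf("frame %d -> slot %u\n", frame, tail);
		tail = (tail + 1) & 1;	/* alternate between the two slots */
		len++;
	}

	/* Oldest in-flight slot, as computed on TX-complete. */
	printf("oldest slot = %u\n", (tail - len) & 1);
	return 0;
}
```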
.. | .. |
---|
3765 | 4124 | struct net_device *dev = dev_id; |
---|
3766 | 4125 | struct macb *lp = netdev_priv(dev); |
---|
3767 | 4126 | u32 intstatus, ctl; |
---|
| 4127 | + unsigned int desc; |
---|
| 4128 | + unsigned int qlen; |
---|
| 4129 | + u32 tsr; |
---|
3768 | 4130 | |
---|
3769 | 4131 | /* MAC Interrupt Status register indicates what interrupts are pending. |
---|
3770 | 4132 | * It is automatically cleared once read. |
---|
.. | .. |
---|
3776 | 4138 | at91ether_rx(dev); |
---|
3777 | 4139 | |
---|
3778 | 4140 | /* Transmit complete */ |
---|
3779 | | - if (intstatus & MACB_BIT(TCOMP)) { |
---|
| 4141 | + if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) { |
---|
3780 | 4142 | /* The TCOM bit is set even if the transmission failed */ |
---|
3781 | 4143 | if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) |
---|
3782 | 4144 | dev->stats.tx_errors++; |
---|
3783 | 4145 | |
---|
3784 | | - if (lp->skb) { |
---|
3785 | | - dev_kfree_skb_irq(lp->skb); |
---|
3786 | | - lp->skb = NULL; |
---|
3787 | | - dma_unmap_single(NULL, lp->skb_physaddr, |
---|
3788 | | - lp->skb_length, DMA_TO_DEVICE); |
---|
| 4146 | + spin_lock(&lp->lock); |
---|
| 4147 | + |
---|
| 4148 | + tsr = macb_readl(lp, TSR); |
---|
| 4149 | + |
---|
| 4150 | + /* we have three possibilities here: |
---|
| 4151 | + * - all pending packets transmitted (TGO, implies BNQ) |
---|
| 4152 | + * - only first packet transmitted (!TGO && BNQ) |
---|
| 4153 | + * - two frames pending (!TGO && !BNQ) |
---|
| 4154 | + * Note that TGO ("transmit go") is called "IDLE" on RM9200. |
---|
| 4155 | + */ |
---|
| 4156 | + qlen = (tsr & MACB_BIT(TGO)) ? 0 : |
---|
| 4157 | + (tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2; |
---|
| 4158 | + |
---|
| 4159 | + while (lp->rm9200_tx_len > qlen) { |
---|
| 4160 | + desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1; |
---|
| 4161 | + dev_consume_skb_irq(lp->rm9200_txq[desc].skb); |
---|
| 4162 | + lp->rm9200_txq[desc].skb = NULL; |
---|
| 4163 | + dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, |
---|
| 4164 | + lp->rm9200_txq[desc].size, DMA_TO_DEVICE); |
---|
3789 | 4165 | dev->stats.tx_packets++; |
---|
3790 | | - dev->stats.tx_bytes += lp->skb_length; |
---|
| 4166 | + dev->stats.tx_bytes += lp->rm9200_txq[desc].size; |
---|
| 4167 | + lp->rm9200_tx_len--; |
---|
3791 | 4168 | } |
---|
3792 | | - netif_wake_queue(dev); |
---|
| 4169 | + |
---|
| 4170 | + if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev)) |
---|
| 4171 | + netif_wake_queue(dev); |
---|
| 4172 | + |
---|
| 4173 | + spin_unlock(&lp->lock); |
---|
3793 | 4174 | } |
---|
3794 | 4175 | |
---|
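The completion handler derives how many frames the hardware still owns from two TSR bits, exactly as the comment above spells out: the bit at the TGO position (which reads as IDLE on the RM9200) and BNQ. A user-space model of that mapping and of the reclaim loop (the bit positions below are illustrative; the real masks come from macb.h):

```c
#include <stdio.h>

/* Illustrative bit positions only. On the RM9200 the bit at the TGO
 * position reads as IDLE: set means everything handed over was sent.
 */
#define TSR_TGO_IDLE	(1u << 3)
#define TSR_BNQ		(1u << 4)	/* a second buffer slot is free */

static unsigned int frames_still_queued(unsigned int tsr)
{
	if (tsr & TSR_TGO_IDLE)
		return 0;			/* everything transmitted */
	return (tsr & TSR_BNQ) ? 1 : 2;		/* first done, or both pending */
}

int main(void)
{
	unsigned int tsr_samples[] = {
		TSR_TGO_IDLE | TSR_BNQ,	/* transmitter idle */
		TSR_BNQ,		/* one frame still going out */
		0,			/* two frames pending */
	};
	unsigned int queued = 2;	/* software has 2 frames outstanding */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int qlen = frames_still_queued(tsr_samples[i]);

		/* Reclaim (free skb, unmap) everything hardware finished. */
		printf("TSR=0x%02x: hw still owns %u, reclaim %u frame(s)\n",
		       tsr_samples[i], qlen, queued - qlen);
	}
	return 0;
}
```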
3795 | 4176 | /* Work-around for EMAC Errata section 41.3.1 */ |
---|
.. | .. |
---|
3833 | 4214 | |
---|
3834 | 4215 | static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, |
---|
3835 | 4216 | struct clk **hclk, struct clk **tx_clk, |
---|
3836 | | - struct clk **rx_clk) |
---|
| 4217 | + struct clk **rx_clk, struct clk **tsu_clk) |
---|
3837 | 4218 | { |
---|
3838 | 4219 | int err; |
---|
3839 | 4220 | |
---|
3840 | 4221 | *hclk = NULL; |
---|
3841 | 4222 | *tx_clk = NULL; |
---|
3842 | 4223 | *rx_clk = NULL; |
---|
| 4224 | + *tsu_clk = NULL; |
---|
3843 | 4225 | |
---|
3844 | 4226 | *pclk = devm_clk_get(&pdev->dev, "ether_clk"); |
---|
3845 | 4227 | if (IS_ERR(*pclk)) |
---|
.. | .. |
---|
3859 | 4241 | struct net_device *dev = platform_get_drvdata(pdev); |
---|
3860 | 4242 | struct macb *bp = netdev_priv(dev); |
---|
3861 | 4243 | int err; |
---|
3862 | | - u32 reg; |
---|
3863 | 4244 | |
---|
3864 | 4245 | bp->queues[0].bp = bp; |
---|
3865 | 4246 | |
---|
.. | .. |
---|
3873 | 4254 | |
---|
3874 | 4255 | macb_writel(bp, NCR, 0); |
---|
3875 | 4256 | |
---|
3876 | | - reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); |
---|
3877 | | - if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) |
---|
3878 | | - reg |= MACB_BIT(RM9200_RMII); |
---|
3879 | | - |
---|
3880 | | - macb_writel(bp, NCFGR, reg); |
---|
| 4257 | + macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); |
---|
3881 | 4258 | |
---|
3882 | 4259 | return 0; |
---|
3883 | 4260 | } |
---|
| 4261 | + |
---|
| 4262 | +static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, |
---|
| 4263 | + unsigned long parent_rate) |
---|
| 4264 | +{ |
---|
| 4265 | + return mgmt->rate; |
---|
| 4266 | +} |
---|
| 4267 | + |
---|
| 4268 | +static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, |
---|
| 4269 | + unsigned long *parent_rate) |
---|
| 4270 | +{ |
---|
| 4271 | + if (WARN_ON(rate < 2500000)) |
---|
| 4272 | + return 2500000; |
---|
| 4273 | + else if (rate == 2500000) |
---|
| 4274 | + return 2500000; |
---|
| 4275 | + else if (WARN_ON(rate < 13750000)) |
---|
| 4276 | + return 2500000; |
---|
| 4277 | + else if (WARN_ON(rate < 25000000)) |
---|
| 4278 | + return 25000000; |
---|
| 4279 | + else if (rate == 25000000) |
---|
| 4280 | + return 25000000; |
---|
| 4281 | + else if (WARN_ON(rate < 75000000)) |
---|
| 4282 | + return 25000000; |
---|
| 4283 | + else if (WARN_ON(rate < 125000000)) |
---|
| 4284 | + return 125000000; |
---|
| 4285 | + else if (rate == 125000000) |
---|
| 4286 | + return 125000000; |
---|
| 4287 | + |
---|
| 4288 | + WARN_ON(rate > 125000000); |
---|
| 4289 | + |
---|
| 4290 | + return 125000000; |
---|
| 4291 | +} |
---|
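fu540_macb_tx_round_rate() snaps any requested TX clock rate to the three rates the GEMGXL management block can supply, 2.5 MHz, 25 MHz and 125 MHz (10/100/1000 Mbit/s), choosing the nearest supported rate with ties rounding up; fu540_macb_tx_set_rate() then writes 1 to the management register for anything other than 125 MHz and 0 for gigabit. A user-space model of the quantisation (the WARN_ONs are omitted):

```c
#include <stdio.h>

/* Model of the GEMGXL TX-clock quantisation: supported rates are
 * 2.5 MHz, 25 MHz and 125 MHz; requests snap to the nearest one.
 */
static unsigned long fu540_round_model(unsigned long rate)
{
	if (rate < 13750000UL)		/* midpoint of 2.5 MHz and 25 MHz */
		return 2500000UL;
	if (rate < 75000000UL)		/* midpoint of 25 MHz and 125 MHz */
		return 25000000UL;
	return 125000000UL;
}

int main(void)
{
	unsigned long requests[] = { 1000000, 2500000, 20000000, 25000000,
				     80000000, 125000000, 200000000 };
	unsigned int i;

	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		unsigned long rate = fu540_round_model(requests[i]);

		/* set_rate(): mgmt register gets 1 unless running at 125 MHz */
		printf("request %9lu Hz -> %9lu Hz, mgmt reg = %d\n",
		       requests[i], rate, rate != 125000000UL);
	}
	return 0;
}
```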
| 4292 | + |
---|
| 4293 | +static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate, |
---|
| 4294 | + unsigned long parent_rate) |
---|
| 4295 | +{ |
---|
| 4296 | + rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate); |
---|
| 4297 | + if (rate != 125000000) |
---|
| 4298 | + iowrite32(1, mgmt->reg); |
---|
| 4299 | + else |
---|
| 4300 | + iowrite32(0, mgmt->reg); |
---|
| 4301 | + mgmt->rate = rate; |
---|
| 4302 | + |
---|
| 4303 | + return 0; |
---|
| 4304 | +} |
---|
| 4305 | + |
---|
| 4306 | +static const struct clk_ops fu540_c000_ops = { |
---|
| 4307 | + .recalc_rate = fu540_macb_tx_recalc_rate, |
---|
| 4308 | + .round_rate = fu540_macb_tx_round_rate, |
---|
| 4309 | + .set_rate = fu540_macb_tx_set_rate, |
---|
| 4310 | +}; |
---|
| 4311 | + |
---|
| 4312 | +static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, |
---|
| 4313 | + struct clk **hclk, struct clk **tx_clk, |
---|
| 4314 | + struct clk **rx_clk, struct clk **tsu_clk) |
---|
| 4315 | +{ |
---|
| 4316 | + struct clk_init_data init; |
---|
| 4317 | + int err = 0; |
---|
| 4318 | + |
---|
| 4319 | + err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk); |
---|
| 4320 | + if (err) |
---|
| 4321 | + return err; |
---|
| 4322 | + |
---|
| 4323 | + mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); |
---|
| 4324 | + if (!mgmt) |
---|
| 4325 | + return -ENOMEM; |
---|
| 4326 | + |
---|
| 4327 | + init.name = "sifive-gemgxl-mgmt"; |
---|
| 4328 | + init.ops = &fu540_c000_ops; |
---|
| 4329 | + init.flags = 0; |
---|
| 4330 | + init.num_parents = 0; |
---|
| 4331 | + |
---|
| 4332 | + mgmt->rate = 0; |
---|
| 4333 | + mgmt->hw.init = &init; |
---|
| 4334 | + |
---|
| 4335 | + *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); |
---|
| 4336 | + if (IS_ERR(*tx_clk)) |
---|
| 4337 | + return PTR_ERR(*tx_clk); |
---|
| 4338 | + |
---|
| 4339 | + err = clk_prepare_enable(*tx_clk); |
---|
| 4340 | + if (err) |
---|
| 4341 | + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
---|
| 4342 | + else |
---|
| 4343 | + dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); |
---|
| 4344 | + |
---|
| 4345 | + return 0; |
---|
| 4346 | +} |
---|
| 4347 | + |
---|
| 4348 | +static int fu540_c000_init(struct platform_device *pdev) |
---|
| 4349 | +{ |
---|
| 4350 | + mgmt->reg = devm_platform_ioremap_resource(pdev, 1); |
---|
| 4351 | + if (IS_ERR(mgmt->reg)) |
---|
| 4352 | + return PTR_ERR(mgmt->reg); |
---|
| 4353 | + |
---|
| 4354 | + return macb_init(pdev); |
---|
| 4355 | +} |
---|
| 4356 | + |
---|
| 4357 | +static const struct macb_config fu540_c000_config = { |
---|
| 4358 | + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | |
---|
| 4359 | + MACB_CAPS_GEM_HAS_PTP, |
---|
| 4360 | + .dma_burst_length = 16, |
---|
| 4361 | + .clk_init = fu540_c000_clk_init, |
---|
| 4362 | + .init = fu540_c000_init, |
---|
| 4363 | + .jumbo_max_len = 10240, |
---|
| 4364 | +}; |
---|
3884 | 4365 | |
---|
3885 | 4366 | static const struct macb_config at91sam9260_config = { |
---|
3886 | 4367 | .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
---|
.. | .. |
---|
3926 | 4407 | }; |
---|
3927 | 4408 | |
---|
3928 | 4409 | static const struct macb_config emac_config = { |
---|
3929 | | - .caps = MACB_CAPS_NEEDS_RSTONUBR, |
---|
| 4410 | + .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, |
---|
3930 | 4411 | .clk_init = at91ether_clk_init, |
---|
3931 | 4412 | .init = at91ether_init, |
---|
3932 | 4413 | }; |
---|
.. | .. |
---|
3962 | 4443 | { .compatible = "cdns,np4-macb", .data = &np4_config }, |
---|
3963 | 4444 | { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, |
---|
3964 | 4445 | { .compatible = "cdns,gem", .data = &pc302gem_config }, |
---|
| 4446 | + { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, |
---|
3965 | 4447 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, |
---|
3966 | 4448 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, |
---|
3967 | 4449 | { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, |
---|
.. | .. |
---|
3970 | 4452 | { .compatible = "cdns,emac", .data = &emac_config }, |
---|
3971 | 4453 | { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, |
---|
3972 | 4454 | { .compatible = "cdns,zynq-gem", .data = &zynq_config }, |
---|
| 4455 | + { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, |
---|
3973 | 4456 | { /* sentinel */ } |
---|
3974 | 4457 | }; |
---|
3975 | 4458 | MODULE_DEVICE_TABLE(of, macb_dt_ids); |
---|
.. | .. |
---|
3989 | 4472 | { |
---|
3990 | 4473 | const struct macb_config *macb_config = &default_gem_config; |
---|
3991 | 4474 | int (*clk_init)(struct platform_device *, struct clk **, |
---|
3992 | | - struct clk **, struct clk **, struct clk **) |
---|
3993 | | - = macb_config->clk_init; |
---|
| 4475 | + struct clk **, struct clk **, struct clk **, |
---|
| 4476 | + struct clk **) = macb_config->clk_init; |
---|
3994 | 4477 | int (*init)(struct platform_device *) = macb_config->init; |
---|
3995 | 4478 | struct device_node *np = pdev->dev.of_node; |
---|
3996 | 4479 | struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; |
---|
| 4480 | + struct clk *tsu_clk = NULL; |
---|
3997 | 4481 | unsigned int queue_mask, num_queues; |
---|
3998 | | - struct macb_platform_data *pdata; |
---|
3999 | 4482 | bool native_io; |
---|
4000 | | - struct phy_device *phydev; |
---|
| 4483 | + phy_interface_t interface; |
---|
4001 | 4484 | struct net_device *dev; |
---|
4002 | 4485 | struct resource *regs; |
---|
4003 | 4486 | void __iomem *mem; |
---|
.. | .. |
---|
4021 | 4504 | } |
---|
4022 | 4505 | } |
---|
4023 | 4506 | |
---|
4024 | | - err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); |
---|
| 4507 | + err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); |
---|
4025 | 4508 | if (err) |
---|
4026 | 4509 | return err; |
---|
4027 | 4510 | |
---|
| 4511 | + pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); |
---|
| 4512 | + pm_runtime_use_autosuspend(&pdev->dev); |
---|
| 4513 | + pm_runtime_get_noresume(&pdev->dev); |
---|
| 4514 | + pm_runtime_set_active(&pdev->dev); |
---|
| 4515 | + pm_runtime_enable(&pdev->dev); |
---|
4028 | 4516 | native_io = hw_is_native_io(mem); |
---|
4029 | 4517 | |
---|
4030 | 4518 | macb_probe_queues(mem, native_io, &queue_mask, &num_queues); |
---|
.. | .. |
---|
4058 | 4546 | bp->hclk = hclk; |
---|
4059 | 4547 | bp->tx_clk = tx_clk; |
---|
4060 | 4548 | bp->rx_clk = rx_clk; |
---|
| 4549 | + bp->tsu_clk = tsu_clk; |
---|
4061 | 4550 | if (macb_config) |
---|
4062 | 4551 | bp->jumbo_max_len = macb_config->jumbo_max_len; |
---|
4063 | 4552 | |
---|
.. | .. |
---|
4109 | 4598 | bp->rx_intr_mask |= MACB_BIT(RXUBR); |
---|
4110 | 4599 | |
---|
4111 | 4600 | mac = of_get_mac_address(np); |
---|
4112 | | - if (mac) { |
---|
| 4601 | + if (PTR_ERR(mac) == -EPROBE_DEFER) { |
---|
| 4602 | + err = -EPROBE_DEFER; |
---|
| 4603 | + goto err_out_free_netdev; |
---|
| 4604 | + } else if (!IS_ERR_OR_NULL(mac)) { |
---|
4113 | 4605 | ether_addr_copy(bp->dev->dev_addr, mac); |
---|
4114 | 4606 | } else { |
---|
4115 | | - err = of_get_nvmem_mac_address(np, bp->dev->dev_addr); |
---|
4116 | | - if (err) { |
---|
4117 | | - if (err == -EPROBE_DEFER) |
---|
4118 | | - goto err_out_free_netdev; |
---|
4119 | | - macb_get_hwaddr(bp); |
---|
4120 | | - } |
---|
| 4607 | + macb_get_hwaddr(bp); |
---|
4121 | 4608 | } |
---|
4122 | 4609 | |
---|
4123 | | - err = of_get_phy_mode(np); |
---|
4124 | | - if (err < 0) { |
---|
4125 | | - pdata = dev_get_platdata(&pdev->dev); |
---|
4126 | | - if (pdata && pdata->is_rmii) |
---|
4127 | | - bp->phy_interface = PHY_INTERFACE_MODE_RMII; |
---|
4128 | | - else |
---|
4129 | | - bp->phy_interface = PHY_INTERFACE_MODE_MII; |
---|
4130 | | - } else { |
---|
4131 | | - bp->phy_interface = err; |
---|
4132 | | - } |
---|
| 4610 | + err = of_get_phy_mode(np, &interface); |
---|
| 4611 | + if (err) |
---|
| 4612 | + /* not found in DT, MII by default */ |
---|
| 4613 | + bp->phy_interface = PHY_INTERFACE_MODE_MII; |
---|
| 4614 | + else |
---|
| 4615 | + bp->phy_interface = interface; |
---|
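Two OF helper APIs changed shape in this hunk: of_get_mac_address() now returns an ERR_PTR() (including -EPROBE_DEFER when the address lives in an NVMEM cell that is not ready yet) instead of NULL-or-pointer, and of_get_phy_mode() now returns 0/-errno and stores the mode through a phy_interface_t pointer rather than encoding it in the return value. A minimal kernel-context sketch of the new calling conventions (the wrapper name is made up for illustration):

```c
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/phy.h>

/* Hypothetical helper showing the post-change OF calling conventions. */
static int read_dt_mac_and_mode(struct device_node *np,
				struct net_device *netdev,
				phy_interface_t *interface)
{
	const void *mac = of_get_mac_address(np);
	int err;

	if (PTR_ERR(mac) == -EPROBE_DEFER)
		return -EPROBE_DEFER;		/* NVMEM provider not ready */
	if (!IS_ERR_OR_NULL(mac))
		ether_addr_copy(netdev->dev_addr, mac);
	/* else: fall back to whatever the MAC address registers contain */

	err = of_get_phy_mode(np, interface);	/* 0 on success, -ENODEV etc. */
	if (err)
		*interface = PHY_INTERFACE_MODE_MII;	/* driver default */

	return 0;
}
```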
4133 | 4616 | |
---|
4134 | 4617 | /* IP specific init */ |
---|
4135 | 4618 | err = init(pdev); |
---|
.. | .. |
---|
4140 | 4623 | if (err) |
---|
4141 | 4624 | goto err_out_free_netdev; |
---|
4142 | 4625 | |
---|
4143 | | - phydev = dev->phydev; |
---|
4144 | | - |
---|
4145 | 4626 | netif_carrier_off(dev); |
---|
4146 | 4627 | |
---|
4147 | 4628 | err = register_netdev(dev); |
---|
.. | .. |
---|
4150 | 4631 | goto err_out_unregister_mdio; |
---|
4151 | 4632 | } |
---|
4152 | 4633 | |
---|
4153 | | - tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, |
---|
4154 | | - (unsigned long)bp); |
---|
4155 | | - |
---|
4156 | | - phy_attached_info(phydev); |
---|
| 4634 | + tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); |
---|
4157 | 4635 | |
---|
4158 | 4636 | netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", |
---|
4159 | 4637 | macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), |
---|
4160 | 4638 | dev->base_addr, dev->irq, dev->dev_addr); |
---|
4161 | 4639 | |
---|
| 4640 | + pm_runtime_mark_last_busy(&bp->pdev->dev); |
---|
| 4641 | + pm_runtime_put_autosuspend(&bp->pdev->dev); |
---|
| 4642 | + |
---|
4162 | 4643 | return 0; |
---|
4163 | 4644 | |
---|
4164 | 4645 | err_out_unregister_mdio: |
---|
4165 | | - phy_disconnect(dev->phydev); |
---|
4166 | 4646 | mdiobus_unregister(bp->mii_bus); |
---|
4167 | | - of_node_put(bp->phy_node); |
---|
4168 | | - if (np && of_phy_is_fixed_link(np)) |
---|
4169 | | - of_phy_deregister_fixed_link(np); |
---|
4170 | 4647 | mdiobus_free(bp->mii_bus); |
---|
4171 | 4648 | |
---|
4172 | 4649 | err_out_free_netdev: |
---|
.. | .. |
---|
4177 | 4654 | clk_disable_unprepare(hclk); |
---|
4178 | 4655 | clk_disable_unprepare(pclk); |
---|
4179 | 4656 | clk_disable_unprepare(rx_clk); |
---|
| 4657 | + clk_disable_unprepare(tsu_clk); |
---|
| 4658 | + pm_runtime_disable(&pdev->dev); |
---|
| 4659 | + pm_runtime_set_suspended(&pdev->dev); |
---|
| 4660 | + pm_runtime_dont_use_autosuspend(&pdev->dev); |
---|
4180 | 4661 | |
---|
4181 | 4662 | return err; |
---|
4182 | 4663 | } |
---|
.. | .. |
---|
4185 | 4666 | { |
---|
4186 | 4667 | struct net_device *dev; |
---|
4187 | 4668 | struct macb *bp; |
---|
4188 | | - struct device_node *np = pdev->dev.of_node; |
---|
4189 | 4669 | |
---|
4190 | 4670 | dev = platform_get_drvdata(pdev); |
---|
4191 | 4671 | |
---|
4192 | 4672 | if (dev) { |
---|
4193 | 4673 | bp = netdev_priv(dev); |
---|
4194 | | - if (dev->phydev) |
---|
4195 | | - phy_disconnect(dev->phydev); |
---|
4196 | 4674 | mdiobus_unregister(bp->mii_bus); |
---|
4197 | | - if (np && of_phy_is_fixed_link(np)) |
---|
4198 | | - of_phy_deregister_fixed_link(np); |
---|
4199 | | - dev->phydev = NULL; |
---|
4200 | 4675 | mdiobus_free(bp->mii_bus); |
---|
4201 | 4676 | |
---|
4202 | 4677 | unregister_netdev(dev); |
---|
4203 | 4678 | tasklet_kill(&bp->hresp_err_tasklet); |
---|
4204 | | - clk_disable_unprepare(bp->tx_clk); |
---|
4205 | | - clk_disable_unprepare(bp->hclk); |
---|
4206 | | - clk_disable_unprepare(bp->pclk); |
---|
4207 | | - clk_disable_unprepare(bp->rx_clk); |
---|
4208 | | - of_node_put(bp->phy_node); |
---|
| 4679 | + pm_runtime_disable(&pdev->dev); |
---|
| 4680 | + pm_runtime_dont_use_autosuspend(&pdev->dev); |
---|
| 4681 | + if (!pm_runtime_suspended(&pdev->dev)) { |
---|
| 4682 | + clk_disable_unprepare(bp->tx_clk); |
---|
| 4683 | + clk_disable_unprepare(bp->hclk); |
---|
| 4684 | + clk_disable_unprepare(bp->pclk); |
---|
| 4685 | + clk_disable_unprepare(bp->rx_clk); |
---|
| 4686 | + clk_disable_unprepare(bp->tsu_clk); |
---|
| 4687 | + pm_runtime_set_suspended(&pdev->dev); |
---|
| 4688 | + } |
---|
| 4689 | + phylink_destroy(bp->phylink); |
---|
4209 | 4690 | free_netdev(dev); |
---|
4210 | 4691 | } |
---|
4211 | 4692 | |
---|
.. | .. |
---|
4214 | 4695 | |
---|
4215 | 4696 | static int __maybe_unused macb_suspend(struct device *dev) |
---|
4216 | 4697 | { |
---|
4217 | | - struct platform_device *pdev = to_platform_device(dev); |
---|
4218 | | - struct net_device *netdev = platform_get_drvdata(pdev); |
---|
| 4698 | + struct net_device *netdev = dev_get_drvdata(dev); |
---|
4219 | 4699 | struct macb *bp = netdev_priv(netdev); |
---|
| 4700 | + struct macb_queue *queue = bp->queues; |
---|
| 4701 | + unsigned long flags; |
---|
| 4702 | + unsigned int q; |
---|
| 4703 | + int err; |
---|
4220 | 4704 | |
---|
4221 | | - netif_carrier_off(netdev); |
---|
4222 | | - netif_device_detach(netdev); |
---|
| 4705 | + if (!netif_running(netdev)) |
---|
| 4706 | + return 0; |
---|
4223 | 4707 | |
---|
4224 | 4708 | if (bp->wol & MACB_WOL_ENABLED) { |
---|
4225 | | - macb_writel(bp, IER, MACB_BIT(WOL)); |
---|
4226 | | - macb_writel(bp, WOL, MACB_BIT(MAG)); |
---|
| 4709 | + spin_lock_irqsave(&bp->lock, flags); |
---|
| 4710 | + /* Flush all status bits */ |
---|
| 4711 | + macb_writel(bp, TSR, -1); |
---|
| 4712 | + macb_writel(bp, RSR, -1); |
---|
| 4713 | + for (q = 0, queue = bp->queues; q < bp->num_queues; |
---|
| 4714 | + ++q, ++queue) { |
---|
| 4715 | + /* Disable all interrupts */ |
---|
| 4716 | + queue_writel(queue, IDR, -1); |
---|
| 4717 | + queue_readl(queue, ISR); |
---|
| 4718 | + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
| 4719 | + queue_writel(queue, ISR, -1); |
---|
| 4720 | + } |
---|
| 4721 | + /* Change interrupt handler and |
---|
| 4722 | + * Enable WoL IRQ on queue 0 |
---|
| 4723 | + */ |
---|
| 4724 | + devm_free_irq(dev, bp->queues[0].irq, bp->queues); |
---|
| 4725 | + if (macb_is_gem(bp)) { |
---|
| 4726 | + err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, |
---|
| 4727 | + IRQF_SHARED, netdev->name, bp->queues); |
---|
| 4728 | + if (err) { |
---|
| 4729 | + dev_err(dev, |
---|
| 4730 | + "Unable to request IRQ %d (error %d)\n", |
---|
| 4731 | + bp->queues[0].irq, err); |
---|
| 4732 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 4733 | + return err; |
---|
| 4734 | + } |
---|
| 4735 | + queue_writel(bp->queues, IER, GEM_BIT(WOL)); |
---|
| 4736 | + gem_writel(bp, WOL, MACB_BIT(MAG)); |
---|
| 4737 | + } else { |
---|
| 4738 | + err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, |
---|
| 4739 | + IRQF_SHARED, netdev->name, bp->queues); |
---|
| 4740 | + if (err) { |
---|
| 4741 | + dev_err(dev, |
---|
| 4742 | + "Unable to request IRQ %d (error %d)\n", |
---|
| 4743 | + bp->queues[0].irq, err); |
---|
| 4744 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 4745 | + return err; |
---|
| 4746 | + } |
---|
| 4747 | + queue_writel(bp->queues, IER, MACB_BIT(WOL)); |
---|
| 4748 | + macb_writel(bp, WOL, MACB_BIT(MAG)); |
---|
| 4749 | + } |
---|
| 4750 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 4751 | + |
---|
4227 | 4752 | enable_irq_wake(bp->queues[0].irq); |
---|
4228 | | - } else { |
---|
4229 | | - clk_disable_unprepare(bp->tx_clk); |
---|
4230 | | - clk_disable_unprepare(bp->hclk); |
---|
4231 | | - clk_disable_unprepare(bp->pclk); |
---|
4232 | | - clk_disable_unprepare(bp->rx_clk); |
---|
4233 | 4753 | } |
---|
| 4754 | + |
---|
| 4755 | + netif_device_detach(netdev); |
---|
| 4756 | + for (q = 0, queue = bp->queues; q < bp->num_queues; |
---|
| 4757 | + ++q, ++queue) |
---|
| 4758 | + napi_disable(&queue->napi); |
---|
| 4759 | + |
---|
| 4760 | + if (!(bp->wol & MACB_WOL_ENABLED)) { |
---|
| 4761 | + rtnl_lock(); |
---|
| 4762 | + phylink_stop(bp->phylink); |
---|
| 4763 | + rtnl_unlock(); |
---|
| 4764 | + spin_lock_irqsave(&bp->lock, flags); |
---|
| 4765 | + macb_reset_hw(bp); |
---|
| 4766 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 4767 | + } |
---|
| 4768 | + |
---|
| 4769 | + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) |
---|
| 4770 | + bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); |
---|
| 4771 | + |
---|
| 4772 | + if (netdev->hw_features & NETIF_F_NTUPLE) |
---|
| 4773 | + bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); |
---|
| 4774 | + |
---|
| 4775 | + if (bp->ptp_info) |
---|
| 4776 | + bp->ptp_info->ptp_remove(netdev); |
---|
| 4777 | + if (!device_may_wakeup(dev)) |
---|
| 4778 | + pm_runtime_force_suspend(dev); |
---|
4234 | 4779 | |
---|
4235 | 4780 | return 0; |
---|
4236 | 4781 | } |
---|
4237 | 4782 | |
---|
4238 | 4783 | static int __maybe_unused macb_resume(struct device *dev) |
---|
4239 | 4784 | { |
---|
4240 | | - struct platform_device *pdev = to_platform_device(dev); |
---|
4241 | | - struct net_device *netdev = platform_get_drvdata(pdev); |
---|
| 4785 | + struct net_device *netdev = dev_get_drvdata(dev); |
---|
4242 | 4786 | struct macb *bp = netdev_priv(netdev); |
---|
| 4787 | + struct macb_queue *queue = bp->queues; |
---|
| 4788 | + unsigned long flags; |
---|
| 4789 | + unsigned int q; |
---|
| 4790 | + int err; |
---|
| 4791 | + |
---|
| 4792 | + if (!netif_running(netdev)) |
---|
| 4793 | + return 0; |
---|
| 4794 | + |
---|
| 4795 | + if (!device_may_wakeup(dev)) |
---|
| 4796 | + pm_runtime_force_resume(dev); |
---|
4243 | 4797 | |
---|
4244 | 4798 | if (bp->wol & MACB_WOL_ENABLED) { |
---|
4245 | | - macb_writel(bp, IDR, MACB_BIT(WOL)); |
---|
4246 | | - macb_writel(bp, WOL, 0); |
---|
| 4799 | + spin_lock_irqsave(&bp->lock, flags); |
---|
| 4800 | + /* Disable WoL */ |
---|
| 4801 | + if (macb_is_gem(bp)) { |
---|
| 4802 | + queue_writel(bp->queues, IDR, GEM_BIT(WOL)); |
---|
| 4803 | + gem_writel(bp, WOL, 0); |
---|
| 4804 | + } else { |
---|
| 4805 | + queue_writel(bp->queues, IDR, MACB_BIT(WOL)); |
---|
| 4806 | + macb_writel(bp, WOL, 0); |
---|
| 4807 | + } |
---|
| 4808 | + /* Clear ISR on queue 0 */ |
---|
| 4809 | + queue_readl(bp->queues, ISR); |
---|
| 4810 | + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
---|
| 4811 | + queue_writel(bp->queues, ISR, -1); |
---|
| 4812 | + /* Replace interrupt handler on queue 0 */ |
---|
| 4813 | + devm_free_irq(dev, bp->queues[0].irq, bp->queues); |
---|
| 4814 | + err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, |
---|
| 4815 | + IRQF_SHARED, netdev->name, bp->queues); |
---|
| 4816 | + if (err) { |
---|
| 4817 | + dev_err(dev, |
---|
| 4818 | + "Unable to request IRQ %d (error %d)\n", |
---|
| 4819 | + bp->queues[0].irq, err); |
---|
| 4820 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 4821 | + return err; |
---|
| 4822 | + } |
---|
| 4823 | + spin_unlock_irqrestore(&bp->lock, flags); |
---|
| 4824 | + |
---|
4247 | 4825 | disable_irq_wake(bp->queues[0].irq); |
---|
4248 | | - } else { |
---|
| 4826 | + |
---|
| 4827 | + /* Now make sure we disable phy before moving |
---|
| 4828 | + * to common restore path |
---|
| 4829 | + */ |
---|
| 4830 | + rtnl_lock(); |
---|
| 4831 | + phylink_stop(bp->phylink); |
---|
| 4832 | + rtnl_unlock(); |
---|
| 4833 | + } |
---|
| 4834 | + |
---|
| 4835 | + for (q = 0, queue = bp->queues; q < bp->num_queues; |
---|
| 4836 | + ++q, ++queue) |
---|
| 4837 | + napi_enable(&queue->napi); |
---|
| 4838 | + |
---|
| 4839 | + if (netdev->hw_features & NETIF_F_NTUPLE) |
---|
| 4840 | + gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); |
---|
| 4841 | + |
---|
| 4842 | + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) |
---|
| 4843 | + macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); |
---|
| 4844 | + |
---|
| 4845 | + macb_writel(bp, NCR, MACB_BIT(MPE)); |
---|
| 4846 | + macb_init_hw(bp); |
---|
| 4847 | + macb_set_rx_mode(netdev); |
---|
| 4848 | + macb_restore_features(bp); |
---|
| 4849 | + rtnl_lock(); |
---|
| 4850 | + phylink_start(bp->phylink); |
---|
| 4851 | + rtnl_unlock(); |
---|
| 4852 | + |
---|
| 4853 | + netif_device_attach(netdev); |
---|
| 4854 | + if (bp->ptp_info) |
---|
| 4855 | + bp->ptp_info->ptp_init(netdev); |
---|
| 4856 | + |
---|
| 4857 | + return 0; |
---|
| 4858 | +} |
---|
| 4859 | + |
---|
| 4860 | +static int __maybe_unused macb_runtime_suspend(struct device *dev) |
---|
| 4861 | +{ |
---|
| 4862 | + struct net_device *netdev = dev_get_drvdata(dev); |
---|
| 4863 | + struct macb *bp = netdev_priv(netdev); |
---|
| 4864 | + |
---|
| 4865 | + if (!(device_may_wakeup(dev))) { |
---|
| 4866 | + clk_disable_unprepare(bp->tx_clk); |
---|
| 4867 | + clk_disable_unprepare(bp->hclk); |
---|
| 4868 | + clk_disable_unprepare(bp->pclk); |
---|
| 4869 | + clk_disable_unprepare(bp->rx_clk); |
---|
| 4870 | + } |
---|
| 4871 | + clk_disable_unprepare(bp->tsu_clk); |
---|
| 4872 | + |
---|
| 4873 | + return 0; |
---|
| 4874 | +} |
---|
| 4875 | + |
---|
| 4876 | +static int __maybe_unused macb_runtime_resume(struct device *dev) |
---|
| 4877 | +{ |
---|
| 4878 | + struct net_device *netdev = dev_get_drvdata(dev); |
---|
| 4879 | + struct macb *bp = netdev_priv(netdev); |
---|
| 4880 | + |
---|
| 4881 | + if (!(device_may_wakeup(dev))) { |
---|
4249 | 4882 | clk_prepare_enable(bp->pclk); |
---|
4250 | 4883 | clk_prepare_enable(bp->hclk); |
---|
4251 | 4884 | clk_prepare_enable(bp->tx_clk); |
---|
4252 | 4885 | clk_prepare_enable(bp->rx_clk); |
---|
4253 | 4886 | } |
---|
4254 | | - |
---|
4255 | | - netif_device_attach(netdev); |
---|
| 4887 | + clk_prepare_enable(bp->tsu_clk); |
---|
4256 | 4888 | |
---|
4257 | 4889 | return 0; |
---|
4258 | 4890 | } |
---|
4259 | 4891 | |
---|
4260 | | -static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); |
---|
| 4892 | +static const struct dev_pm_ops macb_pm_ops = { |
---|
| 4893 | + SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume) |
---|
| 4894 | + SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL) |
---|
| 4895 | +}; |
---|
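With runtime PM wired up, macb_runtime_suspend() always gates the TSU clock and additionally gates the bus and packet clocks when the device is not a wakeup source, once the MACB_PM_TIMEOUT autosuspend delay expires. Any register access outside the open/close window therefore has to hold a runtime-PM reference, which is the pattern probe and at91ether_open() follow above. A minimal kernel-context sketch of that bracket (the function name is hypothetical):

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical register access bracketed by runtime-PM references. */
static int macb_do_register_work_sketch(struct device *dev)
{
	int err;

	err = pm_runtime_get_sync(dev);	/* resume (re-enable clocks) if needed */
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		return err;
	}

	/* ... MMIO access is safe here: the clocks are guaranteed on ... */

	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);
	return 0;
}
```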
4261 | 4896 | |
---|
4262 | 4897 | static struct platform_driver macb_driver = { |
---|
4263 | 4898 | .probe = macb_probe, |
---|