2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/net/ethernet/cadence/macb_main.c
....@@ -1,15 +1,13 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * Cadence MACB/GEM Ethernet Controller driver
34 *
45 * Copyright (C) 2004-2006 Atmel Corporation
5
- *
6
- * This program is free software; you can redistribute it and/or modify
7
- * it under the terms of the GNU General Public License version 2 as
8
- * published by the Free Software Foundation.
96 */
107
118 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
129 #include <linux/clk.h>
10
+#include <linux/clk-provider.h>
1311 #include <linux/crc32.h>
1412 #include <linux/module.h>
1513 #include <linux/moduleparam.h>
....@@ -25,9 +23,8 @@
2523 #include <linux/netdevice.h>
2624 #include <linux/etherdevice.h>
2725 #include <linux/dma-mapping.h>
28
-#include <linux/platform_data/macb.h>
2926 #include <linux/platform_device.h>
30
-#include <linux/phy.h>
27
+#include <linux/phylink.h>
3128 #include <linux/of.h>
3229 #include <linux/of_device.h>
3330 #include <linux/of_gpio.h>
....@@ -36,7 +33,16 @@
3633 #include <linux/ip.h>
3734 #include <linux/udp.h>
3835 #include <linux/tcp.h>
36
+#include <linux/iopoll.h>
37
+#include <linux/pm_runtime.h>
3938 #include "macb.h"
39
+
40
+/* This structure is only used for MACB on SiFive FU540 devices */
41
+struct sifive_fu540_macb_mgmt {
42
+ void __iomem *reg;
43
+ unsigned long rate;
44
+ struct clk_hw hw;
45
+};
4046
4147 #define MACB_RX_BUFFER_SIZE 128
4248 #define RX_BUFFER_MULTIPLE 64 /* bytes */
....@@ -82,6 +88,10 @@
8288 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
8389 */
8490 #define MACB_HALT_TIMEOUT 1230
91
+
92
+#define MACB_PM_TIMEOUT 100 /* ms */
93
+
94
+#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
8595
8696 /* DMA buffer descriptor might be different size
8797 * depends on hardware configuration:
....@@ -158,9 +168,8 @@
158168 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
159169 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
160170 {
161
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
162
- return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
163
- return NULL;
171
+ return (struct macb_dma_desc_64 *)((void *)desc
172
+ + sizeof(struct macb_dma_desc));
164173 }
165174 #endif
166175
....@@ -283,34 +292,22 @@
283292
284293 static void macb_get_hwaddr(struct macb *bp)
285294 {
286
- struct macb_platform_data *pdata;
287295 u32 bottom;
288296 u16 top;
289297 u8 addr[6];
290298 int i;
291
-
292
- pdata = dev_get_platdata(&bp->pdev->dev);
293299
294300 /* Check all 4 address register for valid address */
295301 for (i = 0; i < 4; i++) {
296302 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
297303 top = macb_or_gem_readl(bp, SA1T + i * 8);
298304
299
- if (pdata && pdata->rev_eth_addr) {
300
- addr[5] = bottom & 0xff;
301
- addr[4] = (bottom >> 8) & 0xff;
302
- addr[3] = (bottom >> 16) & 0xff;
303
- addr[2] = (bottom >> 24) & 0xff;
304
- addr[1] = top & 0xff;
305
- addr[0] = (top & 0xff00) >> 8;
306
- } else {
307
- addr[0] = bottom & 0xff;
308
- addr[1] = (bottom >> 8) & 0xff;
309
- addr[2] = (bottom >> 16) & 0xff;
310
- addr[3] = (bottom >> 24) & 0xff;
311
- addr[4] = top & 0xff;
312
- addr[5] = (top >> 8) & 0xff;
313
- }
305
+ addr[0] = bottom & 0xff;
306
+ addr[1] = (bottom >> 8) & 0xff;
307
+ addr[2] = (bottom >> 16) & 0xff;
308
+ addr[3] = (bottom >> 24) & 0xff;
309
+ addr[4] = top & 0xff;
310
+ addr[5] = (top >> 8) & 0xff;
314311
315312 if (is_valid_ether_addr(addr)) {
316313 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
....@@ -322,50 +319,147 @@
322319 eth_hw_addr_random(bp->dev);
323320 }
324321
322
+static int macb_mdio_wait_for_idle(struct macb *bp)
323
+{
324
+ u32 val;
325
+
326
+ return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
327
+ 1, MACB_MDIO_TIMEOUT);
328
+}
329
+
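For reference, readx_poll_timeout() from <linux/iopoll.h> replaces the driver's old cpu_relax() busy-wait with a bounded sleep-poll; a rough sketch of what the call above expands to (approximate, not the exact macro body):

    /* Approximate expansion of
     *   readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
     *                      1, MACB_MDIO_TIMEOUT);
     * sleeps ~1 usec between reads and re-reads once more before giving
     * up with -ETIMEDOUT after MACB_MDIO_TIMEOUT usecs.
     */
    ktime_t timeout = ktime_add_us(ktime_get(), MACB_MDIO_TIMEOUT);
    u32 val;

    for (;;) {
            val = MACB_READ_NSR(bp);
            if (val & MACB_BIT(IDLE))
                    return 0;
            if (ktime_after(ktime_get(), timeout)) {
                    val = MACB_READ_NSR(bp);
                    return (val & MACB_BIT(IDLE)) ? 0 : -ETIMEDOUT;
            }
            usleep_range(1, 2);
    }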
325330 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
326331 {
327332 struct macb *bp = bus->priv;
328
- int value;
333
+ int status;
329334
330
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
331
- | MACB_BF(RW, MACB_MAN_READ)
332
- | MACB_BF(PHYA, mii_id)
333
- | MACB_BF(REGA, regnum)
334
- | MACB_BF(CODE, MACB_MAN_CODE)));
335
+ status = pm_runtime_get_sync(&bp->pdev->dev);
336
+ if (status < 0) {
337
+ pm_runtime_put_noidle(&bp->pdev->dev);
338
+ goto mdio_pm_exit;
339
+ }
335340
336
- /* wait for end of transfer */
337
- while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
338
- cpu_relax();
341
+ status = macb_mdio_wait_for_idle(bp);
342
+ if (status < 0)
343
+ goto mdio_read_exit;
339344
340
- value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
345
+ if (regnum & MII_ADDR_C45) {
346
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
347
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
348
+ | MACB_BF(PHYA, mii_id)
349
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
350
+ | MACB_BF(DATA, regnum & 0xFFFF)
351
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
341352
342
- return value;
353
+ status = macb_mdio_wait_for_idle(bp);
354
+ if (status < 0)
355
+ goto mdio_read_exit;
356
+
357
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
358
+ | MACB_BF(RW, MACB_MAN_C45_READ)
359
+ | MACB_BF(PHYA, mii_id)
360
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
361
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
362
+ } else {
363
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
364
+ | MACB_BF(RW, MACB_MAN_C22_READ)
365
+ | MACB_BF(PHYA, mii_id)
366
+ | MACB_BF(REGA, regnum)
367
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
368
+ }
369
+
370
+ status = macb_mdio_wait_for_idle(bp);
371
+ if (status < 0)
372
+ goto mdio_read_exit;
373
+
374
+ status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
375
+
376
+mdio_read_exit:
377
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
378
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
379
+mdio_pm_exit:
380
+ return status;
343381 }
344382
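The Clause 45 branch above issues the two-frame sequence the standard requires: an ADDRESS frame that latches the 16-bit register address, then a READ frame to the same MMD. Callers encode the MMD into regnum; a minimal usage sketch under this kernel era's convention (MII_ADDR_C45 flag, device address in bits 16-20; phy_addr is a placeholder for the PHY's bus address):

    #include <linux/mdio.h>

    /* Read MMD 1 (PMA/PMD) control register 0 through the C45 path. */
    u32 regnum = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_CTRL1;
    int val = mdiobus_read(bp->mii_bus, phy_addr, regnum);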
345383 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
346384 u16 value)
347385 {
348386 struct macb *bp = bus->priv;
387
+ int status;
349388
350
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
351
- | MACB_BF(RW, MACB_MAN_WRITE)
352
- | MACB_BF(PHYA, mii_id)
353
- | MACB_BF(REGA, regnum)
354
- | MACB_BF(CODE, MACB_MAN_CODE)
355
- | MACB_BF(DATA, value)));
389
+ status = pm_runtime_get_sync(&bp->pdev->dev);
390
+ if (status < 0) {
391
+ pm_runtime_put_noidle(&bp->pdev->dev);
392
+ goto mdio_pm_exit;
393
+ }
356394
357
- /* wait for end of transfer */
358
- while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
359
- cpu_relax();
395
+ status = macb_mdio_wait_for_idle(bp);
396
+ if (status < 0)
397
+ goto mdio_write_exit;
360398
361
- return 0;
399
+ if (regnum & MII_ADDR_C45) {
400
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
401
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
402
+ | MACB_BF(PHYA, mii_id)
403
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
404
+ | MACB_BF(DATA, regnum & 0xFFFF)
405
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
406
+
407
+ status = macb_mdio_wait_for_idle(bp);
408
+ if (status < 0)
409
+ goto mdio_write_exit;
410
+
411
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
412
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
413
+ | MACB_BF(PHYA, mii_id)
414
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
415
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
416
+ | MACB_BF(DATA, value)));
417
+ } else {
418
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
419
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
420
+ | MACB_BF(PHYA, mii_id)
421
+ | MACB_BF(REGA, regnum)
422
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
423
+ | MACB_BF(DATA, value)));
424
+ }
425
+
426
+ status = macb_mdio_wait_for_idle(bp);
427
+ if (status < 0)
428
+ goto mdio_write_exit;
429
+
430
+mdio_write_exit:
431
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
432
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
433
+mdio_pm_exit:
434
+ return status;
435
+}
436
+
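Both MDIO accessors wrap the register access in the usual runtime-PM reference pattern; a minimal sketch of the idiom (standard pm_runtime_* calls), assuming an autosuspend delay was configured at probe time:

    int ret = pm_runtime_get_sync(dev);     /* resume; takes a usage ref even on failure */
    if (ret < 0) {
            pm_runtime_put_noidle(dev);     /* drop the ref without re-evaluating idle */
            return ret;
    }
    /* ... access the hardware ... */
    pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
    pm_runtime_put_autosuspend(dev);        /* drop the ref; suspend after the delay */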
437
+static void macb_init_buffers(struct macb *bp)
438
+{
439
+ struct macb_queue *queue;
440
+ unsigned int q;
441
+
442
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
443
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
444
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
445
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
446
+ queue_writel(queue, RBQPH,
447
+ upper_32_bits(queue->rx_ring_dma));
448
+#endif
449
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
450
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
451
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
452
+ queue_writel(queue, TBQPH,
453
+ upper_32_bits(queue->tx_ring_dma));
454
+#endif
455
+ }
362456 }
363457
364458 /**
365459 * macb_set_tx_clk() - Set a clock to a new frequency
366
- * @clk Pointer to the clock to change
367
- * @rate New frequency in Hz
368
- * @dev Pointer to the struct net_device
460
+ * @clk: Pointer to the clock to change
461
+ * @speed: New link speed, used to select the matching clock frequency
462
+ * @dev: Pointer to the struct net_device
369463 */
370464 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
371465 {
....@@ -405,169 +499,272 @@
405499 netdev_err(dev, "adjusting tx_clk failed.\n");
406500 }
407501
408
-static void macb_handle_link_change(struct net_device *dev)
502
+static void macb_validate(struct phylink_config *config,
503
+ unsigned long *supported,
504
+ struct phylink_link_state *state)
409505 {
410
- struct macb *bp = netdev_priv(dev);
411
- struct phy_device *phydev = dev->phydev;
506
+ struct net_device *ndev = to_net_dev(config->dev);
507
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
508
+ struct macb *bp = netdev_priv(ndev);
509
+
510
+ /* We only support MII, RMII, GMII, RGMII & SGMII. */
511
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
512
+ state->interface != PHY_INTERFACE_MODE_MII &&
513
+ state->interface != PHY_INTERFACE_MODE_RMII &&
514
+ state->interface != PHY_INTERFACE_MODE_GMII &&
515
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
516
+ !phy_interface_mode_is_rgmii(state->interface)) {
517
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
518
+ return;
519
+ }
520
+
521
+ if (!macb_is_gem(bp) &&
522
+ (state->interface == PHY_INTERFACE_MODE_GMII ||
523
+ phy_interface_mode_is_rgmii(state->interface))) {
524
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
525
+ return;
526
+ }
527
+
528
+ phylink_set_port_modes(mask);
529
+ phylink_set(mask, Autoneg);
530
+ phylink_set(mask, Asym_Pause);
531
+
532
+ phylink_set(mask, 10baseT_Half);
533
+ phylink_set(mask, 10baseT_Full);
534
+ phylink_set(mask, 100baseT_Half);
535
+ phylink_set(mask, 100baseT_Full);
536
+
537
+ if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
538
+ (state->interface == PHY_INTERFACE_MODE_NA ||
539
+ state->interface == PHY_INTERFACE_MODE_GMII ||
540
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
541
+ phy_interface_mode_is_rgmii(state->interface))) {
542
+ phylink_set(mask, 1000baseT_Full);
543
+ phylink_set(mask, 1000baseX_Full);
544
+
545
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
546
+ phylink_set(mask, 1000baseT_Half);
547
+ }
548
+
549
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
550
+ bitmap_and(state->advertising, state->advertising, mask,
551
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
552
+}
553
+
554
+static void macb_mac_pcs_get_state(struct phylink_config *config,
555
+ struct phylink_link_state *state)
556
+{
557
+ state->link = 0;
558
+}
559
+
560
+static void macb_mac_an_restart(struct phylink_config *config)
561
+{
562
+ /* Not supported */
563
+}
564
+
565
+static void macb_mac_config(struct phylink_config *config, unsigned int mode,
566
+ const struct phylink_link_state *state)
567
+{
568
+ struct net_device *ndev = to_net_dev(config->dev);
569
+ struct macb *bp = netdev_priv(ndev);
412570 unsigned long flags;
413
- int status_change = 0;
571
+ u32 old_ctrl, ctrl;
414572
415573 spin_lock_irqsave(&bp->lock, flags);
416574
417
- if (phydev->link) {
418
- if ((bp->speed != phydev->speed) ||
419
- (bp->duplex != phydev->duplex)) {
420
- u32 reg;
575
+ old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
421576
422
- reg = macb_readl(bp, NCFGR);
423
- reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
424
- if (macb_is_gem(bp))
425
- reg &= ~GEM_BIT(GBE);
577
+ if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
578
+ if (state->interface == PHY_INTERFACE_MODE_RMII)
579
+ ctrl |= MACB_BIT(RM9200_RMII);
580
+ } else if (macb_is_gem(bp)) {
581
+ ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
426582
427
- if (phydev->duplex)
428
- reg |= MACB_BIT(FD);
429
- if (phydev->speed == SPEED_100)
430
- reg |= MACB_BIT(SPD);
431
- if (phydev->speed == SPEED_1000 &&
432
- bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
433
- reg |= GEM_BIT(GBE);
434
-
435
- macb_or_gem_writel(bp, NCFGR, reg);
436
-
437
- bp->speed = phydev->speed;
438
- bp->duplex = phydev->duplex;
439
- status_change = 1;
440
- }
583
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
584
+ ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
441585 }
442586
443
- if (phydev->link != bp->link) {
444
- if (!phydev->link) {
445
- bp->speed = 0;
446
- bp->duplex = -1;
447
- }
448
- bp->link = phydev->link;
587
+ /* Apply the new configuration, if any */
588
+ if (old_ctrl ^ ctrl)
589
+ macb_or_gem_writel(bp, NCFGR, ctrl);
449590
450
- status_change = 1;
591
+ spin_unlock_irqrestore(&bp->lock, flags);
592
+}
593
+
594
+static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
595
+ phy_interface_t interface)
596
+{
597
+ struct net_device *ndev = to_net_dev(config->dev);
598
+ struct macb *bp = netdev_priv(ndev);
599
+ struct macb_queue *queue;
600
+ unsigned int q;
601
+ u32 ctrl;
602
+
603
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
604
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
605
+ queue_writel(queue, IDR,
606
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
607
+
608
+ /* Disable Rx and Tx */
609
+ ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
610
+ macb_writel(bp, NCR, ctrl);
611
+
612
+ netif_tx_stop_all_queues(ndev);
613
+}
614
+
615
+static void macb_mac_link_up(struct phylink_config *config,
616
+ struct phy_device *phy,
617
+ unsigned int mode, phy_interface_t interface,
618
+ int speed, int duplex,
619
+ bool tx_pause, bool rx_pause)
620
+{
621
+ struct net_device *ndev = to_net_dev(config->dev);
622
+ struct macb *bp = netdev_priv(ndev);
623
+ struct macb_queue *queue;
624
+ unsigned long flags;
625
+ unsigned int q;
626
+ u32 ctrl;
627
+
628
+ spin_lock_irqsave(&bp->lock, flags);
629
+
630
+ ctrl = macb_or_gem_readl(bp, NCFGR);
631
+
632
+ ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
633
+
634
+ if (speed == SPEED_100)
635
+ ctrl |= MACB_BIT(SPD);
636
+
637
+ if (duplex)
638
+ ctrl |= MACB_BIT(FD);
639
+
640
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
641
+ ctrl &= ~MACB_BIT(PAE);
642
+ if (macb_is_gem(bp)) {
643
+ ctrl &= ~GEM_BIT(GBE);
644
+
645
+ if (speed == SPEED_1000)
646
+ ctrl |= GEM_BIT(GBE);
647
+ }
648
+
649
+ if (rx_pause)
650
+ ctrl |= MACB_BIT(PAE);
651
+
652
+ macb_set_tx_clk(bp->tx_clk, speed, ndev);
653
+
654
+ /* Initialize rings & buffers, as clearing MACB_BIT(TE) on link down
655
+ * cleared the pipeline and control registers.
656
+ */
657
+ bp->macbgem_ops.mog_init_rings(bp);
658
+ macb_init_buffers(bp);
659
+
660
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
661
+ queue_writel(queue, IER,
662
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
451663 }
664
+
665
+ macb_or_gem_writel(bp, NCFGR, ctrl);
452666
453667 spin_unlock_irqrestore(&bp->lock, flags);
454668
455
- if (status_change) {
456
- if (phydev->link) {
457
- /* Update the TX clock rate if and only if the link is
458
- * up and there has been a link change.
459
- */
460
- macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
669
+ /* Enable Rx and Tx */
670
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
461671
462
- netif_carrier_on(dev);
463
- netdev_info(dev, "link up (%d/%s)\n",
464
- phydev->speed,
465
- phydev->duplex == DUPLEX_FULL ?
466
- "Full" : "Half");
467
- } else {
468
- netif_carrier_off(dev);
469
- netdev_info(dev, "link down\n");
470
- }
471
- }
672
+ netif_tx_wake_all_queues(ndev);
472673 }
473674
474
-/* based on au1000_eth. c*/
475
-static int macb_mii_probe(struct net_device *dev)
675
+static const struct phylink_mac_ops macb_phylink_ops = {
676
+ .validate = macb_validate,
677
+ .mac_pcs_get_state = macb_mac_pcs_get_state,
678
+ .mac_an_restart = macb_mac_an_restart,
679
+ .mac_config = macb_mac_config,
680
+ .mac_link_down = macb_mac_link_down,
681
+ .mac_link_up = macb_mac_link_up,
682
+};
683
+
684
+static bool macb_phy_handle_exists(struct device_node *dn)
476685 {
477
- struct macb *bp = netdev_priv(dev);
478
- struct macb_platform_data *pdata;
686
+ dn = of_parse_phandle(dn, "phy-handle", 0);
687
+ of_node_put(dn);
688
+ return dn != NULL;
689
+}
690
+
691
+static int macb_phylink_connect(struct macb *bp)
692
+{
693
+ struct device_node *dn = bp->pdev->dev.of_node;
694
+ struct net_device *dev = bp->dev;
479695 struct phy_device *phydev;
480
- struct device_node *np;
481
- int phy_irq, ret, i;
696
+ int ret;
482697
483
- pdata = dev_get_platdata(&bp->pdev->dev);
484
- np = bp->pdev->dev.of_node;
485
- ret = 0;
698
+ if (dn)
699
+ ret = phylink_of_phy_connect(bp->phylink, dn, 0);
486700
487
- if (np) {
488
- if (of_phy_is_fixed_link(np)) {
489
- bp->phy_node = of_node_get(np);
490
- } else {
491
- bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
492
- /* fallback to standard phy registration if no
493
- * phy-handle was found nor any phy found during
494
- * dt phy registration
495
- */
496
- if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
497
- for (i = 0; i < PHY_MAX_ADDR; i++) {
498
- struct phy_device *phydev;
499
-
500
- phydev = mdiobus_scan(bp->mii_bus, i);
501
- if (IS_ERR(phydev) &&
502
- PTR_ERR(phydev) != -ENODEV) {
503
- ret = PTR_ERR(phydev);
504
- break;
505
- }
506
- }
507
-
508
- if (ret)
509
- return -ENODEV;
510
- }
511
- }
512
- }
513
-
514
- if (bp->phy_node) {
515
- phydev = of_phy_connect(dev, bp->phy_node,
516
- &macb_handle_link_change, 0,
517
- bp->phy_interface);
518
- if (!phydev)
519
- return -ENODEV;
520
- } else {
701
+ if (!dn || (ret && !macb_phy_handle_exists(dn))) {
521702 phydev = phy_find_first(bp->mii_bus);
522703 if (!phydev) {
523704 netdev_err(dev, "no PHY found\n");
524705 return -ENXIO;
525706 }
526707
527
- if (pdata) {
528
- if (gpio_is_valid(pdata->phy_irq_pin)) {
529
- ret = devm_gpio_request(&bp->pdev->dev,
530
- pdata->phy_irq_pin, "phy int");
531
- if (!ret) {
532
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
533
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
534
- }
535
- } else {
536
- phydev->irq = PHY_POLL;
537
- }
538
- }
539
-
540708 /* attach the mac to the phy */
541
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
542
- bp->phy_interface);
543
- if (ret) {
544
- netdev_err(dev, "Could not attach to PHY\n");
545
- return ret;
546
- }
709
+ ret = phylink_connect_phy(bp->phylink, phydev);
547710 }
548711
549
- /* mask with MAC supported features */
550
- if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
551
- phydev->supported &= PHY_GBIT_FEATURES;
552
- else
553
- phydev->supported &= PHY_BASIC_FEATURES;
712
+ if (ret) {
713
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
714
+ return ret;
715
+ }
554716
555
- if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
556
- phydev->supported &= ~SUPPORTED_1000baseT_Half;
557
-
558
- phydev->advertising = phydev->supported;
559
-
560
- bp->link = 0;
561
- bp->speed = 0;
562
- bp->duplex = -1;
717
+ phylink_start(bp->phylink);
563718
564719 return 0;
565720 }
566721
722
+/* based on au1000_eth.c */
723
+static int macb_mii_probe(struct net_device *dev)
724
+{
725
+ struct macb *bp = netdev_priv(dev);
726
+
727
+ bp->phylink_config.dev = &dev->dev;
728
+ bp->phylink_config.type = PHYLINK_NETDEV;
729
+
730
+ bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
731
+ bp->phy_interface, &macb_phylink_ops);
732
+ if (IS_ERR(bp->phylink)) {
733
+ netdev_err(dev, "Could not create a phylink instance (%ld)\n",
734
+ PTR_ERR(bp->phylink));
735
+ return PTR_ERR(bp->phylink);
736
+ }
737
+
738
+ return 0;
739
+}
740
+
741
+static int macb_mdiobus_register(struct macb *bp)
742
+{
743
+ struct device_node *child, *np = bp->pdev->dev.of_node;
744
+
745
+ if (of_phy_is_fixed_link(np))
746
+ return mdiobus_register(bp->mii_bus);
747
+
748
+ /* Only create the PHY from the device tree if at least one PHY is
749
+ * described. Otherwise scan the entire MDIO bus. We do this to support
750
+ * old device trees that did not follow best practices and did not
751
+ * describe their network PHYs.
752
+ */
753
+ for_each_available_child_of_node(np, child)
754
+ if (of_mdiobus_child_is_phy(child)) {
755
+ /* The loop increments the child refcount,
756
+ * decrement it before returning.
757
+ */
758
+ of_node_put(child);
759
+
760
+ return of_mdiobus_register(bp->mii_bus, np);
761
+ }
762
+
763
+ return mdiobus_register(bp->mii_bus);
764
+}
765
+
567766 static int macb_mii_init(struct macb *bp)
568767 {
569
- struct macb_platform_data *pdata;
570
- struct device_node *np;
571768 int err = -ENXIO;
572769
573770 /* Enable management port */
....@@ -586,28 +783,12 @@
586783 bp->pdev->name, bp->pdev->id);
587784 bp->mii_bus->priv = bp;
588785 bp->mii_bus->parent = &bp->pdev->dev;
589
- pdata = dev_get_platdata(&bp->pdev->dev);
590786
591787 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
592788
593
- np = bp->pdev->dev.of_node;
594
- if (np && of_phy_is_fixed_link(np)) {
595
- if (of_phy_register_fixed_link(np) < 0) {
596
- dev_err(&bp->pdev->dev,
597
- "broken fixed-link specification %pOF\n", np);
598
- goto err_out_free_mdiobus;
599
- }
600
-
601
- err = mdiobus_register(bp->mii_bus);
602
- } else {
603
- if (pdata)
604
- bp->mii_bus->phy_mask = pdata->phy_mask;
605
-
606
- err = of_mdiobus_register(bp->mii_bus, np);
607
- }
608
-
789
+ err = macb_mdiobus_register(bp);
609790 if (err)
610
- goto err_out_free_fixed_link;
791
+ goto err_out_free_mdiobus;
611792
612793 err = macb_mii_probe(bp->dev);
613794 if (err)
....@@ -617,11 +798,7 @@
617798
618799 err_out_unregister_bus:
619800 mdiobus_unregister(bp->mii_bus);
620
-err_out_free_fixed_link:
621
- if (np && of_phy_is_fixed_link(np))
622
- of_phy_deregister_fixed_link(np);
623801 err_out_free_mdiobus:
624
- of_node_put(bp->phy_node);
625802 mdiobus_free(bp->mii_bus);
626803 err_out:
627804 return err;
....@@ -707,6 +884,10 @@
707884 }
708885 #endif
709886 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
887
+#ifdef CONFIG_MACB_USE_HWSTAMP
888
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
889
+ addr &= ~GEM_BIT(DMA_RXVALID);
890
+#endif
710891 return addr;
711892 }
712893
....@@ -915,7 +1096,6 @@
9151096 /* Make hw descriptor updates visible to CPU */
9161097 rmb();
9171098
918
- queue->rx_prepared_head++;
9191099 desc = macb_rx_desc(queue, entry);
9201100
9211101 if (!queue->rx_skbuff[entry]) {
....@@ -954,6 +1134,7 @@
9541134 dma_wmb();
9551135 desc->addr &= ~MACB_BIT(RX_USED);
9561136 }
1137
+ queue->rx_prepared_head++;
9571138 }
9581139
9591140 /* Make descriptor updates visible to hardware */
....@@ -984,7 +1165,8 @@
9841165 */
9851166 }
9861167
987
-static int gem_rx(struct macb_queue *queue, int budget)
1168
+static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1169
+ int budget)
9881170 {
9891171 struct macb *bp = queue->bp;
9901172 unsigned int len;
....@@ -1066,7 +1248,7 @@
10661248 skb->data, 32, true);
10671249 #endif
10681250
1069
- netif_receive_skb(skb);
1251
+ napi_gro_receive(napi, skb);
10701252 }
10711253
10721254 gem_rx_refill(queue);
....@@ -1074,8 +1256,8 @@
10741256 return count;
10751257 }
10761258
1077
-static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
1078
- unsigned int last_frag)
1259
+static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1260
+ unsigned int first_frag, unsigned int last_frag)
10791261 {
10801262 unsigned int len;
10811263 unsigned int frag;
....@@ -1151,7 +1333,7 @@
11511333 bp->dev->stats.rx_bytes += skb->len;
11521334 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
11531335 skb->len, skb->csum);
1154
- netif_receive_skb(skb);
1336
+ napi_gro_receive(napi, skb);
11551337
11561338 return 0;
11571339 }
....@@ -1174,7 +1356,8 @@
11741356 queue->rx_tail = 0;
11751357 }
11761358
1177
-static int macb_rx(struct macb_queue *queue, int budget)
1359
+static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1360
+ int budget)
11781361 {
11791362 struct macb *bp = queue->bp;
11801363 bool reset_rx_queue = false;
....@@ -1211,7 +1394,7 @@
12111394 continue;
12121395 }
12131396
1214
- dropped = macb_rx_frame(queue, first_frag, tail);
1397
+ dropped = macb_rx_frame(queue, napi, first_frag, tail);
12151398 first_frag = -1;
12161399 if (unlikely(dropped < 0)) {
12171400 reset_rx_queue = true;
....@@ -1265,11 +1448,18 @@
12651448 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
12661449 (unsigned long)status, budget);
12671450
1268
- work_done = bp->macbgem_ops.mog_rx(queue, budget);
1451
+ work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
12691452 if (work_done < budget) {
12701453 napi_complete_done(napi, work_done);
12711454
1272
- /* Packets received while interrupts were disabled */
1455
+ /* RSR bits only seem to propagate to raise interrupts when
1456
+ * interrupts are enabled at the time, so if bits are already
1457
+ * set due to packets received while interrupts were disabled,
1458
+ * they will not cause another interrupt to be generated when
1459
+ * interrupts are re-enabled.
1460
+ * Check for this case here. This has been seen to happen
1461
+ * around 30% of the time under heavy network load.
1462
+ */
12731463 status = macb_readl(bp, RSR);
12741464 if (status) {
12751465 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
....@@ -1277,6 +1467,22 @@
12771467 napi_reschedule(napi);
12781468 } else {
12791469 queue_writel(queue, IER, bp->rx_intr_mask);
1470
+
1471
+ /* In rare cases, packets could have been received in
1472
+ * the window between the check above and re-enabling
1473
+ * interrupts. Therefore, a double-check is required
1474
+ * to avoid losing a wakeup. This can potentially race
1475
+ * with the interrupt handler doing the same actions
1476
+ * if an interrupt is raised just after enabling them,
1477
+ * but this should be harmless.
1478
+ */
1479
+ status = macb_readl(bp, RSR);
1480
+ if (unlikely(status)) {
1481
+ queue_writel(queue, IDR, bp->rx_intr_mask);
1482
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1483
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
1484
+ napi_schedule(napi);
1485
+ }
12801486 }
12811487 }
12821488
....@@ -1285,11 +1491,11 @@
12851491 return work_done;
12861492 }
12871493
1288
-static void macb_hresp_error_task(unsigned long data)
1494
+static void macb_hresp_error_task(struct tasklet_struct *t)
12891495 {
1290
- struct macb *bp = (struct macb *)data;
1496
+ struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
12911497 struct net_device *dev = bp->dev;
1292
- struct macb_queue *queue = bp->queues;
1498
+ struct macb_queue *queue;
12931499 unsigned int q;
12941500 u32 ctrl;
12951501
....@@ -1308,26 +1514,14 @@
13081514 bp->macbgem_ops.mog_init_rings(bp);
13091515
13101516 /* Initialize TX and RX buffers */
1311
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1312
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
1313
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1314
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1315
- queue_writel(queue, RBQPH,
1316
- upper_32_bits(queue->rx_ring_dma));
1317
-#endif
1318
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1319
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1320
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1321
- queue_writel(queue, TBQPH,
1322
- upper_32_bits(queue->tx_ring_dma));
1323
-#endif
1517
+ macb_init_buffers(bp);
13241518
1325
- /* Enable interrupts */
1519
+ /* Enable interrupts */
1520
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
13261521 queue_writel(queue, IER,
13271522 bp->rx_intr_mask |
13281523 MACB_TX_INT_FLAGS |
13291524 MACB_BIT(HRESP));
1330
- }
13311525
13321526 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
13331527 macb_writel(bp, NCR, ctrl);
....@@ -1341,6 +1535,7 @@
13411535 unsigned int head = queue->tx_head;
13421536 unsigned int tail = queue->tx_tail;
13431537 struct macb *bp = queue->bp;
1538
+ unsigned int head_idx, tbqp;
13441539
13451540 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
13461541 queue_writel(queue, ISR, MACB_BIT(TXUBR));
....@@ -1348,7 +1543,72 @@
13481543 if (head == tail)
13491544 return;
13501545
1546
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1547
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1548
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
1549
+
1550
+ if (tbqp == head_idx)
1551
+ return;
1552
+
13511553 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1554
+}
1555
+
1556
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
1557
+{
1558
+ struct macb_queue *queue = dev_id;
1559
+ struct macb *bp = queue->bp;
1560
+ u32 status;
1561
+
1562
+ status = queue_readl(queue, ISR);
1563
+
1564
+ if (unlikely(!status))
1565
+ return IRQ_NONE;
1566
+
1567
+ spin_lock(&bp->lock);
1568
+
1569
+ if (status & MACB_BIT(WOL)) {
1570
+ queue_writel(queue, IDR, MACB_BIT(WOL));
1571
+ macb_writel(bp, WOL, 0);
1572
+ netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1573
+ (unsigned int)(queue - bp->queues),
1574
+ (unsigned long)status);
1575
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1576
+ queue_writel(queue, ISR, MACB_BIT(WOL));
1577
+ pm_wakeup_event(&bp->pdev->dev, 0);
1578
+ }
1579
+
1580
+ spin_unlock(&bp->lock);
1581
+
1582
+ return IRQ_HANDLED;
1583
+}
1584
+
1585
+static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
1586
+{
1587
+ struct macb_queue *queue = dev_id;
1588
+ struct macb *bp = queue->bp;
1589
+ u32 status;
1590
+
1591
+ status = queue_readl(queue, ISR);
1592
+
1593
+ if (unlikely(!status))
1594
+ return IRQ_NONE;
1595
+
1596
+ spin_lock(&bp->lock);
1597
+
1598
+ if (status & GEM_BIT(WOL)) {
1599
+ queue_writel(queue, IDR, GEM_BIT(WOL));
1600
+ gem_writel(bp, WOL, 0);
1601
+ netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1602
+ (unsigned int)(queue - bp->queues),
1603
+ (unsigned long)status);
1604
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1605
+ queue_writel(queue, ISR, GEM_BIT(WOL));
1606
+ pm_wakeup_event(&bp->pdev->dev, 0);
1607
+ }
1608
+
1609
+ spin_unlock(&bp->lock);
1610
+
1611
+ return IRQ_HANDLED;
13521612 }
13531613
13541614 static irqreturn_t macb_interrupt(int irq, void *dev_id)
....@@ -1707,7 +1967,6 @@
17071967 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
17081968 skb_is_nonlinear(*skb);
17091969 int padlen = ETH_ZLEN - (*skb)->len;
1710
- int headroom = skb_headroom(*skb);
17111970 int tailroom = skb_tailroom(*skb);
17121971 struct sk_buff *nskb;
17131972 u32 fcs;
....@@ -1721,9 +1980,6 @@
17211980 /* FCS could be appended to tailroom. */
17221981 if (tailroom >= ETH_FCS_LEN)
17231982 goto add_fcs;
1724
- /* FCS could be appeded by moving data to headroom. */
1725
- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
1726
- padlen = 0;
17271983 /* No room for FCS, need to reallocate skb. */
17281984 else
17291985 padlen = ETH_FCS_LEN;
....@@ -1732,24 +1988,17 @@
17321988 padlen += ETH_FCS_LEN;
17331989 }
17341990
1735
- if (!cloned && headroom + tailroom >= padlen) {
1736
- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
1737
- skb_set_tail_pointer(*skb, (*skb)->len);
1738
- } else {
1991
+ if (cloned || tailroom < padlen) {
17391992 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
17401993 if (!nskb)
17411994 return -ENOMEM;
17421995
1743
- dev_kfree_skb_any(*skb);
1996
+ dev_consume_skb_any(*skb);
17441997 *skb = nskb;
17451998 }
17461999
1747
- if (padlen) {
1748
- if (padlen >= ETH_FCS_LEN)
1749
- skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1750
- else
1751
- skb_trim(*skb, ETH_FCS_LEN - padlen);
1752
- }
2000
+ if (padlen > ETH_FCS_LEN)
2001
+ skb_put_zero(*skb, padlen - ETH_FCS_LEN);
17532002
17542003 add_fcs:
17552004 /* set FCS to packet */
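Worked example for the rewritten padding logic: a 42-byte frame with ample tailroom gets padlen = ETH_ZLEN - 42 = 18, bumped to 22 to reserve FCS room. No reallocation is needed, skb_put_zero() appends 22 - ETH_FCS_LEN = 18 zero bytes to reach the 60-byte minimum, and the add_fcs code then appends the 4-byte CRC into the reserved space.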
....@@ -1772,7 +2021,7 @@
17722021 unsigned long flags;
17732022 unsigned int desc_cnt, nr_frags, frag_size, f;
17742023 unsigned int hdrlen;
1775
- bool is_lso, is_udp = 0;
2024
+ bool is_lso;
17762025 netdev_tx_t ret = NETDEV_TX_OK;
17772026
17782027 if (macb_clear_csum(skb)) {
....@@ -1788,10 +2037,8 @@
17882037 is_lso = (skb_shinfo(skb)->gso_size != 0);
17892038
17902039 if (is_lso) {
1791
- is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1792
-
17932040 /* length of headers */
1794
- if (is_udp)
2041
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP)
17952042 /* only queue eth + ip headers separately for UDP */
17962043 hdrlen = skb_transport_offset(skb);
17972044 else
....@@ -2218,19 +2465,13 @@
22182465
22192466 static void macb_init_hw(struct macb *bp)
22202467 {
2221
- struct macb_queue *queue;
2222
- unsigned int q;
2223
-
22242468 u32 config;
22252469
22262470 macb_reset_hw(bp);
22272471 macb_set_hwaddr(bp);
22282472
22292473 config = macb_mdc_clk_div(bp);
2230
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2231
- config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
22322474 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
2233
- config |= MACB_BIT(PAE); /* PAuse Enable */
22342475 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
22352476 if (bp->caps & MACB_CAPS_JUMBO)
22362477 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
....@@ -2246,36 +2487,11 @@
22462487 macb_writel(bp, NCFGR, config);
22472488 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
22482489 gem_writel(bp, JML, bp->jumbo_max_len);
2249
- bp->speed = SPEED_10;
2250
- bp->duplex = DUPLEX_HALF;
22512490 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
22522491 if (bp->caps & MACB_CAPS_JUMBO)
22532492 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
22542493
22552494 macb_configure_dma(bp);
2256
-
2257
- /* Initialize TX and RX buffers */
2258
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2259
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
2260
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2261
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2262
- queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
2263
-#endif
2264
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
2265
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2266
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2267
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
2268
-#endif
2269
-
2270
- /* Enable interrupts */
2271
- queue_writel(queue, IER,
2272
- bp->rx_intr_mask |
2273
- MACB_TX_INT_FLAGS |
2274
- MACB_BIT(HRESP));
2275
- }
2276
-
2277
- /* Enable TX and RX */
2278
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
22792495 }
22802496
22812497 /* The hash address register is 64 bits long and takes up two
....@@ -2399,20 +2615,17 @@
23992615
24002616 static int macb_open(struct net_device *dev)
24012617 {
2402
- struct macb *bp = netdev_priv(dev);
24032618 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2619
+ struct macb *bp = netdev_priv(dev);
24042620 struct macb_queue *queue;
24052621 unsigned int q;
24062622 int err;
24072623
24082624 netdev_dbg(bp->dev, "open\n");
24092625
2410
- /* carrier starts down */
2411
- netif_carrier_off(dev);
2412
-
2413
- /* if the phy is not yet register, retry later*/
2414
- if (!dev->phydev)
2415
- return -EAGAIN;
2626
+ err = pm_runtime_get_sync(&bp->pdev->dev);
2627
+ if (err < 0)
2628
+ goto pm_exit;
24162629
24172630 /* RX buffers initialization */
24182631 macb_init_rx_buffer_size(bp, bufsz);
....@@ -2421,17 +2634,17 @@
24212634 if (err) {
24222635 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
24232636 err);
2424
- return err;
2637
+ goto pm_exit;
24252638 }
24262639
24272640 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
24282641 napi_enable(&queue->napi);
24292642
2430
- bp->macbgem_ops.mog_init_rings(bp);
24312643 macb_init_hw(bp);
24322644
2433
- /* schedule a link state check */
2434
- phy_start(dev->phydev);
2645
+ err = macb_phylink_connect(bp);
2646
+ if (err)
2647
+ goto reset_hw;
24352648
24362649 netif_tx_start_all_queues(dev);
24372650
....@@ -2439,6 +2652,15 @@
24392652 bp->ptp_info->ptp_init(dev);
24402653
24412654 return 0;
2655
+
2656
+reset_hw:
2657
+ macb_reset_hw(bp);
2658
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2659
+ napi_disable(&queue->napi);
2660
+ macb_free_consistent(bp);
2661
+pm_exit:
2662
+ pm_runtime_put_sync(&bp->pdev->dev);
2663
+ return err;
24422664 }
24432665
24442666 static int macb_close(struct net_device *dev)
....@@ -2453,8 +2675,8 @@
24532675 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
24542676 napi_disable(&queue->napi);
24552677
2456
- if (dev->phydev)
2457
- phy_stop(dev->phydev);
2678
+ phylink_stop(bp->phylink);
2679
+ phylink_disconnect_phy(bp->phylink);
24582680
24592681 spin_lock_irqsave(&bp->lock, flags);
24602682 macb_reset_hw(bp);
....@@ -2465,6 +2687,8 @@
24652687
24662688 if (bp->ptp_info)
24672689 bp->ptp_info->ptp_remove(dev);
2690
+
2691
+ pm_runtime_put(&bp->pdev->dev);
24682692
24692693 return 0;
24702694 }
....@@ -2686,11 +2910,9 @@
26862910 {
26872911 struct macb *bp = netdev_priv(netdev);
26882912
2689
- wol->supported = 0;
2690
- wol->wolopts = 0;
2691
-
26922913 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2693
- wol->supported = WAKE_MAGIC;
2914
+ phylink_ethtool_get_wol(bp->phylink, wol);
2915
+ wol->supported |= WAKE_MAGIC;
26942916
26952917 if (bp->wol & MACB_WOL_ENABLED)
26962918 wol->wolopts |= WAKE_MAGIC;
....@@ -2700,6 +2922,15 @@
27002922 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
27012923 {
27022924 struct macb *bp = netdev_priv(netdev);
2925
+ int ret;
2926
+
2927
+ /* Pass the request to the phylink layer */
2928
+ ret = phylink_ethtool_set_wol(bp->phylink, wol);
2929
+ /* Don't manage WoL on MAC if handled by the PHY
2930
+ * or if there's a failure in talking to the PHY
2931
+ */
2932
+ if (!ret || ret != -EOPNOTSUPP)
2933
+ return ret;
27032934
27042935 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
27052936 (wol->wolopts & ~WAKE_MAGIC))
....@@ -2713,6 +2944,22 @@
27132944 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
27142945
27152946 return 0;
2947
+}
2948
+
2949
+static int macb_get_link_ksettings(struct net_device *netdev,
2950
+ struct ethtool_link_ksettings *kset)
2951
+{
2952
+ struct macb *bp = netdev_priv(netdev);
2953
+
2954
+ return phylink_ethtool_ksettings_get(bp->phylink, kset);
2955
+}
2956
+
2957
+static int macb_set_link_ksettings(struct net_device *netdev,
2958
+ const struct ethtool_link_ksettings *kset)
2959
+{
2960
+ struct macb *bp = netdev_priv(netdev);
2961
+
2962
+ return phylink_ethtool_ksettings_set(bp->phylink, kset);
27162963 }
27172964
27182965 static void macb_get_ringparam(struct net_device *netdev,
....@@ -2842,9 +3089,13 @@
28423089
28433090 static void gem_enable_flow_filters(struct macb *bp, bool enable)
28443091 {
3092
+ struct net_device *netdev = bp->dev;
28453093 struct ethtool_rx_fs_item *item;
28463094 u32 t2_scr;
28473095 int num_t2_scr;
3096
+
3097
+ if (!(netdev->features & NETIF_F_NTUPLE))
3098
+ return;
28483099
28493100 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
28503101
....@@ -2890,6 +3141,9 @@
28903141 bool cmp_a = false;
28913142 bool cmp_b = false;
28923143 bool cmp_c = false;
3144
+
3145
+ if (!macb_is_gem(bp))
3146
+ return;
28933147
28943148 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
28953149 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
....@@ -3005,8 +3259,7 @@
30053259 gem_prog_cmp_regs(bp, fs);
30063260 bp->rx_fs_list.count++;
30073261 /* enable filtering if NTUPLE on */
3008
- if (netdev->features & NETIF_F_NTUPLE)
3009
- gem_enable_flow_filters(bp, 1);
3262
+ gem_enable_flow_filters(bp, 1);
30103263
30113264 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
30123265 return 0;
....@@ -3148,8 +3401,8 @@
31483401 .get_ts_info = ethtool_op_get_ts_info,
31493402 .get_wol = macb_get_wol,
31503403 .set_wol = macb_set_wol,
3151
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
3152
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
3404
+ .get_link_ksettings = macb_get_link_ksettings,
3405
+ .set_link_ksettings = macb_set_link_ksettings,
31533406 .get_ringparam = macb_get_ringparam,
31543407 .set_ringparam = macb_set_ringparam,
31553408 };
....@@ -3157,13 +3410,15 @@
31573410 static const struct ethtool_ops gem_ethtool_ops = {
31583411 .get_regs_len = macb_get_regs_len,
31593412 .get_regs = macb_get_regs,
3413
+ .get_wol = macb_get_wol,
3414
+ .set_wol = macb_set_wol,
31603415 .get_link = ethtool_op_get_link,
31613416 .get_ts_info = macb_get_ts_info,
31623417 .get_ethtool_stats = gem_get_ethtool_stats,
31633418 .get_strings = gem_get_ethtool_strings,
31643419 .get_sset_count = gem_get_sset_count,
3165
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
3166
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
3420
+ .get_link_ksettings = macb_get_link_ksettings,
3421
+ .set_link_ksettings = macb_set_link_ksettings,
31673422 .get_ringparam = macb_get_ringparam,
31683423 .set_ringparam = macb_set_ringparam,
31693424 .get_rxnfc = gem_get_rxnfc,
....@@ -3172,26 +3427,65 @@
31723427
31733428 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
31743429 {
3175
- struct phy_device *phydev = dev->phydev;
31763430 struct macb *bp = netdev_priv(dev);
31773431
31783432 if (!netif_running(dev))
31793433 return -EINVAL;
31803434
3181
- if (!phydev)
3182
- return -ENODEV;
3183
-
3184
- if (!bp->ptp_info)
3185
- return phy_mii_ioctl(phydev, rq, cmd);
3186
-
3187
- switch (cmd) {
3188
- case SIOCSHWTSTAMP:
3189
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
3190
- case SIOCGHWTSTAMP:
3191
- return bp->ptp_info->get_hwtst(dev, rq);
3192
- default:
3193
- return phy_mii_ioctl(phydev, rq, cmd);
3435
+ if (bp->ptp_info) {
3436
+ switch (cmd) {
3437
+ case SIOCSHWTSTAMP:
3438
+ return bp->ptp_info->set_hwtst(dev, rq, cmd);
3439
+ case SIOCGHWTSTAMP:
3440
+ return bp->ptp_info->get_hwtst(dev, rq);
3441
+ }
31943442 }
3443
+
3444
+ return phylink_mii_ioctl(bp->phylink, rq, cmd);
3445
+}
3446
+
3447
+static inline void macb_set_txcsum_feature(struct macb *bp,
3448
+ netdev_features_t features)
3449
+{
3450
+ u32 val;
3451
+
3452
+ if (!macb_is_gem(bp))
3453
+ return;
3454
+
3455
+ val = gem_readl(bp, DMACFG);
3456
+ if (features & NETIF_F_HW_CSUM)
3457
+ val |= GEM_BIT(TXCOEN);
3458
+ else
3459
+ val &= ~GEM_BIT(TXCOEN);
3460
+
3461
+ gem_writel(bp, DMACFG, val);
3462
+}
3463
+
3464
+static inline void macb_set_rxcsum_feature(struct macb *bp,
3465
+ netdev_features_t features)
3466
+{
3467
+ struct net_device *netdev = bp->dev;
3468
+ u32 val;
3469
+
3470
+ if (!macb_is_gem(bp))
3471
+ return;
3472
+
3473
+ val = gem_readl(bp, NCFGR);
3474
+ if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3475
+ val |= GEM_BIT(RXCOEN);
3476
+ else
3477
+ val &= ~GEM_BIT(RXCOEN);
3478
+
3479
+ gem_writel(bp, NCFGR, val);
3480
+}
3481
+
3482
+static inline void macb_set_rxflow_feature(struct macb *bp,
3483
+ netdev_features_t features)
3484
+{
3485
+ if (!macb_is_gem(bp))
3486
+ return;
3487
+
3488
+ gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
31953489 }
31963490
31973491 static int macb_set_features(struct net_device *netdev,
....@@ -3201,37 +3495,37 @@
32013495 netdev_features_t changed = features ^ netdev->features;
32023496
32033497 /* TX checksum offload */
3204
- if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
3205
- u32 dmacfg;
3206
-
3207
- dmacfg = gem_readl(bp, DMACFG);
3208
- if (features & NETIF_F_HW_CSUM)
3209
- dmacfg |= GEM_BIT(TXCOEN);
3210
- else
3211
- dmacfg &= ~GEM_BIT(TXCOEN);
3212
- gem_writel(bp, DMACFG, dmacfg);
3213
- }
3498
+ if (changed & NETIF_F_HW_CSUM)
3499
+ macb_set_txcsum_feature(bp, features);
32143500
32153501 /* RX checksum offload */
3216
- if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
3217
- u32 netcfg;
3218
-
3219
- netcfg = gem_readl(bp, NCFGR);
3220
- if (features & NETIF_F_RXCSUM &&
3221
- !(netdev->flags & IFF_PROMISC))
3222
- netcfg |= GEM_BIT(RXCOEN);
3223
- else
3224
- netcfg &= ~GEM_BIT(RXCOEN);
3225
- gem_writel(bp, NCFGR, netcfg);
3226
- }
3502
+ if (changed & NETIF_F_RXCSUM)
3503
+ macb_set_rxcsum_feature(bp, features);
32273504
32283505 /* RX Flow Filters */
3229
- if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
3230
- bool turn_on = features & NETIF_F_NTUPLE;
3506
+ if (changed & NETIF_F_NTUPLE)
3507
+ macb_set_rxflow_feature(bp, features);
32313508
3232
- gem_enable_flow_filters(bp, turn_on);
3233
- }
32343509 return 0;
3510
+}
3511
+
3512
+static void macb_restore_features(struct macb *bp)
3513
+{
3514
+ struct net_device *netdev = bp->dev;
3515
+ netdev_features_t features = netdev->features;
3516
+ struct ethtool_rx_fs_item *item;
3517
+
3518
+ /* TX checksum offload */
3519
+ macb_set_txcsum_feature(bp, features);
3520
+
3521
+ /* RX checksum offload */
3522
+ macb_set_rxcsum_feature(bp, features);
3523
+
3524
+ /* RX Flow Filters */
3525
+ list_for_each_entry(item, &bp->rx_fs_list.list, list)
3526
+ gem_prog_cmp_regs(bp, &item->fs);
3527
+
3528
+ macb_set_rxflow_feature(bp, features);
32353529 }
32363530
32373531 static const struct net_device_ops macb_netdev_ops = {
....@@ -3274,7 +3568,8 @@
32743568 #ifdef CONFIG_MACB_USE_HWSTAMP
32753569 if (gem_has_ptp(bp)) {
32763570 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3277
- pr_err("GEM doesn't support hardware ptp.\n");
3571
+ dev_err(&bp->pdev->dev,
3572
+ "GEM doesn't support hardware ptp.\n");
32783573 else {
32793574 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
32803575 bp->ptp_info = &gem_ptp_info;
....@@ -3291,8 +3586,6 @@
32913586 unsigned int *queue_mask,
32923587 unsigned int *num_queues)
32933588 {
3294
- unsigned int hw_q;
3295
-
32963589 *queue_mask = 0x1;
32973590 *num_queues = 1;
32983591
....@@ -3306,18 +3599,13 @@
33063599 return;
33073600
33083601 /* bit 0 is never set but queue 0 always exists */
3309
- *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3310
-
3311
- *queue_mask |= 0x1;
3312
-
3313
- for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3314
- if (*queue_mask & (1 << hw_q))
3315
- (*num_queues)++;
3602
+ *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
3603
+ *num_queues = hweight32(*queue_mask);
33163604 }
33173605
33183606 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
33193607 struct clk **hclk, struct clk **tx_clk,
3320
- struct clk **rx_clk)
3608
+ struct clk **rx_clk, struct clk **tsu_clk)
33213609 {
33223610 struct macb_platform_data *pdata;
33233611 int err;
....@@ -3349,13 +3637,17 @@
33493637 return err;
33503638 }
33513639
3352
- *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3640
+ *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
33533641 if (IS_ERR(*tx_clk))
3354
- *tx_clk = NULL;
3642
+ return PTR_ERR(*tx_clk);
33553643
3356
- *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3644
+ *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
33573645 if (IS_ERR(*rx_clk))
3358
- *rx_clk = NULL;
3646
+ return PTR_ERR(*rx_clk);
3647
+
3648
+ *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
3649
+ if (IS_ERR(*tsu_clk))
3650
+ return PTR_ERR(*tsu_clk);
33593651
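devm_clk_get_optional() returns NULL rather than an error when the clock is simply not described, and the clk API treats a NULL clock as a no-op dummy, so the error checks above only trip on genuine failures such as -EPROBE_DEFER; a minimal sketch of the distinction:

    struct clk *clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
    if (IS_ERR(clk))                /* real failure, e.g. -EPROBE_DEFER */
            return PTR_ERR(clk);
    /* clk may be NULL here; clk_prepare_enable(NULL) succeeds as a no-op,
     * so the caller can use it unconditionally.
     */
    int err = clk_prepare_enable(clk);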
33603652 err = clk_prepare_enable(*pclk);
33613653 if (err) {
....@@ -3381,7 +3673,16 @@
33813673 goto err_disable_txclk;
33823674 }
33833675
3676
+ err = clk_prepare_enable(*tsu_clk);
3677
+ if (err) {
3678
+ dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
3679
+ goto err_disable_rxclk;
3680
+ }
3681
+
33843682 return 0;
3683
+
3684
+err_disable_rxclk:
3685
+ clk_disable_unprepare(*rx_clk);
33853686
33863687 err_disable_txclk:
33873688 clk_disable_unprepare(*tx_clk);
....@@ -3417,7 +3718,7 @@
34173718
34183719 queue = &bp->queues[q];
34193720 queue->bp = bp;
3420
- netif_napi_add(dev, &queue->napi, macb_poll, 64);
3721
+ netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
34213722 if (hw_q) {
34223723 queue->ISR = GEM_ISR(hw_q - 1);
34233724 queue->IER = GEM_IER(hw_q - 1);
....@@ -3507,6 +3808,7 @@
35073808 reg = gem_readl(bp, DCFG8);
35083809 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
35093810 GEM_BFEXT(T2SCR, reg));
3811
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
35103812 if (bp->max_tuples > 0) {
35113813 /* also needs one ethtype match to check IPv4 */
35123814 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
....@@ -3517,7 +3819,6 @@
35173819 /* Filtering is supported in hw but don't enable it in kernel now */
35183820 dev->hw_features |= NETIF_F_NTUPLE;
35193821 /* init Rx flow definitions */
3520
- INIT_LIST_HEAD(&bp->rx_fs_list.list);
35213822 bp->rx_fs_list.count = 0;
35223823 spin_lock_init(&bp->rx_fs_lock);
35233824 } else
....@@ -3526,7 +3827,7 @@
35263827
35273828 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
35283829 val = 0;
3529
- if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3830
+ if (phy_interface_mode_is_rgmii(bp->phy_interface))
35303831 val = GEM_BIT(RGMII);
35313832 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
35323833 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
....@@ -3556,15 +3857,11 @@
35563857 /* max number of receive buffers */
35573858 #define AT91ETHER_MAX_RX_DESCR 9
35583859
3559
-/* Initialize and start the Receiver and Transmit subsystems */
3560
-static int at91ether_start(struct net_device *dev)
3860
+static struct sifive_fu540_macb_mgmt *mgmt;
3861
+
3862
+static int at91ether_alloc_coherent(struct macb *lp)
35613863 {
3562
- struct macb *lp = netdev_priv(dev);
35633864 struct macb_queue *q = &lp->queues[0];
3564
- struct macb_dma_desc *desc;
3565
- dma_addr_t addr;
3566
- u32 ctl;
3567
- int i;
35683865
35693866 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
35703867 (AT91ETHER_MAX_RX_DESCR *
....@@ -3585,6 +3882,43 @@
35853882 q->rx_ring = NULL;
35863883 return -ENOMEM;
35873884 }
3885
+
3886
+ return 0;
3887
+}
3888
+
3889
+static void at91ether_free_coherent(struct macb *lp)
3890
+{
3891
+ struct macb_queue *q = &lp->queues[0];
3892
+
3893
+ if (q->rx_ring) {
3894
+ dma_free_coherent(&lp->pdev->dev,
3895
+ AT91ETHER_MAX_RX_DESCR *
3896
+ macb_dma_desc_get_size(lp),
3897
+ q->rx_ring, q->rx_ring_dma);
3898
+ q->rx_ring = NULL;
3899
+ }
3900
+
3901
+ if (q->rx_buffers) {
3902
+ dma_free_coherent(&lp->pdev->dev,
3903
+ AT91ETHER_MAX_RX_DESCR *
3904
+ AT91ETHER_MAX_RBUFF_SZ,
3905
+ q->rx_buffers, q->rx_buffers_dma);
3906
+ q->rx_buffers = NULL;
3907
+ }
3908
+}
3909
+
3910
+/* Initialize and start the Receiver and Transmit subsystems */
3911
+static int at91ether_start(struct macb *lp)
3912
+{
3913
+ struct macb_queue *q = &lp->queues[0];
3914
+ struct macb_dma_desc *desc;
3915
+ dma_addr_t addr;
3916
+ u32 ctl;
3917
+ int i, ret;
3918
+
3919
+ ret = at91ether_alloc_coherent(lp);
3920
+ if (ret)
3921
+ return ret;
35883922
35893923 addr = q->rx_buffers_dma;
35903924 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
....@@ -3607,7 +3941,39 @@
36073941 ctl = macb_readl(lp, NCR);
36083942 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
36093943
3944
+ /* Enable MAC interrupts */
3945
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
3946
+ MACB_BIT(RXUBR) |
3947
+ MACB_BIT(ISR_TUND) |
3948
+ MACB_BIT(ISR_RLE) |
3949
+ MACB_BIT(TCOMP) |
3950
+ MACB_BIT(RM9200_TBRE) |
3951
+ MACB_BIT(ISR_ROVR) |
3952
+ MACB_BIT(HRESP));
3953
+
36103954 return 0;
3955
+}
3956
+
3957
+static void at91ether_stop(struct macb *lp)
3958
+{
3959
+ u32 ctl;
3960
+
3961
+ /* Disable MAC interrupts */
3962
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3963
+ MACB_BIT(RXUBR) |
3964
+ MACB_BIT(ISR_TUND) |
3965
+ MACB_BIT(ISR_RLE) |
3966
+ MACB_BIT(TCOMP) |
3967
+ MACB_BIT(RM9200_TBRE) |
3968
+ MACB_BIT(ISR_ROVR) |
3969
+ MACB_BIT(HRESP));
3970
+
3971
+ /* Disable Receiver and Transmitter */
3972
+ ctl = macb_readl(lp, NCR);
3973
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3974
+
3975
+ /* Free resources. */
3976
+ at91ether_free_coherent(lp);
36113977 }
36123978
36133979 /* Open the ethernet interface */
....@@ -3617,67 +3983,50 @@
36173983 u32 ctl;
36183984 int ret;
36193985
3986
+ ret = pm_runtime_get_sync(&lp->pdev->dev);
3987
+ if (ret < 0) {
3988
+ pm_runtime_put_noidle(&lp->pdev->dev);
3989
+ return ret;
3990
+ }
3991
+
36203992 /* Clear internal statistics */
36213993 ctl = macb_readl(lp, NCR);
36223994 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
36233995
36243996 macb_set_hwaddr(lp);
36253997
3626
- ret = at91ether_start(dev);
3998
+ ret = at91ether_start(lp);
36273999 if (ret)
3628
- return ret;
4000
+ goto pm_exit;
36294001
3630
- /* Enable MAC interrupts */
3631
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
3632
- MACB_BIT(RXUBR) |
3633
- MACB_BIT(ISR_TUND) |
3634
- MACB_BIT(ISR_RLE) |
3635
- MACB_BIT(TCOMP) |
3636
- MACB_BIT(ISR_ROVR) |
3637
- MACB_BIT(HRESP));
3638
-
3639
- /* schedule a link state check */
3640
- phy_start(dev->phydev);
4002
+ ret = macb_phylink_connect(lp);
4003
+ if (ret)
4004
+ goto stop;
36414005
36424006 netif_start_queue(dev);
36434007
36444008 return 0;
4009
+
4010
+stop:
4011
+ at91ether_stop(lp);
4012
+pm_exit:
4013
+ pm_runtime_put_sync(&lp->pdev->dev);
4014
+ return ret;
36454015 }
36464016
36474017 /* Close the interface */
36484018 static int at91ether_close(struct net_device *dev)
36494019 {
36504020 struct macb *lp = netdev_priv(dev);
3651
- struct macb_queue *q = &lp->queues[0];
3652
- u32 ctl;
3653
-
3654
- /* Disable Receiver and Transmitter */
3655
- ctl = macb_readl(lp, NCR);
3656
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3657
-
3658
- /* Disable MAC interrupts */
3659
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3660
- MACB_BIT(RXUBR) |
3661
- MACB_BIT(ISR_TUND) |
3662
- MACB_BIT(ISR_RLE) |
3663
- MACB_BIT(TCOMP) |
3664
- MACB_BIT(ISR_ROVR) |
3665
- MACB_BIT(HRESP));
36664021
36674022 netif_stop_queue(dev);
36684023
3669
- dma_free_coherent(&lp->pdev->dev,
3670
- AT91ETHER_MAX_RX_DESCR *
3671
- macb_dma_desc_get_size(lp),
3672
- q->rx_ring, q->rx_ring_dma);
3673
- q->rx_ring = NULL;
4024
+ phylink_stop(lp->phylink);
4025
+ phylink_disconnect_phy(lp->phylink);
36744026
3675
- dma_free_coherent(&lp->pdev->dev,
3676
- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3677
- q->rx_buffers, q->rx_buffers_dma);
3678
- q->rx_buffers = NULL;
4027
+ at91ether_stop(lp);
36794028
3680
- return 0;
4029
+ return pm_runtime_put(&lp->pdev->dev);
36814030 }
36824031
36834032 /* Transmit packet */
....@@ -3685,24 +4034,34 @@
36854034 struct net_device *dev)
36864035 {
36874036 struct macb *lp = netdev_priv(dev);
4037
+ unsigned long flags;
36884038
3689
- if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3690
- netif_stop_queue(dev);
4039
+ if (lp->rm9200_tx_len < 2) {
4040
+ int desc = lp->rm9200_tx_tail;
36914041
36924042 /* Store packet information (to free when Tx completed) */
3693
- lp->skb = skb;
3694
- lp->skb_length = skb->len;
3695
- lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
3696
- DMA_TO_DEVICE);
3697
- if (dma_mapping_error(NULL, lp->skb_physaddr)) {
4043
+ lp->rm9200_txq[desc].skb = skb;
4044
+ lp->rm9200_txq[desc].size = skb->len;
4045
+ lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
4046
+ skb->len, DMA_TO_DEVICE);
4047
+ if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
36984048 dev_kfree_skb_any(skb);
36994049 dev->stats.tx_dropped++;
37004050 netdev_err(dev, "%s: DMA mapping error\n", __func__);
37014051 return NETDEV_TX_OK;
37024052 }
37034053
4054
+ spin_lock_irqsave(&lp->lock, flags);
4055
+
4056
+ lp->rm9200_tx_tail = (desc + 1) & 1;
4057
+ lp->rm9200_tx_len++;
4058
+ if (lp->rm9200_tx_len > 1)
4059
+ netif_stop_queue(dev);
4060
+
4061
+ spin_unlock_irqrestore(&lp->lock, flags);
4062
+
37044063 /* Set address of the data in the Transmit Address register */
3705
- macb_writel(lp, TAR, lp->skb_physaddr);
4064
+ macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
37064065 /* Set length of the packet in the Transmit Control register */
37074066 macb_writel(lp, TCR, skb->len);
37084067
....@@ -3765,6 +4124,9 @@
37654124 struct net_device *dev = dev_id;
37664125 struct macb *lp = netdev_priv(dev);
37674126 u32 intstatus, ctl;
4127
+ unsigned int desc;
4128
+ unsigned int qlen;
4129
+ u32 tsr;
37684130
37694131 /* MAC Interrupt Status register indicates what interrupts are pending.
37704132 * It is automatically cleared once read.
....@@ -3776,20 +4138,39 @@
37764138 at91ether_rx(dev);
37774139
37784140 /* Transmit complete */
3779
- if (intstatus & MACB_BIT(TCOMP)) {
4141
+ if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) {
37804142 /* The TCOM bit is set even if the transmission failed */
37814143 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
37824144 dev->stats.tx_errors++;
37834145
3784
- if (lp->skb) {
3785
- dev_kfree_skb_irq(lp->skb);
3786
- lp->skb = NULL;
3787
- dma_unmap_single(NULL, lp->skb_physaddr,
3788
- lp->skb_length, DMA_TO_DEVICE);
4146
+ spin_lock(&lp->lock);
4147
+
4148
+ tsr = macb_readl(lp, TSR);
4149
+
4150
+ /* We have three possibilities here:
4151
+ * - all pending packets transmitted (TGO, implies BNQ)
4152
+ * - only first packet transmitted (!TGO && BNQ)
4153
+ * - two frames pending (!TGO && !BNQ)
4154
+ * Note that TGO ("transmit go") is called "IDLE" on RM9200.
4155
+ */
4156
+ qlen = (tsr & MACB_BIT(TGO)) ? 0 :
4157
+ (tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2;
4158
+
4159
+ while (lp->rm9200_tx_len > qlen) {
4160
+ desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1;
4161
+ dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
4162
+ lp->rm9200_txq[desc].skb = NULL;
4163
+ dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
4164
+ lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
37894165 dev->stats.tx_packets++;
3790
- dev->stats.tx_bytes += lp->skb_length;
4166
+ dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
4167
+ lp->rm9200_tx_len--;
37914168 }
3792
- netif_wake_queue(dev);
4169
+
4170
+ if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev))
4171
+ netif_wake_queue(dev);
4172
+
4173
+ spin_unlock(&lp->lock);
37934174 }
37944175
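Completion accounting here does not rely on per-descriptor "done" flags; the handler reconstructs how many frames the hardware still owns purely from two TSR bits, then frees everything older than that. A condensed sketch of the mapping (bit positions are placeholders, not the real TSR layout):

	#include <linux/bits.h>
	#include <linux/types.h>

	#define EX_TGO	BIT(3)	/* transmitter idle; called IDLE on RM9200 */
	#define EX_BNQ	BIT(4)	/* transmit buffer not queued (slot free) */

	static unsigned int ex_tx_pending(u32 tsr)
	{
		if (tsr & EX_TGO)	/* idle: everything sent (implies BNQ) */
			return 0;
		if (tsr & EX_BNQ)	/* busy, second slot free: one frame left */
			return 1;
		return 2;		/* busy and a second frame is queued */
	}

The loop above then walks rm9200_tx_len down to this value under lp->lock, unmapping and consuming the oldest skb on each iteration.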
37954176 /* Work-around for EMAC Errata section 41.3.1 */
....@@ -3833,13 +4214,14 @@
38334214
38344215 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
38354216 struct clk **hclk, struct clk **tx_clk,
3836
- struct clk **rx_clk)
4217
+ struct clk **rx_clk, struct clk **tsu_clk)
38374218 {
38384219 int err;
38394220
38404221 *hclk = NULL;
38414222 *tx_clk = NULL;
38424223 *rx_clk = NULL;
4224
+ *tsu_clk = NULL;
38434225
38444226 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
38454227 if (IS_ERR(*pclk))
....@@ -3859,7 +4241,6 @@
38594241 struct net_device *dev = platform_get_drvdata(pdev);
38604242 struct macb *bp = netdev_priv(dev);
38614243 int err;
3862
- u32 reg;
38634244
38644245 bp->queues[0].bp = bp;
38654246
....@@ -3873,14 +4254,114 @@
38734254
38744255 macb_writel(bp, NCR, 0);
38754256
3876
- reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3877
- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3878
- reg |= MACB_BIT(RM9200_RMII);
3879
-
3880
- macb_writel(bp, NCFGR, reg);
4257
+ macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
38814258
38824259 return 0;
38834260 }
4261
+
4262
+static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
4263
+ unsigned long parent_rate)
4264
+{
4265
+ return mgmt->rate;
4266
+}
4267
+
4268
+static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
4269
+ unsigned long *parent_rate)
4270
+{
4271
+ if (WARN_ON(rate < 2500000))
4272
+ return 2500000;
4273
+ else if (rate == 2500000)
4274
+ return 2500000;
4275
+ else if (WARN_ON(rate < 13750000))
4276
+ return 2500000;
4277
+ else if (WARN_ON(rate < 25000000))
4278
+ return 25000000;
4279
+ else if (rate == 25000000)
4280
+ return 25000000;
4281
+ else if (WARN_ON(rate < 75000000))
4282
+ return 25000000;
4283
+ else if (WARN_ON(rate < 125000000))
4284
+ return 125000000;
4285
+ else if (rate == 125000000)
4286
+ return 125000000;
4287
+
4288
+ WARN_ON(rate > 125000000);
4289
+
4290
+ return 125000000;
4291
+}
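The ladder above quantizes any requested TX-clock rate to the three frequencies a GEM needs for 10/100/1000 Mb/s operation (2.5, 25 and 125 MHz), warning on anything off-grid; the 13.75 MHz and 75 MHz cut-offs are simply the midpoints between adjacent supported rates. A compact equivalent, as a sketch (ex_gemgxl_round is hypothetical):

	static unsigned long ex_gemgxl_round(unsigned long rate)
	{
		if (rate < 13750000)		/* midpoint of 2.5 and 25 MHz */
			return 2500000;
		if (rate < 75000000)		/* midpoint of 25 and 125 MHz */
			return 25000000;
		return 125000000;		/* anything higher snaps down */
	}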
4292
+
4293
+static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4294
+ unsigned long parent_rate)
4295
+{
4296
+ rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4297
+ if (rate != 125000000)
4298
+ iowrite32(1, mgmt->reg);
4299
+ else
4300
+ iowrite32(0, mgmt->reg);
4301
+ mgmt->rate = rate;
4302
+
4303
+ return 0;
4304
+}
4305
+
4306
+static const struct clk_ops fu540_c000_ops = {
4307
+ .recalc_rate = fu540_macb_tx_recalc_rate,
4308
+ .round_rate = fu540_macb_tx_round_rate,
4309
+ .set_rate = fu540_macb_tx_set_rate,
4310
+};
4311
+
4312
+static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4313
+ struct clk **hclk, struct clk **tx_clk,
4314
+ struct clk **rx_clk, struct clk **tsu_clk)
4315
+{
4316
+ struct clk_init_data init;
4317
+ int err = 0;
4318
+
4319
+ err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4320
+ if (err)
4321
+ return err;
4322
+
4323
+ mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4324
+ if (!mgmt)
4325
+ return -ENOMEM;
4326
+
4327
+ init.name = "sifive-gemgxl-mgmt";
4328
+ init.ops = &fu540_c000_ops;
4329
+ init.flags = 0;
4330
+ init.num_parents = 0;
4331
+
4332
+ mgmt->rate = 0;
4333
+ mgmt->hw.init = &init;
4334
+
4335
+ *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4336
+ if (IS_ERR(*tx_clk))
4337
+ return PTR_ERR(*tx_clk);
4338
+
4339
+ err = clk_prepare_enable(*tx_clk);
4340
+ if (err)
4341
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4342
+ else
4343
+ dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4344
+
4345
+ return 0;
4346
+}
4347
+
4348
+static int fu540_c000_init(struct platform_device *pdev)
4349
+{
4350
+ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4351
+ if (IS_ERR(mgmt->reg))
4352
+ return PTR_ERR(mgmt->reg);
4353
+
4354
+ return macb_init(pdev);
4355
+}
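Registering the GEMGXL management register as a one-bit clock means the common MACB code can keep using the generic clk API when the link speed changes; nothing SiFive-specific leaks outside clk_init. A sketch of the consumer side under that assumption (ex_update_tx_clk is hypothetical; the driver's own helper also rounds via the clk framework):

	#include <linux/clk.h>

	static void ex_update_tx_clk(struct clk *tx_clk, int speed)
	{
		long rate;

		switch (speed) {
		case 1000: rate = 125000000; break;	/* SPEED_1000 */
		case 100:  rate = 25000000;  break;	/* SPEED_100 */
		default:   rate = 2500000;   break;	/* SPEED_10 */
		}
		/* Lands in fu540_macb_tx_set_rate() through the registered ops,
		 * which flips the mgmt register between the two divider modes.
		 */
		clk_set_rate(tx_clk, rate);
	}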
4356
+
4357
+static const struct macb_config fu540_c000_config = {
4358
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4359
+ MACB_CAPS_GEM_HAS_PTP,
4360
+ .dma_burst_length = 16,
4361
+ .clk_init = fu540_c000_clk_init,
4362
+ .init = fu540_c000_init,
4363
+ .jumbo_max_len = 10240,
4364
+};
38844365
38854366 static const struct macb_config at91sam9260_config = {
38864367 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
....@@ -3926,7 +4407,7 @@
39264407 };
39274408
39284409 static const struct macb_config emac_config = {
3929
- .caps = MACB_CAPS_NEEDS_RSTONUBR,
4410
+ .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
39304411 .clk_init = at91ether_clk_init,
39314412 .init = at91ether_init,
39324413 };
....@@ -3962,6 +4443,7 @@
39624443 { .compatible = "cdns,np4-macb", .data = &np4_config },
39634444 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
39644445 { .compatible = "cdns,gem", .data = &pc302gem_config },
4446
+ { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
39654447 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
39664448 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
39674449 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
....@@ -3970,6 +4452,7 @@
39704452 { .compatible = "cdns,emac", .data = &emac_config },
39714453 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
39724454 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4455
+ { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
39734456 { /* sentinel */ }
39744457 };
39754458 MODULE_DEVICE_TABLE(of, macb_dt_ids);
....@@ -3989,15 +4472,15 @@
39894472 {
39904473 const struct macb_config *macb_config = &default_gem_config;
39914474 int (*clk_init)(struct platform_device *, struct clk **,
3992
- struct clk **, struct clk **, struct clk **)
3993
- = macb_config->clk_init;
4475
+ struct clk **, struct clk **, struct clk **,
4476
+ struct clk **) = macb_config->clk_init;
39944477 int (*init)(struct platform_device *) = macb_config->init;
39954478 struct device_node *np = pdev->dev.of_node;
39964479 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4480
+ struct clk *tsu_clk = NULL;
39974481 unsigned int queue_mask, num_queues;
3998
- struct macb_platform_data *pdata;
39994482 bool native_io;
4000
- struct phy_device *phydev;
4483
+ phy_interface_t interface;
40014484 struct net_device *dev;
40024485 struct resource *regs;
40034486 void __iomem *mem;
....@@ -4021,10 +4504,15 @@
40214504 }
40224505 }
40234506
4024
- err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
4507
+ err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
40254508 if (err)
40264509 return err;
40274510
4511
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4512
+ pm_runtime_use_autosuspend(&pdev->dev);
4513
+ pm_runtime_get_noresume(&pdev->dev);
4514
+ pm_runtime_set_active(&pdev->dev);
4515
+ pm_runtime_enable(&pdev->dev);
40284516 native_io = hw_is_native_io(mem);
40294517
40304518 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
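Runtime PM is brought up in probe with a deliberate ordering: the device is already powered because clk_init() just enabled its clocks, so it is marked active and a no-resume reference is held across the rest of probe; the matching put happens only after register_netdev(). A sketch of the bracket (ex_probe is hypothetical; 100 ms mirrors MACB_PM_TIMEOUT):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int ex_probe(struct platform_device *pdev)
	{
		pm_runtime_set_autosuspend_delay(&pdev->dev, 100);	/* ms */
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_get_noresume(&pdev->dev);	/* keep device up during probe */
		pm_runtime_set_active(&pdev->dev);	/* clocks already enabled */
		pm_runtime_enable(&pdev->dev);

		/* ... MMIO setup, netdev registration, etc. ... */

		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);	/* arm autosuspend */
		return 0;
	}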
....@@ -4058,6 +4546,7 @@
40584546 bp->hclk = hclk;
40594547 bp->tx_clk = tx_clk;
40604548 bp->rx_clk = rx_clk;
4549
+ bp->tsu_clk = tsu_clk;
40614550 if (macb_config)
40624551 bp->jumbo_max_len = macb_config->jumbo_max_len;
40634552
....@@ -4109,27 +4598,21 @@
41094598 bp->rx_intr_mask |= MACB_BIT(RXUBR);
41104599
41114600 mac = of_get_mac_address(np);
4112
- if (mac) {
4601
+ if (PTR_ERR(mac) == -EPROBE_DEFER) {
4602
+ err = -EPROBE_DEFER;
4603
+ goto err_out_free_netdev;
4604
+ } else if (!IS_ERR_OR_NULL(mac)) {
41134605 ether_addr_copy(bp->dev->dev_addr, mac);
41144606 } else {
4115
- err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
4116
- if (err) {
4117
- if (err == -EPROBE_DEFER)
4118
- goto err_out_free_netdev;
4119
- macb_get_hwaddr(bp);
4120
- }
4607
+ macb_get_hwaddr(bp);
41214608 }
41224609
4123
- err = of_get_phy_mode(np);
4124
- if (err < 0) {
4125
- pdata = dev_get_platdata(&pdev->dev);
4126
- if (pdata && pdata->is_rmii)
4127
- bp->phy_interface = PHY_INTERFACE_MODE_RMII;
4128
- else
4129
- bp->phy_interface = PHY_INTERFACE_MODE_MII;
4130
- } else {
4131
- bp->phy_interface = err;
4132
- }
4610
+ err = of_get_phy_mode(np, &interface);
4611
+ if (err)
4612
+ /* not found in DT, MII by default */
4613
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
4614
+ else
4615
+ bp->phy_interface = interface;
41334616
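of_get_mac_address() can now hand back ERR_PTR(-EPROBE_DEFER) when the address lives in an NVMEM cell whose provider has not probed yet, which is why the lookup above propagates the defer instead of silently falling back to the MAC registers. (The phy-mode lookup follows the same spirit: the two-argument of_get_phy_mode() returns an error code rather than a negative mode, with MII as the fallback.) A sketch of the pattern (ex_get_mac is hypothetical; the fallback here is randomized rather than the driver's SA1B/SA1T register read):

	#include <linux/err.h>
	#include <linux/etherdevice.h>
	#include <linux/of_net.h>

	static int ex_get_mac(struct device_node *np, struct net_device *ndev)
	{
		const void *mac = of_get_mac_address(np);

		if (PTR_ERR(mac) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* NVMEM provider not ready: retry probe */
		if (!IS_ERR_OR_NULL(mac))
			ether_addr_copy(ndev->dev_addr, mac);
		else
			eth_hw_addr_random(ndev);	/* sketch fallback only */
		return 0;
	}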
41344617 /* IP specific init */
41354618 err = init(pdev);
....@@ -4140,8 +4623,6 @@
41404623 if (err)
41414624 goto err_out_free_netdev;
41424625
4143
- phydev = dev->phydev;
4144
-
41454626 netif_carrier_off(dev);
41464627
41474628 err = register_netdev(dev);
....@@ -4150,23 +4631,19 @@
41504631 goto err_out_unregister_mdio;
41514632 }
41524633
4153
- tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
4154
- (unsigned long)bp);
4155
-
4156
- phy_attached_info(phydev);
4634
+ tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
41574635
41584636 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
41594637 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
41604638 dev->base_addr, dev->irq, dev->dev_addr);
41614639
4640
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
4641
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
4642
+
41624643 return 0;
41634644
41644645 err_out_unregister_mdio:
4165
- phy_disconnect(dev->phydev);
41664646 mdiobus_unregister(bp->mii_bus);
4167
- of_node_put(bp->phy_node);
4168
- if (np && of_phy_is_fixed_link(np))
4169
- of_phy_deregister_fixed_link(np);
41704647 mdiobus_free(bp->mii_bus);
41714648
41724649 err_out_free_netdev:
....@@ -4177,6 +4654,10 @@
41774654 clk_disable_unprepare(hclk);
41784655 clk_disable_unprepare(pclk);
41794656 clk_disable_unprepare(rx_clk);
4657
+ clk_disable_unprepare(tsu_clk);
4658
+ pm_runtime_disable(&pdev->dev);
4659
+ pm_runtime_set_suspended(&pdev->dev);
4660
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
41804661
41814662 return err;
41824663 }
....@@ -4185,27 +4666,27 @@
41854666 {
41864667 struct net_device *dev;
41874668 struct macb *bp;
4188
- struct device_node *np = pdev->dev.of_node;
41894669
41904670 dev = platform_get_drvdata(pdev);
41914671
41924672 if (dev) {
41934673 bp = netdev_priv(dev);
4194
- if (dev->phydev)
4195
- phy_disconnect(dev->phydev);
41964674 mdiobus_unregister(bp->mii_bus);
4197
- if (np && of_phy_is_fixed_link(np))
4198
- of_phy_deregister_fixed_link(np);
4199
- dev->phydev = NULL;
42004675 mdiobus_free(bp->mii_bus);
42014676
42024677 unregister_netdev(dev);
42034678 tasklet_kill(&bp->hresp_err_tasklet);
4204
- clk_disable_unprepare(bp->tx_clk);
4205
- clk_disable_unprepare(bp->hclk);
4206
- clk_disable_unprepare(bp->pclk);
4207
- clk_disable_unprepare(bp->rx_clk);
4208
- of_node_put(bp->phy_node);
4679
+ pm_runtime_disable(&pdev->dev);
4680
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
4681
+ if (!pm_runtime_suspended(&pdev->dev)) {
4682
+ clk_disable_unprepare(bp->tx_clk);
4683
+ clk_disable_unprepare(bp->hclk);
4684
+ clk_disable_unprepare(bp->pclk);
4685
+ clk_disable_unprepare(bp->rx_clk);
4686
+ clk_disable_unprepare(bp->tsu_clk);
4687
+ pm_runtime_set_suspended(&pdev->dev);
4688
+ }
4689
+ phylink_destroy(bp->phylink);
42094690 free_netdev(dev);
42104691 }
42114692
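Tear-down must now account for the device possibly being runtime-suspended already: in that case its clocks are gated, and calling clk_disable_unprepare() again would unbalance the enable counts. A sketch of that guard (ex_teardown_clocks is hypothetical):

	#include <linux/clk.h>
	#include <linux/pm_runtime.h>

	static void ex_teardown_clocks(struct device *dev, struct clk *clks[], int n)
	{
		int i;

		pm_runtime_disable(dev);
		pm_runtime_dont_use_autosuspend(dev);
		if (!pm_runtime_suspended(dev)) {	/* still powered: gate now */
			for (i = 0; i < n; i++)
				clk_disable_unprepare(clks[i]);
			pm_runtime_set_suspended(dev);	/* record the final state */
		}
	}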
....@@ -4214,50 +4695,204 @@
42144695
42154696 static int __maybe_unused macb_suspend(struct device *dev)
42164697 {
4217
- struct platform_device *pdev = to_platform_device(dev);
4218
- struct net_device *netdev = platform_get_drvdata(pdev);
4698
+ struct net_device *netdev = dev_get_drvdata(dev);
42194699 struct macb *bp = netdev_priv(netdev);
4700
+ struct macb_queue *queue = bp->queues;
4701
+ unsigned long flags;
4702
+ unsigned int q;
4703
+ int err;
42204704
4221
- netif_carrier_off(netdev);
4222
- netif_device_detach(netdev);
4705
+ if (!netif_running(netdev))
4706
+ return 0;
42234707
42244708 if (bp->wol & MACB_WOL_ENABLED) {
4225
- macb_writel(bp, IER, MACB_BIT(WOL));
4226
- macb_writel(bp, WOL, MACB_BIT(MAG));
4709
+ spin_lock_irqsave(&bp->lock, flags);
4710
+ /* Flush all status bits */
4711
+ macb_writel(bp, TSR, -1);
4712
+ macb_writel(bp, RSR, -1);
4713
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
4714
+ ++q, ++queue) {
4715
+ /* Disable all interrupts */
4716
+ queue_writel(queue, IDR, -1);
4717
+ queue_readl(queue, ISR);
4718
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
4719
+ queue_writel(queue, ISR, -1);
4720
+ }
4721
+ /* Change interrupt handler and
4722
+ * enable WoL IRQ on queue 0
4723
+ */
4724
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
4725
+ if (macb_is_gem(bp)) {
4726
+ err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
4727
+ IRQF_SHARED, netdev->name, bp->queues);
4728
+ if (err) {
4729
+ dev_err(dev,
4730
+ "Unable to request IRQ %d (error %d)\n",
4731
+ bp->queues[0].irq, err);
4732
+ spin_unlock_irqrestore(&bp->lock, flags);
4733
+ return err;
4734
+ }
4735
+ queue_writel(bp->queues, IER, GEM_BIT(WOL));
4736
+ gem_writel(bp, WOL, MACB_BIT(MAG));
4737
+ } else {
4738
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
4739
+ IRQF_SHARED, netdev->name, bp->queues);
4740
+ if (err) {
4741
+ dev_err(dev,
4742
+ "Unable to request IRQ %d (error %d)\n",
4743
+ bp->queues[0].irq, err);
4744
+ spin_unlock_irqrestore(&bp->lock, flags);
4745
+ return err;
4746
+ }
4747
+ queue_writel(bp->queues, IER, MACB_BIT(WOL));
4748
+ macb_writel(bp, WOL, MACB_BIT(MAG));
4749
+ }
4750
+ spin_unlock_irqrestore(&bp->lock, flags);
4751
+
42274752 enable_irq_wake(bp->queues[0].irq);
4228
- } else {
4229
- clk_disable_unprepare(bp->tx_clk);
4230
- clk_disable_unprepare(bp->hclk);
4231
- clk_disable_unprepare(bp->pclk);
4232
- clk_disable_unprepare(bp->rx_clk);
42334753 }
4754
+
4755
+ netif_device_detach(netdev);
4756
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
4757
+ ++q, ++queue)
4758
+ napi_disable(&queue->napi);
4759
+
4760
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
4761
+ rtnl_lock();
4762
+ phylink_stop(bp->phylink);
4763
+ rtnl_unlock();
4764
+ spin_lock_irqsave(&bp->lock, flags);
4765
+ macb_reset_hw(bp);
4766
+ spin_unlock_irqrestore(&bp->lock, flags);
4767
+ }
4768
+
4769
+ if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4770
+ bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
4771
+
4772
+ if (netdev->hw_features & NETIF_F_NTUPLE)
4773
+ bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
4774
+
4775
+ if (bp->ptp_info)
4776
+ bp->ptp_info->ptp_remove(netdev);
4777
+ if (!device_may_wakeup(dev))
4778
+ pm_runtime_force_suspend(dev);
42344779
42354780 return 0;
42364781 }
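While suspended with WoL armed, the driver swaps the fully featured ISR for a dedicated wake-up handler, so only the magic-packet event is serviced out of the low-power state; macb_resume() performs the inverse swap before restarting phylink. The managed-IRQ swap pattern, as a sketch (ex_* names hypothetical):

	#include <linux/interrupt.h>

	static irqreturn_t ex_wol_isr(int irq, void *dev_id)
	{
		/* ack the WoL event, report a wakeup, nothing else */
		return IRQ_HANDLED;
	}

	static int ex_arm_wol(struct device *dev, int irq, void *cookie,
			      const char *name)
	{
		devm_free_irq(dev, irq, cookie);	/* drop the normal handler */
		return devm_request_irq(dev, irq, ex_wol_isr, IRQF_SHARED,
					name, cookie);	/* minimal handler instead */
	}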
42374782
42384783 static int __maybe_unused macb_resume(struct device *dev)
42394784 {
4240
- struct platform_device *pdev = to_platform_device(dev);
4241
- struct net_device *netdev = platform_get_drvdata(pdev);
4785
+ struct net_device *netdev = dev_get_drvdata(dev);
42424786 struct macb *bp = netdev_priv(netdev);
4787
+ struct macb_queue *queue = bp->queues;
4788
+ unsigned long flags;
4789
+ unsigned int q;
4790
+ int err;
4791
+
4792
+ if (!netif_running(netdev))
4793
+ return 0;
4794
+
4795
+ if (!device_may_wakeup(dev))
4796
+ pm_runtime_force_resume(dev);
42434797
42444798 if (bp->wol & MACB_WOL_ENABLED) {
4245
- macb_writel(bp, IDR, MACB_BIT(WOL));
4246
- macb_writel(bp, WOL, 0);
4799
+ spin_lock_irqsave(&bp->lock, flags);
4800
+ /* Disable WoL */
4801
+ if (macb_is_gem(bp)) {
4802
+ queue_writel(bp->queues, IDR, GEM_BIT(WOL));
4803
+ gem_writel(bp, WOL, 0);
4804
+ } else {
4805
+ queue_writel(bp->queues, IDR, MACB_BIT(WOL));
4806
+ macb_writel(bp, WOL, 0);
4807
+ }
4808
+ /* Clear ISR on queue 0 */
4809
+ queue_readl(bp->queues, ISR);
4810
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
4811
+ queue_writel(bp->queues, ISR, -1);
4812
+ /* Replace interrupt handler on queue 0 */
4813
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
4814
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
4815
+ IRQF_SHARED, netdev->name, bp->queues);
4816
+ if (err) {
4817
+ dev_err(dev,
4818
+ "Unable to request IRQ %d (error %d)\n",
4819
+ bp->queues[0].irq, err);
4820
+ spin_unlock_irqrestore(&bp->lock, flags);
4821
+ return err;
4822
+ }
4823
+ spin_unlock_irqrestore(&bp->lock, flags);
4824
+
42474825 disable_irq_wake(bp->queues[0].irq);
4248
- } else {
4826
+
4827
+ /* Now make sure we disable phy before moving
4828
+ * to common restore path
4829
+ */
4830
+ rtnl_lock();
4831
+ phylink_stop(bp->phylink);
4832
+ rtnl_unlock();
4833
+ }
4834
+
4835
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
4836
+ ++q, ++queue)
4837
+ napi_enable(&queue->napi);
4838
+
4839
+ if (netdev->hw_features & NETIF_F_NTUPLE)
4840
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
4841
+
4842
+ if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4843
+ macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
4844
+
4845
+ macb_writel(bp, NCR, MACB_BIT(MPE));
4846
+ macb_init_hw(bp);
4847
+ macb_set_rx_mode(netdev);
4848
+ macb_restore_features(bp);
4849
+ rtnl_lock();
4850
+ phylink_start(bp->phylink);
4851
+ rtnl_unlock();
4852
+
4853
+ netif_device_attach(netdev);
4854
+ if (bp->ptp_info)
4855
+ bp->ptp_info->ptp_init(netdev);
4856
+
4857
+ return 0;
4858
+}
4859
+
4860
+static int __maybe_unused macb_runtime_suspend(struct device *dev)
4861
+{
4862
+ struct net_device *netdev = dev_get_drvdata(dev);
4863
+ struct macb *bp = netdev_priv(netdev);
4864
+
4865
+ if (!(device_may_wakeup(dev))) {
4866
+ clk_disable_unprepare(bp->tx_clk);
4867
+ clk_disable_unprepare(bp->hclk);
4868
+ clk_disable_unprepare(bp->pclk);
4869
+ clk_disable_unprepare(bp->rx_clk);
4870
+ }
4871
+ clk_disable_unprepare(bp->tsu_clk);
4872
+
4873
+ return 0;
4874
+}
4875
+
4876
+static int __maybe_unused macb_runtime_resume(struct device *dev)
4877
+{
4878
+ struct net_device *netdev = dev_get_drvdata(dev);
4879
+ struct macb *bp = netdev_priv(netdev);
4880
+
4881
+ if (!(device_may_wakeup(dev))) {
42494882 clk_prepare_enable(bp->pclk);
42504883 clk_prepare_enable(bp->hclk);
42514884 clk_prepare_enable(bp->tx_clk);
42524885 clk_prepare_enable(bp->rx_clk);
42534886 }
4254
-
4255
- netif_device_attach(netdev);
4887
+ clk_prepare_enable(bp->tsu_clk);
42564888
42574889 return 0;
42584890 }
42594891
4260
-static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
4892
+static const struct dev_pm_ops macb_pm_ops = {
4893
+ SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
4894
+ SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
4895
+};
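With both callback pairs declared, the block can be clock-gated whenever the interface is idle, independently of system sleep. The practical consequence is that every path touching registers outside open/close must hold a runtime-PM reference; a sketch of the usual bracket (ex_mdio_read is hypothetical, but the flow matches what such callbacks do in this driver):

	#include <linux/pm_runtime.h>

	static int ex_mdio_read(struct device *dev)
	{
		int status = pm_runtime_get_sync(dev);	/* clocks back on */

		if (status < 0) {
			pm_runtime_put_noidle(dev);	/* balance the failed get */
			return status;
		}

		/* ... MDIO register access goes here ... */
		status = 0;

		pm_runtime_mark_last_busy(dev);		/* restart autosuspend timer */
		pm_runtime_put_autosuspend(dev);
		return status;
	}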
42614896
42624897 static struct platform_driver macb_driver = {
42634898 .probe = macb_probe,