2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/ethernet/cadence/macb_main.c
@@ -1,15 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Cadence MACB/GEM Ethernet Controller driver
  *
  * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/crc32.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -25,9 +23,8 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/dma-mapping.h>
-#include <linux/platform_data/macb.h>
 #include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
@@ -36,7 +33,16 @@
 #include <linux/ip.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
 #include "macb.h"
+
+/* This structure is only used for MACB on SiFive FU540 devices */
+struct sifive_fu540_macb_mgmt {
+	void __iomem *reg;
+	unsigned long rate;
+	struct clk_hw hw;
+};
 
 #define MACB_RX_BUFFER_SIZE 128
 #define RX_BUFFER_MULTIPLE 64 /* bytes */
@@ -82,6 +88,10 @@
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
  */
 #define MACB_HALT_TIMEOUT 1230
+
+#define MACB_PM_TIMEOUT 100 /* ms */
+
+#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
 
 /* DMA buffer descriptor might be different size
  * depends on hardware configuration:
@@ -158,9 +168,8 @@
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
 {
-	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
-	return NULL;
+	return (struct macb_dma_desc_64 *)((void *)desc
+		+ sizeof(struct macb_dma_desc));
 }
 #endif
 
@@ -283,34 +292,22 @@
 
 static void macb_get_hwaddr(struct macb *bp)
 {
-	struct macb_platform_data *pdata;
 	u32 bottom;
 	u16 top;
 	u8 addr[6];
 	int i;
-
-	pdata = dev_get_platdata(&bp->pdev->dev);
 
 	/* Check all 4 address register for valid address */
 	for (i = 0; i < 4; i++) {
 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
 		top = macb_or_gem_readl(bp, SA1T + i * 8);
 
-		if (pdata && pdata->rev_eth_addr) {
-			addr[5] = bottom & 0xff;
-			addr[4] = (bottom >> 8) & 0xff;
-			addr[3] = (bottom >> 16) & 0xff;
-			addr[2] = (bottom >> 24) & 0xff;
-			addr[1] = top & 0xff;
-			addr[0] = (top & 0xff00) >> 8;
-		} else {
-			addr[0] = bottom & 0xff;
-			addr[1] = (bottom >> 8) & 0xff;
-			addr[2] = (bottom >> 16) & 0xff;
-			addr[3] = (bottom >> 24) & 0xff;
-			addr[4] = top & 0xff;
-			addr[5] = (top >> 8) & 0xff;
-		}
+		addr[0] = bottom & 0xff;
+		addr[1] = (bottom >> 8) & 0xff;
+		addr[2] = (bottom >> 16) & 0xff;
+		addr[3] = (bottom >> 24) & 0xff;
+		addr[4] = top & 0xff;
+		addr[5] = (top >> 8) & 0xff;
 
 		if (is_valid_ether_addr(addr)) {
 			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
@@ -322,50 +319,147 @@
 	eth_hw_addr_random(bp->dev);
 }
 
+static int macb_mdio_wait_for_idle(struct macb *bp)
+{
+	u32 val;
+
+	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
+				  1, MACB_MDIO_TIMEOUT);
+}
+
 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
 	struct macb *bp = bus->priv;
-	int value;
+	int status;
 
-	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
-			      | MACB_BF(RW, MACB_MAN_READ)
-			      | MACB_BF(PHYA, mii_id)
-			      | MACB_BF(REGA, regnum)
-			      | MACB_BF(CODE, MACB_MAN_CODE)));
+	status = pm_runtime_get_sync(&bp->pdev->dev);
+	if (status < 0) {
+		pm_runtime_put_noidle(&bp->pdev->dev);
+		goto mdio_pm_exit;
+	}
 
-	/* wait for end of transfer */
-	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
-		cpu_relax();
+	status = macb_mdio_wait_for_idle(bp);
+	if (status < 0)
+		goto mdio_read_exit;
 
-	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+	if (regnum & MII_ADDR_C45) {
+		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+				      | MACB_BF(RW, MACB_MAN_C45_ADDR)
+				      | MACB_BF(PHYA, mii_id)
+				      | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+				      | MACB_BF(DATA, regnum & 0xFFFF)
+				      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
 
-	return value;
+		status = macb_mdio_wait_for_idle(bp);
+		if (status < 0)
+			goto mdio_read_exit;
+
+		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+				      | MACB_BF(RW, MACB_MAN_C45_READ)
+				      | MACB_BF(PHYA, mii_id)
+				      | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+				      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+	} else {
+		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+				      | MACB_BF(RW, MACB_MAN_C22_READ)
+				      | MACB_BF(PHYA, mii_id)
+				      | MACB_BF(REGA, regnum)
+				      | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+	}
+
+	status = macb_mdio_wait_for_idle(bp);
+	if (status < 0)
+		goto mdio_read_exit;
+
+	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+mdio_read_exit:
+	pm_runtime_mark_last_busy(&bp->pdev->dev);
+	pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+	return status;
 }
 
 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 			   u16 value)
 {
 	struct macb *bp = bus->priv;
+	int status;
 
-	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
-			      | MACB_BF(RW, MACB_MAN_WRITE)
-			      | MACB_BF(PHYA, mii_id)
-			      | MACB_BF(REGA, regnum)
-			      | MACB_BF(CODE, MACB_MAN_CODE)
-			      | MACB_BF(DATA, value)));
+	status = pm_runtime_get_sync(&bp->pdev->dev);
+	if (status < 0) {
+		pm_runtime_put_noidle(&bp->pdev->dev);
+		goto mdio_pm_exit;
+	}
 
-	/* wait for end of transfer */
-	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
-		cpu_relax();
+	status = macb_mdio_wait_for_idle(bp);
+	if (status < 0)
+		goto mdio_write_exit;
 
-	return 0;
+	if (regnum & MII_ADDR_C45) {
+		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+				      | MACB_BF(RW, MACB_MAN_C45_ADDR)
+				      | MACB_BF(PHYA, mii_id)
+				      | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+				      | MACB_BF(DATA, regnum & 0xFFFF)
+				      | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+		status = macb_mdio_wait_for_idle(bp);
+		if (status < 0)
+			goto mdio_write_exit;
+
+		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+				      | MACB_BF(RW, MACB_MAN_C45_WRITE)
+				      | MACB_BF(PHYA, mii_id)
+				      | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+				      | MACB_BF(CODE, MACB_MAN_C45_CODE)
+				      | MACB_BF(DATA, value)));
+	} else {
+		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+				      | MACB_BF(RW, MACB_MAN_C22_WRITE)
+				      | MACB_BF(PHYA, mii_id)
+				      | MACB_BF(REGA, regnum)
+				      | MACB_BF(CODE, MACB_MAN_C22_CODE)
+				      | MACB_BF(DATA, value)));
+	}
+
+	status = macb_mdio_wait_for_idle(bp);
+	if (status < 0)
+		goto mdio_write_exit;
+
+mdio_write_exit:
+	pm_runtime_mark_last_busy(&bp->pdev->dev);
+	pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+	return status;
+}
+
+static void macb_init_buffers(struct macb *bp)
+{
+	struct macb_queue *queue;
+	unsigned int q;
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			queue_writel(queue, RBQPH,
+				     upper_32_bits(queue->rx_ring_dma));
+#endif
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH,
+				     upper_32_bits(queue->tx_ring_dma));
+#endif
+	}
 }
 
 /**
  * macb_set_tx_clk() - Set a clock to a new frequency
- * @clk Pointer to the clock to change
- * @rate New frequency in Hz
- * @dev Pointer to the struct net_device
+ * @clk: Pointer to the clock to change
+ * @speed: New frequency in Hz
+ * @dev: Pointer to the struct net_device
  */
 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
 {
@@ -405,169 +499,272 @@
 	netdev_err(dev, "adjusting tx_clk failed.\n");
 }
 
-static void macb_handle_link_change(struct net_device *dev)
+static void macb_validate(struct phylink_config *config,
+			  unsigned long *supported,
+			  struct phylink_link_state *state)
 {
-	struct macb *bp = netdev_priv(dev);
-	struct phy_device *phydev = dev->phydev;
+	struct net_device *ndev = to_net_dev(config->dev);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+	struct macb *bp = netdev_priv(ndev);
+
+	/* We only support MII, RMII, GMII, RGMII & SGMII. */
+	if (state->interface != PHY_INTERFACE_MODE_NA &&
+	    state->interface != PHY_INTERFACE_MODE_MII &&
+	    state->interface != PHY_INTERFACE_MODE_RMII &&
+	    state->interface != PHY_INTERFACE_MODE_GMII &&
+	    state->interface != PHY_INTERFACE_MODE_SGMII &&
+	    !phy_interface_mode_is_rgmii(state->interface)) {
+		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		return;
+	}
+
+	if (!macb_is_gem(bp) &&
+	    (state->interface == PHY_INTERFACE_MODE_GMII ||
+	     phy_interface_mode_is_rgmii(state->interface))) {
+		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		return;
+	}
+
+	phylink_set_port_modes(mask);
+	phylink_set(mask, Autoneg);
+	phylink_set(mask, Asym_Pause);
+
+	phylink_set(mask, 10baseT_Half);
+	phylink_set(mask, 10baseT_Full);
+	phylink_set(mask, 100baseT_Half);
+	phylink_set(mask, 100baseT_Full);
+
+	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
+	    (state->interface == PHY_INTERFACE_MODE_NA ||
+	     state->interface == PHY_INTERFACE_MODE_GMII ||
+	     state->interface == PHY_INTERFACE_MODE_SGMII ||
+	     phy_interface_mode_is_rgmii(state->interface))) {
+		phylink_set(mask, 1000baseT_Full);
+		phylink_set(mask, 1000baseX_Full);
+
+		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+			phylink_set(mask, 1000baseT_Half);
+	}
+
+	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_and(state->advertising, state->advertising, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void macb_mac_pcs_get_state(struct phylink_config *config,
+				   struct phylink_link_state *state)
+{
+	state->link = 0;
+}
+
+static void macb_mac_an_restart(struct phylink_config *config)
+{
+	/* Not supported */
+}
+
+static void macb_mac_config(struct phylink_config *config, unsigned int mode,
+			    const struct phylink_link_state *state)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct macb *bp = netdev_priv(ndev);
 	unsigned long flags;
-	int status_change = 0;
+	u32 old_ctrl, ctrl;
 
 	spin_lock_irqsave(&bp->lock, flags);
 
-	if (phydev->link) {
-		if ((bp->speed != phydev->speed) ||
-		    (bp->duplex != phydev->duplex)) {
-			u32 reg;
+	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
 
-			reg = macb_readl(bp, NCFGR);
-			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
-			if (macb_is_gem(bp))
-				reg &= ~GEM_BIT(GBE);
+	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
+		if (state->interface == PHY_INTERFACE_MODE_RMII)
+			ctrl |= MACB_BIT(RM9200_RMII);
+	} else if (macb_is_gem(bp)) {
+		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
 
-			if (phydev->duplex)
-				reg |= MACB_BIT(FD);
-			if (phydev->speed == SPEED_100)
-				reg |= MACB_BIT(SPD);
-			if (phydev->speed == SPEED_1000 &&
-			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
-				reg |= GEM_BIT(GBE);
-
-			macb_or_gem_writel(bp, NCFGR, reg);
-
-			bp->speed = phydev->speed;
-			bp->duplex = phydev->duplex;
-			status_change = 1;
-		}
+		if (state->interface == PHY_INTERFACE_MODE_SGMII)
+			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 	}
 
-	if (phydev->link != bp->link) {
-		if (!phydev->link) {
-			bp->speed = 0;
-			bp->duplex = -1;
-		}
-		bp->link = phydev->link;
+	/* Apply the new configuration, if any */
+	if (old_ctrl ^ ctrl)
+		macb_or_gem_writel(bp, NCFGR, ctrl);
 
-		status_change = 1;
+	spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
+			       phy_interface_t interface)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct macb *bp = netdev_priv(ndev);
+	struct macb_queue *queue;
+	unsigned int q;
+	u32 ctrl;
+
+	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+			queue_writel(queue, IDR,
+				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+	/* Disable Rx and Tx */
+	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
+	macb_writel(bp, NCR, ctrl);
+
+	netif_tx_stop_all_queues(ndev);
+}
+
+static void macb_mac_link_up(struct phylink_config *config,
+			     struct phy_device *phy,
+			     unsigned int mode, phy_interface_t interface,
+			     int speed, int duplex,
+			     bool tx_pause, bool rx_pause)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct macb *bp = netdev_priv(ndev);
+	struct macb_queue *queue;
+	unsigned long flags;
+	unsigned int q;
+	u32 ctrl;
+
+	spin_lock_irqsave(&bp->lock, flags);
+
+	ctrl = macb_or_gem_readl(bp, NCFGR);
+
+	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+	if (speed == SPEED_100)
+		ctrl |= MACB_BIT(SPD);
+
+	if (duplex)
+		ctrl |= MACB_BIT(FD);
+
+	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
+		ctrl &= ~MACB_BIT(PAE);
+		if (macb_is_gem(bp)) {
+			ctrl &= ~GEM_BIT(GBE);
+
+			if (speed == SPEED_1000)
+				ctrl |= GEM_BIT(GBE);
+		}
+
+		if (rx_pause)
+			ctrl |= MACB_BIT(PAE);
+
+		macb_set_tx_clk(bp->tx_clk, speed, ndev);
+
+		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+		 * cleared the pipeline and control registers.
+		 */
+		bp->macbgem_ops.mog_init_rings(bp);
+		macb_init_buffers(bp);
+
+		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+			queue_writel(queue, IER,
+				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
 	}
+
+	macb_or_gem_writel(bp, NCFGR, ctrl);
 
 	spin_unlock_irqrestore(&bp->lock, flags);
 
-	if (status_change) {
-		if (phydev->link) {
-			/* Update the TX clock rate if and only if the link is
-			 * up and there has been a link change.
-			 */
-			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+	/* Enable Rx and Tx */
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 
-			netif_carrier_on(dev);
-			netdev_info(dev, "link up (%d/%s)\n",
-				    phydev->speed,
-				    phydev->duplex == DUPLEX_FULL ?
-				    "Full" : "Half");
-		} else {
-			netif_carrier_off(dev);
-			netdev_info(dev, "link down\n");
-		}
-	}
+	netif_tx_wake_all_queues(ndev);
 }
 
-/* based on au1000_eth. c*/
-static int macb_mii_probe(struct net_device *dev)
+static const struct phylink_mac_ops macb_phylink_ops = {
+	.validate = macb_validate,
+	.mac_pcs_get_state = macb_mac_pcs_get_state,
+	.mac_an_restart = macb_mac_an_restart,
+	.mac_config = macb_mac_config,
+	.mac_link_down = macb_mac_link_down,
+	.mac_link_up = macb_mac_link_up,
+};
+
+static bool macb_phy_handle_exists(struct device_node *dn)
 {
-	struct macb *bp = netdev_priv(dev);
-	struct macb_platform_data *pdata;
+	dn = of_parse_phandle(dn, "phy-handle", 0);
+	of_node_put(dn);
+	return dn != NULL;
+}
+
+static int macb_phylink_connect(struct macb *bp)
+{
+	struct device_node *dn = bp->pdev->dev.of_node;
+	struct net_device *dev = bp->dev;
 	struct phy_device *phydev;
-	struct device_node *np;
-	int phy_irq, ret, i;
+	int ret;
 
-	pdata = dev_get_platdata(&bp->pdev->dev);
-	np = bp->pdev->dev.of_node;
-	ret = 0;
+	if (dn)
+		ret = phylink_of_phy_connect(bp->phylink, dn, 0);
 
-	if (np) {
-		if (of_phy_is_fixed_link(np)) {
-			bp->phy_node = of_node_get(np);
-		} else {
-			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
-			/* fallback to standard phy registration if no
-			 * phy-handle was found nor any phy found during
-			 * dt phy registration
-			 */
-			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
-				for (i = 0; i < PHY_MAX_ADDR; i++) {
-					struct phy_device *phydev;
-
-					phydev = mdiobus_scan(bp->mii_bus, i);
-					if (IS_ERR(phydev) &&
-					    PTR_ERR(phydev) != -ENODEV) {
-						ret = PTR_ERR(phydev);
-						break;
-					}
-				}
-
-				if (ret)
-					return -ENODEV;
-			}
-		}
-	}
-
-	if (bp->phy_node) {
-		phydev = of_phy_connect(dev, bp->phy_node,
-					&macb_handle_link_change, 0,
-					bp->phy_interface);
-		if (!phydev)
-			return -ENODEV;
-	} else {
+	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
 		phydev = phy_find_first(bp->mii_bus);
 		if (!phydev) {
 			netdev_err(dev, "no PHY found\n");
 			return -ENXIO;
 		}
 
-		if (pdata) {
-			if (gpio_is_valid(pdata->phy_irq_pin)) {
-				ret = devm_gpio_request(&bp->pdev->dev,
-							pdata->phy_irq_pin, "phy int");
-				if (!ret) {
-					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
-					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
-				}
-			} else {
-				phydev->irq = PHY_POLL;
-			}
-		}
-
 		/* attach the mac to the phy */
-		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
-					 bp->phy_interface);
-		if (ret) {
-			netdev_err(dev, "Could not attach to PHY\n");
-			return ret;
-		}
+		ret = phylink_connect_phy(bp->phylink, phydev);
 	}
 
-	/* mask with MAC supported features */
-	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
-		phydev->supported &= PHY_GBIT_FEATURES;
-	else
-		phydev->supported &= PHY_BASIC_FEATURES;
+	if (ret) {
+		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+		return ret;
+	}
 
-	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
-		phydev->supported &= ~SUPPORTED_1000baseT_Half;
-
-	phydev->advertising = phydev->supported;
-
-	bp->link = 0;
-	bp->speed = 0;
-	bp->duplex = -1;
+	phylink_start(bp->phylink);
 
 	return 0;
 }
 
+/* based on au1000_eth. c*/
+static int macb_mii_probe(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+
+	bp->phylink_config.dev = &dev->dev;
+	bp->phylink_config.type = PHYLINK_NETDEV;
+
+	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
+				     bp->phy_interface, &macb_phylink_ops);
+	if (IS_ERR(bp->phylink)) {
+		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
+			   PTR_ERR(bp->phylink));
+		return PTR_ERR(bp->phylink);
+	}
+
+	return 0;
+}
+
+static int macb_mdiobus_register(struct macb *bp)
+{
+	struct device_node *child, *np = bp->pdev->dev.of_node;
+
+	if (of_phy_is_fixed_link(np))
+		return mdiobus_register(bp->mii_bus);
+
+	/* Only create the PHY from the device tree if at least one PHY is
+	 * described. Otherwise scan the entire MDIO bus. We do this to support
+	 * old device tree that did not follow the best practices and did not
+	 * describe their network PHYs.
+	 */
+	for_each_available_child_of_node(np, child)
+		if (of_mdiobus_child_is_phy(child)) {
+			/* The loop increments the child refcount,
+			 * decrement it before returning.
+			 */
+			of_node_put(child);
+
+			return of_mdiobus_register(bp->mii_bus, np);
+		}
+
+	return mdiobus_register(bp->mii_bus);
+}
+
 static int macb_mii_init(struct macb *bp)
 {
-	struct macb_platform_data *pdata;
-	struct device_node *np;
 	int err = -ENXIO;
 
 	/* Enable management port */
@@ -586,28 +783,12 @@
 		 bp->pdev->name, bp->pdev->id);
 	bp->mii_bus->priv = bp;
 	bp->mii_bus->parent = &bp->pdev->dev;
-	pdata = dev_get_platdata(&bp->pdev->dev);
 
 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
-	np = bp->pdev->dev.of_node;
-	if (np && of_phy_is_fixed_link(np)) {
-		if (of_phy_register_fixed_link(np) < 0) {
-			dev_err(&bp->pdev->dev,
-				"broken fixed-link specification %pOF\n", np);
-			goto err_out_free_mdiobus;
-		}
-
-		err = mdiobus_register(bp->mii_bus);
-	} else {
-		if (pdata)
-			bp->mii_bus->phy_mask = pdata->phy_mask;
-
-		err = of_mdiobus_register(bp->mii_bus, np);
-	}
-
+	err = macb_mdiobus_register(bp);
 	if (err)
-		goto err_out_free_fixed_link;
+		goto err_out_free_mdiobus;
 
 	err = macb_mii_probe(bp->dev);
 	if (err)
@@ -617,11 +798,7 @@
 
 err_out_unregister_bus:
 	mdiobus_unregister(bp->mii_bus);
-err_out_free_fixed_link:
-	if (np && of_phy_is_fixed_link(np))
-		of_phy_deregister_fixed_link(np);
 err_out_free_mdiobus:
-	of_node_put(bp->phy_node);
 	mdiobus_free(bp->mii_bus);
 err_out:
 	return err;
@@ -915,7 +1092,6 @@
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		queue->rx_prepared_head++;
 		desc = macb_rx_desc(queue, entry);
 
 		if (!queue->rx_skbuff[entry]) {
@@ -954,6 +1130,7 @@
 			dma_wmb();
 			desc->addr &= ~MACB_BIT(RX_USED);
 		}
+		queue->rx_prepared_head++;
 	}
 
 	/* Make descriptor updates visible to hardware */
@@ -984,7 +1161,8 @@
 	 */
 }
 
-static int gem_rx(struct macb_queue *queue, int budget)
+static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
+		  int budget)
 {
 	struct macb *bp = queue->bp;
 	unsigned int len;
@@ -1066,7 +1244,7 @@
 			       skb->data, 32, true);
 #endif
 
-		netif_receive_skb(skb);
+		napi_gro_receive(napi, skb);
 	}
 
 	gem_rx_refill(queue);
@@ -1074,8 +1252,8 @@
 	return count;
 }
 
-static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
-			 unsigned int last_frag)
+static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
+			 unsigned int first_frag, unsigned int last_frag)
 {
 	unsigned int len;
 	unsigned int frag;
@@ -1151,7 +1329,7 @@
 	bp->dev->stats.rx_bytes += skb->len;
 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
 		    skb->len, skb->csum);
-	netif_receive_skb(skb);
+	napi_gro_receive(napi, skb);
 
 	return 0;
 }
@@ -1174,7 +1352,8 @@
 	queue->rx_tail = 0;
 }
 
-static int macb_rx(struct macb_queue *queue, int budget)
+static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
+		   int budget)
 {
 	struct macb *bp = queue->bp;
 	bool reset_rx_queue = false;
@@ -1211,7 +1390,7 @@
 			continue;
 		}
 
-		dropped = macb_rx_frame(queue, first_frag, tail);
+		dropped = macb_rx_frame(queue, napi, first_frag, tail);
 		first_frag = -1;
 		if (unlikely(dropped < 0)) {
 			reset_rx_queue = true;
@@ -1265,11 +1444,18 @@
 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		    (unsigned long)status, budget);
 
-	work_done = bp->macbgem_ops.mog_rx(queue, budget);
+	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 
-		/* Packets received while interrupts were disabled */
+		/* RSR bits only seem to propagate to raise interrupts when
+		 * interrupts are enabled at the time, so if bits are already
+		 * set due to packets received while interrupts were disabled,
+		 * they will not cause another interrupt to be generated when
+		 * interrupts are re-enabled.
+		 * Check for this case here. This has been seen to happen
+		 * around 30% of the time under heavy network load.
+		 */
 		status = macb_readl(bp, RSR);
 		if (status) {
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1277,6 +1463,22 @@
 			napi_reschedule(napi);
 		} else {
 			queue_writel(queue, IER, bp->rx_intr_mask);
+
+			/* In rare cases, packets could have been received in
+			 * the window between the check above and re-enabling
+			 * interrupts. Therefore, a double-check is required
+			 * to avoid losing a wakeup. This can potentially race
+			 * with the interrupt handler doing the same actions
+			 * if an interrupt is raised just after enabling them,
+			 * but this should be harmless.
+			 */
+			status = macb_readl(bp, RSR);
+			if (unlikely(status)) {
+				queue_writel(queue, IDR, bp->rx_intr_mask);
+				if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+					queue_writel(queue, ISR, MACB_BIT(RCOMP));
+				napi_schedule(napi);
+			}
 		}
 	}
 
@@ -1285,11 +1487,11 @@
 	return work_done;
 }
 
-static void macb_hresp_error_task(unsigned long data)
+static void macb_hresp_error_task(struct tasklet_struct *t)
 {
-	struct macb *bp = (struct macb *)data;
+	struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
 	struct net_device *dev = bp->dev;
-	struct macb_queue *queue = bp->queues;
+	struct macb_queue *queue;
 	unsigned int q;
 	u32 ctrl;
 
@@ -1308,26 +1510,14 @@
 	bp->macbgem_ops.mog_init_rings(bp);
 
 	/* Initialize TX and RX buffers */
-	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, RBQPH,
-				     upper_32_bits(queue->rx_ring_dma));
-#endif
-		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, TBQPH,
-				     upper_32_bits(queue->tx_ring_dma));
-#endif
+	macb_init_buffers(bp);
 
-		/* Enable interrupts */
+	/* Enable interrupts */
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
-	}
 
 	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
 	macb_writel(bp, NCR, ctrl);
@@ -1341,6 +1531,7 @@
 	unsigned int head = queue->tx_head;
 	unsigned int tail = queue->tx_tail;
 	struct macb *bp = queue->bp;
+	unsigned int head_idx, tbqp;
 
 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 		queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1348,7 +1539,72 @@
 	if (head == tail)
 		return;
 
+	tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+	tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+	head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+	if (tbqp == head_idx)
+		return;
+
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
+{
+	struct macb_queue *queue = dev_id;
+	struct macb *bp = queue->bp;
+	u32 status;
+
+	status = queue_readl(queue, ISR);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	spin_lock(&bp->lock);
+
+	if (status & MACB_BIT(WOL)) {
+		queue_writel(queue, IDR, MACB_BIT(WOL));
+		macb_writel(bp, WOL, 0);
+		netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+			    (unsigned int)(queue - bp->queues),
+			    (unsigned long)status);
+		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+			queue_writel(queue, ISR, MACB_BIT(WOL));
+		pm_wakeup_event(&bp->pdev->dev, 0);
+	}
+
+	spin_unlock(&bp->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
+{
+	struct macb_queue *queue = dev_id;
+	struct macb *bp = queue->bp;
+	u32 status;
+
+	status = queue_readl(queue, ISR);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	spin_lock(&bp->lock);
+
+	if (status & GEM_BIT(WOL)) {
+		queue_writel(queue, IDR, GEM_BIT(WOL));
+		gem_writel(bp, WOL, 0);
+		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+			    (unsigned int)(queue - bp->queues),
+			    (unsigned long)status);
+		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+			queue_writel(queue, ISR, GEM_BIT(WOL));
+		pm_wakeup_event(&bp->pdev->dev, 0);
+	}
+
+	spin_unlock(&bp->lock);
+
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
@@ -1740,16 +1996,12 @@
 		if (!nskb)
 			return -ENOMEM;
 
-		dev_kfree_skb_any(*skb);
+		dev_consume_skb_any(*skb);
 		*skb = nskb;
 	}
 
-	if (padlen) {
-		if (padlen >= ETH_FCS_LEN)
-			skb_put_zero(*skb, padlen - ETH_FCS_LEN);
-		else
-			skb_trim(*skb, ETH_FCS_LEN - padlen);
-	}
+	if (padlen > ETH_FCS_LEN)
+		skb_put_zero(*skb, padlen - ETH_FCS_LEN);
 
 add_fcs:
 	/* set FCS to packet */
@@ -1772,7 +2024,7 @@
 	unsigned long flags;
 	unsigned int desc_cnt, nr_frags, frag_size, f;
 	unsigned int hdrlen;
-	bool is_lso, is_udp = 0;
+	bool is_lso;
 	netdev_tx_t ret = NETDEV_TX_OK;
 
 	if (macb_clear_csum(skb)) {
@@ -1788,10 +2040,8 @@
 	is_lso = (skb_shinfo(skb)->gso_size != 0);
 
 	if (is_lso) {
-		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
-
 		/* length of headers */
-		if (is_udp)
+		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
 			/* only queue eth + ip headers separately for UDP */
 			hdrlen = skb_transport_offset(skb);
 		else
@@ -2218,19 +2468,13 @@
 
 static void macb_init_hw(struct macb *bp)
 {
-	struct macb_queue *queue;
-	unsigned int q;
-
 	u32 config;
 
 	macb_reset_hw(bp);
 	macb_set_hwaddr(bp);
 
 	config = macb_mdc_clk_div(bp);
-	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
-		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 	config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
-	config |= MACB_BIT(PAE); /* PAuse Enable */
 	config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
 	if (bp->caps & MACB_CAPS_JUMBO)
 		config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2246,36 +2490,11 @@
 	macb_writel(bp, NCFGR, config);
 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
 		gem_writel(bp, JML, bp->jumbo_max_len);
-	bp->speed = SPEED_10;
-	bp->duplex = DUPLEX_HALF;
 	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
 	if (bp->caps & MACB_CAPS_JUMBO)
 		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
 
 	macb_configure_dma(bp);
-
-	/* Initialize TX and RX buffers */
-	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
-#endif
-		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
-
-		/* Enable interrupts */
-		queue_writel(queue, IER,
-			     bp->rx_intr_mask |
-			     MACB_TX_INT_FLAGS |
-			     MACB_BIT(HRESP));
-	}
-
-	/* Enable TX and RX */
-	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 }
 
@@ -2399,20 +2618,17 @@
 
 static int macb_open(struct net_device *dev)
 {
-	struct macb *bp = netdev_priv(dev);
 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+	struct macb *bp = netdev_priv(dev);
 	struct macb_queue *queue;
 	unsigned int q;
 	int err;
 
 	netdev_dbg(bp->dev, "open\n");
 
-	/* carrier starts down */
-	netif_carrier_off(dev);
-
-	/* if the phy is not yet register, retry later*/
-	if (!dev->phydev)
-		return -EAGAIN;
+	err = pm_runtime_get_sync(&bp->pdev->dev);
+	if (err < 0)
+		goto pm_exit;
 
 	/* RX buffers initialization */
 	macb_init_rx_buffer_size(bp, bufsz);
@@ -2421,17 +2637,17 @@
 	if (err) {
 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
 			   err);
-		return err;
+		goto pm_exit;
 	}
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 		napi_enable(&queue->napi);
 
-	bp->macbgem_ops.mog_init_rings(bp);
 	macb_init_hw(bp);
 
-	/* schedule a link state check */
-	phy_start(dev->phydev);
+	err = macb_phylink_connect(bp);
+	if (err)
+		goto reset_hw;
 
 	netif_tx_start_all_queues(dev);
 
@@ -2439,6 +2655,15 @@
 		bp->ptp_info->ptp_init(dev);
 
 	return 0;
+
+reset_hw:
+	macb_reset_hw(bp);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		napi_disable(&queue->napi);
+	macb_free_consistent(bp);
+pm_exit:
+	pm_runtime_put_sync(&bp->pdev->dev);
+	return err;
 }
 
 static int macb_close(struct net_device *dev)
@@ -2453,8 +2678,8 @@
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 		napi_disable(&queue->napi);
 
-	if (dev->phydev)
-		phy_stop(dev->phydev);
+	phylink_stop(bp->phylink);
+	phylink_disconnect_phy(bp->phylink);
 
 	spin_lock_irqsave(&bp->lock, flags);
 	macb_reset_hw(bp);
@@ -2465,6 +2690,8 @@
 
 	if (bp->ptp_info)
 		bp->ptp_info->ptp_remove(dev);
+
+	pm_runtime_put(&bp->pdev->dev);
 
 	return 0;
 }
@@ -2686,11 +2913,9 @@
 {
 	struct macb *bp = netdev_priv(netdev);
 
-	wol->supported = 0;
-	wol->wolopts = 0;
-
 	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
-		wol->supported = WAKE_MAGIC;
+		phylink_ethtool_get_wol(bp->phylink, wol);
+		wol->supported |= WAKE_MAGIC;
 
 		if (bp->wol & MACB_WOL_ENABLED)
 			wol->wolopts |= WAKE_MAGIC;
@@ -2700,6 +2925,15 @@
 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct macb *bp = netdev_priv(netdev);
+	int ret;
+
+	/* Pass the order to phylink layer */
+	ret = phylink_ethtool_set_wol(bp->phylink, wol);
+	/* Don't manage WoL on MAC if handled by the PHY
+	 * or if there's a failure in talking to the PHY
+	 */
+	if (!ret || ret != -EOPNOTSUPP)
+		return ret;
 
 	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
 	    (wol->wolopts & ~WAKE_MAGIC))
@@ -2713,6 +2947,22 @@
 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
 
 	return 0;
+}
+
+static int macb_get_link_ksettings(struct net_device *netdev,
+				   struct ethtool_link_ksettings *kset)
+{
+	struct macb *bp = netdev_priv(netdev);
+
+	return phylink_ethtool_ksettings_get(bp->phylink, kset);
+}
+
+static int macb_set_link_ksettings(struct net_device *netdev,
+				   const struct ethtool_link_ksettings *kset)
+{
+	struct macb *bp = netdev_priv(netdev);
+
+	return phylink_ethtool_ksettings_set(bp->phylink, kset);
 }
 
 static void macb_get_ringparam(struct net_device *netdev,
@@ -2842,9 +3092,13 @@
 
 static void gem_enable_flow_filters(struct macb *bp, bool enable)
 {
+	struct net_device *netdev = bp->dev;
 	struct ethtool_rx_fs_item *item;
 	u32 t2_scr;
 	int num_t2_scr;
+
+	if (!(netdev->features & NETIF_F_NTUPLE))
+		return;
 
 	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
 
@@ -2890,6 +3144,9 @@
 	bool cmp_a = false;
 	bool cmp_b = false;
 	bool cmp_c = false;
+
+	if (!macb_is_gem(bp))
+		return;
 
 	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
 	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
@@ -3005,8 +3262,7 @@
 	gem_prog_cmp_regs(bp, fs);
 	bp->rx_fs_list.count++;
 	/* enable filtering if NTUPLE on */
-	if (netdev->features & NETIF_F_NTUPLE)
-		gem_enable_flow_filters(bp, 1);
+	gem_enable_flow_filters(bp, 1);
 
 	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
 	return 0;
@@ -3148,8 +3404,8 @@
 	.get_ts_info = ethtool_op_get_ts_info,
 	.get_wol = macb_get_wol,
 	.set_wol = macb_set_wol,
-	.get_link_ksettings = phy_ethtool_get_link_ksettings,
-	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+	.get_link_ksettings = macb_get_link_ksettings,
+	.set_link_ksettings = macb_set_link_ksettings,
	.get_ringparam = macb_get_ringparam,
	.set_ringparam = macb_set_ringparam,
 };
@@ -3157,13 +3413,15 @@
 static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len = macb_get_regs_len,
	.get_regs = macb_get_regs,
+	.get_wol = macb_get_wol,
+	.set_wol = macb_set_wol,
	.get_link = ethtool_op_get_link,
	.get_ts_info = macb_get_ts_info,
	.get_ethtool_stats = gem_get_ethtool_stats,
	.get_strings = gem_get_ethtool_strings,
	.get_sset_count = gem_get_sset_count,
-	.get_link_ksettings = phy_ethtool_get_link_ksettings,
-	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+	.get_link_ksettings = macb_get_link_ksettings,
+	.set_link_ksettings = macb_set_link_ksettings,
	.get_ringparam = macb_get_ringparam,
	.set_ringparam = macb_set_ringparam,
	.get_rxnfc = gem_get_rxnfc,
@@ -3172,26 +3430,65 @@
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct phy_device *phydev = dev->phydev;
 	struct macb *bp = netdev_priv(dev);
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	if (!phydev)
-		return -ENODEV;
-
-	if (!bp->ptp_info)
-		return phy_mii_ioctl(phydev, rq, cmd);
-
-	switch (cmd) {
-	case SIOCSHWTSTAMP:
-		return bp->ptp_info->set_hwtst(dev, rq, cmd);
-	case SIOCGHWTSTAMP:
-		return bp->ptp_info->get_hwtst(dev, rq);
-	default:
-		return phy_mii_ioctl(phydev, rq, cmd);
+	if (bp->ptp_info) {
+		switch (cmd) {
+		case SIOCSHWTSTAMP:
+			return bp->ptp_info->set_hwtst(dev, rq, cmd);
+		case SIOCGHWTSTAMP:
+			return bp->ptp_info->get_hwtst(dev, rq);
+		}
 	}
+
+	return phylink_mii_ioctl(bp->phylink, rq, cmd);
+}
+
+static inline void macb_set_txcsum_feature(struct macb *bp,
+					   netdev_features_t features)
+{
+	u32 val;
+
+	if (!macb_is_gem(bp))
+		return;
+
+	val = gem_readl(bp, DMACFG);
+	if (features & NETIF_F_HW_CSUM)
+		val |= GEM_BIT(TXCOEN);
+	else
+		val &= ~GEM_BIT(TXCOEN);
+
+	gem_writel(bp, DMACFG, val);
+}
+
+static inline void macb_set_rxcsum_feature(struct macb *bp,
+					   netdev_features_t features)
+{
+	struct net_device *netdev = bp->dev;
+	u32 val;
+
+	if (!macb_is_gem(bp))
+		return;
+
+	val = gem_readl(bp, NCFGR);
+	if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
+		val |= GEM_BIT(RXCOEN);
+	else
+		val &= ~GEM_BIT(RXCOEN);
+
+	gem_writel(bp, NCFGR, val);
+}
+
+static inline void macb_set_rxflow_feature(struct macb *bp,
+					   netdev_features_t features)
+{
+	if (!macb_is_gem(bp))
+		return;
+
+	gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
 }
 
 static int macb_set_features(struct net_device *netdev,
@@ -3201,37 +3498,37 @@
 	netdev_features_t changed = features ^ netdev->features;
 
 	/* TX checksum offload */
-	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
-		u32 dmacfg;
-
-		dmacfg = gem_readl(bp, DMACFG);
-		if (features & NETIF_F_HW_CSUM)
-			dmacfg |= GEM_BIT(TXCOEN);
-		else
-			dmacfg &= ~GEM_BIT(TXCOEN);
-		gem_writel(bp, DMACFG, dmacfg);
-	}
+	if (changed & NETIF_F_HW_CSUM)
+		macb_set_txcsum_feature(bp, features);
 
 	/* RX checksum offload */
-	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
-		u32 netcfg;
-
-		netcfg = gem_readl(bp, NCFGR);
-		if (features & NETIF_F_RXCSUM &&
-		    !(netdev->flags & IFF_PROMISC))
-			netcfg |= GEM_BIT(RXCOEN);
-		else
-			netcfg &= ~GEM_BIT(RXCOEN);
-		gem_writel(bp, NCFGR, netcfg);
-	}
+	if (changed & NETIF_F_RXCSUM)
+		macb_set_rxcsum_feature(bp, features);
 
 	/* RX Flow Filters */
-	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
-		bool turn_on = features & NETIF_F_NTUPLE;
+	if (changed & NETIF_F_NTUPLE)
+		macb_set_rxflow_feature(bp, features);
 
-		gem_enable_flow_filters(bp, turn_on);
-	}
 	return 0;
+}
+
+static void macb_restore_features(struct macb *bp)
+{
+	struct net_device *netdev = bp->dev;
+	netdev_features_t features = netdev->features;
+	struct ethtool_rx_fs_item *item;
+
+	/* TX checksum offload */
+	macb_set_txcsum_feature(bp, features);
+
+	/* RX checksum offload */
+	macb_set_rxcsum_feature(bp, features);
+
+	/* RX Flow Filters */
+	list_for_each_entry(item, &bp->rx_fs_list.list, list)
+		gem_prog_cmp_regs(bp, &item->fs);
+
+	macb_set_rxflow_feature(bp, features);
 }
 
 static const struct net_device_ops macb_netdev_ops = {
@@ -3274,7 +3571,8 @@
 #ifdef CONFIG_MACB_USE_HWSTAMP
 	if (gem_has_ptp(bp)) {
 		if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
-			pr_err("GEM doesn't support hardware ptp.\n");
+			dev_err(&bp->pdev->dev,
+				"GEM doesn't support hardware ptp.\n");
 		else {
 			bp->hw_dma_cap |= HW_DMA_CAP_PTP;
 			bp->ptp_info = &gem_ptp_info;
@@ -3291,8 +3589,6 @@
 				 unsigned int *queue_mask,
 				 unsigned int *num_queues)
 {
-	unsigned int hw_q;
-
 	*queue_mask = 0x1;
 	*num_queues = 1;
 
@@ -3306,18 +3602,13 @@
 		return;
 
 	/* bit 0 is never set but queue 0 always exists */
-	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
-
-	*queue_mask |= 0x1;
-
-	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
-		if (*queue_mask & (1 << hw_q))
-			(*num_queues)++;
+	*queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
+	*num_queues = hweight32(*queue_mask);
 }
 
 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
 			 struct clk **hclk, struct clk **tx_clk,
-			 struct clk **rx_clk)
+			 struct clk **rx_clk, struct clk **tsu_clk)
 {
 	struct macb_platform_data *pdata;
 	int err;
@@ -3349,13 +3640,17 @@
 		return err;
 	}
 
-	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
 	if (IS_ERR(*tx_clk))
-		*tx_clk = NULL;
+		return PTR_ERR(*tx_clk);
 
-	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
 	if (IS_ERR(*rx_clk))
-		*rx_clk = NULL;
+		return PTR_ERR(*rx_clk);
+
+	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
+	if (IS_ERR(*tsu_clk))
+		return PTR_ERR(*tsu_clk);
 
 	err = clk_prepare_enable(*pclk);
 	if (err) {
@@ -3381,7 +3676,16 @@
 		goto err_disable_txclk;
 	}
 
+	err = clk_prepare_enable(*tsu_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
+		goto err_disable_rxclk;
+	}
+
 	return 0;
+
+err_disable_rxclk:
+	clk_disable_unprepare(*rx_clk);
 
 err_disable_txclk:
 	clk_disable_unprepare(*tx_clk);
@@ -3417,7 +3721,7 @@
 
 		queue = &bp->queues[q];
 		queue->bp = bp;
-		netif_napi_add(dev, &queue->napi, macb_poll, 64);
+		netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
 		if (hw_q) {
 			queue->ISR = GEM_ISR(hw_q - 1);
 			queue->IER = GEM_IER(hw_q - 1);
@@ -3507,6 +3811,7 @@
 	reg = gem_readl(bp, DCFG8);
 	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
 			     GEM_BFEXT(T2SCR, reg));
+	INIT_LIST_HEAD(&bp->rx_fs_list.list);
 	if (bp->max_tuples > 0) {
 		/* also needs one ethtype match to check IPv4 */
 		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3517,7 +3822,6 @@
 		/* Filtering is supported in hw but don't enable it in kernel now */
 		dev->hw_features |= NETIF_F_NTUPLE;
 		/* init Rx flow definitions */
-		INIT_LIST_HEAD(&bp->rx_fs_list.list);
 		bp->rx_fs_list.count = 0;
 		spin_lock_init(&bp->rx_fs_lock);
 	} else
@@ -3526,7 +3830,7 @@
 
 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
 		val = 0;
-		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		if (phy_interface_mode_is_rgmii(bp->phy_interface))
 			val = GEM_BIT(RGMII);
 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
@@ -3556,15 +3860,11 @@
 /* max number of receive buffers */
 #define AT91ETHER_MAX_RX_DESCR 9
 
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static struct sifive_fu540_macb_mgmt *mgmt;
+
+static int at91ether_alloc_coherent(struct macb *lp)
 {
-	struct macb *lp = netdev_priv(dev);
 	struct macb_queue *q = &lp->queues[0];
-	struct macb_dma_desc *desc;
-	dma_addr_t addr;
-	u32 ctl;
-	int i;
 
 	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					(AT91ETHER_MAX_RX_DESCR *
@@ -3585,6 +3885,43 @@
 		q->rx_ring = NULL;
 		return -ENOMEM;
 	}
+
+	return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+	struct macb_queue *q = &lp->queues[0];
+
+	if (q->rx_ring) {
+		dma_free_coherent(&lp->pdev->dev,
+				  AT91ETHER_MAX_RX_DESCR *
+				  macb_dma_desc_get_size(lp),
+				  q->rx_ring, q->rx_ring_dma);
+		q->rx_ring = NULL;
+	}
+
+	if (q->rx_buffers) {
+		dma_free_coherent(&lp->pdev->dev,
+				  AT91ETHER_MAX_RX_DESCR *
+				  AT91ETHER_MAX_RBUFF_SZ,
+				  q->rx_buffers, q->rx_buffers_dma);
+		q->rx_buffers = NULL;
+	}
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+	struct macb_queue *q = &lp->queues[0];
+	struct macb_dma_desc *desc;
+	dma_addr_t addr;
+	u32 ctl;
+	int i, ret;
+
+	ret = at91ether_alloc_coherent(lp);
+	if (ret)
+		return ret;
 
 	addr = q->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
@@ -3607,7 +3944,39 @@
 	ctl = macb_readl(lp, NCR);
 	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 
+	/* Enable MAC interrupts */
+	macb_writel(lp, IER, MACB_BIT(RCOMP) |
+			     MACB_BIT(RXUBR) |
+			     MACB_BIT(ISR_TUND) |
+			     MACB_BIT(ISR_RLE) |
+			     MACB_BIT(TCOMP) |
+			     MACB_BIT(RM9200_TBRE) |
+			     MACB_BIT(ISR_ROVR) |
+			     MACB_BIT(HRESP));
+
 	return 0;
+}
+
+static void at91ether_stop(struct macb *lp)
+{
+	u32 ctl;
+
+	/* Disable MAC interrupts */
+	macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+			     MACB_BIT(RXUBR) |
+			     MACB_BIT(ISR_TUND) |
+			     MACB_BIT(ISR_RLE) |
+			     MACB_BIT(TCOMP) |
+			     MACB_BIT(RM9200_TBRE) |
+			     MACB_BIT(ISR_ROVR) |
+			     MACB_BIT(HRESP));
+
+	/* Disable Receiver and Transmitter */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+	/* Free resources. */
+	at91ether_free_coherent(lp);
 }
 
 /* Open the ethernet interface */
@@ -3617,67 +3986,50 @@
 	u32 ctl;
 	int ret;
 
+	ret = pm_runtime_get_sync(&lp->pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&lp->pdev->dev);
+		return ret;
+	}
+
 	/* Clear internal statistics */
 	ctl = macb_readl(lp, NCR);
 	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
 
 	macb_set_hwaddr(lp);
 
-	ret = at91ether_start(dev);
+	ret = at91ether_start(lp);
 	if (ret)
-		return ret;
+		goto pm_exit;
 
-	/* Enable MAC interrupts */
-	macb_writel(lp, IER, MACB_BIT(RCOMP) |
-			     MACB_BIT(RXUBR) |
-			     MACB_BIT(ISR_TUND) |
-			     MACB_BIT(ISR_RLE) |
-			     MACB_BIT(TCOMP) |
-			     MACB_BIT(ISR_ROVR) |
-			     MACB_BIT(HRESP));
-
-	/* schedule a link state check */
-	phy_start(dev->phydev);
+	ret = macb_phylink_connect(lp);
+	if (ret)
+		goto stop;
 
 	netif_start_queue(dev);
 
 	return 0;
+
+stop:
+	at91ether_stop(lp);
+pm_exit:
+	pm_runtime_put_sync(&lp->pdev->dev);
+	return ret;
 }
 
 /* Close the interface */
 static int at91ether_close(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
-	struct macb_queue *q = &lp->queues[0];
-	u32 ctl;
-
-	/* Disable Receiver and Transmitter */
-	ctl = macb_readl(lp, NCR);
-	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-	/* Disable MAC interrupts */
-	macb_writel(lp, IDR, MACB_BIT(RCOMP) |
-			     MACB_BIT(RXUBR) |
-			     MACB_BIT(ISR_TUND) |
-			     MACB_BIT(ISR_RLE) |
-			     MACB_BIT(TCOMP) |
-			     MACB_BIT(ISR_ROVR) |
-			     MACB_BIT(HRESP));
 
 	netif_stop_queue(dev);
 
-	dma_free_coherent(&lp->pdev->dev,
-			  AT91ETHER_MAX_RX_DESCR *
-			  macb_dma_desc_get_size(lp),
-			  q->rx_ring, q->rx_ring_dma);
-	q->rx_ring = NULL;
+	phylink_stop(lp->phylink);
+	phylink_disconnect_phy(lp->phylink);
 
-	dma_free_coherent(&lp->pdev->dev,
-			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-			  q->rx_buffers, q->rx_buffers_dma);
-	q->rx_buffers = NULL;
+	at91ether_stop(lp);
 
-	return 0;
+	return pm_runtime_put(&lp->pdev->dev);
 }
 
 /* Transmit packet */
@@ -3685,24 +4037,34 @@
 			       struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	unsigned long flags;
 
-	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
-		netif_stop_queue(dev);
+	if (lp->rm9200_tx_len < 2) {
+		int desc = lp->rm9200_tx_tail;
 
 		/* Store packet information (to free when Tx completed) */
-		lp->skb = skb;
-		lp->skb_length = skb->len;
-		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
-						  DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+		lp->rm9200_txq[desc].skb = skb;
+		lp->rm9200_txq[desc].size = skb->len;
+		lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
+							      skb->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
 			dev_kfree_skb_any(skb);
 			dev->stats.tx_dropped++;
 			netdev_err(dev, "%s: DMA mapping error\n", __func__);
 			return NETDEV_TX_OK;
 		}
 
+		spin_lock_irqsave(&lp->lock, flags);
+
+		lp->rm9200_tx_tail = (desc + 1) & 1;
+		lp->rm9200_tx_len++;
+		if (lp->rm9200_tx_len > 1)
+			netif_stop_queue(dev);
+
+		spin_unlock_irqrestore(&lp->lock, flags);
+
 		/* Set address of the data in the Transmit Address register */
-		macb_writel(lp, TAR, lp->skb_physaddr);
+		macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
 		/* Set length of the packet in the Transmit Control register */
 		macb_writel(lp, TCR, skb->len);
 
@@ -3765,6 +4127,9 @@
 	struct net_device *dev = dev_id;
 	struct macb *lp = netdev_priv(dev);
 	u32 intstatus, ctl;
+	unsigned int desc;
+	unsigned int qlen;
+	u32 tsr;
 
 	/* MAC Interrupt Status register indicates what interrupts are pending.
 	 * It is automatically cleared once read.
@@ -3776,20 +4141,39 @@
 		at91ether_rx(dev);
 
 	/* Transmit complete */
-	if (intstatus & MACB_BIT(TCOMP)) {
+	if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) {
 		/* The TCOM bit is set even if the transmission failed */
 		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
 			dev->stats.tx_errors++;
 
-		if (lp->skb) {
-			dev_kfree_skb_irq(lp->skb);
-			lp->skb = NULL;
-			dma_unmap_single(NULL, lp->skb_physaddr,
-					 lp->skb_length, DMA_TO_DEVICE);
+		spin_lock(&lp->lock);
+
+		tsr = macb_readl(lp, TSR);
+
+		/* we have three possibilities here:
+		 *   - all pending packets transmitted (TGO, implies BNQ)
+		 *   - only first packet transmitted (!TGO && BNQ)
+		 *   - two frames pending (!TGO && !BNQ)
+		 * Note that TGO ("transmit go") is called "IDLE" on RM9200.
+		 */
+		qlen = (tsr & MACB_BIT(TGO)) ? 0 :
+			(tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2;
+
+		while (lp->rm9200_tx_len > qlen) {
+			desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1;
+			dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
+			lp->rm9200_txq[desc].skb = NULL;
+			dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
+					 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
 			dev->stats.tx_packets++;
-			dev->stats.tx_bytes += lp->skb_length;
+			dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
+			lp->rm9200_tx_len--;
 		}
-		netif_wake_queue(dev);
+
+		if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+
+		spin_unlock(&lp->lock);
 	}
 
 	/* Work-around for EMAC Errata section 41.3.1 */
@@ -3833,13 +4217,14 @@
 
 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
 			      struct clk **hclk, struct clk **tx_clk,
-			      struct clk **rx_clk)
+			      struct clk **rx_clk, struct clk **tsu_clk)
 {
 	int err;
 
 	*hclk = NULL;
 	*tx_clk = NULL;
 	*rx_clk = NULL;
+	*tsu_clk = NULL;
 
 	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
 	if (IS_ERR(*pclk))
@@ -3859,7 +4244,6 @@
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct macb *bp = netdev_priv(dev);
 	int err;
-	u32 reg;
 
 	bp->queues[0].bp = bp;
 
....@@ -3873,14 +4257,114 @@
38734257
38744258 macb_writel(bp, NCR, 0);
38754259
3876
- reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3877
- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3878
- reg |= MACB_BIT(RM9200_RMII);
3879
-
3880
- macb_writel(bp, NCFGR, reg);
4260
+ macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
38814261
38824262 return 0;
38834263 }
4264
+
4265
+static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
4266
+ unsigned long parent_rate)
4267
+{
4268
+ return mgmt->rate;
4269
+}
4270
+
4271
+static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
4272
+ unsigned long *parent_rate)
4273
+{
4274
+ if (WARN_ON(rate < 2500000))
4275
+ return 2500000;
4276
+ else if (rate == 2500000)
4277
+ return 2500000;
4278
+ else if (WARN_ON(rate < 13750000))
4279
+ return 2500000;
4280
+ else if (WARN_ON(rate < 25000000))
4281
+ return 25000000;
4282
+ else if (rate == 25000000)
4283
+ return 25000000;
4284
+ else if (WARN_ON(rate < 75000000))
4285
+ return 25000000;
4286
+ else if (WARN_ON(rate < 125000000))
4287
+ return 125000000;
4288
+ else if (rate == 125000000)
4289
+ return 125000000;
4290
+
4291
+ WARN_ON(rate > 125000000);
4292
+
4293
+ return 125000000;
4294
+}
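The FU540 TX clock has exactly three usable rates, one per link speed: 2.5 MHz for 10 Mbit/s, 25 MHz for 100 Mbit/s and 125 MHz for 1 Gbit/s. round_rate above snaps any request onto that ladder, warning on inexact requests; stripped of the WARN_ONs it reduces to two midpoint comparisons, as this self-contained restatement shows (plain C, illustrative, not driver code):

#include <assert.h>

/* Same ladder as fu540_macb_tx_round_rate(), without the WARN_ONs:
 * requests below the 13.75/75 MHz midpoints snap to 2.5/25 MHz,
 * everything else lands on 125 MHz.
 */
static unsigned long fu540_tx_ladder(unsigned long rate)
{
	if (rate < 13750000)
		return 2500000;
	if (rate < 75000000)
		return 25000000;
	return 125000000;
}

int main(void)
{
	assert(fu540_tx_ladder(2500000) == 2500000);	/* 10 Mbit/s */
	assert(fu540_tx_ladder(25000000) == 25000000);	/* 100 Mbit/s */
	assert(fu540_tx_ladder(125000000) == 125000000);/* 1 Gbit/s */
	assert(fu540_tx_ladder(50000000) == 25000000);	/* snaps down */
	return 0;
}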
4295
+
4296
+static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4297
+ unsigned long parent_rate)
4298
+{
4299
+ rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4300
+ if (rate != 125000000)
4301
+ iowrite32(1, mgmt->reg);
4302
+ else
4303
+ iowrite32(0, mgmt->reg);
4304
+ mgmt->rate = rate;
4305
+
4306
+ return 0;
4307
+}
4308
+
4309
+static const struct clk_ops fu540_c000_ops = {
4310
+ .recalc_rate = fu540_macb_tx_recalc_rate,
4311
+ .round_rate = fu540_macb_tx_round_rate,
4312
+ .set_rate = fu540_macb_tx_set_rate,
4313
+};
4314
+
4315
+static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4316
+ struct clk **hclk, struct clk **tx_clk,
4317
+ struct clk **rx_clk, struct clk **tsu_clk)
4318
+{
4319
+ struct clk_init_data init;
4320
+ int err = 0;
4321
+
4322
+ err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4323
+ if (err)
4324
+ return err;
4325
+
4326
+ mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4327
+ if (!mgmt)
4328
+ return -ENOMEM;
4329
+
4330
+ init.name = "sifive-gemgxl-mgmt";
4331
+ init.ops = &fu540_c000_ops;
4332
+ init.flags = 0;
4333
+ init.num_parents = 0;
4334
+
4335
+ mgmt->rate = 0;
4336
+ mgmt->hw.init = &init;
4337
+
4338
+ *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4339
+ if (IS_ERR(*tx_clk))
4340
+ return PTR_ERR(*tx_clk);
4341
+
4342
+ err = clk_prepare_enable(*tx_clk);
4343
+ if (err)
4344
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4345
+ else
4346
+ dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4347
+
4348
+ return 0;
4349
+}
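set_rate boils down to a single register bit: the GEMGXL management register is written with 1 for the low-speed paths and 0 for 125 MHz, and the chosen rate is cached for recalc_rate. Consumers see an ordinary clk; a hedged usage fragment (the 25 MHz value is an example for a 100 Mbit/s link, not code from this driver):

/* Illustrative fragment: clk_set_rate() on the registered tx_clk lands
 * in fu540_macb_tx_set_rate(), which flips mgmt->reg and caches rate.
 */
err = clk_set_rate(bp->tx_clk, 25000000);
if (err)
	netdev_err(bp->dev, "failed to set tx_clk rate: %d\n", err);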
4350
+
4351
+static int fu540_c000_init(struct platform_device *pdev)
4352
+{
4353
+ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4354
+ if (IS_ERR(mgmt->reg))
4355
+ return PTR_ERR(mgmt->reg);
4356
+
4357
+ return macb_init(pdev);
4358
+}
4359
+
4360
+static const struct macb_config fu540_c000_config = {
4361
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4362
+ MACB_CAPS_GEM_HAS_PTP,
4363
+ .dma_burst_length = 16,
4364
+ .clk_init = fu540_c000_clk_init,
4365
+ .init = fu540_c000_init,
4366
+ .jumbo_max_len = 10240,
4367
+};
38844368
38854369 static const struct macb_config at91sam9260_config = {
38864370 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
....@@ -3926,7 +4410,7 @@
39264410 };
39274411
39284412 static const struct macb_config emac_config = {
3929
- .caps = MACB_CAPS_NEEDS_RSTONUBR,
4413
+ .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
39304414 .clk_init = at91ether_clk_init,
39314415 .init = at91ether_init,
39324416 };
....@@ -3962,6 +4446,7 @@
39624446 { .compatible = "cdns,np4-macb", .data = &np4_config },
39634447 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
39644448 { .compatible = "cdns,gem", .data = &pc302gem_config },
4449
+ { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
39654450 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
39664451 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
39674452 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
....@@ -3970,6 +4455,7 @@
39704455 { .compatible = "cdns,emac", .data = &emac_config },
39714456 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
39724457 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4458
+ { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
39734459 { /* sentinel */ }
39744460 };
39754461 MODULE_DEVICE_TABLE(of, macb_dt_ids);
....@@ -3989,15 +4475,15 @@
39894475 {
39904476 const struct macb_config *macb_config = &default_gem_config;
39914477 int (*clk_init)(struct platform_device *, struct clk **,
3992
- struct clk **, struct clk **, struct clk **)
3993
- = macb_config->clk_init;
4478
+ struct clk **, struct clk **, struct clk **,
4479
+ struct clk **) = macb_config->clk_init;
39944480 int (*init)(struct platform_device *) = macb_config->init;
39954481 struct device_node *np = pdev->dev.of_node;
39964482 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4483
+ struct clk *tsu_clk = NULL;
39974484 unsigned int queue_mask, num_queues;
3998
- struct macb_platform_data *pdata;
39994485 bool native_io;
4000
- struct phy_device *phydev;
4486
+ phy_interface_t interface;
40014487 struct net_device *dev;
40024488 struct resource *regs;
40034489 void __iomem *mem;
....@@ -4021,10 +4507,15 @@
40214507 }
40224508 }
40234509
4024
- err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
4510
+ err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
40254511 if (err)
40264512 return err;
40274513
4514
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4515
+ pm_runtime_use_autosuspend(&pdev->dev);
4516
+ pm_runtime_get_noresume(&pdev->dev);
4517
+ pm_runtime_set_active(&pdev->dev);
4518
+ pm_runtime_enable(&pdev->dev);
40284519 native_io = hw_is_native_io(mem);
40294520
40304521 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
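Probe now brings runtime PM up before the hardware is touched further: autosuspend is configured with MACB_PM_TIMEOUT, a reference is taken with pm_runtime_get_noresume() so the device cannot runtime-suspend mid-probe, the device is marked active (clk_init() has just enabled the clocks), and only then is runtime PM enabled; the reference is dropped at the end of probe by the mark_last_busy/put_autosuspend pair further down. The same bring-up, restated with explanatory comments (kernel C sketch; dev stands for &pdev->dev):

/* Probe-time runtime-PM bring-up (illustrative restatement):
 * 1. configure autosuspend before anyone can trigger it;
 * 2. pin the device awake for the rest of probe;
 * 3. declare it active, matching the already-enabled clocks;
 * 4. hand control to the runtime-PM core.
 */
pm_runtime_set_autosuspend_delay(dev, MACB_PM_TIMEOUT);
pm_runtime_use_autosuspend(dev);
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);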
....@@ -4058,6 +4549,7 @@
40584549 bp->hclk = hclk;
40594550 bp->tx_clk = tx_clk;
40604551 bp->rx_clk = rx_clk;
4552
+ bp->tsu_clk = tsu_clk;
40614553 if (macb_config)
40624554 bp->jumbo_max_len = macb_config->jumbo_max_len;
40634555
....@@ -4109,27 +4601,21 @@
41094601 bp->rx_intr_mask |= MACB_BIT(RXUBR);
41104602
41114603 mac = of_get_mac_address(np);
4112
- if (mac) {
4604
+ if (PTR_ERR(mac) == -EPROBE_DEFER) {
4605
+ err = -EPROBE_DEFER;
4606
+ goto err_out_free_netdev;
4607
+ } else if (!IS_ERR_OR_NULL(mac)) {
41134608 ether_addr_copy(bp->dev->dev_addr, mac);
41144609 } else {
4115
- err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
4116
- if (err) {
4117
- if (err == -EPROBE_DEFER)
4118
- goto err_out_free_netdev;
4119
- macb_get_hwaddr(bp);
4120
- }
4610
+ macb_get_hwaddr(bp);
41214611 }
41224612
4123
- err = of_get_phy_mode(np);
4124
- if (err < 0) {
4125
- pdata = dev_get_platdata(&pdev->dev);
4126
- if (pdata && pdata->is_rmii)
4127
- bp->phy_interface = PHY_INTERFACE_MODE_RMII;
4128
- else
4129
- bp->phy_interface = PHY_INTERFACE_MODE_MII;
4130
- } else {
4131
- bp->phy_interface = err;
4132
- }
4613
+ err = of_get_phy_mode(np, &interface);
4614
+ if (err)
4615
+ /* not found in DT, MII by default */
4616
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
4617
+ else
4618
+ bp->phy_interface = interface;
41334619
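Two probe-time lookups change contract in this hunk: of_get_mac_address() can now return ERR_PTR(-EPROBE_DEFER) when the MAC address lives in a not-yet-ready nvmem cell, so probe defers instead of silently falling back to the hardware registers; and of_get_phy_mode() now returns an errno while storing the mode through a pointer. A minimal sketch of the new phy-mode contract (kernel C, illustrative):

/* of_get_phy_mode() (kernels >= 5.5): returns 0 and fills *interface
 * on success, a negative errno otherwise, making the MII fallback an
 * explicit branch rather than a sign check on the return value.
 */
phy_interface_t interface;
int err = of_get_phy_mode(np, &interface);

if (err)
	interface = PHY_INTERFACE_MODE_MII;	/* property absent: default */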
41344620 /* IP specific init */
41354621 err = init(pdev);
....@@ -4140,8 +4626,6 @@
41404626 if (err)
41414627 goto err_out_free_netdev;
41424628
4143
- phydev = dev->phydev;
4144
-
41454629 netif_carrier_off(dev);
41464630
41474631 err = register_netdev(dev);
....@@ -4150,23 +4634,19 @@
41504634 goto err_out_unregister_mdio;
41514635 }
41524636
4153
- tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
4154
- (unsigned long)bp);
4155
-
4156
- phy_attached_info(phydev);
4637
+ tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
41574638
41584639 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
41594640 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
41604641 dev->base_addr, dev->irq, dev->dev_addr);
41614642
4643
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
4644
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
4645
+
41624646 return 0;
41634647
41644648 err_out_unregister_mdio:
4165
- phy_disconnect(dev->phydev);
41664649 mdiobus_unregister(bp->mii_bus);
4167
- of_node_put(bp->phy_node);
4168
- if (np && of_phy_is_fixed_link(np))
4169
- of_phy_deregister_fixed_link(np);
41704650 mdiobus_free(bp->mii_bus);
41714651
41724652 err_out_free_netdev:
....@@ -4177,6 +4657,10 @@
41774657 clk_disable_unprepare(hclk);
41784658 clk_disable_unprepare(pclk);
41794659 clk_disable_unprepare(rx_clk);
4660
+ clk_disable_unprepare(tsu_clk);
4661
+ pm_runtime_disable(&pdev->dev);
4662
+ pm_runtime_set_suspended(&pdev->dev);
4663
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
41804664
41814665 return err;
41824666 }
....@@ -4185,27 +4669,27 @@
41854669 {
41864670 struct net_device *dev;
41874671 struct macb *bp;
4188
- struct device_node *np = pdev->dev.of_node;
41894672
41904673 dev = platform_get_drvdata(pdev);
41914674
41924675 if (dev) {
41934676 bp = netdev_priv(dev);
4194
- if (dev->phydev)
4195
- phy_disconnect(dev->phydev);
41964677 mdiobus_unregister(bp->mii_bus);
4197
- if (np && of_phy_is_fixed_link(np))
4198
- of_phy_deregister_fixed_link(np);
4199
- dev->phydev = NULL;
42004678 mdiobus_free(bp->mii_bus);
42014679
42024680 unregister_netdev(dev);
42034681 tasklet_kill(&bp->hresp_err_tasklet);
4204
- clk_disable_unprepare(bp->tx_clk);
4205
- clk_disable_unprepare(bp->hclk);
4206
- clk_disable_unprepare(bp->pclk);
4207
- clk_disable_unprepare(bp->rx_clk);
4208
- of_node_put(bp->phy_node);
4682
+ pm_runtime_disable(&pdev->dev);
4683
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
4684
+ if (!pm_runtime_suspended(&pdev->dev)) {
4685
+ clk_disable_unprepare(bp->tx_clk);
4686
+ clk_disable_unprepare(bp->hclk);
4687
+ clk_disable_unprepare(bp->pclk);
4688
+ clk_disable_unprepare(bp->rx_clk);
4689
+ clk_disable_unprepare(bp->tsu_clk);
4690
+ pm_runtime_set_suspended(&pdev->dev);
4691
+ }
4692
+ phylink_destroy(bp->phylink);
42094693 free_netdev(dev);
42104694 }
42114695
....@@ -4214,50 +4698,204 @@
42144698
42154699 static int __maybe_unused macb_suspend(struct device *dev)
42164700 {
4217
- struct platform_device *pdev = to_platform_device(dev);
4218
- struct net_device *netdev = platform_get_drvdata(pdev);
4701
+ struct net_device *netdev = dev_get_drvdata(dev);
42194702 struct macb *bp = netdev_priv(netdev);
4703
+ struct macb_queue *queue = bp->queues;
4704
+ unsigned long flags;
4705
+ unsigned int q;
4706
+ int err;
42204707
4221
- netif_carrier_off(netdev);
4222
- netif_device_detach(netdev);
4708
+ if (!netif_running(netdev))
4709
+ return 0;
42234710
42244711 if (bp->wol & MACB_WOL_ENABLED) {
4225
- macb_writel(bp, IER, MACB_BIT(WOL));
4226
- macb_writel(bp, WOL, MACB_BIT(MAG));
4712
+ spin_lock_irqsave(&bp->lock, flags);
4713
+ /* Flush all status bits */
4714
+ macb_writel(bp, TSR, -1);
4715
+ macb_writel(bp, RSR, -1);
4716
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
4717
+ ++q, ++queue) {
4718
+ /* Disable all interrupts */
4719
+ queue_writel(queue, IDR, -1);
4720
+ queue_readl(queue, ISR);
4721
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
4722
+ queue_writel(queue, ISR, -1);
4723
+ }
4724
+ /* Change interrupt handler and
4725
+ * enable WoL IRQ on queue 0
4726
+ */
4727
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
4728
+ if (macb_is_gem(bp)) {
4729
+ err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
4730
+ IRQF_SHARED, netdev->name, bp->queues);
4731
+ if (err) {
4732
+ dev_err(dev,
4733
+ "Unable to request IRQ %d (error %d)\n",
4734
+ bp->queues[0].irq, err);
4735
+ spin_unlock_irqrestore(&bp->lock, flags);
4736
+ return err;
4737
+ }
4738
+ queue_writel(bp->queues, IER, GEM_BIT(WOL));
4739
+ gem_writel(bp, WOL, MACB_BIT(MAG));
4740
+ } else {
4741
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
4742
+ IRQF_SHARED, netdev->name, bp->queues);
4743
+ if (err) {
4744
+ dev_err(dev,
4745
+ "Unable to request IRQ %d (error %d)\n",
4746
+ bp->queues[0].irq, err);
4747
+ spin_unlock_irqrestore(&bp->lock, flags);
4748
+ return err;
4749
+ }
4750
+ queue_writel(bp->queues, IER, MACB_BIT(WOL));
4751
+ macb_writel(bp, WOL, MACB_BIT(MAG));
4752
+ }
4753
+ spin_unlock_irqrestore(&bp->lock, flags);
4754
+
42274755 enable_irq_wake(bp->queues[0].irq);
4228
- } else {
4229
- clk_disable_unprepare(bp->tx_clk);
4230
- clk_disable_unprepare(bp->hclk);
4231
- clk_disable_unprepare(bp->pclk);
4232
- clk_disable_unprepare(bp->rx_clk);
42334756 }
4757
+
4758
+ netif_device_detach(netdev);
4759
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
4760
+ ++q, ++queue)
4761
+ napi_disable(&queue->napi);
4762
+
4763
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
4764
+ rtnl_lock();
4765
+ phylink_stop(bp->phylink);
4766
+ rtnl_unlock();
4767
+ spin_lock_irqsave(&bp->lock, flags);
4768
+ macb_reset_hw(bp);
4769
+ spin_unlock_irqrestore(&bp->lock, flags);
4770
+ }
4771
+
4772
+ if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4773
+ bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
4774
+
4775
+ if (netdev->hw_features & NETIF_F_NTUPLE)
4776
+ bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
4777
+
4778
+ if (bp->ptp_info)
4779
+ bp->ptp_info->ptp_remove(netdev);
4780
+ if (!device_may_wakeup(dev))
4781
+ pm_runtime_force_suspend(dev);
42344782
42354783 return 0;
42364784 }
42374785
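The WoL half of macb_suspend() above quiesces and re-arms interrupts around a handler swap: all queue interrupts are disabled and acknowledged, the queue-0 IRQ is re-requested with a wake-only handler (gem_wol_interrupt or macb_wol_interrupt), the MAG bit arms magic-packet matching, and the IRQ is marked wake-capable. The swap itself is the standard managed-IRQ pattern, sketched here with placeholder names (irq, data, wol_handler and name are stand-ins, not driver symbols):

/* Swap in a wake-only handler for system suspend (illustrative):
 * release the managed IRQ, re-request it with the WoL handler,
 * then allow it to wake the system.
 */
devm_free_irq(dev, irq, data);
err = devm_request_irq(dev, irq, wol_handler, IRQF_SHARED, name, data);
if (err)
	return err;
enable_irq_wake(irq);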
42384786 static int __maybe_unused macb_resume(struct device *dev)
42394787 {
4240
- struct platform_device *pdev = to_platform_device(dev);
4241
- struct net_device *netdev = platform_get_drvdata(pdev);
4788
+ struct net_device *netdev = dev_get_drvdata(dev);
42424789 struct macb *bp = netdev_priv(netdev);
4790
+ struct macb_queue *queue = bp->queues;
4791
+ unsigned long flags;
4792
+ unsigned int q;
4793
+ int err;
4794
+
4795
+ if (!netif_running(netdev))
4796
+ return 0;
4797
+
4798
+ if (!device_may_wakeup(dev))
4799
+ pm_runtime_force_resume(dev);
42434800
42444801 if (bp->wol & MACB_WOL_ENABLED) {
4245
- macb_writel(bp, IDR, MACB_BIT(WOL));
4246
- macb_writel(bp, WOL, 0);
4802
+ spin_lock_irqsave(&bp->lock, flags);
4803
+ /* Disable WoL */
4804
+ if (macb_is_gem(bp)) {
4805
+ queue_writel(bp->queues, IDR, GEM_BIT(WOL));
4806
+ gem_writel(bp, WOL, 0);
4807
+ } else {
4808
+ queue_writel(bp->queues, IDR, MACB_BIT(WOL));
4809
+ macb_writel(bp, WOL, 0);
4810
+ }
4811
+ /* Clear ISR on queue 0 */
4812
+ queue_readl(bp->queues, ISR);
4813
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
4814
+ queue_writel(bp->queues, ISR, -1);
4815
+ /* Replace interrupt handler on queue 0 */
4816
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
4817
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
4818
+ IRQF_SHARED, netdev->name, bp->queues);
4819
+ if (err) {
4820
+ dev_err(dev,
4821
+ "Unable to request IRQ %d (error %d)\n",
4822
+ bp->queues[0].irq, err);
4823
+ spin_unlock_irqrestore(&bp->lock, flags);
4824
+ return err;
4825
+ }
4826
+ spin_unlock_irqrestore(&bp->lock, flags);
4827
+
42474828 disable_irq_wake(bp->queues[0].irq);
4248
- } else {
4829
+
4830
+ /* Now make sure we disable the PHY before moving
4831
+ * to the common restore path
4832
+ */
4833
+ rtnl_lock();
4834
+ phylink_stop(bp->phylink);
4835
+ rtnl_unlock();
4836
+ }
4837
+
4838
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
4839
+ ++q, ++queue)
4840
+ napi_enable(&queue->napi);
4841
+
4842
+ if (netdev->hw_features & NETIF_F_NTUPLE)
4843
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
4844
+
4845
+ if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4846
+ macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
4847
+
4848
+ macb_writel(bp, NCR, MACB_BIT(MPE));
4849
+ macb_init_hw(bp);
4850
+ macb_set_rx_mode(netdev);
4851
+ macb_restore_features(bp);
4852
+ rtnl_lock();
4853
+ phylink_start(bp->phylink);
4854
+ rtnl_unlock();
4855
+
4856
+ netif_device_attach(netdev);
4857
+ if (bp->ptp_info)
4858
+ bp->ptp_info->ptp_init(netdev);
4859
+
4860
+ return 0;
4861
+}
4862
+
4863
+static int __maybe_unused macb_runtime_suspend(struct device *dev)
4864
+{
4865
+ struct net_device *netdev = dev_get_drvdata(dev);
4866
+ struct macb *bp = netdev_priv(netdev);
4867
+
4868
+ if (!(device_may_wakeup(dev))) {
4869
+ clk_disable_unprepare(bp->tx_clk);
4870
+ clk_disable_unprepare(bp->hclk);
4871
+ clk_disable_unprepare(bp->pclk);
4872
+ clk_disable_unprepare(bp->rx_clk);
4873
+ }
4874
+ clk_disable_unprepare(bp->tsu_clk);
4875
+
4876
+ return 0;
4877
+}
4878
+
4879
+static int __maybe_unused macb_runtime_resume(struct device *dev)
4880
+{
4881
+ struct net_device *netdev = dev_get_drvdata(dev);
4882
+ struct macb *bp = netdev_priv(netdev);
4883
+
4884
+ if (!(device_may_wakeup(dev))) {
42494885 clk_prepare_enable(bp->pclk);
42504886 clk_prepare_enable(bp->hclk);
42514887 clk_prepare_enable(bp->tx_clk);
42524888 clk_prepare_enable(bp->rx_clk);
42534889 }
4254
-
4255
- netif_device_attach(netdev);
4890
+ clk_prepare_enable(bp->tsu_clk);
42564891
42574892 return 0;
42584893 }
42594894
4260
-static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
4895
+static const struct dev_pm_ops macb_pm_ops = {
4896
+ SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
4897
+ SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
4898
+};
42614899
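Switching away from SIMPLE_DEV_PM_OPS lets the driver register runtime callbacks next to system sleep: SET_SYSTEM_SLEEP_PM_OPS fills the suspend/resume (and hibernation) slots, SET_RUNTIME_PM_OPS the runtime_suspend/runtime_resume ones, and both compile away without CONFIG_PM. Note the asymmetry in the runtime hooks above: the bus and packet clocks are gated only when the device is not a wake source, while tsu_clk is gated unconditionally. Any path that touches registers while the interface is closed must now bracket the access, roughly like this (illustrative fragment):

/* Illustrative runtime-PM guard around MMIO while the device may be
 * runtime-suspended: resume synchronously, then re-arm autosuspend.
 */
pm_runtime_get_sync(&bp->pdev->dev);
/* ... register access ... */
pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);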
42624900 static struct platform_driver macb_driver = {
42634901 .probe = macb_probe,