2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,15 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (c) 2014-2020 Broadcom
  */
 
 #define pr_fmt(fmt) "bcmgenet: " fmt
 
+#include <linux/acpi.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -25,11 +23,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm.h>
 #include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
 #include <net/arp.h>
 
 #include <linux/mii.h>
@@ -98,12 +91,6 @@
 				  void __iomem *d, u32 value)
 {
 	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
-}
-
-static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
-					    void __iomem *d)
-{
-	return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
 }
 
 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
@@ -472,6 +459,298 @@
 			genet_dma_ring_regs[r]);
 }
 
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+	u32 offset;
+	u32 reg;
+
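+	/* Filters 0-31 are enabled in the second HFB_FLT_ENABLE register,
+	 * filters 32-63 in the first.
+	 */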
+	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+	reg = bcmgenet_hfb_reg_readl(priv, offset);
+	reg |= (1 << (f_index % 32));
+	bcmgenet_hfb_reg_writel(priv, reg, offset);
+	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+	reg |= RBUF_HFB_EN;
+	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+}
+
+static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+	u32 offset, reg, reg1;
+
+	offset = HFB_FLT_ENABLE_V3PLUS;
+	reg = bcmgenet_hfb_reg_readl(priv, offset);
+	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+	if (f_index < 32) {
+		reg1 &= ~(1 << (f_index % 32));
+		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+	} else {
+		reg &= ~(1 << (f_index % 32));
+		bcmgenet_hfb_reg_writel(priv, reg, offset);
+	}
+	if (!reg && !reg1) {
+		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+		reg &= ~RBUF_HFB_EN;
+		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+	}
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+						     u32 f_index, u32 rx_queue)
+{
+	u32 offset;
+	u32 reg;
+
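+	/* Each DMA_INDEX2RING register maps eight filters, 4 bits of
+	 * Rx queue index per filter.
+	 */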
+	offset = f_index / 8;
+	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+	reg &= ~(0xF << (4 * (f_index % 8)));
+	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+					   u32 f_index, u32 f_length)
+{
+	u32 offset;
+	u32 reg;
+
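+	/* Four 8-bit filter lengths are packed per HFB_FLT_LEN register,
+	 * with the registers indexed from the last filter down.
+	 */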
+	offset = HFB_FLT_LEN_V3PLUS +
+		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+		 sizeof(u32);
+	reg = bcmgenet_hfb_reg_readl(priv, offset);
+	reg &= ~(0xFF << (8 * (f_index % 4)));
+	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+	bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
+{
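+	/* The HFB matches at nibble granularity (see
+	 * bcmgenet_hfb_insert_data()), so only whole-nibble masks
+	 * can be programmed.
+	 */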
+	while (size) {
+		switch (*(unsigned char *)mask++) {
+		case 0x00:
+		case 0x0f:
+		case 0xf0:
+		case 0xff:
+			size--;
+			continue;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+#define VALIDATE_MASK(x) \
+	bcmgenet_hfb_validate_mask(&(x), sizeof(x))
+
+static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
+				    u32 offset, void *val, void *mask,
+				    size_t size)
+{
+	u32 index, tmp;
+
+	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
+	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
+
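+	/* Each 32-bit word of filter RAM holds two bytes of match data,
+	 * the even offset in bits [15:8] and the odd offset in bits [7:0],
+	 * plus two 2-bit fields in bits [19:16] selecting which nibbles
+	 * of each byte participate in the match.
+	 */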
+	while (size--) {
+		if (offset++ & 1) {
+			tmp &= ~0x300FF;
+			tmp |= (*(unsigned char *)val++);
+			switch ((*(unsigned char *)mask++)) {
+			case 0xFF:
+				tmp |= 0x30000;
+				break;
+			case 0xF0:
+				tmp |= 0x20000;
+				break;
+			case 0x0F:
+				tmp |= 0x10000;
+				break;
+			}
+			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
+			if (size)
+				tmp = bcmgenet_hfb_readl(priv,
+							 index * sizeof(u32));
+		} else {
+			tmp &= ~0xCFF00;
+			tmp |= (*(unsigned char *)val++) << 8;
+			switch ((*(unsigned char *)mask++)) {
+			case 0xFF:
+				tmp |= 0xC0000;
+				break;
+			case 0xF0:
+				tmp |= 0x80000;
+				break;
+			case 0x0F:
+				tmp |= 0x40000;
+				break;
+			}
+			if (!size)
+				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
+		}
+	}
+
+	return 0;
+}
+
+static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+					     struct bcmgenet_rxnfc_rule *rule)
+{
+	struct ethtool_rx_flow_spec *fs = &rule->fs;
+	u32 offset = 0, f_length = 0, f;
+	u8 val_8, mask_8;
+	__be16 val_16;
+	u16 mask_16;
+	size_t size;
+
+	f = fs->location;
+	if (fs->flow_type & FLOW_MAC_EXT) {
+		bcmgenet_hfb_insert_data(priv, f, 0,
+					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
+					 sizeof(fs->h_ext.h_dest));
+	}
+
+	if (fs->flow_type & FLOW_EXT) {
+		if (fs->m_ext.vlan_etype ||
+		    fs->m_ext.vlan_tci) {
+			bcmgenet_hfb_insert_data(priv, f, 12,
+						 &fs->h_ext.vlan_etype,
+						 &fs->m_ext.vlan_etype,
+						 sizeof(fs->h_ext.vlan_etype));
+			bcmgenet_hfb_insert_data(priv, f, 14,
+						 &fs->h_ext.vlan_tci,
+						 &fs->m_ext.vlan_tci,
+						 sizeof(fs->h_ext.vlan_tci));
+			offset += VLAN_HLEN;
+			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
+		}
+	}
+
+	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case ETHER_FLOW:
+		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
+		bcmgenet_hfb_insert_data(priv, f, 0,
+					 &fs->h_u.ether_spec.h_dest,
+					 &fs->m_u.ether_spec.h_dest,
+					 sizeof(fs->h_u.ether_spec.h_dest));
+		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
+					 &fs->h_u.ether_spec.h_source,
+					 &fs->m_u.ether_spec.h_source,
+					 sizeof(fs->h_u.ether_spec.h_source));
+		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
+					 &fs->h_u.ether_spec.h_proto,
+					 &fs->m_u.ether_spec.h_proto,
+					 sizeof(fs->h_u.ether_spec.h_proto));
+		break;
+	case IP_USER_FLOW:
+		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
+		/* Specify IP Ether Type */
+		val_16 = htons(ETH_P_IP);
+		mask_16 = 0xFFFF;
+		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
+					 &val_16, &mask_16, sizeof(val_16));
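+		/* The offsets below are ETH_HLEN plus the IPv4 header field
+		 * offsets: tos at 1, protocol at 9, saddr at 12, daddr at 16.
+		 */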
+		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
+					 &fs->h_u.usr_ip4_spec.tos,
+					 &fs->m_u.usr_ip4_spec.tos,
+					 sizeof(fs->h_u.usr_ip4_spec.tos));
+		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
+					 &fs->h_u.usr_ip4_spec.proto,
+					 &fs->m_u.usr_ip4_spec.proto,
+					 sizeof(fs->h_u.usr_ip4_spec.proto));
+		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
+					 &fs->h_u.usr_ip4_spec.ip4src,
+					 &fs->m_u.usr_ip4_spec.ip4src,
+					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
+		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
+					 &fs->h_u.usr_ip4_spec.ip4dst,
+					 &fs->m_u.usr_ip4_spec.ip4dst,
+					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+			break;
+
+		/* Only supports 20 byte IPv4 header */
+		val_8 = 0x45;
+		mask_8 = 0xFF;
+		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
+					 &val_8, &mask_8,
+					 sizeof(val_8));
+		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
+		bcmgenet_hfb_insert_data(priv, f,
+					 ETH_HLEN + 20 + offset,
+					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
+					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
+					 size);
+		f_length += DIV_ROUND_UP(size, 2);
+		break;
+	}
+
+	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
+	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
+		/* Ring 0 flows can be handled by the default Descriptor Ring
+		 * We'll map them to ring 0, but don't enable the filter
+		 */
+		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
+		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
+	} else {
+		/* Other Rx rings are direct mapped here */
+		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
+							 fs->ring_cookie);
+		bcmgenet_hfb_enable_filter(priv, f);
+		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
+	}
+}
+
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+	u32 base, i;
+
+	base = f_index * priv->hw_params->hfb_filter_size;
+	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
+		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
+}
+
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+	u32 i;
+
+	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+		return;
+
+	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+		bcmgenet_rdma_writel(priv, 0x0, i);
+
+	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+		bcmgenet_hfb_reg_writel(priv, 0x0,
+					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
+		bcmgenet_hfb_clear_filter(priv, i);
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+	int i;
+
+	INIT_LIST_HEAD(&priv->rxnfc_list);
+	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+		return;
+
+	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
+		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
+	}
+
+	bcmgenet_hfb_clear(priv);
+}
+
 static int bcmgenet_begin(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -514,76 +793,22 @@
 	return phy_ethtool_ksettings_set(dev->phydev, cmd);
 }
 
-static int bcmgenet_set_rx_csum(struct net_device *dev,
-				netdev_features_t wanted)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	u32 rbuf_chk_ctrl;
-	bool rx_csum_en;
-
-	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
-
-	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
-
-	/* enable rx checksumming */
-	if (rx_csum_en)
-		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
-	else
-		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
-	priv->desc_rxchk_en = rx_csum_en;
-
-	/* If UniMAC forwards CRC, we need to skip over it to get
-	 * a valid CHK bit to be set in the per-packet status word
-	 */
-	if (rx_csum_en && priv->crc_fwd_en)
-		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
-	else
-		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
-
-	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-
-	return 0;
-}
-
-static int bcmgenet_set_tx_csum(struct net_device *dev,
-				netdev_features_t wanted)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	bool desc_64b_en;
-	u32 tbuf_ctrl, rbuf_ctrl;
-
-	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
-	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-
-	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
-
-	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
-	if (desc_64b_en) {
-		tbuf_ctrl |= RBUF_64B_EN;
-		rbuf_ctrl |= RBUF_64B_EN;
-	} else {
-		tbuf_ctrl &= ~RBUF_64B_EN;
-		rbuf_ctrl &= ~RBUF_64B_EN;
-	}
-	priv->desc_64b_en = desc_64b_en;
-
-	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
-	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-
-	return 0;
-}
-
 static int bcmgenet_set_features(struct net_device *dev,
 				 netdev_features_t features)
 {
-	netdev_features_t changed = features ^ dev->features;
-	netdev_features_t wanted = dev->wanted_features;
-	int ret = 0;
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 reg;
+	int ret;
 
-	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
-		ret = bcmgenet_set_tx_csum(dev, wanted);
-	if (changed & (NETIF_F_RXCSUM))
-		ret = bcmgenet_set_rx_csum(dev, wanted);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	/* Make sure we reflect the value of CRC_CMD_FWD */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -646,7 +871,7 @@
 static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
 					  struct ethtool_coalesce *ec)
 {
-	struct net_dim_cq_moder moder;
+	struct dim_cq_moder moder;
 	u32 usecs, pkts;
 
 	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
@@ -687,10 +912,6 @@
 	 * always generate an interrupt either after MBDONE packets have been
 	 * transmitted, or when the ring is empty.
 	 */
-	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
-	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
-	    ec->use_adaptive_tx_coalesce)
-		return -EOPNOTSUPP;
 
 	/* Program all TX queues with the same values, as there is no
 	 * ethtool knob to do coalescing on a per-queue basis
@@ -863,6 +1084,9 @@
 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
 	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
+	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
+			    mib.tx_realloc_tsb_failed),
 	/* Per TX queues */
 	STAT_GENET_Q(0),
 	STAT_GENET_Q(1),
@@ -877,7 +1101,6 @@
 				  struct ethtool_drvinfo *info)
 {
 	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
-	strlcpy(info->version, "v2.0", sizeof(info->version));
 }
 
 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -962,10 +1185,10 @@
 			continue;
 		case BCMGENET_STAT_RUNT:
 			offset += BCMGENET_STAT_OFFSET;
-			/* fall through */
+			fallthrough;
 		case BCMGENET_STAT_MIB_TX:
 			offset += BCMGENET_STAT_OFFSET;
-			/* fall through */
+			fallthrough;
 		case BCMGENET_STAT_MIB_RX:
 			val = bcmgenet_umac_readl(priv,
 						  UMAC_MIB_START + j + offset);
@@ -1112,8 +1335,233 @@
 	return phy_ethtool_set_eee(dev->phydev, e);
 }
 
+static int bcmgenet_validate_flow(struct net_device *dev,
+				  struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_usrip4_spec *l4_mask;
+	struct ethhdr *eth_mask;
+
+	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
+			   cmd->fs.location);
+		return -EINVAL;
+	}
+
+	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case IP_USER_FLOW:
+		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
+		/* don't allow mask which isn't valid */
+		if (VALIDATE_MASK(l4_mask->ip4src) ||
+		    VALIDATE_MASK(l4_mask->ip4dst) ||
+		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
+		    VALIDATE_MASK(l4_mask->proto) ||
+		    VALIDATE_MASK(l4_mask->ip_ver) ||
+		    VALIDATE_MASK(l4_mask->tos)) {
+			netdev_err(dev, "rxnfc: Unsupported mask\n");
+			return -EINVAL;
+		}
+		break;
+	case ETHER_FLOW:
+		eth_mask = &cmd->fs.m_u.ether_spec;
+		/* don't allow mask which isn't valid */
+		if (VALIDATE_MASK(eth_mask->h_dest) ||
+		    VALIDATE_MASK(eth_mask->h_source) ||
+		    VALIDATE_MASK(eth_mask->h_proto)) {
+			netdev_err(dev, "rxnfc: Unsupported mask\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
+			   cmd->fs.flow_type);
+		return -EINVAL;
+	}
+
+	if ((cmd->fs.flow_type & FLOW_EXT)) {
+		/* don't allow mask which isn't valid */
+		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
+		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
+			netdev_err(dev, "rxnfc: Unsupported mask\n");
+			return -EINVAL;
+		}
+		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
+			netdev_err(dev, "rxnfc: user-def not supported\n");
+			return -EINVAL;
+		}
+	}
+
+	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
+		/* don't allow mask which isn't valid */
+		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
+			netdev_err(dev, "rxnfc: Unsupported mask\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int bcmgenet_insert_flow(struct net_device *dev,
+				struct ethtool_rxnfc *cmd)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_rxnfc_rule *loc_rule;
+	int err;
+
+	if (priv->hw_params->hfb_filter_size < 128) {
+		netdev_err(dev, "rxnfc: Not supported by this device\n");
+		return -EINVAL;
+	}
+
+	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
+	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
+			   cmd->fs.ring_cookie);
+		return -EINVAL;
+	}
+
+	err = bcmgenet_validate_flow(dev, cmd);
+	if (err)
+		return err;
+
+	loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
+		list_del(&loc_rule->list);
+		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+	}
+	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+	memcpy(&loc_rule->fs, &cmd->fs,
+	       sizeof(struct ethtool_rx_flow_spec));
+
+	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
+
+	list_add_tail(&loc_rule->list, &priv->rxnfc_list);
+
+	return 0;
+}
+
+static int bcmgenet_delete_flow(struct net_device *dev,
+				struct ethtool_rxnfc *cmd)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_rxnfc_rule *rule;
+	int err = 0;
+
+	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+		return -EINVAL;
+
+	rule = &priv->rxnfc_rules[cmd->fs.location];
+	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
+		list_del(&rule->list);
+		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+	}
+	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
+
+out:
+	return err;
+}
+
+static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		err = bcmgenet_insert_flow(dev, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		err = bcmgenet_delete_flow(dev, cmd);
+		break;
+	default:
+		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
+			    cmd->cmd);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			     int loc)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_rxnfc_rule *rule;
+	int err = 0;
+
+	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+		return -EINVAL;
+
+	rule = &priv->rxnfc_rules[loc];
+	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
+		err = -ENOENT;
+	else
+		memcpy(&cmd->fs, &rule->fs,
+		       sizeof(struct ethtool_rx_flow_spec));
+
+	return err;
+}
+
+static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
+{
+	struct list_head *pos;
+	int res = 0;
+
+	list_for_each(pos, &priv->rxnfc_list)
+		res++;
+
+	return res;
+}
+
+static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			      u32 *rule_locs)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_rxnfc_rule *rule;
+	int err = 0;
+	int i = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = priv->hw_params->rx_queues ?: 1;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
+		cmd->data = MAX_NUM_OF_FS_RULES;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		list_for_each_entry(rule, &priv->rxnfc_list, list)
+			if (i < cmd->rule_cnt)
+				rule_locs[i++] = rule->fs.location;
+		cmd->rule_cnt = i;
+		cmd->data = MAX_NUM_OF_FS_RULES;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
 /* standard ethtool support functions. */
 static const struct ethtool_ops bcmgenet_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES |
+				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
 	.begin			= bcmgenet_begin,
 	.complete		= bcmgenet_complete,
 	.get_strings		= bcmgenet_get_strings,
@@ -1132,6 +1580,9 @@
 	.set_coalesce		= bcmgenet_set_coalesce,
 	.get_link_ksettings	= bcmgenet_get_link_ksettings,
 	.set_link_ksettings	= bcmgenet_set_link_ksettings,
+	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_rxnfc		= bcmgenet_get_rxnfc,
+	.set_rxnfc		= bcmgenet_set_rxnfc,
 };
 
 /* Power down the unimac, based on mode. */
@@ -1224,18 +1675,6 @@
 	default:
 		break;
 	}
-}
-
-/* ioctl handle special commands that are not present in ethtool. */
-static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	if (!netif_running(dev))
-		return -EINVAL;
-
-	if (!dev->phydev)
-		return -ENODEV;
-
-	return phy_mii_ioctl(dev->phydev, rq, cmd);
 }
 
 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1488,9 +1927,10 @@
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
-static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
-					    struct sk_buff *skb)
+static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
+					struct sk_buff *skb)
 {
+	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct status_64 *status = NULL;
 	struct sk_buff *new_skb;
 	u16 offset;
@@ -1503,12 +1943,15 @@
 	 * enough headroom for us to insert 64B status block.
 	 */
 	new_skb = skb_realloc_headroom(skb, sizeof(*status));
-	dev_kfree_skb(skb);
 	if (!new_skb) {
+		dev_kfree_skb_any(skb);
+		priv->mib.tx_realloc_tsb_failed++;
 		dev->stats.tx_dropped++;
 		return NULL;
 	}
+	dev_consume_skb_any(skb);
 	skb = new_skb;
+	priv->mib.tx_realloc_tsb++;
 	}
 
 	skb_push(skb, sizeof(*status));
@@ -1524,29 +1967,29 @@
 		ip_proto = ipv6_hdr(skb)->nexthdr;
 		break;
 	default:
-		return skb;
+		/* don't use UDP flag */
+		ip_proto = 0;
+		break;
 	}
 
 	offset = skb_checksum_start_offset(skb) - sizeof(*status);
 	tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
-		       (offset + skb->csum_offset);
+		       (offset + skb->csum_offset) |
+		       STATUS_TX_CSUM_LV;
 
-	/* Set the length valid bit for TCP and UDP and just set
-	 * the special UDP flag for IPv4, else just set to 0.
-	 */
-	if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
-		tx_csum_info |= STATUS_TX_CSUM_LV;
-		if (ip_proto == IPPROTO_UDP &&
-		    ip_ver == htons(ETH_P_IP))
-			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
-	} else {
-		tx_csum_info = 0;
-	}
+	/* Set the special UDP flag for UDP */
+	if (ip_proto == IPPROTO_UDP)
+		tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
 
 	status->tx_csum_info = tx_csum_info;
 	}
 
 	return skb;
+}
+
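+/* Pull the TSB back off the head of the skb so that skb_tx_timestamp()
+ * and anything else that later inspects the skb sees the frame starting
+ * at the Ethernet header rather than the driver-private status block.
+ */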
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+	__skb_pull(skb, sizeof(struct status_64));
 }
 
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1599,13 +2042,11 @@
 	 */
 	GENET_CB(skb)->bytes_sent = skb->len;
 
-	/* set the SKB transmit checksum */
-	if (priv->desc_64b_en) {
-		skb = bcmgenet_put_tx_csum(dev, skb);
-		if (!skb) {
-			ret = NETDEV_TX_OK;
-			goto out;
-		}
+	/* add the Transmit Status Block */
+	skb = bcmgenet_add_tsb(dev, skb);
+	if (!skb) {
+		ret = NETDEV_TX_OK;
+		goto out;
 	}
 
 	for (i = 0; i <= nr_frags; i++) {
@@ -1657,6 +2098,8 @@
 	}
 
 	GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+	bcmgenet_hide_tsb(skb);
 	skb_tx_timestamp(skb);
 
 	/* Decrement total BD count and advance our write pointer */
@@ -1669,7 +2112,7 @@
 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
 		netif_tx_stop_queue(txq);
 
-	if (!skb->xmit_more || netif_xmit_stopped(txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 		/* Packets are ready, update producer index */
 		bcmgenet_tdma_ring_writel(priv, ring->index,
 					  ring->prod_index, TDMA_PROD_INDEX);
@@ -1751,7 +2194,6 @@
 	unsigned int bytes_processed = 0;
 	unsigned int p_index, mask;
 	unsigned int discards;
-	unsigned int chksum_ok = 0;
 
 	/* Clear status before servicing to reduce spurious interrupts */
 	if (ring->index == DESC_INDEX) {
@@ -1789,6 +2231,9 @@
 
 	while ((rxpktprocessed < rxpkttoprocess) &&
 	       (rxpktprocessed < budget)) {
+		struct status_64 *status;
+		__be16 rx_csum;
+
 		cb = &priv->rx_cbs[ring->read_ptr];
 		skb = bcmgenet_rx_refill(priv, cb);
 
@@ -1797,14 +2242,14 @@
 			goto next;
 		}
 
-		if (!priv->desc_64b_en) {
-			dma_length_status =
-				dmadesc_get_length_status(priv, cb->bd_addr);
-		} else {
-			struct status_64 *status;
-
-			status = (struct status_64 *)skb->data;
-			dma_length_status = status->length_status;
+		status = (struct status_64 *)skb->data;
+		dma_length_status = status->length_status;
+		if (dev->features & NETIF_F_RXCSUM) {
+			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
+			if (rx_csum) {
+				skb->csum = (__force __wsum)ntohs(rx_csum);
+				skb->ip_summed = CHECKSUM_COMPLETE;
+			}
 		}
 
 		/* DMA flags and length are still valid no matter how
@@ -1817,6 +2262,14 @@
 			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
 			  __func__, p_index, ring->c_index,
 			  ring->read_ptr, dma_length_status);
+
+		if (unlikely(len > RX_BUF_LENGTH)) {
+			netif_err(priv, rx_status, dev, "oversized packet\n");
+			dev->stats.rx_length_errors++;
+			dev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
 
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
@@ -1847,21 +2300,11 @@
 			goto next;
 		} /* error packet */
 
-		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
-			    priv->desc_rxchk_en;
-
 		skb_put(skb, len);
-		if (priv->desc_64b_en) {
-			skb_pull(skb, 64);
-			len -= 64;
-		}
 
-		if (likely(chksum_ok))
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-		/* remove hardware 2bytes added for IP alignment */
-		skb_pull(skb, 2);
-		len -= 2;
+		/* remove RSB and hardware 2bytes added for IP alignment */
+		skb_pull(skb, 66);
+		len -= 66;
 
 		if (priv->crc_fwd_en) {
 			skb_trim(skb, len - ETH_FCS_LEN);
@@ -1903,7 +2346,7 @@
 {
 	struct bcmgenet_rx_ring *ring = container_of(napi,
 			struct bcmgenet_rx_ring, napi);
-	struct net_dim_sample dim_sample;
+	struct dim_sample dim_sample = {};
 	unsigned int work_done;
 
 	work_done = bcmgenet_desc_rx(ring, budget);
@@ -1914,8 +2357,8 @@
 	}
 
 	if (ring->dim.use_dim) {
-		net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
-			       ring->dim.bytes, &dim_sample);
+		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
+				  ring->dim.bytes, &dim_sample);
 		net_dim(&ring->dim.dim, dim_sample);
 	}
 
@@ -1924,16 +2367,16 @@
 
 static void bcmgenet_dim_work(struct work_struct *work)
 {
-	struct net_dim *dim = container_of(work, struct net_dim, work);
+	struct dim *dim = container_of(work, struct dim, work);
 	struct bcmgenet_net_dim *ndim =
 			container_of(dim, struct bcmgenet_net_dim, dim);
 	struct bcmgenet_rx_ring *ring =
 			container_of(ndim, struct bcmgenet_rx_ring, dim);
-	struct net_dim_cq_moder cur_profile =
+	struct dim_cq_moder cur_profile =
 			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 
 	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
-	dim->state = NET_DIM_START_MEASURE;
+	dim->state = DIM_START_MEASURE;
 }
 
 /* Assign skb to RX DMA descriptor. */
@@ -1979,6 +2422,8 @@
 	u32 reg;
 
 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
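+	/* Leave the RX/TX enables untouched while the UniMAC is held in reset */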
+	if (reg & CMD_SW_RESET)
+		return;
 	if (enable)
 		reg |= mask;
 	else
@@ -1998,11 +2443,9 @@
 	bcmgenet_rbuf_ctrl_set(priv, 0);
 	udelay(10);
 
-	/* disable MAC while updating its registers */
-	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
-
-	/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
-	bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
+	/* issue soft reset and disable MAC while updating its registers */
+	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+	udelay(2);
 }
 
 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2052,10 +2495,27 @@
 
 	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
 
-	/* init rx registers, enable ip header optimization */
+	/* init tx registers, enable TSB */
+	reg = bcmgenet_tbuf_ctrl_get(priv);
+	reg |= TBUF_64B_EN;
+	bcmgenet_tbuf_ctrl_set(priv, reg);
+
+	/* init rx registers, enable ip header optimization and RSB */
 	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-	reg |= RBUF_ALIGN_2B;
+	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
 	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+	/* enable rx checksumming */
+	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
+	/* If UniMAC forwards CRC, we need to skip over it to get
+	 * a valid CHK bit to be set in the per-packet status word
+	 */
+	if (priv->crc_fwd_en)
+		reg |= RBUF_SKIP_FCS;
+	else
+		reg &= ~RBUF_SKIP_FCS;
+	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
 
 	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
 		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
@@ -2090,7 +2550,7 @@
 	struct bcmgenet_net_dim *dim = &ring->dim;
 
 	INIT_WORK(&dim->dim.work, cb);
-	dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 	dim->event_ctr = 0;
 	dim->packets = 0;
 	dim->bytes = 0;
@@ -2099,7 +2559,7 @@
 static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
 {
 	struct bcmgenet_net_dim *dim = &ring->dim;
-	struct net_dim_cq_moder moder;
+	struct dim_cq_moder moder;
 	u32 usecs, pkts;
 
 	usecs = ring->rx_coalesce_usecs;
@@ -2523,19 +2983,14 @@
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	struct netdev_queue *txq;
-	struct sk_buff *skb;
-	struct enet_cb *cb;
 	int i;
 
 	bcmgenet_fini_rx_napi(priv);
 	bcmgenet_fini_tx_napi(priv);
 
-	for (i = 0; i < priv->num_tx_bds; i++) {
-		cb = priv->tx_cbs + i;
-		skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
-		if (skb)
-			dev_kfree_skb(skb);
-	}
+	for (i = 0; i < priv->num_tx_bds; i++)
+		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
+						  priv->tx_cbs + i));
 
 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
 		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
@@ -2588,7 +3043,8 @@
 	}
 
 	/* Init rDma */
-	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
+			     DMA_SCB_BURST_SIZE);
 
 	/* Initialize Rx queues */
 	ret = bcmgenet_init_rx_queues(priv->dev);
@@ -2601,7 +3057,8 @@
 	}
 
 	/* Init tDma */
-	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
+			     DMA_SCB_BURST_SIZE);
 
 	/* Initialize Tx queues */
 	bcmgenet_init_tx_queues(priv->dev);
@@ -2742,10 +3199,7 @@
 
 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
 {
-	struct bcmgenet_priv *priv = dev_id;
-
-	pm_wakeup_event(&priv->pdev->dev, 0);
-
+	/* Acknowledge the interrupt */
 	return IRQ_HANDLED;
 }
 
@@ -2783,9 +3237,19 @@
 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
 				 unsigned char *addr)
 {
-	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
-			(addr[2] << 8) | addr[3], UMAC_MAC0);
-	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+	bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
+	bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
+}
+
+static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
+				 unsigned char *addr)
+{
+	u32 addr_tmp;
+
+	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
+	put_unaligned_be32(addr_tmp, &addr[0]);
+	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
+	put_unaligned_be16(addr_tmp, &addr[4]);
 }
 
 /* Returns a reusable dma control register value */
@@ -2830,38 +3294,6 @@
 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
-/* bcmgenet_hfb_clear
- *
- * Clear Hardware Filter Block and disable all filtering.
- */
-static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
-{
-	u32 i;
-
-	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
-	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
-	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
-	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
-		bcmgenet_rdma_writel(priv, 0x0, i);
-
-	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
-		bcmgenet_hfb_reg_writel(priv, 0x0,
-					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
-
-	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
-			priv->hw_params->hfb_filter_size; i++)
-		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
-}
-
-static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
-{
-	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
-		return;
-
-	bcmgenet_hfb_clear(priv);
-}
-
 static void bcmgenet_netif_start(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2884,7 +3316,6 @@
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	unsigned long dma_ctrl;
-	u32 reg;
 	int ret;
 
 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -2903,9 +3334,10 @@
 
 	init_umac(priv);
 
-	/* Make sure we reflect the value of CRC_CMD_FWD */
-	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+	/* Apply features again in case we changed them while interface was
+	 * down
+	 */
+	bcmgenet_set_features(dev, dev->features);
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
@@ -2965,7 +3397,7 @@
 	return ret;
 }
 
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -2980,7 +3412,8 @@
 	/* Disable MAC transmit. TX DMA disabled must be done before this */
 	umac_enable_set(priv, CMD_TX_EN, false);
 
-	phy_stop(dev->phydev);
+	if (stop_phy)
+		phy_stop(dev->phydev);
 	bcmgenet_disable_rx_napi(priv);
 	bcmgenet_intr_disable(priv);
 
@@ -3006,7 +3439,7 @@
 
 	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
 
-	bcmgenet_netif_stop(dev);
+	bcmgenet_netif_stop(dev, false);
 
 	/* Really kill the PHY state machine and disconnect from it */
 	phy_disconnect(dev->phydev);
@@ -3066,7 +3499,7 @@
 		  ring->cb_ptr, ring->end_ptr);
 }
 
-static void bcmgenet_timeout(struct net_device *dev)
+static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	u32 int0_enable = 0;
@@ -3221,6 +3654,22 @@
 	return &dev->stats;
 }
 
+static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
+	    priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
+		return -EOPNOTSUPP;
+
+	if (new_carrier)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
+
+	return 0;
+}
+
 static const struct net_device_ops bcmgenet_netdev_ops = {
 	.ndo_open		= bcmgenet_open,
 	.ndo_stop		= bcmgenet_close,
@@ -3228,12 +3677,13 @@
 	.ndo_tx_timeout		= bcmgenet_timeout,
 	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
 	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
-	.ndo_do_ioctl		= bcmgenet_ioctl,
+	.ndo_do_ioctl		= phy_do_ioctl_running,
 	.ndo_set_features	= bcmgenet_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= bcmgenet_poll_controller,
#endif
 	.ndo_get_stats		= bcmgenet_get_stats,
+	.ndo_change_carrier	= bcmgenet_change_carrier,
 };
 
 /* Array of GENET hardware parameters/characteristics */
@@ -3339,19 +3789,15 @@
 	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 		genet_dma_ring_regs = genet_dma_ring_regs_v4;
-		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
 	} else if (GENET_IS_V3(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
-		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
 	} else if (GENET_IS_V2(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
-		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
 	} else if (GENET_IS_V1(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
-		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
 	}
 
 	/* enum genet_version starts at 1 */
@@ -3434,12 +3880,48 @@
 			params->words_per_bd);
 }
 
+struct bcmgenet_plat_data {
+	enum bcmgenet_version version;
+	u32 dma_max_burst_length;
+};
+
+static const struct bcmgenet_plat_data v1_plat_data = {
+	.version = GENET_V1,
+	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v2_plat_data = {
+	.version = GENET_V2,
+	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v3_plat_data = {
+	.version = GENET_V3,
+	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v4_plat_data = {
+	.version = GENET_V4,
+	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v5_plat_data = {
+	.version = GENET_V5,
+	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
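+/* The BCM2711 integration of GENET v5 uses a reduced maximum DMA burst
+ * length.
+ */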
+static const struct bcmgenet_plat_data bcm2711_plat_data = {
+	.version = GENET_V5,
+	.dma_max_burst_length = 0x08,
+};
+
 static const struct of_device_id bcmgenet_match[] = {
-	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
-	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
-	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
-	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
-	{ .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
+	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
+	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
+	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
+	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
+	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
+	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3447,15 +3929,11 @@
 static int bcmgenet_probe(struct platform_device *pdev)
 {
 	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
-	struct device_node *dn = pdev->dev.of_node;
-	const struct of_device_id *of_id = NULL;
+	const struct bcmgenet_plat_data *pdata;
 	struct bcmgenet_priv *priv;
 	struct net_device *dev;
-	const void *macaddr;
-	struct resource *r;
 	unsigned int i;
 	int err = -EIO;
-	const char *phy_mode_str;
 
 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3465,35 +3943,24 @@
 		return -ENOMEM;
 	}
 
-	if (dn) {
-		of_id = of_match_node(bcmgenet_match, dn);
-		if (!of_id)
-			return -EINVAL;
-	}
-
 	priv = netdev_priv(dev);
 	priv->irq0 = platform_get_irq(pdev, 0);
+	if (priv->irq0 < 0) {
+		err = priv->irq0;
+		goto err;
+	}
 	priv->irq1 = platform_get_irq(pdev, 1);
-	priv->wol_irq = platform_get_irq(pdev, 2);
-	if (!priv->irq0 || !priv->irq1) {
-		dev_err(&pdev->dev, "can't find IRQs\n");
-		err = -EINVAL;
+	if (priv->irq1 < 0) {
+		err = priv->irq1;
+		goto err;
+	}
+	priv->wol_irq = platform_get_irq_optional(pdev, 2);
+	if (priv->wol_irq == -EPROBE_DEFER) {
+		err = priv->wol_irq;
 		goto err;
 	}
 
-	if (dn) {
-		macaddr = of_get_mac_address(dn);
-		if (!macaddr) {
-			dev_err(&pdev->dev, "can't find MAC address\n");
-			err = -EINVAL;
-			goto err;
-		}
-	} else {
-		macaddr = pd->mac_address;
-	}
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, r);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base)) {
 		err = PTR_ERR(priv->base);
 		goto err;
@@ -3503,16 +3970,17 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	dev_set_drvdata(&pdev->dev, dev);
-	ether_addr_copy(dev->dev_addr, macaddr);
 	dev->watchdog_timeo = 2 * HZ;
 	dev->ethtool_ops = &bcmgenet_ethtool_ops;
 	dev->netdev_ops = &bcmgenet_netdev_ops;
 
 	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
 
-	/* Set hardware features */
-	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
-		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+	/* Set default features */
+	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+			 NETIF_F_RXCSUM;
+	dev->hw_features |= dev->features;
+	dev->vlan_features |= dev->features;
 
 	/* Request the WOL interrupt and advertise suspend if available */
 	priv->wol_irq_disabled = true;
@@ -3532,20 +4000,36 @@
 
 	priv->dev = dev;
 	priv->pdev = pdev;
-	if (of_id)
-		priv->version = (enum bcmgenet_version)of_id->data;
-	else
-		priv->version = pd->genet_version;
 
-	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
-	if (IS_ERR(priv->clk)) {
-		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
-		priv->clk = NULL;
+	pdata = device_get_match_data(&pdev->dev);
+	if (pdata) {
+		priv->version = pdata->version;
+		priv->dma_max_burst_length = pdata->dma_max_burst_length;
+	} else {
+		priv->version = pd->genet_version;
+		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
 	}
 
-	clk_prepare_enable(priv->clk);
+	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
+	if (IS_ERR(priv->clk)) {
+		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
+		err = PTR_ERR(priv->clk);
+		goto err;
+	}
+
+	err = clk_prepare_enable(priv->clk);
+	if (err)
+		goto err;
 
 	bcmgenet_set_hw_params(priv);
+
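+	/* Prefer a 40-bit DMA mask when the hardware supports it and fall
+	 * back to a 32-bit mask otherwise.
+	 */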
+	err = -EIO;
+	if (priv->hw_params->flags & GENET_HAS_40BITS)
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (err)
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		goto err_clk_disable;
 
 	/* Mii wait queue */
 	init_waitqueue_head(&priv->wq);
@@ -3553,24 +4037,37 @@
 	priv->rx_buf_len = RX_BUF_LENGTH;
 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
 
-	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
 	if (IS_ERR(priv->clk_wol)) {
-		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
-		priv->clk_wol = NULL;
+		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
+		err = PTR_ERR(priv->clk_wol);
+		goto err_clk_disable;
 	}
 
-	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
 	if (IS_ERR(priv->clk_eee)) {
-		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
-		priv->clk_eee = NULL;
+		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
		err = PTR_ERR(priv->clk_eee);
+		goto err_clk_disable;
 	}
 
 	/* If this is an internal GPHY, power it on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
 	 */
-	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
-	    !strcasecmp(phy_mode_str, "internal"))
+	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+	if (pd && !IS_ERR_OR_NULL(pd->mac_address))
+		ether_addr_copy(dev->dev_addr, pd->mac_address);
+	else
+		if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
+			if (has_acpi_companion(&pdev->dev))
+				bcmgenet_get_hw_addr(priv, dev->dev_addr);
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+		eth_hw_addr_random(dev);
+	}
 
 	reset_umac(priv);
 
@@ -3622,13 +4119,18 @@
 	return 0;
 }
 
+static void bcmgenet_shutdown(struct platform_device *pdev)
+{
+	bcmgenet_remove(pdev);
+}
+
 #ifdef CONFIG_PM_SLEEP
-static int bcmgenet_resume(struct device *d)
+static int bcmgenet_resume_noirq(struct device *d)
 {
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	unsigned long dma_ctrl;
 	int ret;
+	u32 reg;
 
 	if (!netif_running(dev))
 		return 0;
@@ -3637,6 +4139,38 @@
 	ret = clk_prepare_enable(priv->clk);
 	if (ret)
 		return ret;
+
+	if (device_may_wakeup(d) && priv->wolopts) {
+		/* Account for Wake-on-LAN events and clear those events
+		 * (Some devices need more time between enabling the clocks
+		 * and the interrupt register reflecting the wake event so
+		 * read the register twice)
+		 */
+		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+		if (reg & UMAC_IRQ_WAKE_EVENT)
+			pm_wakeup_event(&priv->pdev->dev, 0);
+	}
+
+	bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+
+	return 0;
+}
+
+static int bcmgenet_resume(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_rxnfc_rule *rule;
+	unsigned long dma_ctrl;
+	int ret;
+
+	if (!netif_running(dev))
+		return 0;
+
+	/* From WOL-enabled suspend, switch to regular clock */
+	if (device_may_wakeup(d) && priv->wolopts)
+		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
 
 	/* If this is an internal GPHY, power it back on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
@@ -3648,20 +4182,22 @@
 
 	init_umac(priv);
 
-	/* From WOL-enabled suspend, switch to regular clock */
-	if (priv->wolopts)
-		clk_disable_unprepare(priv->clk_wol);
-
 	phy_init_hw(dev->phydev);
 
 	/* Speed settings must be restored */
 	genphy_config_aneg(dev->phydev);
 	bcmgenet_mii_config(priv->dev, false);
 
+	/* Restore enabled features */
+	bcmgenet_set_features(dev, dev->features);
+
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-	if (priv->wolopts)
-		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+	/* Restore hardware filters */
+	bcmgenet_hfb_clear(priv);
+	list_for_each_entry(rule, &priv->rxnfc_list, list)
+		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+			bcmgenet_hfb_create_rxnfc_filter(priv, rule);
 
 	/* Disable RX/TX DMA and flush TX queues */
 	dma_ctrl = bcmgenet_dma_disable(priv);
@@ -3699,45 +4235,76 @@
 {
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	int ret = 0;
 
 	if (!netif_running(dev))
 		return 0;
 
 	netif_device_detach(dev);
 
-	bcmgenet_netif_stop(dev);
+	bcmgenet_netif_stop(dev, true);
 
 	if (!device_may_wakeup(d))
 		phy_suspend(dev->phydev);
 
+	/* Disable filtering */
+	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+
+	return 0;
+}
+
+static int bcmgenet_suspend_noirq(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (!netif_running(dev))
+		return 0;
+
 	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
-	if (device_may_wakeup(d) && priv->wolopts) {
+	if (device_may_wakeup(d) && priv->wolopts)
 		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
-		clk_prepare_enable(priv->clk_wol);
-	} else if (priv->internal_phy) {
+	else if (priv->internal_phy)
 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
-	}
+
+	/* Let the framework handle resumption and leave the clocks on */
+	if (ret)
+		return ret;
 
 	/* Turn off the clocks */
 	clk_disable_unprepare(priv->clk);
 
-	if (ret)
-		bcmgenet_resume(d);
-
-	return ret;
+	return 0;
 }
+#else
+#define bcmgenet_suspend	NULL
+#define bcmgenet_suspend_noirq	NULL
+#define bcmgenet_resume		NULL
+#define bcmgenet_resume_noirq	NULL
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+static const struct dev_pm_ops bcmgenet_pm_ops = {
+	.suspend	= bcmgenet_suspend,
+	.suspend_noirq	= bcmgenet_suspend_noirq,
+	.resume		= bcmgenet_resume,
+	.resume_noirq	= bcmgenet_resume_noirq,
+};
+
+static const struct acpi_device_id genet_acpi_match[] = {
+	{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
 
 static struct platform_driver bcmgenet_driver = {
 	.probe	= bcmgenet_probe,
 	.remove	= bcmgenet_remove,
+	.shutdown = bcmgenet_shutdown,
 	.driver	= {
 		.name	= "bcmgenet",
 		.of_match_table = bcmgenet_match,
 		.pm	= &bcmgenet_pm_ops,
+		.acpi_match_table = genet_acpi_match,
 	},
 };
 module_platform_driver(bcmgenet_driver);