2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Broadcom BCM7xxx System Port Ethernet MAC driver
  *
  * Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
@@ -23,6 +20,7 @@
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <net/dsa.h>
+#include <linux/clk.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 
@@ -116,22 +114,9 @@
 	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
 }
 
-static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
-					     struct dma_desc *desc,
-					     unsigned int port)
-{
-	unsigned long desc_flags;
-
-	/* Ports are latched, so write upper address first */
-	spin_lock_irqsave(&priv->desc_lock, desc_flags);
-	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
-	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
-	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
-}
-
 /* Ethtool operations */
-static int bcm_sysport_set_rx_csum(struct net_device *dev,
-				   netdev_features_t wanted)
+static void bcm_sysport_set_rx_csum(struct net_device *dev,
+				    netdev_features_t wanted)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	u32 reg;
@@ -165,12 +150,10 @@
 		reg &= ~RXCHK_BRCM_TAG_EN;
 
 	rxchk_writel(priv, reg, RXCHK_CONTROL);
-
-	return 0;
 }
 
-static int bcm_sysport_set_tx_csum(struct net_device *dev,
-				   netdev_features_t wanted)
+static void bcm_sysport_set_tx_csum(struct net_device *dev,
+				    netdev_features_t wanted)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	u32 reg;
@@ -178,30 +161,51 @@
 	/* Hardware transmit checksum requires us to enable the Transmit status
 	 * block prepended to the packet contents
 	 */
-	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				    NETIF_F_HW_VLAN_CTAG_TX));
 	reg = tdma_readl(priv, TDMA_CONTROL);
 	if (priv->tsb_en)
 		reg |= tdma_control_bit(priv, TSB_EN);
 	else
 		reg &= ~tdma_control_bit(priv, TSB_EN);
+	/* Indicating that software inserts Broadcom tags is needed for the TX
+	 * checksum to be computed correctly when using VLAN HW acceleration,
+	 * else it has no effect, so it can always be turned on.
+	 */
+	if (netdev_uses_dsa(dev))
+		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
+	else
+		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
 	tdma_writel(priv, reg, TDMA_CONTROL);
 
-	return 0;
+	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
+	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
+		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
 }
 
 static int bcm_sysport_set_features(struct net_device *dev,
 				    netdev_features_t features)
 {
-	netdev_features_t changed = features ^ dev->features;
-	netdev_features_t wanted = dev->wanted_features;
-	int ret = 0;
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	int ret;
 
-	if (changed & NETIF_F_RXCSUM)
-		ret = bcm_sysport_set_rx_csum(dev, wanted);
-	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
-		ret = bcm_sysport_set_tx_csum(dev, wanted);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
-	return ret;
+	/* Read CRC forward */
+	if (!priv->is_lite)
+		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+	else
+		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
+
+	bcm_sysport_set_rx_csum(dev, features);
+	bcm_sysport_set_tx_csum(dev, features);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
 }
 
 /* Hardware counters must be kept in sync because the order/offset
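For context: the reworked bcm_sysport_set_features() above brackets all register access with clock management. A minimal sketch of that pattern, assuming a clock obtained with devm_clk_get_optional() (a missing clock yields NULL, which clk_prepare_enable() and clk_disable_unprepare() treat as a successful no-op); names here are illustrative, not from this driver:

	/* Sketch: clock-gated feature programming (illustrative names) */
	static int example_set_features(struct net_device *dev,
					netdev_features_t features)
	{
		struct example_priv *priv = netdev_priv(dev);
		int ret;

		ret = clk_prepare_enable(priv->clk);	/* no-op if clk is NULL */
		if (ret)
			return ret;

		/* ... touch hardware registers only while the block is clocked ... */

		clk_disable_unprepare(priv->clk);
		return 0;
	}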
@@ -293,6 +297,8 @@
 	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
 	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
 	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
+	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
 	/* Per TX-queue statistics are dynamically appended */
 };
 
@@ -302,7 +308,6 @@
 				  struct ethtool_drvinfo *info)
 {
 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
-	strlcpy(info->version, "0.1", sizeof(info->version));
 	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
 }
 
@@ -624,7 +629,7 @@
 				     struct ethtool_coalesce *ec)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
-	struct net_dim_cq_moder moder;
+	struct dim_cq_moder moder;
 	u32 usecs, pkts;
 	unsigned int i;
 
@@ -639,8 +644,7 @@
 		return -EINVAL;
 
 	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
-	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
-	    ec->use_adaptive_tx_coalesce)
+	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
 		return -EINVAL;
 
 	for (i = 0; i < dev->num_tx_queues; i++)
@@ -724,8 +728,7 @@
 	for (i = 0; i < priv->num_rx_bds; i++) {
 		cb = &priv->rx_cbs[i];
 		skb = bcm_sysport_rx_refill(priv, cb);
-		if (skb)
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 		if (!cb->skb)
 			return -ENOMEM;
 	}
@@ -1008,7 +1011,7 @@
 {
 	struct bcm_sysport_priv *priv =
 		container_of(napi, struct bcm_sysport_priv, napi);
-	struct net_dim_sample dim_sample;
+	struct dim_sample dim_sample = {};
 	unsigned int work_done = 0;
 
 	work_done = bcm_sysport_desc_rx(priv, budget);
@@ -1032,8 +1035,8 @@
 	}
 
 	if (priv->dim.use_dim) {
-		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
-			       priv->dim.bytes, &dim_sample);
+		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
+				  priv->dim.bytes, &dim_sample);
 		net_dim(&priv->dim.dim, dim_sample);
 	}
 
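For context: the net_dim -> dim renames in the hunks above track the move of the library into include/linux/dim.h (v5.2); the sample is now zero-initialized ("= {}") because dim_update_sample() does not write every field of struct dim_sample. A minimal consumer sketch with illustrative names:

	#include <linux/dim.h>

	/* NAPI poll path: feed the algorithm one sample per interrupt event */
	static void example_rx_dim(struct example_ring *ring)
	{
		struct dim_sample sample = {};

		dim_update_sample(ring->event_ctr, ring->packets,
				  ring->bytes, &sample);
		net_dim(&ring->dim, sample);
	}

	/* Deferred work item: apply the RX moderation profile DIM chose */
	static void example_dim_work(struct work_struct *work)
	{
		struct dim *dim = container_of(work, struct dim, work);
		struct dim_cq_moder moder =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

		/* program moder.usec and moder.pkts into the hardware here */
		dim->state = DIM_START_MEASURE;
	}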
@@ -1066,6 +1069,7 @@
 
 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
+	unsigned int index;
 	u32 reg;
 
 	/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1073,6 +1077,15 @@
 	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
 		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
 	rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+	/* Make sure we restore correct CID index in case HW lost
+	 * its context during deep idle state
+	 */
+	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+		rxchk_writel(priv, priv->filters_loc[index] <<
+			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+	}
 
 	/* Clear the MagicPacket detection logic */
 	mpd_enable_set(priv, false);
@@ -1093,16 +1106,16 @@
 
 static void bcm_sysport_dim_work(struct work_struct *work)
 {
-	struct net_dim *dim = container_of(work, struct net_dim, work);
+	struct dim *dim = container_of(work, struct dim, work);
 	struct bcm_sysport_net_dim *ndim =
 			container_of(dim, struct bcm_sysport_net_dim, dim);
 	struct bcm_sysport_priv *priv =
 			container_of(ndim, struct bcm_sysport_priv, dim);
-	struct net_dim_cq_moder cur_profile =
-		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
+								    dim->profile_ix);
 
 	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
-	dim->state = NET_DIM_START_MEASURE;
+	dim->state = DIM_START_MEASURE;
 }
 
 /* RX and misc interrupt routine */
@@ -1217,6 +1230,7 @@
 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
 					      struct net_device *dev)
 {
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct sk_buff *nskb;
 	struct bcm_tsb *tsb;
 	u32 csum_info;
@@ -1227,18 +1241,26 @@
 	/* Re-allocate SKB if needed */
 	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
 		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
-		dev_kfree_skb(skb);
 		if (!nskb) {
+			dev_kfree_skb_any(skb);
+			priv->mib.tx_realloc_tsb_failed++;
 			dev->stats.tx_errors++;
 			dev->stats.tx_dropped++;
 			return NULL;
 		}
+		dev_consume_skb_any(skb);
 		skb = nskb;
+		priv->mib.tx_realloc_tsb++;
 	}
 
 	tsb = skb_push(skb, sizeof(*tsb));
 	/* Zero-out TSB by default */
 	memset(tsb, 0, sizeof(*tsb));
+
+	if (skb_vlan_tag_present(skb)) {
+		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
+		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
+	}
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		ip_ver = skb->protocol;
@@ -1255,6 +1277,9 @@
 
 		/* Get the checksum offset and the L4 (transport) offset */
 		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+		/* Account for the HW inserted VLAN tag */
+		if (skb_vlan_tag_present(skb))
+			csum_start += VLAN_HLEN;
 		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
 		csum_info |= (csum_start << L4_PTR_SHIFT);
 
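For context, a worked example of the VLAN_HLEN adjustment above (numbers illustrative): for TCP over IPv4 with no VLAN offload, checksumming starts 34 bytes into the frame (14-byte Ethernet header + 20-byte IPv4 header). Once the MAC inserts the 4-byte 802.1Q tag carried in the TSB, every L3/L4 offset on the wire shifts by VLAN_HLEN (4), so the checksum start programmed into the hardware must become 38, hence csum_start += VLAN_HLEN.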
@@ -1279,13 +1304,12 @@
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
 	struct bcm_sysport_tx_ring *ring;
+	unsigned long flags, desc_flags;
 	struct bcm_sysport_cb *cb;
 	struct netdev_queue *txq;
-	struct dma_desc *desc;
+	u32 len_status, addr_lo;
 	unsigned int skb_len;
-	unsigned long flags;
 	dma_addr_t mapping;
-	u32 len_status;
 	u16 queue;
 	int ret;
 
@@ -1328,32 +1352,26 @@
 	dma_unmap_addr_set(cb, dma_addr, mapping);
 	dma_unmap_len_set(cb, dma_len, skb_len);
 
-	/* Fetch a descriptor entry from our pool */
-	desc = ring->desc_cpu;
-
-	desc->addr_lo = lower_32_bits(mapping);
+	addr_lo = lower_32_bits(mapping);
 	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
 	len_status |= (skb_len << DESC_LEN_SHIFT);
 	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
 		       DESC_STATUS_SHIFT;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+	if (skb_vlan_tag_present(skb))
+		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
 
 	ring->curr_desc++;
 	if (ring->curr_desc == ring->size)
 		ring->curr_desc = 0;
 	ring->desc_count--;
 
-	/* Ensure write completion of the descriptor status/length
-	 * in DRAM before the System Port WRITE_PORT register latches
-	 * the value
-	 */
-	wmb();
-	desc->addr_status_len = len_status;
-	wmb();
-
-	/* Write this descriptor address to the RING write port */
-	tdma_port_write_desc_addr(priv, desc, ring->index);
+	/* Ports are latched, so write upper address first */
+	spin_lock_irqsave(&priv->desc_lock, desc_flags);
+	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
+	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
 
 	/* Check ring space and update SW control flow */
 	if (ring->desc_count == 0)
@@ -1368,7 +1386,7 @@
 	return ret;
 }
 
-static void bcm_sysport_tx_timeout(struct net_device *dev)
+static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	netdev_warn(dev, "transmit timeout!\n");
 
@@ -1450,7 +1468,7 @@
 	struct bcm_sysport_net_dim *dim = &priv->dim;
 
 	INIT_WORK(&dim->dim.work, cb);
-	dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 	dim->event_ctr = 0;
 	dim->packets = 0;
 	dim->bytes = 0;
@@ -1459,7 +1477,7 @@
 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
 {
 	struct bcm_sysport_net_dim *dim = &priv->dim;
-	struct net_dim_cq_moder moder;
+	struct dim_cq_moder moder;
 	u32 usecs, pkts;
 
 	usecs = priv->rx_coalesce_usecs;
@@ -1479,28 +1497,14 @@
 				    unsigned int index)
 {
 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
-	struct device *kdev = &priv->pdev->dev;
 	size_t size;
-	void *p;
 	u32 reg;
 
 	/* Simple descriptors partitioning for now */
 	size = 256;
 
-	/* We just need one DMA descriptor which is DMA-able, since writing to
-	 * the port will allocate a new descriptor in its internal linked-list
-	 */
-	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
-				GFP_KERNEL);
-	if (!p) {
-		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
-		return -ENOMEM;
-	}
-
 	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
 	if (!ring->cbs) {
-		dma_free_coherent(kdev, sizeof(struct dma_desc),
-				  ring->desc_cpu, ring->desc_dma);
 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
 		return -ENOMEM;
 	}
@@ -1513,7 +1517,6 @@
 	ring->size = size;
 	ring->clean_index = 0;
 	ring->alloc_size = ring->size;
-	ring->desc_cpu = p;
 	ring->desc_count = ring->size;
 	ring->curr_desc = 0;
 
@@ -1533,7 +1536,13 @@
 		reg |= RING_IGNORE_STATUS;
 	}
 	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
-	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+	reg = 0;
+	/* Adjust the packet size calculations if SYSTEMPORT is responsible
+	 * for HW insertion of VLAN tags
+	 */
+	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
+	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
 
 	/* Enable ACB algorithm 2 */
 	reg = tdma_readl(priv, TDMA_CONTROL);
@@ -1568,8 +1577,8 @@
 	napi_enable(&ring->napi);
 
 	netif_dbg(priv, hw, priv->netdev,
-		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
-		  ring->size, ring->desc_cpu, ring->switch_queue,
+		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
+		  ring->size, ring->switch_queue,
 		  ring->switch_port);
 
 	return 0;
@@ -1579,7 +1588,6 @@
 				     unsigned int index)
 {
 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
-	struct device *kdev = &priv->pdev->dev;
 	u32 reg;
 
 	/* Caller should stop the TDMA engine */
@@ -1601,12 +1609,6 @@
 
 	kfree(ring->cbs);
 	ring->cbs = NULL;
-
-	if (ring->desc_dma) {
-		dma_free_coherent(kdev, sizeof(struct dma_desc),
-				  ring->desc_cpu, ring->desc_dma);
-		ring->desc_dma = 0;
-	}
 	ring->size = 0;
 	ring->alloc_size = 0;
 
@@ -1948,6 +1950,8 @@
 	unsigned int i;
 	int ret;
 
+	clk_prepare_enable(priv->clk);
+
 	/* Reset UniMAC */
 	umac_reset(priv);
 
@@ -1966,21 +1970,20 @@
 	else
 		gib_set_pad_extension(priv);
 
+	/* Apply features again in case we changed them while interface was
+	 * down
+	 */
+	bcm_sysport_set_features(dev, dev->features);
+
 	/* Set MAC address */
 	umac_set_hw_addr(priv, dev->dev_addr);
-
-	/* Read CRC forward */
-	if (!priv->is_lite)
-		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
-	else
-		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
-				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
 
 	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
 				0, priv->phy_interface);
 	if (!phydev) {
 		netdev_err(dev, "could not attach to PHY\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_clk_disable;
 	}
 
 	/* Reset house keeping link status */
@@ -2059,6 +2062,8 @@
 	free_irq(priv->irq0, dev);
 out_phy_disconnect:
 	phy_disconnect(phydev);
+out_clk_disable:
+	clk_disable_unprepare(priv->clk);
 	return ret;
 }
 
@@ -2116,6 +2121,8 @@
 
 	/* Disconnect from PHY */
 	phy_disconnect(dev->phydev);
+
+	clk_disable_unprepare(priv->clk);
 
 	return 0;
 }
@@ -2186,6 +2193,7 @@
 	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
 	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
 
+	priv->filters_loc[index] = nfc->fs.location;
 	set_bit(index, priv->filters);
 
 	return 0;
@@ -2205,6 +2213,7 @@
 	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
 	 */
 	clear_bit(index, priv->filters);
+	priv->filters_loc[index] = 0;
 
 	return 0;
 }
@@ -2247,6 +2256,9 @@
 }
 
 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES |
+				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
 	.get_drvinfo		= bcm_sysport_get_drvinfo,
 	.get_msglevel		= bcm_sysport_get_msglvl,
 	.set_msglevel		= bcm_sysport_set_msglvl,
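For context: the .supported_coalesce_params declaration above pairs with the removal of the use_adaptive_tx_coalesce check from bcm_sysport_set_coalesce() earlier in this diff. Since v5.7 the ethtool core rejects any request field the driver does not declare before the driver callback runs, roughly like this (illustrative shape only, not the kernel's exact code):

	/* nonzero_params: bitmap of non-zero fields in the user request */
	if (nonzero_params & ~ops->supported_coalesce_params)
		return -EOPNOTSUPP;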
@@ -2265,8 +2277,7 @@
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-				    struct net_device *sb_dev,
-				    select_queue_fallback_t fallback)
+				    struct net_device *sb_dev)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	u16 queue = skb_get_queue_mapping(skb);
@@ -2274,7 +2285,7 @@
 	unsigned int q, port;
 
 	if (!netdev_uses_dsa(dev))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
 	/* DSA tagging layer will have configured the correct queue */
 	q = BRCM_TAG_GET_QUEUE(queue);
@@ -2282,7 +2293,7 @@
 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
 	if (unlikely(!tx_ring))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
 	return tx_ring->index;
 }
@@ -2309,7 +2320,7 @@
 	struct bcm_sysport_priv *priv;
 	struct net_device *slave_dev;
 	unsigned int num_tx_queues;
-	unsigned int q, start, port;
+	unsigned int q, qp, port;
 	struct net_device *dev;
 
 	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
@@ -2348,20 +2359,62 @@
 
 	priv->per_port_num_tx_queues = num_tx_queues;
 
-	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
-	for (q = 0; q < num_tx_queues; q++) {
-		ring = &priv->tx_rings[q + start];
+	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
+	     q++) {
+		ring = &priv->tx_rings[q];
+
+		if (ring->inspect)
+			continue;
 
 		/* Just remember the mapping actual programming done
 		 * during bcm_sysport_init_tx_ring
 		 */
-		ring->switch_queue = q;
+		ring->switch_queue = qp;
 		ring->switch_port = port;
 		ring->inspect = true;
-		priv->ring_map[q + port * num_tx_queues] = ring;
+		priv->ring_map[qp + port * num_tx_queues] = ring;
+		qp++;
+	}
 
-		/* Set all queues as being used now */
-		set_bit(q + start, &priv->queue_bitmap);
+	return 0;
+}
+
+static int bcm_sysport_unmap_queues(struct notifier_block *nb,
+				    struct dsa_notifier_register_info *info)
+{
+	struct bcm_sysport_tx_ring *ring;
+	struct bcm_sysport_priv *priv;
+	struct net_device *slave_dev;
+	unsigned int num_tx_queues;
+	struct net_device *dev;
+	unsigned int q, qp, port;
+
+	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+	if (priv->netdev != info->master)
+		return 0;
+
+	dev = info->master;
+
+	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+		return 0;
+
+	port = info->port_number;
+	slave_dev = info->info.dev;
+
+	num_tx_queues = slave_dev->real_num_tx_queues;
+
+	for (q = 0; q < dev->num_tx_queues; q++) {
+		ring = &priv->tx_rings[q];
+
+		if (ring->switch_port != port)
+			continue;
+
+		if (!ring->inspect)
+			continue;
+
+		ring->inspect = false;
+		qp = ring->switch_queue;
+		priv->ring_map[qp + port * num_tx_queues] = NULL;
 	}
 
 	return 0;
@@ -2370,14 +2423,18 @@
 static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
 				    unsigned long event, void *ptr)
 {
-	struct dsa_notifier_register_info *info;
+	int ret = NOTIFY_DONE;
 
-	if (event != DSA_PORT_REGISTER)
-		return NOTIFY_DONE;
+	switch (event) {
+	case DSA_PORT_REGISTER:
+		ret = bcm_sysport_map_queues(nb, ptr);
+		break;
+	case DSA_PORT_UNREGISTER:
+		ret = bcm_sysport_unmap_queues(nb, ptr);
+		break;
+	}
 
-	info = ptr;
-
-	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+	return notifier_from_errno(ret);
 }
 
 #define REV_FMT	"v%2x.%02x"
@@ -2412,15 +2469,21 @@
 	struct device_node *dn;
 	struct net_device *dev;
 	const void *macaddr;
-	struct resource *r;
 	u32 txq, rxq;
 	int ret;
 
 	dn = pdev->dev.of_node;
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	of_id = of_match_node(bcm_sysport_of_match, dn);
 	if (!of_id || !of_id->data)
 		return -EINVAL;
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
+		return ret;
+	}
 
 	/* Fairly quickly we need to know the type of adapter we have */
 	params = of_id->data;
@@ -2442,6 +2505,12 @@
 	/* Initialize private members */
 	priv = netdev_priv(dev);
 
+	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
+		goto err_free_netdev;
+	}
+
 	/* Allocate number of TX rings */
 	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
 				      sizeof(struct bcm_sysport_tx_ring),
@@ -2462,12 +2531,11 @@
 		priv->wol_irq = platform_get_irq(pdev, 1);
 	}
 	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
-		dev_err(&pdev->dev, "invalid interrupts\n");
 		ret = -EINVAL;
 		goto err_free_netdev;
 	}
 
-	priv->base = devm_ioremap_resource(&pdev->dev, r);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base)) {
 		ret = PTR_ERR(priv->base);
 		goto err_free_netdev;
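For context: two probe simplifications above are straight equivalences. platform_get_irq() has logged its own error on failure since v5.4, so the extra dev_err() was redundant; and devm_platform_ioremap_resource(pdev, 0) is a wrapper for exactly the combination the driver used to open-code:

	/* what devm_platform_ioremap_resource(pdev, 0) does internally */
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, r);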
@@ -2476,9 +2544,9 @@
 	priv->netdev = dev;
 	priv->pdev = pdev;
 
-	priv->phy_interface = of_get_phy_mode(dn);
+	ret = of_get_phy_mode(dn, &priv->phy_interface);
 	/* Default to GMII interface mode */
-	if ((int)priv->phy_interface < 0)
+	if (ret)
 		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
 
 	/* In the case of a fixed PHY, the DT node associated
@@ -2496,7 +2564,7 @@
 
 	/* Initialize netdevice members */
 	macaddr = of_get_mac_address(dn);
-	if (!macaddr || !is_valid_ether_addr(macaddr)) {
+	if (IS_ERR(macaddr)) {
 		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
 		eth_hw_addr_random(dev);
 	} else {
@@ -2509,9 +2577,11 @@
 	dev->netdev_ops = &bcm_sysport_netdev_ops;
 	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
 
-	/* HW supported features, none enabled by default */
-	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
-			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX;
+	dev->hw_features |= dev->features;
+	dev->vlan_features |= dev->features;
 	dev->max_mtu = UMAC_MAX_MTU_SIZE;
 
 	/* Request the WOL interrupt and advertise suspend if available */
@@ -2520,6 +2590,12 @@
 			       bcm_sysport_wol_isr, 0, dev->name, priv);
 	if (!ret)
 		device_set_wakeup_capable(&pdev->dev, 1);
+
+	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
+	if (IS_ERR(priv->wol_clk)) {
+		ret = PTR_ERR(priv->wol_clk);
+		goto err_deregister_fixed_link;
+	}
 
 	/* Set the needed headroom once and for all */
 	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
@@ -2545,13 +2621,17 @@
 		goto err_deregister_notifier;
 	}
 
+	clk_prepare_enable(priv->clk);
+
 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
 	dev_info(&pdev->dev,
-		 "Broadcom SYSTEMPORT%s" REV_FMT
-		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+		 "Broadcom SYSTEMPORT%s " REV_FMT
+		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
 		 priv->is_lite ? " Lite" : "",
 		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
-		 priv->base, priv->irq0, priv->irq1, txq, rxq);
+		 priv->irq0, priv->irq1, txq, rxq);
+
+	clk_disable_unprepare(priv->clk);
 
 	return 0;
 
@@ -2706,8 +2786,12 @@
 	bcm_sysport_fini_rx_ring(priv);
 
 	/* Get prepared for Wake-on-LAN */
-	if (device_may_wakeup(d) && priv->wolopts)
+	if (device_may_wakeup(d) && priv->wolopts) {
+		clk_prepare_enable(priv->wol_clk);
 		ret = bcm_sysport_suspend_to_wol(priv);
+	}
+
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -2717,11 +2801,14 @@
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	unsigned int i;
-	u32 reg;
 	int ret;
 
 	if (!netif_running(dev))
 		return 0;
+
+	clk_prepare_enable(priv->clk);
+	if (priv->wolopts)
+		clk_disable_unprepare(priv->wol_clk);
 
 	umac_reset(priv);
 
@@ -2762,12 +2849,8 @@
 		goto out_free_rx_ring;
 	}
 
-	/* Enable rxhck */
-	if (priv->rx_chk_en) {
-		reg = rxchk_readl(priv, RXCHK_CONTROL);
-		reg |= RXCHK_EN;
-		rxchk_writel(priv, reg, RXCHK_CONTROL);
-	}
+	/* Restore enabled features */
+	bcm_sysport_set_features(dev, dev->features);
 
 	rbuf_init(priv);
 
@@ -2806,6 +2889,7 @@
 out_free_tx_rings:
 	for (i = 0; i < dev->num_tx_queues; i++)
 		bcm_sysport_fini_tx_ring(priv, i);
+	clk_disable_unprepare(priv->clk);
 	return ret;
 }
 