2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1,11 +1,5 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+// SPDX-License-Identifier: GPL-2.0-only
+/*
  *
  * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
  * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
@@ -24,6 +18,7 @@
 #include <linux/tcp.h>
 #include <linux/interrupt.h>
 #include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
 
 #include "mtk_eth_soc.h"
 
@@ -54,8 +49,10 @@
 };
 
 static const char * const mtk_clks_source_name[] = {
-	"ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
-	"sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
+	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
+	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+	"sgmii_ck", "eth2pll",
 };
 
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -66,6 +63,17 @@
 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
 {
 	return __raw_readl(eth->base + reg);
+}
+
+static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+	u32 val;
+
+	val = mtk_r32(eth, reg);
+	val &= ~mask;
+	val |= set;
+	mtk_w32(eth, val, reg);
+	return reg;
 }
 
 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
@@ -138,10 +146,45 @@
 	return _mtk_mdio_read(eth, phy_addr, phy_reg);
 }
 
-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+				     phy_interface_t interface)
+{
+	u32 val;
+
+	/* Check DDR memory type.
+	 * Currently TRGMII mode with DDR2 memory is not supported.
+	 */
+	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+	if (interface == PHY_INTERFACE_MODE_TRGMII &&
+	    val & SYSCFG_DRAM_TYPE_DDR2) {
+		dev_err(eth->dev,
+			"TRGMII mode with DDR2 memory is not supported!\n");
+		return -EOPNOTSUPP;
+	}
+
+	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
+		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
+
+	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+			   ETHSYS_TRGMII_MT7621_MASK, val);
+
+	return 0;
+}
+
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+				   phy_interface_t interface, int speed)
 {
 	u32 val;
 	int ret;
+
+	if (interface == PHY_INTERFACE_MODE_TRGMII) {
+		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+		val = 500000000;
+		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+		if (ret)
+			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+		return;
+	}
 
 	val = (speed == SPEED_1000) ?
 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
@@ -165,217 +208,353 @@
 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
 }
 
-static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+			   const struct phylink_link_state *state)
 {
-	u32 val;
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	struct mtk_eth *eth = mac->hw;
+	u32 mcr_cur, mcr_new, sid, i;
+	int val, ge_mode, err = 0;
 
-	/* Setup the link timer and QPHY power up inside SGMIISYS */
-	regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
-		     SGMII_LINK_TIMER_DEFAULT);
+	/* MT76x8 has no hardware settings between for the MAC */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+	    mac->interface != state->interface) {
+		/* Setup soc pin functions */
+		switch (state->interface) {
+		case PHY_INTERFACE_MODE_TRGMII:
+			if (mac->id)
+				goto err_phy;
+			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
+					  MTK_GMAC1_TRGMII))
+				goto err_phy;
+			fallthrough;
+		case PHY_INTERFACE_MODE_RGMII_TXID:
+		case PHY_INTERFACE_MODE_RGMII_RXID:
+		case PHY_INTERFACE_MODE_RGMII_ID:
+		case PHY_INTERFACE_MODE_RGMII:
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_REVMII:
+		case PHY_INTERFACE_MODE_RMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		case PHY_INTERFACE_MODE_1000BASEX:
+		case PHY_INTERFACE_MODE_2500BASEX:
+		case PHY_INTERFACE_MODE_SGMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		case PHY_INTERFACE_MODE_GMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+				err = mtk_gmac_gephy_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		default:
+			goto err_phy;
+		}
 
-	regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
-	val |= SGMII_REMOTE_FAULT_DIS;
-	regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);
+		/* Setup clock for 1st gmac */
+		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+		    !phy_interface_mode_is_8023z(state->interface) &&
+		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+			if (MTK_HAS_CAPS(mac->hw->soc->caps,
+					 MTK_TRGMII_MT7621_CLK)) {
+				if (mt7621_gmac0_rgmii_adjust(mac->hw,
+							      state->interface))
+					goto err_phy;
+			} else {
+				mtk_gmac0_rgmii_adjust(mac->hw,
+						       state->interface,
+						       state->speed);
 
-	regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
-	val |= SGMII_AN_RESTART;
-	regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);
+				/* mt7623_pad_clk_setup */
+				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+					mtk_w32(mac->hw,
+						TD_DM_DRVP(8) | TD_DM_DRVN(8),
+						TRGMII_TD_ODT(i));
 
-	regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
-	val &= ~SGMII_PHYA_PWD;
-	regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);
+				/* Assert/release MT7623 RXC reset */
+				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+					TRGMII_RCK_CTRL);
+				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
+			}
+		}
 
-	/* Determine MUX for which GMAC uses the SGMII interface */
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
+		ge_mode = 0;
+		switch (state->interface) {
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_GMII:
+			ge_mode = 1;
+			break;
+		case PHY_INTERFACE_MODE_REVMII:
+			ge_mode = 2;
+			break;
+		case PHY_INTERFACE_MODE_RMII:
+			if (mac->id)
+				goto err_phy;
+			ge_mode = 3;
+			break;
+		default:
+			break;
+		}
+
+		/* put the gmac into the right mode */
 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
-		val &= ~SYSCFG0_SGMII_MASK;
-		val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
+		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
-		dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
-			 mac_id);
+		mac->interface = state->interface;
 	}
 
-	/* Setup the GMAC1 going through SGMII path when SoC also support
-	 * ESW on GMAC1
-	 */
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
-	    !mac_id) {
-		mtk_w32(eth, 0, MTK_MAC_MISC);
-		dev_info(eth->dev, "setup gmac1 going through sgmii");
+	/* SGMII */
+	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+	    phy_interface_mode_is_8023z(state->interface)) {
+		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
+		 * being setup done.
+		 */
+		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK,
+				   ~(u32)SYSCFG0_SGMII_MASK);
+
+		/* Decide how GMAC and SGMIISYS be mapped */
+		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+		       0 : mac->id;
+
+		/* Setup SGMIISYS with the determined property */
+		if (state->interface != PHY_INTERFACE_MODE_SGMII)
+			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
+							 state);
+		else if (phylink_autoneg_inband(mode))
+			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+
+		if (err)
+			goto init_err;
+
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK, val);
+	} else if (phylink_autoneg_inband(mode)) {
+		dev_err(eth->dev,
+			"In-band mode not supported in non SGMII mode!\n");
+		return;
 	}
+
+	/* Setup gmac */
+	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+	mcr_new = mcr_cur;
+	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+	/* Only update control register when needed! */
+	if (mcr_new != mcr_cur)
+		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+	return;
+
+err_phy:
+	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+		mac->id, phy_modes(state->interface));
+	return;
+
+init_err:
+	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+		mac->id, phy_modes(state->interface), err);
 }
 
-static void mtk_phy_link_adjust(struct net_device *dev)
+static void mtk_mac_pcs_get_state(struct phylink_config *config,
+				  struct phylink_link_state *state)
 {
-	struct mtk_mac *mac = netdev_priv(dev);
-	u16 lcl_adv = 0, rmt_adv = 0;
-	u8 flowctrl;
-	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
-		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
-		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
-		  MAC_MCR_BACKPR_EN;
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
 
-	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
-		return;
+	state->link = (pmsr & MAC_MSR_LINK);
+	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
 
-	switch (dev->phydev->speed) {
+	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
+	case 0:
+		state->speed = SPEED_10;
+		break;
+	case MAC_MSR_SPEED_100:
+		state->speed = SPEED_100;
+		break;
+	case MAC_MSR_SPEED_1000:
+		state->speed = SPEED_1000;
+		break;
+	default:
+		state->speed = SPEED_UNKNOWN;
+		break;
+	}
+
+	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+	if (pmsr & MAC_MSR_RX_FC)
+		state->pause |= MLO_PAUSE_RX;
+	if (pmsr & MAC_MSR_TX_FC)
+		state->pause |= MLO_PAUSE_TX;
+}
+
+static void mtk_mac_an_restart(struct phylink_config *config)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+
+	mtk_sgmii_restart_an(mac->hw, mac->id);
+}
+
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+			      phy_interface_t interface)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+
+	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+}
+
+static void mtk_mac_link_up(struct phylink_config *config,
+			    struct phy_device *phy,
+			    unsigned int mode, phy_interface_t interface,
+			    int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+
+	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+		 MAC_MCR_FORCE_RX_FC);
+
+	/* Configure speed */
+	switch (speed) {
+	case SPEED_2500:
 	case SPEED_1000:
 		mcr |= MAC_MCR_SPEED_1000;
 		break;
 	case SPEED_100:
 		mcr |= MAC_MCR_SPEED_100;
 		break;
-	};
+	}
 
-	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
-	    !mac->id && !mac->trgmii)
-		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
-
-	if (dev->phydev->link)
-		mcr |= MAC_MCR_FORCE_LINK;
-
-	if (dev->phydev->duplex) {
+	/* Configure duplex */
+	if (duplex == DUPLEX_FULL)
 		mcr |= MAC_MCR_FORCE_DPX;
 
-		if (dev->phydev->pause)
-			rmt_adv = LPA_PAUSE_CAP;
-		if (dev->phydev->asym_pause)
-			rmt_adv |= LPA_PAUSE_ASYM;
+	/* Configure pause modes - phylink will avoid these for half duplex */
+	if (tx_pause)
+		mcr |= MAC_MCR_FORCE_TX_FC;
+	if (rx_pause)
+		mcr |= MAC_MCR_FORCE_RX_FC;
 
-		if (dev->phydev->advertising & ADVERTISED_Pause)
-			lcl_adv |= ADVERTISE_PAUSE_CAP;
-		if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
-			lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
-		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-
-		if (flowctrl & FLOW_CTRL_TX)
-			mcr |= MAC_MCR_FORCE_TX_FC;
-		if (flowctrl & FLOW_CTRL_RX)
-			mcr |= MAC_MCR_FORCE_RX_FC;
-
-		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
-			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
-			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
-	}
-
+	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
-
-	if (dev->phydev->link)
-		netif_carrier_on(dev);
-	else
-		netif_carrier_off(dev);
-
-	if (!of_phy_is_fixed_link(mac->of_node))
-		phy_print_status(dev->phydev);
 }
 
-static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
-				struct device_node *phy_node)
+static void mtk_validate(struct phylink_config *config,
+			 unsigned long *supported,
+			 struct phylink_link_state *state)
 {
-	struct phy_device *phydev;
-	int phy_mode;
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
-	phy_mode = of_get_phy_mode(phy_node);
-	if (phy_mode < 0) {
-		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
-		return -EINVAL;
+	if (state->interface != PHY_INTERFACE_MODE_NA &&
+	    state->interface != PHY_INTERFACE_MODE_MII &&
+	    state->interface != PHY_INTERFACE_MODE_GMII &&
+	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
+	      phy_interface_mode_is_rgmii(state->interface)) &&
+	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
+	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
+	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
+	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
+	       phy_interface_mode_is_8023z(state->interface)))) {
+		linkmode_zero(supported);
+		return;
 	}
 
-	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
-				mtk_phy_link_adjust, 0, phy_mode);
-	if (!phydev) {
-		dev_err(eth->dev, "could not connect to PHY\n");
-		return -ENODEV;
-	}
+	phylink_set_port_modes(mask);
+	phylink_set(mask, Autoneg);
 
-	dev_info(eth->dev,
-		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
-		 mac->id, phydev_name(phydev), phydev->phy_id,
-		 phydev->drv->name);
-
-	return 0;
-}
-
-static int mtk_phy_connect(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth;
-	struct device_node *np;
-	u32 val;
-
-	eth = mac->hw;
-	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
-	if (!np && of_phy_is_fixed_link(mac->of_node))
-		if (!of_phy_register_fixed_link(mac->of_node))
-			np = of_node_get(mac->of_node);
-	if (!np)
-		return -ENODEV;
-
-	mac->ge_mode = 0;
-	switch (of_get_phy_mode(np)) {
+	switch (state->interface) {
 	case PHY_INTERFACE_MODE_TRGMII:
-		mac->trgmii = true;
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-	case PHY_INTERFACE_MODE_RGMII_ID:
+		phylink_set(mask, 1000baseT_Full);
+		break;
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+		phylink_set(mask, 1000baseX_Full);
+		phylink_set(mask, 2500baseX_Full);
+		break;
+	case PHY_INTERFACE_MODE_GMII:
 	case PHY_INTERFACE_MODE_RGMII:
-		break;
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		phylink_set(mask, 1000baseT_Half);
+		fallthrough;
 	case PHY_INTERFACE_MODE_SGMII:
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
-			mtk_gmac_sgmii_hw_setup(eth, mac->id);
-		break;
+		phylink_set(mask, 1000baseT_Full);
+		phylink_set(mask, 1000baseX_Full);
+		fallthrough;
 	case PHY_INTERFACE_MODE_MII:
-		mac->ge_mode = 1;
-		break;
-	case PHY_INTERFACE_MODE_REVMII:
-		mac->ge_mode = 2;
-		break;
 	case PHY_INTERFACE_MODE_RMII:
-		if (!mac->id)
-			goto err_phy;
-		mac->ge_mode = 3;
-		break;
+	case PHY_INTERFACE_MODE_REVMII:
+	case PHY_INTERFACE_MODE_NA:
 	default:
-		goto err_phy;
+		phylink_set(mask, 10baseT_Half);
+		phylink_set(mask, 10baseT_Full);
+		phylink_set(mask, 100baseT_Half);
+		phylink_set(mask, 100baseT_Full);
+		break;
 	}
 
-	/* put the gmac into the right mode */
-	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
-	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
-	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
-	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+	if (state->interface == PHY_INTERFACE_MODE_NA) {
+		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+			phylink_set(mask, 1000baseT_Full);
+			phylink_set(mask, 1000baseX_Full);
+			phylink_set(mask, 2500baseX_Full);
+		}
+		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
+			phylink_set(mask, 1000baseT_Full);
+			phylink_set(mask, 1000baseT_Half);
+			phylink_set(mask, 1000baseX_Full);
+		}
+		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
+			phylink_set(mask, 1000baseT_Full);
+			phylink_set(mask, 1000baseT_Half);
+		}
+	}
 
-	/* couple phydev to net_device */
-	if (mtk_phy_connect_node(eth, mac, np))
-		goto err_phy;
+	phylink_set(mask, Pause);
+	phylink_set(mask, Asym_Pause);
 
-	dev->phydev->autoneg = AUTONEG_ENABLE;
-	dev->phydev->speed = 0;
-	dev->phydev->duplex = 0;
+	linkmode_and(supported, supported, mask);
+	linkmode_and(state->advertising, state->advertising, mask);
 
-	if (of_phy_is_fixed_link(mac->of_node))
-		dev->phydev->supported |=
-		SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
-	dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
-				  SUPPORTED_Asym_Pause;
-	dev->phydev->advertising = dev->phydev->supported |
-				   ADVERTISED_Autoneg;
-	phy_start_aneg(dev->phydev);
-
-	of_node_put(np);
-
-	return 0;
-
-err_phy:
-	if (of_phy_is_fixed_link(mac->of_node))
-		of_phy_deregister_fixed_link(mac->of_node);
-	of_node_put(np);
-	dev_err(eth->dev, "%s: invalid phy\n", __func__);
-	return -EINVAL;
+	/* We can only operate at 2500BaseX or 1000BaseX. If requested
+	 * to advertise both, only report advertising at 2500BaseX.
+	 */
+	phylink_helper_basex_speed(state);
 }
+
+static const struct phylink_mac_ops mtk_phylink_ops = {
+	.validate = mtk_validate,
+	.mac_pcs_get_state = mtk_mac_pcs_get_state,
+	.mac_an_restart = mtk_mac_an_restart,
+	.mac_config = mtk_mac_config,
+	.mac_link_down = mtk_mac_link_down,
+	.mac_link_up = mtk_mac_link_up,
+};
 
 static int mtk_mdio_init(struct mtk_eth *eth)
 {
@@ -405,7 +584,7 @@
 	eth->mii_bus->priv = eth;
 	eth->mii_bus->parent = eth->dev;
 
-	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
+	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
 
 err_put_node:
@@ -427,8 +606,8 @@
 	u32 val;
 
 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
-	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
-	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+	val = mtk_r32(eth, eth->tx_int_mask_reg);
+	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
@@ -438,8 +617,8 @@
 	u32 val;
 
 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
-	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
-	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+	val = mtk_r32(eth, eth->tx_int_mask_reg);
+	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
445624
....@@ -469,6 +648,7 @@
469648 {
470649 int ret = eth_mac_addr(dev, p);
471650 struct mtk_mac *mac = netdev_priv(dev);
651
+ struct mtk_eth *eth = mac->hw;
472652 const char *macaddr = dev->dev_addr;
473653
474654 if (ret)
....@@ -478,11 +658,19 @@
478658 return -EBUSY;
479659
480660 spin_lock_bh(&mac->hw->page_lock);
481
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
482
- MTK_GDMA_MAC_ADRH(mac->id));
483
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
484
- (macaddr[4] << 8) | macaddr[5],
485
- MTK_GDMA_MAC_ADRL(mac->id));
661
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
662
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
663
+ MT7628_SDM_MAC_ADRH);
664
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
665
+ (macaddr[4] << 8) | macaddr[5],
666
+ MT7628_SDM_MAC_ADRL);
667
+ } else {
668
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
669
+ MTK_GDMA_MAC_ADRH(mac->id));
670
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
671
+ (macaddr[4] << 8) | macaddr[5],
672
+ MTK_GDMA_MAC_ADRL(mac->id));
673
+ }
486674 spin_unlock_bh(&mac->hw->page_lock);
487675
488676 return 0;
@@ -491,32 +679,53 @@
 void mtk_stats_update_mac(struct mtk_mac *mac)
 {
 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
-	unsigned int base = MTK_GDM1_TX_GBCNT;
-	u64 stats;
-
-	base += hw_stats->reg_offset;
+	struct mtk_eth *eth = mac->hw;
 
 	u64_stats_update_begin(&hw_stats->syncp);
 
-	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-	stats = mtk_r32(mac->hw, base + 0x04);
-	if (stats)
-		hw_stats->rx_bytes += (stats << 32);
-	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
-	hw_stats->rx_flow_control_packets +=
-		mtk_r32(mac->hw, base + 0x24);
-	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-	stats = mtk_r32(mac->hw, base + 0x34);
-	if (stats)
-		hw_stats->tx_bytes += (stats << 32);
-	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+		hw_stats->rx_checksum_errors +=
+			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+	} else {
+		unsigned int offs = hw_stats->reg_offset;
+		u64 stats;
+
+		hw_stats->rx_bytes += mtk_r32(mac->hw,
+					      MTK_GDM1_RX_GBCNT_L + offs);
+		stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+		if (stats)
+			hw_stats->rx_bytes += (stats << 32);
+		hw_stats->rx_packets +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+		hw_stats->rx_overflow +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+		hw_stats->rx_fcs_errors +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+		hw_stats->rx_short_errors +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+		hw_stats->rx_long_errors +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+		hw_stats->rx_checksum_errors +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+		hw_stats->rx_flow_control_packets +=
+			mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+		hw_stats->tx_skip +=
+			mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+		hw_stats->tx_collisions +=
+			mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+		hw_stats->tx_bytes +=
+			mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+		stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+		if (stats)
+			hw_stats->tx_bytes += (stats << 32);
+		hw_stats->tx_packets +=
+			mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+	}
+
 	u64_stats_update_end(&hw_stats->syncp);
 }
 
@@ -597,6 +806,17 @@
 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
 }
 
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+	unsigned long data;
+
+	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+				get_order(size));
+
+	return (void *)data;
+}
+
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
@@ -605,10 +825,10 @@
 	dma_addr_t dma_addr;
 	int i;
 
-	eth->scratch_ring = dma_zalloc_coherent(eth->dev,
-						cnt * sizeof(struct mtk_tx_dma),
-						&eth->phy_scratch_ring,
-						GFP_ATOMIC);
+	eth->scratch_ring = dma_alloc_coherent(eth->dev,
+					       cnt * sizeof(struct mtk_tx_dma),
+					       &eth->phy_scratch_ring,
+					       GFP_ATOMIC);
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
@@ -658,24 +878,75 @@
 	return &ring->buf[idx];
 }
 
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+				       struct mtk_tx_dma *dma)
+{
+	return ring->dma_pdma - ring->dma + dma;
+}
+
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+}
+
 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
-	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(eth->dev,
-				 dma_unmap_addr(tx_buf, dma_addr0),
-				 dma_unmap_len(tx_buf, dma_len0),
-				 DMA_TO_DEVICE);
-	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(eth->dev,
-			       dma_unmap_addr(tx_buf, dma_addr0),
-			       dma_unmap_len(tx_buf, dma_len0),
-			       DMA_TO_DEVICE);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+			dma_unmap_single(eth->dev,
+					 dma_unmap_addr(tx_buf, dma_addr0),
+					 dma_unmap_len(tx_buf, dma_len0),
+					 DMA_TO_DEVICE);
+		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+			dma_unmap_page(eth->dev,
+				       dma_unmap_addr(tx_buf, dma_addr0),
+				       dma_unmap_len(tx_buf, dma_len0),
+				       DMA_TO_DEVICE);
+		}
+	} else {
+		if (dma_unmap_len(tx_buf, dma_len0)) {
+			dma_unmap_page(eth->dev,
+				       dma_unmap_addr(tx_buf, dma_addr0),
+				       dma_unmap_len(tx_buf, dma_len0),
+				       DMA_TO_DEVICE);
+		}
+
+		if (dma_unmap_len(tx_buf, dma_len1)) {
+			dma_unmap_page(eth->dev,
+				       dma_unmap_addr(tx_buf, dma_addr1),
+				       dma_unmap_len(tx_buf, dma_len1),
+				       DMA_TO_DEVICE);
+		}
 	}
+
 	tx_buf->flags = 0;
 	if (tx_buf->skb &&
 	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
 		dev_kfree_skb_any(tx_buf->skb);
 	tx_buf->skb = NULL;
+}
+
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+			 size_t size, int idx)
+{
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+		dma_unmap_len_set(tx_buf, dma_len0, size);
+	} else {
+		if (idx & 1) {
+			txd->txd3 = mapped_addr;
+			txd->txd2 |= TX_DMA_PLEN1(size);
+			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+			dma_unmap_len_set(tx_buf, dma_len1, size);
+		} else {
+			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+			txd->txd1 = mapped_addr;
+			txd->txd2 = TX_DMA_PLEN0(size);
+			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+			dma_unmap_len_set(tx_buf, dma_len0, size);
+		}
+	}
 }
 
 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
@@ -684,13 +955,16 @@
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
+	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
 	struct mtk_tx_buf *itx_buf, *tx_buf;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
 	u32 txd4 = 0, fport;
+	int k = 0;
 
 	itxd = ring->next_free;
+	itxd_pdma = qdma_to_pdma(ring, itxd);
 	if (itxd == ring->last_free)
 		return -ENOMEM;
 
@@ -721,26 +995,37 @@
 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
 			  MTK_TX_FLAGS_FPORT1;
-	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
-	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
+	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+		     k++);
 
 	/* TX SG offload */
 	txd = itxd;
+	txd_pdma = qdma_to_pdma(ring, txd);
 	nr_frags = skb_shinfo(skb)->nr_frags;
+
 	for (i = 0; i < nr_frags; i++) {
-		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		unsigned int offset = 0;
 		int frag_size = skb_frag_size(frag);
 
 		while (frag_size) {
 			bool last_frag = false;
 			unsigned int frag_map_size;
+			bool new_desc = true;
 
-			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
-			if (txd == ring->last_free)
-				goto err_dma;
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+			    (i & 0x1)) {
+				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+				txd_pdma = qdma_to_pdma(ring, txd);
+				if (txd == ring->last_free)
+					goto err_dma;
 
-			n_desc++;
+				n_desc++;
+			} else {
+				new_desc = false;
+			}
+
+
 			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
 			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 						       frag_map_size,
@@ -759,14 +1044,16 @@
 			WRITE_ONCE(txd->txd4, fport);
 
 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
-			memset(tx_buf, 0, sizeof(*tx_buf));
+			if (new_desc)
+				memset(tx_buf, 0, sizeof(*tx_buf));
 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
 					 MTK_TX_FLAGS_FPORT1;
 
-			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
+			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
+				     frag_map_size, k++);
+
 			frag_size -= frag_map_size;
 			offset += frag_map_size;
 		}
@@ -778,6 +1065,12 @@
 	WRITE_ONCE(itxd->txd4, txd4);
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		if (k & 0x1)
+			txd_pdma->txd2 |= TX_DMA_LS0;
+		else
+			txd_pdma->txd2 |= TX_DMA_LS1;
+	}
 
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
@@ -790,8 +1083,15 @@
 	 */
 	wmb();
 
-	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+		    !netdev_xmit_more())
+			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+	} else {
+		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
+					     ring->dma_size);
+		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+	}
 
 	return 0;
 
@@ -803,7 +1103,11 @@
 		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+		itxd_pdma = qdma_to_pdma(ring, itxd);
 	} while (itxd != txd);
 
 	return -ENOMEM;
@@ -812,13 +1116,14 @@
 static inline int mtk_cal_txd_req(struct sk_buff *skb)
 {
 	int i, nfrags;
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 
 	nfrags = 1;
 	if (skb_is_gso(skb)) {
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			frag = &skb_shinfo(skb)->frags[i];
-			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
+			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+					       MTK_TX_DMA_BUF_LEN);
 		}
 	} else {
 		nfrags += skb_shinfo(skb)->nr_frags;
@@ -863,7 +1168,7 @@
 	}
 }
 
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
@@ -933,7 +1238,7 @@
 
 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
 		ring = &eth->rx_ring[i];
-		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
 		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
 			ring->calc_idx_update = true;
 			return ring;
@@ -976,13 +1281,13 @@
 		struct net_device *netdev;
 		unsigned int pktlen;
 		dma_addr_t dma_addr;
-		int mac = 0;
+		int mac;
 
 		ring = mtk_get_rx_ring(eth);
 		if (unlikely(!ring))
 			goto rx_done;
 
-		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
 		rxd = &ring->dma[idx];
 		data = ring->data[idx];
 
@@ -991,9 +1296,13 @@
 			break;
 
 		/* find out which mac the packet come from. values start at 1 */
-		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-		      RX_DMA_FPORT_MASK;
-		mac--;
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+			mac = 0;
+		} else {
+			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+			      RX_DMA_FPORT_MASK;
+			mac--;
+		}
 
 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
 			     !eth->netdev[mac]))
@@ -1005,13 +1314,17 @@
 			goto release_desc;
 
 		/* alloc new buffer */
-		new_data = napi_alloc_frag(ring->frag_size);
+		if (ring->frag_size <= PAGE_SIZE)
+			new_data = napi_alloc_frag(ring->frag_size);
+		else
+			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
 		if (unlikely(!new_data)) {
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
 		dma_addr = dma_map_single(eth->dev,
-					  new_data + NET_SKB_PAD,
+					  new_data + NET_SKB_PAD +
+					  eth->ip_align,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
@@ -1034,7 +1347,7 @@
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
 		skb_put(skb, pktlen);
-		if (trxd.rxd4 & RX_DMA_L4_VALID)
+		if (trxd.rxd4 & eth->rx_dma_l4_valid)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb_checksum_none_assert(skb);
@@ -1051,7 +1364,10 @@
 		rxd->rxd1 = (unsigned int)dma_addr;
 
 release_desc:
-		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+			rxd->rxd2 = RX_DMA_LSO;
+		else
+			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
 
 		ring->calc_idx = idx;
 
@@ -1070,19 +1386,14 @@
 	return done;
 }
 
-static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+			    unsigned int *done, unsigned int *bytes)
 {
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct mtk_tx_dma *desc;
 	struct sk_buff *skb;
 	struct mtk_tx_buf *tx_buf;
-	unsigned int done[MTK_MAX_DEVS];
-	unsigned int bytes[MTK_MAX_DEVS];
 	u32 cpu, dma;
-	int total = 0, i;
-
-	memset(done, 0, sizeof(done));
-	memset(bytes, 0, sizeof(bytes));
 
 	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
 	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
@@ -1120,6 +1431,62 @@
 
 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
 
+	return budget;
+}
+
+static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+			    unsigned int *done, unsigned int *bytes)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	struct mtk_tx_dma *desc;
+	struct sk_buff *skb;
+	struct mtk_tx_buf *tx_buf;
+	u32 cpu, dma;
+
+	cpu = ring->cpu_idx;
+	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+
+	while ((cpu != dma) && budget) {
+		tx_buf = &ring->buf[cpu];
+		skb = tx_buf->skb;
+		if (!skb)
+			break;
+
+		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+			bytes[0] += skb->len;
+			done[0]++;
+			budget--;
+		}
+
+		mtk_tx_unmap(eth, tx_buf);
+
+		desc = &ring->dma[cpu];
+		ring->last_free = desc;
+		atomic_inc(&ring->free_count);
+
+		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
+	}
+
+	ring->cpu_idx = cpu;
+
+	return budget;
+}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	unsigned int done[MTK_MAX_DEVS];
+	unsigned int bytes[MTK_MAX_DEVS];
+	int total = 0, i;
+
+	memset(done, 0, sizeof(done));
+	memset(bytes, 0, sizeof(bytes));
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+	else
+		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
 		if (!eth->netdev[i] || !done[i])
 			continue;
@@ -1151,13 +1518,14 @@
 	u32 status, mask;
 	int tx_done = 0;
 
-	mtk_handle_status_irq(eth);
-	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		mtk_handle_status_irq(eth);
+	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
 	tx_done = mtk_poll_tx(eth, budget);
 
 	if (unlikely(netif_msg_intr(eth))) {
-		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+		status = mtk_r32(eth, eth->tx_int_status_reg);
+		mask = mtk_r32(eth, eth->tx_int_mask_reg);
 		dev_info(eth->dev,
 			 "done tx %d, intr 0x%08x/0x%x\n",
 			 tx_done, status, mask);
@@ -1166,7 +1534,7 @@
 	if (tx_done == budget)
 		return budget;
 
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+	status = mtk_r32(eth, eth->tx_int_status_reg);
 	if (status & MTK_TX_DONE_INT)
 		return budget;
 
@@ -1220,8 +1588,8 @@
 	if (!ring->buf)
 		goto no_tx_mem;
 
-	ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
-					&ring->phys, GFP_ATOMIC);
+	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+				       &ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		goto no_tx_mem;
 
@@ -1233,6 +1601,24 @@
 		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 	}
 
+	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
+	 * only as the framework. The real HW descriptors are the PDMA
+	 * descriptors in ring->dma_pdma.
+	 */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+						    &ring->phys_pdma,
+						    GFP_ATOMIC);
+		if (!ring->dma_pdma)
+			goto no_tx_mem;
+
+		for (i = 0; i < MTK_DMA_SIZE; i++) {
+			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
+			ring->dma_pdma[i].txd4 = 0;
+		}
+	}
+
+	ring->dma_size = MTK_DMA_SIZE;
 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
 	ring->next_free = &ring->dma[0];
 	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
@@ -1243,15 +1629,23 @@
 	 */
 	wmb();
 
-	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
-	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
-	mtk_w32(eth,
-		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
-		MTK_QTX_CRX_PTR);
-	mtk_w32(eth,
-		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
-		MTK_QTX_DRX_PTR);
-	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
+		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+		mtk_w32(eth,
+			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+			MTK_QTX_CRX_PTR);
+		mtk_w32(eth,
+			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+			MTK_QTX_DRX_PTR);
+		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
+			MTK_QTX_CFG(0));
+	} else {
+		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
+		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
+		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+	}
 
 	return 0;
 
@@ -1277,6 +1671,14 @@
 				  ring->dma,
 				  ring->phys);
 		ring->dma = NULL;
+	}
+
+	if (ring->dma_pdma) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+				  ring->dma_pdma,
+				  ring->phys_pdma);
+		ring->dma_pdma = NULL;
 	}
 }
 
@@ -1312,27 +1714,33 @@
 		return -ENOMEM;
 
 	for (i = 0; i < rx_dma_size; i++) {
-		ring->data[i] = netdev_alloc_frag(ring->frag_size);
+		if (ring->frag_size <= PAGE_SIZE)
+			ring->data[i] = netdev_alloc_frag(ring->frag_size);
+		else
+			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
 		if (!ring->data[i])
 			return -ENOMEM;
 	}
 
-	ring->dma = dma_zalloc_coherent(eth->dev,
-					rx_dma_size * sizeof(*ring->dma),
-					&ring->phys, GFP_ATOMIC);
+	ring->dma = dma_alloc_coherent(eth->dev,
+				       rx_dma_size * sizeof(*ring->dma),
+				       &ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		return -ENOMEM;
 
 	for (i = 0; i < rx_dma_size; i++) {
 		dma_addr_t dma_addr = dma_map_single(eth->dev,
-				ring->data[i] + NET_SKB_PAD,
+				ring->data[i] + NET_SKB_PAD + eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
 			return -ENOMEM;
 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
 
-		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+			ring->dma[i].rxd2 = RX_DMA_LSO;
+		else
+			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
 	}
 	ring->dma_size = rx_dma_size;
 	ring->calc_idx_update = false;
@@ -1575,6 +1983,9 @@
 	struct ethtool_rx_flow_spec *fsp =
 		(struct ethtool_rx_flow_spec *)&cmd->fs;
 
+	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+		return -EINVAL;
+
 	/* only tcp dst ipv4 is meaningful, others are meaningless */
 	fsp->flow_type = TCP_V4_FLOW;
 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
@@ -1648,9 +2059,16 @@
 	unsigned long t_start = jiffies;
 
 	while (1) {
-		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
-		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
-			return 0;
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+				return 0;
+		} else {
+			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
+			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+				return 0;
+		}
+
 		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
 			break;
 	}
@@ -1667,20 +2085,24 @@
 	if (mtk_dma_busy_wait(eth))
 		return -EBUSY;
 
-	/* QDMA needs scratch memory for internal reordering of the
-	 * descriptors
-	 */
-	err = mtk_init_fq_dma(eth);
-	if (err)
-		return err;
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* QDMA needs scratch memory for internal reordering of the
+		 * descriptors
+		 */
+		err = mtk_init_fq_dma(eth);
+		if (err)
+			return err;
+	}
 
 	err = mtk_tx_alloc(eth);
 	if (err)
 		return err;
 
-	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
-	if (err)
-		return err;
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+		if (err)
+			return err;
+	}
 
 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
 	if (err)
@@ -1697,10 +2119,14 @@
 			return err;
 	}
 
-	/* Enable random early drop and set drop threshold automatically */
-	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
-		MTK_QDMA_FC_THRES);
-	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* Enable random early drop and set drop threshold
+		 * automatically
+		 */
+		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+			FC_THRES_MIN, MTK_QDMA_FC_THRES);
+		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+	}
 
 	return 0;
 }
@@ -1733,7 +2159,7 @@
 	kfree(eth->scratch_head);
 }
 
-static void mtk_tx_timeout(struct net_device *dev)
+static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
@@ -1768,6 +2194,22 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
+		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+			mtk_handle_irq_rx(irq, _eth);
+	}
+	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
+		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+			mtk_handle_irq_tx(irq, _eth);
+	}
+
+	return IRQ_HANDLED;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void mtk_poll_controller(struct net_device *dev)
 {
@@ -1793,32 +2235,76 @@
 		return err;
 	}
 
-	mtk_w32(eth,
-		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
-		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
-		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
-		MTK_RX_BT_32DWORDS,
-		MTK_QDMA_GLO_CFG);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		mtk_w32(eth,
+			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+			MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
+			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+			MTK_RX_BT_32DWORDS,
+			MTK_QDMA_GLO_CFG);
 
-	mtk_w32(eth,
-		MTK_RX_DMA_EN | rx_2b_offset |
-		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
-		MTK_PDMA_GLO_CFG);
+		mtk_w32(eth,
+			MTK_RX_DMA_EN | rx_2b_offset |
+			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+			MTK_PDMA_GLO_CFG);
+	} else {
+		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+			MTK_PDMA_GLO_CFG);
+	}
 
 	return 0;
+}
+
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+	int i;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+		return;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+		/* default setup the forward port to send frame to PDMA */
+		val &= ~0xffff;
+
+		/* Enable RX checksum */
+		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+		val |= config;
+
+		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+	}
+	/* Reset and enable PSE */
+	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+	mtk_w32(eth, 0, MTK_RST_GL);
 }
 
 static int mtk_open(struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
+	int err;
+
+	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+	if (err) {
+		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+			   err);
+		return err;
+	}
 
 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
 	if (!refcount_read(&eth->dma_refcnt)) {
 		int err = mtk_start_dma(eth);
 
 		if (err)
+		if (err) {
+			phylink_disconnect_phy(mac->phylink);
 			return err;
+		}
+
+		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
 
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
@@ -1829,9 +2315,8 @@
 	else
 		refcount_inc(&eth->dma_refcnt);
 
-	phy_start(dev->phydev);
+	phylink_start(mac->phylink);
 	netif_start_queue(dev);
-
 	return 0;
 }
 
@@ -1863,19 +2348,25 @@
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 
+	phylink_stop(mac->phylink);
+
 	netif_tx_disable(dev);
-	phy_stop(dev->phydev);
+
+	phylink_disconnect_phy(mac->phylink);
 
 	/* only shutdown DMA if this is the last user */
 	if (!refcount_dec_and_test(&eth->dma_refcnt))
 		return 0;
+
+	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
 
 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
 
-	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
 	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
 
 	mtk_dma_free(eth);
@@ -1937,17 +2428,26 @@
 	if (ret)
 		goto err_disable_pm;
 
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		ret = device_reset(eth->dev);
+		if (ret) {
+			dev_err(eth->dev, "MAC reset failed!\n");
+			goto err_disable_pm;
+		}
+
+		/* enable interrupt delay for RX */
+		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+		/* disable delay and normal interrupt */
+		mtk_tx_irq_disable(eth, ~0);
+		mtk_rx_irq_disable(eth, ~0);
+
+		return 0;
+	}
+
+	/* Non-MT7628 handling... */
 	ethsys_reset(eth, RSTCTRL_FE);
 	ethsys_reset(eth, RSTCTRL_PPE);
-
-	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->mac[i])
-			continue;
-		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
-		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
-	}
-	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
 	if (eth->pctl) {
 		/* Set GE2 driving and slew rate */
@@ -1961,11 +2461,11 @@
 	}
 
 	/* Set linkdown as the default for each GMAC. Its own MCR would be set
-	 * up with the more appropriate value when mtk_phy_link_adjust call is
-	 * being invoked.
+	 * up with the more appropriate value when mtk_mac_config call is being
+	 * invoked.
 	 */
 	for (i = 0; i < MTK_MAC_COUNT; i++)
-		mtk_w32(eth, 0, MTK_MAC_MCR(i));
+		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
 
 	/* Indicates CDM to parse the MTK special tag from CPU
 	 * which also is working out for untag packets.
@@ -1983,8 +2483,6 @@
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
 	mtk_tx_irq_disable(eth, ~0);
 	mtk_rx_irq_disable(eth, ~0);
-	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
-	mtk_w32(eth, 0, MTK_RST_GL);
 
 	/* FE int grouping */
 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
@@ -1992,19 +2490,6 @@
 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
-
-	for (i = 0; i < 2; i++) {
-		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
-
-		/* setup the forward port to send frame to PDMA */
-		val &= ~0xffff;
-
-		/* Enable RX checksum */
-		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
-
-		/* setup the mac dma */
-		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
-	}
 
 	return 0;
 
@@ -2035,7 +2520,7 @@
 	const char *mac_addr;
 
 	mac_addr = of_get_mac_address(mac->of_node);
-	if (mac_addr)
+	if (!IS_ERR(mac_addr))
 		ether_addr_copy(dev->dev_addr, mac_addr);
 
 	/* If the mac address is invalid, use random mac address */
@@ -2045,7 +2530,7 @@
 			dev->dev_addr);
 	}
 
-	return mtk_phy_connect(dev);
+	return 0;
 }
 
 static void mtk_uninit(struct net_device *dev)
@@ -2053,20 +2538,20 @@
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 
-	phy_disconnect(dev->phydev);
-	if (of_phy_is_fixed_link(mac->of_node))
-		of_phy_deregister_fixed_link(mac->of_node);
+	phylink_disconnect_phy(mac->phylink);
 	mtk_tx_irq_disable(eth, ~0);
 	mtk_rx_irq_disable(eth, ~0);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
+	struct mtk_mac *mac = netdev_priv(dev);
+
 	switch (cmd) {
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
-		return phy_mii_ioctl(dev->phydev, ifr, cmd);
+		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
 	default:
 		break;
 	}
@@ -2106,16 +2591,6 @@
 	pinctrl_select_state(eth->dev->pins->p,
 			     eth->dev->pins->default_state);
 	mtk_hw_init(eth);
-
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->mac[i] ||
-		    of_phy_is_fixed_link(eth->mac[i]->of_node))
-			continue;
-		err = phy_init_hw(eth->netdev[i]->phydev);
-		if (err)
-			dev_err(eth->dev, "%s: PHY init failed.\n",
-				eth->netdev[i]->name);
-	}
 
 	/* restart DMA and enable IRQs */
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -2179,9 +2654,7 @@
 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
 		return -EBUSY;
 
-	phy_ethtool_ksettings_get(ndev->phydev, cmd);
-
-	return 0;
+	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
 }
 
 static int mtk_set_link_ksettings(struct net_device *ndev,
@@ -2192,7 +2665,7 @@
 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
 		return -EBUSY;
 
-	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
 }
 
 static void mtk_get_drvinfo(struct net_device *dev,
....@@ -2226,22 +2699,10 @@
22262699 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
22272700 return -EBUSY;
22282701
2229
- return genphy_restart_aneg(dev->phydev);
2230
-}
2702
+ if (!mac->phylink)
2703
+ return -EOPNOTSUPP;
22312704
2232
-static u32 mtk_get_link(struct net_device *dev)
2233
-{
2234
- struct mtk_mac *mac = netdev_priv(dev);
2235
- int err;
2236
-
2237
- if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2238
- return -EBUSY;
2239
-
2240
- err = genphy_update_link(dev->phydev);
2241
- if (err)
2242
- return ethtool_op_get_link(dev);
2243
-
2244
- return dev->phydev->link;
2705
+ return phylink_ethtool_nway_reset(mac->phylink);
22452706 }
22462707
22472708 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
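mtk_get_link() could be dropped because phylink keeps the netdev carrier flag in sync with the resolved link state, and the generic helper only reads that flag. ethtool_op_get_link() in the core is essentially:

    u32 ethtool_op_get_link(struct net_device *dev)
    {
            return netif_carrier_ok(dev) ? 1 : 0;
    }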
....@@ -2361,7 +2822,7 @@
23612822 .get_msglevel = mtk_get_msglevel,
23622823 .set_msglevel = mtk_set_msglevel,
23632824 .nway_reset = mtk_nway_reset,
2364
- .get_link = mtk_get_link,
2825
+ .get_link = ethtool_op_get_link,
23652826 .get_strings = mtk_get_strings,
23662827 .get_sset_count = mtk_get_sset_count,
23672828 .get_ethtool_stats = mtk_get_ethtool_stats,
....@@ -2389,8 +2850,10 @@
23892850
23902851 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
23912852 {
2392
- struct mtk_mac *mac;
23932853 const __be32 *_id = of_get_property(np, "reg", NULL);
2854
+ phy_interface_t phy_mode;
2855
+ struct phylink *phylink;
2856
+ struct mtk_mac *mac;
23942857 int id, err;
23952858
23962859 if (!_id) {
....@@ -2435,18 +2898,43 @@
24352898 u64_stats_init(&mac->hw_stats->syncp);
24362899 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
24372900
2901
+ /* phylink create */
2902
+ err = of_get_phy_mode(np, &phy_mode);
2903
+ if (err) {
2904
+ dev_err(eth->dev, "incorrect phy-mode\n");
2905
+ goto free_netdev;
2906
+ }
2907
+
2908
+ /* mac config is not set */
2909
+ mac->interface = PHY_INTERFACE_MODE_NA;
2910
+ mac->mode = MLO_AN_PHY;
2911
+ mac->speed = SPEED_UNKNOWN;
2912
+
2913
+ mac->phylink_config.dev = &eth->netdev[id]->dev;
2914
+ mac->phylink_config.type = PHYLINK_NETDEV;
2915
+
2916
+ phylink = phylink_create(&mac->phylink_config,
2917
+ of_fwnode_handle(mac->of_node),
2918
+ phy_mode, &mtk_phylink_ops);
2919
+ if (IS_ERR(phylink)) {
2920
+ err = PTR_ERR(phylink);
2921
+ goto free_netdev;
2922
+ }
2923
+
2924
+ mac->phylink = phylink;
2925
+
24382926 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
24392927 eth->netdev[id]->watchdog_timeo = 5 * HZ;
24402928 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
24412929 eth->netdev[id]->base_addr = (unsigned long)eth->base;
24422930
2443
- eth->netdev[id]->hw_features = MTK_HW_FEATURES;
2931
+ eth->netdev[id]->hw_features = eth->soc->hw_features;
24442932 if (eth->hwlro)
24452933 eth->netdev[id]->hw_features |= NETIF_F_LRO;
24462934
2447
- eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
2935
+ eth->netdev[id]->vlan_features = eth->soc->hw_features &
24482936 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2449
- eth->netdev[id]->features |= MTK_HW_FEATURES;
2937
+ eth->netdev[id]->features |= eth->soc->hw_features;
24502938 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
24512939
24522940 eth->netdev[id]->irq = eth->irq[0];
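phylink_create() ties the MAC to phylink through mtk_phylink_ops, defined earlier in this file. For this kernel generation such an ops table has roughly the following shape (a sketch with hypothetical example_* callbacks; the exact set of mandatory ops varies by kernel version):

    static const struct phylink_mac_ops example_phylink_ops = {
            .validate      = example_validate,      /* mask out unsupported link modes */
            .mac_config    = example_mac_config,    /* program the MAC for mode/interface */
            .mac_link_down = example_mac_link_down, /* force the MAC link down */
            .mac_link_up   = example_mac_link_up,   /* force the MAC link up */
    };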
....@@ -2463,11 +2951,9 @@
24632951
24642952 static int mtk_probe(struct platform_device *pdev)
24652953 {
2466
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
24672954 struct device_node *mac_np;
24682955 struct mtk_eth *eth;
2469
- int err;
2470
- int i;
2956
+ int err, i;
24712957
24722958 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
24732959 if (!eth)
....@@ -2476,29 +2962,58 @@
24762962 eth->soc = of_device_get_match_data(&pdev->dev);
24772963
24782964 eth->dev = &pdev->dev;
2479
- eth->base = devm_ioremap_resource(&pdev->dev, res);
2965
+ eth->base = devm_platform_ioremap_resource(pdev, 0);
24802966 if (IS_ERR(eth->base))
24812967 return PTR_ERR(eth->base);
2968
+
2969
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2970
+ eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
2971
+ eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
2972
+ } else {
2973
+ eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
2974
+ eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
2975
+ }
2976
+
2977
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2978
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
2979
+ eth->ip_align = NET_IP_ALIGN;
2980
+ } else {
2981
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
2982
+ }
24822983
24832984 spin_lock_init(&eth->page_lock);
24842985 spin_lock_init(&eth->tx_irq_lock);
24852986 spin_lock_init(&eth->rx_irq_lock);
24862987
2487
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2488
- "mediatek,ethsys");
2489
- if (IS_ERR(eth->ethsys)) {
2490
- dev_err(&pdev->dev, "no ethsys regmap found\n");
2491
- return PTR_ERR(eth->ethsys);
2988
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2989
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2990
+ "mediatek,ethsys");
2991
+ if (IS_ERR(eth->ethsys)) {
2992
+ dev_err(&pdev->dev, "no ethsys regmap found\n");
2993
+ return PTR_ERR(eth->ethsys);
2994
+ }
2995
+ }
2996
+
2997
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
2998
+ eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2999
+ "mediatek,infracfg");
3000
+ if (IS_ERR(eth->infra)) {
3001
+ dev_err(&pdev->dev, "no infracfg regmap found\n");
3002
+ return PTR_ERR(eth->infra);
3003
+ }
24923004 }
24933005
24943006 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
2495
- eth->sgmiisys =
2496
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2497
- "mediatek,sgmiisys");
2498
- if (IS_ERR(eth->sgmiisys)) {
2499
- dev_err(&pdev->dev, "no sgmiisys regmap found\n");
2500
- return PTR_ERR(eth->sgmiisys);
2501
- }
3007
+ eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3008
+ GFP_KERNEL);
3009
+ if (!eth->sgmii)
3010
+ return -ENOMEM;
3011
+
3012
+ err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3013
+ eth->soc->ana_rgc3);
3014
+
3015
+ if (err)
3016
+ return err;
25023017 }
25033018
25043019 if (eth->soc->required_pctl) {
....@@ -2511,7 +3026,10 @@
25113026 }
25123027
25133028 for (i = 0; i < 3; i++) {
2514
- eth->irq[i] = platform_get_irq(pdev, i);
3029
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3030
+ eth->irq[i] = eth->irq[0];
3031
+ else
3032
+ eth->irq[i] = platform_get_irq(pdev, i);
25153033 if (eth->irq[i] < 0) {
25163034 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
25173035 return -ENXIO;
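On MTK_SHARED_INT platforms (e.g. MT7621) the ethernet block exposes a single interrupt line, so irq[1] and irq[2] simply alias irq[0] and one combined handler is registered below. That handler must demultiplex by polling the status registers, roughly (a sketch; the real mtk_handle_irq is defined earlier in this file):

    static irqreturn_t example_handle_irq(int irq, void *_eth)
    {
            struct mtk_eth *eth = _eth;

            /* Dispatch to whichever of the RX/TX paths actually fired. */
            if ((mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) &&
                (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT))
                    mtk_handle_irq_rx(irq, _eth);

            if ((mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) &&
                (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT))
                    mtk_handle_irq_tx(irq, _eth);

            return IRQ_HANDLED;
    }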
....@@ -2550,23 +3068,36 @@
25503068 continue;
25513069
25523070 err = mtk_add_mac(eth, mac_np);
2553
- if (err)
3071
+ if (err) {
3072
+ of_node_put(mac_np);
25543073 goto err_deinit_hw;
3074
+ }
25553075 }
25563076
2557
- err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
2558
- dev_name(eth->dev), eth);
3077
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3078
+ err = devm_request_irq(eth->dev, eth->irq[0],
3079
+ mtk_handle_irq, 0,
3080
+ dev_name(eth->dev), eth);
3081
+ } else {
3082
+ err = devm_request_irq(eth->dev, eth->irq[1],
3083
+ mtk_handle_irq_tx, 0,
3084
+ dev_name(eth->dev), eth);
3085
+ if (err)
3086
+ goto err_free_dev;
3087
+
3088
+ err = devm_request_irq(eth->dev, eth->irq[2],
3089
+ mtk_handle_irq_rx, 0,
3090
+ dev_name(eth->dev), eth);
3091
+ }
25593092 if (err)
25603093 goto err_free_dev;
25613094
2562
- err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
2563
- dev_name(eth->dev), eth);
2564
- if (err)
2565
- goto err_free_dev;
2566
-
2567
- err = mtk_mdio_init(eth);
2568
- if (err)
2569
- goto err_free_dev;
3095
+ /* No MDIO support on MT7628/88 yet */
3096
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3097
+ err = mtk_mdio_init(eth);
3098
+ if (err)
3099
+ goto err_free_dev;
3100
+ }
25703101
25713102 for (i = 0; i < MTK_MAX_DEVS; i++) {
25723103 if (!eth->netdev[i])
....@@ -2608,6 +3139,7 @@
26083139 static int mtk_remove(struct platform_device *pdev)
26093140 {
26103141 struct mtk_eth *eth = platform_get_drvdata(pdev);
3142
+ struct mtk_mac *mac;
26113143 int i;
26123144
26133145 /* stop all devices to make sure that dma is properly shut down */
....@@ -2615,6 +3147,8 @@
26153147 if (!eth->netdev[i])
26163148 continue;
26173149 mtk_stop(eth->netdev[i]);
3150
+ mac = netdev_priv(eth->netdev[i]);
3151
+ phylink_disconnect_phy(mac->phylink);
26183152 }
26193153
26203154 mtk_hw_deinit(eth);
....@@ -2628,27 +3162,56 @@
26283162 }
26293163
26303164 static const struct mtk_soc_data mt2701_data = {
2631
- .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
3165
+ .caps = MT7623_CAPS | MTK_HWLRO,
3166
+ .hw_features = MTK_HW_FEATURES,
26323167 .required_clks = MT7623_CLKS_BITMAP,
26333168 .required_pctl = true,
26343169 };
26353170
3171
+static const struct mtk_soc_data mt7621_data = {
3172
+ .caps = MT7621_CAPS,
3173
+ .hw_features = MTK_HW_FEATURES,
3174
+ .required_clks = MT7621_CLKS_BITMAP,
3175
+ .required_pctl = false,
3176
+};
3177
+
26363178 static const struct mtk_soc_data mt7622_data = {
2637
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
3179
+ .ana_rgc3 = 0x2028,
3180
+ .caps = MT7622_CAPS | MTK_HWLRO,
3181
+ .hw_features = MTK_HW_FEATURES,
26383182 .required_clks = MT7622_CLKS_BITMAP,
26393183 .required_pctl = false,
26403184 };
26413185
26423186 static const struct mtk_soc_data mt7623_data = {
2643
- .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
3187
+ .caps = MT7623_CAPS | MTK_HWLRO,
3188
+ .hw_features = MTK_HW_FEATURES,
26443189 .required_clks = MT7623_CLKS_BITMAP,
26453190 .required_pctl = true,
26463191 };
26473192
3193
+static const struct mtk_soc_data mt7629_data = {
3194
+ .ana_rgc3 = 0x128,
3195
+ .caps = MT7629_CAPS | MTK_HWLRO,
3196
+ .hw_features = MTK_HW_FEATURES,
3197
+ .required_clks = MT7629_CLKS_BITMAP,
3198
+ .required_pctl = false,
3199
+};
3200
+
3201
+static const struct mtk_soc_data rt5350_data = {
3202
+ .caps = MT7628_CAPS,
3203
+ .hw_features = MTK_HW_FEATURES_MT7628,
3204
+ .required_clks = MT7628_CLKS_BITMAP,
3205
+ .required_pctl = false,
3206
+};
3207
+
26483208 const struct of_device_id of_mtk_match[] = {
26493209 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3210
+ { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
26503211 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
26513212 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3213
+ { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3214
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
26523215 {},
26533216 };
26543217 MODULE_DEVICE_TABLE(of, of_mtk_match);
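Each compatible string above carries a pointer to its mtk_soc_data; the OF core matches the device node against of_mtk_match[] and probe recovers the winning entry's .data, which seeds all the capability checks seen earlier:

    /* As at the top of mtk_probe(): */
    eth->soc = of_device_get_match_data(&pdev->dev);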