2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*******************************************************************************
   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   ST Ethernet IPs are built around a Synopsys IP Core.
 
   Copyright(C) 2007-2011 STMicroelectronics Ltd
 
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 
@@ -38,6 +28,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/prefetch.h>
 #include <linux/pinctrl/consumer.h>
 #ifdef CONFIG_DEBUG_FS
@@ -45,6 +36,7 @@
 #include <linux/seq_file.h>
 #endif /* CONFIG_DEBUG_FS */
 #include <linux/net_tstamp.h>
+#include <linux/phylink.h>
 #include <linux/udp.h>
 #include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
@@ -54,6 +46,13 @@
 #include "dwmac1000.h"
 #include "dwxgmac2.h"
 #include "hwif.h"
+
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+				 PTP_TCR_TSCTRLSSR)
 
 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
@@ -72,10 +71,10 @@
 module_param(phyaddr, int, 0444);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
-#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
 
@@ -103,7 +102,7 @@
 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 module_param(eee_timer, int, 0644);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
 
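The macro's argument is now interpreted in microseconds rather than milliseconds, matching the callers later in this patch that pass `priv->tx_lpi_timer` instead of the `eee_timer` module parameter. A standalone model of why the reinterpretation matters (the HZ value here is illustrative, not the kernel's):

```c
#include <assert.h>

/* Jiffies-conversion model with an illustrative HZ = 100 (10 ms tick).
 * Both helpers round up to at least one tick, as the kernel ones do.
 */
#define HZ 100

static unsigned long msecs_to_jiffies_model(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

static unsigned long usecs_to_jiffies_model(unsigned long us)
{
	return (us * HZ + 999999) / 1000000;
}

int main(void)
{
	/* the same numeric timeout shrinks 1000x once reinterpreted */
	assert(msecs_to_jiffies_model(1000) == 100);	/* 1 s */
	assert(usecs_to_jiffies_model(1000) == 1);	/* 1 ms, one tick */
	return 0;
}
```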
 /* By default the driver will use the ring mode to manage tx and rx descriptors,
  * but allow user to force to use the chain instead of the ring
@@ -115,11 +114,34 @@
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static const struct net_device_ops stmmac_netdev_ops;
+static void stmmac_init_fs(struct net_device *dev);
 static void stmmac_exit_fs(struct net_device *dev);
 #endif
 
 #define	STMMAC_COAL_TIMER(x)	(jiffies + usecs_to_jiffies(x))
 
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+	int ret = 0;
+
+	if (enabled) {
+		ret = clk_prepare_enable(priv->plat->stmmac_clk);
+		if (ret)
+			return ret;
+		ret = clk_prepare_enable(priv->plat->pclk);
+		if (ret) {
+			clk_disable_unprepare(priv->plat->stmmac_clk);
+			return ret;
+		}
+	} else {
+		clk_disable_unprepare(priv->plat->stmmac_clk);
+		clk_disable_unprepare(priv->plat->pclk);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
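The new exported helper pairs naturally with the `<linux/pm_runtime.h>` include added above. A minimal sketch of how runtime-PM callbacks might drive it; the callback names are assumptions for illustration, not part of this hunk:

```c
/* Hypothetical runtime-PM glue, assuming the helper above: gate the bus
 * clocks off when the device idles and back on before first use.
 */
static int stmmac_runtime_suspend_sketch(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_bus_clks_config(priv, false);	/* disable cannot fail */
	return 0;
}

static int stmmac_runtime_resume_sketch(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	return stmmac_bus_clks_config(priv, true); /* propagate clk errors */
}
```

Note the enable path unwinds `stmmac_clk` if `pclk` fails, so callers only ever see a fully-on or fully-off pair.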
 
 /**
  * stmmac_verify_args - verify the driver parameters.
@@ -156,7 +178,10 @@
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_disable(&ch->napi);
+		if (queue < rx_queues_cnt)
+			napi_disable(&ch->rx_napi);
+		if (queue < tx_queues_cnt)
+			napi_disable(&ch->tx_napi);
 	}
 }
 
@@ -174,7 +199,10 @@
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_enable(&ch->napi);
+		if (queue < rx_queues_cnt)
+			napi_enable(&ch->rx_napi);
+		if (queue < tx_queues_cnt)
+			napi_enable(&ch->tx_napi);
 	}
 }
 
@@ -273,7 +301,7 @@
 	if (tx_q->dirty_tx > tx_q->cur_tx)
 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 	else
-		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
 
 	return avail;
 }
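The helper above is plain circular-buffer arithmetic, now computed against the runtime-sized ring (the RX dirty counter in the next hunk is its mirror image). A standalone check with illustrative numbers:

```c
#include <assert.h>

/* Free TX slots in a ring of `size` entries; one slot is kept back as a
 * sentinel so a full ring is distinguishable from an empty one.
 */
static unsigned int tx_avail(unsigned int size, unsigned int cur,
			     unsigned int dirty)
{
	return dirty > cur ? dirty - cur - 1 : size - cur + dirty - 1;
}

int main(void)
{
	assert(tx_avail(512, 0, 0) == 511);	/* empty ring */
	assert(tx_avail(512, 510, 5) == 6);	/* producer near wrap */
	assert(tx_avail(512, 5, 510) == 504);	/* consumer behind wrap */
	return 0;
}
```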
@@ -291,24 +319,9 @@
 	if (rx_q->dirty_rx <= rx_q->cur_rx)
 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 	else
-		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
 
 	return dirty;
-}
-
-/**
- * stmmac_hw_fix_mac_speed - callback for speed selection
- * @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuration
- * registers have to be set according to the link speed negotiated.
- */
-static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
-{
-	struct net_device *ndev = priv->dev;
-	struct phy_device *phydev = ndev->phydev;
-
-	if (likely(priv->plat->fix_mac_speed))
-		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 }
 
 /**
@@ -351,7 +364,7 @@
 
 /**
  * stmmac_eee_ctrl_timer - EEE TX SW timer.
- * @arg : data hook
+ * @t: timer_list struct containing private info
  * Description:
  * if there is no data transfer and if we are not in LPI state,
  * then MAC Transmitter can be moved to LPI state.
@@ -361,7 +374,7 @@
 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 
 	stmmac_enable_eee_mode(priv);
-	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 }
 
 /**
@@ -374,67 +387,43 @@
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
-	struct net_device *ndev = priv->dev;
-	int interface = priv->plat->interface;
-	bool ret = false;
-
-	if ((interface != PHY_INTERFACE_MODE_MII) &&
-	    (interface != PHY_INTERFACE_MODE_GMII) &&
-	    !phy_interface_mode_is_rgmii(interface))
-		goto out;
+	int eee_tw_timer = priv->eee_tw_timer;
 
 	/* Using PCS we cannot deal with the phy registers at this stage
 	 * so we do not support extra features like EEE.
 	 */
-	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
-	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
-	    (priv->hw->pcs == STMMAC_PCS_RTBI))
-		goto out;
+	if (priv->hw->pcs == STMMAC_PCS_TBI ||
+	    priv->hw->pcs == STMMAC_PCS_RTBI)
+		return false;
 
-	/* MAC core supports the EEE feature. */
-	if (priv->dma_cap.eee) {
-		int tx_lpi_timer = priv->tx_lpi_timer;
+	/* Check if MAC core supports the EEE feature. */
+	if (!priv->dma_cap.eee)
+		return false;
 
-		/* Check if the PHY supports EEE */
-		if (phy_init_eee(ndev->phydev, 1)) {
-			/* To manage at run-time if the EEE cannot be supported
-			 * anymore (for example because the lp caps have been
-			 * changed).
-			 * In that case the driver disable own timers.
-			 */
-			mutex_lock(&priv->lock);
-			if (priv->eee_active) {
-				netdev_dbg(priv->dev, "disable EEE\n");
-				del_timer_sync(&priv->eee_ctrl_timer);
-				stmmac_set_eee_timer(priv, priv->hw, 0,
-						     tx_lpi_timer);
-			}
-			priv->eee_active = 0;
-			mutex_unlock(&priv->lock);
-			goto out;
+	mutex_lock(&priv->lock);
+
+	/* Check if it needs to be deactivated */
+	if (!priv->eee_active) {
+		if (priv->eee_enabled) {
+			netdev_dbg(priv->dev, "disable EEE\n");
+			del_timer_sync(&priv->eee_ctrl_timer);
+			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
 		}
-		/* Activate the EEE and start timers */
-		mutex_lock(&priv->lock);
-		if (!priv->eee_active) {
-			priv->eee_active = 1;
-			timer_setup(&priv->eee_ctrl_timer,
-				    stmmac_eee_ctrl_timer, 0);
-			mod_timer(&priv->eee_ctrl_timer,
-				  STMMAC_LPI_T(eee_timer));
-
-			stmmac_set_eee_timer(priv, priv->hw,
-					     STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
-		}
-		/* Set HW EEE according to the speed */
-		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
-
-		ret = true;
 		mutex_unlock(&priv->lock);
-
-		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+		return false;
 	}
-out:
-	return ret;
+
+	if (priv->eee_active && !priv->eee_enabled) {
+		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+				     eee_tw_timer);
+	}
+
+	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+
+	mutex_unlock(&priv->lock);
+	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+	return true;
 }
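After this rework `stmmac_eee_init()` no longer queries the PHY at all: `priv->eee_active` becomes an input set by the caller (see the phylink `mac_link_up`/`mac_link_down` callbacks later in this patch), and the function only arms or tears down the timers. A condensed, runnable model of the new decision logic:

```c
#include <stdbool.h>
#include <assert.h>

/* Model of the reworked stmmac_eee_init(): eee_active is an input from
 * PHY negotiation, eee_enabled reflects whether timers are currently
 * armed; arm_first_time stands in for timer_setup()+set_eee_timer().
 */
static bool eee_init_model(bool eee_active, bool eee_enabled,
			   bool *arm_first_time)
{
	*arm_first_time = false;
	if (!eee_active)
		return false;		/* disarm if previously enabled */
	if (!eee_enabled)
		*arm_first_time = true;	/* first activation */
	return true;			/* and always re-kick ctrl timer */
}

int main(void)
{
	bool arm;

	assert(!eee_init_model(false, true, &arm));	 /* link lost */
	assert(eee_init_model(true, false, &arm) && arm); /* first enable */
	assert(eee_init_model(true, true, &arm) && !arm); /* steady state */
	return 0;
}
```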
 
 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -449,6 +438,7 @@
 				   struct dma_desc *p, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps shhwtstamp;
+	bool found = false;
 	u64 ns = 0;
 
 	if (!priv->hwts_tx_en)
460450
461451 /* check tx tstamp status */
462452 if (stmmac_get_tx_timestamp_status(priv, p)) {
463
- /* get the valid tstamp */
464453 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
454
+ found = true;
455
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
456
+ found = true;
457
+ }
465458
459
+ if (found) {
466460 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
467461 shhwtstamp.hwtstamp = ns_to_ktime(ns);
468462
@@ -470,8 +464,6 @@
 		/* pass tstamp to stack */
 		skb_tstamp_tx(skb, &shhwtstamp);
 	}
-
-	return;
 }
 
 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@ -508,7 +500,6 @@
 	}
 }
 
-#ifdef CONFIG_STMMAC_PTP
 /**
  * stmmac_hwtstamp_set - control hardware timestamping.
  * @dev: device pointer.
@@ -524,8 +515,6 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	struct hwtstamp_config config;
-	struct timespec64 now;
-	u64 temp = 0;
 	u32 ptp_v2 = 0;
 	u32 tstamp_all = 0;
 	u32 ptp_over_ipv4_udp = 0;
@@ -534,11 +523,6 @@
 	u32 snap_type_sel = 0;
 	u32 ts_master_en = 0;
 	u32 ts_event_en = 0;
-	u32 sec_inc = 0;
-	u32 value = 0;
-	bool xmac;
-
-	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 
 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 		netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -644,7 +628,8 @@
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		ptp_v2 = PTP_TCR_TSVER2ENA;
 		snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-		ts_event_en = PTP_TCR_TSEVNTENA;
+		if (priv->synopsys_id < DWMAC_CORE_4_10)
+			ts_event_en = PTP_TCR_TSEVNTENA;
 		ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 		ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 		ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -699,41 +684,16 @@
 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 
-	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
-	else {
-		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
-			 tstamp_all | ptp_v2 | ptp_over_ethernet |
-			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
-			 ts_master_en | snap_type_sel);
-		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
+	priv->systime_flags = STMMAC_HWTS_ACTIVE;
 
-		/* program Sub Second Increment reg */
-		stmmac_config_sub_second_increment(priv,
-				priv->ptpaddr, priv->plat->clk_ptp_rate,
-				xmac, &sec_inc);
-		temp = div_u64(1000000000ULL, sec_inc);
-
-		/* Store sub second increment and flags for later use */
-		priv->sub_second_inc = sec_inc;
-		priv->systime_flags = value;
-
-		/* calculate default added value:
-		 * formula is :
-		 * addend = (2^32)/freq_div_ratio;
-		 * where, freq_div_ratio = 1e9ns/sec_inc
-		 */
-		temp = (u64)(temp << 32);
-		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
-		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
-
-		/* initialize system time */
-		ktime_get_real_ts64(&now);
-
-		/* lower 32 bits of tv_sec are safe until y2106 */
-		stmmac_init_systime(priv, priv->ptpaddr,
-				    (u32)now.tv_sec, now.tv_nsec);
+	if (priv->hwts_tx_en || priv->hwts_rx_en) {
+		priv->systime_flags |= tstamp_all | ptp_v2 |
+				       ptp_over_ethernet | ptp_over_ipv6_udp |
+				       ptp_over_ipv4_udp | ts_event_en |
+				       ts_master_en | snap_type_sel;
 	}
+
+	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
 
 	memcpy(&priv->tstamp_config, &config, sizeof(config));
 
@@ -748,7 +708,7 @@
  * a proprietary structure used to pass information to the driver.
  * Description:
  * This function obtains the current hardware timestamping settings
- as requested.
+ * as requested.
  */
 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 {
@@ -761,7 +721,57 @@
 	return copy_to_user(ifr->ifr_data, config,
 			    sizeof(*config)) ? -EFAULT : 0;
 }
-#endif /* CONFIG_STMMAC_PTP */
+
+/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * Will be rerun after resuming from suspend, case in which the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+	struct timespec64 now;
+	u32 sec_inc = 0;
+	u64 temp = 0;
+
+	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+		return -EOPNOTSUPP;
+
+	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+	priv->systime_flags = systime_flags;
+
+	/* program Sub Second Increment reg */
+	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+					   priv->plat->clk_ptp_rate,
+					   xmac, &sec_inc);
+	temp = div_u64(1000000000ULL, sec_inc);
+
+	/* Store sub second increment for later use */
+	priv->sub_second_inc = sec_inc;
+
+	/* calculate default added value:
+	 * formula is :
+	 * addend = (2^32)/freq_div_ratio;
+	 * where, freq_div_ratio = 1e9ns/sec_inc
+	 */
+	temp = (u64)(temp << 32);
+	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+	/* initialize system time */
+	ktime_get_real_ts64(&now);
+
+	/* lower 32 bits of tv_sec are safe until y2106 */
+	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
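The addend formula is easier to follow with numbers. Assuming the sub-second-increment helper picks `sec_inc = 2e9 / clk_ptp_rate` in fine-update mode (the in-tree behavior for these cores; the 50 MHz clock rate below is illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Mirror of the addend math above, under the stated sec_inc assumption. */
static uint32_t default_addend(uint64_t clk_ptp_rate)
{
	uint64_t sec_inc = 2000000000ULL / clk_ptp_rate;  /* ns per tick */
	uint64_t freq_div_ratio = 1000000000ULL / sec_inc; /* 1e9ns/sec_inc */

	return (uint32_t)((freq_div_ratio << 32) / clk_ptp_rate);
}

int main(void)
{
	/* 50 MHz PTP clock: sec_inc = 40 ns, ratio = 25 MHz, so
	 * addend = 2^32 * 25e6 / 50e6 = 0x80000000 -- the 32-bit
	 * accumulator wraps on every second clock edge on average.
	 */
	assert(default_addend(50000000) == 0x80000000u);
	return 0;
}
```

Frequency trimming then works by nudging the addend around this default, which is exactly why the value is stored in `priv->default_addend`.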
 
 /**
  * stmmac_init_ptp - init PTP
@@ -773,9 +783,11 @@
 static int stmmac_init_ptp(struct stmmac_priv *priv)
 {
 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+	int ret;
 
-	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
-		return -EOPNOTSUPP;
+	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+	if (ret)
+		return ret;
 
 	priv->adv_ts = 0;
 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -795,121 +807,274 @@
 	priv->hwts_tx_en = 0;
 	priv->hwts_rx_en = 0;
 
-	stmmac_ptp_register(priv);
-
 	return 0;
 }
 
 static void stmmac_release_ptp(struct stmmac_priv *priv)
 {
-	if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP))
-		clk_disable_unprepare(priv->plat->clk_ptp_ref);
+	clk_disable_unprepare(priv->plat->clk_ptp_ref);
 	stmmac_ptp_unregister(priv);
 }
 
 /**
  * stmmac_mac_flow_ctrl - Configure flow control in all queues
  * @priv: driver private structure
+ * @duplex: duplex passed to the next function
  * Description: It is used for configuring the flow control in all queues
  */
 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 {
 	u32 tx_cnt = priv->plat->tx_queues_to_use;
 
-	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
-			 priv->pause, tx_cnt);
+	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl & priv->plat->flow_ctrl,
+			 priv->pause, tx_cnt);
 }
 
-/**
- * stmmac_adjust_link - adjusts the link parameters
- * @dev: net device structure
- * Description: this is the helper called by the physical abstraction layer
- * drivers to communicate the phy link status. According the speed and duplex
- * this driver can invoke registered glue-logic as well.
- * It also invoke the eee initialization because it could happen when switch
- * on different networks (that are eee capable).
- */
-static void stmmac_adjust_link(struct net_device *dev)
+static void stmmac_validate(struct phylink_config *config,
+			    unsigned long *supported,
+			    struct phylink_link_state *state)
 {
-	struct stmmac_priv *priv = netdev_priv(dev);
-	struct phy_device *phydev = dev->phydev;
-	bool new_state = false;
+	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+	int tx_cnt = priv->plat->tx_queues_to_use;
+	int max_speed = priv->plat->max_speed;
 
-	if (!phydev)
-		return;
+	phylink_set(mac_supported, 10baseT_Half);
+	phylink_set(mac_supported, 10baseT_Full);
+	phylink_set(mac_supported, 100baseT_Half);
+	phylink_set(mac_supported, 100baseT_Full);
+	phylink_set(mac_supported, 1000baseT_Half);
+	phylink_set(mac_supported, 1000baseT_Full);
+	phylink_set(mac_supported, 1000baseKX_Full);
+	phylink_set(mac_supported, 100baseT1_Full);
+	phylink_set(mac_supported, 1000baseT1_Full);
 
-	mutex_lock(&priv->lock);
+	phylink_set(mac_supported, Autoneg);
+	phylink_set(mac_supported, Pause);
+	phylink_set(mac_supported, Asym_Pause);
+	phylink_set_port_modes(mac_supported);
 
-	if (phydev->link) {
-		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
-
-		/* Now we make sure that we can be in full duplex mode.
-		 * If not, we operate in half-duplex mode. */
-		if (phydev->duplex != priv->oldduplex) {
-			new_state = true;
-			if (!phydev->duplex)
-				ctrl &= ~priv->hw->link.duplex;
-			else
-				ctrl |= priv->hw->link.duplex;
-			priv->oldduplex = phydev->duplex;
+	/* Cut down 1G if asked to */
+	if ((max_speed > 0) && (max_speed < 1000)) {
+		phylink_set(mask, 1000baseT_Full);
+		phylink_set(mask, 1000baseX_Full);
+	} else if (priv->plat->has_xgmac) {
+		if (!max_speed || (max_speed >= 2500)) {
+			phylink_set(mac_supported, 2500baseT_Full);
+			phylink_set(mac_supported, 2500baseX_Full);
 		}
-		/* Flow Control operation */
-		if (phydev->pause)
-			stmmac_mac_flow_ctrl(priv, phydev->duplex);
-
-		if (phydev->speed != priv->speed) {
-			new_state = true;
-			ctrl &= ~priv->hw->link.speed_mask;
-			switch (phydev->speed) {
-			case SPEED_1000:
-				ctrl |= priv->hw->link.speed1000;
-				break;
-			case SPEED_100:
-				ctrl |= priv->hw->link.speed100;
-				break;
-			case SPEED_10:
-				ctrl |= priv->hw->link.speed10;
-				break;
-			default:
-				netif_warn(priv, link, priv->dev,
-					   "broken speed: %d\n", phydev->speed);
-				phydev->speed = SPEED_UNKNOWN;
-				break;
-			}
-			if (phydev->speed != SPEED_UNKNOWN)
-				stmmac_hw_fix_mac_speed(priv);
-			priv->speed = phydev->speed;
+		if (!max_speed || (max_speed >= 5000)) {
+			phylink_set(mac_supported, 5000baseT_Full);
 		}
-
-		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
-
-		if (!priv->oldlink) {
-			new_state = true;
-			priv->oldlink = true;
+		if (!max_speed || (max_speed >= 10000)) {
+			phylink_set(mac_supported, 10000baseSR_Full);
+			phylink_set(mac_supported, 10000baseLR_Full);
+			phylink_set(mac_supported, 10000baseER_Full);
+			phylink_set(mac_supported, 10000baseLRM_Full);
+			phylink_set(mac_supported, 10000baseT_Full);
+			phylink_set(mac_supported, 10000baseKX4_Full);
+			phylink_set(mac_supported, 10000baseKR_Full);
 		}
-	} else if (priv->oldlink) {
-		new_state = true;
-		priv->oldlink = false;
-		priv->speed = SPEED_UNKNOWN;
-		priv->oldduplex = DUPLEX_UNKNOWN;
+		if (!max_speed || (max_speed >= 25000)) {
+			phylink_set(mac_supported, 25000baseCR_Full);
+			phylink_set(mac_supported, 25000baseKR_Full);
+			phylink_set(mac_supported, 25000baseSR_Full);
+		}
+		if (!max_speed || (max_speed >= 40000)) {
+			phylink_set(mac_supported, 40000baseKR4_Full);
+			phylink_set(mac_supported, 40000baseCR4_Full);
+			phylink_set(mac_supported, 40000baseSR4_Full);
+			phylink_set(mac_supported, 40000baseLR4_Full);
+		}
+		if (!max_speed || (max_speed >= 50000)) {
+			phylink_set(mac_supported, 50000baseCR2_Full);
+			phylink_set(mac_supported, 50000baseKR2_Full);
+			phylink_set(mac_supported, 50000baseSR2_Full);
+			phylink_set(mac_supported, 50000baseKR_Full);
+			phylink_set(mac_supported, 50000baseSR_Full);
+			phylink_set(mac_supported, 50000baseCR_Full);
+			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
+			phylink_set(mac_supported, 50000baseDR_Full);
+		}
+		if (!max_speed || (max_speed >= 100000)) {
+			phylink_set(mac_supported, 100000baseKR4_Full);
+			phylink_set(mac_supported, 100000baseSR4_Full);
+			phylink_set(mac_supported, 100000baseCR4_Full);
+			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
+			phylink_set(mac_supported, 100000baseKR2_Full);
+			phylink_set(mac_supported, 100000baseSR2_Full);
+			phylink_set(mac_supported, 100000baseCR2_Full);
+			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
+			phylink_set(mac_supported, 100000baseDR2_Full);
+		}
 	}
 
-	if (new_state && netif_msg_link(priv))
-		phy_print_status(phydev);
+	/* Half-Duplex can only work with single queue */
+	if (tx_cnt > 1) {
+		phylink_set(mask, 10baseT_Half);
+		phylink_set(mask, 100baseT_Half);
+		phylink_set(mask, 1000baseT_Half);
+	}
 
-	mutex_unlock(&priv->lock);
+	linkmode_and(supported, supported, mac_supported);
+	linkmode_andnot(supported, supported, mask);
 
-	if (phydev->is_pseudo_fixed_link)
-		/* Stop PHY layer to call the hook to adjust the link in case
-		 * of a switch is attached to the stmmac driver.
-		 */
-		phydev->irq = PHY_IGNORE_INTERRUPT;
-	else
-		/* At this stage, init the EEE if supported.
-		 * Never called in case of fixed_link.
-		 */
-		priv->eee_enabled = stmmac_eee_init(priv);
+	linkmode_and(state->advertising, state->advertising, mac_supported);
+	linkmode_andnot(state->advertising, state->advertising, mask);
+
+	/* If PCS is supported, check which modes it supports. */
+	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
 }
+
+static void stmmac_mac_pcs_get_state(struct phylink_config *config,
+				     struct phylink_link_state *state)
+{
+	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+	state->link = 0;
+	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+			      const struct phylink_link_state *state)
+{
+	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+	/* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+				 unsigned int mode, phy_interface_t interface)
+{
+	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+	stmmac_mac_set(priv, priv->ioaddr, false);
+	priv->eee_active = false;
+	priv->tx_lpi_enabled = false;
+	stmmac_eee_init(priv);
+	stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+			       struct phy_device *phy,
+			       unsigned int mode, phy_interface_t interface,
+			       int speed, int duplex,
+			       bool tx_pause, bool rx_pause)
+{
+	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+	u32 ctrl;
+
+	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
+
+	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+	ctrl &= ~priv->hw->link.speed_mask;
+
+	if (interface == PHY_INTERFACE_MODE_USXGMII) {
+		switch (speed) {
+		case SPEED_10000:
+			ctrl |= priv->hw->link.xgmii.speed10000;
+			break;
+		case SPEED_5000:
+			ctrl |= priv->hw->link.xgmii.speed5000;
+			break;
+		case SPEED_2500:
+			ctrl |= priv->hw->link.xgmii.speed2500;
+			break;
+		default:
+			return;
+		}
+	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+		switch (speed) {
+		case SPEED_100000:
+			ctrl |= priv->hw->link.xlgmii.speed100000;
+			break;
+		case SPEED_50000:
+			ctrl |= priv->hw->link.xlgmii.speed50000;
+			break;
+		case SPEED_40000:
+			ctrl |= priv->hw->link.xlgmii.speed40000;
+			break;
+		case SPEED_25000:
+			ctrl |= priv->hw->link.xlgmii.speed25000;
+			break;
+		case SPEED_10000:
+			ctrl |= priv->hw->link.xgmii.speed10000;
+			break;
+		case SPEED_2500:
+			ctrl |= priv->hw->link.speed2500;
+			break;
+		case SPEED_1000:
+			ctrl |= priv->hw->link.speed1000;
+			break;
+		default:
+			return;
+		}
+	} else {
+		switch (speed) {
+		case SPEED_2500:
+			ctrl |= priv->hw->link.speed2500;
+			break;
+		case SPEED_1000:
+			ctrl |= priv->hw->link.speed1000;
+			break;
+		case SPEED_100:
+			ctrl |= priv->hw->link.speed100;
+			break;
+		case SPEED_10:
+			ctrl |= priv->hw->link.speed10;
+			break;
+		default:
+			return;
+		}
+	}
+
+	priv->speed = speed;
+
+	if (priv->plat->fix_mac_speed)
+		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+
+	if (!duplex)
+		ctrl &= ~priv->hw->link.duplex;
+	else
+		ctrl |= priv->hw->link.duplex;
+
+	/* Flow Control operation */
+	if (rx_pause && tx_pause)
+		priv->flow_ctrl = FLOW_AUTO;
+	else if (rx_pause && !tx_pause)
+		priv->flow_ctrl = FLOW_RX;
+	else if (!rx_pause && tx_pause)
+		priv->flow_ctrl = FLOW_TX;
+	else
+		priv->flow_ctrl = FLOW_OFF;
+
+	stmmac_mac_flow_ctrl(priv, duplex);
+
+	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+	stmmac_mac_set(priv, priv->ioaddr, true);
+	if (phy && priv->dma_cap.eee) {
+		priv->eee_active = phy_init_eee(phy, 1) >= 0;
+		priv->eee_enabled = stmmac_eee_init(priv);
+		priv->tx_lpi_enabled = priv->eee_enabled;
+		stmmac_set_eee_pls(priv, priv->hw, true);
+	}
+}
+
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+	.validate = stmmac_validate,
+	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
+	.mac_config = stmmac_mac_config,
+	.mac_an_restart = stmmac_mac_an_restart,
+	.mac_link_down = stmmac_mac_link_down,
+	.mac_link_up = stmmac_mac_link_up,
+};
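The two `linkmode_*` calls that end `stmmac_validate()` implement an and-then-prune pattern: `mac_supported` collects everything the MAC can do, while `mask` collects modes that must be removed (gigabit when `max_speed < 1000`, half-duplex when more than one TX queue is in use). A standalone bitmask model of the same operations:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t supported     = 0xff;	/* what the PHY offered */
	uint32_t mac_supported = 0x3f;	/* what the MAC supports */
	uint32_t mask          = 0x09;	/* modes forced off */

	supported &= mac_supported;	/* linkmode_and()    */
	supported &= ~mask;		/* linkmode_andnot() */
	assert(supported == 0x36);
	return 0;
}
```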
 
 /**
  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
@@ -936,6 +1101,55 @@
 	}
 }
 
+#if 0
+static void rtl8211F_led_control(struct phy_device *phydev)
+{
+	printk("ben debug:rtl8211F_led_control...1 \n");
+
+	if (!phydev) return;
+	if (phydev->phy_id != 0x001cc916) return;	/* only for the RTL8211F */
+
+	/* switch to extension page 44 */
+	phy_write(phydev, 31, 0x0d04);
+//add hc: 1000M --> orange
+//        100M  --> green
+	phy_write(phydev, 16, 0x6D02);
+//add hc: 1000M & 100M --> green
+//	phy_write(phydev, 16, 0x6C0A);
+	printk("ben debug:rtl8211F_led_control...2 \n");
+}
+#endif
+#define RTL_8211F_PHY_ID	0x001cc916
+#define RTL_8211F_PHY_ID_MASK	0x001fffff
+#define RTL_8211F_PAGE_SELECT	0x1f
+#define RTL_8211F_LCR_ADDR	0x10
+
+#define GREEN_LED	0
+#define YELLOW0_LED	1
+#define YELLOW1_LED	2
+
+static int rtl8211F_led_control(struct phy_device *phydev)
+{
+	unsigned int temp;
+
+	printk("<<<<<<ben test led ctrl start... %s\n", __FUNCTION__);
+	if (!phydev) return 0;
+	if (phydev->phy_id != 0x001cc916) return 0;	/* only for the RTL8211F */
+
+	phy_write(phydev, 31, 0xd04);
+	temp = 0x02 << (5 * GREEN_LED);
+	temp |= 0x08 << (5 * YELLOW0_LED);
+
+	temp |= 0x1b << (5 * YELLOW1_LED);
+	phy_write(phydev, 0x10, temp);
+
+	temp = 1 << (YELLOW1_LED + 1);
+	phy_write(phydev, 0x11, 0x00);
+	phy_write(phydev, 31, 0);
+
+	return 0;
+}
+
9401154 * stmmac_init_phy - PHY initialization
9411155 * @dev: net device structure
@@ -947,85 +1161,83 @@
 static int stmmac_init_phy(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
-	u32 tx_cnt = priv->plat->tx_queues_to_use;
-	struct phy_device *phydev;
-	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-	char bus_id[MII_BUS_ID_SIZE];
-	int interface = priv->plat->interface;
-	int max_speed = priv->plat->max_speed;
-	priv->oldlink = false;
-	priv->speed = SPEED_UNKNOWN;
-	priv->oldduplex = DUPLEX_UNKNOWN;
+	struct device_node *node;
+	int ret;
 
+
+	printk("ben stmmac_init_phy .. \n");
+	mdelay(2000);
+	printk("ben stmmac_init_phy delay .. \n");
 	if (priv->plat->integrated_phy_power)
-		priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
+		ret = priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
 
-	if (priv->plat->phy_node) {
-		phydev = of_phy_connect(dev, priv->plat->phy_node,
-					&stmmac_adjust_link, 0, interface);
-	} else {
-		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-			 priv->plat->bus_id);
+	node = priv->plat->phylink_node;
 
-		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-			 priv->plat->phy_addr);
-		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
-			   phy_id_fmt);
+	if (node)
+	{
+		//printk("ben ttt.. \n");
+		ret = phylink_of_phy_connect(priv->phylink, node, 0);
+		//printk("ben ttt:%d \n", ret);
+	}
 
-		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
-				     interface);
-	}
+	/* Some DT bindings do not set-up the PHY handle. Let's try to
+	 * manually parse it
+	 */
+	//printk("ben:stmmac_init_phy..1 \n");
+	if (!node || ret) {
+	//if (1) {
+		int addr = priv->plat->phy_addr;
+		struct phy_device *phydev;
 
-	if (IS_ERR_OR_NULL(phydev)) {
-		netdev_err(priv->dev, "Could not attach to PHY\n");
-		if (!phydev)
+		//printk("ben:stmmac_init_phy..2 \n");
+		phydev = mdiobus_get_phy(priv->mii, addr);
+		if (!phydev) {
+			netdev_err(priv->dev, "no phy at addr %d\n", addr);
 			return -ENODEV;
+		}
 
-		return PTR_ERR(phydev);
+		//rtl8211F_led_control(phydev);
+
+		//printk("ben:stmmac_init_phy..3 \n");
+		ret = phylink_connect_phy(priv->phylink, phydev);
+		//rtl8211F_led_control(phydev);
 	}
 
-	/* Stop Advertising 1000BASE Capability if interface is not GMII */
-	if ((interface == PHY_INTERFACE_MODE_MII) ||
-	    (interface == PHY_INTERFACE_MODE_RMII) ||
-	    (max_speed < 1000 && max_speed > 0))
-		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
-					 SUPPORTED_1000baseT_Full);
+	if (!priv->plat->pmt) {
+		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 
-	/*
-	 * Half-duplex mode not supported with multiqueue
-	 * half-duplex can only works with single queue
-	 */
-	if (tx_cnt > 1)
-		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
-				       SUPPORTED_100baseT_Half |
-				       SUPPORTED_10baseT_Half);
-
-	/*
-	 * Broken HW is sometimes missing the pull-up resistor on the
-	 * MDIO line, which results in reads to non-existent devices returning
-	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
-	 * device as well.
-	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
-	 */
-	if (!priv->plat->phy_node && phydev->phy_id == 0) {
-		phy_disconnect(phydev);
-		return -ENODEV;
+		phylink_ethtool_get_wol(priv->phylink, &wol);
+		device_set_wakeup_capable(priv->device, !!wol.supported);
 	}
+	return ret;
+}
 
-	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
-	 * subsequent PHY polling, make sure we force a link transition if
-	 * we have a UP/DOWN/UP transition
-	 */
-	if (phydev->is_pseudo_fixed_link)
-		phydev->irq = PHY_POLL;
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+	int mode = priv->plat->phy_interface;
+	struct phylink *phylink;
 
-	phy_attached_info(phydev);
+	priv->phylink_config.dev = &priv->dev->dev;
+	priv->phylink_config.type = PHYLINK_NETDEV;
+	priv->phylink_config.pcs_poll = true;
+
+	if (!fwnode)
+		fwnode = dev_fwnode(priv->device);
+
+	phylink = phylink_create(&priv->phylink_config, fwnode,
+				 mode, &stmmac_phylink_mac_ops);
+	if (IS_ERR(phylink))
+		return PTR_ERR(phylink);
+
+	priv->phylink = phylink;
 	return 0;
 }
 
 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 {
 	u32 rx_cnt = priv->plat->rx_queues_to_use;
+	unsigned int desc_size;
 	void *head_rx;
 	u32 queue;
 
@@ -1035,19 +1247,24 @@
 
 		pr_info("\tRX Queue %u rings\n", queue);
 
-		if (priv->extend_desc)
+		if (priv->extend_desc) {
 			head_rx = (void *)rx_q->dma_erx;
-		else
+			desc_size = sizeof(struct dma_extended_desc);
+		} else {
 			head_rx = (void *)rx_q->dma_rx;
+			desc_size = sizeof(struct dma_desc);
+		}
 
 		/* Display RX ring */
-		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+				    rx_q->dma_rx_phy, desc_size);
 	}
 }
 
 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 {
 	u32 tx_cnt = priv->plat->tx_queues_to_use;
+	unsigned int desc_size;
 	void *head_tx;
 	u32 queue;
 
@@ -1057,12 +1274,19 @@
 
 		pr_info("\tTX Queue %d rings\n", queue);
 
-		if (priv->extend_desc)
+		if (priv->extend_desc) {
 			head_tx = (void *)tx_q->dma_etx;
-		else
+			desc_size = sizeof(struct dma_extended_desc);
+		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+			head_tx = (void *)tx_q->dma_entx;
+			desc_size = sizeof(struct dma_edesc);
+		} else {
 			head_tx = (void *)tx_q->dma_tx;
+			desc_size = sizeof(struct dma_desc);
+		}
 
-		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+				    tx_q->dma_tx_phy, desc_size);
 	}
 }
 
@@ -1106,16 +1330,16 @@
 	int i;
 
 	/* Clear the RX descriptors */
-	for (i = 0; i < DMA_RX_SIZE; i++)
+	for (i = 0; i < priv->dma_rx_size; i++)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					    priv->use_riwt, priv->mode,
-					    (i == DMA_RX_SIZE - 1),
+					    (i == priv->dma_rx_size - 1),
 					    priv->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					    priv->use_riwt, priv->mode,
-					    (i == DMA_RX_SIZE - 1),
+					    (i == priv->dma_rx_size - 1),
 					    priv->dma_buf_sz);
 }
 
@@ -1132,13 +1356,19 @@
 	int i;
 
 	/* Clear the TX descriptors */
-	for (i = 0; i < DMA_TX_SIZE; i++)
+	for (i = 0; i < priv->dma_tx_size; i++) {
+		int last = (i == (priv->dma_tx_size - 1));
+		struct dma_desc *p;
+
 		if (priv->extend_desc)
-			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
-					    priv->mode, (i == DMA_TX_SIZE - 1));
+			p = &tx_q->dma_etx[i].basic;
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			p = &tx_q->dma_entx[i].basic;
 		else
-			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
-					    priv->mode, (i == DMA_TX_SIZE - 1));
+			p = &tx_q->dma_tx[i];
+
+		stmmac_init_tx_desc(priv, p, priv->mode, last);
+	}
 }
 
 /**
@@ -1176,26 +1406,30 @@
 				  int i, gfp_t flags, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-	struct sk_buff *skb;
+	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
 
-	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
-	if (!skb) {
-		netdev_err(priv->dev,
-			   "%s: Rx init fails; skb is NULL\n", __func__);
+	if (priv->dma_cap.addr64 <= 32)
+		gfp |= GFP_DMA32;
+
+	buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+	if (!buf->page)
 		return -ENOMEM;
-	}
-	rx_q->rx_skbuff[i] = skb;
-	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
-						priv->dma_buf_sz,
-						DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
-		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
-		dev_kfree_skb_any(skb);
-		return -EINVAL;
+
+	if (priv->sph) {
+		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+		if (!buf->sec_page)
+			return -ENOMEM;
+
+		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+	} else {
+		buf->sec_page = NULL;
+		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
 	}
 
-	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
-
+	buf->addr = page_pool_get_dma_addr(buf->page);
+	stmmac_set_desc_addr(priv, p, buf->addr);
 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
 		stmmac_init_desc3(priv, p);
 
@@ -1211,13 +1445,15 @@
 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
-	if (rx_q->rx_skbuff[i]) {
-		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
-	}
-	rx_q->rx_skbuff[i] = NULL;
+	if (buf->page)
+		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
+	buf->page = NULL;
+
+	if (buf->sec_page)
+		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
+	buf->sec_page = NULL;
 }
 
 /**
@@ -1264,18 +1500,8 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	int ret = -ENOMEM;
-	int bfsize = 0;
 	int queue;
 	int i;
-
-	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
-	if (bfsize < 0)
-		bfsize = 0;
-
-	if (bfsize < BUF_SIZE_16KiB)
-		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
-	priv->dma_buf_sz = bfsize;
 
 	/* RX INITIALIZATION */
 	netif_dbg(priv, probe, priv->dev,
@@ -1288,7 +1514,9 @@
 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
 			  (u32)rx_q->dma_rx_phy);
 
-		for (i = 0; i < DMA_RX_SIZE; i++) {
+		stmmac_clear_rx_descriptors(priv, queue);
+
+		for (i = 0; i < priv->dma_rx_size; i++) {
 			struct dma_desc *p;
 
 			if (priv->extend_desc)
@@ -1300,29 +1528,23 @@
 				      queue);
 			if (ret)
 				goto err_init_rx_buffers;
-
-			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
-				  (unsigned int)rx_q->rx_skbuff_dma[i]);
 		}
 
 		rx_q->cur_rx = 0;
-		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
-
-		stmmac_clear_rx_descriptors(priv, queue);
+		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
 
 		/* Setup the chained descriptor addresses */
 		if (priv->mode == STMMAC_CHAIN_MODE) {
 			if (priv->extend_desc)
 				stmmac_mode_init(priv, rx_q->dma_erx,
-						 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+						 rx_q->dma_rx_phy,
+						 priv->dma_rx_size, 1);
 			else
 				stmmac_mode_init(priv, rx_q->dma_rx,
-						 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+						 rx_q->dma_rx_phy,
+						 priv->dma_rx_size, 0);
 		}
 	}
-
-	buf_sz = bfsize;
 
 	return 0;
 
@@ -1334,7 +1556,7 @@
 		if (queue == 0)
 			break;
 
-		i = DMA_RX_SIZE;
+		i = priv->dma_rx_size;
 		queue--;
 	}
 
@@ -1366,16 +1588,20 @@
 		if (priv->mode == STMMAC_CHAIN_MODE) {
 			if (priv->extend_desc)
 				stmmac_mode_init(priv, tx_q->dma_etx,
-						 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
-			else
+						 tx_q->dma_tx_phy,
+						 priv->dma_tx_size, 1);
+			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
 				stmmac_mode_init(priv, tx_q->dma_tx,
-						 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+						 tx_q->dma_tx_phy,
+						 priv->dma_tx_size, 0);
 		}
 
-		for (i = 0; i < DMA_TX_SIZE; i++) {
+		for (i = 0; i < priv->dma_tx_size; i++) {
 			struct dma_desc *p;
 			if (priv->extend_desc)
 				p = &((tx_q->dma_etx + i)->basic);
+			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+				p = &((tx_q->dma_entx + i)->basic);
 			else
 				p = tx_q->dma_tx + i;
 
@@ -1434,7 +1660,7 @@
 {
 	int i;
 
-	for (i = 0; i < DMA_RX_SIZE; i++)
+	for (i = 0; i < priv->dma_rx_size; i++)
 		stmmac_free_rx_buffer(priv, queue, i);
 }
 
@@ -1447,7 +1673,7 @@
 {
 	int i;
 
-	for (i = 0; i < DMA_TX_SIZE; i++)
+	for (i = 0; i < priv->dma_tx_size; i++)
 		stmmac_free_tx_buffer(priv, queue, i);
 }
 
@@ -1482,16 +1708,17 @@
 
 		/* Free DMA regions of consistent memory previously allocated */
 		if (!priv->extend_desc)
-			dma_free_coherent(priv->device,
-					  DMA_RX_SIZE * sizeof(struct dma_desc),
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
 					  rx_q->dma_rx, rx_q->dma_rx_phy);
 		else
-			dma_free_coherent(priv->device, DMA_RX_SIZE *
+			dma_free_coherent(priv->device, priv->dma_rx_size *
 					  sizeof(struct dma_extended_desc),
 					  rx_q->dma_erx, rx_q->dma_rx_phy);
 
-		kfree(rx_q->rx_skbuff_dma);
-		kfree(rx_q->rx_skbuff);
+		kfree(rx_q->buf_pool);
+		if (rx_q->page_pool)
+			page_pool_destroy(rx_q->page_pool);
 	}
 }
 
@@ -1507,19 +1734,26 @@
 	/* Free TX queue resources */
 	for (queue = 0; queue < tx_count; queue++) {
 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+		size_t size;
+		void *addr;
 
 		/* Release the DMA TX socket buffers */
 		dma_free_tx_skbufs(priv, queue);
 
-		/* Free DMA regions of consistent memory previously allocated */
-		if (!priv->extend_desc)
-			dma_free_coherent(priv->device,
-					  DMA_TX_SIZE * sizeof(struct dma_desc),
-					  tx_q->dma_tx, tx_q->dma_tx_phy);
-		else
-			dma_free_coherent(priv->device, DMA_TX_SIZE *
-					  sizeof(struct dma_extended_desc),
-					  tx_q->dma_etx, tx_q->dma_tx_phy);
+		if (priv->extend_desc) {
+			size = sizeof(struct dma_extended_desc);
+			addr = tx_q->dma_etx;
+		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+			size = sizeof(struct dma_edesc);
+			addr = tx_q->dma_entx;
+		} else {
+			size = sizeof(struct dma_desc);
+			addr = tx_q->dma_tx;
+		}
+
+		size *= priv->dma_tx_size;
+
+		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
 
 		kfree(tx_q->tx_skbuff_dma);
 		kfree(tx_q->tx_skbuff);
@@ -1543,39 +1777,49 @@
 	/* RX queues buffers and DMA */
 	for (queue = 0; queue < rx_count; queue++) {
 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+		struct page_pool_params pp_params = { 0 };
+		unsigned int num_pages;
 
 		rx_q->queue_index = queue;
 		rx_q->priv_data = priv;
 
-		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
-						    sizeof(dma_addr_t),
-						    GFP_KERNEL);
-		if (!rx_q->rx_skbuff_dma)
-			goto err_dma;
+		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+		pp_params.pool_size = priv->dma_rx_size;
+		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+		pp_params.order = ilog2(num_pages);
+		pp_params.nid = dev_to_node(priv->device);
+		pp_params.dev = priv->device;
+		pp_params.dma_dir = DMA_FROM_DEVICE;
+		pp_params.max_len = num_pages * PAGE_SIZE;
 
-		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
-						sizeof(struct sk_buff *),
-						GFP_KERNEL);
-		if (!rx_q->rx_skbuff)
+		rx_q->page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(rx_q->page_pool)) {
+			ret = PTR_ERR(rx_q->page_pool);
+			rx_q->page_pool = NULL;
+			goto err_dma;
+		}
+
+		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+					 sizeof(*rx_q->buf_pool),
+					 GFP_KERNEL);
+		if (!rx_q->buf_pool)
 			goto err_dma;
 
 		if (priv->extend_desc) {
-			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
-							    DMA_RX_SIZE *
-							    sizeof(struct
-							    dma_extended_desc),
-							    &rx_q->dma_rx_phy,
-							    GFP_KERNEL);
+			rx_q->dma_erx = dma_alloc_coherent(priv->device,
+							   priv->dma_rx_size *
+							   sizeof(struct dma_extended_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
 			if (!rx_q->dma_erx)
 				goto err_dma;
 
 		} else {
-			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
-							   DMA_RX_SIZE *
-							   sizeof(struct
-							   dma_desc),
-							   &rx_q->dma_rx_phy,
-							   GFP_KERNEL);
+			rx_q->dma_rx = dma_alloc_coherent(priv->device,
+							  priv->dma_rx_size *
+							  sizeof(struct dma_desc),
+							  &rx_q->dma_rx_phy,
+							  GFP_KERNEL);
 			if (!rx_q->dma_rx)
 				goto err_dma;
 		}
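The pool sizing above can be sanity-checked by hand. Note that `ilog2()` truncates, so `pp_params.order` covers `dma_buf_sz` exactly when the per-buffer page count is a power of two, which holds for the common cases. A standalone model:

```c
#include <assert.h>

#define PAGE_SIZE_MODEL 4096u	/* illustrative PAGE_SIZE */

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

static unsigned int ilog2_model(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	/* default 1536-byte buffers: one order-0 page per buffer */
	assert(div_round_up(1536, PAGE_SIZE_MODEL) == 1);
	assert(ilog2_model(1) == 0);

	/* 16 KiB jumbo buffers (BUF_SIZE_16KiB): four pages, order 2 */
	assert(div_round_up(16384, PAGE_SIZE_MODEL) == 4);
	assert(ilog2_model(4) == 2);
	return 0;
}
```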
@@ -1606,48 +1850,50 @@
 	/* TX queues buffers and DMA */
 	for (queue = 0; queue < tx_count; queue++) {
 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+		size_t size;
+		void *addr;
 
 		tx_q->queue_index = queue;
 		tx_q->priv_data = priv;
 
-		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
-						    sizeof(*tx_q->tx_skbuff_dma),
-						    GFP_KERNEL);
+		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+					      sizeof(*tx_q->tx_skbuff_dma),
+					      GFP_KERNEL);
 		if (!tx_q->tx_skbuff_dma)
 			goto err_dma;
 
-		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
-						sizeof(struct sk_buff *),
-						GFP_KERNEL);
+		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+					  sizeof(struct sk_buff *),
+					  GFP_KERNEL);
 		if (!tx_q->tx_skbuff)
 			goto err_dma;
 
-		if (priv->extend_desc) {
-			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
-							    DMA_TX_SIZE *
-							    sizeof(struct
-							    dma_extended_desc),
-							    &tx_q->dma_tx_phy,
-							    GFP_KERNEL);
-			if (!tx_q->dma_etx)
-				goto err_dma;
-		} else {
-			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
-							   DMA_TX_SIZE *
-							   sizeof(struct
-							   dma_desc),
-							   &tx_q->dma_tx_phy,
-							   GFP_KERNEL);
-			if (!tx_q->dma_tx)
-				goto err_dma;
-		}
+		if (priv->extend_desc)
+			size = sizeof(struct dma_extended_desc);
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			size = sizeof(struct dma_edesc);
+		else
+			size = sizeof(struct dma_desc);
+
+		size *= priv->dma_tx_size;
+
+		addr = dma_alloc_coherent(priv->device, size,
+					  &tx_q->dma_tx_phy, GFP_KERNEL);
+		if (!addr)
+			goto err_dma;
+
+		if (priv->extend_desc)
+			tx_q->dma_etx = addr;
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			tx_q->dma_entx = addr;
+		else
+			tx_q->dma_tx = addr;
 	}
 
 	return 0;
 
 err_dma:
 	free_dma_tx_desc_resources(priv);
-
 	return ret;
 }
 
@@ -1858,6 +2104,7 @@
 /**
  * stmmac_tx_clean - to manage the transmission completion
  * @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
@@ -1879,6 +2126,8 @@
 
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			p = &tx_q->dma_entx[entry].basic;
 		else
 			p = tx_q->dma_tx + entry;
 
@@ -1937,7 +2186,7 @@
 
 		stmmac_release_tx_desc(priv, p, priv->mode);
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 	}
 	tx_q->dirty_tx = entry;
 
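Descriptor indices now wrap against the runtime ring size. Assuming `STMMAC_GET_ENTRY()` keeps its masking definition from common.h, `(((x) + 1) & ((size) - 1))`, the configurable ring sizes must remain powers of two for the wrap to be correct:

```c
#include <assert.h>

/* Model of STMMAC_GET_ENTRY(x, size) under the stated assumption. */
static unsigned int get_entry(unsigned int x, unsigned int size)
{
	return (x + 1) & (size - 1);
}

int main(void)
{
	assert(get_entry(0, 512) == 1);		/* normal advance */
	assert(get_entry(511, 512) == 0);	/* wrap at ring end */
	assert(get_entry(511, 768) != 0);	/* non-power-of-two breaks */
	return 0;
}
```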
@@ -1946,7 +2195,7 @@
 
 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
 								queue))) &&
-	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
 
 		netif_dbg(priv, tx_done, priv->dev,
 			  "%s: restart transmit\n", __func__);
@@ -1955,8 +2204,12 @@
 
 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
 		stmmac_enable_eee_mode(priv);
-		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 	}
+
+	/* We still have pending packets, let's call for a new scheduling */
+	if (tx_q->dirty_tx != tx_q->cur_tx)
+		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
 
 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
 
@@ -1973,23 +2226,18 @@
 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
-	int i;
 
 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
 
 	stmmac_stop_tx_dma(priv, chan);
 	dma_free_tx_skbufs(priv, chan);
-	for (i = 0; i < DMA_TX_SIZE; i++)
-		if (priv->extend_desc)
-			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
-					    priv->mode, (i == DMA_TX_SIZE - 1));
-		else
-			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
-					    priv->mode, (i == DMA_TX_SIZE - 1));
+	stmmac_clear_tx_descriptors(priv, chan);
 	tx_q->dirty_tx = 0;
 	tx_q->cur_tx = 0;
 	tx_q->mss = 0;
 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+			    tx_q->dma_tx_phy, chan);
 	stmmac_start_tx_dma(priv, chan);
 
 	priv->dev->stats.tx_errors++;
@@ -2048,23 +2296,24 @@
 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
 						 &priv->xstats, chan);
 	struct stmmac_channel *ch = &priv->channel[chan];
-	bool needs_work = false;
+	unsigned long flags;
 
-	if ((status & handle_rx) && ch->has_rx) {
-		needs_work = true;
-	} else {
-		status &= ~handle_rx;
+	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+		if (napi_schedule_prep(&ch->rx_napi)) {
+			spin_lock_irqsave(&ch->lock, flags);
+			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+			spin_unlock_irqrestore(&ch->lock, flags);
+			__napi_schedule(&ch->rx_napi);
+		}
 	}
 
-	if ((status & handle_tx) && ch->has_tx) {
-		needs_work = true;
-	} else {
-		status &= ~handle_tx;
-	}
-
-	if (needs_work && napi_schedule_prep(&ch->napi)) {
-		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
-		__napi_schedule(&ch->napi);
+	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+		if (napi_schedule_prep(&ch->tx_napi)) {
+			spin_lock_irqsave(&ch->lock, flags);
+			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+			spin_unlock_irqrestore(&ch->lock, flags);
+			__napi_schedule(&ch->tx_napi);
+		}
 	}
 
 	return status;
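The scheduling path above masks only the direction being handed to NAPI; the matching unmask happens at poll completion. That side is not part of this hunk, so the following is a condensed sketch of how the RX poll handler pairs with it (same `ch->lock` discipline; details abridged, not the full in-tree handler):

```c
/* Sketch: once the budget is not exhausted, complete NAPI and re-enable
 * only the RX interrupt, under the same per-channel lock.
 */
static int stmmac_napi_poll_rx_sketch(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
```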
@@ -2127,10 +2376,10 @@
 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-	dwmac_mmc_intr_all_mask(priv->mmcaddr);
+	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
 
 	if (priv->dma_cap.rmon) {
-		dwmac_mmc_ctrl(priv->mmcaddr, mode);
+		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
 	} else
 		netdev_info(priv->dev, "No MAC Management Counters available\n");
@@ -2159,7 +2408,8 @@
  */
 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 {
-	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+//	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+	if (1) {
 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
 		if (likely(priv->plat->get_eth_addr))
 			priv->plat->get_eth_addr(priv->plat->bsp_priv,
@@ -2222,7 +2472,8 @@
 				    rx_q->dma_rx_phy, chan);
 
 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-				     (DMA_RX_SIZE * sizeof(struct dma_desc));
+				     (priv->dma_rx_size *
+				      sizeof(struct dma_desc));
 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
 				       rx_q->rx_tail_addr, chan);
 	}
@@ -2251,7 +2502,7 @@
 
 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
- * @data: data pointer
+ * @t: data pointer
  * Description:
  * This is the timer handler to directly invoke the stmmac_tx_clean.
  */
@@ -2263,25 +2514,32 @@
 
 	ch = &priv->channel[tx_q->queue_index];
 
-	if (likely(napi_schedule_prep(&ch->napi)))
-		__napi_schedule(&ch->napi);
+	if (likely(napi_schedule_prep(&ch->tx_napi))) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
+		__napi_schedule(&ch->tx_napi);
+	}
 }
 
 /**
- * stmmac_init_tx_coalesce - init tx mitigation options.
+ * stmmac_init_coalesce - init mitigation options.
  * @priv: driver private structure
  * Description:
- * This inits the transmit coalesce parameters: i.e. timer rate,
+ * This inits the coalesce parameters: i.e. timer rate,
  * timer handler and default threshold used for enabling the
  * interrupt on completion bit.
 */
-static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
 {
 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
 	u32 chan;
 
 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+	priv->rx_coal_frames = STMMAC_RX_FRAMES;
 
 	for (chan = 0; chan < tx_channel_count; chan++) {
 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
....@@ -2299,12 +2557,12 @@
22992557 /* set TX ring length */
23002558 for (chan = 0; chan < tx_channels_count; chan++)
23012559 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2302
- (DMA_TX_SIZE - 1), chan);
2560
+ (priv->dma_tx_size - 1), chan);
23032561
23042562 /* set RX ring length */
23052563 for (chan = 0; chan < rx_channels_count; chan++)
23062564 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2307
- (DMA_RX_SIZE - 1), chan);
2565
+ (priv->dma_rx_size - 1), chan);
23082566 }
23092567
23102568 /**
....@@ -2428,6 +2686,22 @@
24282686 }
24292687 }
24302688
2689
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2690
+{
2691
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2692
+ priv->rss.enable = false;
2693
+ return;
2694
+ }
2695
+
2696
+ if (priv->dev->features & NETIF_F_RXHASH)
2697
+ priv->rss.enable = true;
2698
+ else
2699
+ priv->rss.enable = false;
2700
+
2701
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
2702
+ priv->plat->rx_queues_to_use);
2703
+}
2704
+
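Both branches of the NETIF_F_RXHASH test above only mirror the feature bit into the flag, so that part of stmmac_mac_config_rss() reduces to a single assignment; an equivalent form:

        priv->rss.enable = !!(priv->dev->features & NETIF_F_RXHASH);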
24312705 /**
24322706 * stmmac_mtl_configuration - Configure MTL
24332707 * @priv: driver private structure
....@@ -2472,6 +2746,10 @@
24722746 /* Set RX routing */
24732747 if (rx_queues_count > 1)
24742748 stmmac_mac_config_rx_queues_routing(priv);
2749
+
2750
+ /* Receive Side Scaling */
2751
+ if (rx_queues_count > 1)
2752
+ stmmac_mac_config_rss(priv);
24752753 }
24762754
24772755 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
....@@ -2487,6 +2765,7 @@
24872765 /**
24882766 * stmmac_hw_setup - setup mac in a usable state.
24892767 * @dev : pointer to the device structure.
2768
+ * @ptp_register: register PTP if set
24902769 * Description:
24912770 * this is the main function to setup the HW in a usable state because the
24922771 * dma engine is reset, the core registers are configured (e.g. AXI,
....@@ -2496,7 +2775,7 @@
24962775 * 0 on success and an appropriate (-)ve integer as defined in errno.h
24972776 * file on failure.
24982777 */
2499
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2778
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
25002779 {
25012780 struct stmmac_priv *priv = netdev_priv(dev);
25022781 u32 rx_cnt = priv->plat->rx_queues_to_use;
....@@ -2552,37 +2831,75 @@
25522831
25532832 stmmac_mmc_setup(priv);
25542833
2555
- if (IS_ENABLED(CONFIG_STMMAC_PTP) && init_ptp) {
2834
+ if (ptp_register) {
25562835 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25572836 if (ret < 0)
2558
- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2559
-
2560
- ret = stmmac_init_ptp(priv);
2561
- if (ret == -EOPNOTSUPP)
2562
- netdev_warn(priv->dev, "PTP not supported by HW\n");
2563
- else if (ret)
2564
- netdev_warn(priv->dev, "PTP init failed\n");
2837
+ netdev_warn(priv->dev,
2838
+ "failed to enable PTP reference clock: %pe\n",
2839
+ ERR_PTR(ret));
25652840 }
25662841
2567
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2842
+ ret = stmmac_init_ptp(priv);
2843
+ if (ret == -EOPNOTSUPP)
2844
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
2845
+ else if (ret)
2846
+ netdev_warn(priv->dev, "PTP init failed\n");
2847
+ else if (ptp_register)
2848
+ stmmac_ptp_register(priv);
2849
+
2850
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2851
+
2852
+ /* Convert the timer from msec to usec */
2853
+ if (!priv->tx_lpi_timer)
2854
+ priv->tx_lpi_timer = eee_timer * 1000;
25682855
25692856 if (priv->use_riwt) {
2570
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2571
- if (!ret)
2572
- priv->rx_riwt = MAX_DMA_RIWT;
2857
+ if (!priv->rx_riwt)
2858
+ priv->rx_riwt = DEF_DMA_RIWT;
2859
+
2860
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
25732861 }
25742862
25752863 if (priv->hw->pcs)
2576
- stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2864
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
25772865
25782866 /* set TX and RX rings length */
25792867 stmmac_set_rings_length(priv);
25802868
25812869 /* Enable TSO */
25822870 if (priv->tso) {
2583
- for (chan = 0; chan < tx_cnt; chan++)
2871
+ for (chan = 0; chan < tx_cnt; chan++) {
2872
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2873
+
2874
+ /* TSO and TBS cannot co-exist */
2875
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
2876
+ continue;
2877
+
25842878 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2879
+ }
25852880 }
2881
+
2882
+ /* Enable Split Header */
2883
+ if (priv->sph && priv->hw->rx_csum) {
2884
+ for (chan = 0; chan < rx_cnt; chan++)
2885
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2886
+ }
2887
+
2888
+ /* VLAN Tag Insertion */
2889
+ if (priv->dma_cap.vlins)
2890
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2891
+
2892
+ /* TBS */
2893
+ for (chan = 0; chan < tx_cnt; chan++) {
2894
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2895
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2896
+
2897
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2898
+ }
2899
+
2900
+ /* Configure real RX and TX queues */
2901
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2902
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
25862903
25872904 /* Start the ball rolling... */
25882905 stmmac_start_all_dma(priv);
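TSO and TBS cannot coexist on the same queue, so stmmac_hw_setup() walks the TX queues twice: once enabling TSO only where TBS is absent, once programming TBS itself. The combined per-queue decision, as a sketch (the driver additionally calls stmmac_enable_tbs() with enable == 0 on non-TBS queues, omitted here for brevity):

        for (chan = 0; chan < tx_cnt; chan++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

                if (tx_q->tbs & STMMAC_TBS_AVAIL)
                        stmmac_enable_tbs(priv, priv->ioaddr, 1, chan);
                else if (priv->tso)
                        stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
        }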
....@@ -2594,8 +2911,7 @@
25942911 {
25952912 struct stmmac_priv *priv = netdev_priv(dev);
25962913
2597
- if (IS_ENABLED(CONFIG_STMMAC_PTP))
2598
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
2914
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
25992915 }
26002916
26012917 /**
....@@ -2610,18 +2926,26 @@
26102926 static int stmmac_open(struct net_device *dev)
26112927 {
26122928 struct stmmac_priv *priv = netdev_priv(dev);
2929
+ int bfsize = 0;
26132930 u32 chan;
26142931 int ret;
26152932
2616
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2617
- priv->hw->pcs != STMMAC_PCS_TBI &&
2618
- priv->hw->pcs != STMMAC_PCS_RTBI) {
2934
+ ret = pm_runtime_get_sync(priv->device);
2935
+ if (ret < 0) {
2936
+ pm_runtime_put_noidle(priv->device);
2937
+ return ret;
2938
+ }
2939
+
2940
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
2941
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
2942
+ priv->hw->xpcs == NULL) {
26192943 ret = stmmac_init_phy(dev);
26202944 if (ret) {
26212945 netdev_err(priv->dev,
26222946 "%s: Cannot attach to PHY (error: %d)\n",
26232947 __func__, ret);
2624
- return ret;
2948
+ goto init_phy_error;
26252949 }
26262950 }
26272951
....@@ -2629,8 +2953,34 @@
26292953 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
26302954 priv->xstats.threshold = tc;
26312955
2632
- priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2956
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2957
+ if (bfsize < 0)
2958
+ bfsize = 0;
2959
+
2960
+ if (bfsize < BUF_SIZE_16KiB)
2961
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2962
+
2963
+ priv->dma_buf_sz = bfsize;
2964
+ buf_sz = bfsize;
2965
+
26332966 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2967
+
2968
+ if (!priv->dma_tx_size)
2969
+ priv->dma_tx_size = priv->plat->dma_tx_size ? priv->plat->dma_tx_size :
2970
+ DMA_DEFAULT_TX_SIZE;
2971
+
2972
+ if (!priv->dma_rx_size)
2973
+ priv->dma_rx_size = priv->plat->dma_rx_size ? priv->plat->dma_rx_size :
2974
+ DMA_DEFAULT_RX_SIZE;
2975
+
2976
+ /* Earlier check for TBS */
2977
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2978
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2979
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2980
+
2981
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
2982
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2983
+ }
26342984
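The two fallback chains above pick, in order: a ring size already set at runtime (e.g. an earlier ethtool -G), the platform-provided default, then the driver default. Factored out as a sketch (the helper is hypothetical, not in the driver):

        static u32 stmmac_ring_size(u32 runtime, u32 plat, u32 fallback)
        {
                if (runtime)
                        return runtime;         /* e.g. set via ethtool -G */
                return plat ? plat : fallback;
        }

        /* usage sketch */
        priv->dma_tx_size = stmmac_ring_size(priv->dma_tx_size,
                                             priv->plat->dma_tx_size,
                                             DMA_DEFAULT_TX_SIZE);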
26352985 ret = alloc_dma_desc_resources(priv);
26362986 if (ret < 0) {
....@@ -2646,16 +2996,32 @@
26462996 goto init_error;
26472997 }
26482998
2999
+ if (priv->plat->serdes_powerup) {
3000
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3001
+ if (ret < 0) {
3002
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3003
+ __func__);
3004
+ goto init_error;
3005
+ }
3006
+ }
3007
+
3008
+
3009
+ #if 1
3010
+ printk("ben -------bootup add 2s delay time.\n");
3011
+ mdelay(2500);
3012
+ #endif
3013
+
26493014 ret = stmmac_hw_setup(dev, true);
26503015 if (ret < 0) {
26513016 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
26523017 goto init_error;
26533018 }
26543019
2655
- stmmac_init_tx_coalesce(priv);
3020
+ stmmac_init_coalesce(priv);
26563021
2657
- if (dev->phydev)
2658
- phy_start(dev->phydev);
3022
+ phylink_start(priv->phylink);
3023
+ /* We may have called phylink_speed_down before */
3024
+ phylink_speed_up(priv->phylink);
26593025
26603026 /* Request the IRQ lines */
26613027 ret = request_irq(dev->irq, stmmac_interrupt,
....@@ -2702,8 +3068,7 @@
27023068 wolirq_error:
27033069 free_irq(dev->irq, dev);
27043070 irq_error:
2705
- if (dev->phydev)
2706
- phy_stop(dev->phydev);
3071
+ phylink_stop(priv->phylink);
27073072
27083073 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27093074 del_timer_sync(&priv->tx_queue[chan].txtimer);
....@@ -2712,9 +3077,9 @@
27123077 init_error:
27133078 free_dma_desc_resources(priv);
27143079 dma_desc_error:
2715
- if (dev->phydev)
2716
- phy_disconnect(dev->phydev);
2717
-
3080
+ phylink_disconnect_phy(priv->phylink);
3081
+init_phy_error:
3082
+ pm_runtime_put(priv->device);
27183083 return ret;
27193084 }
27203085
....@@ -2729,14 +3094,14 @@
27293094 struct stmmac_priv *priv = netdev_priv(dev);
27303095 u32 chan;
27313096
3097
+ if (device_may_wakeup(priv->device))
3098
+ phylink_speed_down(priv->phylink, false);
27323099 /* Stop and disconnect the PHY */
2733
- if (dev->phydev) {
2734
- phy_stop(dev->phydev);
2735
- phy_disconnect(dev->phydev);
2736
- if (priv->plat->integrated_phy_power)
2737
- priv->plat->integrated_phy_power(priv->plat->bsp_priv,
2738
- false);
2739
- }
3100
+ phylink_stop(priv->phylink);
3101
+ phylink_disconnect_phy(priv->phylink);
3102
+
3103
+ if (priv->plat->integrated_phy_power)
3104
+ priv->plat->integrated_phy_power(priv->plat->bsp_priv, false);
27403105
27413106 stmmac_disable_all_queues(priv);
27423107
....@@ -2764,12 +3129,48 @@
27643129 /* Disable the MAC Rx/Tx */
27653130 stmmac_mac_set(priv, priv->ioaddr, false);
27663131
3132
+ /* Power down the SerDes if one is present */
3133
+ if (priv->plat->serdes_powerdown)
3134
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3135
+
27673136 netif_carrier_off(dev);
27683137
2769
- if (IS_ENABLED(CONFIG_STMMAC_PTP))
2770
- stmmac_release_ptp(priv);
3138
+ stmmac_release_ptp(priv);
3139
+
3140
+ pm_runtime_put(priv->device);
27713141
27723142 return 0;
3143
+}
3144
+
3145
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3146
+ struct stmmac_tx_queue *tx_q)
3147
+{
3148
+ u16 tag = 0x0, inner_tag = 0x0;
3149
+ u32 inner_type = 0x0;
3150
+ struct dma_desc *p;
3151
+
3152
+ if (!priv->dma_cap.vlins)
3153
+ return false;
3154
+ if (!skb_vlan_tag_present(skb))
3155
+ return false;
3156
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3157
+ inner_tag = skb_vlan_tag_get(skb);
3158
+ inner_type = STMMAC_VLAN_INSERT;
3159
+ }
3160
+
3161
+ tag = skb_vlan_tag_get(skb);
3162
+
3163
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3164
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3165
+ else
3166
+ p = &tx_q->dma_tx[tx_q->cur_tx];
3167
+
3168
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3169
+ return false;
3170
+
3171
+ stmmac_set_tx_owner(priv, p);
3172
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3173
+ return true;
27733174 }
27743175
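Note that a successful stmmac_vlan_insert() consumes a descriptor of its own (the VLAN context entry) and advances cur_tx before the first data descriptor is written. The budgeting this implies on the caller side, as an illustrative sketch only:

        unsigned int needed = nfrags + 1;               /* data descriptors */

        if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
                needed++;                               /* + VLAN context descriptor */

        if (unlikely(stmmac_tx_avail(priv, queue) < needed))
                return NETDEV_TX_BUSY;                  /* the real code also stops the queue */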
27753176 /**
....@@ -2777,13 +3178,13 @@
27773178 * @priv: driver private structure
27783179 * @des: buffer start address
27793180 * @total_len: total length to fill in descriptors
2780
- * @last_segmant: condition for the last descriptor
3181
+ * @last_segment: condition for the last descriptor
27813182 * @queue: TX queue index
27823183 * Description:
27833184 * This function fills descriptor and request new descriptors according to
27843185 * buffer length to fill
27853186 */
2786
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
3187
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
27873188 int total_len, bool last_segment, u32 queue)
27883189 {
27893190 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
....@@ -2794,11 +3195,23 @@
27943195 tmp_len = total_len;
27953196
27963197 while (tmp_len > 0) {
2797
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2798
- WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2799
- desc = tx_q->dma_tx + tx_q->cur_tx;
3198
+ dma_addr_t curr_addr;
28003199
2801
- desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
3200
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3201
+ priv->dma_tx_size);
3202
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3203
+
3204
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3205
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3206
+ else
3207
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
3208
+
3209
+ curr_addr = des + (total_len - tmp_len);
3210
+ if (priv->dma_cap.addr64 <= 32)
3211
+ desc->des0 = cpu_to_le32(curr_addr);
3212
+ else
3213
+ stmmac_set_desc_addr(priv, desc, curr_addr);
3214
+
28023215 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
28033216 TSO_MAX_BUFF_SIZE : tmp_len;
28043217
....@@ -2842,16 +3255,19 @@
28423255 {
28433256 struct dma_desc *desc, *first, *mss_desc = NULL;
28443257 struct stmmac_priv *priv = netdev_priv(dev);
3258
+ int desc_size, tmp_pay_len = 0, first_tx;
28453259 int nfrags = skb_shinfo(skb)->nr_frags;
28463260 u32 queue = skb_get_queue_mapping(skb);
2847
- unsigned int first_entry, des;
2848
- u8 proto_hdr_len, hdr;
3261
+ unsigned int first_entry, tx_packets;
28493262 struct stmmac_tx_queue *tx_q;
2850
- int tmp_pay_len = 0;
3263
+ bool has_vlan, set_ic;
3264
+ u8 proto_hdr_len, hdr;
28513265 u32 pay_len, mss;
3266
+ dma_addr_t des;
28523267 int i;
28533268
28543269 tx_q = &priv->tx_queue[queue];
3270
+ first_tx = tx_q->cur_tx;
28553271
28563272 /* Compute header lengths */
28573273 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
....@@ -2882,10 +3298,15 @@
28823298
28833299 /* set new MSS value if needed */
28843300 if (mss != tx_q->mss) {
2885
- mss_desc = tx_q->dma_tx + tx_q->cur_tx;
3301
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3302
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3303
+ else
3304
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3305
+
28863306 stmmac_set_mss(priv, mss_desc, mss);
28873307 tx_q->mss = mss;
2888
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3308
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3309
+ priv->dma_tx_size);
28893310 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
28903311 }
28913312
....@@ -2896,11 +3317,20 @@
28963317 skb->data_len);
28973318 }
28983319
3320
+ /* Check if VLAN can be inserted by HW */
3321
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3322
+
28993323 first_entry = tx_q->cur_tx;
29003324 WARN_ON(tx_q->tx_skbuff[first_entry]);
29013325
2902
- desc = tx_q->dma_tx + first_entry;
3326
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3327
+ desc = &tx_q->dma_entx[first_entry].basic;
3328
+ else
3329
+ desc = &tx_q->dma_tx[first_entry];
29033330 first = desc;
3331
+
3332
+ if (has_vlan)
3333
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
29043334
29053335 /* first descriptor: fill Headers on Buf1 */
29063336 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
....@@ -2911,14 +3341,21 @@
29113341 tx_q->tx_skbuff_dma[first_entry].buf = des;
29123342 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
29133343
2914
- first->des0 = cpu_to_le32(des);
3344
+ if (priv->dma_cap.addr64 <= 32) {
3345
+ first->des0 = cpu_to_le32(des);
29153346
2916
- /* Fill start of payload in buff2 of first descriptor */
2917
- if (pay_len)
2918
- first->des1 = cpu_to_le32(des + proto_hdr_len);
3347
+ /* Fill start of payload in buff2 of first descriptor */
3348
+ if (pay_len)
3349
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
29193350
2920
- /* If needed take extra descriptors to fill the remaining payload */
2921
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3351
+ /* If needed take extra descriptors to fill the remaining payload */
3352
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3353
+ } else {
3354
+ stmmac_set_desc_addr(priv, first, des);
3355
+ tmp_pay_len = pay_len;
3356
+ des += proto_hdr_len;
3357
+ pay_len = 0;
3358
+ }
29223359
29233360 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
29243361
....@@ -2945,12 +3382,38 @@
29453382 /* Only the last descriptor gets to point to the skb. */
29463383 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
29473384
3385
+ /* Manage tx mitigation */
3386
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
3387
+ tx_q->tx_count_frames += tx_packets;
3388
+
3389
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3390
+ set_ic = true;
3391
+ else if (!priv->tx_coal_frames)
3392
+ set_ic = false;
3393
+ else if (tx_packets > priv->tx_coal_frames)
3394
+ set_ic = true;
3395
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3396
+ set_ic = true;
3397
+ else
3398
+ set_ic = false;
3399
+
3400
+ if (set_ic) {
3401
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3402
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3403
+ else
3404
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
3405
+
3406
+ tx_q->tx_count_frames = 0;
3407
+ stmmac_set_tx_ic(priv, desc);
3408
+ priv->xstats.tx_set_ic_bit++;
3409
+ }
3410
+
29483411 /* We've used all descriptors we need for this skb, however,
29493412 * advance cur_tx so that it references a fresh descriptor.
29503413 * ndo_start_xmit will fill this descriptor the next time it's
29513414 * called and stmmac_tx_clean may clean up to this descriptor.
29523415 */
2953
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3416
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
29543417
29553418 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
29563419 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
....@@ -2962,18 +3425,8 @@
29623425 priv->xstats.tx_tso_frames++;
29633426 priv->xstats.tx_tso_nfrags += nfrags;
29643427
2965
- /* Manage tx mitigation */
2966
- tx_q->tx_count_frames += nfrags + 1;
2967
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2968
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2969
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2970
- priv->hwts_tx_en)) {
2971
- stmmac_tx_timer_arm(priv, queue);
2972
- } else {
2973
- tx_q->tx_count_frames = 0;
2974
- stmmac_set_tx_ic(priv, desc);
2975
- priv->xstats.tx_set_ic_bit++;
2976
- }
3428
+ if (priv->sarc_type)
3429
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
29773430
29783431 skb_tx_timestamp(skb);
29793432
....@@ -3012,16 +3465,18 @@
30123465 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
30133466 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
30143467 tx_q->cur_tx, first, nfrags);
3015
-
3016
- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3017
-
30183468 pr_info(">>> frame to be transmitted: ");
30193469 print_pkt(skb->data, skb_headlen(skb));
30203470 }
30213471
30223472 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
30233473
3024
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3474
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3475
+ desc_size = sizeof(struct dma_edesc);
3476
+ else
3477
+ desc_size = sizeof(struct dma_desc);
3478
+
3479
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
30253480 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
30263481 stmmac_tx_timer_arm(priv, queue);
30273482
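The interrupt-coalescing ladder above (and its twin in stmmac_xmit() below) reads as a single predicate. A sketch with names taken from the hunk (the helper itself is hypothetical):

        static bool stmmac_tx_needs_ic(struct stmmac_priv *priv,
                                       struct sk_buff *skb,
                                       unsigned int tx_packets,
                                       unsigned int count_frames)
        {
                if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                    priv->hwts_tx_en)
                        return true;    /* timestamped frames always signal completion */
                if (!priv->tx_coal_frames)
                        return false;   /* frame coalescing disabled */
                if (tx_packets > priv->tx_coal_frames)
                        return true;    /* this burst alone exceeds the threshold */
                /* true when the running counter just crossed a threshold multiple */
                return (count_frames % priv->tx_coal_frames) < tx_packets;
        }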
....@@ -3044,20 +3499,22 @@
30443499 */
30453500 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
30463501 {
3502
+ unsigned int first_entry, tx_packets, enh_desc;
30473503 struct stmmac_priv *priv = netdev_priv(dev);
30483504 unsigned int nopaged_len = skb_headlen(skb);
30493505 int i, csum_insertion = 0, is_jumbo = 0;
30503506 u32 queue = skb_get_queue_mapping(skb);
30513507 int nfrags = skb_shinfo(skb)->nr_frags;
30523508 int gso = skb_shinfo(skb)->gso_type;
3053
- int entry;
3054
- unsigned int first_entry;
3509
+ struct dma_edesc *tbs_desc = NULL;
3510
+ int entry, desc_size, first_tx;
30553511 struct dma_desc *desc, *first;
30563512 struct stmmac_tx_queue *tx_q;
3057
- unsigned int enh_desc;
3058
- unsigned int des;
3513
+ bool has_vlan, set_ic;
3514
+ dma_addr_t des;
30593515
30603516 tx_q = &priv->tx_queue[queue];
3517
+ first_tx = tx_q->cur_tx;
30613518
30623519 if (priv->tx_path_in_lpi_mode)
30633520 stmmac_disable_eee_mode(priv);
....@@ -3082,6 +3539,9 @@
30823539 return NETDEV_TX_BUSY;
30833540 }
30843541
3542
+ /* Check if VLAN can be inserted by HW */
3543
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3544
+
30853545 entry = tx_q->cur_tx;
30863546 first_entry = entry;
30873547 WARN_ON(tx_q->tx_skbuff[first_entry]);
....@@ -3090,10 +3550,15 @@
30903550
30913551 if (likely(priv->extend_desc))
30923552 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3553
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3554
+ desc = &tx_q->dma_entx[entry].basic;
30933555 else
30943556 desc = tx_q->dma_tx + entry;
30953557
30963558 first = desc;
3559
+
3560
+ if (has_vlan)
3561
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
30973562
30983563 enh_desc = priv->plat->enh_desc;
30993564 /* To program the descriptors according to the size of the frame */
....@@ -3111,11 +3576,13 @@
31113576 int len = skb_frag_size(frag);
31123577 bool last_segment = (i == (nfrags - 1));
31133578
3114
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3579
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
31153580 WARN_ON(tx_q->tx_skbuff[entry]);
31163581
31173582 if (likely(priv->extend_desc))
31183583 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3584
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3585
+ desc = &tx_q->dma_entx[entry].basic;
31193586 else
31203587 desc = tx_q->dma_tx + entry;
31213588
....@@ -3140,28 +3607,51 @@
31403607 /* Only the last descriptor gets to point to the skb. */
31413608 tx_q->tx_skbuff[entry] = skb;
31423609
3610
+ /* According to the coalesce parameter the IC bit for the latest
3611
+ * segment is reset and the timer re-started to clean the tx status.
3612
+ * This approach takes care about the fragments: desc is the first
3613
+ * element in case of no SG.
3614
+ */
3615
+ tx_packets = (entry + 1) - first_tx;
3616
+ tx_q->tx_count_frames += tx_packets;
3617
+
3618
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3619
+ set_ic = true;
3620
+ else if (!priv->tx_coal_frames)
3621
+ set_ic = false;
3622
+ else if (tx_packets > priv->tx_coal_frames)
3623
+ set_ic = true;
3624
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3625
+ set_ic = true;
3626
+ else
3627
+ set_ic = false;
3628
+
3629
+ if (set_ic) {
3630
+ if (likely(priv->extend_desc))
3631
+ desc = &tx_q->dma_etx[entry].basic;
3632
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3633
+ desc = &tx_q->dma_entx[entry].basic;
3634
+ else
3635
+ desc = &tx_q->dma_tx[entry];
3636
+
3637
+ tx_q->tx_count_frames = 0;
3638
+ stmmac_set_tx_ic(priv, desc);
3639
+ priv->xstats.tx_set_ic_bit++;
3640
+ }
3641
+
31433642 /* We've used all descriptors we need for this skb, however,
31443643 * advance cur_tx so that it references a fresh descriptor.
31453644 * ndo_start_xmit will fill this descriptor the next time it's
31463645 * called and stmmac_tx_clean may clean up to this descriptor.
31473646 */
3148
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3647
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
31493648 tx_q->cur_tx = entry;
31503649
31513650 if (netif_msg_pktdata(priv)) {
3152
- void *tx_head;
3153
-
31543651 netdev_dbg(priv->dev,
31553652 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
31563653 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
31573654 entry, first, nfrags);
3158
-
3159
- if (priv->extend_desc)
3160
- tx_head = (void *)tx_q->dma_etx;
3161
- else
3162
- tx_head = (void *)tx_q->dma_tx;
3163
-
3164
- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
31653655
31663656 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
31673657 print_pkt(skb->data, skb->len);
....@@ -3175,22 +3665,8 @@
31753665
31763666 dev->stats.tx_bytes += skb->len;
31773667
3178
- /* According to the coalesce parameter the IC bit for the latest
3179
- * segment is reset and the timer re-started to clean the tx status.
3180
- * This approach takes care about the fragments: desc is the first
3181
- * element in case of no SG.
3182
- */
3183
- tx_q->tx_count_frames += nfrags + 1;
3184
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3185
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3186
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3187
- priv->hwts_tx_en)) {
3188
- stmmac_tx_timer_arm(priv, queue);
3189
- } else {
3190
- tx_q->tx_count_frames = 0;
3191
- stmmac_set_tx_ic(priv, desc);
3192
- priv->xstats.tx_set_ic_bit++;
3193
- }
3668
+ if (priv->sarc_type)
3669
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
31943670
31953671 skb_tx_timestamp(skb);
31963672
....@@ -3222,11 +3698,18 @@
32223698
32233699 /* Prepare the first descriptor setting the OWN bit too */
32243700 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3225
- csum_insertion, priv->mode, 1, last_segment,
3701
+ csum_insertion, priv->mode, 0, last_segment,
32263702 skb->len);
3227
- } else {
3228
- stmmac_set_tx_owner(priv, first);
32293703 }
3704
+
3705
+ if (tx_q->tbs & STMMAC_TBS_EN) {
3706
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3707
+
3708
+ tbs_desc = &tx_q->dma_entx[first_entry];
3709
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3710
+ }
3711
+
3712
+ stmmac_set_tx_owner(priv, first);
32303713
32313714 /* The own bit must be the latest setting done when prepare the
32323715 * descriptor and then barrier is needed to make sure that
....@@ -3238,7 +3721,14 @@
32383721
32393722 stmmac_enable_dma_transmission(priv, priv->ioaddr);
32403723
3241
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3724
+ if (likely(priv->extend_desc))
3725
+ desc_size = sizeof(struct dma_extended_desc);
3726
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3727
+ desc_size = sizeof(struct dma_edesc);
3728
+ else
3729
+ desc_size = sizeof(struct dma_desc);
3730
+
3731
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
32423732 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32433733 stmmac_tx_timer_arm(priv, queue);
32443734
....@@ -3272,15 +3762,6 @@
32723762 }
32733763 }
32743764
3275
-
3276
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3277
-{
3278
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3279
- return 0;
3280
-
3281
- return 1;
3282
-}
3283
-
32843765 /**
32853766 * stmmac_rx_refill - refill used skb preallocated buffers
32863767 * @priv: driver private structure
....@@ -3291,63 +3772,115 @@
32913772 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32923773 {
32933774 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3294
- int dirty = stmmac_rx_dirty(priv, queue);
3775
+ int len, dirty = stmmac_rx_dirty(priv, queue);
32953776 unsigned int entry = rx_q->dirty_rx;
3777
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
32963778
3297
- int bfsize = priv->dma_buf_sz;
3779
+ if (priv->dma_cap.addr64 <= 32)
3780
+ gfp |= GFP_DMA32;
3781
+
3782
+ len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
32983783
32993784 while (dirty-- > 0) {
3785
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
33003786 struct dma_desc *p;
3787
+ bool use_rx_wd;
33013788
33023789 if (priv->extend_desc)
33033790 p = (struct dma_desc *)(rx_q->dma_erx + entry);
33043791 else
33053792 p = rx_q->dma_rx + entry;
33063793
3307
- if (likely(!rx_q->rx_skbuff[entry])) {
3308
- struct sk_buff *skb;
3309
-
3310
- skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3311
- if (unlikely(!skb)) {
3312
- /* so for a while no zero-copy! */
3313
- rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3314
- if (unlikely(net_ratelimit()))
3315
- dev_err(priv->device,
3316
- "fail to alloc skb entry %d\n",
3317
- entry);
3794
+ if (!buf->page) {
3795
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3796
+ if (!buf->page)
33183797 break;
3319
- }
3320
-
3321
- rx_q->rx_skbuff[entry] = skb;
3322
- rx_q->rx_skbuff_dma[entry] =
3323
- dma_map_single(priv->device, skb->data, bfsize,
3324
- DMA_FROM_DEVICE);
3325
- if (dma_mapping_error(priv->device,
3326
- rx_q->rx_skbuff_dma[entry])) {
3327
- netdev_err(priv->dev, "Rx DMA map failed\n");
3328
- dev_kfree_skb(skb);
3329
- break;
3330
- }
3331
-
3332
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3333
- stmmac_refill_desc3(priv, rx_q, p);
3334
-
3335
- if (rx_q->rx_zeroc_thresh > 0)
3336
- rx_q->rx_zeroc_thresh--;
3337
-
3338
- netif_dbg(priv, rx_status, priv->dev,
3339
- "refill entry #%d\n", entry);
33403798 }
3341
- dma_wmb();
33423799
3343
- stmmac_set_rx_owner(priv, p, priv->use_riwt);
3800
+ if (priv->sph && !buf->sec_page) {
3801
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3802
+ if (!buf->sec_page)
3803
+ break;
3804
+
3805
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3806
+ }
3807
+
3808
+ buf->addr = page_pool_get_dma_addr(buf->page);
3809
+ stmmac_set_desc_addr(priv, p, buf->addr);
3810
+ if (priv->sph)
3811
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3812
+ else
3813
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
3814
+ stmmac_refill_desc3(priv, rx_q, p);
3815
+
3816
+ rx_q->rx_count_frames++;
3817
+ rx_q->rx_count_frames += priv->rx_coal_frames;
3818
+ if (rx_q->rx_count_frames > priv->rx_coal_frames)
3819
+ rx_q->rx_count_frames = 0;
3820
+
3821
+ use_rx_wd = !priv->rx_coal_frames;
3822
+ use_rx_wd |= rx_q->rx_count_frames > 0;
3823
+ if (!priv->use_riwt)
3824
+ use_rx_wd = false;
33443825
33453826 dma_wmb();
3827
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
33463828
3347
- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3829
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
33483830 }
33493831 rx_q->dirty_rx = entry;
3832
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3833
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
33503834 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3835
+}
3836
+
3837
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3838
+ struct dma_desc *p,
3839
+ int status, unsigned int len)
3840
+{
3841
+ unsigned int plen = 0, hlen = 0;
3842
+ int coe = priv->hw->rx_csum;
3843
+
3844
+ /* Not first descriptor, buffer is always zero */
3845
+ if (priv->sph && len)
3846
+ return 0;
3847
+
3848
+ /* First descriptor, get split header length */
3849
+ stmmac_get_rx_header_len(priv, p, &hlen);
3850
+ if (priv->sph && hlen) {
3851
+ priv->xstats.rx_split_hdr_pkt_n++;
3852
+ return hlen;
3853
+ }
3854
+
3855
+ /* First descriptor, not last descriptor and not split header */
3856
+ if (status & rx_not_ls)
3857
+ return priv->dma_buf_sz;
3858
+
3859
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
3860
+
3861
+ /* First descriptor and last descriptor and not split header */
3862
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
3863
+}
3864
+
3865
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3866
+ struct dma_desc *p,
3867
+ int status, unsigned int len)
3868
+{
3869
+ int coe = priv->hw->rx_csum;
3870
+ unsigned int plen = 0;
3871
+
3872
+ /* Not split header, buffer is not available */
3873
+ if (!priv->sph)
3874
+ return 0;
3875
+
3876
+ /* Not last descriptor */
3877
+ if (status & rx_not_ls)
3878
+ return priv->dma_buf_sz;
3879
+
3880
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
3881
+
3882
+ /* Last descriptor */
3883
+ return plen - len;
33513884 }
33523885
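A worked example for the two helpers, assuming split header enabled, dma_buf_sz = 1536, and a 3000-byte frame whose 128-byte header lands in buffer 1 (all numbers illustrative):

        /*
         * desc 0 (rx_not_ls set):
         *      buf1_len = 128                  (sph: header length from desc)
         *      buf2_len = 1536                 (not last -> full buffer)
         *      len so far = 1664
         * desc 1 (last descriptor):
         *      buf1_len = 0                    (sph && len != 0)
         *      buf2_len = plen - len
         *               = 3000 - 1664 = 1336   (plen = frame len from desc)
         *
         * Total: 128 + 1536 + 1336 = 3000 bytes, before the FCS trim
         * applied later in stmmac_rx().
         */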
33533886 /**
....@@ -3362,30 +3895,54 @@
33623895 {
33633896 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33643897 struct stmmac_channel *ch = &priv->channel[queue];
3898
+ unsigned int count = 0, error = 0, len = 0;
3899
+ int status = 0, coe = priv->hw->rx_csum;
33653900 unsigned int next_entry = rx_q->cur_rx;
3366
- int coe = priv->hw->rx_csum;
3367
- unsigned int count = 0;
3368
- bool xmac;
3369
-
3370
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3901
+ unsigned int desc_size;
3902
+ struct sk_buff *skb = NULL;
33713903
33723904 if (netif_msg_rx_status(priv)) {
33733905 void *rx_head;
33743906
33753907 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3376
- if (priv->extend_desc)
3908
+ if (priv->extend_desc) {
33773909 rx_head = (void *)rx_q->dma_erx;
3378
- else
3910
+ desc_size = sizeof(struct dma_extended_desc);
3911
+ } else {
33793912 rx_head = (void *)rx_q->dma_rx;
3913
+ desc_size = sizeof(struct dma_desc);
3914
+ }
33803915
3381
- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3916
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3917
+ rx_q->dma_rx_phy, desc_size);
33823918 }
33833919 while (count < limit) {
3384
- int entry, status;
3385
- struct dma_desc *p;
3386
- struct dma_desc *np;
3920
+ unsigned int buf1_len = 0, buf2_len = 0;
3921
+ enum pkt_hash_types hash_type;
3922
+ struct stmmac_rx_buffer *buf;
3923
+ struct dma_desc *np, *p;
3924
+ int entry;
3925
+ u32 hash;
33873926
3927
+ if (!count && rx_q->state_saved) {
3928
+ skb = rx_q->state.skb;
3929
+ error = rx_q->state.error;
3930
+ len = rx_q->state.len;
3931
+ } else {
3932
+ rx_q->state_saved = false;
3933
+ skb = NULL;
3934
+ error = 0;
3935
+ len = 0;
3936
+ }
3937
+
3938
+ if ((count >= limit - 1) && limit > 1)
3939
+ break;
3940
+
3941
+read_again:
3942
+ buf1_len = 0;
3943
+ buf2_len = 0;
33883944 entry = next_entry;
3945
+ buf = &rx_q->buf_pool[entry];
33893946
33903947 if (priv->extend_desc)
33913948 p = (struct dma_desc *)(rx_q->dma_erx + entry);
....@@ -3399,9 +3956,8 @@
33993956 if (unlikely(status & dma_own))
34003957 break;
34013958
3402
- count++;
3403
-
3404
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3959
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3960
+ priv->dma_rx_size);
34053961 next_entry = rx_q->cur_rx;
34063962
34073963 if (priv->extend_desc)
....@@ -3415,133 +3971,126 @@
34153971 stmmac_rx_extended_status(priv, &priv->dev->stats,
34163972 &priv->xstats, rx_q->dma_erx + entry);
34173973 if (unlikely(status == discard_frame)) {
3418
- priv->dev->stats.rx_errors++;
3419
- if (priv->hwts_rx_en && !priv->extend_desc) {
3420
- /* DESC2 & DESC3 will be overwritten by device
3421
- * with timestamp value, hence reinitialize
3422
- * them in stmmac_rx_refill() function so that
3423
- * device can reuse it.
3424
- */
3425
- dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3426
- rx_q->rx_skbuff[entry] = NULL;
3427
- dma_unmap_single(priv->device,
3428
- rx_q->rx_skbuff_dma[entry],
3429
- priv->dma_buf_sz,
3430
- DMA_FROM_DEVICE);
3431
- }
3432
- } else {
3433
- struct sk_buff *skb;
3434
- int frame_len;
3435
- unsigned int des;
3436
-
3437
- stmmac_get_desc_addr(priv, p, &des);
3438
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3439
-
3440
- /* If frame length is greater than skb buffer size
3441
- * (preallocated during init) then the packet is
3442
- * ignored
3443
- */
3444
- if (frame_len > priv->dma_buf_sz) {
3445
- if (net_ratelimit())
3446
- netdev_err(priv->dev,
3447
- "len %d larger than size (%d)\n",
3448
- frame_len, priv->dma_buf_sz);
3449
- priv->dev->stats.rx_length_errors++;
3450
- continue;
3451
- }
3452
-
3453
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3454
- * Type frames (LLC/LLC-SNAP)
3455
- *
3456
- * llc_snap is never checked in GMAC >= 4, so this ACS
3457
- * feature is always disabled and packets need to be
3458
- * stripped manually.
3459
- */
3460
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3461
- unlikely(status != llc_snap))
3462
- frame_len -= ETH_FCS_LEN;
3463
-
3464
- if (netif_msg_rx_status(priv)) {
3465
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3466
- p, entry, des);
3467
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3468
- frame_len, status);
3469
- }
3470
-
3471
- /* The zero-copy is always used for all the sizes
3472
- * in case of GMAC4 because it needs
3473
- * to refill the used descriptors, always.
3474
- */
3475
- if (unlikely(!xmac &&
3476
- ((frame_len < priv->rx_copybreak) ||
3477
- stmmac_rx_threshold_count(rx_q)))) {
3478
- skb = netdev_alloc_skb_ip_align(priv->dev,
3479
- frame_len);
3480
- if (unlikely(!skb)) {
3481
- if (net_ratelimit())
3482
- dev_warn(priv->device,
3483
- "packet dropped\n");
3484
- priv->dev->stats.rx_dropped++;
3485
- continue;
3486
- }
3487
-
3488
- dma_sync_single_for_cpu(priv->device,
3489
- rx_q->rx_skbuff_dma
3490
- [entry], frame_len,
3491
- DMA_FROM_DEVICE);
3492
- skb_copy_to_linear_data(skb,
3493
- rx_q->
3494
- rx_skbuff[entry]->data,
3495
- frame_len);
3496
-
3497
- skb_put(skb, frame_len);
3498
- dma_sync_single_for_device(priv->device,
3499
- rx_q->rx_skbuff_dma
3500
- [entry], frame_len,
3501
- DMA_FROM_DEVICE);
3502
- } else {
3503
- skb = rx_q->rx_skbuff[entry];
3504
- if (unlikely(!skb)) {
3505
- if (net_ratelimit())
3506
- netdev_err(priv->dev,
3507
- "%s: Inconsistent Rx chain\n",
3508
- priv->dev->name);
3509
- priv->dev->stats.rx_dropped++;
3510
- continue;
3511
- }
3512
- prefetch(skb->data - NET_IP_ALIGN);
3513
- rx_q->rx_skbuff[entry] = NULL;
3514
- rx_q->rx_zeroc_thresh++;
3515
-
3516
- skb_put(skb, frame_len);
3517
- dma_unmap_single(priv->device,
3518
- rx_q->rx_skbuff_dma[entry],
3519
- priv->dma_buf_sz,
3520
- DMA_FROM_DEVICE);
3521
- }
3522
-
3523
- if (netif_msg_pktdata(priv)) {
3524
- netdev_dbg(priv->dev, "frame received (%dbytes)",
3525
- frame_len);
3526
- print_pkt(skb->data, frame_len);
3527
- }
3528
-
3529
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
3530
-
3531
- stmmac_rx_vlan(priv->dev, skb);
3532
-
3533
- skb->protocol = eth_type_trans(skb, priv->dev);
3534
-
3535
- if (unlikely(!coe))
3536
- skb_checksum_none_assert(skb);
3537
- else
3538
- skb->ip_summed = CHECKSUM_UNNECESSARY;
3539
-
3540
- napi_gro_receive(&ch->napi, skb);
3541
-
3542
- priv->dev->stats.rx_packets++;
3543
- priv->dev->stats.rx_bytes += frame_len;
3974
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
3975
+ buf->page = NULL;
3976
+ error = 1;
3977
+ if (!priv->hwts_rx_en)
3978
+ priv->dev->stats.rx_errors++;
35443979 }
3980
+
3981
+ if (unlikely(error && (status & rx_not_ls)))
3982
+ goto read_again;
3983
+ if (unlikely(error)) {
3984
+ dev_kfree_skb(skb);
3985
+ skb = NULL;
3986
+ count++;
3987
+ continue;
3988
+ }
3989
+
3990
+ /* Buffer is good. Go on. */
3991
+
3992
+ prefetch(page_address(buf->page));
3993
+ if (buf->sec_page)
3994
+ prefetch(page_address(buf->sec_page));
3995
+
3996
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3997
+ len += buf1_len;
3998
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3999
+ len += buf2_len;
4000
+
4001
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4002
+ * Type frames (LLC/LLC-SNAP)
4003
+ *
4004
+ * llc_snap is never checked in GMAC >= 4, so this ACS
4005
+ * feature is always disabled and packets need to be
4006
+ * stripped manually.
4007
+ */
4008
+ if (likely(!(status & rx_not_ls)) &&
4009
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4010
+ unlikely(status != llc_snap))) {
4011
+ if (buf2_len)
4012
+ buf2_len -= ETH_FCS_LEN;
4013
+ else
4014
+ buf1_len -= ETH_FCS_LEN;
4015
+
4016
+ len -= ETH_FCS_LEN;
4017
+ }
4018
+
4019
+ if (!skb) {
4020
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
4021
+ if (!skb) {
4022
+ priv->dev->stats.rx_dropped++;
4023
+ count++;
4024
+ goto drain_data;
4025
+ }
4026
+
4027
+ dma_sync_single_for_cpu(priv->device, buf->addr,
4028
+ buf1_len, DMA_FROM_DEVICE);
4029
+ skb_copy_to_linear_data(skb, page_address(buf->page),
4030
+ buf1_len);
4031
+ skb_put(skb, buf1_len);
4032
+
4033
+ /* Data payload copied into SKB, page ready for recycle */
4034
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
4035
+ buf->page = NULL;
4036
+ } else if (buf1_len) {
4037
+ dma_sync_single_for_cpu(priv->device, buf->addr,
4038
+ buf1_len, DMA_FROM_DEVICE);
4039
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4040
+ buf->page, 0, buf1_len,
4041
+ priv->dma_buf_sz);
4042
+
4043
+ /* Data payload appended into SKB */
4044
+ page_pool_release_page(rx_q->page_pool, buf->page);
4045
+ buf->page = NULL;
4046
+ }
4047
+
4048
+ if (buf2_len) {
4049
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
4050
+ buf2_len, DMA_FROM_DEVICE);
4051
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4052
+ buf->sec_page, 0, buf2_len,
4053
+ priv->dma_buf_sz);
4054
+
4055
+ /* Data payload appended into SKB */
4056
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
4057
+ buf->sec_page = NULL;
4058
+ }
4059
+
4060
+drain_data:
4061
+ if (likely(status & rx_not_ls))
4062
+ goto read_again;
4063
+ if (!skb)
4064
+ continue;
4065
+
4066
+ /* Got entire packet into SKB. Finish it. */
4067
+
4068
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
4069
+ stmmac_rx_vlan(priv->dev, skb);
4070
+ skb->protocol = eth_type_trans(skb, priv->dev);
4071
+
4072
+ if (unlikely(!coe))
4073
+ skb_checksum_none_assert(skb);
4074
+ else
4075
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
4076
+
4077
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4078
+ skb_set_hash(skb, hash, hash_type);
4079
+
4080
+ skb_record_rx_queue(skb, queue);
4081
+ napi_gro_receive(&ch->rx_napi, skb);
4082
+ skb = NULL;
4083
+
4084
+ priv->dev->stats.rx_packets++;
4085
+ priv->dev->stats.rx_bytes += len;
4086
+ count++;
4087
+ }
4088
+
4089
+ if (status & rx_not_ls || skb) {
4090
+ rx_q->state_saved = true;
4091
+ rx_q->state.skb = skb;
4092
+ rx_q->state.error = error;
4093
+ rx_q->state.len = len;
35454094 }
35464095
35474096 stmmac_rx_refill(priv, queue);
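The state_saved block above pairs with the restore at the top of the loop: a frame that exhausts the budget mid-reassembly is parked in rx_q->state and resumed on the next poll rather than dropped. The restore side, quoted from the hunk for reference:

        if (!count && rx_q->state_saved) {
                skb = rx_q->state.skb;          /* partially built frame */
                error = rx_q->state.error;
                len = rx_q->state.len;          /* bytes gathered so far */
        }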
....@@ -3551,40 +4100,47 @@
35514100 return count;
35524101 }
35534102
3554
-/**
3555
- * stmmac_poll - stmmac poll method (NAPI)
3556
- * @napi : pointer to the napi structure.
3557
- * @budget : maximum number of packets that the current CPU can receive from
3558
- * all interfaces.
3559
- * Description :
3560
- * To look at the incoming frames and clear the tx resources.
3561
- */
3562
-static int stmmac_napi_poll(struct napi_struct *napi, int budget)
4103
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
35634104 {
35644105 struct stmmac_channel *ch =
3565
- container_of(napi, struct stmmac_channel, napi);
4106
+ container_of(napi, struct stmmac_channel, rx_napi);
35664107 struct stmmac_priv *priv = ch->priv_data;
3567
- int work_done, rx_done = 0, tx_done = 0;
35684108 u32 chan = ch->index;
4109
+ int work_done;
35694110
35704111 priv->xstats.napi_poll++;
35714112
3572
- if (ch->has_tx)
3573
- tx_done = stmmac_tx_clean(priv, budget, chan);
3574
- if (ch->has_rx)
3575
- rx_done = stmmac_rx(priv, budget, chan);
4113
+ work_done = stmmac_rx(priv, budget, chan);
4114
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
4115
+ unsigned long flags;
35764116
3577
- work_done = max(rx_done, tx_done);
4117
+ spin_lock_irqsave(&ch->lock, flags);
4118
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4119
+ spin_unlock_irqrestore(&ch->lock, flags);
4120
+ }
4121
+
4122
+ return work_done;
4123
+}
4124
+
4125
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4126
+{
4127
+ struct stmmac_channel *ch =
4128
+ container_of(napi, struct stmmac_channel, tx_napi);
4129
+ struct stmmac_priv *priv = ch->priv_data;
4130
+ u32 chan = ch->index;
4131
+ int work_done;
4132
+
4133
+ priv->xstats.napi_poll++;
4134
+
4135
+ work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
35784136 work_done = min(work_done, budget);
35794137
35804138 if (work_done < budget && napi_complete_done(napi, work_done)) {
3581
- int stat;
4139
+ unsigned long flags;
35824140
3583
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3584
- stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3585
- &priv->xstats, chan);
3586
- if (stat && napi_reschedule(napi))
3587
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
4141
+ spin_lock_irqsave(&ch->lock, flags);
4142
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4143
+ spin_unlock_irqrestore(&ch->lock, flags);
35884144 }
35894145
35904146 return work_done;
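Both poll functions end with the mirror image of the interrupt path: only once napi_complete_done() succeeds is that direction's DMA interrupt unmasked, again under ch->lock. As a shared sketch (helper name illustrative):

        static void stmmac_napi_unmask(struct stmmac_priv *priv,
                                       struct stmmac_channel *ch, bool rx)
        {
                unsigned long flags;

                spin_lock_irqsave(&ch->lock, flags);
                stmmac_enable_dma_irq(priv, priv->ioaddr, ch->index, rx, !rx);
                spin_unlock_irqrestore(&ch->lock, flags);
        }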
....@@ -3593,12 +4149,13 @@
35934149 /**
35944150 * stmmac_tx_timeout
35954151 * @dev : Pointer to net device structure
4152
+ * @txqueue: the index of the hanging transmit queue
35964153 * Description: this function is called when a packet transmission fails to
35974154 * complete within a reasonable time. The driver will mark the error in the
35984155 * netdev structure and arrange for the device to be reset to a sane state
35994156 * in order to transmit a new packet.
36004157 */
3601
-static void stmmac_tx_timeout(struct net_device *dev)
4158
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
36024159 {
36034160 struct stmmac_priv *priv = netdev_priv(dev);
36044161
....@@ -3695,6 +4252,8 @@
36954252 netdev_features_t features)
36964253 {
36974254 struct stmmac_priv *priv = netdev_priv(netdev);
4255
+ bool sph_en;
4256
+ u32 chan;
36984257
36994258 /* Keep the COE Type in case of csum is supporting */
37004259 if (features & NETIF_F_RXCSUM)
....@@ -3705,6 +4264,10 @@
37054264 * fixed in case of issue.
37064265 */
37074266 stmmac_rx_ipc(priv, priv->hw);
4267
+
4268
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4269
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4270
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
37084271
37094272 return 0;
37104273 }
....@@ -3798,6 +4361,7 @@
37984361 */
37994362 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38004363 {
4364
+ struct stmmac_priv *priv = netdev_priv(dev);
38014365 int ret = -EOPNOTSUPP;
38024366
38034367 if (!netif_running(dev))
....@@ -3807,18 +4371,14 @@
38074371 case SIOCGMIIPHY:
38084372 case SIOCGMIIREG:
38094373 case SIOCSMIIREG:
3810
- if (!dev->phydev)
3811
- return -EINVAL;
3812
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
4374
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
38134375 break;
3814
-#ifdef CONFIG_STMMAC_PTP
38154376 case SIOCSHWTSTAMP:
38164377 ret = stmmac_hwtstamp_set(dev, rq);
38174378 break;
38184379 case SIOCGHWTSTAMP:
38194380 ret = stmmac_hwtstamp_get(dev, rq);
38204381 break;
3821
-#endif
38224382 default:
38234383 break;
38244384 }
....@@ -3832,12 +4392,17 @@
38324392 struct stmmac_priv *priv = cb_priv;
38334393 int ret = -EOPNOTSUPP;
38344394
4395
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4396
+ return ret;
4397
+
38354398 stmmac_disable_all_queues(priv);
38364399
38374400 switch (type) {
38384401 case TC_SETUP_CLSU32:
3839
- if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3840
- ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4402
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4403
+ break;
4404
+ case TC_SETUP_CLSFLOWER:
4405
+ ret = stmmac_tc_setup_cls(priv, priv, type_data);
38414406 break;
38424407 default:
38434408 break;
....@@ -3847,23 +4412,7 @@
38474412 return ret;
38484413 }
38494414
3850
-static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3851
- struct tc_block_offload *f)
3852
-{
3853
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3854
- return -EOPNOTSUPP;
3855
-
3856
- switch (f->command) {
3857
- case TC_BLOCK_BIND:
3858
- return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3859
- priv, priv, f->extack);
3860
- case TC_BLOCK_UNBIND:
3861
- tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3862
- return 0;
3863
- default:
3864
- return -EOPNOTSUPP;
3865
- }
3866
-}
4415
+static LIST_HEAD(stmmac_block_cb_list);
38674416
38684417 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
38694418 void *type_data)
....@@ -3872,17 +4421,23 @@
38724421
38734422 switch (type) {
38744423 case TC_SETUP_BLOCK:
3875
- return stmmac_setup_tc_block(priv, type_data);
4424
+ return flow_block_cb_setup_simple(type_data,
4425
+ &stmmac_block_cb_list,
4426
+ stmmac_setup_tc_block_cb,
4427
+ priv, priv, true);
38764428 case TC_SETUP_QDISC_CBS:
38774429 return stmmac_tc_setup_cbs(priv, priv, type_data);
4430
+ case TC_SETUP_QDISC_TAPRIO:
4431
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
4432
+ case TC_SETUP_QDISC_ETF:
4433
+ return stmmac_tc_setup_etf(priv, priv, type_data);
38784434 default:
38794435 return -EOPNOTSUPP;
38804436 }
38814437 }
38824438
38834439 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
3884
- struct net_device *sb_dev,
3885
- select_queue_fallback_t fallback)
4440
+ struct net_device *sb_dev)
38864441 {
38874442 int gso = skb_shinfo(skb)->gso_type;
38884443
....@@ -3896,7 +4451,7 @@
38964451 return 0;
38974452 }
38984453
3899
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
4454
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
39004455 }
39014456
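stmmac_select_queue() pins all GSO traffic to queue 0: the DMA capability fields do not advertise which queues are TSO/USO-capable (an assumption based on the upstream rationale), so the first queue is the conservative choice, while everything else uses the stack's hashing. Condensed (gso comes from skb_shinfo(skb)->gso_type, as above):

        if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4))
                return 0;       /* only queue 0 is known TSO/USO-capable */

        return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;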
39024457 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
....@@ -3904,11 +4459,20 @@
39044459 struct stmmac_priv *priv = netdev_priv(ndev);
39054460 int ret = 0;
39064461
4462
+ ret = pm_runtime_get_sync(priv->device);
4463
+ if (ret < 0) {
4464
+ pm_runtime_put_noidle(priv->device);
4465
+ return ret;
4466
+ }
4467
+
39074468 ret = eth_mac_addr(ndev, addr);
39084469 if (ret)
3909
- return ret;
4470
+ goto set_mac_error;
39104471
39114472 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4473
+
4474
+set_mac_error:
4475
+ pm_runtime_put(priv->device);
39124476
39134477 return ret;
39144478 }
....@@ -3917,24 +4481,27 @@
39174481 static struct dentry *stmmac_fs_dir;
39184482
39194483 static void sysfs_display_ring(void *head, int size, int extend_desc,
3920
- struct seq_file *seq)
4484
+ struct seq_file *seq, dma_addr_t dma_phy_addr)
39214485 {
39224486 int i;
39234487 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
39244488 struct dma_desc *p = (struct dma_desc *)head;
4489
+ dma_addr_t dma_addr;
39254490
39264491 for (i = 0; i < size; i++) {
39274492 if (extend_desc) {
3928
- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3929
- i, (unsigned int)virt_to_phys(ep),
4493
+ dma_addr = dma_phy_addr + i * sizeof(*ep);
4494
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4495
+ i, &dma_addr,
39304496 le32_to_cpu(ep->basic.des0),
39314497 le32_to_cpu(ep->basic.des1),
39324498 le32_to_cpu(ep->basic.des2),
39334499 le32_to_cpu(ep->basic.des3));
39344500 ep++;
39354501 } else {
3936
- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3937
- i, (unsigned int)virt_to_phys(p),
4502
+ dma_addr = dma_phy_addr + i * sizeof(*p);
4503
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4504
+ i, &dma_addr,
39384505 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
39394506 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
39404507 p++;
....@@ -3943,7 +4510,7 @@
39434510 }
39444511 }
39454512
3946
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
4513
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
39474514 {
39484515 struct net_device *dev = seq->private;
39494516 struct stmmac_priv *priv = netdev_priv(dev);
....@@ -3962,11 +4529,11 @@
39624529 if (priv->extend_desc) {
39634530 seq_printf(seq, "Extended descriptor ring:\n");
39644531 sysfs_display_ring((void *)rx_q->dma_erx,
3965
- DMA_RX_SIZE, 1, seq);
4532
+ priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
39664533 } else {
39674534 seq_printf(seq, "Descriptor ring:\n");
39684535 sysfs_display_ring((void *)rx_q->dma_rx,
3969
- DMA_RX_SIZE, 0, seq);
4536
+ priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
39704537 }
39714538 }
39724539
....@@ -3978,33 +4545,19 @@
39784545 if (priv->extend_desc) {
39794546 seq_printf(seq, "Extended descriptor ring:\n");
39804547 sysfs_display_ring((void *)tx_q->dma_etx,
3981
- DMA_TX_SIZE, 1, seq);
3982
- } else {
4548
+ priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4549
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
39834550 seq_printf(seq, "Descriptor ring:\n");
39844551 sysfs_display_ring((void *)tx_q->dma_tx,
3985
- DMA_TX_SIZE, 0, seq);
4552
+ priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
39864553 }
39874554 }
39884555
39894556 return 0;
39904557 }
4558
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
39914559
3992
-static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3993
-{
3994
- return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3995
-}
3996
-
3997
-/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3998
-
3999
-static const struct file_operations stmmac_rings_status_fops = {
4000
- .owner = THIS_MODULE,
4001
- .open = stmmac_sysfs_ring_open,
4002
- .read = seq_read,
4003
- .llseek = seq_lseek,
4004
- .release = single_release,
4005
-};
4006
-
4007
-static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
4560
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
40084561 {
40094562 struct net_device *dev = seq->private;
40104563 struct stmmac_priv *priv = netdev_priv(dev);
....@@ -4062,64 +4615,94 @@
40624615 priv->dma_cap.number_rx_channel);
40634616 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
40644617 priv->dma_cap.number_tx_channel);
4618
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4619
+ priv->dma_cap.number_rx_queues);
4620
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4621
+ priv->dma_cap.number_tx_queues);
40654622 seq_printf(seq, "\tEnhanced descriptors: %s\n",
40664623 (priv->dma_cap.enh_desc) ? "Y" : "N");
4067
-
4624
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4625
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4626
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4627
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4628
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4629
+ priv->dma_cap.pps_out_num);
4630
+ seq_printf(seq, "\tSafety Features: %s\n",
4631
+ priv->dma_cap.asp ? "Y" : "N");
4632
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
4633
+ priv->dma_cap.frpsel ? "Y" : "N");
4634
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
4635
+ priv->dma_cap.addr64);
4636
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
4637
+ priv->dma_cap.rssen ? "Y" : "N");
4638
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4639
+ priv->dma_cap.vlhash ? "Y" : "N");
4640
+ seq_printf(seq, "\tSplit Header: %s\n",
4641
+ priv->dma_cap.sphen ? "Y" : "N");
4642
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4643
+ priv->dma_cap.vlins ? "Y" : "N");
4644
+ seq_printf(seq, "\tDouble VLAN: %s\n",
4645
+ priv->dma_cap.dvlan ? "Y" : "N");
4646
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4647
+ priv->dma_cap.l3l4fnum);
4648
+ seq_printf(seq, "\tARP Offloading: %s\n",
4649
+ priv->dma_cap.arpoffsel ? "Y" : "N");
4650
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4651
+ priv->dma_cap.estsel ? "Y" : "N");
4652
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4653
+ priv->dma_cap.fpesel ? "Y" : "N");
4654
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4655
+ priv->dma_cap.tbssel ? "Y" : "N");
40684656 return 0;
40694657 }
4658
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
40704659
4071
-static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4660
+/* Use network device events to rename debugfs file entries.
4661
+ */
4662
+static int stmmac_device_event(struct notifier_block *unused,
4663
+ unsigned long event, void *ptr)
40724664 {
4073
- return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4665
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4666
+ struct stmmac_priv *priv = netdev_priv(dev);
4667
+
4668
+ if (dev->netdev_ops != &stmmac_netdev_ops)
4669
+ goto done;
4670
+
4671
+ switch (event) {
4672
+ case NETDEV_CHANGENAME:
4673
+ if (priv->dbgfs_dir)
4674
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4675
+ priv->dbgfs_dir,
4676
+ stmmac_fs_dir,
4677
+ dev->name);
4678
+ break;
4679
+ }
4680
+done:
4681
+ return NOTIFY_DONE;
40744682 }
40754683
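/* The rename path fires when userspace renames the interface, e.g.
 * "ip link set eth0 name lan0": the notifier above moves
 * /sys/kernel/debug/stmmaceth/eth0 to .../lan0.
 */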
4076
-static const struct file_operations stmmac_dma_cap_fops = {
4077
- .owner = THIS_MODULE,
4078
- .open = stmmac_sysfs_dma_cap_open,
4079
- .read = seq_read,
4080
- .llseek = seq_lseek,
4081
- .release = single_release,
4684
+static struct notifier_block stmmac_notifier = {
4685
+ .notifier_call = stmmac_device_event,
40824686 };
40834687
4084
-static int stmmac_init_fs(struct net_device *dev)
4688
+static void stmmac_init_fs(struct net_device *dev)
40854689 {
40864690 struct stmmac_priv *priv = netdev_priv(dev);
4691
+
4692
+ rtnl_lock();
40874693
40884694 /* Create per netdev entries */
40894695 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
40904696
4091
- if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4092
- netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4093
-
4094
- return -ENOMEM;
4095
- }
4096
-
40974697 /* Entry to report DMA RX/TX rings */
4098
- priv->dbgfs_rings_status =
4099
- debugfs_create_file("descriptors_status", 0444,
4100
- priv->dbgfs_dir, dev,
4101
- &stmmac_rings_status_fops);
4102
-
4103
- if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4104
- netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4105
- debugfs_remove_recursive(priv->dbgfs_dir);
4106
-
4107
- return -ENOMEM;
4108
- }
4698
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4699
+ &stmmac_rings_status_fops);
41094700
41104701 /* Entry to report the DMA HW features */
4111
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4112
- priv->dbgfs_dir,
4113
- dev, &stmmac_dma_cap_fops);
4702
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4703
+ &stmmac_dma_cap_fops);
41144704
4115
- if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4116
- netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4117
- debugfs_remove_recursive(priv->dbgfs_dir);
4118
-
4119
- return -ENOMEM;
4120
- }
4121
-
4122
- return 0;
4705
+ rtnl_unlock();
41234706 }
41244707
41254708 static void stmmac_exit_fs(struct net_device *dev)
....@@ -4129,6 +4712,111 @@
41294712 debugfs_remove_recursive(priv->dbgfs_dir);
41304713 }
41314714 #endif /* CONFIG_DEBUG_FS */
4715
+
4716
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
4717
+{
4718
+ unsigned char *data = (unsigned char *)&vid_le;
4719
+ unsigned char data_byte = 0;
4720
+ u32 crc = ~0x0;
4721
+ u32 temp = 0;
4722
+ int i, bits;
4723
+
4724
+ bits = get_bitmask_order(VLAN_VID_MASK);
4725
+ for (i = 0; i < bits; i++) {
4726
+ if ((i % 8) == 0)
4727
+ data_byte = data[i / 8];
4728
+
4729
+ temp = ((crc & 1) ^ data_byte) & 1;
4730
+ crc >>= 1;
4731
+ data_byte >>= 1;
4732
+
4733
+ if (temp)
4734
+ crc ^= 0xedb88320;
4735
+ }
4736
+
4737
+ return crc;
4738
+}
4739
+
4740
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4741
+{
4742
+ u32 crc, hash = 0;
4743
+ __le16 pmatch = 0;
4744
+ int count = 0;
4745
+ u16 vid = 0;
4746
+
4747
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4748
+ __le16 vid_le = cpu_to_le16(vid);
4749
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4750
+ hash |= (1 << crc);
4751
+ count++;
4752
+ }
4753
+
4754
+ if (!priv->dma_cap.vlhash) {
4755
+ if (count > 2) /* VID = 0 always passes filter */
4756
+ return -EOPNOTSUPP;
4757
+
4758
+ pmatch = cpu_to_le16(vid);
4759
+ hash = 0;
4760
+ }
4761
+
4762
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4763
+}
4764
+
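For reference, a minimal sketch of how one VID maps to a filter bin, mirroring the two helpers above (check_vid_bin() is a hypothetical name, not a driver function; it reuses the static stmmac_vid_crc32_le() defined in this file):

	/* Hypothetical helper: which of the 16 hash bins does "vid" set? */
	static u32 check_vid_bin(u16 vid)
	{
		__le16 vid_le = cpu_to_le16(vid);
		u32 crc = stmmac_vid_crc32_le(vid_le);	/* bit-serial CRC-32 (LE) */

		/* Invert and bit-reverse, then keep the top 4 bits -> bins 0..15 */
		return bitrev32(~crc) >> 28;
	}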
4765
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4766
+{
4767
+ struct stmmac_priv *priv = netdev_priv(ndev);
4768
+ bool is_double = false;
4769
+ int ret;
4770
+
4771
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
4772
+ is_double = true;
4773
+
4774
+ set_bit(vid, priv->active_vlans);
4775
+ ret = stmmac_vlan_update(priv, is_double);
4776
+ if (ret) {
4777
+ clear_bit(vid, priv->active_vlans);
4778
+ return ret;
4779
+ }
4780
+
4781
+ if (priv->hw->num_vlan) {
4782
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4783
+ if (ret)
4784
+ return ret;
4785
+ }
4786
+
4787
+ return 0;
4788
+}
4789
+
4790
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4791
+{
4792
+ struct stmmac_priv *priv = netdev_priv(ndev);
4793
+ bool is_double = false;
4794
+ int ret;
4795
+
4796
+ ret = pm_runtime_get_sync(priv->device);
4797
+ if (ret < 0) {
4798
+ pm_runtime_put_noidle(priv->device);
4799
+ return ret;
4800
+ }
4801
+
4802
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
4803
+ is_double = true;
4804
+
4805
+ clear_bit(vid, priv->active_vlans);
4806
+
4807
+ if (priv->hw->num_vlan) {
4808
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4809
+ if (ret)
4810
+ goto del_vlan_error;
4811
+ }
4812
+
4813
+ ret = stmmac_vlan_update(priv, is_double);
4814
+
4815
+del_vlan_error:
4816
+ pm_runtime_put(priv->device);
4817
+
4818
+ return ret;
4819
+}
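The pm_runtime bracketing above is the stock get_sync/put idiom; a minimal sketch under the same assumptions (the hw_work callback stands in for the VLAN filter update, it is not a driver function):

	/* Resume the device, do the HW access, then drop the reference. */
	static int guarded_hw_access(struct device *dev, int (*hw_work)(void))
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* get_sync bumps the usage count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		ret = hw_work();
		pm_runtime_put(dev);	/* may let the device power down again */
		return ret;
	}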
41324820
41334821 static const struct net_device_ops stmmac_netdev_ops = {
41344822 .ndo_open = stmmac_open,
....@@ -4146,6 +4834,8 @@
41464834 .ndo_poll_controller = stmmac_poll_controller,
41474835 #endif
41484836 .ndo_set_mac_address = stmmac_set_mac_address,
4837
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4838
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
41494839 };
41504840
41514841 static void stmmac_reset_subtask(struct stmmac_priv *priv)
....@@ -4164,7 +4854,7 @@
41644854
41654855 set_bit(STMMAC_DOWN, &priv->state);
41664856 dev_close(priv->dev);
4167
- dev_open(priv->dev);
4857
+ dev_open(priv->dev, NULL);
41684858 clear_bit(STMMAC_DOWN, &priv->state);
41694859 clear_bit(STMMAC_RESETING, &priv->state);
41704860 rtnl_unlock();
....@@ -4214,6 +4904,12 @@
42144904 priv->plat->enh_desc = priv->dma_cap.enh_desc;
42154905 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
42164906 priv->hw->pmt = priv->plat->pmt;
4907
+ if (priv->dma_cap.hash_tb_sz) {
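+ /* e.g. hash_tb_sz == 2: BIT(2) << 5 = 128 multicast filter bins,
+ * so mcast_bits_log2 = ilog2(128) = 7
+ */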
4908
+ priv->hw->multicast_filter_bins =
4909
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
4910
+ priv->hw->mcast_bits_log2 =
4911
+ ilog2(priv->hw->multicast_filter_bins);
4912
+ }
42174913
42184914 /* TXCOE doesn't work in thresh DMA mode */
42194915 if (priv->plat->force_thresh_dma_mode)
....@@ -4250,6 +4946,9 @@
42504946 if (priv->dma_cap.tsoen)
42514947 dev_info(priv->device, "TSO supported\n");
42524948
4949
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4950
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4951
+
42534952 /* Run HW quirks, if any */
42544953 if (priv->hwif_quirks) {
42554954 ret = priv->hwif_quirks(priv);
....@@ -4272,6 +4971,92 @@
42724971 return 0;
42734972 }
42744973
4974
+static void stmmac_napi_add(struct net_device *dev)
4975
+{
4976
+ struct stmmac_priv *priv = netdev_priv(dev);
4977
+ u32 queue, maxq;
4978
+
4979
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4980
+
4981
+ for (queue = 0; queue < maxq; queue++) {
4982
+ struct stmmac_channel *ch = &priv->channel[queue];
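+ /* Cap the NAPI budget at the configured ring size (when non-zero)
+ * so a single poll never claims more descriptors than a ring holds.
+ */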
4983
+ int rx_budget = ((priv->plat->dma_rx_size < NAPI_POLL_WEIGHT) &&
4984
+ (priv->plat->dma_rx_size > 0)) ?
4985
+ priv->plat->dma_rx_size : NAPI_POLL_WEIGHT;
4986
+ int tx_budget = ((priv->plat->dma_tx_size < NAPI_POLL_WEIGHT) &&
4987
+ (priv->plat->dma_tx_size > 0)) ?
4988
+ priv->plat->dma_tx_size : NAPI_POLL_WEIGHT;
4989
+
4990
+ ch->priv_data = priv;
4991
+ ch->index = queue;
4992
+ spin_lock_init(&ch->lock);
4993
+
4994
+ if (queue < priv->plat->rx_queues_to_use) {
4995
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
4996
+ rx_budget);
4997
+ }
4998
+ if (queue < priv->plat->tx_queues_to_use) {
4999
+ netif_tx_napi_add(dev, &ch->tx_napi,
5000
+ stmmac_napi_poll_tx, tx_budget);
5001
+ }
5002
+ }
5003
+}
5004
+
5005
+static void stmmac_napi_del(struct net_device *dev)
5006
+{
5007
+ struct stmmac_priv *priv = netdev_priv(dev);
5008
+ u32 queue, maxq;
5009
+
5010
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
5011
+
5012
+ for (queue = 0; queue < maxq; queue++) {
5013
+ struct stmmac_channel *ch = &priv->channel[queue];
5014
+
5015
+ if (queue < priv->plat->rx_queues_to_use)
5016
+ netif_napi_del(&ch->rx_napi);
5017
+ if (queue < priv->plat->tx_queues_to_use)
5018
+ netif_napi_del(&ch->tx_napi);
5019
+ }
5020
+}
5021
+
5022
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
5023
+{
5024
+ struct stmmac_priv *priv = netdev_priv(dev);
5025
+ int ret = 0;
5026
+
5027
+ if (netif_running(dev))
5028
+ stmmac_release(dev);
5029
+
5030
+ stmmac_napi_del(dev);
5031
+
5032
+ priv->plat->rx_queues_to_use = rx_cnt;
5033
+ priv->plat->tx_queues_to_use = tx_cnt;
5034
+
5035
+ stmmac_napi_add(dev);
5036
+
5037
+ if (netif_running(dev))
5038
+ ret = stmmac_open(dev);
5039
+
5040
+ return ret;
5041
+}
5042
+
5043
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
5044
+{
5045
+ struct stmmac_priv *priv = netdev_priv(dev);
5046
+ int ret = 0;
5047
+
5048
+ if (netif_running(dev))
5049
+ stmmac_release(dev);
5050
+
5051
+ priv->dma_rx_size = rx_size;
5052
+ priv->dma_tx_size = tx_size;
5053
+
5054
+ if (netif_running(dev))
5055
+ ret = stmmac_open(dev);
5056
+
5057
+ return ret;
5058
+}
5059
+
42755060 /**
42765061 * stmmac_dvr_probe
42775062 * @device: device pointer
....@@ -4288,12 +5073,11 @@
42885073 {
42895074 struct net_device *ndev = NULL;
42905075 struct stmmac_priv *priv;
4291
- u32 queue, maxq;
4292
- int ret = 0;
5076
+ u32 rxq;
5077
+ int i, ret = 0;
42935078
4294
- ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4295
- MTL_MAX_TX_QUEUES,
4296
- MTL_MAX_RX_QUEUES);
5079
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
5080
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
42975081 if (!ndev)
42985082 return -ENOMEM;
42995083
....@@ -4313,7 +5097,7 @@
43135097 priv->wol_irq = res->wol_irq;
43145098 priv->lpi_irq = res->lpi_irq;
43155099
4316
- if (res->mac)
5100
+ if (!IS_ERR_OR_NULL(res->mac))
43175101 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
43185102
43195103 dev_set_drvdata(device, priv->dev);
....@@ -4325,8 +5109,7 @@
43255109 priv->wq = create_singlethread_workqueue("stmmac_wq");
43265110 if (!priv->wq) {
43275111 dev_err(priv->device, "failed to create workqueue\n");
4328
- ret = -ENOMEM;
4329
- goto error_wq;
5112
+ return -ENOMEM;
43305113 }
43315114
43325115 INIT_WORK(&priv->service_task, stmmac_service_task);
....@@ -4354,10 +5137,6 @@
43545137
43555138 stmmac_check_ether_addr(priv);
43565139
4357
- /* Configure real RX and TX queues */
4358
- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4359
- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4360
-
43615140 ndev->netdev_ops = &stmmac_netdev_ops;
43625141
43635142 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
....@@ -4375,20 +5154,79 @@
43755154 priv->tso = true;
43765155 dev_info(priv->device, "TSO feature enabled\n");
43775156 }
5157
+
5158
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
5159
+ ndev->hw_features |= NETIF_F_GRO;
5160
+ if (!priv->plat->sph_disable) {
5161
+ priv->sph = true;
5162
+ dev_info(priv->device, "SPH feature enabled\n");
5163
+ }
5164
+ }
5165
+
5166
+ /* The current IP register MAC_HW_Feature1[ADDR64] only defines
5167
+ * 32/40/64 bit widths, but some SoCs support other widths: e.g. the
5168
+ * i.MX8MP supports 34 bits, reported as a 40 bit width in
5169
+ * MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64 with the real width.
5170
+ */
5171
+ if (priv->plat->addr64)
5172
+ priv->dma_cap.addr64 = priv->plat->addr64;
5173
+
5174
+ if (priv->dma_cap.addr64) {
5175
+ ret = dma_set_mask_and_coherent(device,
5176
+ DMA_BIT_MASK(priv->dma_cap.addr64));
5177
+ if (!ret) {
5178
+ dev_info(priv->device, "Using %d bits DMA width\n",
5179
+ priv->dma_cap.addr64);
5180
+
5181
+ /*
5182
+ * If more than 32 bits can be addressed, make sure to
5183
+ * enable enhanced addressing mode.
5184
+ */
5185
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5186
+ priv->plat->dma_cfg->eame = true;
5187
+ } else {
5188
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5189
+ if (ret) {
5190
+ dev_err(priv->device, "Failed to set DMA Mask\n");
5191
+ goto error_hw_init;
5192
+ }
5193
+
5194
+ priv->dma_cap.addr64 = 32;
5195
+ }
5196
+ }
5197
+
43785198 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
43795199 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
43805200 #ifdef STMMAC_VLAN_TAG_USED
43815201 /* Both mac100 and gmac support receive VLAN tag detection */
43825202 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5203
+ if (priv->dma_cap.vlhash) {
5204
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5205
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5206
+ }
5207
+ if (priv->dma_cap.vlins) {
5208
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5209
+ if (priv->dma_cap.dvlan)
5210
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5211
+ }
43835212 #endif
43845213 priv->msg_enable = netif_msg_init(debug, default_msg_level);
43855214
5215
+ /* Initialize RSS */
5216
+ rxq = priv->plat->rx_queues_to_use;
5217
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
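+ /* ethtool_rxfh_indir_default(i, rxq) is simply i % rxq, spreading
+ * flows round-robin across the enabled RX queues.
+ */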
5218
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5219
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
5220
+
5221
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
5222
+ ndev->features |= NETIF_F_RXHASH;
5223
+
43865224 /* MTU range: 46 - hw-specific max */
43875225 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4388
- if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4389
- ndev->max_mtu = JUMBO_LEN;
4390
- else if (priv->plat->has_xgmac)
5226
+ if (priv->plat->has_xgmac)
43915227 ndev->max_mtu = XGMAC_JUMBO_LEN;
5228
+ else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
5229
+ ndev->max_mtu = JUMBO_LEN;
43925230 else
43935231 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
43945232 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
....@@ -4406,22 +5244,7 @@
44065244 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
44075245
44085246 /* Setup channels NAPI */
4409
- maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4410
-
4411
- for (queue = 0; queue < maxq; queue++) {
4412
- struct stmmac_channel *ch = &priv->channel[queue];
4413
-
4414
- ch->priv_data = priv;
4415
- ch->index = queue;
4416
-
4417
- if (queue < priv->plat->rx_queues_to_use)
4418
- ch->has_rx = true;
4419
- if (queue < priv->plat->tx_queues_to_use)
4420
- ch->has_tx = true;
4421
-
4422
- netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4423
- NAPI_POLL_WEIGHT);
4424
- }
5247
+ stmmac_napi_add(ndev);
44255248
44265249 mutex_init(&priv->lock);
44275250
....@@ -4431,15 +5254,18 @@
44315254 * set the MDC clock dynamically according to the csr actual
44325255 * clock input.
44335256 */
4434
- if (!priv->plat->clk_csr)
4435
- stmmac_clk_csr_set(priv);
4436
- else
5257
+ if (priv->plat->clk_csr >= 0)
44375258 priv->clk_csr = priv->plat->clk_csr;
5259
+ else
5260
+ stmmac_clk_csr_set(priv);
44385261
44395262 stmmac_check_pcs_mode(priv);
44405263
4441
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4442
- priv->hw->pcs != STMMAC_PCS_TBI &&
5264
+ pm_runtime_get_noresume(device);
5265
+ pm_runtime_set_active(device);
5266
+ pm_runtime_enable(device);
5267
+
5268
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
44435269 priv->hw->pcs != STMMAC_PCS_RTBI) {
44445270 /* MDIO bus Registration */
44455271 ret = stmmac_mdio_register(ndev);
....@@ -4451,6 +5277,12 @@
44515277 }
44525278 }
44535279
5280
+ ret = stmmac_phy_setup(priv);
5281
+ if (ret) {
5282
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5283
+ goto error_phy_setup;
5284
+ }
5285
+
44545286 ret = register_netdev(ndev);
44555287 if (ret) {
44565288 dev_err(priv->device, "%s: ERROR %i registering the device\n",
....@@ -4459,29 +5291,29 @@
44595291 }
44605292
44615293 #ifdef CONFIG_DEBUG_FS
4462
- ret = stmmac_init_fs(ndev);
4463
- if (ret < 0)
4464
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4465
- __func__);
5294
+ stmmac_init_fs(ndev);
44665295 #endif
5296
+
5297
+ /* Let pm_runtime_put() disable the clocks.
5298
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
5299
+ */
5300
+ pm_runtime_put(device);
5301
+
5302
+ //add
5303
+ phy_register_fixup_for_uid(RTL_8211F_PHY_ID, RTL_8211F_PHY_ID_MASK, rtl8211F_led_control);
44675304
44685305 return ret;
44695306
44705307 error_netdev_register:
4471
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4472
- priv->hw->pcs != STMMAC_PCS_TBI &&
5308
+ phylink_destroy(priv->phylink);
5309
+error_phy_setup:
5310
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
44735311 priv->hw->pcs != STMMAC_PCS_RTBI)
44745312 stmmac_mdio_unregister(ndev);
44755313 error_mdio_register:
4476
- for (queue = 0; queue < maxq; queue++) {
4477
- struct stmmac_channel *ch = &priv->channel[queue];
4478
-
4479
- netif_napi_del(&ch->napi);
4480
- }
5314
+ stmmac_napi_del(ndev);
44815315 error_hw_init:
44825316 destroy_workqueue(priv->wq);
4483
-error_wq:
4484
- free_netdev(ndev);
44855317
44865318 return ret;
44875319 }
....@@ -4500,25 +5332,30 @@
45005332
45015333 netdev_info(priv->dev, "%s: removing driver", __func__);
45025334
4503
-#ifdef CONFIG_DEBUG_FS
4504
- stmmac_exit_fs(ndev);
4505
-#endif
45065335 stmmac_stop_all_dma(priv);
4507
-
45085336 stmmac_mac_set(priv, priv->ioaddr, false);
45095337 netif_carrier_off(ndev);
45105338 unregister_netdev(ndev);
5339
+
5340
+ /* Serdes power down needs to happen after the VLAN filter
5341
+ * is deleted, which unregister_netdev() triggers.
5342
+ */
5343
+ if (priv->plat->serdes_powerdown)
5344
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5345
+
5346
+#ifdef CONFIG_DEBUG_FS
5347
+ stmmac_exit_fs(ndev);
5348
+#endif
5349
+ phylink_destroy(priv->phylink);
45115350 if (priv->plat->stmmac_rst)
45125351 reset_control_assert(priv->plat->stmmac_rst);
4513
- clk_disable_unprepare(priv->plat->pclk);
4514
- clk_disable_unprepare(priv->plat->stmmac_clk);
4515
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4516
- priv->hw->pcs != STMMAC_PCS_TBI &&
5352
+ pm_runtime_put(dev);
5353
+ pm_runtime_disable(dev);
5354
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
45175355 priv->hw->pcs != STMMAC_PCS_RTBI)
45185356 stmmac_mdio_unregister(ndev);
45195357 destroy_workqueue(priv->wq);
45205358 mutex_destroy(&priv->lock);
4521
- free_netdev(ndev);
45225359
45235360 return 0;
45245361 }
....@@ -4540,8 +5377,7 @@
45405377 if (!ndev || !netif_running(ndev))
45415378 return 0;
45425379
4543
- if (ndev->phydev)
4544
- phy_stop(ndev->phydev);
5380
+ phylink_mac_change(priv->phylink, false);
45455381
45465382 mutex_lock(&priv->lock);
45475383
....@@ -4560,34 +5396,38 @@
45605396 /* Stop TX/RX DMA */
45615397 stmmac_stop_all_dma(priv);
45625398
5399
+ if (priv->plat->serdes_powerdown)
5400
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5401
+
45635402 /* Enable Power down mode by programming the PMT regs */
4564
- if (device_may_wakeup(priv->device)) {
5403
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
45655404 stmmac_pmt(priv, priv->hw, priv->wolopts);
45665405 priv->irq_wake = 1;
45675406 } else {
5407
+ mutex_unlock(&priv->lock);
5408
+ rtnl_lock();
5409
+ if (device_may_wakeup(priv->device))
5410
+ phylink_speed_down(priv->phylink, false);
45685411 if (priv->plat->integrated_phy_power)
45695412 priv->plat->integrated_phy_power(priv->plat->bsp_priv,
45705413 false);
5414
+ phylink_stop(priv->phylink);
5415
+ rtnl_unlock();
5416
+ mutex_lock(&priv->lock);
5417
+
45715418 stmmac_mac_set(priv, priv->ioaddr, false);
45725419 pinctrl_pm_select_sleep_state(priv->device);
4573
- /* Disable clock in case of PWM is off */
4574
- if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP))
4575
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
4576
- clk_disable_unprepare(priv->plat->pclk);
4577
- clk_disable_unprepare(priv->plat->stmmac_clk);
45785420 }
45795421 mutex_unlock(&priv->lock);
45805422
4581
- priv->oldlink = false;
45825423 priv->speed = SPEED_UNKNOWN;
4583
- priv->oldduplex = DUPLEX_UNKNOWN;
45845424 return 0;
45855425 }
45865426 EXPORT_SYMBOL_GPL(stmmac_suspend);
45875427
45885428 /**
45895429 * stmmac_reset_queues_param - reset queue parameters
4590
- * @dev: device pointer
5430
+ * @priv: device pointer
45915431 */
45925432 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
45935433 {
....@@ -4623,6 +5463,7 @@
46235463 {
46245464 struct net_device *ndev = dev_get_drvdata(dev);
46255465 struct stmmac_priv *priv = netdev_priv(ndev);
5466
+ int ret;
46265467
46275468 if (!netif_running(ndev))
46285469 return 0;
....@@ -4633,18 +5474,13 @@
46335474 * this bit because it can generate problems while resuming
46345475 * from another devices (e.g. serial console).
46355476 */
4636
- if (device_may_wakeup(priv->device)) {
5477
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
46375478 mutex_lock(&priv->lock);
46385479 stmmac_pmt(priv, priv->hw, 0);
46395480 mutex_unlock(&priv->lock);
46405481 priv->irq_wake = 0;
46415482 } else {
46425483 pinctrl_pm_select_default_state(priv->device);
4643
- /* enable the clk previously disabled */
4644
- clk_prepare_enable(priv->plat->stmmac_clk);
4645
- clk_prepare_enable(priv->plat->pclk);
4646
- if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP))
4647
- clk_prepare_enable(priv->plat->clk_ptp_ref);
46485484 /* reset the phy so that it's ready */
46495485 if (priv->mii)
46505486 stmmac_mdio_reset(priv->mii);
....@@ -4653,6 +5489,23 @@
46535489 true);
46545490 }
46555491
5492
+ if (priv->plat->serdes_powerup) {
5493
+ ret = priv->plat->serdes_powerup(ndev,
5494
+ priv->plat->bsp_priv);
5495
+
5496
+ if (ret < 0)
5497
+ return ret;
5498
+ }
5499
+
5500
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5501
+ rtnl_lock();
5502
+ phylink_start(priv->phylink);
5503
+ /* We may have called phylink_speed_down before */
5504
+ phylink_speed_up(priv->phylink);
5505
+ rtnl_unlock();
5506
+ }
5507
+
5508
+ rtnl_lock();
46565509 mutex_lock(&priv->lock);
46575510
46585511 stmmac_reset_queues_param(priv);
....@@ -4660,18 +5513,26 @@
46605513 stmmac_free_tx_skbufs(priv);
46615514 stmmac_clear_descriptors(priv);
46625515
5516
+#if 1
5517
+ printk("ben -------resume add 2s delay time.\n");
5518
+ mdelay(2000);
5519
+
5520
+#endif
5521
+
46635522 stmmac_hw_setup(ndev, false);
4664
- stmmac_init_tx_coalesce(priv);
5523
+ stmmac_init_coalesce(priv);
46655524 stmmac_set_rx_mode(ndev);
5525
+
5526
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
46665527
46675528 stmmac_enable_all_queues(priv);
46685529
4669
- netif_device_attach(ndev);
4670
-
46715530 mutex_unlock(&priv->lock);
5531
+ rtnl_unlock();
46725532
4673
- if (ndev->phydev)
4674
- phy_start(ndev->phydev);
5533
+ phylink_mac_change(priv->phylink, true);
5534
+
5535
+ netif_device_attach(ndev);
46755536
46765537 return 0;
46775538 }
....@@ -4683,7 +5544,7 @@
46835544 char *opt;
46845545
46855546 if (!str || !*str)
4686
- return -EINVAL;
5547
+ return 1;
46875548 while ((opt = strsep(&str, ",")) != NULL) {
46885549 if (!strncmp(opt, "debug:", 6)) {
46895550 if (kstrtoint(opt + 6, 0, &debug))
....@@ -4714,11 +5575,11 @@
47145575 goto err;
47155576 }
47165577 }
4717
- return 0;
5578
+ return 1;
47185579
47195580 err:
47205581 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4721
- return -EINVAL;
5582
+ return 1;
47225583 }
47235584
47245585 __setup("stmmaceth=", stmmac_cmdline_opt);
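For reference, an illustrative boot argument consumed by this hook (debug: is visible in the parser above; the other option names are assumed from the module parameters, and the values are arbitrary). Note that __setup() handlers return 1 to mark the option as consumed:

	stmmaceth=debug:16,phyaddr:1,buf_sz:4096,watchdog:5000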
....@@ -4728,16 +5589,9 @@
47285589 {
47295590 #ifdef CONFIG_DEBUG_FS
47305591 /* Create debugfs main directory if it doesn't exist yet */
4731
- if (!stmmac_fs_dir) {
5592
+ if (!stmmac_fs_dir)
47325593 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4733
-
4734
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4735
- pr_err("ERROR %s, debugfs create directory failed\n",
4736
- STMMAC_RESOURCE_NAME);
4737
-
4738
- return -ENOMEM;
4739
- }
4740
- }
5594
+ register_netdevice_notifier(&stmmac_notifier);
47415595 #endif
47425596
47435597 return 0;
....@@ -4746,6 +5600,7 @@
47465600 static void __exit stmmac_exit(void)
47475601 {
47485602 #ifdef CONFIG_DEBUG_FS
5603
+ unregister_netdevice_notifier(&stmmac_notifier);
47495604 debugfs_remove_recursive(stmmac_fs_dir);
47505605 #endif
47515606 }