forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*******************************************************************************
   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   ST Ethernet IPs are built around a Synopsys IP Core.

   Copyright(C) 2007-2011 STMicroelectronics Ltd

-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".

   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

@@ -38,6 +28,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/prefetch.h>
 #include <linux/pinctrl/consumer.h>
 #ifdef CONFIG_DEBUG_FS
@@ -45,6 +36,7 @@
 #include <linux/seq_file.h>
 #endif /* CONFIG_DEBUG_FS */
 #include <linux/net_tstamp.h>
+#include <linux/phylink.h>
 #include <linux/udp.h>
 #include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
@@ -54,6 +46,13 @@
 #include "dwmac1000.h"
 #include "dwxgmac2.h"
 #include "hwif.h"
+
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE  (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+                             PTP_TCR_TSCTRLSSR)

 #define STMMAC_ALIGN(x)     ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define TSO_MAX_BUFF_SIZE   (SZ_16K - 1)
@@ -72,10 +71,10 @@
 module_param(phyaddr, int, 0444);
 MODULE_PARM_DESC(phyaddr, "Physical device address");

-#define STMMAC_TX_THRESH    (DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH    (DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)

-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

@@ -103,7 +102,7 @@
 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 module_param(eee_timer, int, 0644);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

 /* By default the driver will use the ring mode to manage tx and rx descriptors,
  * but allow user to force to use the chain instead of the ring
@@ -115,11 +114,34 @@
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

 #ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static const struct net_device_ops stmmac_netdev_ops;
+static void stmmac_init_fs(struct net_device *dev);
 static void stmmac_exit_fs(struct net_device *dev);
 #endif

 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+    int ret = 0;
+
+    if (enabled) {
+        ret = clk_prepare_enable(priv->plat->stmmac_clk);
+        if (ret)
+            return ret;
+        ret = clk_prepare_enable(priv->plat->pclk);
+        if (ret) {
+            clk_disable_unprepare(priv->plat->stmmac_clk);
+            return ret;
+        }
+    } else {
+        clk_disable_unprepare(priv->plat->stmmac_clk);
+        clk_disable_unprepare(priv->plat->pclk);
+    }
+
+    return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

 /**
  * stmmac_verify_args - verify the driver parameters.
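Note: stmmac_bus_clks_config() gives the probe and power-management paths one helper that gates both bus clocks and unwinds correctly when the second clk_prepare_enable() fails. A minimal sketch of the intended call pattern (the wrapper names below are illustrative, not part of this change):

    /* Sketch only: pairing the helper across a suspend/resume cycle. */
    static int example_resume_clks(struct stmmac_priv *priv)
    {
        /* Enables stmmac_clk, then pclk; on pclk failure the helper
         * has already disabled stmmac_clk again. */
        return stmmac_bus_clks_config(priv, true);
    }

    static void example_suspend_clks(struct stmmac_priv *priv)
    {
        /* The disable path cannot fail and always returns 0. */
        stmmac_bus_clks_config(priv, false);
    }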
@@ -156,7 +178,10 @@
 for (queue = 0; queue < maxq; queue++) {
     struct stmmac_channel *ch = &priv->channel[queue];

-    napi_disable(&ch->napi);
+    if (queue < rx_queues_cnt)
+        napi_disable(&ch->rx_napi);
+    if (queue < tx_queues_cnt)
+        napi_disable(&ch->tx_napi);
 }
 }

@@ -174,7 +199,10 @@
 for (queue = 0; queue < maxq; queue++) {
     struct stmmac_channel *ch = &priv->channel[queue];

-    napi_enable(&ch->napi);
+    if (queue < rx_queues_cnt)
+        napi_enable(&ch->rx_napi);
+    if (queue < tx_queues_cnt)
+        napi_enable(&ch->tx_napi);
 }
 }

@@ -228,7 +256,7 @@
         priv->clk_csr = STMMAC_CSR_100_150M;
     else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
         priv->clk_csr = STMMAC_CSR_150_250M;
-    else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+    else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
         priv->clk_csr = STMMAC_CSR_250_300M;
 }

@@ -273,7 +301,7 @@
 if (tx_q->dirty_tx > tx_q->cur_tx)
     avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 else
-    avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+    avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

 return avail;
 }
@@ -291,24 +319,9 @@
 if (rx_q->dirty_rx <= rx_q->cur_rx)
     dirty = rx_q->cur_rx - rx_q->dirty_rx;
 else
-    dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+    dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

 return dirty;
-}
-
-/**
- * stmmac_hw_fix_mac_speed - callback for speed selection
- * @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuration
- * registers have to be set according to the link speed negotiated.
- */
-static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
-{
-    struct net_device *ndev = priv->dev;
-    struct phy_device *phydev = ndev->phydev;
-
-    if (likely(priv->plat->fix_mac_speed))
-        priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 }

 /**
@@ -351,7 +364,7 @@

 /**
  * stmmac_eee_ctrl_timer - EEE TX SW timer.
- * @arg : data hook
+ * @t:  timer_list struct containing private info
  * Description:
  *  if there is no data transfer and if we are not in LPI state,
  *  then MAC Transmitter can be moved to LPI state.
@@ -361,7 +374,7 @@
 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

 stmmac_enable_eee_mode(priv);
-mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 }

 /**
@@ -374,67 +387,43 @@
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
-    struct net_device *ndev = priv->dev;
-    int interface = priv->plat->interface;
-    bool ret = false;
-
-    if ((interface != PHY_INTERFACE_MODE_MII) &&
-        (interface != PHY_INTERFACE_MODE_GMII) &&
-        !phy_interface_mode_is_rgmii(interface))
-        goto out;
+    int eee_tw_timer = priv->eee_tw_timer;

     /* Using PCS we cannot dial with the phy registers at this stage
      * so we do not support extra feature like EEE.
      */
-    if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
-        (priv->hw->pcs == STMMAC_PCS_TBI) ||
-        (priv->hw->pcs == STMMAC_PCS_RTBI))
-        goto out;
+    if (priv->hw->pcs == STMMAC_PCS_TBI ||
+        priv->hw->pcs == STMMAC_PCS_RTBI)
+        return false;

-    /* MAC core supports the EEE feature. */
-    if (priv->dma_cap.eee) {
-        int tx_lpi_timer = priv->tx_lpi_timer;
+    /* Check if MAC core supports the EEE feature. */
+    if (!priv->dma_cap.eee)
+        return false;

-        /* Check if the PHY supports EEE */
-        if (phy_init_eee(ndev->phydev, 1)) {
-            /* To manage at run-time if the EEE cannot be supported
-             * anymore (for example because the lp caps have been
-             * changed).
-             * In that case the driver disable own timers.
-             */
-            mutex_lock(&priv->lock);
-            if (priv->eee_active) {
-                netdev_dbg(priv->dev, "disable EEE\n");
-                del_timer_sync(&priv->eee_ctrl_timer);
-                stmmac_set_eee_timer(priv, priv->hw, 0,
-                                     tx_lpi_timer);
-            }
-            priv->eee_active = 0;
-            mutex_unlock(&priv->lock);
-            goto out;
+    mutex_lock(&priv->lock);
+
+    /* Check if it needs to be deactivated */
+    if (!priv->eee_active) {
+        if (priv->eee_enabled) {
+            netdev_dbg(priv->dev, "disable EEE\n");
+            del_timer_sync(&priv->eee_ctrl_timer);
+            stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
         }
-        /* Activate the EEE and start timers */
-        mutex_lock(&priv->lock);
-        if (!priv->eee_active) {
-            priv->eee_active = 1;
-            timer_setup(&priv->eee_ctrl_timer,
-                        stmmac_eee_ctrl_timer, 0);
-            mod_timer(&priv->eee_ctrl_timer,
-                      STMMAC_LPI_T(eee_timer));
-
-            stmmac_set_eee_timer(priv, priv->hw,
-                                 STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
-        }
-        /* Set HW EEE according to the speed */
-        stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
-
-        ret = true;
         mutex_unlock(&priv->lock);
-
-        netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+        return false;
     }
-out:
-    return ret;
+
+    if (priv->eee_active && !priv->eee_enabled) {
+        timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+        stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+                             eee_tw_timer);
+    }
+
+    mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+
+    mutex_unlock(&priv->lock);
+    netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+    return true;
 }

 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
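Note: two related changes interact in the EEE rework: STMMAC_LPI_T() now converts with usecs_to_jiffies() instead of msecs_to_jiffies(), and the timer is armed from priv->tx_lpi_timer rather than the eee_timer module parameter, so the LPI entry delay is interpreted in microseconds. A worked conversion (values illustrative; the jiffy length depends on CONFIG_HZ):

    /* With HZ = 100 (10 ms per jiffy):
     *   old: STMMAC_LPI_T(1000)    -> msecs_to_jiffies(1000)    = 100 jiffies = 1 s
     *   new: STMMAC_LPI_T(1000000) -> usecs_to_jiffies(1000000) = 100 jiffies = 1 s
     * i.e. callers must now pass microseconds to get the same expiry.
     */
    static unsigned long example_lpi_expiry(unsigned int tx_lpi_timer_us)
    {
        return jiffies + usecs_to_jiffies(tx_lpi_timer_us);
    }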
@@ -449,6 +438,7 @@
                struct dma_desc *p, struct sk_buff *skb)
 {
 struct skb_shared_hwtstamps shhwtstamp;
+bool found = false;
 u64 ns = 0;

 if (!priv->hwts_tx_en)
@@ -460,9 +450,13 @@

 /* check tx tstamp status */
 if (stmmac_get_tx_timestamp_status(priv, p)) {
-    /* get the valid tstamp */
     stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+    found = true;
+} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+    found = true;
+}

+if (found) {
     memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
     shhwtstamp.hwtstamp = ns_to_ktime(ns);

@@ -470,8 +464,6 @@
     /* pass tstamp to stack */
     skb_tstamp_tx(skb, &shhwtstamp);
 }
-
-return;
 }

 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@ -523,8 +515,6 @@
 {
 struct stmmac_priv *priv = netdev_priv(dev);
 struct hwtstamp_config config;
-struct timespec64 now;
-u64 temp = 0;
 u32 ptp_v2 = 0;
 u32 tstamp_all = 0;
 u32 ptp_over_ipv4_udp = 0;
@@ -533,11 +523,6 @@
 u32 snap_type_sel = 0;
 u32 ts_master_en = 0;
 u32 ts_event_en = 0;
-u32 sec_inc = 0;
-u32 value = 0;
-bool xmac;
-
-xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
     netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -643,7 +628,8 @@
     config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
     ptp_v2 = PTP_TCR_TSVER2ENA;
     snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-    ts_event_en = PTP_TCR_TSEVNTENA;
+    if (priv->synopsys_id < DWMAC_CORE_4_10)
+        ts_event_en = PTP_TCR_TSEVNTENA;
     ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
     ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
     ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -698,41 +684,16 @@
 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

-if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-    stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
-else {
-    value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
-             tstamp_all | ptp_v2 | ptp_over_ethernet |
-             ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
-             ts_master_en | snap_type_sel);
-    stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
+priv->systime_flags = STMMAC_HWTS_ACTIVE;

-    /* program Sub Second Increment reg */
-    stmmac_config_sub_second_increment(priv,
-            priv->ptpaddr, priv->plat->clk_ptp_rate,
-            xmac, &sec_inc);
-    temp = div_u64(1000000000ULL, sec_inc);
-
-    /* Store sub second increment and flags for later use */
-    priv->sub_second_inc = sec_inc;
-    priv->systime_flags = value;
-
-    /* calculate default added value:
-     * formula is :
-     * addend = (2^32)/freq_div_ratio;
-     * where, freq_div_ratio = 1e9ns/sec_inc
-     */
-    temp = (u64)(temp << 32);
-    priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
-    stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
-
-    /* initialize system time */
-    ktime_get_real_ts64(&now);
-
-    /* lower 32 bits of tv_sec are safe until y2106 */
-    stmmac_init_systime(priv, priv->ptpaddr,
-                        (u32)now.tv_sec, now.tv_nsec);
+if (priv->hwts_tx_en || priv->hwts_rx_en) {
+    priv->systime_flags |= tstamp_all | ptp_v2 |
+                           ptp_over_ethernet | ptp_over_ipv6_udp |
+                           ptp_over_ipv4_udp | ts_event_en |
+                           ts_master_en | snap_type_sel;
 }
+
+stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

 memcpy(&priv->tstamp_config, &config, sizeof(config));

@@ -747,7 +708,7 @@
  *  a proprietary structure used to pass information to the driver.
  *  Description:
  *  This function obtains the current hardware timestamping settings
-    as requested.
+ *  as requested.
  */
 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 {
@@ -762,6 +723,57 @@
 }

 /**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * Will be rerun after resuming from suspend, case in which the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+    bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+    struct timespec64 now;
+    u32 sec_inc = 0;
+    u64 temp = 0;
+
+    if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+        return -EOPNOTSUPP;
+
+    stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+    priv->systime_flags = systime_flags;
+
+    /* program Sub Second Increment reg */
+    stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+                                       priv->plat->clk_ptp_rate,
+                                       xmac, &sec_inc);
+    temp = div_u64(1000000000ULL, sec_inc);
+
+    /* Store sub second increment for later use */
+    priv->sub_second_inc = sec_inc;
+
+    /* calculate default added value:
+     * formula is :
+     * addend = (2^32)/freq_div_ratio;
+     * where, freq_div_ratio = 1e9ns/sec_inc
+     */
+    temp = (u64)(temp << 32);
+    priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+    stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+    /* initialize system time */
+    ktime_get_real_ts64(&now);
+
+    /* lower 32 bits of tv_sec are safe until y2106 */
+    stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
+
+/**
  * stmmac_init_ptp - init PTP
  * @priv: driver private structure
  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
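Note: the counter init moved here runs in two stages: a 32-bit accumulator gains default_addend once per PTP clock cycle, and each time it wraps the counter advances by sec_inc nanoseconds. A worked example of the formula in the comment above, assuming clk_ptp_rate = 100 MHz and sec_inc = 20 ns (round numbers, not taken from any particular SoC):

    /* freq_div_ratio = 1e9 / sec_inc = 50,000,000
     * addend = (freq_div_ratio << 32) / clk_ptp_rate = 0x80000000
     * The accumulator wraps every 2nd 10 ns cycle, stepping the counter
     * 20 ns per 20 ns of wall time; PTP frequency adjustment later
     * nudges the addend slightly around this value.
     */
    static u32 example_default_addend(u32 clk_ptp_rate, u32 sec_inc)
    {
        u64 ratio = div_u64(1000000000ULL, sec_inc);

        return (u32)div_u64(ratio << 32, clk_ptp_rate);
    }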
@@ -771,9 +783,11 @@
 static int stmmac_init_ptp(struct stmmac_priv *priv)
 {
 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+int ret;

-if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
-    return -EOPNOTSUPP;
+ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+if (ret)
+    return ret;

 priv->adv_ts = 0;
 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -793,121 +807,274 @@
 priv->hwts_tx_en = 0;
 priv->hwts_rx_en = 0;

-stmmac_ptp_register(priv);
-
 return 0;
 }

 static void stmmac_release_ptp(struct stmmac_priv *priv)
 {
-if (priv->plat->clk_ptp_ref)
-    clk_disable_unprepare(priv->plat->clk_ptp_ref);
+clk_disable_unprepare(priv->plat->clk_ptp_ref);
 stmmac_ptp_unregister(priv);
 }

 /**
  * stmmac_mac_flow_ctrl - Configure flow control in all queues
  * @priv: driver private structure
+ * @duplex: duplex passed to the next function
  * Description: It is used for configuring the flow control in all queues
  */
 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 {
 u32 tx_cnt = priv->plat->tx_queues_to_use;

-stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
-                priv->pause, tx_cnt);
+stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl & priv->plat->flow_ctrl,
+                 priv->pause, tx_cnt);
 }

-/**
- * stmmac_adjust_link - adjusts the link parameters
- * @dev: net device structure
- * Description: this is the helper called by the physical abstraction layer
- * drivers to communicate the phy link status. According the speed and duplex
- * this driver can invoke registered glue-logic as well.
- * It also invoke the eee initialization because it could happen when switch
- * on different networks (that are eee capable).
- */
-static void stmmac_adjust_link(struct net_device *dev)
+static void stmmac_validate(struct phylink_config *config,
+                            unsigned long *supported,
+                            struct phylink_link_state *state)
 {
-struct stmmac_priv *priv = netdev_priv(dev);
-struct phy_device *phydev = dev->phydev;
-bool new_state = false;
+struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
+__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+int tx_cnt = priv->plat->tx_queues_to_use;
+int max_speed = priv->plat->max_speed;

-if (!phydev)
-    return;
+phylink_set(mac_supported, 10baseT_Half);
+phylink_set(mac_supported, 10baseT_Full);
+phylink_set(mac_supported, 100baseT_Half);
+phylink_set(mac_supported, 100baseT_Full);
+phylink_set(mac_supported, 1000baseT_Half);
+phylink_set(mac_supported, 1000baseT_Full);
+phylink_set(mac_supported, 1000baseKX_Full);
+phylink_set(mac_supported, 100baseT1_Full);
+phylink_set(mac_supported, 1000baseT1_Full);

-mutex_lock(&priv->lock);
+phylink_set(mac_supported, Autoneg);
+phylink_set(mac_supported, Pause);
+phylink_set(mac_supported, Asym_Pause);
+phylink_set_port_modes(mac_supported);

-if (phydev->link) {
-    u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
-
-    /* Now we make sure that we can be in full duplex mode.
-     * If not, we operate in half-duplex mode. */
-    if (phydev->duplex != priv->oldduplex) {
-        new_state = true;
-        if (!phydev->duplex)
-            ctrl &= ~priv->hw->link.duplex;
-        else
-            ctrl |= priv->hw->link.duplex;
-        priv->oldduplex = phydev->duplex;
+/* Cut down 1G if asked to */
+if ((max_speed > 0) && (max_speed < 1000)) {
+    phylink_set(mask, 1000baseT_Full);
+    phylink_set(mask, 1000baseX_Full);
+} else if (priv->plat->has_xgmac) {
+    if (!max_speed || (max_speed >= 2500)) {
+        phylink_set(mac_supported, 2500baseT_Full);
+        phylink_set(mac_supported, 2500baseX_Full);
     }
-    /* Flow Control operation */
-    if (phydev->pause)
-        stmmac_mac_flow_ctrl(priv, phydev->duplex);
-
-    if (phydev->speed != priv->speed) {
-        new_state = true;
-        ctrl &= ~priv->hw->link.speed_mask;
-        switch (phydev->speed) {
-        case SPEED_1000:
-            ctrl |= priv->hw->link.speed1000;
-            break;
-        case SPEED_100:
-            ctrl |= priv->hw->link.speed100;
-            break;
-        case SPEED_10:
-            ctrl |= priv->hw->link.speed10;
-            break;
-        default:
-            netif_warn(priv, link, priv->dev,
-                       "broken speed: %d\n", phydev->speed);
-            phydev->speed = SPEED_UNKNOWN;
-            break;
-        }
-        if (phydev->speed != SPEED_UNKNOWN)
-            stmmac_hw_fix_mac_speed(priv);
-        priv->speed = phydev->speed;
+    if (!max_speed || (max_speed >= 5000)) {
+        phylink_set(mac_supported, 5000baseT_Full);
     }
-
-    writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
-
-    if (!priv->oldlink) {
-        new_state = true;
-        priv->oldlink = true;
+    if (!max_speed || (max_speed >= 10000)) {
+        phylink_set(mac_supported, 10000baseSR_Full);
+        phylink_set(mac_supported, 10000baseLR_Full);
+        phylink_set(mac_supported, 10000baseER_Full);
+        phylink_set(mac_supported, 10000baseLRM_Full);
+        phylink_set(mac_supported, 10000baseT_Full);
+        phylink_set(mac_supported, 10000baseKX4_Full);
+        phylink_set(mac_supported, 10000baseKR_Full);
     }
-} else if (priv->oldlink) {
-    new_state = true;
-    priv->oldlink = false;
-    priv->speed = SPEED_UNKNOWN;
-    priv->oldduplex = DUPLEX_UNKNOWN;
+    if (!max_speed || (max_speed >= 25000)) {
+        phylink_set(mac_supported, 25000baseCR_Full);
+        phylink_set(mac_supported, 25000baseKR_Full);
+        phylink_set(mac_supported, 25000baseSR_Full);
+    }
+    if (!max_speed || (max_speed >= 40000)) {
+        phylink_set(mac_supported, 40000baseKR4_Full);
+        phylink_set(mac_supported, 40000baseCR4_Full);
+        phylink_set(mac_supported, 40000baseSR4_Full);
+        phylink_set(mac_supported, 40000baseLR4_Full);
+    }
+    if (!max_speed || (max_speed >= 50000)) {
+        phylink_set(mac_supported, 50000baseCR2_Full);
+        phylink_set(mac_supported, 50000baseKR2_Full);
+        phylink_set(mac_supported, 50000baseSR2_Full);
+        phylink_set(mac_supported, 50000baseKR_Full);
+        phylink_set(mac_supported, 50000baseSR_Full);
+        phylink_set(mac_supported, 50000baseCR_Full);
+        phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
+        phylink_set(mac_supported, 50000baseDR_Full);
+    }
+    if (!max_speed || (max_speed >= 100000)) {
+        phylink_set(mac_supported, 100000baseKR4_Full);
+        phylink_set(mac_supported, 100000baseSR4_Full);
+        phylink_set(mac_supported, 100000baseCR4_Full);
+        phylink_set(mac_supported, 100000baseLR4_ER4_Full);
+        phylink_set(mac_supported, 100000baseKR2_Full);
+        phylink_set(mac_supported, 100000baseSR2_Full);
+        phylink_set(mac_supported, 100000baseCR2_Full);
+        phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
+        phylink_set(mac_supported, 100000baseDR2_Full);
+    }
 }

-if (new_state && netif_msg_link(priv))
-    phy_print_status(phydev);
+/* Half-Duplex can only work with single queue */
+if (tx_cnt > 1) {
+    phylink_set(mask, 10baseT_Half);
+    phylink_set(mask, 100baseT_Half);
+    phylink_set(mask, 1000baseT_Half);
+}

-mutex_unlock(&priv->lock);
+linkmode_and(supported, supported, mac_supported);
+linkmode_andnot(supported, supported, mask);

-if (phydev->is_pseudo_fixed_link)
-    /* Stop PHY layer to call the hook to adjust the link in case
-     * of a switch is attached to the stmmac driver.
-     */
-    phydev->irq = PHY_IGNORE_INTERRUPT;
-else
-    /* At this stage, init the EEE if supported.
-     * Never called in case of fixed_link.
-     */
-    priv->eee_enabled = stmmac_eee_init(priv);
+linkmode_and(state->advertising, state->advertising, mac_supported);
+linkmode_andnot(state->advertising, state->advertising, mask);
+
+/* If PCS is supported, check which modes it supports. */
+stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
 }
+
+static void stmmac_mac_pcs_get_state(struct phylink_config *config,
+                                     struct phylink_link_state *state)
+{
+struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+state->link = 0;
+stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+                              const struct phylink_link_state *state)
+{
+struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+/* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+                                 unsigned int mode, phy_interface_t interface)
+{
+struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+stmmac_mac_set(priv, priv->ioaddr, false);
+priv->eee_active = false;
+priv->tx_lpi_enabled = false;
+stmmac_eee_init(priv);
+stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+                               struct phy_device *phy,
+                               unsigned int mode, phy_interface_t interface,
+                               int speed, int duplex,
+                               bool tx_pause, bool rx_pause)
+{
+struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+u32 ctrl;
+
+stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
+
+ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ctrl &= ~priv->hw->link.speed_mask;
+
+if (interface == PHY_INTERFACE_MODE_USXGMII) {
+    switch (speed) {
+    case SPEED_10000:
+        ctrl |= priv->hw->link.xgmii.speed10000;
+        break;
+    case SPEED_5000:
+        ctrl |= priv->hw->link.xgmii.speed5000;
+        break;
+    case SPEED_2500:
+        ctrl |= priv->hw->link.xgmii.speed2500;
+        break;
+    default:
+        return;
+    }
+} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+    switch (speed) {
+    case SPEED_100000:
+        ctrl |= priv->hw->link.xlgmii.speed100000;
+        break;
+    case SPEED_50000:
+        ctrl |= priv->hw->link.xlgmii.speed50000;
+        break;
+    case SPEED_40000:
+        ctrl |= priv->hw->link.xlgmii.speed40000;
+        break;
+    case SPEED_25000:
+        ctrl |= priv->hw->link.xlgmii.speed25000;
+        break;
+    case SPEED_10000:
+        ctrl |= priv->hw->link.xgmii.speed10000;
+        break;
+    case SPEED_2500:
+        ctrl |= priv->hw->link.speed2500;
+        break;
+    case SPEED_1000:
+        ctrl |= priv->hw->link.speed1000;
+        break;
+    default:
+        return;
+    }
+} else {
+    switch (speed) {
+    case SPEED_2500:
+        ctrl |= priv->hw->link.speed2500;
+        break;
+    case SPEED_1000:
+        ctrl |= priv->hw->link.speed1000;
+        break;
+    case SPEED_100:
+        ctrl |= priv->hw->link.speed100;
+        break;
+    case SPEED_10:
+        ctrl |= priv->hw->link.speed10;
+        break;
+    default:
+        return;
+    }
+}
+
+priv->speed = speed;
+
+if (priv->plat->fix_mac_speed)
+    priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+
+if (!duplex)
+    ctrl &= ~priv->hw->link.duplex;
+else
+    ctrl |= priv->hw->link.duplex;
+
+/* Flow Control operation */
+if (rx_pause && tx_pause)
+    priv->flow_ctrl = FLOW_AUTO;
+else if (rx_pause && !tx_pause)
+    priv->flow_ctrl = FLOW_RX;
+else if (!rx_pause && tx_pause)
+    priv->flow_ctrl = FLOW_TX;
+else
+    priv->flow_ctrl = FLOW_OFF;
+
+stmmac_mac_flow_ctrl(priv, duplex);
+
+writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+stmmac_mac_set(priv, priv->ioaddr, true);
+if (phy && priv->dma_cap.eee) {
+    priv->eee_active = phy_init_eee(phy, 1) >= 0;
+    priv->eee_enabled = stmmac_eee_init(priv);
+    priv->tx_lpi_enabled = priv->eee_enabled;
+    stmmac_set_eee_pls(priv, priv->hw, true);
+}
+}
+
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+    .validate = stmmac_validate,
+    .mac_pcs_get_state = stmmac_mac_pcs_get_state,
+    .mac_config = stmmac_mac_config,
+    .mac_an_restart = stmmac_mac_an_restart,
+    .mac_link_down = stmmac_mac_link_down,
+    .mac_link_up = stmmac_mac_link_up,
+};

 /**
  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
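Note: phylink calls .validate() with the candidate supported mask and expects the MAC to trim it. The implementation above is subtractive: mac_supported collects everything the MAC could offer, mask collects modes to strip (above-cap speeds, half duplex with multiple queues). The net effect on both the supported and advertising masks reduces to this pattern (a sketch, not driver code):

    #include <linux/linkmode.h>

    /* Sketch of the filtering stmmac_validate() applies. */
    static void example_filter_modes(unsigned long *supported,
                                     const unsigned long *mac_supported,
                                     const unsigned long *mask)
    {
        linkmode_and(supported, supported, mac_supported); /* keep MAC-capable modes */
        linkmode_andnot(supported, supported, mask);       /* drop capped/half-duplex modes */
    }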
@@ -934,6 +1101,7 @@
     }
 }

+#if 0
 static void rtl8211F_led_control(struct phy_device *phydev)
 {
 printk("ben debug:rtl8211F_led_control...1 \n");
@@ -950,6 +1118,44 @@
 //  phy_write(phydev, 16, 0x6C0A);
 printk("ben debug:rtl8211F_led_control...2 \n");
 }
+#endif
+#define RTL_8211F_PHY_ID 0x001cc916
+#define RTL_8211F_PHY_ID_MASK 0x001fffff
+#define RTL_8211F_PAGE_SELECT 0x1f
+#define RTL_8211F_LCR_ADDR 0x10
+
+#define GREEN_LED   0   // the green LED is LED0
+#define YELLOW0_LED 1   // the first yellow LED is LED1
+#define YELLOW1_LED 2   // the second yellow LED is LED2
+
+static int rtl8211F_led_control(struct phy_device *phydev)
+{
+    unsigned int temp;
+
+    printk("<<<<<<ben test led ctrl start... %s\n", __FUNCTION__);
+    if (!phydev) return 0;
+    if (phydev->phy_id != 0x001cc916) return 0; /* only for the RTL8211F */
+
+    // configure the Ethernet-port LEDs
+    phy_write(phydev, 31, 0xd04);
+    temp = 0x02 << (5 * GREEN_LED);     // green LED indicates link status
+    temp |= 0x08 << (5 * YELLOW0_LED);  // yellow LED 1 indicates link status
+
+    temp |= 0x1b << (5 * YELLOW1_LED);  // yellow LED 2 indicates packet RX/TX activity
+    phy_write(phydev, 0x10, temp);
+
+    temp = 1 << (YELLOW1_LED + 1);  // green LED does not indicate EEE power-save state: it stays lit while a cable is plugged in,
+                                    // but the yellow LED does indicate EEE state and goes out when there is no traffic
+    phy_write(phydev, 0x11, 0x00);
+    phy_write(phydev, 31, 0);
+
+    // cable plugged in: green LED solid on; cable removed: green LED off
+    // link partner supports EEE: yellow LED blinks on traffic, otherwise off
+    // link partner without EEE: yellow LED blinks on traffic, otherwise solid on
+
+    printk("<<<<<<<ben test led ctrl end %s\n", __FUNCTION__);
+    return 0;
+}

 /**
  * stmmac_init_phy - PHY initialization
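Note: the LED setup above selects page 0xd04 and packs one 5-bit selector per LED into the LED control register (0x10). Assuming that field layout from the RTL8211F datasheet (the per-bit meanings are not spelled out in this patch), the value written decomposes as:

    /* Illustrative decomposition of the LCR value built above. */
    static unsigned int example_rtl8211f_lcr(void)
    {
        unsigned int temp = 0;

        temp |= 0x02 << (5 * 0);    /* LED0 (green) field  -> 0x0002 */
        temp |= 0x08 << (5 * 1);    /* LED1 (yellow) field -> 0x0100 */
        temp |= 0x1b << (5 * 2);    /* LED2 (yellow) field -> 0x6c00 */

        return temp;                /* 0x6d02 written to reg 0x10 */
    }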
@@ -962,85 +1168,83 @@
 static int stmmac_init_phy(struct net_device *dev)
 {
 struct stmmac_priv *priv = netdev_priv(dev);
-u32 tx_cnt = priv->plat->tx_queues_to_use;
-struct phy_device *phydev;
-char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-char bus_id[MII_BUS_ID_SIZE];
-int interface = priv->plat->interface;
-int max_speed = priv->plat->max_speed;
-priv->oldlink = false;
-priv->speed = SPEED_UNKNOWN;
-priv->oldduplex = DUPLEX_UNKNOWN;
+struct device_node *node;
+int ret;

-if (priv->plat->phy_node) {
-    phydev = of_phy_connect(dev, priv->plat->phy_node,
-                            &stmmac_adjust_link, 0, interface);
-} else {
-    snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-             priv->plat->bus_id);

-    snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-             priv->plat->phy_addr);
-    netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
-               phy_id_fmt);
+printk("ben stmmac_init_phy .. \n");
+mdelay(2000);
+printk("ben stmmac_init_phy delay .. \n");
+if (priv->plat->integrated_phy_power)
+    ret = priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);

-    phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
-                         interface);
-}
+node = priv->plat->phylink_node;

-if (IS_ERR_OR_NULL(phydev)) {
-    netdev_err(priv->dev, "Could not attach to PHY\n");
-    if (!phydev)
+if (node)
+{
+    //printk("ben ttt.. \n");
+    ret = phylink_of_phy_connect(priv->phylink, node, 0);
+    //printk("ben ttt:%d \n", ret);
+}
+
+/* Some DT bindings do not set-up the PHY handle. Let's try to
+ * manually parse it
+ */
+//printk("ben:stmmac_init_phy..1 \n");
+if (!node || ret) {
+//if (1) {
+    int addr = priv->plat->phy_addr;
+    struct phy_device *phydev;
+
+    //printk("ben:stmmac_init_phy..2 \n");
+    phydev = mdiobus_get_phy(priv->mii, addr);
+    if (!phydev) {
+        netdev_err(priv->dev, "no phy at addr %d\n", addr);
         return -ENODEV;
+    }

-    return PTR_ERR(phydev);
+    //rtl8211F_led_control(phydev);
+
+    //printk("ben:stmmac_init_phy..3 \n");
+    ret = phylink_connect_phy(priv->phylink, phydev);
+    //rtl8211F_led_control(phydev);
 }

-/* Stop Advertising 1000BASE Capability if interface is not GMII */
-if ((interface == PHY_INTERFACE_MODE_MII) ||
-    (interface == PHY_INTERFACE_MODE_RMII) ||
-    (max_speed < 1000 && max_speed > 0))
-    phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
-                             SUPPORTED_1000baseT_Full);
+if (!priv->plat->pmt) {
+    struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

-/*
- * Half-duplex mode not supported with multiqueue
- * half-duplex can only works with single queue
- */
-if (tx_cnt > 1)
-    phydev->supported &= ~(SUPPORTED_1000baseT_Half |
-                           SUPPORTED_100baseT_Half |
-                           SUPPORTED_10baseT_Half);
-
-/*
- * Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
-if (!priv->plat->phy_node && phydev->phy_id == 0) {
-    phy_disconnect(phydev);
-    return -ENODEV;
+    phylink_ethtool_get_wol(priv->phylink, &wol);
+    device_set_wakeup_capable(priv->device, !!wol.supported);
 }
+return ret;
+}

-/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
- * subsequent PHY polling, make sure we force a link transition if
- * we have a UP/DOWN/UP transition
- */
-if (phydev->is_pseudo_fixed_link)
-    phydev->irq = PHY_POLL;
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+int mode = priv->plat->phy_interface;
+struct phylink *phylink;

-phy_attached_info(phydev);
+priv->phylink_config.dev = &priv->dev->dev;
+priv->phylink_config.type = PHYLINK_NETDEV;
+priv->phylink_config.pcs_poll = true;

-//add ben
-rtl8211F_led_control(phydev);
+if (!fwnode)
+    fwnode = dev_fwnode(priv->device);
+
+phylink = phylink_create(&priv->phylink_config, fwnode,
+                         mode, &stmmac_phylink_mac_ops);
+if (IS_ERR(phylink))
+    return PTR_ERR(phylink);
+
+priv->phylink = phylink;
 return 0;
 }

 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 {
 u32 rx_cnt = priv->plat->rx_queues_to_use;
+unsigned int desc_size;
 void *head_rx;
 u32 queue;

@@ -1050,19 +1254,24 @@

     pr_info("\tRX Queue %u rings\n", queue);

-    if (priv->extend_desc)
+    if (priv->extend_desc) {
         head_rx = (void *)rx_q->dma_erx;
-    else
+        desc_size = sizeof(struct dma_extended_desc);
+    } else {
         head_rx = (void *)rx_q->dma_rx;
+        desc_size = sizeof(struct dma_desc);
+    }

     /* Display RX ring */
-    stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+    stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+                        rx_q->dma_rx_phy, desc_size);
 }
 }

 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 {
 u32 tx_cnt = priv->plat->tx_queues_to_use;
+unsigned int desc_size;
 void *head_tx;
 u32 queue;

@@ -1072,12 +1281,19 @@

     pr_info("\tTX Queue %d rings\n", queue);

-    if (priv->extend_desc)
+    if (priv->extend_desc) {
         head_tx = (void *)tx_q->dma_etx;
-    else
+        desc_size = sizeof(struct dma_extended_desc);
+    } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+        head_tx = (void *)tx_q->dma_entx;
+        desc_size = sizeof(struct dma_edesc);
+    } else {
         head_tx = (void *)tx_q->dma_tx;
+        desc_size = sizeof(struct dma_desc);
+    }

-    stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+    stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+                        tx_q->dma_tx_phy, desc_size);
 }
 }

@@ -1121,16 +1337,16 @@
 int i;

 /* Clear the RX descriptors */
-for (i = 0; i < DMA_RX_SIZE; i++)
+for (i = 0; i < priv->dma_rx_size; i++)
     if (priv->extend_desc)
         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                             priv->use_riwt, priv->mode,
-                            (i == DMA_RX_SIZE - 1),
+                            (i == priv->dma_rx_size - 1),
                             priv->dma_buf_sz);
     else
         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                             priv->use_riwt, priv->mode,
-                            (i == DMA_RX_SIZE - 1),
+                            (i == priv->dma_rx_size - 1),
                             priv->dma_buf_sz);
 }

1366
+ for (i = 0; i < priv->dma_tx_size; i++) {
1367
+ int last = (i == (priv->dma_tx_size - 1));
1368
+ struct dma_desc *p;
1369
+
11511370 if (priv->extend_desc)
1152
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1153
- priv->mode, (i == DMA_TX_SIZE - 1));
1371
+ p = &tx_q->dma_etx[i].basic;
1372
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1373
+ p = &tx_q->dma_entx[i].basic;
11541374 else
1155
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1156
- priv->mode, (i == DMA_TX_SIZE - 1));
1375
+ p = &tx_q->dma_tx[i];
1376
+
1377
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
1378
+ }
11571379 }
11581380
11591381 /**
@@ -1191,26 +1413,30 @@
                int i, gfp_t flags, u32 queue)
 {
 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-struct sk_buff *skb;
+struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

-skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
-if (!skb) {
-    netdev_err(priv->dev,
-               "%s: Rx init fails; skb is NULL\n", __func__);
+if (priv->dma_cap.addr64 <= 32)
+    gfp |= GFP_DMA32;
+
+buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+if (!buf->page)
     return -ENOMEM;
-}
-rx_q->rx_skbuff[i] = skb;
-rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
-                                        priv->dma_buf_sz,
-                                        DMA_FROM_DEVICE);
-if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
-    netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
-    dev_kfree_skb_any(skb);
-    return -EINVAL;
+
+if (priv->sph) {
+    buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+    if (!buf->sec_page)
+        return -ENOMEM;
+
+    buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+    stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+} else {
+    buf->sec_page = NULL;
+    stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
 }

-stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
-
+buf->addr = page_pool_get_dma_addr(buf->page);
+stmmac_set_desc_addr(priv, p, buf->addr);
 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
     stmmac_init_desc3(priv, p);

@@ -1226,13 +1452,15 @@
 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 {
 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

-if (rx_q->rx_skbuff[i]) {
-    dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
-                     priv->dma_buf_sz, DMA_FROM_DEVICE);
-    dev_kfree_skb_any(rx_q->rx_skbuff[i]);
-}
-rx_q->rx_skbuff[i] = NULL;
+if (buf->page)
+    page_pool_put_full_page(rx_q->page_pool, buf->page, false);
+buf->page = NULL;
+
+if (buf->sec_page)
+    page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
+buf->sec_page = NULL;
 }

 /**
@@ -1279,18 +1507,8 @@
 struct stmmac_priv *priv = netdev_priv(dev);
 u32 rx_count = priv->plat->rx_queues_to_use;
 int ret = -ENOMEM;
-int bfsize = 0;
 int queue;
 int i;
-
-bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
-if (bfsize < 0)
-    bfsize = 0;
-
-if (bfsize < BUF_SIZE_16KiB)
-    bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
-priv->dma_buf_sz = bfsize;

 /* RX INITIALIZATION */
 netif_dbg(priv, probe, priv->dev,
@@ -1303,7 +1521,9 @@
           "(%s) dma_rx_phy=0x%08x\n", __func__,
           (u32)rx_q->dma_rx_phy);

-    for (i = 0; i < DMA_RX_SIZE; i++) {
+    stmmac_clear_rx_descriptors(priv, queue);
+
+    for (i = 0; i < priv->dma_rx_size; i++) {
         struct dma_desc *p;

         if (priv->extend_desc)
@@ -1315,29 +1535,23 @@
                                  queue);
         if (ret)
             goto err_init_rx_buffers;
-
-        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
-                  (unsigned int)rx_q->rx_skbuff_dma[i]);
     }

     rx_q->cur_rx = 0;
-    rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
-
-    stmmac_clear_rx_descriptors(priv, queue);
+    rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);

     /* Setup the chained descriptor addresses */
     if (priv->mode == STMMAC_CHAIN_MODE) {
         if (priv->extend_desc)
             stmmac_mode_init(priv, rx_q->dma_erx,
-                             rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+                             rx_q->dma_rx_phy,
+                             priv->dma_rx_size, 1);
         else
             stmmac_mode_init(priv, rx_q->dma_rx,
-                             rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+                             rx_q->dma_rx_phy,
+                             priv->dma_rx_size, 0);
     }
 }
-
-buf_sz = bfsize;

 return 0;

@@ -1349,7 +1563,7 @@
     if (queue == 0)
         break;

-    i = DMA_RX_SIZE;
+    i = priv->dma_rx_size;
     queue--;
 }

@@ -1381,16 +1595,20 @@
 if (priv->mode == STMMAC_CHAIN_MODE) {
     if (priv->extend_desc)
         stmmac_mode_init(priv, tx_q->dma_etx,
-                         tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
-    else
+                         tx_q->dma_tx_phy,
+                         priv->dma_tx_size, 1);
+    else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
         stmmac_mode_init(priv, tx_q->dma_tx,
-                         tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+                         tx_q->dma_tx_phy,
+                         priv->dma_tx_size, 0);
 }

-for (i = 0; i < DMA_TX_SIZE; i++) {
+for (i = 0; i < priv->dma_tx_size; i++) {
     struct dma_desc *p;
     if (priv->extend_desc)
         p = &((tx_q->dma_etx + i)->basic);
+    else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+        p = &((tx_q->dma_entx + i)->basic);
     else
         p = tx_q->dma_tx + i;

@@ -1449,7 +1667,7 @@
 {
 int i;

-for (i = 0; i < DMA_RX_SIZE; i++)
+for (i = 0; i < priv->dma_rx_size; i++)
     stmmac_free_rx_buffer(priv, queue, i);
 }

@@ -1462,7 +1680,7 @@
 {
 int i;

-for (i = 0; i < DMA_TX_SIZE; i++)
+for (i = 0; i < priv->dma_tx_size; i++)
     stmmac_free_tx_buffer(priv, queue, i);
 }

@@ -1497,16 +1715,17 @@

     /* Free DMA regions of consistent memory previously allocated */
     if (!priv->extend_desc)
-        dma_free_coherent(priv->device,
-                          DMA_RX_SIZE * sizeof(struct dma_desc),
+        dma_free_coherent(priv->device, priv->dma_rx_size *
+                          sizeof(struct dma_desc),
                           rx_q->dma_rx, rx_q->dma_rx_phy);
     else
-        dma_free_coherent(priv->device, DMA_RX_SIZE *
+        dma_free_coherent(priv->device, priv->dma_rx_size *
                           sizeof(struct dma_extended_desc),
                           rx_q->dma_erx, rx_q->dma_rx_phy);

-    kfree(rx_q->rx_skbuff_dma);
-    kfree(rx_q->rx_skbuff);
+    kfree(rx_q->buf_pool);
+    if (rx_q->page_pool)
+        page_pool_destroy(rx_q->page_pool);
 }
 }

@@ -1522,19 +1741,26 @@
 /* Free TX queue resources */
 for (queue = 0; queue < tx_count; queue++) {
     struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+    size_t size;
+    void *addr;

     /* Release the DMA TX socket buffers */
     dma_free_tx_skbufs(priv, queue);

-    /* Free DMA regions of consistent memory previously allocated */
-    if (!priv->extend_desc)
-        dma_free_coherent(priv->device,
-                          DMA_TX_SIZE * sizeof(struct dma_desc),
-                          tx_q->dma_tx, tx_q->dma_tx_phy);
-    else
-        dma_free_coherent(priv->device, DMA_TX_SIZE *
-                          sizeof(struct dma_extended_desc),
-                          tx_q->dma_etx, tx_q->dma_tx_phy);
+    if (priv->extend_desc) {
+        size = sizeof(struct dma_extended_desc);
+        addr = tx_q->dma_etx;
+    } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+        size = sizeof(struct dma_edesc);
+        addr = tx_q->dma_entx;
+    } else {
+        size = sizeof(struct dma_desc);
+        addr = tx_q->dma_tx;
+    }
+
+    size *= priv->dma_tx_size;
+
+    dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

     kfree(tx_q->tx_skbuff_dma);
     kfree(tx_q->tx_skbuff);
@@ -1558,39 +1784,49 @@
 /* RX queues buffers and DMA */
 for (queue = 0; queue < rx_count; queue++) {
     struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+    struct page_pool_params pp_params = { 0 };
+    unsigned int num_pages;

     rx_q->queue_index = queue;
     rx_q->priv_data = priv;

-    rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
-                                        sizeof(dma_addr_t),
-                                        GFP_KERNEL);
-    if (!rx_q->rx_skbuff_dma)
-        goto err_dma;
+    pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+    pp_params.pool_size = priv->dma_rx_size;
+    num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+    pp_params.order = ilog2(num_pages);
+    pp_params.nid = dev_to_node(priv->device);
+    pp_params.dev = priv->device;
+    pp_params.dma_dir = DMA_FROM_DEVICE;
+    pp_params.max_len = num_pages * PAGE_SIZE;

-    rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
-                                    sizeof(struct sk_buff *),
-                                    GFP_KERNEL);
-    if (!rx_q->rx_skbuff)
+    rx_q->page_pool = page_pool_create(&pp_params);
+    if (IS_ERR(rx_q->page_pool)) {
+        ret = PTR_ERR(rx_q->page_pool);
+        rx_q->page_pool = NULL;
+        goto err_dma;
+    }
+
+    rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+                             sizeof(*rx_q->buf_pool),
+                             GFP_KERNEL);
+    if (!rx_q->buf_pool)
         goto err_dma;

     if (priv->extend_desc) {
-        rx_q->dma_erx = dma_zalloc_coherent(priv->device,
-                                            DMA_RX_SIZE *
-                                            sizeof(struct
-                                            dma_extended_desc),
-                                            &rx_q->dma_rx_phy,
-                                            GFP_KERNEL);
+        rx_q->dma_erx = dma_alloc_coherent(priv->device,
+                                           priv->dma_rx_size *
+                                           sizeof(struct dma_extended_desc),
+                                           &rx_q->dma_rx_phy,
+                                           GFP_KERNEL);
         if (!rx_q->dma_erx)
             goto err_dma;

     } else {
-        rx_q->dma_rx = dma_zalloc_coherent(priv->device,
-                                           DMA_RX_SIZE *
-                                           sizeof(struct
-                                           dma_desc),
-                                           &rx_q->dma_rx_phy,
-                                           GFP_KERNEL);
+        rx_q->dma_rx = dma_alloc_coherent(priv->device,
+                                          priv->dma_rx_size *
+                                          sizeof(struct dma_desc),
+                                          &rx_q->dma_rx_phy,
+                                          GFP_KERNEL);
         if (!rx_q->dma_rx)
             goto err_dma;
     }
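Note: the page_pool order above is derived from the rounded-up page count of one receive buffer. Worked sizing, assuming 4 KiB pages (buffer sizes illustrative):

    /* dma_buf_sz = 1536 -> num_pages = 1 -> order = ilog2(1) = 0 (4 KiB)
     * dma_buf_sz = 9000 -> num_pages = 3 -> order = ilog2(3) = 1 (8 KiB)
     * ilog2() rounds down, so for odd page counts max_len
     * (num_pages * PAGE_SIZE) can exceed the 2^order allocation size.
     */
    static unsigned int example_pp_order(unsigned int dma_buf_sz)
    {
        unsigned int num_pages = DIV_ROUND_UP(dma_buf_sz, PAGE_SIZE);

        return ilog2(num_pages);
    }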
@@ -1621,48 +1857,50 @@
 /* TX queues buffers and DMA */
 for (queue = 0; queue < tx_count; queue++) {
     struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+    size_t size;
+    void *addr;

     tx_q->queue_index = queue;
     tx_q->priv_data = priv;

-    tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
-                                        sizeof(*tx_q->tx_skbuff_dma),
-                                        GFP_KERNEL);
+    tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+                                  sizeof(*tx_q->tx_skbuff_dma),
+                                  GFP_KERNEL);
     if (!tx_q->tx_skbuff_dma)
         goto err_dma;

-    tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
-                                    sizeof(struct sk_buff *),
-                                    GFP_KERNEL);
+    tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+                              sizeof(struct sk_buff *),
+                              GFP_KERNEL);
     if (!tx_q->tx_skbuff)
         goto err_dma;

-    if (priv->extend_desc) {
-        tx_q->dma_etx = dma_zalloc_coherent(priv->device,
-                                            DMA_TX_SIZE *
-                                            sizeof(struct
-                                            dma_extended_desc),
-                                            &tx_q->dma_tx_phy,
-                                            GFP_KERNEL);
-        if (!tx_q->dma_etx)
-            goto err_dma;
-    } else {
-        tx_q->dma_tx = dma_zalloc_coherent(priv->device,
-                                           DMA_TX_SIZE *
-                                           sizeof(struct
-                                           dma_desc),
-                                           &tx_q->dma_tx_phy,
-                                           GFP_KERNEL);
-        if (!tx_q->dma_tx)
-            goto err_dma;
-    }
+    if (priv->extend_desc)
+        size = sizeof(struct dma_extended_desc);
+    else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+        size = sizeof(struct dma_edesc);
+    else
+        size = sizeof(struct dma_desc);
+
+    size *= priv->dma_tx_size;
+
+    addr = dma_alloc_coherent(priv->device, size,
+                              &tx_q->dma_tx_phy, GFP_KERNEL);
+    if (!addr)
+        goto err_dma;
+
+    if (priv->extend_desc)
+        tx_q->dma_etx = addr;
+    else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+        tx_q->dma_entx = addr;
+    else
+        tx_q->dma_tx = addr;
 }

 return 0;

 err_dma:
 free_dma_tx_desc_resources(priv);
-
 return ret;
 }

@@ -1873,6 +2111,7 @@
 /**
  * stmmac_tx_clean - to manage the transmission completion
  * @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
@@ -1894,6 +2133,8 @@

     if (priv->extend_desc)
         p = (struct dma_desc *)(tx_q->dma_etx + entry);
+    else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+        p = &tx_q->dma_entx[entry].basic;
     else
         p = tx_q->dma_tx + entry;

@@ -1952,7 +2193,7 @@

     stmmac_release_tx_desc(priv, p, priv->mode);

-    entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+    entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 }
 tx_q->dirty_tx = entry;

@@ -1961,7 +2202,7 @@

 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
                                                         queue))) &&
-    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

     netif_dbg(priv, tx_done, priv->dev,
               "%s: restart transmit\n", __func__);
@@ -1970,8 +2211,12 @@

 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
     stmmac_enable_eee_mode(priv);
-    mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+    mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 }
+
+/* We still have pending packets, let's call for a new scheduling */
+if (tx_q->dirty_tx != tx_q->cur_tx)
+    mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));

 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

@@ -1988,23 +2233,18 @@
 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 {
 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
-int i;

 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

 stmmac_stop_tx_dma(priv, chan);
 dma_free_tx_skbufs(priv, chan);
-for (i = 0; i < DMA_TX_SIZE; i++)
-    if (priv->extend_desc)
-        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
-                            priv->mode, (i == DMA_TX_SIZE - 1));
-    else
-        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
-                            priv->mode, (i == DMA_TX_SIZE - 1));
+stmmac_clear_tx_descriptors(priv, chan);
 tx_q->dirty_tx = 0;
 tx_q->cur_tx = 0;
 tx_q->mss = 0;
 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                    tx_q->dma_tx_phy, chan);
 stmmac_start_tx_dma(priv, chan);

 priv->dev->stats.tx_errors++;
@@ -2063,23 +2303,24 @@
 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
                                          &priv->xstats, chan);
 struct stmmac_channel *ch = &priv->channel[chan];
-bool needs_work = false;
+unsigned long flags;

-if ((status & handle_rx) && ch->has_rx) {
-    needs_work = true;
-} else {
-    status &= ~handle_rx;
+if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+    if (napi_schedule_prep(&ch->rx_napi)) {
+        spin_lock_irqsave(&ch->lock, flags);
+        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+        spin_unlock_irqrestore(&ch->lock, flags);
+        __napi_schedule(&ch->rx_napi);
+    }
 }

-if ((status & handle_tx) && ch->has_tx) {
-    needs_work = true;
-} else {
-    status &= ~handle_tx;
-}
-
-if (needs_work && napi_schedule_prep(&ch->napi)) {
-    stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
-    __napi_schedule(&ch->napi);
+if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+    if (napi_schedule_prep(&ch->tx_napi)) {
+        spin_lock_irqsave(&ch->lock, flags);
+        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+        spin_unlock_irqrestore(&ch->lock, flags);
+        __napi_schedule(&ch->tx_napi);
+    }
 }

 return status;
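Note: stmmac_disable_dma_irq() now takes separate rx/tx flags, so each NAPI instance masks only its own direction under ch->lock before scheduling. The matching re-enable on poll completion is outside this hunk; a sketch of the rx side, assuming a 5-argument stmmac_enable_dma_irq() counterpart with the same flag convention:

    /* Sketch only: re-arm just the RX interrupt when the rx poll is done,
     * mirroring the (rx=1, tx=0) mask taken in stmmac_napi_check() above. */
    static int example_rx_poll_done(struct stmmac_priv *priv,
                                    struct stmmac_channel *ch,
                                    int work, int budget)
    {
        if (work < budget && napi_complete_done(&ch->rx_napi, work)) {
            unsigned long flags;

            spin_lock_irqsave(&ch->lock, flags);
            stmmac_enable_dma_irq(priv, priv->ioaddr, ch->index, 1, 0);
            spin_unlock_irqrestore(&ch->lock, flags);
        }

        return work;
    }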
@@ -2142,10 +2383,10 @@
 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                     MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

-dwmac_mmc_intr_all_mask(priv->mmcaddr);
+stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

 if (priv->dma_cap.rmon) {
-    dwmac_mmc_ctrl(priv->mmcaddr, mode);
+    stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
     memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
 } else
     netdev_info(priv->dev, "No MAC Management Counters available\n");
@@ -2174,7 +2415,7 @@
  */
 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 {
-//if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+//  if (!is_valid_ether_addr(priv->dev->dev_addr)) {
 if (1) {
     stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
     if (likely(priv->plat->get_eth_addr))
@@ -2238,7 +2479,8 @@
                         rx_q->dma_rx_phy, chan);

     rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-                (DMA_RX_SIZE * sizeof(struct dma_desc));
+                         (priv->dma_rx_size *
+                          sizeof(struct dma_desc));
     stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
                            rx_q->rx_tail_addr, chan);
 }
@@ -2267,7 +2509,7 @@

 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
- * @data: data pointer
+ * @t: data pointer
  * Description:
  * This is the timer handler to directly invoke the stmmac_tx_clean.
  */
....@@ -2279,25 +2521,32 @@
22792521
22802522 ch = &priv->channel[tx_q->queue_index];
22812523
2282
- if (likely(napi_schedule_prep(&ch->napi)))
2283
- __napi_schedule(&ch->napi);
2524
+ if (likely(napi_schedule_prep(&ch->tx_napi))) {
2525
+ unsigned long flags;
2526
+
2527
+ spin_lock_irqsave(&ch->lock, flags);
2528
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2529
+ spin_unlock_irqrestore(&ch->lock, flags);
2530
+ __napi_schedule(&ch->tx_napi);
2531
+ }
22842532 }
22852533
22862534 /**
2287
- * stmmac_init_tx_coalesce - init tx mitigation options.
2535
+ * stmmac_init_coalesce - init mitigation options.
22882536 * @priv: driver private structure
22892537 * Description:
2290
- * This inits the transmit coalesce parameters: i.e. timer rate,
2538
+ * This inits the coalesce parameters: i.e. timer rate,
22912539 * timer handler and default threshold used for enabling the
22922540 * interrupt on completion bit.
22932541 */
2294
-static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2542
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
22952543 {
22962544 u32 tx_channel_count = priv->plat->tx_queues_to_use;
22972545 u32 chan;
22982546
22992547 priv->tx_coal_frames = STMMAC_TX_FRAMES;
23002548 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2549
+ priv->rx_coal_frames = STMMAC_RX_FRAMES;
23012550
23022551 for (chan = 0; chan < tx_channel_count; chan++) {
23032552 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
....@@ -2315,12 +2564,12 @@
23152564 /* set TX ring length */
23162565 for (chan = 0; chan < tx_channels_count; chan++)
23172566 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2318
- (DMA_TX_SIZE - 1), chan);
2567
+ (priv->dma_tx_size - 1), chan);
23192568
23202569 /* set RX ring length */
23212570 for (chan = 0; chan < rx_channels_count; chan++)
23222571 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2323
- (DMA_RX_SIZE - 1), chan);
2572
+ (priv->dma_rx_size - 1), chan);
23242573 }
23252574
23262575 /**
....@@ -2444,6 +2693,22 @@
24442693 }
24452694 }
24462695
2696
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2697
+{
2698
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2699
+ priv->rss.enable = false;
2700
+ return;
2701
+ }
2702
+
2703
+ if (priv->dev->features & NETIF_F_RXHASH)
2704
+ priv->rss.enable = true;
2705
+ else
2706
+ priv->rss.enable = false;
2707
+
2708
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
2709
+ priv->plat->rx_queues_to_use);
2710
+}
2711
+
24472712 /**
24482713 * stmmac_mtl_configuration - Configure MTL
24492714 * @priv: driver private structure
....@@ -2488,6 +2753,10 @@
24882753 /* Set RX routing */
24892754 if (rx_queues_count > 1)
24902755 stmmac_mac_config_rx_queues_routing(priv);
2756
+
2757
+ /* Receive Side Scaling */
2758
+ if (rx_queues_count > 1)
2759
+ stmmac_mac_config_rss(priv);
24912760 }
24922761
24932762 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
....@@ -2503,6 +2772,7 @@
25032772 /**
25042773 * stmmac_hw_setup - setup mac in a usable state.
25052774 * @dev : pointer to the device structure.
2775
+ * @ptp_register: register PTP if set
25062776 * Description:
25072777 * this is the main function to setup the HW in a usable state because the
25082778 * dma engine is reset, the core registers are configured (e.g. AXI,
....@@ -2512,7 +2782,7 @@
25122782 * 0 on success and an appropriate (-)ve integer as defined in errno.h
25132783 * file on failure.
25142784 */
2515
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2785
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
25162786 {
25172787 struct stmmac_priv *priv = netdev_priv(dev);
25182788 u32 rx_cnt = priv->plat->rx_queues_to_use;
....@@ -2568,37 +2838,75 @@
25682838
25692839 stmmac_mmc_setup(priv);
25702840
2571
- if (init_ptp) {
2841
+ if (ptp_register) {
25722842 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25732843 if (ret < 0)
2574
- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2575
-
2576
- ret = stmmac_init_ptp(priv);
2577
- if (ret == -EOPNOTSUPP)
2578
- netdev_warn(priv->dev, "PTP not supported by HW\n");
2579
- else if (ret)
2580
- netdev_warn(priv->dev, "PTP init failed\n");
2844
+ netdev_warn(priv->dev,
2845
+ "failed to enable PTP reference clock: %pe\n",
2846
+ ERR_PTR(ret));
25812847 }
25822848
2583
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2849
+ ret = stmmac_init_ptp(priv);
2850
+ if (ret == -EOPNOTSUPP)
2851
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
2852
+ else if (ret)
2853
+ netdev_warn(priv->dev, "PTP init failed\n");
2854
+ else if (ptp_register)
2855
+ stmmac_ptp_register(priv);
2856
+
2857
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2858
+
2859
+ /* Convert the timer from msec to usec */
2860
+ if (!priv->tx_lpi_timer)
2861
+ priv->tx_lpi_timer = eee_timer * 1000;
25842862
25852863 if (priv->use_riwt) {
2586
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2587
- if (!ret)
2588
- priv->rx_riwt = MAX_DMA_RIWT;
2864
+ if (!priv->rx_riwt)
2865
+ priv->rx_riwt = DEF_DMA_RIWT;
2866
+
2867
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
25892868 }
25902869
25912870 if (priv->hw->pcs)
2592
- stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2871
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
25932872
25942873 /* set TX and RX rings length */
25952874 stmmac_set_rings_length(priv);
25962875
25972876 /* Enable TSO */
25982877 if (priv->tso) {
2599
- for (chan = 0; chan < tx_cnt; chan++)
2878
+ for (chan = 0; chan < tx_cnt; chan++) {
2879
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2880
+
2881
+ /* TSO and TBS cannot co-exist */
2882
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
2883
+ continue;
2884
+
26002885 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2886
+ }
26012887 }
2888
+
2889
+ /* Enable Split Header */
2890
+ if (priv->sph && priv->hw->rx_csum) {
2891
+ for (chan = 0; chan < rx_cnt; chan++)
2892
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2893
+ }
2894
+
2895
+ /* VLAN Tag Insertion */
2896
+ if (priv->dma_cap.vlins)
2897
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2898
+
2899
+ /* TBS */
2900
+ for (chan = 0; chan < tx_cnt; chan++) {
2901
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2902
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2903
+
2904
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2905
+ }
2906
+
2907
+ /* Configure real RX and TX queues */
2908
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2909
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
26022910
26032911 /* Start the ball rolling... */
26042912 stmmac_start_all_dma(priv);
....@@ -2625,18 +2933,26 @@
26252933 static int stmmac_open(struct net_device *dev)
26262934 {
26272935 struct stmmac_priv *priv = netdev_priv(dev);
2936
+ int bfsize = 0;
26282937 u32 chan;
26292938 int ret;
26302939
2631
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2632
- priv->hw->pcs != STMMAC_PCS_TBI &&
2633
- priv->hw->pcs != STMMAC_PCS_RTBI) {
2940
+ //printk("ben:stmmac_open.. \n");
2941
+ ret = pm_runtime_get_sync(priv->device);
2942
+ if (ret < 0) {
2943
+ pm_runtime_put_noidle(priv->device);
2944
+ return ret;
2945
+ }
2946
+
2947
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
2948
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
2949
+ priv->hw->xpcs == NULL) {
26342950 ret = stmmac_init_phy(dev);
26352951 if (ret) {
26362952 netdev_err(priv->dev,
26372953 "%s: Cannot attach to PHY (error: %d)\n",
26382954 __func__, ret);
2639
- return ret;
2955
+ goto init_phy_error;
26402956 }
26412957 }
26422958
....@@ -2644,8 +2960,34 @@
26442960 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
26452961 priv->xstats.threshold = tc;
26462962
2647
- priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2963
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2964
+ if (bfsize < 0)
2965
+ bfsize = 0;
2966
+
2967
+ if (bfsize < BUF_SIZE_16KiB)
2968
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2969
+
2970
+ priv->dma_buf_sz = bfsize;
2971
+ buf_sz = bfsize;
2972
+
26482973 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2974
+
2975
+ if (!priv->dma_tx_size)
2976
+ priv->dma_tx_size = priv->plat->dma_tx_size ? priv->plat->dma_tx_size :
2977
+ DMA_DEFAULT_TX_SIZE;
2978
+
2979
+ if (!priv->dma_rx_size)
2980
+ priv->dma_rx_size = priv->plat->dma_rx_size ? priv->plat->dma_rx_size :
2981
+ DMA_DEFAULT_RX_SIZE;
2982
+
2983
+ /* Earlier check for TBS */
2984
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2985
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2986
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2987
+
2988
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
2989
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2990
+ }
26492991
26502992 ret = alloc_dma_desc_resources(priv);
26512993 if (ret < 0) {
....@@ -2661,16 +3003,32 @@
26613003 goto init_error;
26623004 }
26633005
3006
+ if (priv->plat->serdes_powerup) {
3007
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3008
+ if (ret < 0) {
3009
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3010
+ __func__);
3011
+ goto init_error;
3012
+ }
3013
+ }
3014
+
3015
+
3016
+ #if 1
3017
+ printk("ben -------bootup add 2s delay time.\n");
3018
+ mdelay(2500);
3019
+ #endif
3020
+
26643021 ret = stmmac_hw_setup(dev, true);
26653022 if (ret < 0) {
26663023 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
26673024 goto init_error;
26683025 }
26693026
2670
- stmmac_init_tx_coalesce(priv);
3027
+ stmmac_init_coalesce(priv);
26713028
2672
- if (dev->phydev)
2673
- phy_start(dev->phydev);
3029
+ phylink_start(priv->phylink);
3030
+ /* We may have called phylink_speed_down before */
3031
+ phylink_speed_up(priv->phylink);
26743032
26753033 /* Request the IRQ lines */
26763034 ret = request_irq(dev->irq, stmmac_interrupt,
....@@ -2717,8 +3075,7 @@
27173075 wolirq_error:
27183076 free_irq(dev->irq, dev);
27193077 irq_error:
2720
- if (dev->phydev)
2721
- phy_stop(dev->phydev);
3078
+ phylink_stop(priv->phylink);
27223079
27233080 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27243081 del_timer_sync(&priv->tx_queue[chan].txtimer);
....@@ -2727,9 +3084,9 @@
27273084 init_error:
27283085 free_dma_desc_resources(priv);
27293086 dma_desc_error:
2730
- if (dev->phydev)
2731
- phy_disconnect(dev->phydev);
2732
-
3087
+ phylink_disconnect_phy(priv->phylink);
3088
+init_phy_error:
3089
+ pm_runtime_put(priv->device);
27333090 return ret;
27343091 }
27353092
....@@ -2744,11 +3101,14 @@
27443101 struct stmmac_priv *priv = netdev_priv(dev);
27453102 u32 chan;
27463103
3104
+ if (device_may_wakeup(priv->device))
3105
+ phylink_speed_down(priv->phylink, false);
27473106 /* Stop and disconnect the PHY */
2748
- if (dev->phydev) {
2749
- phy_stop(dev->phydev);
2750
- phy_disconnect(dev->phydev);
2751
- }
3107
+ phylink_stop(priv->phylink);
3108
+ phylink_disconnect_phy(priv->phylink);
3109
+
3110
+ if (priv->plat->integrated_phy_power)
3111
+ priv->plat->integrated_phy_power(priv->plat->bsp_priv, false);
27523112
27533113 stmmac_disable_all_queues(priv);
27543114
....@@ -2776,11 +3136,48 @@
27763136 /* Disable the MAC Rx/Tx */
27773137 stmmac_mac_set(priv, priv->ioaddr, false);
27783138
3139
+ /* Powerdown Serdes if there is */
3140
+ if (priv->plat->serdes_powerdown)
3141
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3142
+
27793143 netif_carrier_off(dev);
27803144
27813145 stmmac_release_ptp(priv);
27823146
3147
+ pm_runtime_put(priv->device);
3148
+
27833149 return 0;
3150
+}
3151
+
3152
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3153
+ struct stmmac_tx_queue *tx_q)
3154
+{
3155
+ u16 tag = 0x0, inner_tag = 0x0;
3156
+ u32 inner_type = 0x0;
3157
+ struct dma_desc *p;
3158
+
3159
+ if (!priv->dma_cap.vlins)
3160
+ return false;
3161
+ if (!skb_vlan_tag_present(skb))
3162
+ return false;
3163
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3164
+ inner_tag = skb_vlan_tag_get(skb);
3165
+ inner_type = STMMAC_VLAN_INSERT;
3166
+ }
3167
+
3168
+ tag = skb_vlan_tag_get(skb);
3169
+
3170
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3171
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3172
+ else
3173
+ p = &tx_q->dma_tx[tx_q->cur_tx];
3174
+
3175
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3176
+ return false;
3177
+
3178
+ stmmac_set_tx_owner(priv, p);
3179
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3180
+ return true;
27843181 }
27853182
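A standalone sketch of the tag selection stmmac_vlan_insert() performs above: for 802.1ad (QinQ) frames the skb's tag is also programmed as the inner tag, while plain 802.1Q traffic uses only the outer field. The pick_tags() helper and its output fields are hypothetical stand-ins for the descriptor write.

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

static void pick_tags(uint16_t vlan_proto, uint16_t frame_tag,
		      uint16_t *tag, uint16_t *inner_tag, int *inner_en)
{
	*tag = frame_tag;		/* outer tag always carries the skb tag */
	*inner_tag = 0;
	*inner_en = 0;
	if (vlan_proto == ETH_P_8021AD) {
		*inner_tag = frame_tag;	/* QinQ: tag goes inner as well */
		*inner_en = 1;
	}
}

int main(void)
{
	uint16_t tag, inner;
	int en;

	pick_tags(ETH_P_8021Q, 100, &tag, &inner, &en);
	printf("802.1Q:  outer=%u inner=%u inner_en=%d\n", tag, inner, en);
	pick_tags(ETH_P_8021AD, 100, &tag, &inner, &en);
	printf("802.1ad: outer=%u inner=%u inner_en=%d\n", tag, inner, en);
	return 0;
}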
27863183 /**
....@@ -2788,13 +3185,13 @@
27883185 * @priv: driver private structure
27893186 * @des: buffer start address
27903187 * @total_len: total length to fill in descriptors
2791
- * @last_segmant: condition for the last descriptor
3188
+ * @last_segment: condition for the last descriptor
27923189 * @queue: TX queue index
27933190 * Description:
27943191 * This function fills a descriptor and requests new descriptors according to
27953192 * the buffer length to fill
27963193 */
2797
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
3194
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
27983195 int total_len, bool last_segment, u32 queue)
27993196 {
28003197 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
....@@ -2805,11 +3202,23 @@
28053202 tmp_len = total_len;
28063203
28073204 while (tmp_len > 0) {
2808
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2809
- WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2810
- desc = tx_q->dma_tx + tx_q->cur_tx;
3205
+ dma_addr_t curr_addr;
28113206
2812
- desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
3207
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3208
+ priv->dma_tx_size);
3209
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3210
+
3211
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3212
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3213
+ else
3214
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
3215
+
3216
+ curr_addr = des + (total_len - tmp_len);
3217
+ if (priv->dma_cap.addr64 <= 32)
3218
+ desc->des0 = cpu_to_le32(curr_addr);
3219
+ else
3220
+ stmmac_set_desc_addr(priv, desc, curr_addr);
3221
+
28133222 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
28143223 TSO_MAX_BUFF_SIZE : tmp_len;
28153224
....@@ -2853,16 +3262,19 @@
28533262 {
28543263 struct dma_desc *desc, *first, *mss_desc = NULL;
28553264 struct stmmac_priv *priv = netdev_priv(dev);
3265
+ int desc_size, tmp_pay_len = 0, first_tx;
28563266 int nfrags = skb_shinfo(skb)->nr_frags;
28573267 u32 queue = skb_get_queue_mapping(skb);
2858
- unsigned int first_entry, des;
2859
- u8 proto_hdr_len, hdr;
3268
+ unsigned int first_entry, tx_packets;
28603269 struct stmmac_tx_queue *tx_q;
2861
- int tmp_pay_len = 0;
3270
+ bool has_vlan, set_ic;
3271
+ u8 proto_hdr_len, hdr;
28623272 u32 pay_len, mss;
3273
+ dma_addr_t des;
28633274 int i;
28643275
28653276 tx_q = &priv->tx_queue[queue];
3277
+ first_tx = tx_q->cur_tx;
28663278
28673279 /* Compute header lengths */
28683280 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
....@@ -2893,10 +3305,15 @@
28933305
28943306 /* set new MSS value if needed */
28953307 if (mss != tx_q->mss) {
2896
- mss_desc = tx_q->dma_tx + tx_q->cur_tx;
3308
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3309
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3310
+ else
3311
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3312
+
28973313 stmmac_set_mss(priv, mss_desc, mss);
28983314 tx_q->mss = mss;
2899
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3315
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3316
+ priv->dma_tx_size);
29003317 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
29013318 }
29023319
....@@ -2907,11 +3324,20 @@
29073324 skb->data_len);
29083325 }
29093326
3327
+ /* Check if VLAN can be inserted by HW */
3328
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3329
+
29103330 first_entry = tx_q->cur_tx;
29113331 WARN_ON(tx_q->tx_skbuff[first_entry]);
29123332
2913
- desc = tx_q->dma_tx + first_entry;
3333
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3334
+ desc = &tx_q->dma_entx[first_entry].basic;
3335
+ else
3336
+ desc = &tx_q->dma_tx[first_entry];
29143337 first = desc;
3338
+
3339
+ if (has_vlan)
3340
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
29153341
29163342 /* first descriptor: fill Headers on Buf1 */
29173343 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
....@@ -2922,14 +3348,21 @@
29223348 tx_q->tx_skbuff_dma[first_entry].buf = des;
29233349 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
29243350
2925
- first->des0 = cpu_to_le32(des);
3351
+ if (priv->dma_cap.addr64 <= 32) {
3352
+ first->des0 = cpu_to_le32(des);
29263353
2927
- /* Fill start of payload in buff2 of first descriptor */
2928
- if (pay_len)
2929
- first->des1 = cpu_to_le32(des + proto_hdr_len);
3354
+ /* Fill start of payload in buff2 of first descriptor */
3355
+ if (pay_len)
3356
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
29303357
2931
- /* If needed take extra descriptors to fill the remaining payload */
2932
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3358
+ /* If needed take extra descriptors to fill the remaining payload */
3359
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3360
+ } else {
3361
+ stmmac_set_desc_addr(priv, first, des);
3362
+ tmp_pay_len = pay_len;
3363
+ des += proto_hdr_len;
3364
+ pay_len = 0;
3365
+ }
29333366
29343367 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
29353368
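stmmac_tso_allocator() above walks the remaining payload in TSO_MAX_BUFF_SIZE chunks, advancing the DMA address by what has already been consumed (des + total_len - tmp_len). A runnable restatement of that arithmetic, with printf standing in for the descriptor writes:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE ((16 * 1024) - 1)

static int count_tso_descs(unsigned long des, int total_len)
{
	int tmp_len = total_len, n = 0;

	while (tmp_len > 0) {
		unsigned long curr = des + (total_len - tmp_len);
		int sz = tmp_len >= TSO_MAX_BUFF_SIZE ?
			 TSO_MAX_BUFF_SIZE : tmp_len;

		printf("desc %d: addr=0x%lx len=%d\n", n++, curr, sz);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
	return n;
}

int main(void)
{
	count_tso_descs(0x1000, 40000);	/* 40000 bytes -> 3 descriptors */
	return 0;
}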
....@@ -2956,12 +3389,38 @@
29563389 /* Only the last descriptor gets to point to the skb. */
29573390 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
29583391
3392
+ /* Manage tx mitigation */
3393
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
3394
+ tx_q->tx_count_frames += tx_packets;
3395
+
3396
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3397
+ set_ic = true;
3398
+ else if (!priv->tx_coal_frames)
3399
+ set_ic = false;
3400
+ else if (tx_packets > priv->tx_coal_frames)
3401
+ set_ic = true;
3402
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3403
+ set_ic = true;
3404
+ else
3405
+ set_ic = false;
3406
+
3407
+ if (set_ic) {
3408
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3409
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3410
+ else
3411
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
3412
+
3413
+ tx_q->tx_count_frames = 0;
3414
+ stmmac_set_tx_ic(priv, desc);
3415
+ priv->xstats.tx_set_ic_bit++;
3416
+ }
3417
+
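The set_ic ladder just above decides when a descriptor gets the interrupt-on-completion bit: always for HW-timestamped skbs, never when frame coalescing is disabled, otherwise whenever this burst crosses a tx_coal_frames boundary. A runnable restatement (want_ic() is a hypothetical helper name):

#include <stdbool.h>
#include <stdio.h>

static bool want_ic(bool hw_tstamp, unsigned int tx_packets,
		    unsigned int count_frames, unsigned int coal_frames)
{
	if (hw_tstamp)
		return true;			/* timestamp needs an IRQ */
	if (!coal_frames)
		return false;			/* coalescing by count disabled */
	if (tx_packets > coal_frames)
		return true;			/* burst bigger than the window */
	return (count_frames % coal_frames) < tx_packets; /* boundary crossed */
}

int main(void)
{
	/* 25th frame with coal_frames=25 and a 1-packet burst: IC is set */
	printf("%d\n", want_ic(false, 1, 25, 25));
	return 0;
}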
29593418 /* We've used all descriptors we need for this skb, however,
29603419 * advance cur_tx so that it references a fresh descriptor.
29613420 * ndo_start_xmit will fill this descriptor the next time it's
29623421 * called and stmmac_tx_clean may clean up to this descriptor.
29633422 */
2964
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3423
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
29653424
29663425 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
29673426 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
....@@ -2973,18 +3432,8 @@
29733432 priv->xstats.tx_tso_frames++;
29743433 priv->xstats.tx_tso_nfrags += nfrags;
29753434
2976
- /* Manage tx mitigation */
2977
- tx_q->tx_count_frames += nfrags + 1;
2978
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2979
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2980
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2981
- priv->hwts_tx_en)) {
2982
- stmmac_tx_timer_arm(priv, queue);
2983
- } else {
2984
- tx_q->tx_count_frames = 0;
2985
- stmmac_set_tx_ic(priv, desc);
2986
- priv->xstats.tx_set_ic_bit++;
2987
- }
3435
+ if (priv->sarc_type)
3436
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
29883437
29893438 skb_tx_timestamp(skb);
29903439
....@@ -3023,16 +3472,18 @@
30233472 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
30243473 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
30253474 tx_q->cur_tx, first, nfrags);
3026
-
3027
- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3028
-
30293475 pr_info(">>> frame to be transmitted: ");
30303476 print_pkt(skb->data, skb_headlen(skb));
30313477 }
30323478
30333479 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
30343480
3035
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3481
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3482
+ desc_size = sizeof(struct dma_edesc);
3483
+ else
3484
+ desc_size = sizeof(struct dma_desc);
3485
+
3486
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
30363487 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
30373488 stmmac_tx_timer_arm(priv, queue);
30383489
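The tail-pointer update above is plain ring arithmetic: base physical address plus cur_tx descriptor strides, where the stride now depends on the ring flavour. The sizes below (16 bytes basic, 32 bytes extended/TBS) are assumptions matching the 32-bit descriptor layouts, and the enum is illustrative only.

#include <stdint.h>
#include <stdio.h>

enum ring_kind { RING_BASIC, RING_EXTENDED, RING_TBS };

static uint64_t tail_addr(uint64_t ring_phys, unsigned int cur_tx,
			  enum ring_kind kind)
{
	unsigned int stride;

	switch (kind) {
	case RING_EXTENDED: stride = 32; break;	/* dma_extended_desc */
	case RING_TBS:	    stride = 32; break;	/* dma_edesc */
	default:	    stride = 16; break;	/* dma_desc */
	}
	return ring_phys + (uint64_t)cur_tx * stride;
}

int main(void)
{
	printf("tail=0x%llx\n",
	       (unsigned long long)tail_addr(0x80000000ULL, 10, RING_BASIC));
	return 0;
}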
....@@ -3055,20 +3506,22 @@
30553506 */
30563507 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
30573508 {
3509
+ unsigned int first_entry, tx_packets, enh_desc;
30583510 struct stmmac_priv *priv = netdev_priv(dev);
30593511 unsigned int nopaged_len = skb_headlen(skb);
30603512 int i, csum_insertion = 0, is_jumbo = 0;
30613513 u32 queue = skb_get_queue_mapping(skb);
30623514 int nfrags = skb_shinfo(skb)->nr_frags;
30633515 int gso = skb_shinfo(skb)->gso_type;
3064
- int entry;
3065
- unsigned int first_entry;
3516
+ struct dma_edesc *tbs_desc = NULL;
3517
+ int entry, desc_size, first_tx;
30663518 struct dma_desc *desc, *first;
30673519 struct stmmac_tx_queue *tx_q;
3068
- unsigned int enh_desc;
3069
- unsigned int des;
3520
+ bool has_vlan, set_ic;
3521
+ dma_addr_t des;
30703522
30713523 tx_q = &priv->tx_queue[queue];
3524
+ first_tx = tx_q->cur_tx;
30723525
30733526 if (priv->tx_path_in_lpi_mode)
30743527 stmmac_disable_eee_mode(priv);
....@@ -3093,6 +3546,9 @@
30933546 return NETDEV_TX_BUSY;
30943547 }
30953548
3549
+ /* Check if VLAN can be inserted by HW */
3550
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3551
+
30963552 entry = tx_q->cur_tx;
30973553 first_entry = entry;
30983554 WARN_ON(tx_q->tx_skbuff[first_entry]);
....@@ -3101,10 +3557,15 @@
31013557
31023558 if (likely(priv->extend_desc))
31033559 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3560
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3561
+ desc = &tx_q->dma_entx[entry].basic;
31043562 else
31053563 desc = tx_q->dma_tx + entry;
31063564
31073565 first = desc;
3566
+
3567
+ if (has_vlan)
3568
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
31083569
31093570 enh_desc = priv->plat->enh_desc;
31103571 /* To program the descriptors according to the size of the frame */
....@@ -3122,11 +3583,13 @@
31223583 int len = skb_frag_size(frag);
31233584 bool last_segment = (i == (nfrags - 1));
31243585
3125
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3586
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
31263587 WARN_ON(tx_q->tx_skbuff[entry]);
31273588
31283589 if (likely(priv->extend_desc))
31293590 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3591
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3592
+ desc = &tx_q->dma_entx[entry].basic;
31303593 else
31313594 desc = tx_q->dma_tx + entry;
31323595
....@@ -3151,28 +3614,51 @@
31513614 /* Only the last descriptor gets to point to the skb. */
31523615 tx_q->tx_skbuff[entry] = skb;
31533616
3617
+ /* According to the coalesce parameter the IC bit for the latest
3618
+ * segment is reset and the timer re-started to clean the tx status.
3619
+ * This approach takes care of the fragments: desc is the first
3620
+ * element in case of no SG.
3621
+ */
3622
+ tx_packets = (entry + 1) - first_tx;
3623
+ tx_q->tx_count_frames += tx_packets;
3624
+
3625
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3626
+ set_ic = true;
3627
+ else if (!priv->tx_coal_frames)
3628
+ set_ic = false;
3629
+ else if (tx_packets > priv->tx_coal_frames)
3630
+ set_ic = true;
3631
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3632
+ set_ic = true;
3633
+ else
3634
+ set_ic = false;
3635
+
3636
+ if (set_ic) {
3637
+ if (likely(priv->extend_desc))
3638
+ desc = &tx_q->dma_etx[entry].basic;
3639
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3640
+ desc = &tx_q->dma_entx[entry].basic;
3641
+ else
3642
+ desc = &tx_q->dma_tx[entry];
3643
+
3644
+ tx_q->tx_count_frames = 0;
3645
+ stmmac_set_tx_ic(priv, desc);
3646
+ priv->xstats.tx_set_ic_bit++;
3647
+ }
3648
+
31543649 /* We've used all descriptors we need for this skb, however,
31553650 * advance cur_tx so that it references a fresh descriptor.
31563651 * ndo_start_xmit will fill this descriptor the next time it's
31573652 * called and stmmac_tx_clean may clean up to this descriptor.
31583653 */
3159
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3654
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
31603655 tx_q->cur_tx = entry;
31613656
31623657 if (netif_msg_pktdata(priv)) {
3163
- void *tx_head;
3164
-
31653658 netdev_dbg(priv->dev,
31663659 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
31673660 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
31683661 entry, first, nfrags);
3169
-
3170
- if (priv->extend_desc)
3171
- tx_head = (void *)tx_q->dma_etx;
3172
- else
3173
- tx_head = (void *)tx_q->dma_tx;
3174
-
3175
- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
31763662
31773663 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
31783664 print_pkt(skb->data, skb->len);
....@@ -3186,22 +3672,8 @@
31863672
31873673 dev->stats.tx_bytes += skb->len;
31883674
3189
- /* According to the coalesce parameter the IC bit for the latest
3190
- * segment is reset and the timer re-started to clean the tx status.
3191
- * This approach takes care about the fragments: desc is the first
3192
- * element in case of no SG.
3193
- */
3194
- tx_q->tx_count_frames += nfrags + 1;
3195
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3196
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3197
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3198
- priv->hwts_tx_en)) {
3199
- stmmac_tx_timer_arm(priv, queue);
3200
- } else {
3201
- tx_q->tx_count_frames = 0;
3202
- stmmac_set_tx_ic(priv, desc);
3203
- priv->xstats.tx_set_ic_bit++;
3204
- }
3675
+ if (priv->sarc_type)
3676
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
32053677
32063678 skb_tx_timestamp(skb);
32073679
....@@ -3233,11 +3705,18 @@
32333705
32343706 /* Prepare the first descriptor setting the OWN bit too */
32353707 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3236
- csum_insertion, priv->mode, 1, last_segment,
3708
+ csum_insertion, priv->mode, 0, last_segment,
32373709 skb->len);
3238
- } else {
3239
- stmmac_set_tx_owner(priv, first);
32403710 }
3711
+
3712
+ if (tx_q->tbs & STMMAC_TBS_EN) {
3713
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3714
+
3715
+ tbs_desc = &tx_q->dma_entx[first_entry];
3716
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3717
+ }
3718
+
3719
+ stmmac_set_tx_owner(priv, first);
32413720
32423721 /* The own bit must be the latest setting done when prepare the
32433722 * descriptor and then barrier is needed to make sure that
....@@ -3249,7 +3728,14 @@
32493728
32503729 stmmac_enable_dma_transmission(priv, priv->ioaddr);
32513730
3252
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3731
+ if (likely(priv->extend_desc))
3732
+ desc_size = sizeof(struct dma_extended_desc);
3733
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3734
+ desc_size = sizeof(struct dma_edesc);
3735
+ else
3736
+ desc_size = sizeof(struct dma_desc);
3737
+
3738
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
32533739 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32543740 stmmac_tx_timer_arm(priv, queue);
32553741
....@@ -3283,15 +3769,6 @@
32833769 }
32843770 }
32853771
3286
-
3287
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3288
-{
3289
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3290
- return 0;
3291
-
3292
- return 1;
3293
-}
3294
-
32953772 /**
32963773 * stmmac_rx_refill - refill used skb preallocated buffers
32973774 * @priv: driver private structure
....@@ -3302,63 +3779,115 @@
33023779 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
33033780 {
33043781 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3305
- int dirty = stmmac_rx_dirty(priv, queue);
3782
+ int len, dirty = stmmac_rx_dirty(priv, queue);
33063783 unsigned int entry = rx_q->dirty_rx;
3784
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
33073785
3308
- int bfsize = priv->dma_buf_sz;
3786
+ if (priv->dma_cap.addr64 <= 32)
3787
+ gfp |= GFP_DMA32;
3788
+
3789
+ len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
33093790
33103791 while (dirty-- > 0) {
3792
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
33113793 struct dma_desc *p;
3794
+ bool use_rx_wd;
33123795
33133796 if (priv->extend_desc)
33143797 p = (struct dma_desc *)(rx_q->dma_erx + entry);
33153798 else
33163799 p = rx_q->dma_rx + entry;
33173800
3318
- if (likely(!rx_q->rx_skbuff[entry])) {
3319
- struct sk_buff *skb;
3320
-
3321
- skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3322
- if (unlikely(!skb)) {
3323
- /* so for a while no zero-copy! */
3324
- rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3325
- if (unlikely(net_ratelimit()))
3326
- dev_err(priv->device,
3327
- "fail to alloc skb entry %d\n",
3328
- entry);
3801
+ if (!buf->page) {
3802
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3803
+ if (!buf->page)
33293804 break;
3330
- }
3331
-
3332
- rx_q->rx_skbuff[entry] = skb;
3333
- rx_q->rx_skbuff_dma[entry] =
3334
- dma_map_single(priv->device, skb->data, bfsize,
3335
- DMA_FROM_DEVICE);
3336
- if (dma_mapping_error(priv->device,
3337
- rx_q->rx_skbuff_dma[entry])) {
3338
- netdev_err(priv->dev, "Rx DMA map failed\n");
3339
- dev_kfree_skb(skb);
3340
- break;
3341
- }
3342
-
3343
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3344
- stmmac_refill_desc3(priv, rx_q, p);
3345
-
3346
- if (rx_q->rx_zeroc_thresh > 0)
3347
- rx_q->rx_zeroc_thresh--;
3348
-
3349
- netif_dbg(priv, rx_status, priv->dev,
3350
- "refill entry #%d\n", entry);
33513805 }
3352
- dma_wmb();
33533806
3354
- stmmac_set_rx_owner(priv, p, priv->use_riwt);
3807
+ if (priv->sph && !buf->sec_page) {
3808
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3809
+ if (!buf->sec_page)
3810
+ break;
3811
+
3812
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3813
+ }
3814
+
3815
+ buf->addr = page_pool_get_dma_addr(buf->page);
3816
+ stmmac_set_desc_addr(priv, p, buf->addr);
3817
+ if (priv->sph)
3818
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3819
+ else
3820
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
3821
+ stmmac_refill_desc3(priv, rx_q, p);
3822
+
3823
+ rx_q->rx_count_frames++;
3824
+ rx_q->rx_count_frames += priv->rx_coal_frames;
3825
+ if (rx_q->rx_count_frames > priv->rx_coal_frames)
3826
+ rx_q->rx_count_frames = 0;
3827
+
3828
+ use_rx_wd = !priv->rx_coal_frames;
3829
+ use_rx_wd |= rx_q->rx_count_frames > 0;
3830
+ if (!priv->use_riwt)
3831
+ use_rx_wd = false;
33553832
33563833 dma_wmb();
3834
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
33573835
3358
- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3836
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
33593837 }
33603838 rx_q->dirty_rx = entry;
3839
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3840
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
33613841 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3842
+}
3843
+
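For reference, the use_rx_wd decision in the refill loop above, restated as it is written: the descriptor's RX watchdog bit is requested when frame-count coalescing is off or a window is still open, and suppressed outright when RIWT is not in use. This is a faithful userspace transcription for reading, not a claim about intent.

#include <stdbool.h>
#include <stdio.h>

static bool use_rx_watchdog(bool use_riwt, unsigned int *count_frames,
			    unsigned int coal_frames)
{
	bool wd;

	(*count_frames)++;			/* mirror the counter update */
	*count_frames += coal_frames;
	if (*count_frames > coal_frames)
		*count_frames = 0;

	wd = !coal_frames || *count_frames > 0;
	return use_riwt ? wd : false;		/* RIWT gates everything */
}

int main(void)
{
	unsigned int cf = 0;

	/* coalescing disabled: watchdog requested on every refill */
	printf("%d\n", use_rx_watchdog(true, &cf, 0));
	return 0;
}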
3844
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3845
+ struct dma_desc *p,
3846
+ int status, unsigned int len)
3847
+{
3848
+ unsigned int plen = 0, hlen = 0;
3849
+ int coe = priv->hw->rx_csum;
3850
+
3851
+ /* Not first descriptor, buffer is always zero */
3852
+ if (priv->sph && len)
3853
+ return 0;
3854
+
3855
+ /* First descriptor, get split header length */
3856
+ stmmac_get_rx_header_len(priv, p, &hlen);
3857
+ if (priv->sph && hlen) {
3858
+ priv->xstats.rx_split_hdr_pkt_n++;
3859
+ return hlen;
3860
+ }
3861
+
3862
+ /* First descriptor, not last descriptor and not split header */
3863
+ if (status & rx_not_ls)
3864
+ return priv->dma_buf_sz;
3865
+
3866
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
3867
+
3868
+ /* First descriptor and last descriptor and not split header */
3869
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
3870
+}
3871
+
3872
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3873
+ struct dma_desc *p,
3874
+ int status, unsigned int len)
3875
+{
3876
+ int coe = priv->hw->rx_csum;
3877
+ unsigned int plen = 0;
3878
+
3879
+ /* Not split header, buffer is not available */
3880
+ if (!priv->sph)
3881
+ return 0;
3882
+
3883
+ /* Not last descriptor */
3884
+ if (status & rx_not_ls)
3885
+ return priv->dma_buf_sz;
3886
+
3887
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
3888
+
3889
+ /* Last descriptor */
3890
+ return plen - len;
33623891 }
33633892
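The two helpers above split a received frame between the header buffer and the payload buffer on the split-header (SPH) path. A minimal model of the same math, assuming a placeholder DMA_BUF_SZ of 1536 and a hypothetical rx_lens() wrapper around both computations:

#include <stdio.h>

#define DMA_BUF_SZ 1536

struct rx_len { unsigned int buf1, buf2; };

static struct rx_len rx_lens(int sph, int not_last, unsigned int hlen,
			     unsigned int frame_len, unsigned int len_so_far)
{
	struct rx_len r = { 0, 0 };

	/* buf1: header on the first desc, full buffer mid-frame, else tail */
	if (!(sph && len_so_far)) {
		if (sph && hlen)
			r.buf1 = hlen;
		else if (not_last)
			r.buf1 = DMA_BUF_SZ;
		else
			r.buf1 = frame_len < DMA_BUF_SZ ? frame_len : DMA_BUF_SZ;
	}

	/* buf2 exists only with SPH; payload remainder on the last desc */
	if (sph)
		r.buf2 = not_last ? DMA_BUF_SZ :
			 frame_len - (len_so_far + r.buf1);
	return r;
}

int main(void)
{
	struct rx_len r = rx_lens(1, 0, 64, 1000, 0);

	printf("buf1=%u buf2=%u\n", r.buf1, r.buf2);	/* 64 + 936 */
	return 0;
}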
33643893 /**
....@@ -3373,30 +3902,54 @@
33733902 {
33743903 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33753904 struct stmmac_channel *ch = &priv->channel[queue];
3905
+ unsigned int count = 0, error = 0, len = 0;
3906
+ int status = 0, coe = priv->hw->rx_csum;
33763907 unsigned int next_entry = rx_q->cur_rx;
3377
- int coe = priv->hw->rx_csum;
3378
- unsigned int count = 0;
3379
- bool xmac;
3380
-
3381
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3908
+ unsigned int desc_size;
3909
+ struct sk_buff *skb = NULL;
33823910
33833911 if (netif_msg_rx_status(priv)) {
33843912 void *rx_head;
33853913
33863914 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3387
- if (priv->extend_desc)
3915
+ if (priv->extend_desc) {
33883916 rx_head = (void *)rx_q->dma_erx;
3389
- else
3917
+ desc_size = sizeof(struct dma_extended_desc);
3918
+ } else {
33903919 rx_head = (void *)rx_q->dma_rx;
3920
+ desc_size = sizeof(struct dma_desc);
3921
+ }
33913922
3392
- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3923
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3924
+ rx_q->dma_rx_phy, desc_size);
33933925 }
33943926 while (count < limit) {
3395
- int entry, status;
3396
- struct dma_desc *p;
3397
- struct dma_desc *np;
3927
+ unsigned int buf1_len = 0, buf2_len = 0;
3928
+ enum pkt_hash_types hash_type;
3929
+ struct stmmac_rx_buffer *buf;
3930
+ struct dma_desc *np, *p;
3931
+ int entry;
3932
+ u32 hash;
33983933
3934
+ if (!count && rx_q->state_saved) {
3935
+ skb = rx_q->state.skb;
3936
+ error = rx_q->state.error;
3937
+ len = rx_q->state.len;
3938
+ } else {
3939
+ rx_q->state_saved = false;
3940
+ skb = NULL;
3941
+ error = 0;
3942
+ len = 0;
3943
+ }
3944
+
3945
+ if ((count >= limit - 1) && limit > 1)
3946
+ break;
3947
+
3948
+read_again:
3949
+ buf1_len = 0;
3950
+ buf2_len = 0;
33993951 entry = next_entry;
3952
+ buf = &rx_q->buf_pool[entry];
34003953
34013954 if (priv->extend_desc)
34023955 p = (struct dma_desc *)(rx_q->dma_erx + entry);
....@@ -3410,9 +3963,8 @@
34103963 if (unlikely(status & dma_own))
34113964 break;
34123965
3413
- count++;
3414
-
3415
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3966
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3967
+ priv->dma_rx_size);
34163968 next_entry = rx_q->cur_rx;
34173969
34183970 if (priv->extend_desc)
....@@ -3426,133 +3978,126 @@
34263978 stmmac_rx_extended_status(priv, &priv->dev->stats,
34273979 &priv->xstats, rx_q->dma_erx + entry);
34283980 if (unlikely(status == discard_frame)) {
3429
- priv->dev->stats.rx_errors++;
3430
- if (priv->hwts_rx_en && !priv->extend_desc) {
3431
- /* DESC2 & DESC3 will be overwritten by device
3432
- * with timestamp value, hence reinitialize
3433
- * them in stmmac_rx_refill() function so that
3434
- * device can reuse it.
3435
- */
3436
- dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3437
- rx_q->rx_skbuff[entry] = NULL;
3438
- dma_unmap_single(priv->device,
3439
- rx_q->rx_skbuff_dma[entry],
3440
- priv->dma_buf_sz,
3441
- DMA_FROM_DEVICE);
3442
- }
3443
- } else {
3444
- struct sk_buff *skb;
3445
- int frame_len;
3446
- unsigned int des;
3447
-
3448
- stmmac_get_desc_addr(priv, p, &des);
3449
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3450
-
3451
- /* If frame length is greater than skb buffer size
3452
- * (preallocated during init) then the packet is
3453
- * ignored
3454
- */
3455
- if (frame_len > priv->dma_buf_sz) {
3456
- if (net_ratelimit())
3457
- netdev_err(priv->dev,
3458
- "len %d larger than size (%d)\n",
3459
- frame_len, priv->dma_buf_sz);
3460
- priv->dev->stats.rx_length_errors++;
3461
- continue;
3462
- }
3463
-
3464
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3465
- * Type frames (LLC/LLC-SNAP)
3466
- *
3467
- * llc_snap is never checked in GMAC >= 4, so this ACS
3468
- * feature is always disabled and packets need to be
3469
- * stripped manually.
3470
- */
3471
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3472
- unlikely(status != llc_snap))
3473
- frame_len -= ETH_FCS_LEN;
3474
-
3475
- if (netif_msg_rx_status(priv)) {
3476
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3477
- p, entry, des);
3478
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3479
- frame_len, status);
3480
- }
3481
-
3482
- /* The zero-copy is always used for all the sizes
3483
- * in case of GMAC4 because it needs
3484
- * to refill the used descriptors, always.
3485
- */
3486
- if (unlikely(!xmac &&
3487
- ((frame_len < priv->rx_copybreak) ||
3488
- stmmac_rx_threshold_count(rx_q)))) {
3489
- skb = netdev_alloc_skb_ip_align(priv->dev,
3490
- frame_len);
3491
- if (unlikely(!skb)) {
3492
- if (net_ratelimit())
3493
- dev_warn(priv->device,
3494
- "packet dropped\n");
3495
- priv->dev->stats.rx_dropped++;
3496
- continue;
3497
- }
3498
-
3499
- dma_sync_single_for_cpu(priv->device,
3500
- rx_q->rx_skbuff_dma
3501
- [entry], frame_len,
3502
- DMA_FROM_DEVICE);
3503
- skb_copy_to_linear_data(skb,
3504
- rx_q->
3505
- rx_skbuff[entry]->data,
3506
- frame_len);
3507
-
3508
- skb_put(skb, frame_len);
3509
- dma_sync_single_for_device(priv->device,
3510
- rx_q->rx_skbuff_dma
3511
- [entry], frame_len,
3512
- DMA_FROM_DEVICE);
3513
- } else {
3514
- skb = rx_q->rx_skbuff[entry];
3515
- if (unlikely(!skb)) {
3516
- if (net_ratelimit())
3517
- netdev_err(priv->dev,
3518
- "%s: Inconsistent Rx chain\n",
3519
- priv->dev->name);
3520
- priv->dev->stats.rx_dropped++;
3521
- continue;
3522
- }
3523
- prefetch(skb->data - NET_IP_ALIGN);
3524
- rx_q->rx_skbuff[entry] = NULL;
3525
- rx_q->rx_zeroc_thresh++;
3526
-
3527
- skb_put(skb, frame_len);
3528
- dma_unmap_single(priv->device,
3529
- rx_q->rx_skbuff_dma[entry],
3530
- priv->dma_buf_sz,
3531
- DMA_FROM_DEVICE);
3532
- }
3533
-
3534
- if (netif_msg_pktdata(priv)) {
3535
- netdev_dbg(priv->dev, "frame received (%dbytes)",
3536
- frame_len);
3537
- print_pkt(skb->data, frame_len);
3538
- }
3539
-
3540
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
3541
-
3542
- stmmac_rx_vlan(priv->dev, skb);
3543
-
3544
- skb->protocol = eth_type_trans(skb, priv->dev);
3545
-
3546
- if (unlikely(!coe))
3547
- skb_checksum_none_assert(skb);
3548
- else
3549
- skb->ip_summed = CHECKSUM_UNNECESSARY;
3550
-
3551
- napi_gro_receive(&ch->napi, skb);
3552
-
3553
- priv->dev->stats.rx_packets++;
3554
- priv->dev->stats.rx_bytes += frame_len;
3981
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
3982
+ buf->page = NULL;
3983
+ error = 1;
3984
+ if (!priv->hwts_rx_en)
3985
+ priv->dev->stats.rx_errors++;
35553986 }
3987
+
3988
+ if (unlikely(error && (status & rx_not_ls)))
3989
+ goto read_again;
3990
+ if (unlikely(error)) {
3991
+ dev_kfree_skb(skb);
3992
+ skb = NULL;
3993
+ count++;
3994
+ continue;
3995
+ }
3996
+
3997
+ /* Buffer is good. Go on. */
3998
+
3999
+ prefetch(page_address(buf->page));
4000
+ if (buf->sec_page)
4001
+ prefetch(page_address(buf->sec_page));
4002
+
4003
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4004
+ len += buf1_len;
4005
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
4006
+ len += buf2_len;
4007
+
4008
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4009
+ * Type frames (LLC/LLC-SNAP)
4010
+ *
4011
+ * llc_snap is never checked in GMAC >= 4, so this ACS
4012
+ * feature is always disabled and packets need to be
4013
+ * stripped manually.
4014
+ */
4015
+ if (likely(!(status & rx_not_ls)) &&
4016
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4017
+ unlikely(status != llc_snap))) {
4018
+ if (buf2_len)
4019
+ buf2_len -= ETH_FCS_LEN;
4020
+ else
4021
+ buf1_len -= ETH_FCS_LEN;
4022
+
4023
+ len -= ETH_FCS_LEN;
4024
+ }
4025
+
4026
+ if (!skb) {
4027
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
4028
+ if (!skb) {
4029
+ priv->dev->stats.rx_dropped++;
4030
+ count++;
4031
+ goto drain_data;
4032
+ }
4033
+
4034
+ dma_sync_single_for_cpu(priv->device, buf->addr,
4035
+ buf1_len, DMA_FROM_DEVICE);
4036
+ skb_copy_to_linear_data(skb, page_address(buf->page),
4037
+ buf1_len);
4038
+ skb_put(skb, buf1_len);
4039
+
4040
+ /* Data payload copied into SKB, page ready for recycle */
4041
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
4042
+ buf->page = NULL;
4043
+ } else if (buf1_len) {
4044
+ dma_sync_single_for_cpu(priv->device, buf->addr,
4045
+ buf1_len, DMA_FROM_DEVICE);
4046
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4047
+ buf->page, 0, buf1_len,
4048
+ priv->dma_buf_sz);
4049
+
4050
+ /* Data payload appended into SKB */
4051
+ page_pool_release_page(rx_q->page_pool, buf->page);
4052
+ buf->page = NULL;
4053
+ }
4054
+
4055
+ if (buf2_len) {
4056
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
4057
+ buf2_len, DMA_FROM_DEVICE);
4058
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4059
+ buf->sec_page, 0, buf2_len,
4060
+ priv->dma_buf_sz);
4061
+
4062
+ /* Data payload appended into SKB */
4063
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
4064
+ buf->sec_page = NULL;
4065
+ }
4066
+
4067
+drain_data:
4068
+ if (likely(status & rx_not_ls))
4069
+ goto read_again;
4070
+ if (!skb)
4071
+ continue;
4072
+
4073
+ /* Got entire packet into SKB. Finish it. */
4074
+
4075
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
4076
+ stmmac_rx_vlan(priv->dev, skb);
4077
+ skb->protocol = eth_type_trans(skb, priv->dev);
4078
+
4079
+ if (unlikely(!coe))
4080
+ skb_checksum_none_assert(skb);
4081
+ else
4082
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
4083
+
4084
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4085
+ skb_set_hash(skb, hash, hash_type);
4086
+
4087
+ skb_record_rx_queue(skb, queue);
4088
+ napi_gro_receive(&ch->rx_napi, skb);
4089
+ skb = NULL;
4090
+
4091
+ priv->dev->stats.rx_packets++;
4092
+ priv->dev->stats.rx_bytes += len;
4093
+ count++;
4094
+ }
4095
+
4096
+ if (status & rx_not_ls || skb) {
4097
+ rx_q->state_saved = true;
4098
+ rx_q->state.skb = skb;
4099
+ rx_q->state.error = error;
4100
+ rx_q->state.len = len;
35564101 }
35574102
35584103 stmmac_rx_refill(priv, queue);
....@@ -3562,40 +4107,47 @@
35624107 return count;
35634108 }
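The state_saved mechanism introduced above makes the RX loop resumable: when the NAPI budget runs out mid-frame, the partially built skb, error flag and running length are parked in the queue and restored at the start of the next poll (the driver restores them only on the first iteration). A simplified sketch with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct rxq_state {
	bool saved;
	void *skb;		/* partially assembled frame */
	int error;
	unsigned int len;
};

static void poll_start(struct rxq_state *st, void **skb,
		       int *error, unsigned int *len)
{
	if (st->saved) {	/* resume the interrupted frame */
		*skb = st->skb;
		*error = st->error;
		*len = st->len;
	} else {
		*skb = NULL;
		*error = 0;
		*len = 0;
	}
	st->saved = false;
}

static void poll_stop(struct rxq_state *st, void *skb,
		      int error, unsigned int len, bool mid_frame)
{
	if (mid_frame || skb) {	/* park progress for the next poll */
		st->saved = true;
		st->skb = skb;
		st->error = error;
		st->len = len;
	}
}

int main(void)
{
	struct rxq_state st = { 0 };
	void *skb = NULL;
	int err = 0;
	unsigned int len = 0;

	poll_start(&st, &skb, &err, &len);
	poll_stop(&st, (void *)0x1, 0, 700, true);	/* budget hit */
	poll_start(&st, &skb, &err, &len);
	printf("resumed skb=%p len=%u\n", skb, len);
	return 0;
}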
35644109
3565
-/**
3566
- * stmmac_poll - stmmac poll method (NAPI)
3567
- * @napi : pointer to the napi structure.
3568
- * @budget : maximum number of packets that the current CPU can receive from
3569
- * all interfaces.
3570
- * Description :
3571
- * To look at the incoming frames and clear the tx resources.
3572
- */
3573
-static int stmmac_napi_poll(struct napi_struct *napi, int budget)
4110
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
35744111 {
35754112 struct stmmac_channel *ch =
3576
- container_of(napi, struct stmmac_channel, napi);
4113
+ container_of(napi, struct stmmac_channel, rx_napi);
35774114 struct stmmac_priv *priv = ch->priv_data;
3578
- int work_done, rx_done = 0, tx_done = 0;
35794115 u32 chan = ch->index;
4116
+ int work_done;
35804117
35814118 priv->xstats.napi_poll++;
35824119
3583
- if (ch->has_tx)
3584
- tx_done = stmmac_tx_clean(priv, budget, chan);
3585
- if (ch->has_rx)
3586
- rx_done = stmmac_rx(priv, budget, chan);
4120
+ work_done = stmmac_rx(priv, budget, chan);
4121
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
4122
+ unsigned long flags;
35874123
3588
- work_done = max(rx_done, tx_done);
4124
+ spin_lock_irqsave(&ch->lock, flags);
4125
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4126
+ spin_unlock_irqrestore(&ch->lock, flags);
4127
+ }
4128
+
4129
+ return work_done;
4130
+}
4131
+
4132
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4133
+{
4134
+ struct stmmac_channel *ch =
4135
+ container_of(napi, struct stmmac_channel, tx_napi);
4136
+ struct stmmac_priv *priv = ch->priv_data;
4137
+ u32 chan = ch->index;
4138
+ int work_done;
4139
+
4140
+ priv->xstats.napi_poll++;
4141
+
4142
+ work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
35894143 work_done = min(work_done, budget);
35904144
35914145 if (work_done < budget && napi_complete_done(napi, work_done)) {
3592
- int stat;
4146
+ unsigned long flags;
35934147
3594
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3595
- stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3596
- &priv->xstats, chan);
3597
- if (stat && napi_reschedule(napi))
3598
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
4148
+ spin_lock_irqsave(&ch->lock, flags);
4149
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4150
+ spin_unlock_irqrestore(&ch->lock, flags);
35994151 }
36004152
36014153 return work_done;
....@@ -3604,12 +4156,13 @@
36044156 /**
36054157 * stmmac_tx_timeout
36064158 * @dev : Pointer to net device structure
4159
+ * @txqueue: the index of the hanging transmit queue
36074160 * Description: this function is called when a packet transmission fails to
36084161 * complete within a reasonable time. The driver will mark the error in the
36094162 * netdev structure and arrange for the device to be reset to a sane state
36104163 * in order to transmit a new packet.
36114164 */
3612
-static void stmmac_tx_timeout(struct net_device *dev)
4165
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
36134166 {
36144167 struct stmmac_priv *priv = netdev_priv(dev);
36154168
....@@ -3706,6 +4259,8 @@
37064259 netdev_features_t features)
37074260 {
37084261 struct stmmac_priv *priv = netdev_priv(netdev);
4262
+ bool sph_en;
4263
+ u32 chan;
37094264
37104265 /* Keep the COE Type in case of csum is supporting */
37114266 if (features & NETIF_F_RXCSUM)
....@@ -3716,6 +4271,10 @@
37164271 * fixed in case of issue.
37174272 */
37184273 stmmac_rx_ipc(priv, priv->hw);
4274
+
4275
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4276
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4277
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
37194278
37204279 return 0;
37214280 }
....@@ -3757,7 +4316,6 @@
37574316 /* To handle GMAC own interrupts */
37584317 if ((priv->plat->has_gmac) || xmac) {
37594318 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3760
- int mtl_status;
37614319
37624320 if (unlikely(status)) {
37634321 /* For LPI we need to save the tx status */
....@@ -3768,17 +4326,8 @@
37684326 }
37694327
37704328 for (queue = 0; queue < queues_count; queue++) {
3771
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3772
-
3773
- mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3774
- queue);
3775
- if (mtl_status != -EINVAL)
3776
- status |= mtl_status;
3777
-
3778
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3779
- stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3780
- rx_q->rx_tail_addr,
3781
- queue);
4329
+ status = stmmac_host_mtl_irq_status(priv, priv->hw,
4330
+ queue);
37824331 }
37834332
37844333 /* PCS link status */
....@@ -3819,6 +4368,7 @@
38194368 */
38204369 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38214370 {
4371
+ struct stmmac_priv *priv = netdev_priv(dev);
38224372 int ret = -EOPNOTSUPP;
38234373
38244374 if (!netif_running(dev))
....@@ -3828,9 +4378,7 @@
38284378 case SIOCGMIIPHY:
38294379 case SIOCGMIIREG:
38304380 case SIOCSMIIREG:
3831
- if (!dev->phydev)
3832
- return -EINVAL;
3833
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
4381
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
38344382 break;
38354383 case SIOCSHWTSTAMP:
38364384 ret = stmmac_hwtstamp_set(dev, rq);
....@@ -3851,12 +4399,17 @@
38514399 struct stmmac_priv *priv = cb_priv;
38524400 int ret = -EOPNOTSUPP;
38534401
4402
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4403
+ return ret;
4404
+
38544405 stmmac_disable_all_queues(priv);
38554406
38564407 switch (type) {
38574408 case TC_SETUP_CLSU32:
3858
- if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3859
- ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4409
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4410
+ break;
4411
+ case TC_SETUP_CLSFLOWER:
4412
+ ret = stmmac_tc_setup_cls(priv, priv, type_data);
38604413 break;
38614414 default:
38624415 break;
....@@ -3866,23 +4419,7 @@
38664419 return ret;
38674420 }
38684421
3869
-static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3870
- struct tc_block_offload *f)
3871
-{
3872
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3873
- return -EOPNOTSUPP;
3874
-
3875
- switch (f->command) {
3876
- case TC_BLOCK_BIND:
3877
- return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3878
- priv, priv, f->extack);
3879
- case TC_BLOCK_UNBIND:
3880
- tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3881
- return 0;
3882
- default:
3883
- return -EOPNOTSUPP;
3884
- }
3885
-}
4422
+static LIST_HEAD(stmmac_block_cb_list);
38864423
38874424 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
38884425 void *type_data)
....@@ -3891,17 +4428,23 @@
38914428
38924429 switch (type) {
38934430 case TC_SETUP_BLOCK:
3894
- return stmmac_setup_tc_block(priv, type_data);
4431
+ return flow_block_cb_setup_simple(type_data,
4432
+ &stmmac_block_cb_list,
4433
+ stmmac_setup_tc_block_cb,
4434
+ priv, priv, true);
38954435 case TC_SETUP_QDISC_CBS:
38964436 return stmmac_tc_setup_cbs(priv, priv, type_data);
4437
+ case TC_SETUP_QDISC_TAPRIO:
4438
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
4439
+ case TC_SETUP_QDISC_ETF:
4440
+ return stmmac_tc_setup_etf(priv, priv, type_data);
38974441 default:
38984442 return -EOPNOTSUPP;
38994443 }
39004444 }
39014445
39024446 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
3903
- struct net_device *sb_dev,
3904
- select_queue_fallback_t fallback)
4447
+ struct net_device *sb_dev)
39054448 {
39064449 int gso = skb_shinfo(skb)->gso_type;
39074450
....@@ -3915,7 +4458,7 @@
39154458 return 0;
39164459 }
39174460
3918
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
4461
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
39194462 }
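A small restatement of the queue selection above: GSO traffic is pinned to queue 0 (the one the driver programs for TSO), everything else is spread by the stack's pick modulo the number of real TX queues. select_queue() here is a hypothetical helper, not the netdev op itself.

#include <stdbool.h>
#include <stdio.h>

static unsigned int select_queue(bool is_gso, unsigned int stack_pick,
				 unsigned int real_num_tx_queues)
{
	if (is_gso)
		return 0;	/* TSO/USO capable queue */
	return stack_pick % real_num_tx_queues;
}

int main(void)
{
	printf("%u\n", select_queue(false, 7, 4));	/* -> 3 */
	printf("%u\n", select_queue(true, 7, 4));	/* -> 0 */
	return 0;
}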
39204463
39214464 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
....@@ -3923,11 +4466,20 @@
39234466 struct stmmac_priv *priv = netdev_priv(ndev);
39244467 int ret = 0;
39254468
4469
+ ret = pm_runtime_get_sync(priv->device);
4470
+ if (ret < 0) {
4471
+ pm_runtime_put_noidle(priv->device);
4472
+ return ret;
4473
+ }
4474
+
39264475 ret = eth_mac_addr(ndev, addr);
39274476 if (ret)
3928
- return ret;
4477
+ goto set_mac_error;
39294478
39304479 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4480
+
4481
+set_mac_error:
4482
+ pm_runtime_put(priv->device);
39314483
39324484 return ret;
39334485 }
....@@ -3936,24 +4488,27 @@
39364488 static struct dentry *stmmac_fs_dir;
39374489
39384490 static void sysfs_display_ring(void *head, int size, int extend_desc,
3939
- struct seq_file *seq)
4491
+ struct seq_file *seq, dma_addr_t dma_phy_addr)
39404492 {
39414493 int i;
39424494 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
39434495 struct dma_desc *p = (struct dma_desc *)head;
4496
+ dma_addr_t dma_addr;
39444497
39454498 for (i = 0; i < size; i++) {
39464499 if (extend_desc) {
3947
- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3948
- i, (unsigned int)virt_to_phys(ep),
4500
+ dma_addr = dma_phy_addr + i * sizeof(*ep);
4501
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4502
+ i, &dma_addr,
39494503 le32_to_cpu(ep->basic.des0),
39504504 le32_to_cpu(ep->basic.des1),
39514505 le32_to_cpu(ep->basic.des2),
39524506 le32_to_cpu(ep->basic.des3));
39534507 ep++;
39544508 } else {
3955
- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3956
- i, (unsigned int)virt_to_phys(p),
4509
+ dma_addr = dma_phy_addr + i * sizeof(*p);
4510
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4511
+ i, &dma_addr,
39574512 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
39584513 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
39594514 p++;
....@@ -3962,7 +4517,7 @@
39624517 }
39634518 }
39644519
3965
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
4520
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
39664521 {
39674522 struct net_device *dev = seq->private;
39684523 struct stmmac_priv *priv = netdev_priv(dev);
....@@ -3981,11 +4536,11 @@
39814536 if (priv->extend_desc) {
39824537 seq_printf(seq, "Extended descriptor ring:\n");
39834538 sysfs_display_ring((void *)rx_q->dma_erx,
3984
- DMA_RX_SIZE, 1, seq);
4539
+ priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
39854540 } else {
39864541 seq_printf(seq, "Descriptor ring:\n");
39874542 sysfs_display_ring((void *)rx_q->dma_rx,
3988
- DMA_RX_SIZE, 0, seq);
4543
+ priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
39894544 }
39904545 }
39914546
....@@ -3997,33 +4552,19 @@
39974552 if (priv->extend_desc) {
39984553 seq_printf(seq, "Extended descriptor ring:\n");
39994554 sysfs_display_ring((void *)tx_q->dma_etx,
4000
- DMA_TX_SIZE, 1, seq);
4001
- } else {
4555
+ priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4556
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
40024557 seq_printf(seq, "Descriptor ring:\n");
40034558 sysfs_display_ring((void *)tx_q->dma_tx,
4004
- DMA_TX_SIZE, 0, seq);
4559
+ priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
40054560 }
40064561 }
40074562
40084563 return 0;
40094564 }
4565
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
40104566
4011
-static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
4012
-{
4013
- return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
4014
-}
4015
-
4016
-/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
4017
-
4018
-static const struct file_operations stmmac_rings_status_fops = {
4019
- .owner = THIS_MODULE,
4020
- .open = stmmac_sysfs_ring_open,
4021
- .read = seq_read,
4022
- .llseek = seq_lseek,
4023
- .release = single_release,
4024
-};
4025
-
4026
-static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
4567
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
40274568 {
40284569 struct net_device *dev = seq->private;
40294570 struct stmmac_priv *priv = netdev_priv(dev);
....@@ -4081,64 +4622,94 @@
40814622 priv->dma_cap.number_rx_channel);
40824623 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
40834624 priv->dma_cap.number_tx_channel);
4625
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4626
+ priv->dma_cap.number_rx_queues);
4627
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4628
+ priv->dma_cap.number_tx_queues);
40844629 seq_printf(seq, "\tEnhanced descriptors: %s\n",
40854630 (priv->dma_cap.enh_desc) ? "Y" : "N");
4086
-
4631
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4632
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4633
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4634
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4635
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4636
+ priv->dma_cap.pps_out_num);
4637
+ seq_printf(seq, "\tSafety Features: %s\n",
4638
+ priv->dma_cap.asp ? "Y" : "N");
4639
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
4640
+ priv->dma_cap.frpsel ? "Y" : "N");
4641
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
4642
+ priv->dma_cap.addr64);
4643
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
4644
+ priv->dma_cap.rssen ? "Y" : "N");
4645
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4646
+ priv->dma_cap.vlhash ? "Y" : "N");
4647
+ seq_printf(seq, "\tSplit Header: %s\n",
4648
+ priv->dma_cap.sphen ? "Y" : "N");
4649
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4650
+ priv->dma_cap.vlins ? "Y" : "N");
4651
+ seq_printf(seq, "\tDouble VLAN: %s\n",
4652
+ priv->dma_cap.dvlan ? "Y" : "N");
4653
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4654
+ priv->dma_cap.l3l4fnum);
4655
+ seq_printf(seq, "\tARP Offloading: %s\n",
4656
+ priv->dma_cap.arpoffsel ? "Y" : "N");
4657
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4658
+ priv->dma_cap.estsel ? "Y" : "N");
4659
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4660
+ priv->dma_cap.fpesel ? "Y" : "N");
4661
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4662
+ priv->dma_cap.tbssel ? "Y" : "N");
40874663 return 0;
40884664 }
4665
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
40894666
4090
-static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4667
+/* Use network device events to rename debugfs file entries.
4668
+ */
4669
+static int stmmac_device_event(struct notifier_block *unused,
4670
+ unsigned long event, void *ptr)
40914671 {
4092
- return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4672
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4673
+ struct stmmac_priv *priv = netdev_priv(dev);
4674
+
4675
+ if (dev->netdev_ops != &stmmac_netdev_ops)
4676
+ goto done;
4677
+
4678
+ switch (event) {
4679
+ case NETDEV_CHANGENAME:
4680
+ if (priv->dbgfs_dir)
4681
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4682
+ priv->dbgfs_dir,
4683
+ stmmac_fs_dir,
4684
+ dev->name);
4685
+ break;
4686
+ }
4687
+done:
4688
+ return NOTIFY_DONE;
40934689 }
40944690
4095
-static const struct file_operations stmmac_dma_cap_fops = {
4096
- .owner = THIS_MODULE,
4097
- .open = stmmac_sysfs_dma_cap_open,
4098
- .read = seq_read,
4099
- .llseek = seq_lseek,
4100
- .release = single_release,
4691
+static struct notifier_block stmmac_notifier = {
4692
+ .notifier_call = stmmac_device_event,
41014693 };
41024694
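With the notifier block in place, stmmac_init() at the bottom of this file calls register_netdevice_notifier(&stmmac_notifier) under CONFIG_DEBUG_FS, and stmmac_exit() unregisters it. From then on, a rename such as "ip link set dev eth0 name lan0" raises NETDEV_CHANGENAME and stmmac_device_event() moves the per-device directory, e.g. /sys/kernel/debug/stmmaceth/eth0 becomes /sys/kernel/debug/stmmaceth/lan0.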
4103
-static int stmmac_init_fs(struct net_device *dev)
4695
+static void stmmac_init_fs(struct net_device *dev)
41044696 {
41054697 struct stmmac_priv *priv = netdev_priv(dev);
4698
+
4699
+ rtnl_lock();
41064700
41074701 /* Create per netdev entries */
41084702 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
41094703
4110
- if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4111
- netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4112
-
4113
- return -ENOMEM;
4114
- }
4115
-
41164704 /* Entry to report DMA RX/TX rings */
4117
- priv->dbgfs_rings_status =
4118
- debugfs_create_file("descriptors_status", 0444,
4119
- priv->dbgfs_dir, dev,
4120
- &stmmac_rings_status_fops);
4121
-
4122
- if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4123
- netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4124
- debugfs_remove_recursive(priv->dbgfs_dir);
4125
-
4126
- return -ENOMEM;
4127
- }
4705
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4706
+ &stmmac_rings_status_fops);
41284707
41294708 /* Entry to report the DMA HW features */
4130
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4131
- priv->dbgfs_dir,
4132
- dev, &stmmac_dma_cap_fops);
4709
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4710
+ &stmmac_dma_cap_fops);
41334711
4134
- if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4135
- netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4136
- debugfs_remove_recursive(priv->dbgfs_dir);
4137
-
4138
- return -ENOMEM;
4139
- }
4140
-
4141
- return 0;
4712
+ rtnl_unlock();
41424713 }
41434714
41444715 static void stmmac_exit_fs(struct net_device *dev)
....@@ -4148,6 +4719,111 @@
41484719 debugfs_remove_recursive(priv->dbgfs_dir);
41494720 }
41504721 #endif /* CONFIG_DEBUG_FS */
4722
+
4723
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
4724
+{
4725
+ unsigned char *data = (unsigned char *)&vid_le;
4726
+ unsigned char data_byte = 0;
4727
+ u32 crc = ~0x0;
4728
+ u32 temp = 0;
4729
+ int i, bits;
4730
+
4731
+ bits = get_bitmask_order(VLAN_VID_MASK);
4732
+ for (i = 0; i < bits; i++) {
4733
+ if ((i % 8) == 0)
4734
+ data_byte = data[i / 8];
4735
+
4736
+ temp = ((crc & 1) ^ data_byte) & 1;
4737
+ crc >>= 1;
4738
+ data_byte >>= 1;
4739
+
4740
+ if (temp)
4741
+ crc ^= 0xedb88320;
4742
+ }
4743
+
4744
+ return crc;
4745
+}
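stmmac_vid_crc32_le() is a bit-serial CRC-32 (reflected polynomial 0xedb88320) over the 12 valid VID bits; stmmac_vlan_update() below then computes bitrev32(~crc) >> 28, so the top nibble of the finalized CRC selects one of the 16 bits in the hardware VLAN hash filter. A hypothetical helper (not part of the driver) that makes the mapping explicit:

	static u32 stmmac_vlan_hash_bit(u16 vid)
	{
		__le16 vid_le = cpu_to_le16(vid & VLAN_VID_MASK);
		u32 nibble = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;

		return BIT(nibble);	/* one of the 16 hash-filter bits */
	}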
4746
+
4747
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4748
+{
4749
+ u32 crc, hash = 0;
4750
+ __le16 pmatch = 0;
4751
+ int count = 0;
4752
+ u16 vid = 0;
4753
+
4754
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4755
+ __le16 vid_le = cpu_to_le16(vid);
4756
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4757
+ hash |= (1 << crc);
4758
+ count++;
4759
+ }
4760
+
4761
+ if (!priv->dma_cap.vlhash) {
4762
+ if (count > 2) /* VID = 0 always passes filter */
4763
+ return -EOPNOTSUPP;
4764
+
4765
+ pmatch = cpu_to_le16(vid);
4766
+ hash = 0;
4767
+ }
4768
+
4769
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4770
+}
4771
+
4772
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4773
+{
4774
+ struct stmmac_priv *priv = netdev_priv(ndev);
4775
+ bool is_double = false;
4776
+ int ret;
4777
+
4778
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
4779
+ is_double = true;
4780
+
4781
+ set_bit(vid, priv->active_vlans);
4782
+ ret = stmmac_vlan_update(priv, is_double);
4783
+ if (ret) {
4784
+ clear_bit(vid, priv->active_vlans);
4785
+ return ret;
4786
+ }
4787
+
4788
+ if (priv->hw->num_vlan) {
4789
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4790
+ if (ret)
4791
+ return ret;
4792
+ }
4793
+
4794
+ return 0;
4795
+}
4796
+
4797
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4798
+{
4799
+ struct stmmac_priv *priv = netdev_priv(ndev);
4800
+ bool is_double = false;
4801
+ int ret;
4802
+
4803
+ ret = pm_runtime_get_sync(priv->device);
4804
+ if (ret < 0) {
4805
+ pm_runtime_put_noidle(priv->device);
4806
+ return ret;
4807
+ }
4808
+
4809
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
4810
+ is_double = true;
4811
+
4812
+ clear_bit(vid, priv->active_vlans);
4813
+
4814
+ if (priv->hw->num_vlan) {
4815
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4816
+ if (ret)
4817
+ goto del_vlan_error;
4818
+ }
4819
+
4820
+ ret = stmmac_vlan_update(priv, is_double);
4821
+
4822
+del_vlan_error:
4823
+ pm_runtime_put(priv->device);
4824
+
4825
+ return ret;
4826
+}
41514827
41524828 static const struct net_device_ops stmmac_netdev_ops = {
41534829 .ndo_open = stmmac_open,
....@@ -4165,6 +4841,8 @@
41654841 .ndo_poll_controller = stmmac_poll_controller,
41664842 #endif
41674843 .ndo_set_mac_address = stmmac_set_mac_address,
4844
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4845
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
41684846 };
41694847
41704848 static void stmmac_reset_subtask(struct stmmac_priv *priv)
....@@ -4183,7 +4861,7 @@
41834861
41844862 set_bit(STMMAC_DOWN, &priv->state);
41854863 dev_close(priv->dev);
4186
- dev_open(priv->dev);
4864
+ dev_open(priv->dev, NULL);
41874865 clear_bit(STMMAC_DOWN, &priv->state);
41884866 clear_bit(STMMAC_RESETING, &priv->state);
41894867 rtnl_unlock();
....@@ -4233,6 +4911,12 @@
42334911 priv->plat->enh_desc = priv->dma_cap.enh_desc;
42344912 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
42354913 priv->hw->pmt = priv->plat->pmt;
4914
+ if (priv->dma_cap.hash_tb_sz) {
4915
+ priv->hw->multicast_filter_bins =
4916
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
4917
+ priv->hw->mcast_bits_log2 =
4918
+ ilog2(priv->hw->multicast_filter_bins);
4919
+ }
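Worked example for the sizing above (the hash_tb_sz values are illustrative): hash_tb_sz = 1 gives BIT(1) << 5 = 64 hash bins with mcast_bits_log2 = 6, hash_tb_sz = 2 gives 128 bins / 7 bits, and hash_tb_sz = 3 gives 256 bins / 8 bits. The set_filter paths then pick a bin from the top bits of the address CRC, roughly (dwmac4-style sketch, stated as an assumption):

	u32 bin = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >>
		  (32 - priv->hw->mcast_bits_log2);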
42364920
42374921 /* TXCOE doesn't work in thresh DMA mode */
42384922 if (priv->plat->force_thresh_dma_mode)
....@@ -4269,6 +4953,9 @@
42694953 if (priv->dma_cap.tsoen)
42704954 dev_info(priv->device, "TSO supported\n");
42714955
4956
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4957
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4958
+
42724959 /* Run HW quirks, if any */
42734960 if (priv->hwif_quirks) {
42744961 ret = priv->hwif_quirks(priv);
....@@ -4291,6 +4978,92 @@
42914978 return 0;
42924979 }
42934980
4981
+static void stmmac_napi_add(struct net_device *dev)
4982
+{
4983
+ struct stmmac_priv *priv = netdev_priv(dev);
4984
+ u32 queue, maxq;
4985
+
4986
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4987
+
4988
+ for (queue = 0; queue < maxq; queue++) {
4989
+ struct stmmac_channel *ch = &priv->channel[queue];
4990
+ int rx_budget = ((priv->plat->dma_rx_size < NAPI_POLL_WEIGHT) &&
4991
+ (priv->plat->dma_rx_size > 0)) ?
4992
+ priv->plat->dma_rx_size : NAPI_POLL_WEIGHT;
4993
+ int tx_budget = ((priv->plat->dma_tx_size < NAPI_POLL_WEIGHT) &&
4994
+ (priv->plat->dma_tx_size > 0)) ?
4995
+ priv->plat->dma_tx_size : NAPI_POLL_WEIGHT;
4996
+
4997
+ ch->priv_data = priv;
4998
+ ch->index = queue;
4999
+ spin_lock_init(&ch->lock);
5000
+
5001
+ if (queue < priv->plat->rx_queues_to_use) {
5002
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
5003
+ rx_budget);
5004
+ }
5005
+ if (queue < priv->plat->tx_queues_to_use) {
5006
+ netif_tx_napi_add(dev, &ch->tx_napi,
5007
+ stmmac_napi_poll_tx, tx_budget);
5008
+ }
5009
+ }
5010
+}
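The two ternaries above are just a guarded min(): when the configured ring is shorter than NAPI_POLL_WEIGHT (64), the NAPI budget is capped at the ring size so one poll can never claim more descriptors than the ring holds. Equivalently:

	int rx_budget = priv->plat->dma_rx_size > 0 ?
			min_t(int, priv->plat->dma_rx_size, NAPI_POLL_WEIGHT) :
			NAPI_POLL_WEIGHT;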
5011
+
5012
+static void stmmac_napi_del(struct net_device *dev)
5013
+{
5014
+ struct stmmac_priv *priv = netdev_priv(dev);
5015
+ u32 queue, maxq;
5016
+
5017
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
5018
+
5019
+ for (queue = 0; queue < maxq; queue++) {
5020
+ struct stmmac_channel *ch = &priv->channel[queue];
5021
+
5022
+ if (queue < priv->plat->rx_queues_to_use)
5023
+ netif_napi_del(&ch->rx_napi);
5024
+ if (queue < priv->plat->tx_queues_to_use)
5025
+ netif_napi_del(&ch->tx_napi);
5026
+ }
5027
+}
5028
+
5029
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
5030
+{
5031
+ struct stmmac_priv *priv = netdev_priv(dev);
5032
+ int ret = 0;
5033
+
5034
+ if (netif_running(dev))
5035
+ stmmac_release(dev);
5036
+
5037
+ stmmac_napi_del(dev);
5038
+
5039
+ priv->plat->rx_queues_to_use = rx_cnt;
5040
+ priv->plat->tx_queues_to_use = tx_cnt;
5041
+
5042
+ stmmac_napi_add(dev);
5043
+
5044
+ if (netif_running(dev))
5045
+ ret = stmmac_open(dev);
5046
+
5047
+ return ret;
5048
+}
5049
+
5050
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
5051
+{
5052
+ struct stmmac_priv *priv = netdev_priv(dev);
5053
+ int ret = 0;
5054
+
5055
+ if (netif_running(dev))
5056
+ stmmac_release(dev);
5057
+
5058
+ priv->dma_rx_size = rx_size;
5059
+ priv->dma_tx_size = tx_size;
5060
+
5061
+ if (netif_running(dev))
5062
+ ret = stmmac_open(dev);
5063
+
5064
+ return ret;
5065
+}
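These two helpers back the ethtool reconfiguration paths (assuming the usual wiring in stmmac_ethtool.c): "ethtool -L eth0 rx N tx N" lands in stmmac_reinit_queues() and "ethtool -G eth0 rx N tx N" in stmmac_reinit_ringparam(). Both close the interface if it is running, swap the parameters, and reopen it so the rings and NAPI contexts are rebuilt with the new values.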
5066
+
42945067 /**
42955068 * stmmac_dvr_probe
42965069 * @device: device pointer
....@@ -4307,12 +5080,11 @@
43075080 {
43085081 struct net_device *ndev = NULL;
43095082 struct stmmac_priv *priv;
4310
- u32 queue, maxq;
4311
- int ret = 0;
5083
+ u32 rxq;
5084
+ int i, ret = 0;
43125085
4313
- ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4314
- MTL_MAX_TX_QUEUES,
4315
- MTL_MAX_RX_QUEUES);
5086
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
5087
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
43165088 if (!ndev)
43175089 return -ENOMEM;
43185090
....@@ -4332,7 +5104,7 @@
43325104 priv->wol_irq = res->wol_irq;
43335105 priv->lpi_irq = res->lpi_irq;
43345106
4335
- if (res->mac)
5107
+ if (!IS_ERR_OR_NULL(res->mac))
43365108 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
43375109
43385110 dev_set_drvdata(device, priv->dev);
....@@ -4344,8 +5116,7 @@
43445116 priv->wq = create_singlethread_workqueue("stmmac_wq");
43455117 if (!priv->wq) {
43465118 dev_err(priv->device, "failed to create workqueue\n");
4347
- ret = -ENOMEM;
4348
- goto error_wq;
5119
+ return -ENOMEM;
43495120 }
43505121
43515122 INIT_WORK(&priv->service_task, stmmac_service_task);
....@@ -4373,10 +5144,6 @@
43735144
43745145 stmmac_check_ether_addr(priv);
43755146
4376
- /* Configure real RX and TX queues */
4377
- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4378
- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4379
-
43805147 ndev->netdev_ops = &stmmac_netdev_ops;
43815148
43825149 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
....@@ -4394,20 +5161,79 @@
43945161 priv->tso = true;
43955162 dev_info(priv->device, "TSO feature enabled\n");
43965163 }
5164
+
5165
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
5166
+ ndev->hw_features |= NETIF_F_GRO;
5167
+ if (!priv->plat->sph_disable) {
5168
+ priv->sph = true;
5169
+ dev_info(priv->device, "SPH feature enabled\n");
5170
+ }
5171
+ }
5172
+
5173
+ /* The current IP register MAC_HW_Feature1[ADDR64] only defines
5174
+ * 32/40/64-bit widths, but some SoCs support other widths; the
5175
+ * i.MX8MP, for example, supports 34 bits, reported as 40 bits in MAC_HW_Feature1[ADDR64].
5176
+ * So overwrite dma_cap.addr64 according to the real HW design.
5177
+ */
5178
+ if (priv->plat->addr64)
5179
+ priv->dma_cap.addr64 = priv->plat->addr64;
5180
+
5181
+ if (priv->dma_cap.addr64) {
5182
+ ret = dma_set_mask_and_coherent(device,
5183
+ DMA_BIT_MASK(priv->dma_cap.addr64));
5184
+ if (!ret) {
5185
+ dev_info(priv->device, "Using %d bits DMA width\n",
5186
+ priv->dma_cap.addr64);
5187
+
5188
+ /*
5189
+ * If more than 32 bits can be addressed, make sure to
5190
+ * enable enhanced addressing mode.
5191
+ */
5192
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5193
+ priv->plat->dma_cfg->eame = true;
5194
+ } else {
5195
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5196
+ if (ret) {
5197
+ dev_err(priv->device, "Failed to set DMA Mask\n");
5198
+ goto error_hw_init;
5199
+ }
5200
+
5201
+ priv->dma_cap.addr64 = 32;
5202
+ }
5203
+ }
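This is the usual "try the widest mask, then fall back to 32-bit" DMA setup; condensed (a 40-bit capable IP assumed for illustration):

	if (dma_set_mask_and_coherent(device, DMA_BIT_MASK(40)))
		dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));

If the platform rejects the wide mask, addr64 is forced back to 32 so the rest of the driver sizes DMA addresses consistently, and the enhanced addressing mode (eame) is only enabled when more than 32 bits are actually usable.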
5204
+
43975205 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
43985206 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
43995207 #ifdef STMMAC_VLAN_TAG_USED
44005208 /* Both mac100 and gmac support receive VLAN tag detection */
44015209 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5210
+ if (priv->dma_cap.vlhash) {
5211
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5212
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5213
+ }
5214
+ if (priv->dma_cap.vlins) {
5215
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5216
+ if (priv->dma_cap.dvlan)
5217
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5218
+ }
44025219 #endif
44035220 priv->msg_enable = netif_msg_init(debug, default_msg_level);
44045221
5222
+ /* Initialize RSS */
5223
+ rxq = priv->plat->rx_queues_to_use;
5224
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
5225
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5226
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
5227
+
5228
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
5229
+ ndev->features |= NETIF_F_RXHASH;
5230
+
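ethtool_rxfh_indir_default(i, rxq) is simply i % rxq, so with rxq = 4 the default RSS indirection table reads 0, 1, 2, 3, 0, 1, 2, 3, ... spreading hash buckets round-robin across the enabled RX queues until userspace overrides it with "ethtool -X".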
44055231 /* MTU range: 46 - hw-specific max */
44065232 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4407
- if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4408
- ndev->max_mtu = JUMBO_LEN;
4409
- else if (priv->plat->has_xgmac)
5233
+ if (priv->plat->has_xgmac)
44105234 ndev->max_mtu = XGMAC_JUMBO_LEN;
5235
+ else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
5236
+ ndev->max_mtu = JUMBO_LEN;
44115237 else
44125238 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
44135239 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
....@@ -4425,22 +5251,7 @@
44255251 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
44265252
44275253 /* Setup channels NAPI */
4428
- maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4429
-
4430
- for (queue = 0; queue < maxq; queue++) {
4431
- struct stmmac_channel *ch = &priv->channel[queue];
4432
-
4433
- ch->priv_data = priv;
4434
- ch->index = queue;
4435
-
4436
- if (queue < priv->plat->rx_queues_to_use)
4437
- ch->has_rx = true;
4438
- if (queue < priv->plat->tx_queues_to_use)
4439
- ch->has_tx = true;
4440
-
4441
- netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4442
- NAPI_POLL_WEIGHT);
4443
- }
5254
+ stmmac_napi_add(ndev);
44445255
44455256 mutex_init(&priv->lock);
44465257
....@@ -4450,15 +5261,18 @@
44505261 * set the MDC clock dynamically according to the csr actual
44515262 * clock input.
44525263 */
4453
- if (!priv->plat->clk_csr)
4454
- stmmac_clk_csr_set(priv);
4455
- else
5264
+ if (priv->plat->clk_csr >= 0)
44565265 priv->clk_csr = priv->plat->clk_csr;
5266
+ else
5267
+ stmmac_clk_csr_set(priv);
44575268
44585269 stmmac_check_pcs_mode(priv);
44595270
4460
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4461
- priv->hw->pcs != STMMAC_PCS_TBI &&
5271
+ pm_runtime_get_noresume(device);
5272
+ pm_runtime_set_active(device);
5273
+ pm_runtime_enable(device);
5274
+
5275
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
44625276 priv->hw->pcs != STMMAC_PCS_RTBI) {
44635277 /* MDIO bus Registration */
44645278 ret = stmmac_mdio_register(ndev);
....@@ -4470,6 +5284,12 @@
44705284 }
44715285 }
44725286
5287
+ ret = stmmac_phy_setup(priv);
5288
+ if (ret) {
5289
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5290
+ goto error_phy_setup;
5291
+ }
5292
+
44735293 ret = register_netdev(ndev);
44745294 if (ret) {
44755295 dev_err(priv->device, "%s: ERROR %i registering the device\n",
....@@ -4478,29 +5298,29 @@
44785298 }
44795299
44805300 #ifdef CONFIG_DEBUG_FS
4481
- ret = stmmac_init_fs(ndev);
4482
- if (ret < 0)
4483
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4484
- __func__);
5301
+ stmmac_init_fs(ndev);
44855302 #endif
5303
+
5304
+ /* Let pm_runtime_put() disable the clocks.
5305
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
5306
+ */
5307
+ pm_runtime_put(device);
5308
+
5309
+ /* Vendor addition: apply the RTL8211F LED setup as a PHY fixup at init */
5310
+ phy_register_fixup_for_uid(RTL_8211F_PHY_ID, RTL_8211F_PHY_ID_MASK, rtl8211F_led_control);
44865311
44875312 return ret;
44885313
44895314 error_netdev_register:
4490
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4491
- priv->hw->pcs != STMMAC_PCS_TBI &&
5315
+ phylink_destroy(priv->phylink);
5316
+error_phy_setup:
5317
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
44925318 priv->hw->pcs != STMMAC_PCS_RTBI)
44935319 stmmac_mdio_unregister(ndev);
44945320 error_mdio_register:
4495
- for (queue = 0; queue < maxq; queue++) {
4496
- struct stmmac_channel *ch = &priv->channel[queue];
4497
-
4498
- netif_napi_del(&ch->napi);
4499
- }
5321
+ stmmac_napi_del(ndev);
45005322 error_hw_init:
45015323 destroy_workqueue(priv->wq);
4502
-error_wq:
4503
- free_netdev(ndev);
45045324
45055325 return ret;
45065326 }
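Because the netdev is now allocated with devm_alloc_etherdev_mqs(), it is freed automatically when the underlying device goes away; that is why the old error_wq label with its free_netdev() disappears here, and why stmmac_dvr_remove() below no longer calls free_netdev() either.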
....@@ -4519,25 +5339,30 @@
45195339
45205340 netdev_info(priv->dev, "%s: removing driver", __func__);
45215341
4522
-#ifdef CONFIG_DEBUG_FS
4523
- stmmac_exit_fs(ndev);
4524
-#endif
45255342 stmmac_stop_all_dma(priv);
4526
-
45275343 stmmac_mac_set(priv, priv->ioaddr, false);
45285344 netif_carrier_off(ndev);
45295345 unregister_netdev(ndev);
5346
+
5347
+ /* Serdes power down needs to happen after the VLAN filter
5348
+ * is deleted, which is triggered by unregister_netdev().
5349
+ */
5350
+ if (priv->plat->serdes_powerdown)
5351
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5352
+
5353
+#ifdef CONFIG_DEBUG_FS
5354
+ stmmac_exit_fs(ndev);
5355
+#endif
5356
+ phylink_destroy(priv->phylink);
45305357 if (priv->plat->stmmac_rst)
45315358 reset_control_assert(priv->plat->stmmac_rst);
4532
- clk_disable_unprepare(priv->plat->pclk);
4533
- clk_disable_unprepare(priv->plat->stmmac_clk);
4534
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4535
- priv->hw->pcs != STMMAC_PCS_TBI &&
5359
+ pm_runtime_put(dev);
5360
+ pm_runtime_disable(dev);
5361
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
45365362 priv->hw->pcs != STMMAC_PCS_RTBI)
45375363 stmmac_mdio_unregister(ndev);
45385364 destroy_workqueue(priv->wq);
45395365 mutex_destroy(&priv->lock);
4540
- free_netdev(ndev);
45415366
45425367 return 0;
45435368 }
....@@ -4559,8 +5384,7 @@
45595384 if (!ndev || !netif_running(ndev))
45605385 return 0;
45615386
4562
- if (ndev->phydev)
4563
- phy_stop(ndev->phydev);
5387
+ phylink_mac_change(priv->phylink, false);
45645388
45655389 mutex_lock(&priv->lock);
45665390
....@@ -4579,31 +5403,38 @@
45795403 /* Stop TX/RX DMA */
45805404 stmmac_stop_all_dma(priv);
45815405
5406
+ if (priv->plat->serdes_powerdown)
5407
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5408
+
45825409 /* Enable Power down mode by programming the PMT regs */
4583
- if (device_may_wakeup(priv->device)) {
5410
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
45845411 stmmac_pmt(priv, priv->hw, priv->wolopts);
45855412 priv->irq_wake = 1;
45865413 } else {
5414
+ mutex_unlock(&priv->lock);
5415
+ rtnl_lock();
5416
+ if (device_may_wakeup(priv->device))
5417
+ phylink_speed_down(priv->phylink, false);
5418
+ if (priv->plat->integrated_phy_power)
5419
+ priv->plat->integrated_phy_power(priv->plat->bsp_priv,
5420
+ false);
5421
+ phylink_stop(priv->phylink);
5422
+ rtnl_unlock();
5423
+ mutex_lock(&priv->lock);
5424
+
45875425 stmmac_mac_set(priv, priv->ioaddr, false);
45885426 pinctrl_pm_select_sleep_state(priv->device);
4589
- /* Disable clock in case of PWM is off */
4590
- if (priv->plat->clk_ptp_ref)
4591
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
4592
- clk_disable_unprepare(priv->plat->pclk);
4593
- clk_disable_unprepare(priv->plat->stmmac_clk);
45945427 }
45955428 mutex_unlock(&priv->lock);
45965429
4597
- priv->oldlink = false;
45985430 priv->speed = SPEED_UNKNOWN;
4599
- priv->oldduplex = DUPLEX_UNKNOWN;
46005431 return 0;
46015432 }
46025433 EXPORT_SYMBOL_GPL(stmmac_suspend);
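Note the WoL split above: with MAC-level PMT the wake registers are armed via stmmac_pmt(), while PHY-based wakeup keeps the link alive but calls phylink_speed_down() to renegotiate the lowest supported speed, trading throughput for standby power. stmmac_resume() below undoes this with the matching phylink_speed_up().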
46035434
46045435 /**
46055436 * stmmac_reset_queues_param - reset queue parameters
4606
- * @dev: device pointer
5437
+ * @priv: private driver structure
46075438 */
46085439 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
46095440 {
....@@ -4624,6 +5455,8 @@
46245455 tx_q->cur_tx = 0;
46255456 tx_q->dirty_tx = 0;
46265457 tx_q->mss = 0;
5458
+
5459
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
46275460 }
46285461 }
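The added netdev_tx_reset_queue() also clears the queue's BQL (byte queue limit) accounting to match the rewound cur_tx/dirty_tx indices; without it, bytes counted as in-flight before a suspend or reset would never be completed against the fresh ring and BQL could throttle the queue indefinitely.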
46295462
....@@ -4637,34 +5470,49 @@
46375470 {
46385471 struct net_device *ndev = dev_get_drvdata(dev);
46395472 struct stmmac_priv *priv = netdev_priv(ndev);
5473
+ int ret;
46405474
46415475 if (!netif_running(ndev))
46425476 return 0;
46435477
4644
- printk("troy test %s start .... \n",__func__);
46455478 /* Power Down bit, into the PM register, is cleared
46465479 * automatically as soon as a magic packet or a Wake-up frame
46475480 * is received. Anyway, it's better to manually clear
46485481 * this bit because it can generate problems while resuming
46495482 * from another devices (e.g. serial console).
46505483 */
4651
- if (device_may_wakeup(priv->device)) {
5484
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
46525485 mutex_lock(&priv->lock);
46535486 stmmac_pmt(priv, priv->hw, 0);
46545487 mutex_unlock(&priv->lock);
46555488 priv->irq_wake = 0;
46565489 } else {
46575490 pinctrl_pm_select_default_state(priv->device);
4658
- /* enable the clk previously disabled */
4659
- clk_prepare_enable(priv->plat->stmmac_clk);
4660
- clk_prepare_enable(priv->plat->pclk);
4661
- if (priv->plat->clk_ptp_ref)
4662
- clk_prepare_enable(priv->plat->clk_ptp_ref);
46635491 /* reset the phy so that it's ready */
46645492 if (priv->mii)
46655493 stmmac_mdio_reset(priv->mii);
5494
+ if (priv->plat->integrated_phy_power)
5495
+ priv->plat->integrated_phy_power(priv->plat->bsp_priv,
5496
+ true);
46665497 }
46675498
5499
+ if (priv->plat->serdes_powerup) {
5500
+ ret = priv->plat->serdes_powerup(ndev,
5501
+ priv->plat->bsp_priv);
5502
+
5503
+ if (ret < 0)
5504
+ return ret;
5505
+ }
5506
+
5507
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5508
+ rtnl_lock();
5509
+ phylink_start(priv->phylink);
5510
+ /* We may have called phylink_speed_down before */
5511
+ phylink_speed_up(priv->phylink);
5512
+ rtnl_unlock();
5513
+ }
5514
+
5515
+ rtnl_lock();
46685516 mutex_lock(&priv->lock);
46695517
46705518 stmmac_reset_queues_param(priv);
....@@ -4672,20 +5520,26 @@
46725520 stmmac_free_tx_skbufs(priv);
46735521 stmmac_clear_descriptors(priv);
46745522
5523
+#if 1
5524
+ printk("ben -------resume add 2s delay time.\n");
5525
+ mdelay(2000);
5526
+
5527
+#endif
5528
+
46755529 stmmac_hw_setup(ndev, false);
4676
- stmmac_init_tx_coalesce(priv);
5530
+ stmmac_init_coalesce(priv);
46775531 stmmac_set_rx_mode(ndev);
5532
+
5533
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
46785534
46795535 stmmac_enable_all_queues(priv);
46805536
4681
- netif_device_attach(ndev);
4682
-
46835537 mutex_unlock(&priv->lock);
5538
+ rtnl_unlock();
46845539
4685
- if (ndev->phydev)
4686
- phy_start(ndev->phydev);
4687
- printk("troy test %s end .... \n",__func__);
4688
- rtl8211F_led_control(ndev->phydev);
5540
+ phylink_mac_change(priv->phylink, true);
5541
+
5542
+ netif_device_attach(ndev);
46895543
46905544 return 0;
46915545 }
....@@ -4697,7 +5551,7 @@
46975551 char *opt;
46985552
46995553 if (!str || !*str)
4700
- return -EINVAL;
5554
+ return 1;
47015555 while ((opt = strsep(&str, ",")) != NULL) {
47025556 if (!strncmp(opt, "debug:", 6)) {
47035557 if (kstrtoint(opt + 6, 0, &debug))
....@@ -4728,11 +5582,11 @@
47285582 goto err;
47295583 }
47305584 }
4731
- return 0;
5585
+ return 1;
47325586
47335587 err:
47345588 pr_err("%s: ERROR broken module parameter conversion", __func__);
4735
- return -EINVAL;
5589
+ return 1;
47365590 }
47375591
47385592 __setup("stmmaceth=", stmmac_cmdline_opt);
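A __setup() handler returns 1 to tell the early command-line parser the option was consumed; returning 0 would cause "stmmaceth=..." to be handed to init as an unknown argument or environment string, which is why even the parse-error path above now logs and returns 1. The general shape (hypothetical "foo=" option, sketch only):

	static int foo __initdata;

	static int __init foo_setup(char *str)
	{
		if (kstrtoint(str, 0, &foo))
			pr_err("foo: invalid value '%s'\n", str);
		return 1;	/* consumed either way */
	}
	__setup("foo=", foo_setup);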
....@@ -4742,16 +5596,9 @@
47425596 {
47435597 #ifdef CONFIG_DEBUG_FS
47445598 /* Create debugfs main directory if it doesn't exist yet */
4745
- if (!stmmac_fs_dir) {
5599
+ if (!stmmac_fs_dir)
47465600 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4747
-
4748
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4749
- pr_err("ERROR %s, debugfs create directory failed\n",
4750
- STMMAC_RESOURCE_NAME);
4751
-
4752
- return -ENOMEM;
4753
- }
4754
- }
5601
+ register_netdevice_notifier(&stmmac_notifier);
47555602 #endif
47565603
47575604 return 0;
....@@ -4760,6 +5607,7 @@
47605607 static void __exit stmmac_exit(void)
47615608 {
47625609 #ifdef CONFIG_DEBUG_FS
5610
+ unregister_netdevice_notifier(&stmmac_notifier);
47635611 debugfs_remove_recursive(stmmac_fs_dir);
47645612 #endif
47655613 }