forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*******************************************************************************
   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   ST Ethernet IPs are built around a Synopsys IP Core.
 
   Copyright(C) 2007-2011 STMicroelectronics Ltd
 
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 
@@ -38,6 +28,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/prefetch.h>
 #include <linux/pinctrl/consumer.h>
 #ifdef CONFIG_DEBUG_FS
@@ -45,6 +36,7 @@
 #include <linux/seq_file.h>
 #endif /* CONFIG_DEBUG_FS */
 #include <linux/net_tstamp.h>
+#include <linux/phylink.h>
 #include <linux/udp.h>
 #include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
@@ -54,6 +46,13 @@
 #include "dwmac1000.h"
 #include "dwxgmac2.h"
 #include "hwif.h"
+
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+				 PTP_TCR_TSCTRLSSR)
 
 #define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
@@ -72,10 +71,10 @@
 module_param(phyaddr, int, 0444);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
-#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
 
@@ -103,7 +102,7 @@
 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 module_param(eee_timer, int, 0644);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
 
 /* By default the driver will use the ring mode to manage tx and rx descriptors,
  * but allow user to force to use the chain instead of the ring
@@ -115,11 +114,34 @@
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static const struct net_device_ops stmmac_netdev_ops;
+static void stmmac_init_fs(struct net_device *dev);
 static void stmmac_exit_fs(struct net_device *dev);
 #endif
 
 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+	int ret = 0;
+
+	if (enabled) {
+		ret = clk_prepare_enable(priv->plat->stmmac_clk);
+		if (ret)
+			return ret;
+		ret = clk_prepare_enable(priv->plat->pclk);
+		if (ret) {
+			clk_disable_unprepare(priv->plat->stmmac_clk);
+			return ret;
+		}
+	} else {
+		clk_disable_unprepare(priv->plat->stmmac_clk);
+		clk_disable_unprepare(priv->plat->pclk);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
 
 /**
  * stmmac_verify_args - verify the driver parameters.
@@ -156,7 +178,10 @@
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_disable(&ch->napi);
+		if (queue < rx_queues_cnt)
+			napi_disable(&ch->rx_napi);
+		if (queue < tx_queues_cnt)
+			napi_disable(&ch->tx_napi);
 	}
 }
 
@@ -174,7 +199,10 @@
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_enable(&ch->napi);
+		if (queue < rx_queues_cnt)
+			napi_enable(&ch->rx_napi);
+		if (queue < tx_queues_cnt)
+			napi_enable(&ch->tx_napi);
 	}
 }
 
@@ -273,7 +301,7 @@
 	if (tx_q->dirty_tx > tx_q->cur_tx)
 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 	else
-		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
 
 	return avail;
 }
@@ -291,24 +319,9 @@
 	if (rx_q->dirty_rx <= rx_q->cur_rx)
 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 	else
-		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
 
 	return dirty;
-}
-
-/**
- * stmmac_hw_fix_mac_speed - callback for speed selection
- * @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuration
- * registers have to be set according to the link speed negotiated.
- */
-static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
-{
-	struct net_device *ndev = priv->dev;
-	struct phy_device *phydev = ndev->phydev;
-
-	if (likely(priv->plat->fix_mac_speed))
-		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 }
 
 /**
@@ -351,7 +364,7 @@
 
 /**
  * stmmac_eee_ctrl_timer - EEE TX SW timer.
- * @arg : data hook
+ * @t: timer_list struct containing private info
  * Description:
  *  if there is no data transfer and if we are not in LPI state,
  *  then MAC Transmitter can be moved to LPI state.
@@ -361,7 +374,7 @@
 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 
 	stmmac_enable_eee_mode(priv);
-	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 }
 
 /**
....@@ -374,67 +387,43 @@
374387 */
375388 bool stmmac_eee_init(struct stmmac_priv *priv)
376389 {
377
- struct net_device *ndev = priv->dev;
378
- int interface = priv->plat->interface;
379
- bool ret = false;
380
-
381
- if ((interface != PHY_INTERFACE_MODE_MII) &&
382
- (interface != PHY_INTERFACE_MODE_GMII) &&
383
- !phy_interface_mode_is_rgmii(interface))
384
- goto out;
390
+ int eee_tw_timer = priv->eee_tw_timer;
385391
386392 /* Using PCS we cannot dial with the phy registers at this stage
387393 * so we do not support extra feature like EEE.
388394 */
389
- if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
390
- (priv->hw->pcs == STMMAC_PCS_TBI) ||
391
- (priv->hw->pcs == STMMAC_PCS_RTBI))
392
- goto out;
395
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
396
+ priv->hw->pcs == STMMAC_PCS_RTBI)
397
+ return false;
393398
394
- /* MAC core supports the EEE feature. */
395
- if (priv->dma_cap.eee) {
396
- int tx_lpi_timer = priv->tx_lpi_timer;
399
+ /* Check if MAC core supports the EEE feature. */
400
+ if (!priv->dma_cap.eee)
401
+ return false;
397402
398
- /* Check if the PHY supports EEE */
399
- if (phy_init_eee(ndev->phydev, 1)) {
400
- /* To manage at run-time if the EEE cannot be supported
401
- * anymore (for example because the lp caps have been
402
- * changed).
403
- * In that case the driver disable own timers.
404
- */
405
- mutex_lock(&priv->lock);
406
- if (priv->eee_active) {
407
- netdev_dbg(priv->dev, "disable EEE\n");
408
- del_timer_sync(&priv->eee_ctrl_timer);
409
- stmmac_set_eee_timer(priv, priv->hw, 0,
410
- tx_lpi_timer);
411
- }
412
- priv->eee_active = 0;
413
- mutex_unlock(&priv->lock);
414
- goto out;
403
+ mutex_lock(&priv->lock);
404
+
405
+ /* Check if it needs to be deactivated */
406
+ if (!priv->eee_active) {
407
+ if (priv->eee_enabled) {
408
+ netdev_dbg(priv->dev, "disable EEE\n");
409
+ del_timer_sync(&priv->eee_ctrl_timer);
410
+ stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
415411 }
416
- /* Activate the EEE and start timers */
417
- mutex_lock(&priv->lock);
418
- if (!priv->eee_active) {
419
- priv->eee_active = 1;
420
- timer_setup(&priv->eee_ctrl_timer,
421
- stmmac_eee_ctrl_timer, 0);
422
- mod_timer(&priv->eee_ctrl_timer,
423
- STMMAC_LPI_T(eee_timer));
424
-
425
- stmmac_set_eee_timer(priv, priv->hw,
426
- STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
427
- }
428
- /* Set HW EEE according to the speed */
429
- stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
430
-
431
- ret = true;
432412 mutex_unlock(&priv->lock);
433
-
434
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
413
+ return false;
435414 }
436
-out:
437
- return ret;
415
+
416
+ if (priv->eee_active && !priv->eee_enabled) {
417
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
418
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
419
+ eee_tw_timer);
420
+ }
421
+
422
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
423
+
424
+ mutex_unlock(&priv->lock);
425
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
426
+ return true;
438427 }
439428
440429 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
....@@ -449,6 +438,7 @@
449438 struct dma_desc *p, struct sk_buff *skb)
450439 {
451440 struct skb_shared_hwtstamps shhwtstamp;
441
+ bool found = false;
452442 u64 ns = 0;
453443
454444 if (!priv->hwts_tx_en)
....@@ -460,9 +450,13 @@
460450
461451 /* check tx tstamp status */
462452 if (stmmac_get_tx_timestamp_status(priv, p)) {
463
- /* get the valid tstamp */
464453 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
454
+ found = true;
455
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
456
+ found = true;
457
+ }
465458
459
+ if (found) {
466460 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
467461 shhwtstamp.hwtstamp = ns_to_ktime(ns);
468462
....@@ -470,8 +464,6 @@
470464 /* pass tstamp to stack */
471465 skb_tstamp_tx(skb, &shhwtstamp);
472466 }
473
-
474
- return;
475467 }
476468
477469 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
....@@ -508,7 +500,6 @@
508500 }
509501 }
510502
511
-#ifdef CONFIG_STMMAC_PTP
512503 /**
513504 * stmmac_hwtstamp_set - control hardware timestamping.
514505 * @dev: device pointer.
....@@ -524,8 +515,6 @@
524515 {
525516 struct stmmac_priv *priv = netdev_priv(dev);
526517 struct hwtstamp_config config;
527
- struct timespec64 now;
528
- u64 temp = 0;
529518 u32 ptp_v2 = 0;
530519 u32 tstamp_all = 0;
531520 u32 ptp_over_ipv4_udp = 0;
....@@ -534,11 +523,6 @@
534523 u32 snap_type_sel = 0;
535524 u32 ts_master_en = 0;
536525 u32 ts_event_en = 0;
537
- u32 sec_inc = 0;
538
- u32 value = 0;
539
- bool xmac;
540
-
541
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
542526
543527 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
544528 netdev_alert(priv->dev, "No support for HW time stamping\n");
....@@ -644,7 +628,8 @@
644628 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
645629 ptp_v2 = PTP_TCR_TSVER2ENA;
646630 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
647
- ts_event_en = PTP_TCR_TSEVNTENA;
631
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
632
+ ts_event_en = PTP_TCR_TSEVNTENA;
648633 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
649634 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
650635 ptp_over_ethernet = PTP_TCR_TSIPENA;
....@@ -699,41 +684,16 @@
699684 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
700685 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
701686
702
- if (!priv->hwts_tx_en && !priv->hwts_rx_en)
703
- stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
704
- else {
705
- value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
706
- tstamp_all | ptp_v2 | ptp_over_ethernet |
707
- ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
708
- ts_master_en | snap_type_sel);
709
- stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
687
+ priv->systime_flags = STMMAC_HWTS_ACTIVE;
710688
711
- /* program Sub Second Increment reg */
712
- stmmac_config_sub_second_increment(priv,
713
- priv->ptpaddr, priv->plat->clk_ptp_rate,
714
- xmac, &sec_inc);
715
- temp = div_u64(1000000000ULL, sec_inc);
716
-
717
- /* Store sub second increment and flags for later use */
718
- priv->sub_second_inc = sec_inc;
719
- priv->systime_flags = value;
720
-
721
- /* calculate default added value:
722
- * formula is :
723
- * addend = (2^32)/freq_div_ratio;
724
- * where, freq_div_ratio = 1e9ns/sec_inc
725
- */
726
- temp = (u64)(temp << 32);
727
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
728
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
729
-
730
- /* initialize system time */
731
- ktime_get_real_ts64(&now);
732
-
733
- /* lower 32 bits of tv_sec are safe until y2106 */
734
- stmmac_init_systime(priv, priv->ptpaddr,
735
- (u32)now.tv_sec, now.tv_nsec);
689
+ if (priv->hwts_tx_en || priv->hwts_rx_en) {
690
+ priv->systime_flags |= tstamp_all | ptp_v2 |
691
+ ptp_over_ethernet | ptp_over_ipv6_udp |
692
+ ptp_over_ipv4_udp | ts_event_en |
693
+ ts_master_en | snap_type_sel;
736694 }
695
+
696
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
737697
738698 memcpy(&priv->tstamp_config, &config, sizeof(config));
739699
....@@ -748,7 +708,7 @@
748708 * a proprietary structure used to pass information to the driver.
749709 * Description:
750710 * This function obtain the current hardware timestamping settings
751
- as requested.
711
+ * as requested.
752712 */
753713 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
754714 {
....@@ -761,7 +721,57 @@
761721 return copy_to_user(ifr->ifr_data, config,
762722 sizeof(*config)) ? -EFAULT : 0;
763723 }
764
-#endif /* CONFIG_STMMAC_PTP */
724
+
725
+/**
726
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
727
+ * @priv: driver private structure
728
+ * @systime_flags: timestamping flags
729
+ * Description:
730
+ * Initialize hardware counter for packet timestamping.
731
+ * This is valid as long as the interface is open and not suspended.
732
+ * Will be rerun after resuming from suspend, case in which the timestamping
733
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
734
+ */
735
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
736
+{
737
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
738
+ struct timespec64 now;
739
+ u32 sec_inc = 0;
740
+ u64 temp = 0;
741
+
742
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
743
+ return -EOPNOTSUPP;
744
+
745
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
746
+ priv->systime_flags = systime_flags;
747
+
748
+ /* program Sub Second Increment reg */
749
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
750
+ priv->plat->clk_ptp_rate,
751
+ xmac, &sec_inc);
752
+ temp = div_u64(1000000000ULL, sec_inc);
753
+
754
+ /* Store sub second increment for later use */
755
+ priv->sub_second_inc = sec_inc;
756
+
757
+ /* calculate default added value:
758
+ * formula is :
759
+ * addend = (2^32)/freq_div_ratio;
760
+ * where, freq_div_ratio = 1e9ns/sec_inc
761
+ */
762
+ temp = (u64)(temp << 32);
763
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
764
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
765
+
766
+ /* initialize system time */
767
+ ktime_get_real_ts64(&now);
768
+
769
+ /* lower 32 bits of tv_sec are safe until y2106 */
770
+ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
771
+
772
+ return 0;
773
+}
774
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
765775
766776 /**
767777 * stmmac_init_ptp - init PTP
....@@ -773,9 +783,11 @@
773783 static int stmmac_init_ptp(struct stmmac_priv *priv)
774784 {
775785 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
786
+ int ret;
776787
777
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
778
- return -EOPNOTSUPP;
788
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
789
+ if (ret)
790
+ return ret;
779791
780792 priv->adv_ts = 0;
781793 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
....@@ -795,121 +807,274 @@
795807 priv->hwts_tx_en = 0;
796808 priv->hwts_rx_en = 0;
797809
798
- stmmac_ptp_register(priv);
799
-
800810 return 0;
801811 }
802812
803813 static void stmmac_release_ptp(struct stmmac_priv *priv)
804814 {
805
- if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP))
806
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
815
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
807816 stmmac_ptp_unregister(priv);
808817 }
809818
810819 /**
811820 * stmmac_mac_flow_ctrl - Configure flow control in all queues
812821 * @priv: driver private structure
822
+ * @duplex: duplex passed to the next function
813823 * Description: It is used for configuring the flow control in all queues
814824 */
815825 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
816826 {
817827 u32 tx_cnt = priv->plat->tx_queues_to_use;
818828
819
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
820
- priv->pause, tx_cnt);
829
+ stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl & priv->plat->flow_ctrl,
830
+ priv->pause, tx_cnt);
821831 }
822832
823
-/**
824
- * stmmac_adjust_link - adjusts the link parameters
825
- * @dev: net device structure
826
- * Description: this is the helper called by the physical abstraction layer
827
- * drivers to communicate the phy link status. According the speed and duplex
828
- * this driver can invoke registered glue-logic as well.
829
- * It also invoke the eee initialization because it could happen when switch
830
- * on different networks (that are eee capable).
831
- */
832
-static void stmmac_adjust_link(struct net_device *dev)
833
+static void stmmac_validate(struct phylink_config *config,
834
+ unsigned long *supported,
835
+ struct phylink_link_state *state)
833836 {
834
- struct stmmac_priv *priv = netdev_priv(dev);
835
- struct phy_device *phydev = dev->phydev;
836
- bool new_state = false;
837
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
838
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
839
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
840
+ int tx_cnt = priv->plat->tx_queues_to_use;
841
+ int max_speed = priv->plat->max_speed;
837842
838
- if (!phydev)
839
- return;
843
+ phylink_set(mac_supported, 10baseT_Half);
844
+ phylink_set(mac_supported, 10baseT_Full);
845
+ phylink_set(mac_supported, 100baseT_Half);
846
+ phylink_set(mac_supported, 100baseT_Full);
847
+ phylink_set(mac_supported, 1000baseT_Half);
848
+ phylink_set(mac_supported, 1000baseT_Full);
849
+ phylink_set(mac_supported, 1000baseKX_Full);
850
+ phylink_set(mac_supported, 100baseT1_Full);
851
+ phylink_set(mac_supported, 1000baseT1_Full);
840852
841
- mutex_lock(&priv->lock);
853
+ phylink_set(mac_supported, Autoneg);
854
+ phylink_set(mac_supported, Pause);
855
+ phylink_set(mac_supported, Asym_Pause);
856
+ phylink_set_port_modes(mac_supported);
842857
843
- if (phydev->link) {
844
- u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
845
-
846
- /* Now we make sure that we can be in full duplex mode.
847
- * If not, we operate in half-duplex mode. */
848
- if (phydev->duplex != priv->oldduplex) {
849
- new_state = true;
850
- if (!phydev->duplex)
851
- ctrl &= ~priv->hw->link.duplex;
852
- else
853
- ctrl |= priv->hw->link.duplex;
854
- priv->oldduplex = phydev->duplex;
858
+ /* Cut down 1G if asked to */
859
+ if ((max_speed > 0) && (max_speed < 1000)) {
860
+ phylink_set(mask, 1000baseT_Full);
861
+ phylink_set(mask, 1000baseX_Full);
862
+ } else if (priv->plat->has_xgmac) {
863
+ if (!max_speed || (max_speed >= 2500)) {
864
+ phylink_set(mac_supported, 2500baseT_Full);
865
+ phylink_set(mac_supported, 2500baseX_Full);
855866 }
856
- /* Flow Control operation */
857
- if (phydev->pause)
858
- stmmac_mac_flow_ctrl(priv, phydev->duplex);
859
-
860
- if (phydev->speed != priv->speed) {
861
- new_state = true;
862
- ctrl &= ~priv->hw->link.speed_mask;
863
- switch (phydev->speed) {
864
- case SPEED_1000:
865
- ctrl |= priv->hw->link.speed1000;
866
- break;
867
- case SPEED_100:
868
- ctrl |= priv->hw->link.speed100;
869
- break;
870
- case SPEED_10:
871
- ctrl |= priv->hw->link.speed10;
872
- break;
873
- default:
874
- netif_warn(priv, link, priv->dev,
875
- "broken speed: %d\n", phydev->speed);
876
- phydev->speed = SPEED_UNKNOWN;
877
- break;
878
- }
879
- if (phydev->speed != SPEED_UNKNOWN)
880
- stmmac_hw_fix_mac_speed(priv);
881
- priv->speed = phydev->speed;
867
+ if (!max_speed || (max_speed >= 5000)) {
868
+ phylink_set(mac_supported, 5000baseT_Full);
882869 }
883
-
884
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
885
-
886
- if (!priv->oldlink) {
887
- new_state = true;
888
- priv->oldlink = true;
870
+ if (!max_speed || (max_speed >= 10000)) {
871
+ phylink_set(mac_supported, 10000baseSR_Full);
872
+ phylink_set(mac_supported, 10000baseLR_Full);
873
+ phylink_set(mac_supported, 10000baseER_Full);
874
+ phylink_set(mac_supported, 10000baseLRM_Full);
875
+ phylink_set(mac_supported, 10000baseT_Full);
876
+ phylink_set(mac_supported, 10000baseKX4_Full);
877
+ phylink_set(mac_supported, 10000baseKR_Full);
889878 }
890
- } else if (priv->oldlink) {
891
- new_state = true;
892
- priv->oldlink = false;
893
- priv->speed = SPEED_UNKNOWN;
894
- priv->oldduplex = DUPLEX_UNKNOWN;
879
+ if (!max_speed || (max_speed >= 25000)) {
880
+ phylink_set(mac_supported, 25000baseCR_Full);
881
+ phylink_set(mac_supported, 25000baseKR_Full);
882
+ phylink_set(mac_supported, 25000baseSR_Full);
883
+ }
884
+ if (!max_speed || (max_speed >= 40000)) {
885
+ phylink_set(mac_supported, 40000baseKR4_Full);
886
+ phylink_set(mac_supported, 40000baseCR4_Full);
887
+ phylink_set(mac_supported, 40000baseSR4_Full);
888
+ phylink_set(mac_supported, 40000baseLR4_Full);
889
+ }
890
+ if (!max_speed || (max_speed >= 50000)) {
891
+ phylink_set(mac_supported, 50000baseCR2_Full);
892
+ phylink_set(mac_supported, 50000baseKR2_Full);
893
+ phylink_set(mac_supported, 50000baseSR2_Full);
894
+ phylink_set(mac_supported, 50000baseKR_Full);
895
+ phylink_set(mac_supported, 50000baseSR_Full);
896
+ phylink_set(mac_supported, 50000baseCR_Full);
897
+ phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
898
+ phylink_set(mac_supported, 50000baseDR_Full);
899
+ }
900
+ if (!max_speed || (max_speed >= 100000)) {
901
+ phylink_set(mac_supported, 100000baseKR4_Full);
902
+ phylink_set(mac_supported, 100000baseSR4_Full);
903
+ phylink_set(mac_supported, 100000baseCR4_Full);
904
+ phylink_set(mac_supported, 100000baseLR4_ER4_Full);
905
+ phylink_set(mac_supported, 100000baseKR2_Full);
906
+ phylink_set(mac_supported, 100000baseSR2_Full);
907
+ phylink_set(mac_supported, 100000baseCR2_Full);
908
+ phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
909
+ phylink_set(mac_supported, 100000baseDR2_Full);
910
+ }
895911 }
896912
897
- if (new_state && netif_msg_link(priv))
898
- phy_print_status(phydev);
913
+ /* Half-Duplex can only work with single queue */
914
+ if (tx_cnt > 1) {
915
+ phylink_set(mask, 10baseT_Half);
916
+ phylink_set(mask, 100baseT_Half);
917
+ phylink_set(mask, 1000baseT_Half);
918
+ }
899919
900
- mutex_unlock(&priv->lock);
920
+ linkmode_and(supported, supported, mac_supported);
921
+ linkmode_andnot(supported, supported, mask);
901922
902
- if (phydev->is_pseudo_fixed_link)
903
- /* Stop PHY layer to call the hook to adjust the link in case
904
- * of a switch is attached to the stmmac driver.
905
- */
906
- phydev->irq = PHY_IGNORE_INTERRUPT;
907
- else
908
- /* At this stage, init the EEE if supported.
909
- * Never called in case of fixed_link.
910
- */
911
- priv->eee_enabled = stmmac_eee_init(priv);
923
+ linkmode_and(state->advertising, state->advertising, mac_supported);
924
+ linkmode_andnot(state->advertising, state->advertising, mask);
925
+
926
+ /* If PCS is supported, check which modes it supports. */
927
+ stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
912928 }
929
+
930
+static void stmmac_mac_pcs_get_state(struct phylink_config *config,
931
+ struct phylink_link_state *state)
932
+{
933
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
934
+
935
+ state->link = 0;
936
+ stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
937
+}
938
+
939
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
940
+ const struct phylink_link_state *state)
941
+{
942
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943
+
944
+ stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
945
+}
946
+
947
+static void stmmac_mac_an_restart(struct phylink_config *config)
948
+{
949
+ /* Not Supported */
950
+}
951
+
952
+static void stmmac_mac_link_down(struct phylink_config *config,
953
+ unsigned int mode, phy_interface_t interface)
954
+{
955
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
956
+
957
+ stmmac_mac_set(priv, priv->ioaddr, false);
958
+ priv->eee_active = false;
959
+ priv->tx_lpi_enabled = false;
960
+ stmmac_eee_init(priv);
961
+ stmmac_set_eee_pls(priv, priv->hw, false);
962
+}
963
+
964
+static void stmmac_mac_link_up(struct phylink_config *config,
965
+ struct phy_device *phy,
966
+ unsigned int mode, phy_interface_t interface,
967
+ int speed, int duplex,
968
+ bool tx_pause, bool rx_pause)
969
+{
970
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971
+ u32 ctrl;
972
+
973
+ stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
974
+
975
+ ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
976
+ ctrl &= ~priv->hw->link.speed_mask;
977
+
978
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
979
+ switch (speed) {
980
+ case SPEED_10000:
981
+ ctrl |= priv->hw->link.xgmii.speed10000;
982
+ break;
983
+ case SPEED_5000:
984
+ ctrl |= priv->hw->link.xgmii.speed5000;
985
+ break;
986
+ case SPEED_2500:
987
+ ctrl |= priv->hw->link.xgmii.speed2500;
988
+ break;
989
+ default:
990
+ return;
991
+ }
992
+ } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
993
+ switch (speed) {
994
+ case SPEED_100000:
995
+ ctrl |= priv->hw->link.xlgmii.speed100000;
996
+ break;
997
+ case SPEED_50000:
998
+ ctrl |= priv->hw->link.xlgmii.speed50000;
999
+ break;
1000
+ case SPEED_40000:
1001
+ ctrl |= priv->hw->link.xlgmii.speed40000;
1002
+ break;
1003
+ case SPEED_25000:
1004
+ ctrl |= priv->hw->link.xlgmii.speed25000;
1005
+ break;
1006
+ case SPEED_10000:
1007
+ ctrl |= priv->hw->link.xgmii.speed10000;
1008
+ break;
1009
+ case SPEED_2500:
1010
+ ctrl |= priv->hw->link.speed2500;
1011
+ break;
1012
+ case SPEED_1000:
1013
+ ctrl |= priv->hw->link.speed1000;
1014
+ break;
1015
+ default:
1016
+ return;
1017
+ }
1018
+ } else {
1019
+ switch (speed) {
1020
+ case SPEED_2500:
1021
+ ctrl |= priv->hw->link.speed2500;
1022
+ break;
1023
+ case SPEED_1000:
1024
+ ctrl |= priv->hw->link.speed1000;
1025
+ break;
1026
+ case SPEED_100:
1027
+ ctrl |= priv->hw->link.speed100;
1028
+ break;
1029
+ case SPEED_10:
1030
+ ctrl |= priv->hw->link.speed10;
1031
+ break;
1032
+ default:
1033
+ return;
1034
+ }
1035
+ }
1036
+
1037
+ priv->speed = speed;
1038
+
1039
+ if (priv->plat->fix_mac_speed)
1040
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1041
+
1042
+ if (!duplex)
1043
+ ctrl &= ~priv->hw->link.duplex;
1044
+ else
1045
+ ctrl |= priv->hw->link.duplex;
1046
+
1047
+ /* Flow Control operation */
1048
+ if (rx_pause && tx_pause)
1049
+ priv->flow_ctrl = FLOW_AUTO;
1050
+ else if (rx_pause && !tx_pause)
1051
+ priv->flow_ctrl = FLOW_RX;
1052
+ else if (!rx_pause && tx_pause)
1053
+ priv->flow_ctrl = FLOW_TX;
1054
+ else
1055
+ priv->flow_ctrl = FLOW_OFF;
1056
+
1057
+ stmmac_mac_flow_ctrl(priv, duplex);
1058
+
1059
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1060
+
1061
+ stmmac_mac_set(priv, priv->ioaddr, true);
1062
+ if (phy && priv->dma_cap.eee) {
1063
+ priv->eee_active = phy_init_eee(phy, 1) >= 0;
1064
+ priv->eee_enabled = stmmac_eee_init(priv);
1065
+ priv->tx_lpi_enabled = priv->eee_enabled;
1066
+ stmmac_set_eee_pls(priv, priv->hw, true);
1067
+ }
1068
+}
1069
+
1070
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1071
+ .validate = stmmac_validate,
1072
+ .mac_pcs_get_state = stmmac_mac_pcs_get_state,
1073
+ .mac_config = stmmac_mac_config,
1074
+ .mac_an_restart = stmmac_mac_an_restart,
1075
+ .mac_link_down = stmmac_mac_link_down,
1076
+ .mac_link_up = stmmac_mac_link_up,
1077
+};
9131078
9141079 /**
9151080 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
@@ -936,6 +1101,23 @@
 	}
 }
 
+static void rtl8211F_led_control(struct phy_device *phydev)
+{
+	printk("ben debug:rtl8211F_led_control...1 \n");
+
+	if (!phydev) return;
+	if (phydev->phy_id != 0x001cc916) return; /* only for the RTL8211F */
+
+	/* switch to extension page 0x0d04 */
+	phy_write(phydev, 31, 0x0d04);
+	/* add hc: 1000M --> orange */
+	/*         100M  --> green  */
+	phy_write(phydev, 16, 0x6D02);
+	/* add hc: 1000M & 100M --> green */
+	/* phy_write(phydev, 16, 0x6C0A); */
+	printk("ben debug:rtl8211F_led_control...2 \n");
+}
+
 /**
  * stmmac_init_phy - PHY initialization
  * @dev: net device structure
....@@ -947,85 +1129,69 @@
9471129 static int stmmac_init_phy(struct net_device *dev)
9481130 {
9491131 struct stmmac_priv *priv = netdev_priv(dev);
950
- u32 tx_cnt = priv->plat->tx_queues_to_use;
951
- struct phy_device *phydev;
952
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
953
- char bus_id[MII_BUS_ID_SIZE];
954
- int interface = priv->plat->interface;
955
- int max_speed = priv->plat->max_speed;
956
- priv->oldlink = false;
957
- priv->speed = SPEED_UNKNOWN;
958
- priv->oldduplex = DUPLEX_UNKNOWN;
1132
+ struct device_node *node;
1133
+ int ret;
9591134
9601135 if (priv->plat->integrated_phy_power)
961
- priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
1136
+ ret = priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
9621137
963
- if (priv->plat->phy_node) {
964
- phydev = of_phy_connect(dev, priv->plat->phy_node,
965
- &stmmac_adjust_link, 0, interface);
966
- } else {
967
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
968
- priv->plat->bus_id);
1138
+ node = priv->plat->phylink_node;
9691139
970
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
971
- priv->plat->phy_addr);
972
- netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
973
- phy_id_fmt);
1140
+ if (node)
1141
+ ret = phylink_of_phy_connect(priv->phylink, node, 0);
9741142
975
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
976
- interface);
977
- }
1143
+ /* Some DT bindings do not set-up the PHY handle. Let's try to
1144
+ * manually parse it
1145
+ */
1146
+ if (!node || ret) {
1147
+ int addr = priv->plat->phy_addr;
1148
+ struct phy_device *phydev;
9781149
979
- if (IS_ERR_OR_NULL(phydev)) {
980
- netdev_err(priv->dev, "Could not attach to PHY\n");
981
- if (!phydev)
1150
+ phydev = mdiobus_get_phy(priv->mii, addr);
1151
+ if (!phydev) {
1152
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
9821153 return -ENODEV;
1154
+ }
9831155
984
- return PTR_ERR(phydev);
1156
+ rtl8211F_led_control(phydev);
1157
+ ret = phylink_connect_phy(priv->phylink, phydev);
9851158 }
9861159
987
- /* Stop Advertising 1000BASE Capability if interface is not GMII */
988
- if ((interface == PHY_INTERFACE_MODE_MII) ||
989
- (interface == PHY_INTERFACE_MODE_RMII) ||
990
- (max_speed < 1000 && max_speed > 0))
991
- phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
992
- SUPPORTED_1000baseT_Full);
1160
+ if (!priv->plat->pmt) {
1161
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
9931162
994
- /*
995
- * Half-duplex mode not supported with multiqueue
996
- * half-duplex can only works with single queue
997
- */
998
- if (tx_cnt > 1)
999
- phydev->supported &= ~(SUPPORTED_1000baseT_Half |
1000
- SUPPORTED_100baseT_Half |
1001
- SUPPORTED_10baseT_Half);
1002
-
1003
- /*
1004
- * Broken HW is sometimes missing the pull-up resistor on the
1005
- * MDIO line, which results in reads to non-existent devices returning
1006
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1007
- * device as well.
1008
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
1009
- */
1010
- if (!priv->plat->phy_node && phydev->phy_id == 0) {
1011
- phy_disconnect(phydev);
1012
- return -ENODEV;
1163
+ phylink_ethtool_get_wol(priv->phylink, &wol);
1164
+ device_set_wakeup_capable(priv->device, !!wol.supported);
10131165 }
1166
+ return ret;
1167
+}
10141168
1015
- /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1016
- * subsequent PHY polling, make sure we force a link transition if
1017
- * we have a UP/DOWN/UP transition
1018
- */
1019
- if (phydev->is_pseudo_fixed_link)
1020
- phydev->irq = PHY_POLL;
1169
+static int stmmac_phy_setup(struct stmmac_priv *priv)
1170
+{
1171
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1172
+ int mode = priv->plat->phy_interface;
1173
+ struct phylink *phylink;
10211174
1022
- phy_attached_info(phydev);
1175
+ priv->phylink_config.dev = &priv->dev->dev;
1176
+ priv->phylink_config.type = PHYLINK_NETDEV;
1177
+ priv->phylink_config.pcs_poll = true;
1178
+
1179
+ if (!fwnode)
1180
+ fwnode = dev_fwnode(priv->device);
1181
+
1182
+ phylink = phylink_create(&priv->phylink_config, fwnode,
1183
+ mode, &stmmac_phylink_mac_ops);
1184
+ if (IS_ERR(phylink))
1185
+ return PTR_ERR(phylink);
1186
+
1187
+ priv->phylink = phylink;
10231188 return 0;
10241189 }
10251190
10261191 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
10271192 {
10281193 u32 rx_cnt = priv->plat->rx_queues_to_use;
1194
+ unsigned int desc_size;
10291195 void *head_rx;
10301196 u32 queue;
10311197
....@@ -1035,19 +1201,24 @@
10351201
10361202 pr_info("\tRX Queue %u rings\n", queue);
10371203
1038
- if (priv->extend_desc)
1204
+ if (priv->extend_desc) {
10391205 head_rx = (void *)rx_q->dma_erx;
1040
- else
1206
+ desc_size = sizeof(struct dma_extended_desc);
1207
+ } else {
10411208 head_rx = (void *)rx_q->dma_rx;
1209
+ desc_size = sizeof(struct dma_desc);
1210
+ }
10421211
10431212 /* Display RX ring */
1044
- stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1213
+ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1214
+ rx_q->dma_rx_phy, desc_size);
10451215 }
10461216 }
10471217
10481218 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
10491219 {
10501220 u32 tx_cnt = priv->plat->tx_queues_to_use;
1221
+ unsigned int desc_size;
10511222 void *head_tx;
10521223 u32 queue;
10531224
....@@ -1057,12 +1228,19 @@
10571228
10581229 pr_info("\tTX Queue %d rings\n", queue);
10591230
1060
- if (priv->extend_desc)
1231
+ if (priv->extend_desc) {
10611232 head_tx = (void *)tx_q->dma_etx;
1062
- else
1233
+ desc_size = sizeof(struct dma_extended_desc);
1234
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1235
+ head_tx = (void *)tx_q->dma_entx;
1236
+ desc_size = sizeof(struct dma_edesc);
1237
+ } else {
10631238 head_tx = (void *)tx_q->dma_tx;
1239
+ desc_size = sizeof(struct dma_desc);
1240
+ }
10641241
1065
- stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1242
+ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1243
+ tx_q->dma_tx_phy, desc_size);
10661244 }
10671245 }
10681246
....@@ -1106,16 +1284,16 @@
11061284 int i;
11071285
11081286 /* Clear the RX descriptors */
1109
- for (i = 0; i < DMA_RX_SIZE; i++)
1287
+ for (i = 0; i < priv->dma_rx_size; i++)
11101288 if (priv->extend_desc)
11111289 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11121290 priv->use_riwt, priv->mode,
1113
- (i == DMA_RX_SIZE - 1),
1291
+ (i == priv->dma_rx_size - 1),
11141292 priv->dma_buf_sz);
11151293 else
11161294 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11171295 priv->use_riwt, priv->mode,
1118
- (i == DMA_RX_SIZE - 1),
1296
+ (i == priv->dma_rx_size - 1),
11191297 priv->dma_buf_sz);
11201298 }
11211299
....@@ -1132,13 +1310,19 @@
11321310 int i;
11331311
11341312 /* Clear the TX descriptors */
1135
- for (i = 0; i < DMA_TX_SIZE; i++)
1313
+ for (i = 0; i < priv->dma_tx_size; i++) {
1314
+ int last = (i == (priv->dma_tx_size - 1));
1315
+ struct dma_desc *p;
1316
+
11361317 if (priv->extend_desc)
1137
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1138
- priv->mode, (i == DMA_TX_SIZE - 1));
1318
+ p = &tx_q->dma_etx[i].basic;
1319
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1320
+ p = &tx_q->dma_entx[i].basic;
11391321 else
1140
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1141
- priv->mode, (i == DMA_TX_SIZE - 1));
1322
+ p = &tx_q->dma_tx[i];
1323
+
1324
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
1325
+ }
11421326 }
11431327
11441328 /**
....@@ -1176,26 +1360,30 @@
11761360 int i, gfp_t flags, u32 queue)
11771361 {
11781362 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1179
- struct sk_buff *skb;
1363
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1364
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
11801365
1181
- skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1182
- if (!skb) {
1183
- netdev_err(priv->dev,
1184
- "%s: Rx init fails; skb is NULL\n", __func__);
1366
+ if (priv->dma_cap.addr64 <= 32)
1367
+ gfp |= GFP_DMA32;
1368
+
1369
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1370
+ if (!buf->page)
11851371 return -ENOMEM;
1186
- }
1187
- rx_q->rx_skbuff[i] = skb;
1188
- rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1189
- priv->dma_buf_sz,
1190
- DMA_FROM_DEVICE);
1191
- if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1192
- netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1193
- dev_kfree_skb_any(skb);
1194
- return -EINVAL;
1372
+
1373
+ if (priv->sph) {
1374
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1375
+ if (!buf->sec_page)
1376
+ return -ENOMEM;
1377
+
1378
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1379
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1380
+ } else {
1381
+ buf->sec_page = NULL;
1382
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
11951383 }
11961384
1197
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1198
-
1385
+ buf->addr = page_pool_get_dma_addr(buf->page);
1386
+ stmmac_set_desc_addr(priv, p, buf->addr);
11991387 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12001388 stmmac_init_desc3(priv, p);
12011389
....@@ -1211,13 +1399,15 @@
12111399 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
12121400 {
12131401 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1402
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
12141403
1215
- if (rx_q->rx_skbuff[i]) {
1216
- dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1217
- priv->dma_buf_sz, DMA_FROM_DEVICE);
1218
- dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1219
- }
1220
- rx_q->rx_skbuff[i] = NULL;
1404
+ if (buf->page)
1405
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1406
+ buf->page = NULL;
1407
+
1408
+ if (buf->sec_page)
1409
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1410
+ buf->sec_page = NULL;
12211411 }
12221412
12231413 /**
....@@ -1264,18 +1454,8 @@
12641454 struct stmmac_priv *priv = netdev_priv(dev);
12651455 u32 rx_count = priv->plat->rx_queues_to_use;
12661456 int ret = -ENOMEM;
1267
- int bfsize = 0;
12681457 int queue;
12691458 int i;
1270
-
1271
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1272
- if (bfsize < 0)
1273
- bfsize = 0;
1274
-
1275
- if (bfsize < BUF_SIZE_16KiB)
1276
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1277
-
1278
- priv->dma_buf_sz = bfsize;
12791459
12801460 /* RX INITIALIZATION */
12811461 netif_dbg(priv, probe, priv->dev,
....@@ -1288,7 +1468,9 @@
12881468 "(%s) dma_rx_phy=0x%08x\n", __func__,
12891469 (u32)rx_q->dma_rx_phy);
12901470
1291
- for (i = 0; i < DMA_RX_SIZE; i++) {
1471
+ stmmac_clear_rx_descriptors(priv, queue);
1472
+
1473
+ for (i = 0; i < priv->dma_rx_size; i++) {
12921474 struct dma_desc *p;
12931475
12941476 if (priv->extend_desc)
....@@ -1300,29 +1482,23 @@
13001482 queue);
13011483 if (ret)
13021484 goto err_init_rx_buffers;
1303
-
1304
- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1305
- rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1306
- (unsigned int)rx_q->rx_skbuff_dma[i]);
13071485 }
13081486
13091487 rx_q->cur_rx = 0;
1310
- rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1311
-
1312
- stmmac_clear_rx_descriptors(priv, queue);
1488
+ rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
13131489
13141490 /* Setup the chained descriptor addresses */
13151491 if (priv->mode == STMMAC_CHAIN_MODE) {
13161492 if (priv->extend_desc)
13171493 stmmac_mode_init(priv, rx_q->dma_erx,
1318
- rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1494
+ rx_q->dma_rx_phy,
1495
+ priv->dma_rx_size, 1);
13191496 else
13201497 stmmac_mode_init(priv, rx_q->dma_rx,
1321
- rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1498
+ rx_q->dma_rx_phy,
1499
+ priv->dma_rx_size, 0);
13221500 }
13231501 }
1324
-
1325
- buf_sz = bfsize;
13261502
13271503 return 0;
13281504
....@@ -1334,7 +1510,7 @@
13341510 if (queue == 0)
13351511 break;
13361512
1337
- i = DMA_RX_SIZE;
1513
+ i = priv->dma_rx_size;
13381514 queue--;
13391515 }
13401516
....@@ -1366,16 +1542,20 @@
13661542 if (priv->mode == STMMAC_CHAIN_MODE) {
13671543 if (priv->extend_desc)
13681544 stmmac_mode_init(priv, tx_q->dma_etx,
1369
- tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1370
- else
1545
+ tx_q->dma_tx_phy,
1546
+ priv->dma_tx_size, 1);
1547
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
13711548 stmmac_mode_init(priv, tx_q->dma_tx,
1372
- tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1549
+ tx_q->dma_tx_phy,
1550
+ priv->dma_tx_size, 0);
13731551 }
13741552
1375
- for (i = 0; i < DMA_TX_SIZE; i++) {
1553
+ for (i = 0; i < priv->dma_tx_size; i++) {
13761554 struct dma_desc *p;
13771555 if (priv->extend_desc)
13781556 p = &((tx_q->dma_etx + i)->basic);
1557
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1558
+ p = &((tx_q->dma_entx + i)->basic);
13791559 else
13801560 p = tx_q->dma_tx + i;
13811561
....@@ -1434,7 +1614,7 @@
14341614 {
14351615 int i;
14361616
1437
- for (i = 0; i < DMA_RX_SIZE; i++)
1617
+ for (i = 0; i < priv->dma_rx_size; i++)
14381618 stmmac_free_rx_buffer(priv, queue, i);
14391619 }
14401620
....@@ -1447,7 +1627,7 @@
14471627 {
14481628 int i;
14491629
1450
- for (i = 0; i < DMA_TX_SIZE; i++)
1630
+ for (i = 0; i < priv->dma_tx_size; i++)
14511631 stmmac_free_tx_buffer(priv, queue, i);
14521632 }
14531633
....@@ -1482,16 +1662,17 @@
14821662
14831663 /* Free DMA regions of consistent memory previously allocated */
14841664 if (!priv->extend_desc)
1485
- dma_free_coherent(priv->device,
1486
- DMA_RX_SIZE * sizeof(struct dma_desc),
1665
+ dma_free_coherent(priv->device, priv->dma_rx_size *
1666
+ sizeof(struct dma_desc),
14871667 rx_q->dma_rx, rx_q->dma_rx_phy);
14881668 else
1489
- dma_free_coherent(priv->device, DMA_RX_SIZE *
1669
+ dma_free_coherent(priv->device, priv->dma_rx_size *
14901670 sizeof(struct dma_extended_desc),
14911671 rx_q->dma_erx, rx_q->dma_rx_phy);
14921672
1493
- kfree(rx_q->rx_skbuff_dma);
1494
- kfree(rx_q->rx_skbuff);
1673
+ kfree(rx_q->buf_pool);
1674
+ if (rx_q->page_pool)
1675
+ page_pool_destroy(rx_q->page_pool);
14951676 }
14961677 }
14971678
....@@ -1507,19 +1688,26 @@
15071688 /* Free TX queue resources */
15081689 for (queue = 0; queue < tx_count; queue++) {
15091690 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1691
+ size_t size;
1692
+ void *addr;
15101693
15111694 /* Release the DMA TX socket buffers */
15121695 dma_free_tx_skbufs(priv, queue);
15131696
1514
- /* Free DMA regions of consistent memory previously allocated */
1515
- if (!priv->extend_desc)
1516
- dma_free_coherent(priv->device,
1517
- DMA_TX_SIZE * sizeof(struct dma_desc),
1518
- tx_q->dma_tx, tx_q->dma_tx_phy);
1519
- else
1520
- dma_free_coherent(priv->device, DMA_TX_SIZE *
1521
- sizeof(struct dma_extended_desc),
1522
- tx_q->dma_etx, tx_q->dma_tx_phy);
1697
+ if (priv->extend_desc) {
1698
+ size = sizeof(struct dma_extended_desc);
1699
+ addr = tx_q->dma_etx;
1700
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1701
+ size = sizeof(struct dma_edesc);
1702
+ addr = tx_q->dma_entx;
1703
+ } else {
1704
+ size = sizeof(struct dma_desc);
1705
+ addr = tx_q->dma_tx;
1706
+ }
1707
+
1708
+ size *= priv->dma_tx_size;
1709
+
1710
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
15231711
15241712 kfree(tx_q->tx_skbuff_dma);
15251713 kfree(tx_q->tx_skbuff);
....@@ -1543,39 +1731,49 @@
15431731 /* RX queues buffers and DMA */
15441732 for (queue = 0; queue < rx_count; queue++) {
15451733 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1734
+ struct page_pool_params pp_params = { 0 };
1735
+ unsigned int num_pages;
15461736
15471737 rx_q->queue_index = queue;
15481738 rx_q->priv_data = priv;
15491739
1550
- rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1551
- sizeof(dma_addr_t),
1552
- GFP_KERNEL);
1553
- if (!rx_q->rx_skbuff_dma)
1554
- goto err_dma;
1740
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1741
+ pp_params.pool_size = priv->dma_rx_size;
1742
+ num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1743
+ pp_params.order = ilog2(num_pages);
1744
+ pp_params.nid = dev_to_node(priv->device);
1745
+ pp_params.dev = priv->device;
1746
+ pp_params.dma_dir = DMA_FROM_DEVICE;
1747
+ pp_params.max_len = num_pages * PAGE_SIZE;
15551748
1556
- rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1557
- sizeof(struct sk_buff *),
1558
- GFP_KERNEL);
1559
- if (!rx_q->rx_skbuff)
1749
+ rx_q->page_pool = page_pool_create(&pp_params);
1750
+ if (IS_ERR(rx_q->page_pool)) {
1751
+ ret = PTR_ERR(rx_q->page_pool);
1752
+ rx_q->page_pool = NULL;
1753
+ goto err_dma;
1754
+ }
1755
+
1756
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1757
+ sizeof(*rx_q->buf_pool),
1758
+ GFP_KERNEL);
1759
+ if (!rx_q->buf_pool)
15601760 goto err_dma;
15611761
15621762 if (priv->extend_desc) {
1563
- rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1564
- DMA_RX_SIZE *
1565
- sizeof(struct
1566
- dma_extended_desc),
1567
- &rx_q->dma_rx_phy,
1568
- GFP_KERNEL);
1763
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
1764
+ priv->dma_rx_size *
1765
+ sizeof(struct dma_extended_desc),
1766
+ &rx_q->dma_rx_phy,
1767
+ GFP_KERNEL);
15691768 if (!rx_q->dma_erx)
15701769 goto err_dma;
15711770
15721771 } else {
1573
- rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1574
- DMA_RX_SIZE *
1575
- sizeof(struct
1576
- dma_desc),
1577
- &rx_q->dma_rx_phy,
1578
- GFP_KERNEL);
1772
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
1773
+ priv->dma_rx_size *
1774
+ sizeof(struct dma_desc),
1775
+ &rx_q->dma_rx_phy,
1776
+ GFP_KERNEL);
15791777 if (!rx_q->dma_rx)
15801778 goto err_dma;
15811779 }
....@@ -1606,48 +1804,50 @@
16061804 /* TX queues buffers and DMA */
16071805 for (queue = 0; queue < tx_count; queue++) {
16081806 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1807
+ size_t size;
1808
+ void *addr;
16091809
16101810 tx_q->queue_index = queue;
16111811 tx_q->priv_data = priv;
16121812
1613
- tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1614
- sizeof(*tx_q->tx_skbuff_dma),
1615
- GFP_KERNEL);
1813
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1814
+ sizeof(*tx_q->tx_skbuff_dma),
1815
+ GFP_KERNEL);
16161816 if (!tx_q->tx_skbuff_dma)
16171817 goto err_dma;
16181818
1619
- tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1620
- sizeof(struct sk_buff *),
1621
- GFP_KERNEL);
1819
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1820
+ sizeof(struct sk_buff *),
1821
+ GFP_KERNEL);
16221822 if (!tx_q->tx_skbuff)
16231823 goto err_dma;
16241824
1625
- if (priv->extend_desc) {
1626
- tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1627
- DMA_TX_SIZE *
1628
- sizeof(struct
1629
- dma_extended_desc),
1630
- &tx_q->dma_tx_phy,
1631
- GFP_KERNEL);
1632
- if (!tx_q->dma_etx)
1633
- goto err_dma;
1634
- } else {
1635
- tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1636
- DMA_TX_SIZE *
1637
- sizeof(struct
1638
- dma_desc),
1639
- &tx_q->dma_tx_phy,
1640
- GFP_KERNEL);
1641
- if (!tx_q->dma_tx)
1642
- goto err_dma;
1643
- }
1825
+ if (priv->extend_desc)
1826
+ size = sizeof(struct dma_extended_desc);
1827
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1828
+ size = sizeof(struct dma_edesc);
1829
+ else
1830
+ size = sizeof(struct dma_desc);
1831
+
1832
+ size *= priv->dma_tx_size;
1833
+
1834
+ addr = dma_alloc_coherent(priv->device, size,
1835
+ &tx_q->dma_tx_phy, GFP_KERNEL);
1836
+ if (!addr)
1837
+ goto err_dma;
1838
+
1839
+ if (priv->extend_desc)
1840
+ tx_q->dma_etx = addr;
1841
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1842
+ tx_q->dma_entx = addr;
1843
+ else
1844
+ tx_q->dma_tx = addr;
16441845 }
16451846
16461847 return 0;
16471848
16481849 err_dma:
16491850 free_dma_tx_desc_resources(priv);
1650
-
16511851 return ret;
16521852 }
16531853
....@@ -1858,6 +2058,7 @@
18582058 /**
18592059 * stmmac_tx_clean - to manage the transmission completion
18602060 * @priv: driver private structure
2061
+ * @budget: napi budget limiting this functions packet handling
18612062 * @queue: TX queue index
18622063 * Description: it reclaims the transmit resources after transmission completes.
18632064 */
....@@ -1879,6 +2080,8 @@
18792080
18802081 if (priv->extend_desc)
18812082 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2083
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2084
+ p = &tx_q->dma_entx[entry].basic;
18822085 else
18832086 p = tx_q->dma_tx + entry;
18842087
....@@ -1937,7 +2140,7 @@
19372140
19382141 stmmac_release_tx_desc(priv, p, priv->mode);
19392142
1940
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2143
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
19412144 }
19422145 tx_q->dirty_tx = entry;
19432146
....@@ -1946,7 +2149,7 @@
19462149
19472150 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
19482151 queue))) &&
1949
- stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
2152
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
19502153
19512154 netif_dbg(priv, tx_done, priv->dev,
19522155 "%s: restart transmit\n", __func__);
....@@ -1955,8 +2158,12 @@
19552158
19562159 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
19572160 stmmac_enable_eee_mode(priv);
1958
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
2161
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
19592162 }
2163
+
2164
+ /* We still have pending packets, let's call for a new scheduling */
2165
+ if (tx_q->dirty_tx != tx_q->cur_tx)
2166
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
19602167
19612168 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
19622169
....@@ -1973,23 +2180,18 @@
19732180 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
19742181 {
19752182 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1976
- int i;
19772183
19782184 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
19792185
19802186 stmmac_stop_tx_dma(priv, chan);
19812187 dma_free_tx_skbufs(priv, chan);
1982
- for (i = 0; i < DMA_TX_SIZE; i++)
1983
- if (priv->extend_desc)
1984
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1985
- priv->mode, (i == DMA_TX_SIZE - 1));
1986
- else
1987
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1988
- priv->mode, (i == DMA_TX_SIZE - 1));
2188
+ stmmac_clear_tx_descriptors(priv, chan);
19892189 tx_q->dirty_tx = 0;
19902190 tx_q->cur_tx = 0;
19912191 tx_q->mss = 0;
19922192 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2193
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2194
+ tx_q->dma_tx_phy, chan);
19932195 stmmac_start_tx_dma(priv, chan);
19942196
19952197 priv->dev->stats.tx_errors++;
....@@ -2048,23 +2250,24 @@
20482250 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
20492251 &priv->xstats, chan);
20502252 struct stmmac_channel *ch = &priv->channel[chan];
2051
- bool needs_work = false;
2253
+ unsigned long flags;
20522254
2053
- if ((status & handle_rx) && ch->has_rx) {
2054
- needs_work = true;
2055
- } else {
2056
- status &= ~handle_rx;
2255
+ if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2256
+ if (napi_schedule_prep(&ch->rx_napi)) {
2257
+ spin_lock_irqsave(&ch->lock, flags);
2258
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2259
+ spin_unlock_irqrestore(&ch->lock, flags);
2260
+ __napi_schedule(&ch->rx_napi);
2261
+ }
20572262 }
20582263
2059
- if ((status & handle_tx) && ch->has_tx) {
2060
- needs_work = true;
2061
- } else {
2062
- status &= ~handle_tx;
2063
- }
2064
-
2065
- if (needs_work && napi_schedule_prep(&ch->napi)) {
2066
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2067
- __napi_schedule(&ch->napi);
2264
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2265
+ if (napi_schedule_prep(&ch->tx_napi)) {
2266
+ spin_lock_irqsave(&ch->lock, flags);
2267
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2268
+ spin_unlock_irqrestore(&ch->lock, flags);
2269
+ __napi_schedule(&ch->tx_napi);
2270
+ }
20682271 }
20692272
20702273 return status;
....@@ -2127,10 +2330,10 @@
21272330 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21282331 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21292332
2130
- dwmac_mmc_intr_all_mask(priv->mmcaddr);
2333
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
21312334
21322335 if (priv->dma_cap.rmon) {
2133
- dwmac_mmc_ctrl(priv->mmcaddr, mode);
2336
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
21342337 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21352338 } else
21362339 netdev_info(priv->dev, "No MAC Management Counters available\n");
@@ -2160,7 +2363,7 @@
 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 {
 //	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
-	if(1) {
+	if (1) {
 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
 		if (likely(priv->plat->get_eth_addr))
 			priv->plat->get_eth_addr(priv->plat->bsp_priv,
....@@ -2223,7 +2426,8 @@
22232426 rx_q->dma_rx_phy, chan);
22242427
22252428 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2226
- (DMA_RX_SIZE * sizeof(struct dma_desc));
2429
+ (priv->dma_rx_size *
2430
+ sizeof(struct dma_desc));
22272431 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
22282432 rx_q->rx_tail_addr, chan);
22292433 }
....@@ -2252,7 +2456,7 @@
22522456
22532457 /**
22542458 * stmmac_tx_timer - mitigation sw timer for tx.
2255
- * @data: data pointer
2459
+ * @t: data pointer
22562460 * Description:
22572461 * This is the timer handler to directly invoke the stmmac_tx_clean.
22582462 */
....@@ -2264,25 +2468,32 @@
22642468
22652469 ch = &priv->channel[tx_q->queue_index];
22662470
2267
- if (likely(napi_schedule_prep(&ch->napi)))
2268
- __napi_schedule(&ch->napi);
2471
+ if (likely(napi_schedule_prep(&ch->tx_napi))) {
2472
+ unsigned long flags;
2473
+
2474
+ spin_lock_irqsave(&ch->lock, flags);
2475
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2476
+ spin_unlock_irqrestore(&ch->lock, flags);
2477
+ __napi_schedule(&ch->tx_napi);
2478
+ }
22692479 }
22702480
22712481 /**
2272
- * stmmac_init_tx_coalesce - init tx mitigation options.
2482
+ * stmmac_init_coalesce - init mitigation options.
22732483 * @priv: driver private structure
22742484 * Description:
2275
- * This inits the transmit coalesce parameters: i.e. timer rate,
2485
+ * This inits the coalesce parameters: i.e. timer rate,
22762486 * timer handler and default threshold used for enabling the
22772487 * interrupt on completion bit.
22782488 */
2279
-static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2489
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
22802490 {
22812491 u32 tx_channel_count = priv->plat->tx_queues_to_use;
22822492 u32 chan;
22832493
22842494 priv->tx_coal_frames = STMMAC_TX_FRAMES;
22852495 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2496
+ priv->rx_coal_frames = STMMAC_RX_FRAMES;
22862497
22872498 for (chan = 0; chan < tx_channel_count; chan++) {
22882499 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
....@@ -2300,12 +2511,12 @@
23002511 /* set TX ring length */
23012512 for (chan = 0; chan < tx_channels_count; chan++)
23022513 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2303
- (DMA_TX_SIZE - 1), chan);
2514
+ (priv->dma_tx_size - 1), chan);
23042515
23052516 /* set RX ring length */
23062517 for (chan = 0; chan < rx_channels_count; chan++)
23072518 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2308
- (DMA_RX_SIZE - 1), chan);
2519
+ (priv->dma_rx_size - 1), chan);
23092520 }
23102521
23112522 /**
....@@ -2429,6 +2640,22 @@
24292640 }
24302641 }
24312642
2643
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2644
+{
2645
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2646
+ priv->rss.enable = false;
2647
+ return;
2648
+ }
2649
+
2650
+ if (priv->dev->features & NETIF_F_RXHASH)
2651
+ priv->rss.enable = true;
2652
+ else
2653
+ priv->rss.enable = false;
2654
+
2655
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
2656
+ priv->plat->rx_queues_to_use);
2657
+}
2658
+
24322659 /**
24332660 * stmmac_mtl_configuration - Configure MTL
24342661 * @priv: driver private structure
....@@ -2473,6 +2700,10 @@
24732700 /* Set RX routing */
24742701 if (rx_queues_count > 1)
24752702 stmmac_mac_config_rx_queues_routing(priv);
2703
+
2704
+ /* Receive Side Scaling */
2705
+ if (rx_queues_count > 1)
2706
+ stmmac_mac_config_rss(priv);
24762707 }
24772708
24782709 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
....@@ -2488,6 +2719,7 @@
24882719 /**
24892720 * stmmac_hw_setup - setup mac in a usable state.
24902721 * @dev : pointer to the device structure.
2722
+ * @ptp_register: register PTP if set
24912723 * Description:
24922724 * this is the main function to setup the HW in a usable state because the
24932725 * dma engine is reset, the core registers are configured (e.g. AXI,
....@@ -2497,7 +2729,7 @@
24972729 * 0 on success and an appropriate (-)ve integer as defined in errno.h
24982730 * file on failure.
24992731 */
2500
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2732
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
25012733 {
25022734 struct stmmac_priv *priv = netdev_priv(dev);
25032735 u32 rx_cnt = priv->plat->rx_queues_to_use;
....@@ -2553,37 +2785,75 @@
25532785
25542786 stmmac_mmc_setup(priv);
25552787
2556
- if (IS_ENABLED(CONFIG_STMMAC_PTP) && init_ptp) {
2788
+ if (ptp_register) {
25572789 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25582790 if (ret < 0)
2559
- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2560
-
2561
- ret = stmmac_init_ptp(priv);
2562
- if (ret == -EOPNOTSUPP)
2563
- netdev_warn(priv->dev, "PTP not supported by HW\n");
2564
- else if (ret)
2565
- netdev_warn(priv->dev, "PTP init failed\n");
2791
+ netdev_warn(priv->dev,
2792
+ "failed to enable PTP reference clock: %pe\n",
2793
+ ERR_PTR(ret));
25662794 }
25672795
2568
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2796
+ ret = stmmac_init_ptp(priv);
2797
+ if (ret == -EOPNOTSUPP)
2798
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
2799
+ else if (ret)
2800
+ netdev_warn(priv->dev, "PTP init failed\n");
2801
+ else if (ptp_register)
2802
+ stmmac_ptp_register(priv);
2803
+
2804
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2805
+
2806
+ /* Convert the timer from msec to usec */
2807
+ if (!priv->tx_lpi_timer)
2808
+ priv->tx_lpi_timer = eee_timer * 1000;
25692809
25702810 if (priv->use_riwt) {
2571
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2572
- if (!ret)
2573
- priv->rx_riwt = MAX_DMA_RIWT;
2811
+ if (!priv->rx_riwt)
2812
+ priv->rx_riwt = DEF_DMA_RIWT;
2813
+
2814
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
25742815 }
25752816
25762817 if (priv->hw->pcs)
2577
- stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2818
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
25782819
25792820 /* set TX and RX rings length */
25802821 stmmac_set_rings_length(priv);
25812822
25822823 /* Enable TSO */
25832824 if (priv->tso) {
2584
- for (chan = 0; chan < tx_cnt; chan++)
2825
+ for (chan = 0; chan < tx_cnt; chan++) {
2826
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2827
+
2828
+ /* TSO and TBS cannot co-exist */
2829
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
2830
+ continue;
2831
+
25852832 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2833
+ }
25862834 }
2835
+
2836
+ /* Enable Split Header */
2837
+ if (priv->sph && priv->hw->rx_csum) {
2838
+ for (chan = 0; chan < rx_cnt; chan++)
2839
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2840
+ }
2841
+
2842
+ /* VLAN Tag Insertion */
2843
+ if (priv->dma_cap.vlins)
2844
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2845
+
2846
+ /* TBS */
2847
+ for (chan = 0; chan < tx_cnt; chan++) {
2848
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2849
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2850
+
2851
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2852
+ }
2853
+
2854
+ /* Configure real RX and TX queues */
2855
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2856
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
25872857
25882858 /* Start the ball rolling... */
25892859 stmmac_start_all_dma(priv);
....@@ -2595,8 +2865,7 @@
25952865 {
25962866 struct stmmac_priv *priv = netdev_priv(dev);
25972867
2598
- if (IS_ENABLED(CONFIG_STMMAC_PTP))
2599
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
2868
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
26002869 }
26012870
26022871 /**
....@@ -2611,18 +2880,25 @@
26112880 static int stmmac_open(struct net_device *dev)
26122881 {
26132882 struct stmmac_priv *priv = netdev_priv(dev);
2883
+ int bfsize = 0;
26142884 u32 chan;
26152885 int ret;
26162886
2617
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2618
- priv->hw->pcs != STMMAC_PCS_TBI &&
2619
- priv->hw->pcs != STMMAC_PCS_RTBI) {
2887
+ ret = pm_runtime_get_sync(priv->device);
2888
+ if (ret < 0) {
2889
+ pm_runtime_put_noidle(priv->device);
2890
+ return ret;
2891
+ }
2892
+
2893
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
2894
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
2895
+ priv->hw->xpcs == NULL) {
26202896 ret = stmmac_init_phy(dev);
26212897 if (ret) {
26222898 netdev_err(priv->dev,
26232899 "%s: Cannot attach to PHY (error: %d)\n",
26242900 __func__, ret);
2625
- return ret;
2901
+ goto init_phy_error;
26262902 }
26272903 }
26282904
....@@ -2630,8 +2906,34 @@
26302906 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
26312907 priv->xstats.threshold = tc;
26322908
2633
- priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2909
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2910
+ if (bfsize < 0)
2911
+ bfsize = 0;
2912
+
2913
+ if (bfsize < BUF_SIZE_16KiB)
2914
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2915
+
2916
+ priv->dma_buf_sz = bfsize;
2917
+ buf_sz = bfsize;
2918
+
26342919 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2920
+
2921
+ if (!priv->dma_tx_size)
2922
+ priv->dma_tx_size = priv->plat->dma_tx_size ? priv->plat->dma_tx_size :
2923
+ DMA_DEFAULT_TX_SIZE;
2924
+
2925
+ if (!priv->dma_rx_size)
2926
+ priv->dma_rx_size = priv->plat->dma_rx_size ? priv->plat->dma_rx_size :
2927
+ DMA_DEFAULT_RX_SIZE;
2928
+
2929
+ /* Earlier check for TBS */
2930
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2931
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2932
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2933
+
2934
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
2935
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2936
+ }
26352937
26362938 ret = alloc_dma_desc_resources(priv);
26372939 if (ret < 0) {
....@@ -2647,16 +2949,26 @@
26472949 goto init_error;
26482950 }
26492951
2952
+ if (priv->plat->serdes_powerup) {
2953
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
2954
+ if (ret < 0) {
2955
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
2956
+ __func__);
2957
+ goto init_error;
2958
+ }
2959
+ }
2960
+
26502961 ret = stmmac_hw_setup(dev, true);
26512962 if (ret < 0) {
26522963 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
26532964 goto init_error;
26542965 }
26552966
2656
- stmmac_init_tx_coalesce(priv);
2967
+ stmmac_init_coalesce(priv);
26572968
2658
- if (dev->phydev)
2659
- phy_start(dev->phydev);
2969
+ phylink_start(priv->phylink);
2970
+ /* We may have called phylink_speed_down before */
2971
+ phylink_speed_up(priv->phylink);
26602972
26612973 /* Request the IRQ lines */
26622974 ret = request_irq(dev->irq, stmmac_interrupt,
....@@ -2703,8 +3015,7 @@
27033015 wolirq_error:
27043016 free_irq(dev->irq, dev);
27053017 irq_error:
2706
- if (dev->phydev)
2707
- phy_stop(dev->phydev);
3018
+ phylink_stop(priv->phylink);
27083019
27093020 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27103021 del_timer_sync(&priv->tx_queue[chan].txtimer);
....@@ -2713,9 +3024,9 @@
27133024 init_error:
27143025 free_dma_desc_resources(priv);
27153026 dma_desc_error:
2716
- if (dev->phydev)
2717
- phy_disconnect(dev->phydev);
2718
-
3027
+ phylink_disconnect_phy(priv->phylink);
3028
+init_phy_error:
3029
+ pm_runtime_put(priv->device);
27193030 return ret;
27203031 }
27213032
....@@ -2730,14 +3041,14 @@
27303041 struct stmmac_priv *priv = netdev_priv(dev);
27313042 u32 chan;
27323043
3044
+ if (device_may_wakeup(priv->device))
3045
+ phylink_speed_down(priv->phylink, false);
27333046 /* Stop and disconnect the PHY */
2734
- if (dev->phydev) {
2735
- phy_stop(dev->phydev);
2736
- phy_disconnect(dev->phydev);
2737
- if (priv->plat->integrated_phy_power)
2738
- priv->plat->integrated_phy_power(priv->plat->bsp_priv,
2739
- false);
2740
- }
3047
+ phylink_stop(priv->phylink);
3048
+ phylink_disconnect_phy(priv->phylink);
3049
+
3050
+ if (priv->plat->integrated_phy_power)
3051
+ priv->plat->integrated_phy_power(priv->plat->bsp_priv, false);
27413052
27423053 stmmac_disable_all_queues(priv);
27433054
....@@ -2765,12 +3076,48 @@
27653076 /* Disable the MAC Rx/Tx */
27663077 stmmac_mac_set(priv, priv->ioaddr, false);
27673078
3079
+ /* Power down the SerDes, if present */
3080
+ if (priv->plat->serdes_powerdown)
3081
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3082
+
27683083 netif_carrier_off(dev);
27693084
2770
- if (IS_ENABLED(CONFIG_STMMAC_PTP))
2771
- stmmac_release_ptp(priv);
3085
+ stmmac_release_ptp(priv);
3086
+
3087
+ pm_runtime_put(priv->device);
27723088
27733089 return 0;
3090
+}
3091
+
3092
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3093
+ struct stmmac_tx_queue *tx_q)
3094
+{
3095
+ u16 tag = 0x0, inner_tag = 0x0;
3096
+ u32 inner_type = 0x0;
3097
+ struct dma_desc *p;
3098
+
3099
+ if (!priv->dma_cap.vlins)
3100
+ return false;
3101
+ if (!skb_vlan_tag_present(skb))
3102
+ return false;
3103
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3104
+ inner_tag = skb_vlan_tag_get(skb);
3105
+ inner_type = STMMAC_VLAN_INSERT;
3106
+ }
3107
+
3108
+ tag = skb_vlan_tag_get(skb);
3109
+
3110
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3111
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3112
+ else
3113
+ p = &tx_q->dma_tx[tx_q->cur_tx];
3114
+
3115
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3116
+ return false;
3117
+
3118
+ stmmac_set_tx_owner(priv, p);
3119
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3120
+ return true;
27743121 }
27753122
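stmmac_vlan_insert() above advances cur_tx with STMMAC_GET_ENTRY() against the run-time ring size (priv->dma_tx_size); the same advance replaces the old fixed DMA_TX_SIZE throughout this file. A tiny illustration of the wrap-around, assuming the macro is the usual (x + 1) & (size - 1) power-of-two mask (the macro body itself is not shown in this diff):

#include <stdio.h>

#define GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))	/* assumed form of STMMAC_GET_ENTRY */

int main(void)
{
	unsigned int size = 512;	/* now priv->dma_tx_size instead of DMA_TX_SIZE */
	unsigned int cur = 510;

	for (int i = 0; i < 4; i++) {
		printf("cur_tx = %u\n", cur);
		cur = GET_ENTRY(cur, size);
	}
	/* prints 510, 511, 0, 1 -- the index wraps at the ring size */
	return 0;
}
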
27763123 /**
....@@ -2778,13 +3125,13 @@
27783125 * @priv: driver private structure
27793126 * @des: buffer start address
27803127 * @total_len: total length to fill in descriptors
2781
- * @last_segmant: condition for the last descriptor
3128
+ * @last_segment: condition for the last descriptor
27823129 * @queue: TX queue index
27833130 * Description:
27843131 * This function fills descriptor and request new descriptors according to
27853132 * buffer length to fill
27863133 */
2787
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
3134
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
27883135 int total_len, bool last_segment, u32 queue)
27893136 {
27903137 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
....@@ -2795,11 +3142,23 @@
27953142 tmp_len = total_len;
27963143
27973144 while (tmp_len > 0) {
2798
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2799
- WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2800
- desc = tx_q->dma_tx + tx_q->cur_tx;
3145
+ dma_addr_t curr_addr;
28013146
2802
- desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
3147
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3148
+ priv->dma_tx_size);
3149
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3150
+
3151
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3152
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3153
+ else
3154
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
3155
+
3156
+ curr_addr = des + (total_len - tmp_len);
3157
+ if (priv->dma_cap.addr64 <= 32)
3158
+ desc->des0 = cpu_to_le32(curr_addr);
3159
+ else
3160
+ stmmac_set_desc_addr(priv, desc, curr_addr);
3161
+
28033162 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
28043163 TSO_MAX_BUFF_SIZE : tmp_len;
28053164
....@@ -2843,16 +3202,19 @@
28433202 {
28443203 struct dma_desc *desc, *first, *mss_desc = NULL;
28453204 struct stmmac_priv *priv = netdev_priv(dev);
3205
+ int desc_size, tmp_pay_len = 0, first_tx;
28463206 int nfrags = skb_shinfo(skb)->nr_frags;
28473207 u32 queue = skb_get_queue_mapping(skb);
2848
- unsigned int first_entry, des;
2849
- u8 proto_hdr_len, hdr;
3208
+ unsigned int first_entry, tx_packets;
28503209 struct stmmac_tx_queue *tx_q;
2851
- int tmp_pay_len = 0;
3210
+ bool has_vlan, set_ic;
3211
+ u8 proto_hdr_len, hdr;
28523212 u32 pay_len, mss;
3213
+ dma_addr_t des;
28533214 int i;
28543215
28553216 tx_q = &priv->tx_queue[queue];
3217
+ first_tx = tx_q->cur_tx;
28563218
28573219 /* Compute header lengths */
28583220 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
....@@ -2883,10 +3245,15 @@
28833245
28843246 /* set new MSS value if needed */
28853247 if (mss != tx_q->mss) {
2886
- mss_desc = tx_q->dma_tx + tx_q->cur_tx;
3248
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3249
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3250
+ else
3251
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3252
+
28873253 stmmac_set_mss(priv, mss_desc, mss);
28883254 tx_q->mss = mss;
2889
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3255
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3256
+ priv->dma_tx_size);
28903257 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
28913258 }
28923259
....@@ -2897,11 +3264,20 @@
28973264 skb->data_len);
28983265 }
28993266
3267
+ /* Check if VLAN can be inserted by HW */
3268
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3269
+
29003270 first_entry = tx_q->cur_tx;
29013271 WARN_ON(tx_q->tx_skbuff[first_entry]);
29023272
2903
- desc = tx_q->dma_tx + first_entry;
3273
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3274
+ desc = &tx_q->dma_entx[first_entry].basic;
3275
+ else
3276
+ desc = &tx_q->dma_tx[first_entry];
29043277 first = desc;
3278
+
3279
+ if (has_vlan)
3280
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
29053281
29063282 /* first descriptor: fill Headers on Buf1 */
29073283 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
....@@ -2912,14 +3288,21 @@
29123288 tx_q->tx_skbuff_dma[first_entry].buf = des;
29133289 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
29143290
2915
- first->des0 = cpu_to_le32(des);
3291
+ if (priv->dma_cap.addr64 <= 32) {
3292
+ first->des0 = cpu_to_le32(des);
29163293
2917
- /* Fill start of payload in buff2 of first descriptor */
2918
- if (pay_len)
2919
- first->des1 = cpu_to_le32(des + proto_hdr_len);
3294
+ /* Fill start of payload in buff2 of first descriptor */
3295
+ if (pay_len)
3296
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
29203297
2921
- /* If needed take extra descriptors to fill the remaining payload */
2922
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3298
+ /* If needed take extra descriptors to fill the remaining payload */
3299
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3300
+ } else {
3301
+ stmmac_set_desc_addr(priv, first, des);
3302
+ tmp_pay_len = pay_len;
3303
+ des += proto_hdr_len;
3304
+ pay_len = 0;
3305
+ }
29233306
29243307 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
29253308
....@@ -2946,12 +3329,38 @@
29463329 /* Only the last descriptor gets to point to the skb. */
29473330 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
29483331
3332
+ /* Manage tx mitigation */
3333
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
3334
+ tx_q->tx_count_frames += tx_packets;
3335
+
3336
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3337
+ set_ic = true;
3338
+ else if (!priv->tx_coal_frames)
3339
+ set_ic = false;
3340
+ else if (tx_packets > priv->tx_coal_frames)
3341
+ set_ic = true;
3342
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3343
+ set_ic = true;
3344
+ else
3345
+ set_ic = false;
3346
+
3347
+ if (set_ic) {
3348
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3349
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3350
+ else
3351
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
3352
+
3353
+ tx_q->tx_count_frames = 0;
3354
+ stmmac_set_tx_ic(priv, desc);
3355
+ priv->xstats.tx_set_ic_bit++;
3356
+ }
3357
+
29493358 /* We've used all descriptors we need for this skb, however,
29503359 * advance cur_tx so that it references a fresh descriptor.
29513360 * ndo_start_xmit will fill this descriptor the next time it's
29523361 * called and stmmac_tx_clean may clean up to this descriptor.
29533362 */
2954
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3363
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
29553364
29563365 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
29573366 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
....@@ -2963,18 +3372,8 @@
29633372 priv->xstats.tx_tso_frames++;
29643373 priv->xstats.tx_tso_nfrags += nfrags;
29653374
2966
- /* Manage tx mitigation */
2967
- tx_q->tx_count_frames += nfrags + 1;
2968
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2969
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2970
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2971
- priv->hwts_tx_en)) {
2972
- stmmac_tx_timer_arm(priv, queue);
2973
- } else {
2974
- tx_q->tx_count_frames = 0;
2975
- stmmac_set_tx_ic(priv, desc);
2976
- priv->xstats.tx_set_ic_bit++;
2977
- }
3375
+ if (priv->sarc_type)
3376
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
29783377
29793378 skb_tx_timestamp(skb);
29803379
....@@ -3013,16 +3412,18 @@
30133412 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
30143413 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
30153414 tx_q->cur_tx, first, nfrags);
3016
-
3017
- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3018
-
30193415 pr_info(">>> frame to be transmitted: ");
30203416 print_pkt(skb->data, skb_headlen(skb));
30213417 }
30223418
30233419 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
30243420
3025
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3421
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
3422
+ desc_size = sizeof(struct dma_edesc);
3423
+ else
3424
+ desc_size = sizeof(struct dma_desc);
3425
+
3426
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
30263427 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
30273428 stmmac_tx_timer_arm(priv, queue);
30283429
....@@ -3045,20 +3446,22 @@
30453446 */
30463447 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
30473448 {
3449
+ unsigned int first_entry, tx_packets, enh_desc;
30483450 struct stmmac_priv *priv = netdev_priv(dev);
30493451 unsigned int nopaged_len = skb_headlen(skb);
30503452 int i, csum_insertion = 0, is_jumbo = 0;
30513453 u32 queue = skb_get_queue_mapping(skb);
30523454 int nfrags = skb_shinfo(skb)->nr_frags;
30533455 int gso = skb_shinfo(skb)->gso_type;
3054
- int entry;
3055
- unsigned int first_entry;
3456
+ struct dma_edesc *tbs_desc = NULL;
3457
+ int entry, desc_size, first_tx;
30563458 struct dma_desc *desc, *first;
30573459 struct stmmac_tx_queue *tx_q;
3058
- unsigned int enh_desc;
3059
- unsigned int des;
3460
+ bool has_vlan, set_ic;
3461
+ dma_addr_t des;
30603462
30613463 tx_q = &priv->tx_queue[queue];
3464
+ first_tx = tx_q->cur_tx;
30623465
30633466 if (priv->tx_path_in_lpi_mode)
30643467 stmmac_disable_eee_mode(priv);
....@@ -3083,6 +3486,9 @@
30833486 return NETDEV_TX_BUSY;
30843487 }
30853488
3489
+ /* Check if VLAN can be inserted by HW */
3490
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3491
+
30863492 entry = tx_q->cur_tx;
30873493 first_entry = entry;
30883494 WARN_ON(tx_q->tx_skbuff[first_entry]);
....@@ -3091,10 +3497,15 @@
30913497
30923498 if (likely(priv->extend_desc))
30933499 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3500
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3501
+ desc = &tx_q->dma_entx[entry].basic;
30943502 else
30953503 desc = tx_q->dma_tx + entry;
30963504
30973505 first = desc;
3506
+
3507
+ if (has_vlan)
3508
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
30983509
30993510 enh_desc = priv->plat->enh_desc;
31003511 /* To program the descriptors according to the size of the frame */
....@@ -3112,11 +3523,13 @@
31123523 int len = skb_frag_size(frag);
31133524 bool last_segment = (i == (nfrags - 1));
31143525
3115
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3526
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
31163527 WARN_ON(tx_q->tx_skbuff[entry]);
31173528
31183529 if (likely(priv->extend_desc))
31193530 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3531
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3532
+ desc = &tx_q->dma_entx[entry].basic;
31203533 else
31213534 desc = tx_q->dma_tx + entry;
31223535
....@@ -3141,28 +3554,51 @@
31413554 /* Only the last descriptor gets to point to the skb. */
31423555 tx_q->tx_skbuff[entry] = skb;
31433556
3557
+ /* According to the coalesce parameter the IC bit for the latest
3558
+ * segment is reset and the timer re-started to clean the tx status.
3559
+ * This approach takes care of the fragments: desc is the first
3560
+ * element in case of no SG.
3561
+ */
3562
+ tx_packets = (entry + 1) - first_tx;
3563
+ tx_q->tx_count_frames += tx_packets;
3564
+
3565
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3566
+ set_ic = true;
3567
+ else if (!priv->tx_coal_frames)
3568
+ set_ic = false;
3569
+ else if (tx_packets > priv->tx_coal_frames)
3570
+ set_ic = true;
3571
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3572
+ set_ic = true;
3573
+ else
3574
+ set_ic = false;
3575
+
3576
+ if (set_ic) {
3577
+ if (likely(priv->extend_desc))
3578
+ desc = &tx_q->dma_etx[entry].basic;
3579
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3580
+ desc = &tx_q->dma_entx[entry].basic;
3581
+ else
3582
+ desc = &tx_q->dma_tx[entry];
3583
+
3584
+ tx_q->tx_count_frames = 0;
3585
+ stmmac_set_tx_ic(priv, desc);
3586
+ priv->xstats.tx_set_ic_bit++;
3587
+ }
3588
+
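The comment above explains when the IC (interrupt on completion) bit is requested instead of leaving the cleanup to the coalescing timer; the TSO path earlier in this diff uses the same ladder. A rough stand-alone model of that decision (plain user-space C; the 25-frame threshold and the one-packet-per-submission loop are illustrative assumptions, and the hwts flag stands in for the SKBTX_HW_TSTAMP && hwts_tx_en test):

#include <stdbool.h>
#include <stdio.h>

/* Model of the set_ic ladder: interrupt when a timestamp is pending, when a
 * single submission alone exceeds the threshold, or when the running frame
 * counter crosses a multiple of tx_coal_frames; otherwise rely on the timer.
 */
static bool tx_want_irq(bool hwts, unsigned int tx_packets,
			unsigned int count_frames, unsigned int coal_frames)
{
	if (hwts)
		return true;
	if (!coal_frames)
		return false;			/* coalescing disabled: timer only */
	if (tx_packets > coal_frames)
		return true;
	return (count_frames % coal_frames) < tx_packets;
}

int main(void)
{
	unsigned int coal = 25, frames = 0;

	for (unsigned int pkt = 1; pkt <= 60; pkt++) {
		frames += 1;			/* one "descriptor" per packet in this toy */
		if (tx_want_irq(false, 1, frames, coal)) {
			printf("IC set at packet %u\n", pkt);
			frames = 0;		/* the driver resets tx_count_frames too */
		}
	}
	/* with these inputs the IC bit is requested at packets 25 and 50 */
	return 0;
}
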
31443589 /* We've used all descriptors we need for this skb, however,
31453590 * advance cur_tx so that it references a fresh descriptor.
31463591 * ndo_start_xmit will fill this descriptor the next time it's
31473592 * called and stmmac_tx_clean may clean up to this descriptor.
31483593 */
3149
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3594
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
31503595 tx_q->cur_tx = entry;
31513596
31523597 if (netif_msg_pktdata(priv)) {
3153
- void *tx_head;
3154
-
31553598 netdev_dbg(priv->dev,
31563599 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
31573600 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
31583601 entry, first, nfrags);
3159
-
3160
- if (priv->extend_desc)
3161
- tx_head = (void *)tx_q->dma_etx;
3162
- else
3163
- tx_head = (void *)tx_q->dma_tx;
3164
-
3165
- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
31663602
31673603 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
31683604 print_pkt(skb->data, skb->len);
....@@ -3176,22 +3612,8 @@
31763612
31773613 dev->stats.tx_bytes += skb->len;
31783614
3179
- /* According to the coalesce parameter the IC bit for the latest
3180
- * segment is reset and the timer re-started to clean the tx status.
3181
- * This approach takes care about the fragments: desc is the first
3182
- * element in case of no SG.
3183
- */
3184
- tx_q->tx_count_frames += nfrags + 1;
3185
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3186
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3187
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3188
- priv->hwts_tx_en)) {
3189
- stmmac_tx_timer_arm(priv, queue);
3190
- } else {
3191
- tx_q->tx_count_frames = 0;
3192
- stmmac_set_tx_ic(priv, desc);
3193
- priv->xstats.tx_set_ic_bit++;
3194
- }
3615
+ if (priv->sarc_type)
3616
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
31953617
31963618 skb_tx_timestamp(skb);
31973619
....@@ -3223,11 +3645,18 @@
32233645
32243646 /* Prepare the first descriptor setting the OWN bit too */
32253647 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3226
- csum_insertion, priv->mode, 1, last_segment,
3648
+ csum_insertion, priv->mode, 0, last_segment,
32273649 skb->len);
3228
- } else {
3229
- stmmac_set_tx_owner(priv, first);
32303650 }
3651
+
3652
+ if (tx_q->tbs & STMMAC_TBS_EN) {
3653
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3654
+
3655
+ tbs_desc = &tx_q->dma_entx[first_entry];
3656
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3657
+ }
3658
+
3659
+ stmmac_set_tx_owner(priv, first);
32313660
32323661 /* The own bit must be the latest setting done when prepare the
32333662 * descriptor and then barrier is needed to make sure that
....@@ -3239,7 +3668,14 @@
32393668
32403669 stmmac_enable_dma_transmission(priv, priv->ioaddr);
32413670
3242
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3671
+ if (likely(priv->extend_desc))
3672
+ desc_size = sizeof(struct dma_extended_desc);
3673
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3674
+ desc_size = sizeof(struct dma_edesc);
3675
+ else
3676
+ desc_size = sizeof(struct dma_desc);
3677
+
3678
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
32433679 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32443680 stmmac_tx_timer_arm(priv, queue);
32453681
....@@ -3273,15 +3709,6 @@
32733709 }
32743710 }
32753711
3276
-
3277
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3278
-{
3279
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3280
- return 0;
3281
-
3282
- return 1;
3283
-}
3284
-
32853712 /**
32863713 * stmmac_rx_refill - refill used skb preallocated buffers
32873714 * @priv: driver private structure
....@@ -3292,63 +3719,115 @@
32923719 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32933720 {
32943721 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3295
- int dirty = stmmac_rx_dirty(priv, queue);
3722
+ int len, dirty = stmmac_rx_dirty(priv, queue);
32963723 unsigned int entry = rx_q->dirty_rx;
3724
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
32973725
3298
- int bfsize = priv->dma_buf_sz;
3726
+ if (priv->dma_cap.addr64 <= 32)
3727
+ gfp |= GFP_DMA32;
3728
+
3729
+ len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
32993730
33003731 while (dirty-- > 0) {
3732
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
33013733 struct dma_desc *p;
3734
+ bool use_rx_wd;
33023735
33033736 if (priv->extend_desc)
33043737 p = (struct dma_desc *)(rx_q->dma_erx + entry);
33053738 else
33063739 p = rx_q->dma_rx + entry;
33073740
3308
- if (likely(!rx_q->rx_skbuff[entry])) {
3309
- struct sk_buff *skb;
3310
-
3311
- skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3312
- if (unlikely(!skb)) {
3313
- /* so for a while no zero-copy! */
3314
- rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3315
- if (unlikely(net_ratelimit()))
3316
- dev_err(priv->device,
3317
- "fail to alloc skb entry %d\n",
3318
- entry);
3741
+ if (!buf->page) {
3742
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3743
+ if (!buf->page)
33193744 break;
3320
- }
3321
-
3322
- rx_q->rx_skbuff[entry] = skb;
3323
- rx_q->rx_skbuff_dma[entry] =
3324
- dma_map_single(priv->device, skb->data, bfsize,
3325
- DMA_FROM_DEVICE);
3326
- if (dma_mapping_error(priv->device,
3327
- rx_q->rx_skbuff_dma[entry])) {
3328
- netdev_err(priv->dev, "Rx DMA map failed\n");
3329
- dev_kfree_skb(skb);
3330
- break;
3331
- }
3332
-
3333
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3334
- stmmac_refill_desc3(priv, rx_q, p);
3335
-
3336
- if (rx_q->rx_zeroc_thresh > 0)
3337
- rx_q->rx_zeroc_thresh--;
3338
-
3339
- netif_dbg(priv, rx_status, priv->dev,
3340
- "refill entry #%d\n", entry);
33413745 }
3342
- dma_wmb();
33433746
3344
- stmmac_set_rx_owner(priv, p, priv->use_riwt);
3747
+ if (priv->sph && !buf->sec_page) {
3748
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3749
+ if (!buf->sec_page)
3750
+ break;
3751
+
3752
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3753
+ }
3754
+
3755
+ buf->addr = page_pool_get_dma_addr(buf->page);
3756
+ stmmac_set_desc_addr(priv, p, buf->addr);
3757
+ if (priv->sph)
3758
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3759
+ else
3760
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
3761
+ stmmac_refill_desc3(priv, rx_q, p);
3762
+
3763
+ rx_q->rx_count_frames++;
3764
+ rx_q->rx_count_frames += priv->rx_coal_frames;
3765
+ if (rx_q->rx_count_frames > priv->rx_coal_frames)
3766
+ rx_q->rx_count_frames = 0;
3767
+
3768
+ use_rx_wd = !priv->rx_coal_frames;
3769
+ use_rx_wd |= rx_q->rx_count_frames > 0;
3770
+ if (!priv->use_riwt)
3771
+ use_rx_wd = false;
33453772
33463773 dma_wmb();
3774
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
33473775
3348
- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3776
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
33493777 }
33503778 rx_q->dirty_rx = entry;
3779
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3780
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
33513781 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3782
+}
3783
+
3784
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3785
+ struct dma_desc *p,
3786
+ int status, unsigned int len)
3787
+{
3788
+ unsigned int plen = 0, hlen = 0;
3789
+ int coe = priv->hw->rx_csum;
3790
+
3791
+ /* Not first descriptor, buffer is always zero */
3792
+ if (priv->sph && len)
3793
+ return 0;
3794
+
3795
+ /* First descriptor, get split header length */
3796
+ stmmac_get_rx_header_len(priv, p, &hlen);
3797
+ if (priv->sph && hlen) {
3798
+ priv->xstats.rx_split_hdr_pkt_n++;
3799
+ return hlen;
3800
+ }
3801
+
3802
+ /* First descriptor, not last descriptor and not split header */
3803
+ if (status & rx_not_ls)
3804
+ return priv->dma_buf_sz;
3805
+
3806
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
3807
+
3808
+ /* First descriptor and last descriptor and not split header */
3809
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
3810
+}
3811
+
3812
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3813
+ struct dma_desc *p,
3814
+ int status, unsigned int len)
3815
+{
3816
+ int coe = priv->hw->rx_csum;
3817
+ unsigned int plen = 0;
3818
+
3819
+ /* Not split header, buffer is not available */
3820
+ if (!priv->sph)
3821
+ return 0;
3822
+
3823
+ /* Not last descriptor */
3824
+ if (status & rx_not_ls)
3825
+ return priv->dma_buf_sz;
3826
+
3827
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
3828
+
3829
+ /* Last descriptor */
3830
+ return plen - len;
33523831 }
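stmmac_rx_buf1_len() and stmmac_rx_buf2_len() above decide how many received bytes sit in each of the two descriptor buffers. A rough user-space model of the split-header case, with illustrative numbers (hlen, plen and the 4 KiB buffer stand in for what the driver reads back from the RX descriptor; the FCS trimming done later in stmmac_rx() is not modelled):

#include <stdbool.h>
#include <stdio.h>

static unsigned int buf1_len(bool sph, unsigned int hlen, unsigned int plen,
			     bool last, unsigned int buf_sz, unsigned int len)
{
	if (sph && len)
		return 0;			/* not the first descriptor */
	if (sph && hlen)
		return hlen;			/* headers split into buffer 1 */
	if (!last)
		return buf_sz;			/* middle descriptor: full buffer */
	return plen < buf_sz ? plen : buf_sz;	/* single-descriptor frame */
}

static unsigned int buf2_len(bool sph, unsigned int plen, bool last,
			     unsigned int buf_sz, unsigned int len)
{
	if (!sph)
		return 0;			/* buffer 2 is only used with SPH */
	if (!last)
		return buf_sz;
	return plen - len;			/* last descriptor: remaining payload */
}

int main(void)
{
	unsigned int buf_sz = 4096, hlen = 66, plen = 1514, len = 0;
	unsigned int b1 = buf1_len(true, hlen, plen, true, buf_sz, len);

	len += b1;
	printf("buf1=%u buf2=%u\n", b1, buf2_len(true, plen, true, buf_sz, len));
	/* prints buf1=66 buf2=1448: headers land in buffer 1, payload in buffer 2 */
	return 0;
}
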
33533832
33543833 /**
....@@ -3363,30 +3842,54 @@
33633842 {
33643843 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33653844 struct stmmac_channel *ch = &priv->channel[queue];
3845
+ unsigned int count = 0, error = 0, len = 0;
3846
+ int status = 0, coe = priv->hw->rx_csum;
33663847 unsigned int next_entry = rx_q->cur_rx;
3367
- int coe = priv->hw->rx_csum;
3368
- unsigned int count = 0;
3369
- bool xmac;
3370
-
3371
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3848
+ unsigned int desc_size;
3849
+ struct sk_buff *skb = NULL;
33723850
33733851 if (netif_msg_rx_status(priv)) {
33743852 void *rx_head;
33753853
33763854 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3377
- if (priv->extend_desc)
3855
+ if (priv->extend_desc) {
33783856 rx_head = (void *)rx_q->dma_erx;
3379
- else
3857
+ desc_size = sizeof(struct dma_extended_desc);
3858
+ } else {
33803859 rx_head = (void *)rx_q->dma_rx;
3860
+ desc_size = sizeof(struct dma_desc);
3861
+ }
33813862
3382
- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3863
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3864
+ rx_q->dma_rx_phy, desc_size);
33833865 }
33843866 while (count < limit) {
3385
- int entry, status;
3386
- struct dma_desc *p;
3387
- struct dma_desc *np;
3867
+ unsigned int buf1_len = 0, buf2_len = 0;
3868
+ enum pkt_hash_types hash_type;
3869
+ struct stmmac_rx_buffer *buf;
3870
+ struct dma_desc *np, *p;
3871
+ int entry;
3872
+ u32 hash;
33883873
3874
+ if (!count && rx_q->state_saved) {
3875
+ skb = rx_q->state.skb;
3876
+ error = rx_q->state.error;
3877
+ len = rx_q->state.len;
3878
+ } else {
3879
+ rx_q->state_saved = false;
3880
+ skb = NULL;
3881
+ error = 0;
3882
+ len = 0;
3883
+ }
3884
+
3885
+ if ((count >= limit - 1) && limit > 1)
3886
+ break;
3887
+
3888
+read_again:
3889
+ buf1_len = 0;
3890
+ buf2_len = 0;
33893891 entry = next_entry;
3892
+ buf = &rx_q->buf_pool[entry];
33903893
33913894 if (priv->extend_desc)
33923895 p = (struct dma_desc *)(rx_q->dma_erx + entry);
....@@ -3400,9 +3903,8 @@
34003903 if (unlikely(status & dma_own))
34013904 break;
34023905
3403
- count++;
3404
-
3405
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3906
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3907
+ priv->dma_rx_size);
34063908 next_entry = rx_q->cur_rx;
34073909
34083910 if (priv->extend_desc)
....@@ -3416,133 +3918,126 @@
34163918 stmmac_rx_extended_status(priv, &priv->dev->stats,
34173919 &priv->xstats, rx_q->dma_erx + entry);
34183920 if (unlikely(status == discard_frame)) {
3419
- priv->dev->stats.rx_errors++;
3420
- if (priv->hwts_rx_en && !priv->extend_desc) {
3421
- /* DESC2 & DESC3 will be overwritten by device
3422
- * with timestamp value, hence reinitialize
3423
- * them in stmmac_rx_refill() function so that
3424
- * device can reuse it.
3425
- */
3426
- dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3427
- rx_q->rx_skbuff[entry] = NULL;
3428
- dma_unmap_single(priv->device,
3429
- rx_q->rx_skbuff_dma[entry],
3430
- priv->dma_buf_sz,
3431
- DMA_FROM_DEVICE);
3432
- }
3433
- } else {
3434
- struct sk_buff *skb;
3435
- int frame_len;
3436
- unsigned int des;
3437
-
3438
- stmmac_get_desc_addr(priv, p, &des);
3439
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3440
-
3441
- /* If frame length is greater than skb buffer size
3442
- * (preallocated during init) then the packet is
3443
- * ignored
3444
- */
3445
- if (frame_len > priv->dma_buf_sz) {
3446
- if (net_ratelimit())
3447
- netdev_err(priv->dev,
3448
- "len %d larger than size (%d)\n",
3449
- frame_len, priv->dma_buf_sz);
3450
- priv->dev->stats.rx_length_errors++;
3451
- continue;
3452
- }
3453
-
3454
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3455
- * Type frames (LLC/LLC-SNAP)
3456
- *
3457
- * llc_snap is never checked in GMAC >= 4, so this ACS
3458
- * feature is always disabled and packets need to be
3459
- * stripped manually.
3460
- */
3461
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3462
- unlikely(status != llc_snap))
3463
- frame_len -= ETH_FCS_LEN;
3464
-
3465
- if (netif_msg_rx_status(priv)) {
3466
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3467
- p, entry, des);
3468
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3469
- frame_len, status);
3470
- }
3471
-
3472
- /* The zero-copy is always used for all the sizes
3473
- * in case of GMAC4 because it needs
3474
- * to refill the used descriptors, always.
3475
- */
3476
- if (unlikely(!xmac &&
3477
- ((frame_len < priv->rx_copybreak) ||
3478
- stmmac_rx_threshold_count(rx_q)))) {
3479
- skb = netdev_alloc_skb_ip_align(priv->dev,
3480
- frame_len);
3481
- if (unlikely(!skb)) {
3482
- if (net_ratelimit())
3483
- dev_warn(priv->device,
3484
- "packet dropped\n");
3485
- priv->dev->stats.rx_dropped++;
3486
- continue;
3487
- }
3488
-
3489
- dma_sync_single_for_cpu(priv->device,
3490
- rx_q->rx_skbuff_dma
3491
- [entry], frame_len,
3492
- DMA_FROM_DEVICE);
3493
- skb_copy_to_linear_data(skb,
3494
- rx_q->
3495
- rx_skbuff[entry]->data,
3496
- frame_len);
3497
-
3498
- skb_put(skb, frame_len);
3499
- dma_sync_single_for_device(priv->device,
3500
- rx_q->rx_skbuff_dma
3501
- [entry], frame_len,
3502
- DMA_FROM_DEVICE);
3503
- } else {
3504
- skb = rx_q->rx_skbuff[entry];
3505
- if (unlikely(!skb)) {
3506
- if (net_ratelimit())
3507
- netdev_err(priv->dev,
3508
- "%s: Inconsistent Rx chain\n",
3509
- priv->dev->name);
3510
- priv->dev->stats.rx_dropped++;
3511
- continue;
3512
- }
3513
- prefetch(skb->data - NET_IP_ALIGN);
3514
- rx_q->rx_skbuff[entry] = NULL;
3515
- rx_q->rx_zeroc_thresh++;
3516
-
3517
- skb_put(skb, frame_len);
3518
- dma_unmap_single(priv->device,
3519
- rx_q->rx_skbuff_dma[entry],
3520
- priv->dma_buf_sz,
3521
- DMA_FROM_DEVICE);
3522
- }
3523
-
3524
- if (netif_msg_pktdata(priv)) {
3525
- netdev_dbg(priv->dev, "frame received (%dbytes)",
3526
- frame_len);
3527
- print_pkt(skb->data, frame_len);
3528
- }
3529
-
3530
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
3531
-
3532
- stmmac_rx_vlan(priv->dev, skb);
3533
-
3534
- skb->protocol = eth_type_trans(skb, priv->dev);
3535
-
3536
- if (unlikely(!coe))
3537
- skb_checksum_none_assert(skb);
3538
- else
3539
- skb->ip_summed = CHECKSUM_UNNECESSARY;
3540
-
3541
- napi_gro_receive(&ch->napi, skb);
3542
-
3543
- priv->dev->stats.rx_packets++;
3544
- priv->dev->stats.rx_bytes += frame_len;
3921
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
3922
+ buf->page = NULL;
3923
+ error = 1;
3924
+ if (!priv->hwts_rx_en)
3925
+ priv->dev->stats.rx_errors++;
35453926 }
3927
+
3928
+ if (unlikely(error && (status & rx_not_ls)))
3929
+ goto read_again;
3930
+ if (unlikely(error)) {
3931
+ dev_kfree_skb(skb);
3932
+ skb = NULL;
3933
+ count++;
3934
+ continue;
3935
+ }
3936
+
3937
+ /* Buffer is good. Go on. */
3938
+
3939
+ prefetch(page_address(buf->page));
3940
+ if (buf->sec_page)
3941
+ prefetch(page_address(buf->sec_page));
3942
+
3943
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3944
+ len += buf1_len;
3945
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3946
+ len += buf2_len;
3947
+
3948
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3949
+ * Type frames (LLC/LLC-SNAP)
3950
+ *
3951
+ * llc_snap is never checked in GMAC >= 4, so this ACS
3952
+ * feature is always disabled and packets need to be
3953
+ * stripped manually.
3954
+ */
3955
+ if (likely(!(status & rx_not_ls)) &&
3956
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3957
+ unlikely(status != llc_snap))) {
3958
+ if (buf2_len)
3959
+ buf2_len -= ETH_FCS_LEN;
3960
+ else
3961
+ buf1_len -= ETH_FCS_LEN;
3962
+
3963
+ len -= ETH_FCS_LEN;
3964
+ }
3965
+
3966
+ if (!skb) {
3967
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3968
+ if (!skb) {
3969
+ priv->dev->stats.rx_dropped++;
3970
+ count++;
3971
+ goto drain_data;
3972
+ }
3973
+
3974
+ dma_sync_single_for_cpu(priv->device, buf->addr,
3975
+ buf1_len, DMA_FROM_DEVICE);
3976
+ skb_copy_to_linear_data(skb, page_address(buf->page),
3977
+ buf1_len);
3978
+ skb_put(skb, buf1_len);
3979
+
3980
+ /* Data payload copied into SKB, page ready for recycle */
3981
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
3982
+ buf->page = NULL;
3983
+ } else if (buf1_len) {
3984
+ dma_sync_single_for_cpu(priv->device, buf->addr,
3985
+ buf1_len, DMA_FROM_DEVICE);
3986
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3987
+ buf->page, 0, buf1_len,
3988
+ priv->dma_buf_sz);
3989
+
3990
+ /* Data payload appended into SKB */
3991
+ page_pool_release_page(rx_q->page_pool, buf->page);
3992
+ buf->page = NULL;
3993
+ }
3994
+
3995
+ if (buf2_len) {
3996
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3997
+ buf2_len, DMA_FROM_DEVICE);
3998
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3999
+ buf->sec_page, 0, buf2_len,
4000
+ priv->dma_buf_sz);
4001
+
4002
+ /* Data payload appended into SKB */
4003
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
4004
+ buf->sec_page = NULL;
4005
+ }
4006
+
4007
+drain_data:
4008
+ if (likely(status & rx_not_ls))
4009
+ goto read_again;
4010
+ if (!skb)
4011
+ continue;
4012
+
4013
+ /* Got entire packet into SKB. Finish it. */
4014
+
4015
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
4016
+ stmmac_rx_vlan(priv->dev, skb);
4017
+ skb->protocol = eth_type_trans(skb, priv->dev);
4018
+
4019
+ if (unlikely(!coe))
4020
+ skb_checksum_none_assert(skb);
4021
+ else
4022
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
4023
+
4024
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4025
+ skb_set_hash(skb, hash, hash_type);
4026
+
4027
+ skb_record_rx_queue(skb, queue);
4028
+ napi_gro_receive(&ch->rx_napi, skb);
4029
+ skb = NULL;
4030
+
4031
+ priv->dev->stats.rx_packets++;
4032
+ priv->dev->stats.rx_bytes += len;
4033
+ count++;
4034
+ }
4035
+
4036
+ if (status & rx_not_ls || skb) {
4037
+ rx_q->state_saved = true;
4038
+ rx_q->state.skb = skb;
4039
+ rx_q->state.error = error;
4040
+ rx_q->state.len = len;
35464041 }
35474042
35484043 stmmac_rx_refill(priv, queue);
....@@ -3552,40 +4047,47 @@
35524047 return count;
35534048 }
35544049
3555
-/**
3556
- * stmmac_poll - stmmac poll method (NAPI)
3557
- * @napi : pointer to the napi structure.
3558
- * @budget : maximum number of packets that the current CPU can receive from
3559
- * all interfaces.
3560
- * Description :
3561
- * To look at the incoming frames and clear the tx resources.
3562
- */
3563
-static int stmmac_napi_poll(struct napi_struct *napi, int budget)
4050
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
35644051 {
35654052 struct stmmac_channel *ch =
3566
- container_of(napi, struct stmmac_channel, napi);
4053
+ container_of(napi, struct stmmac_channel, rx_napi);
35674054 struct stmmac_priv *priv = ch->priv_data;
3568
- int work_done, rx_done = 0, tx_done = 0;
35694055 u32 chan = ch->index;
4056
+ int work_done;
35704057
35714058 priv->xstats.napi_poll++;
35724059
3573
- if (ch->has_tx)
3574
- tx_done = stmmac_tx_clean(priv, budget, chan);
3575
- if (ch->has_rx)
3576
- rx_done = stmmac_rx(priv, budget, chan);
4060
+ work_done = stmmac_rx(priv, budget, chan);
4061
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
4062
+ unsigned long flags;
35774063
3578
- work_done = max(rx_done, tx_done);
4064
+ spin_lock_irqsave(&ch->lock, flags);
4065
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4066
+ spin_unlock_irqrestore(&ch->lock, flags);
4067
+ }
4068
+
4069
+ return work_done;
4070
+}
4071
+
4072
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4073
+{
4074
+ struct stmmac_channel *ch =
4075
+ container_of(napi, struct stmmac_channel, tx_napi);
4076
+ struct stmmac_priv *priv = ch->priv_data;
4077
+ u32 chan = ch->index;
4078
+ int work_done;
4079
+
4080
+ priv->xstats.napi_poll++;
4081
+
4082
+ work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
35794083 work_done = min(work_done, budget);
35804084
35814085 if (work_done < budget && napi_complete_done(napi, work_done)) {
3582
- int stat;
4086
+ unsigned long flags;
35834087
3584
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3585
- stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3586
- &priv->xstats, chan);
3587
- if (stat && napi_reschedule(napi))
3588
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
4088
+ spin_lock_irqsave(&ch->lock, flags);
4089
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4090
+ spin_unlock_irqrestore(&ch->lock, flags);
35894091 }
35904092
35914093 return work_done;
....@@ -3594,12 +4096,13 @@
35944096 /**
35954097 * stmmac_tx_timeout
35964098 * @dev : Pointer to net device structure
4099
+ * @txqueue: the index of the hanging transmit queue
35974100 * Description: this function is called when a packet transmission fails to
35984101 * complete within a reasonable time. The driver will mark the error in the
35994102 * netdev structure and arrange for the device to be reset to a sane state
36004103 * in order to transmit a new packet.
36014104 */
3602
-static void stmmac_tx_timeout(struct net_device *dev)
4105
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
36034106 {
36044107 struct stmmac_priv *priv = netdev_priv(dev);
36054108
....@@ -3696,6 +4199,8 @@
36964199 netdev_features_t features)
36974200 {
36984201 struct stmmac_priv *priv = netdev_priv(netdev);
4202
+ bool sph_en;
4203
+ u32 chan;
36994204
37004205 /* Keep the COE Type in case of csum is supporting */
37014206 if (features & NETIF_F_RXCSUM)
....@@ -3706,6 +4211,10 @@
37064211 * fixed in case of issue.
37074212 */
37084213 stmmac_rx_ipc(priv, priv->hw);
4214
+
4215
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4216
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4217
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
37094218
37104219 return 0;
37114220 }
....@@ -3799,6 +4308,7 @@
37994308 */
38004309 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38014310 {
4311
+ struct stmmac_priv *priv = netdev_priv(dev);
38024312 int ret = -EOPNOTSUPP;
38034313
38044314 if (!netif_running(dev))
....@@ -3808,18 +4318,14 @@
38084318 case SIOCGMIIPHY:
38094319 case SIOCGMIIREG:
38104320 case SIOCSMIIREG:
3811
- if (!dev->phydev)
3812
- return -EINVAL;
3813
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
4321
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
38144322 break;
3815
-#ifdef CONFIG_STMMAC_PTP
38164323 case SIOCSHWTSTAMP:
38174324 ret = stmmac_hwtstamp_set(dev, rq);
38184325 break;
38194326 case SIOCGHWTSTAMP:
38204327 ret = stmmac_hwtstamp_get(dev, rq);
38214328 break;
3822
-#endif
38234329 default:
38244330 break;
38254331 }
....@@ -3833,12 +4339,17 @@
38334339 struct stmmac_priv *priv = cb_priv;
38344340 int ret = -EOPNOTSUPP;
38354341
4342
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4343
+ return ret;
4344
+
38364345 stmmac_disable_all_queues(priv);
38374346
38384347 switch (type) {
38394348 case TC_SETUP_CLSU32:
3840
- if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3841
- ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4349
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4350
+ break;
4351
+ case TC_SETUP_CLSFLOWER:
4352
+ ret = stmmac_tc_setup_cls(priv, priv, type_data);
38424353 break;
38434354 default:
38444355 break;
....@@ -3848,23 +4359,7 @@
38484359 return ret;
38494360 }
38504361
3851
-static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3852
- struct tc_block_offload *f)
3853
-{
3854
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3855
- return -EOPNOTSUPP;
3856
-
3857
- switch (f->command) {
3858
- case TC_BLOCK_BIND:
3859
- return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3860
- priv, priv, f->extack);
3861
- case TC_BLOCK_UNBIND:
3862
- tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3863
- return 0;
3864
- default:
3865
- return -EOPNOTSUPP;
3866
- }
3867
-}
4362
+static LIST_HEAD(stmmac_block_cb_list);
38684363
38694364 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
38704365 void *type_data)
....@@ -3873,17 +4368,23 @@
38734368
38744369 switch (type) {
38754370 case TC_SETUP_BLOCK:
3876
- return stmmac_setup_tc_block(priv, type_data);
4371
+ return flow_block_cb_setup_simple(type_data,
4372
+ &stmmac_block_cb_list,
4373
+ stmmac_setup_tc_block_cb,
4374
+ priv, priv, true);
38774375 case TC_SETUP_QDISC_CBS:
38784376 return stmmac_tc_setup_cbs(priv, priv, type_data);
4377
+ case TC_SETUP_QDISC_TAPRIO:
4378
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
4379
+ case TC_SETUP_QDISC_ETF:
4380
+ return stmmac_tc_setup_etf(priv, priv, type_data);
38794381 default:
38804382 return -EOPNOTSUPP;
38814383 }
38824384 }
38834385
38844386 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
3885
- struct net_device *sb_dev,
3886
- select_queue_fallback_t fallback)
4387
+ struct net_device *sb_dev)
38874388 {
38884389 int gso = skb_shinfo(skb)->gso_type;
38894390
....@@ -3897,7 +4398,7 @@
38974398 return 0;
38984399 }
38994400
3900
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
4401
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
39014402 }
39024403
39034404 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
....@@ -3905,11 +4406,20 @@
39054406 struct stmmac_priv *priv = netdev_priv(ndev);
39064407 int ret = 0;
39074408
4409
+ ret = pm_runtime_get_sync(priv->device);
4410
+ if (ret < 0) {
4411
+ pm_runtime_put_noidle(priv->device);
4412
+ return ret;
4413
+ }
4414
+
39084415 ret = eth_mac_addr(ndev, addr);
39094416 if (ret)
3910
- return ret;
4417
+ goto set_mac_error;
39114418
39124419 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4420
+
4421
+set_mac_error:
4422
+ pm_runtime_put(priv->device);
39134423
39144424 return ret;
39154425 }
....@@ -3918,24 +4428,27 @@
39184428 static struct dentry *stmmac_fs_dir;
39194429
39204430 static void sysfs_display_ring(void *head, int size, int extend_desc,
3921
- struct seq_file *seq)
4431
+ struct seq_file *seq, dma_addr_t dma_phy_addr)
39224432 {
39234433 int i;
39244434 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
39254435 struct dma_desc *p = (struct dma_desc *)head;
4436
+ dma_addr_t dma_addr;
39264437
39274438 for (i = 0; i < size; i++) {
39284439 if (extend_desc) {
3929
- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3930
- i, (unsigned int)virt_to_phys(ep),
4440
+ dma_addr = dma_phy_addr + i * sizeof(*ep);
4441
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4442
+ i, &dma_addr,
39314443 le32_to_cpu(ep->basic.des0),
39324444 le32_to_cpu(ep->basic.des1),
39334445 le32_to_cpu(ep->basic.des2),
39344446 le32_to_cpu(ep->basic.des3));
39354447 ep++;
39364448 } else {
3937
- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3938
- i, (unsigned int)virt_to_phys(p),
4449
+ dma_addr = dma_phy_addr + i * sizeof(*p);
4450
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4451
+ i, &dma_addr,
39394452 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
39404453 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
39414454 p++;
....@@ -3944,7 +4457,7 @@
39444457 }
39454458 }
39464459
3947
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
4460
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
39484461 {
39494462 struct net_device *dev = seq->private;
39504463 struct stmmac_priv *priv = netdev_priv(dev);
....@@ -3963,11 +4476,11 @@
39634476 if (priv->extend_desc) {
39644477 seq_printf(seq, "Extended descriptor ring:\n");
39654478 sysfs_display_ring((void *)rx_q->dma_erx,
3966
- DMA_RX_SIZE, 1, seq);
4479
+ priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
39674480 } else {
39684481 seq_printf(seq, "Descriptor ring:\n");
39694482 sysfs_display_ring((void *)rx_q->dma_rx,
3970
- DMA_RX_SIZE, 0, seq);
4483
+ priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
39714484 }
39724485 }
39734486
....@@ -3979,33 +4492,19 @@
39794492 if (priv->extend_desc) {
39804493 seq_printf(seq, "Extended descriptor ring:\n");
39814494 sysfs_display_ring((void *)tx_q->dma_etx,
3982
- DMA_TX_SIZE, 1, seq);
3983
- } else {
4495
+ priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4496
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
39844497 seq_printf(seq, "Descriptor ring:\n");
39854498 sysfs_display_ring((void *)tx_q->dma_tx,
3986
- DMA_TX_SIZE, 0, seq);
4499
+ priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
39874500 }
39884501 }
39894502
39904503 return 0;
39914504 }
4505
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
39924506
3993
-static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3994
-{
3995
- return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3996
-}
3997
-
3998
-/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3999
-
4000
-static const struct file_operations stmmac_rings_status_fops = {
4001
- .owner = THIS_MODULE,
4002
- .open = stmmac_sysfs_ring_open,
4003
- .read = seq_read,
4004
- .llseek = seq_lseek,
4005
- .release = single_release,
4006
-};
4007
-
4008
-static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
4507
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
40094508 {
40104509 struct net_device *dev = seq->private;
40114510 struct stmmac_priv *priv = netdev_priv(dev);
....@@ -4063,64 +4562,94 @@
40634562 priv->dma_cap.number_rx_channel);
40644563 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
40654564 priv->dma_cap.number_tx_channel);
4565
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4566
+ priv->dma_cap.number_rx_queues);
4567
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4568
+ priv->dma_cap.number_tx_queues);
40664569 seq_printf(seq, "\tEnhanced descriptors: %s\n",
 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
-
+	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+		   priv->dma_cap.pps_out_num);
+	seq_printf(seq, "\tSafety Features: %s\n",
+		   priv->dma_cap.asp ? "Y" : "N");
+	seq_printf(seq, "\tFlexible RX Parser: %s\n",
+		   priv->dma_cap.frpsel ? "Y" : "N");
+	seq_printf(seq, "\tEnhanced Addressing: %d\n",
+		   priv->dma_cap.addr64);
+	seq_printf(seq, "\tReceive Side Scaling: %s\n",
+		   priv->dma_cap.rssen ? "Y" : "N");
+	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+		   priv->dma_cap.vlhash ? "Y" : "N");
+	seq_printf(seq, "\tSplit Header: %s\n",
+		   priv->dma_cap.sphen ? "Y" : "N");
+	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+		   priv->dma_cap.vlins ? "Y" : "N");
+	seq_printf(seq, "\tDouble VLAN: %s\n",
+		   priv->dma_cap.dvlan ? "Y" : "N");
+	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+		   priv->dma_cap.l3l4fnum);
+	seq_printf(seq, "\tARP Offloading: %s\n",
+		   priv->dma_cap.arpoffsel ? "Y" : "N");
+	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+		   priv->dma_cap.estsel ? "Y" : "N");
+	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+		   priv->dma_cap.fpesel ? "Y" : "N");
+	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+		   priv->dma_cap.tbssel ? "Y" : "N");
 	return 0;
 }
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
 
-static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+			       unsigned long event, void *ptr)
 {
-	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (dev->netdev_ops != &stmmac_netdev_ops)
+		goto done;
+
+	switch (event) {
+	case NETDEV_CHANGENAME:
+		if (priv->dbgfs_dir)
+			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+							 priv->dbgfs_dir,
+							 stmmac_fs_dir,
+							 dev->name);
+		break;
+	}
+done:
+	return NOTIFY_DONE;
 }
 
-static const struct file_operations stmmac_dma_cap_fops = {
-	.owner = THIS_MODULE,
-	.open = stmmac_sysfs_dma_cap_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
+static struct notifier_block stmmac_notifier = {
+	.notifier_call = stmmac_device_event,
 };
 
-static int stmmac_init_fs(struct net_device *dev)
+static void stmmac_init_fs(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+
+	rtnl_lock();
 
 	/* Create per netdev entries */
 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
 
-	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
-		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
-
-		return -ENOMEM;
-	}
-
 	/* Entry to report DMA RX/TX rings */
-	priv->dbgfs_rings_status =
-		debugfs_create_file("descriptors_status", 0444,
-				    priv->dbgfs_dir, dev,
-				    &stmmac_rings_status_fops);
-
-	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
-		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
-		debugfs_remove_recursive(priv->dbgfs_dir);
-
-		return -ENOMEM;
-	}
+	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+			    &stmmac_rings_status_fops);
 
 	/* Entry to report the DMA HW features */
-	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
-						  priv->dbgfs_dir,
-						  dev, &stmmac_dma_cap_fops);
+	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+			    &stmmac_dma_cap_fops);
 
-	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
-		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
-		debugfs_remove_recursive(priv->dbgfs_dir);
-
-		return -ENOMEM;
-	}
-
-	return 0;
+	rtnl_unlock();
 }
 
 static void stmmac_exit_fs(struct net_device *dev)
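For reference, the entries created by stmmac_init_fs() can be read from user space once debugfs is mounted. A minimal usage sketch, assuming STMMAC_RESOURCE_NAME expands to "stmmaceth", debugfs is mounted at /sys/kernel/debug, and the interface is named eth0 (adjust these for your setup):

    mount -t debugfs none /sys/kernel/debug    # only if not already mounted
    cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
    cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status

The NETDEV_CHANGENAME notifier above keeps the per-device directory name in sync when the interface is renamed, for example by udev.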
@@ -4130,6 +4659,111 @@
 	debugfs_remove_recursive(priv->dbgfs_dir);
 }
 #endif /* CONFIG_DEBUG_FS */
+
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+	unsigned char *data = (unsigned char *)&vid_le;
+	unsigned char data_byte = 0;
+	u32 crc = ~0x0;
+	u32 temp = 0;
+	int i, bits;
+
+	bits = get_bitmask_order(VLAN_VID_MASK);
+	for (i = 0; i < bits; i++) {
+		if ((i % 8) == 0)
+			data_byte = data[i / 8];
+
+		temp = ((crc & 1) ^ data_byte) & 1;
+		crc >>= 1;
+		data_byte >>= 1;
+
+		if (temp)
+			crc ^= 0xedb88320;
+	}
+
+	return crc;
+}
+
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+	u32 crc, hash = 0;
+	__le16 pmatch = 0;
+	int count = 0;
+	u16 vid = 0;
+
+	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+		__le16 vid_le = cpu_to_le16(vid);
+		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+		hash |= (1 << crc);
+		count++;
+	}
+
+	if (!priv->dma_cap.vlhash) {
+		if (count > 2) /* VID = 0 always passes filter */
+			return -EOPNOTSUPP;
+
+		pmatch = cpu_to_le16(vid);
+		hash = 0;
+	}
+
+	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
+}
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	bool is_double = false;
+	int ret;
+
+	if (be16_to_cpu(proto) == ETH_P_8021AD)
+		is_double = true;
+
+	set_bit(vid, priv->active_vlans);
+	ret = stmmac_vlan_update(priv, is_double);
+	if (ret) {
+		clear_bit(vid, priv->active_vlans);
+		return ret;
+	}
+
+	if (priv->hw->num_vlan) {
+		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	bool is_double = false;
+	int ret;
+
+	ret = pm_runtime_get_sync(priv->device);
+	if (ret < 0) {
+		pm_runtime_put_noidle(priv->device);
+		return ret;
+	}
+
+	if (be16_to_cpu(proto) == ETH_P_8021AD)
+		is_double = true;
+
+	clear_bit(vid, priv->active_vlans);
+
+	if (priv->hw->num_vlan) {
+		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+		if (ret)
+			goto del_vlan_error;
+	}
+
+	ret = stmmac_vlan_update(priv, is_double);
+
+del_vlan_error:
+	pm_runtime_put(priv->device);
+
+	return ret;
+}
 
 static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_open = stmmac_open,
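The VLAN hooks added above are wired into stmmac_netdev_ops in the next hunk; they are typically reached through the 8021q core once a VLAN is stacked on the interface, for example with "ip link add link eth0 name eth0.100 type vlan id 100". The filter programming reduces each VID to one of 16 hash bins: CRC-32 with polynomial 0xedb88320 over the 12 VID bits of the little-endian VID, bit-reversed, complemented, and the top four bits kept. A self-contained user-space sketch of that arithmetic follows; it is an illustration only, bitrev32() is reimplemented locally, vid_crc32_le() merely mirrors stmmac_vid_crc32_le() above, and the byte-pointer cast assumes a little-endian host:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bitrev32(uint32_t x)
    {
            uint32_t r = 0;
            int i;

            for (i = 0; i < 32; i++)
                    r |= ((x >> i) & 1u) << (31 - i);
            return r;
    }

    static uint32_t vid_crc32_le(uint16_t vid_le)
    {
            uint8_t *data = (uint8_t *)&vid_le;
            uint8_t data_byte = 0;
            uint32_t crc = ~0x0;
            int i;

            for (i = 0; i < 12; i++) {      /* 12 valid VID bits */
                    if ((i % 8) == 0)
                            data_byte = data[i / 8];
                    if ((crc ^ data_byte) & 1) {
                            crc >>= 1;
                            crc ^= 0xedb88320;
                    } else {
                            crc >>= 1;
                    }
                    data_byte >>= 1;
            }
            return crc;
    }

    int main(void)
    {
            uint16_t vid = 100;     /* illustrative VID */
            uint32_t bin = bitrev32(~vid_crc32_le(vid)) >> 28;

            printf("VID %u -> hash bin %u, hash bit 0x%04x\n",
                   vid, bin, 1u << bin);
            return 0;
    }

Running it for a given VID prints the bin that stmmac_vlan_update() would set in the hash value passed to stmmac_update_vlan_hash().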
@@ -4147,6 +4781,8 @@
 	.ndo_poll_controller = stmmac_poll_controller,
 #endif
 	.ndo_set_mac_address = stmmac_set_mac_address,
+	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
 };
 
 static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4165,7 +4801,7 @@
 
 	set_bit(STMMAC_DOWN, &priv->state);
 	dev_close(priv->dev);
-	dev_open(priv->dev);
+	dev_open(priv->dev, NULL);
 	clear_bit(STMMAC_DOWN, &priv->state);
 	clear_bit(STMMAC_RESETING, &priv->state);
 	rtnl_unlock();
@@ -4215,6 +4851,12 @@
 	priv->plat->enh_desc = priv->dma_cap.enh_desc;
 	priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
 	priv->hw->pmt = priv->plat->pmt;
+	if (priv->dma_cap.hash_tb_sz) {
+		priv->hw->multicast_filter_bins =
+			(BIT(priv->dma_cap.hash_tb_sz) << 5);
+		priv->hw->mcast_bits_log2 =
+			ilog2(priv->hw->multicast_filter_bins);
+	}
 
 	/* TXCOE doesn't work in thresh DMA mode */
 	if (priv->plat->force_thresh_dma_mode)
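A quick worked example of the sizing above: if the hardware feature register reports hash_tb_sz = 3, then BIT(3) << 5 = 8 * 32 = 256 multicast filter bins and mcast_bits_log2 = ilog2(256) = 8, so eight bits of the destination-address hash select a bin. By the same arithmetic, encodings 1 and 2 correspond to 64 and 128 bins respectively.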
@@ -4251,6 +4893,9 @@
 	if (priv->dma_cap.tsoen)
 		dev_info(priv->device, "TSO supported\n");
 
+	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
 	/* Run HW quirks, if any */
 	if (priv->hwif_quirks) {
 		ret = priv->hwif_quirks(priv);
@@ -4273,6 +4918,92 @@
 	return 0;
 }
 
+static void stmmac_napi_add(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 queue, maxq;
+
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+		int rx_budget = ((priv->plat->dma_rx_size < NAPI_POLL_WEIGHT) &&
+				 (priv->plat->dma_rx_size > 0)) ?
+				 priv->plat->dma_rx_size : NAPI_POLL_WEIGHT;
+		int tx_budget = ((priv->plat->dma_tx_size < NAPI_POLL_WEIGHT) &&
+				 (priv->plat->dma_tx_size > 0)) ?
+				 priv->plat->dma_tx_size : NAPI_POLL_WEIGHT;
+
+		ch->priv_data = priv;
+		ch->index = queue;
+		spin_lock_init(&ch->lock);
+
+		if (queue < priv->plat->rx_queues_to_use) {
+			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+				       rx_budget);
+		}
+		if (queue < priv->plat->tx_queues_to_use) {
+			netif_tx_napi_add(dev, &ch->tx_napi,
+					  stmmac_napi_poll_tx, tx_budget);
+		}
+	}
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 queue, maxq;
+
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		if (queue < priv->plat->rx_queues_to_use)
+			netif_napi_del(&ch->rx_napi);
+		if (queue < priv->plat->tx_queues_to_use)
+			netif_napi_del(&ch->tx_napi);
+	}
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_running(dev))
+		stmmac_release(dev);
+
+	stmmac_napi_del(dev);
+
+	priv->plat->rx_queues_to_use = rx_cnt;
+	priv->plat->tx_queues_to_use = tx_cnt;
+
+	stmmac_napi_add(dev);
+
+	if (netif_running(dev))
+		ret = stmmac_open(dev);
+
+	return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_running(dev))
+		stmmac_release(dev);
+
+	priv->dma_rx_size = rx_size;
+	priv->dma_tx_size = tx_size;
+
+	if (netif_running(dev))
+		ret = stmmac_open(dev);
+
+	return ret;
+}
+
 /**
  * stmmac_dvr_probe
  * @device: device pointer
@@ -4289,12 +5020,11 @@
 {
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
-	u32 queue, maxq;
-	int ret = 0;
+	u32 rxq;
+	int i, ret = 0;
 
-	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
-				  MTL_MAX_TX_QUEUES,
-				  MTL_MAX_RX_QUEUES);
+	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
 	if (!ndev)
 		return -ENOMEM;
 
@@ -4314,7 +5044,7 @@
 	priv->wol_irq = res->wol_irq;
 	priv->lpi_irq = res->lpi_irq;
 
-	if (res->mac)
+	if (!IS_ERR_OR_NULL(res->mac))
 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
 
 	dev_set_drvdata(device, priv->dev);
@@ -4326,8 +5056,7 @@
 	priv->wq = create_singlethread_workqueue("stmmac_wq");
 	if (!priv->wq) {
 		dev_err(priv->device, "failed to create workqueue\n");
-		ret = -ENOMEM;
-		goto error_wq;
+		return -ENOMEM;
 	}
 
 	INIT_WORK(&priv->service_task, stmmac_service_task);
@@ -4355,10 +5084,6 @@
 
 	stmmac_check_ether_addr(priv);
 
-	/* Configure real RX and TX queues */
-	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
-	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
-
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4376,20 +5101,79 @@
 		priv->tso = true;
 		dev_info(priv->device, "TSO feature enabled\n");
 	}
+
+	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
+		ndev->hw_features |= NETIF_F_GRO;
+		if (!priv->plat->sph_disable) {
+			priv->sph = true;
+			dev_info(priv->device, "SPH feature enabled\n");
+		}
+	}
+
+	/* The current IP register MAC_HW_Feature1[ADDR64] only define
+	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
+	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
+	 * So overwrite dma_cap.addr64 according to HW real design.
+	 */
+	if (priv->plat->addr64)
+		priv->dma_cap.addr64 = priv->plat->addr64;
+
+	if (priv->dma_cap.addr64) {
+		ret = dma_set_mask_and_coherent(device,
+				DMA_BIT_MASK(priv->dma_cap.addr64));
+		if (!ret) {
+			dev_info(priv->device, "Using %d bits DMA width\n",
+				 priv->dma_cap.addr64);
+
+			/*
+			 * If more than 32 bits can be addressed, make sure to
+			 * enable enhanced addressing mode.
+			 */
+			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+				priv->plat->dma_cfg->eame = true;
+		} else {
+			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+			if (ret) {
+				dev_err(priv->device, "Failed to set DMA Mask\n");
+				goto error_hw_init;
+			}
+
+			priv->dma_cap.addr64 = 32;
+		}
+	}
+
 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
 	/* Both mac100 and gmac support receive VLAN tag detection */
 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+	if (priv->dma_cap.vlhash) {
+		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	}
+	if (priv->dma_cap.vlins) {
+		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+		if (priv->dma_cap.dvlan)
+			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+	}
 #endif
 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
 
+	/* Initialize RSS */
+	rxq = priv->plat->rx_queues_to_use;
+	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+	if (priv->dma_cap.rssen && priv->plat->rss_en)
+		ndev->features |= NETIF_F_RXHASH;
+
 	/* MTU range: 46 - hw-specific max */
 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
-	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
-		ndev->max_mtu = JUMBO_LEN;
-	else if (priv->plat->has_xgmac)
+	if (priv->plat->has_xgmac)
 		ndev->max_mtu = XGMAC_JUMBO_LEN;
+	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+		ndev->max_mtu = JUMBO_LEN;
 	else
 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
@@ -4407,22 +5191,7 @@
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
 	/* Setup channels NAPI */
-	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
-
-	for (queue = 0; queue < maxq; queue++) {
-		struct stmmac_channel *ch = &priv->channel[queue];
-
-		ch->priv_data = priv;
-		ch->index = queue;
-
-		if (queue < priv->plat->rx_queues_to_use)
-			ch->has_rx = true;
-		if (queue < priv->plat->tx_queues_to_use)
-			ch->has_tx = true;
-
-		netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
-			       NAPI_POLL_WEIGHT);
-	}
+	stmmac_napi_add(ndev);
 
 	mutex_init(&priv->lock);
 
@@ -4432,15 +5201,18 @@
 	 * set the MDC clock dynamically according to the csr actual
 	 * clock input.
 	 */
-	if (!priv->plat->clk_csr)
-		stmmac_clk_csr_set(priv);
-	else
+	if (priv->plat->clk_csr >= 0)
 		priv->clk_csr = priv->plat->clk_csr;
+	else
+		stmmac_clk_csr_set(priv);
 
 	stmmac_check_pcs_mode(priv);
 
-	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	pm_runtime_get_noresume(device);
+	pm_runtime_set_active(device);
+	pm_runtime_enable(device);
+
+	if (priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
 		/* MDIO bus Registration */
 		ret = stmmac_mdio_register(ndev);
@@ -4452,6 +5224,12 @@
 		}
 	}
 
+	ret = stmmac_phy_setup(priv);
+	if (ret) {
+		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+		goto error_phy_setup;
+	}
+
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@ -4460,29 +5238,26 @@
 	}
 
 #ifdef CONFIG_DEBUG_FS
-	ret = stmmac_init_fs(ndev);
-	if (ret < 0)
-		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
-			    __func__);
+	stmmac_init_fs(ndev);
 #endif
+
+	/* Let pm_runtime_put() disable the clocks.
+	 * If CONFIG_PM is not enabled, the clocks will stay powered.
+	 */
+	pm_runtime_put(device);
 
 	return ret;
 
 error_netdev_register:
-	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	phylink_destroy(priv->phylink);
+error_phy_setup:
+	if (priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 error_mdio_register:
-	for (queue = 0; queue < maxq; queue++) {
-		struct stmmac_channel *ch = &priv->channel[queue];
-
-		netif_napi_del(&ch->napi);
-	}
+	stmmac_napi_del(ndev);
 error_hw_init:
 	destroy_workqueue(priv->wq);
-error_wq:
-	free_netdev(ndev);
 
 	return ret;
 }
@@ -4501,25 +5276,30 @@
 
 	netdev_info(priv->dev, "%s: removing driver", __func__);
 
-#ifdef CONFIG_DEBUG_FS
-	stmmac_exit_fs(ndev);
-#endif
 	stmmac_stop_all_dma(priv);
-
 	stmmac_mac_set(priv, priv->ioaddr, false);
 	netif_carrier_off(ndev);
 	unregister_netdev(ndev);
+
+	/* Serdes power down needs to happen after VLAN filter
+	 * is deleted that is triggered by unregister_netdev().
+	 */
+	if (priv->plat->serdes_powerdown)
+		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
+#ifdef CONFIG_DEBUG_FS
+	stmmac_exit_fs(ndev);
+#endif
+	phylink_destroy(priv->phylink);
 	if (priv->plat->stmmac_rst)
 		reset_control_assert(priv->plat->stmmac_rst);
-	clk_disable_unprepare(priv->plat->pclk);
-	clk_disable_unprepare(priv->plat->stmmac_clk);
-	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+	if (priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 	destroy_workqueue(priv->wq);
 	mutex_destroy(&priv->lock);
-	free_netdev(ndev);
 
 	return 0;
 }
@@ -4541,8 +5321,7 @@
 	if (!ndev || !netif_running(ndev))
 		return 0;
 
-	if (ndev->phydev)
-		phy_stop(ndev->phydev);
+	phylink_mac_change(priv->phylink, false);
 
 	mutex_lock(&priv->lock);
 
@@ -4561,34 +5340,38 @@
 	/* Stop TX/RX DMA */
 	stmmac_stop_all_dma(priv);
 
+	if (priv->plat->serdes_powerdown)
+		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
 	/* Enable Power down mode by programming the PMT regs */
-	if (device_may_wakeup(priv->device)) {
+	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
 		stmmac_pmt(priv, priv->hw, priv->wolopts);
 		priv->irq_wake = 1;
 	} else {
+		mutex_unlock(&priv->lock);
+		rtnl_lock();
+		if (device_may_wakeup(priv->device))
+			phylink_speed_down(priv->phylink, false);
 		if (priv->plat->integrated_phy_power)
 			priv->plat->integrated_phy_power(priv->plat->bsp_priv,
 							 false);
+		phylink_stop(priv->phylink);
+		rtnl_unlock();
+		mutex_lock(&priv->lock);
+
 		stmmac_mac_set(priv, priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
-		/* Disable clock in case of PWM is off */
-		if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP))
-			clk_disable_unprepare(priv->plat->clk_ptp_ref);
-		clk_disable_unprepare(priv->plat->pclk);
-		clk_disable_unprepare(priv->plat->stmmac_clk);
 	}
 	mutex_unlock(&priv->lock);
 
-	priv->oldlink = false;
 	priv->speed = SPEED_UNKNOWN;
-	priv->oldduplex = DUPLEX_UNKNOWN;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_suspend);
 
 /**
  * stmmac_reset_queues_param - reset queue parameters
- * @dev: device pointer
+ * @priv: device pointer
  */
 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
 {
@@ -4624,6 +5407,7 @@
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
+	int ret;
 
 	if (!netif_running(ndev))
 		return 0;
@@ -4634,18 +5418,13 @@
 	 * this bit because it can generate problems while resuming
 	 * from another devices (e.g. serial console).
 	 */
-	if (device_may_wakeup(priv->device)) {
+	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
 		mutex_lock(&priv->lock);
 		stmmac_pmt(priv, priv->hw, 0);
 		mutex_unlock(&priv->lock);
 		priv->irq_wake = 0;
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
-		/* enable the clk previously disabled */
-		clk_prepare_enable(priv->plat->stmmac_clk);
-		clk_prepare_enable(priv->plat->pclk);
-		if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP))
-			clk_prepare_enable(priv->plat->clk_ptp_ref);
 		/* reset the phy so that it's ready */
 		if (priv->mii)
 			stmmac_mdio_reset(priv->mii);
@@ -4654,6 +5433,23 @@
 							 true);
 	}
 
+	if (priv->plat->serdes_powerup) {
+		ret = priv->plat->serdes_powerup(ndev,
+						 priv->plat->bsp_priv);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+		rtnl_lock();
+		phylink_start(priv->phylink);
+		/* We may have called phylink_speed_down before */
+		phylink_speed_up(priv->phylink);
+		rtnl_unlock();
+	}
+
+	rtnl_lock();
 	mutex_lock(&priv->lock);
 
 	stmmac_reset_queues_param(priv);
@@ -4662,17 +5458,19 @@
 	stmmac_clear_descriptors(priv);
 
 	stmmac_hw_setup(ndev, false);
-	stmmac_init_tx_coalesce(priv);
+	stmmac_init_coalesce(priv);
 	stmmac_set_rx_mode(ndev);
+
+	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
 	stmmac_enable_all_queues(priv);
 
-	netif_device_attach(ndev);
-
 	mutex_unlock(&priv->lock);
+	rtnl_unlock();
 
-	if (ndev->phydev)
-		phy_start(ndev->phydev);
+	phylink_mac_change(priv->phylink, true);
+
+	netif_device_attach(ndev);
 
 	return 0;
 }
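The stmmac_reinit_queues() and stmmac_reinit_ringparam() helpers introduced earlier in this diff are the reconfiguration back-ends for the ethtool channel and ring settings; assuming the usual ethtool_ops hooks are wired up in stmmac_ethtool.c (not shown in this file), the paths can be exercised from user space with something like:

    ethtool -L eth0 rx 4 tx 4          # change the number of RX/TX queues
    ethtool -G eth0 rx 1024 tx 1024    # change the DMA descriptor ring sizes

Both helpers close the interface if it is running, apply the new queue counts or ring sizes, and reopen it, so a brief link interruption is expected.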
@@ -4684,7 +5482,7 @@
 	char *opt;
 
 	if (!str || !*str)
-		return -EINVAL;
+		return 1;
 	while ((opt = strsep(&str, ",")) != NULL) {
 		if (!strncmp(opt, "debug:", 6)) {
 			if (kstrtoint(opt + 6, 0, &debug))
@@ -4715,11 +5513,11 @@
 			goto err;
 		}
 	}
-	return 0;
+	return 1;
 
 err:
 	pr_err("%s: ERROR broken module parameter conversion", __func__);
-	return -EINVAL;
+	return 1;
 }
 
 __setup("stmmaceth=", stmmac_cmdline_opt);
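stmmac_cmdline_opt() now returns 1 in all cases, which is the __setup() convention for an option that has been handled; a 0 return would cause the "stmmaceth=" string to be treated as an unknown kernel parameter and handed on to init. Only the debug: key is visible in this hunk; with it, a boot-time setting looks like the following line (the value 16 is purely illustrative) appended to the kernel command line:

    stmmaceth=debug:16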
@@ -4729,16 +5527,9 @@
 {
 #ifdef CONFIG_DEBUG_FS
 	/* Create debugfs main directory if it doesn't exist yet */
-	if (!stmmac_fs_dir) {
+	if (!stmmac_fs_dir)
 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
-
-		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
-			pr_err("ERROR %s, debugfs create directory failed\n",
-			       STMMAC_RESOURCE_NAME);
-
-			return -ENOMEM;
-		}
-	}
+	register_netdevice_notifier(&stmmac_notifier);
 #endif
 
 	return 0;
@@ -4747,6 +5538,7 @@
 static void __exit stmmac_exit(void)
 {
 #ifdef CONFIG_DEBUG_FS
+	unregister_netdevice_notifier(&stmmac_notifier);
 	debugfs_remove_recursive(stmmac_fs_dir);
 #endif
 }