.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /******************************************************************************* |
---|
2 | 3 | This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. |
---|
3 | 4 | ST Ethernet IPs are built around a Synopsys IP Core. |
---|
4 | 5 | |
---|
5 | 6 | Copyright(C) 2007-2011 STMicroelectronics Ltd |
---|
6 | 7 | |
---|
7 | | - This program is free software; you can redistribute it and/or modify it |
---|
8 | | - under the terms and conditions of the GNU General Public License, |
---|
9 | | - version 2, as published by the Free Software Foundation. |
---|
10 | | - |
---|
11 | | - This program is distributed in the hope it will be useful, but WITHOUT |
---|
12 | | - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
---|
13 | | - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
14 | | - more details. |
---|
15 | | - |
---|
16 | | - The full GNU General Public License is included in this distribution in |
---|
17 | | - the file called "COPYING". |
---|
18 | 8 | |
---|
19 | 9 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
---|
20 | 10 | |
---|
.. | .. |
---|
38 | 28 | #include <linux/if_vlan.h> |
---|
39 | 29 | #include <linux/dma-mapping.h> |
---|
40 | 30 | #include <linux/slab.h> |
---|
| 31 | +#include <linux/pm_runtime.h> |
---|
41 | 32 | #include <linux/prefetch.h> |
---|
42 | 33 | #include <linux/pinctrl/consumer.h> |
---|
43 | 34 | #ifdef CONFIG_DEBUG_FS |
---|
.. | .. |
---|
45 | 36 | #include <linux/seq_file.h> |
---|
46 | 37 | #endif /* CONFIG_DEBUG_FS */ |
---|
47 | 38 | #include <linux/net_tstamp.h> |
---|
| 39 | +#include <linux/phylink.h> |
---|
48 | 40 | #include <linux/udp.h> |
---|
49 | 41 | #include <net/pkt_cls.h> |
---|
50 | 42 | #include "stmmac_ptp.h" |
---|
.. | .. |
---|
54 | 46 | #include "dwmac1000.h" |
---|
55 | 47 | #include "dwxgmac2.h" |
---|
56 | 48 | #include "hwif.h" |
---|
| 49 | + |
---|
| 50 | +/* As long as the interface is active, we keep the timestamping counter enabled |
---|
| 51 | + * with fine resolution and binary rollover. This avoid non-monotonic behavior |
---|
| 52 | + * (clock jumps) when changing timestamping settings at runtime. |
---|
| 53 | + */ |
---|
| 54 | +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ |
---|
| 55 | + PTP_TCR_TSCTRLSSR) |
---|
57 | 56 | |
---|
58 | 57 | #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) |
---|
59 | 58 | #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) |
---|
.. | .. |
---|
72 | 71 | module_param(phyaddr, int, 0444); |
---|
73 | 72 | MODULE_PARM_DESC(phyaddr, "Physical device address"); |
---|
74 | 73 | |
---|
75 | | -#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) |
---|
76 | | -#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) |
---|
| 74 | +#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) |
---|
| 75 | +#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) |
---|
77 | 76 | |
---|
78 | | -static int flow_ctrl = FLOW_OFF; |
---|
| 77 | +static int flow_ctrl = FLOW_AUTO; |
---|
79 | 78 | module_param(flow_ctrl, int, 0644); |
---|
80 | 79 | MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); |
---|
81 | 80 | |
---|
.. | .. |
---|
103 | 102 | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; |
---|
104 | 103 | module_param(eee_timer, int, 0644); |
---|
105 | 104 | MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); |
---|
106 | | -#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) |
---|
| 105 | +#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) |
---|
107 | 106 | |
---|
108 | 107 | /* By default the driver will use the ring mode to manage tx and rx descriptors, |
---|
109 | 108 | * but allow user to force to use the chain instead of the ring |
---|
.. | .. |
---|
115 | 114 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); |
---|
116 | 115 | |
---|
117 | 116 | #ifdef CONFIG_DEBUG_FS |
---|
118 | | -static int stmmac_init_fs(struct net_device *dev); |
---|
| 117 | +static const struct net_device_ops stmmac_netdev_ops; |
---|
| 118 | +static void stmmac_init_fs(struct net_device *dev); |
---|
119 | 119 | static void stmmac_exit_fs(struct net_device *dev); |
---|
120 | 120 | #endif |
---|
121 | 121 | |
---|
122 | 122 | #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) |
---|
| 123 | + |
---|
| 124 | +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) |
---|
| 125 | +{ |
---|
| 126 | + int ret = 0; |
---|
| 127 | + |
---|
| 128 | + if (enabled) { |
---|
| 129 | + ret = clk_prepare_enable(priv->plat->stmmac_clk); |
---|
| 130 | + if (ret) |
---|
| 131 | + return ret; |
---|
| 132 | + ret = clk_prepare_enable(priv->plat->pclk); |
---|
| 133 | + if (ret) { |
---|
| 134 | + clk_disable_unprepare(priv->plat->stmmac_clk); |
---|
| 135 | + return ret; |
---|
| 136 | + } |
---|
| 137 | + } else { |
---|
| 138 | + clk_disable_unprepare(priv->plat->stmmac_clk); |
---|
| 139 | + clk_disable_unprepare(priv->plat->pclk); |
---|
| 140 | + } |
---|
| 141 | + |
---|
| 142 | + return ret; |
---|
| 143 | +} |
---|
| 144 | +EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); |
---|
123 | 145 | |
---|
124 | 146 | /** |
---|
125 | 147 | * stmmac_verify_args - verify the driver parameters. |
---|
.. | .. |
---|
156 | 178 | for (queue = 0; queue < maxq; queue++) { |
---|
157 | 179 | struct stmmac_channel *ch = &priv->channel[queue]; |
---|
158 | 180 | |
---|
159 | | - napi_disable(&ch->napi); |
---|
| 181 | + if (queue < rx_queues_cnt) |
---|
| 182 | + napi_disable(&ch->rx_napi); |
---|
| 183 | + if (queue < tx_queues_cnt) |
---|
| 184 | + napi_disable(&ch->tx_napi); |
---|
160 | 185 | } |
---|
161 | 186 | } |
---|
162 | 187 | |
---|
.. | .. |
---|
174 | 199 | for (queue = 0; queue < maxq; queue++) { |
---|
175 | 200 | struct stmmac_channel *ch = &priv->channel[queue]; |
---|
176 | 201 | |
---|
177 | | - napi_enable(&ch->napi); |
---|
| 202 | + if (queue < rx_queues_cnt) |
---|
| 203 | + napi_enable(&ch->rx_napi); |
---|
| 204 | + if (queue < tx_queues_cnt) |
---|
| 205 | + napi_enable(&ch->tx_napi); |
---|
178 | 206 | } |
---|
179 | 207 | } |
---|
180 | 208 | |
---|
.. | .. |
---|
273 | 301 | if (tx_q->dirty_tx > tx_q->cur_tx) |
---|
274 | 302 | avail = tx_q->dirty_tx - tx_q->cur_tx - 1; |
---|
275 | 303 | else |
---|
276 | | - avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; |
---|
| 304 | + avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; |
---|
277 | 305 | |
---|
278 | 306 | return avail; |
---|
279 | 307 | } |
---|
.. | .. |
---|
291 | 319 | if (rx_q->dirty_rx <= rx_q->cur_rx) |
---|
292 | 320 | dirty = rx_q->cur_rx - rx_q->dirty_rx; |
---|
293 | 321 | else |
---|
294 | | - dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; |
---|
| 322 | + dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; |
---|
295 | 323 | |
---|
296 | 324 | return dirty; |
---|
297 | | -} |
---|
298 | | - |
---|
299 | | -/** |
---|
300 | | - * stmmac_hw_fix_mac_speed - callback for speed selection |
---|
301 | | - * @priv: driver private structure |
---|
302 | | - * Description: on some platforms (e.g. ST), some HW system configuration |
---|
303 | | - * registers have to be set according to the link speed negotiated. |
---|
304 | | - */ |
---|
305 | | -static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) |
---|
306 | | -{ |
---|
307 | | - struct net_device *ndev = priv->dev; |
---|
308 | | - struct phy_device *phydev = ndev->phydev; |
---|
309 | | - |
---|
310 | | - if (likely(priv->plat->fix_mac_speed)) |
---|
311 | | - priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); |
---|
312 | 325 | } |
---|
313 | 326 | |
---|
314 | 327 | /** |
---|
.. | .. |
---|
351 | 364 | |
---|
352 | 365 | /** |
---|
353 | 366 | * stmmac_eee_ctrl_timer - EEE TX SW timer. |
---|
354 | | - * @arg : data hook |
---|
| 367 | + * @t: timer_list struct containing private info |
---|
355 | 368 | * Description: |
---|
356 | 369 | * if there is no data transfer and if we are not in LPI state, |
---|
357 | 370 | * then MAC Transmitter can be moved to LPI state. |
---|
.. | .. |
---|
361 | 374 | struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); |
---|
362 | 375 | |
---|
363 | 376 | stmmac_enable_eee_mode(priv); |
---|
364 | | - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
---|
| 377 | + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); |
---|
365 | 378 | } |
---|
366 | 379 | |
---|
367 | 380 | /** |
---|
.. | .. |
---|
374 | 387 | */ |
---|
375 | 388 | bool stmmac_eee_init(struct stmmac_priv *priv) |
---|
376 | 389 | { |
---|
377 | | - struct net_device *ndev = priv->dev; |
---|
378 | | - int interface = priv->plat->interface; |
---|
379 | | - bool ret = false; |
---|
380 | | - |
---|
381 | | - if ((interface != PHY_INTERFACE_MODE_MII) && |
---|
382 | | - (interface != PHY_INTERFACE_MODE_GMII) && |
---|
383 | | - !phy_interface_mode_is_rgmii(interface)) |
---|
384 | | - goto out; |
---|
| 390 | + int eee_tw_timer = priv->eee_tw_timer; |
---|
385 | 391 | |
---|
386 | 392 | /* Using PCS we cannot dial with the phy registers at this stage |
---|
387 | 393 | * so we do not support extra feature like EEE. |
---|
388 | 394 | */ |
---|
389 | | - if ((priv->hw->pcs == STMMAC_PCS_RGMII) || |
---|
390 | | - (priv->hw->pcs == STMMAC_PCS_TBI) || |
---|
391 | | - (priv->hw->pcs == STMMAC_PCS_RTBI)) |
---|
392 | | - goto out; |
---|
| 395 | + if (priv->hw->pcs == STMMAC_PCS_TBI || |
---|
| 396 | + priv->hw->pcs == STMMAC_PCS_RTBI) |
---|
| 397 | + return false; |
---|
393 | 398 | |
---|
394 | | - /* MAC core supports the EEE feature. */ |
---|
395 | | - if (priv->dma_cap.eee) { |
---|
396 | | - int tx_lpi_timer = priv->tx_lpi_timer; |
---|
| 399 | + /* Check if MAC core supports the EEE feature. */ |
---|
| 400 | + if (!priv->dma_cap.eee) |
---|
| 401 | + return false; |
---|
397 | 402 | |
---|
398 | | - /* Check if the PHY supports EEE */ |
---|
399 | | - if (phy_init_eee(ndev->phydev, 1)) { |
---|
400 | | - /* To manage at run-time if the EEE cannot be supported |
---|
401 | | - * anymore (for example because the lp caps have been |
---|
402 | | - * changed). |
---|
403 | | - * In that case the driver disable own timers. |
---|
404 | | - */ |
---|
405 | | - mutex_lock(&priv->lock); |
---|
406 | | - if (priv->eee_active) { |
---|
407 | | - netdev_dbg(priv->dev, "disable EEE\n"); |
---|
408 | | - del_timer_sync(&priv->eee_ctrl_timer); |
---|
409 | | - stmmac_set_eee_timer(priv, priv->hw, 0, |
---|
410 | | - tx_lpi_timer); |
---|
411 | | - } |
---|
412 | | - priv->eee_active = 0; |
---|
413 | | - mutex_unlock(&priv->lock); |
---|
414 | | - goto out; |
---|
| 403 | + mutex_lock(&priv->lock); |
---|
| 404 | + |
---|
| 405 | + /* Check if it needs to be deactivated */ |
---|
| 406 | + if (!priv->eee_active) { |
---|
| 407 | + if (priv->eee_enabled) { |
---|
| 408 | + netdev_dbg(priv->dev, "disable EEE\n"); |
---|
| 409 | + del_timer_sync(&priv->eee_ctrl_timer); |
---|
| 410 | + stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); |
---|
415 | 411 | } |
---|
416 | | - /* Activate the EEE and start timers */ |
---|
417 | | - mutex_lock(&priv->lock); |
---|
418 | | - if (!priv->eee_active) { |
---|
419 | | - priv->eee_active = 1; |
---|
420 | | - timer_setup(&priv->eee_ctrl_timer, |
---|
421 | | - stmmac_eee_ctrl_timer, 0); |
---|
422 | | - mod_timer(&priv->eee_ctrl_timer, |
---|
423 | | - STMMAC_LPI_T(eee_timer)); |
---|
424 | | - |
---|
425 | | - stmmac_set_eee_timer(priv, priv->hw, |
---|
426 | | - STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); |
---|
427 | | - } |
---|
428 | | - /* Set HW EEE according to the speed */ |
---|
429 | | - stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); |
---|
430 | | - |
---|
431 | | - ret = true; |
---|
432 | 412 | mutex_unlock(&priv->lock); |
---|
433 | | - |
---|
434 | | - netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); |
---|
| 413 | + return false; |
---|
435 | 414 | } |
---|
436 | | -out: |
---|
437 | | - return ret; |
---|
| 415 | + |
---|
| 416 | + if (priv->eee_active && !priv->eee_enabled) { |
---|
| 417 | + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); |
---|
| 418 | + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, |
---|
| 419 | + eee_tw_timer); |
---|
| 420 | + } |
---|
| 421 | + |
---|
| 422 | + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); |
---|
| 423 | + |
---|
| 424 | + mutex_unlock(&priv->lock); |
---|
| 425 | + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); |
---|
| 426 | + return true; |
---|
438 | 427 | } |
---|
439 | 428 | |
---|
440 | 429 | /* stmmac_get_tx_hwtstamp - get HW TX timestamps |
---|
.. | .. |
---|
449 | 438 | struct dma_desc *p, struct sk_buff *skb) |
---|
450 | 439 | { |
---|
451 | 440 | struct skb_shared_hwtstamps shhwtstamp; |
---|
| 441 | + bool found = false; |
---|
452 | 442 | u64 ns = 0; |
---|
453 | 443 | |
---|
454 | 444 | if (!priv->hwts_tx_en) |
---|
.. | .. |
---|
460 | 450 | |
---|
461 | 451 | /* check tx tstamp status */ |
---|
462 | 452 | if (stmmac_get_tx_timestamp_status(priv, p)) { |
---|
463 | | - /* get the valid tstamp */ |
---|
464 | 453 | stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); |
---|
| 454 | + found = true; |
---|
| 455 | + } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { |
---|
| 456 | + found = true; |
---|
| 457 | + } |
---|
465 | 458 | |
---|
| 459 | + if (found) { |
---|
466 | 460 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
---|
467 | 461 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
---|
468 | 462 | |
---|
.. | .. |
---|
470 | 464 | /* pass tstamp to stack */ |
---|
471 | 465 | skb_tstamp_tx(skb, &shhwtstamp); |
---|
472 | 466 | } |
---|
473 | | - |
---|
474 | | - return; |
---|
475 | 467 | } |
---|
476 | 468 | |
---|
477 | 469 | /* stmmac_get_rx_hwtstamp - get HW RX timestamps |
---|
.. | .. |
---|
508 | 500 | } |
---|
509 | 501 | } |
---|
510 | 502 | |
---|
511 | | -#ifdef CONFIG_STMMAC_PTP |
---|
512 | 503 | /** |
---|
513 | 504 | * stmmac_hwtstamp_set - control hardware timestamping. |
---|
514 | 505 | * @dev: device pointer. |
---|
.. | .. |
---|
524 | 515 | { |
---|
525 | 516 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
526 | 517 | struct hwtstamp_config config; |
---|
527 | | - struct timespec64 now; |
---|
528 | | - u64 temp = 0; |
---|
529 | 518 | u32 ptp_v2 = 0; |
---|
530 | 519 | u32 tstamp_all = 0; |
---|
531 | 520 | u32 ptp_over_ipv4_udp = 0; |
---|
.. | .. |
---|
534 | 523 | u32 snap_type_sel = 0; |
---|
535 | 524 | u32 ts_master_en = 0; |
---|
536 | 525 | u32 ts_event_en = 0; |
---|
537 | | - u32 sec_inc = 0; |
---|
538 | | - u32 value = 0; |
---|
539 | | - bool xmac; |
---|
540 | | - |
---|
541 | | - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
---|
542 | 526 | |
---|
543 | 527 | if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { |
---|
544 | 528 | netdev_alert(priv->dev, "No support for HW time stamping\n"); |
---|
.. | .. |
---|
644 | 628 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
---|
645 | 629 | ptp_v2 = PTP_TCR_TSVER2ENA; |
---|
646 | 630 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
---|
647 | | - ts_event_en = PTP_TCR_TSEVNTENA; |
---|
| 631 | + if (priv->synopsys_id < DWMAC_CORE_4_10) |
---|
| 632 | + ts_event_en = PTP_TCR_TSEVNTENA; |
---|
648 | 633 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
---|
649 | 634 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
---|
650 | 635 | ptp_over_ethernet = PTP_TCR_TSIPENA; |
---|
.. | .. |
---|
699 | 684 | priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); |
---|
700 | 685 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; |
---|
701 | 686 | |
---|
702 | | - if (!priv->hwts_tx_en && !priv->hwts_rx_en) |
---|
703 | | - stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); |
---|
704 | | - else { |
---|
705 | | - value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | |
---|
706 | | - tstamp_all | ptp_v2 | ptp_over_ethernet | |
---|
707 | | - ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | |
---|
708 | | - ts_master_en | snap_type_sel); |
---|
709 | | - stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); |
---|
| 687 | + priv->systime_flags = STMMAC_HWTS_ACTIVE; |
---|
710 | 688 | |
---|
711 | | - /* program Sub Second Increment reg */ |
---|
712 | | - stmmac_config_sub_second_increment(priv, |
---|
713 | | - priv->ptpaddr, priv->plat->clk_ptp_rate, |
---|
714 | | - xmac, &sec_inc); |
---|
715 | | - temp = div_u64(1000000000ULL, sec_inc); |
---|
716 | | - |
---|
717 | | - /* Store sub second increment and flags for later use */ |
---|
718 | | - priv->sub_second_inc = sec_inc; |
---|
719 | | - priv->systime_flags = value; |
---|
720 | | - |
---|
721 | | - /* calculate default added value: |
---|
722 | | - * formula is : |
---|
723 | | - * addend = (2^32)/freq_div_ratio; |
---|
724 | | - * where, freq_div_ratio = 1e9ns/sec_inc |
---|
725 | | - */ |
---|
726 | | - temp = (u64)(temp << 32); |
---|
727 | | - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); |
---|
728 | | - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); |
---|
729 | | - |
---|
730 | | - /* initialize system time */ |
---|
731 | | - ktime_get_real_ts64(&now); |
---|
732 | | - |
---|
733 | | - /* lower 32 bits of tv_sec are safe until y2106 */ |
---|
734 | | - stmmac_init_systime(priv, priv->ptpaddr, |
---|
735 | | - (u32)now.tv_sec, now.tv_nsec); |
---|
| 689 | + if (priv->hwts_tx_en || priv->hwts_rx_en) { |
---|
| 690 | + priv->systime_flags |= tstamp_all | ptp_v2 | |
---|
| 691 | + ptp_over_ethernet | ptp_over_ipv6_udp | |
---|
| 692 | + ptp_over_ipv4_udp | ts_event_en | |
---|
| 693 | + ts_master_en | snap_type_sel; |
---|
736 | 694 | } |
---|
| 695 | + |
---|
| 696 | + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); |
---|
737 | 697 | |
---|
738 | 698 | memcpy(&priv->tstamp_config, &config, sizeof(config)); |
---|
739 | 699 | |
---|
.. | .. |
---|
748 | 708 | * a proprietary structure used to pass information to the driver. |
---|
749 | 709 | * Description: |
---|
750 | 710 | * This function obtain the current hardware timestamping settings |
---|
751 | | - as requested. |
---|
| 711 | + * as requested. |
---|
752 | 712 | */ |
---|
753 | 713 | static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) |
---|
754 | 714 | { |
---|
.. | .. |
---|
761 | 721 | return copy_to_user(ifr->ifr_data, config, |
---|
762 | 722 | sizeof(*config)) ? -EFAULT : 0; |
---|
763 | 723 | } |
---|
764 | | -#endif /* CONFIG_STMMAC_PTP */ |
---|
| 724 | + |
---|
| 725 | +/** |
---|
| 726 | + * stmmac_init_tstamp_counter - init hardware timestamping counter |
---|
| 727 | + * @priv: driver private structure |
---|
| 728 | + * @systime_flags: timestamping flags |
---|
| 729 | + * Description: |
---|
| 730 | + * Initialize hardware counter for packet timestamping. |
---|
| 731 | + * This is valid as long as the interface is open and not suspended. |
---|
| 732 | + * Will be rerun after resuming from suspend, case in which the timestamping |
---|
| 733 | + * flags updated by stmmac_hwtstamp_set() also need to be restored. |
---|
| 734 | + */ |
---|
| 735 | +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) |
---|
| 736 | +{ |
---|
| 737 | + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
---|
| 738 | + struct timespec64 now; |
---|
| 739 | + u32 sec_inc = 0; |
---|
| 740 | + u64 temp = 0; |
---|
| 741 | + |
---|
| 742 | + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
---|
| 743 | + return -EOPNOTSUPP; |
---|
| 744 | + |
---|
| 745 | + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); |
---|
| 746 | + priv->systime_flags = systime_flags; |
---|
| 747 | + |
---|
| 748 | + /* program Sub Second Increment reg */ |
---|
| 749 | + stmmac_config_sub_second_increment(priv, priv->ptpaddr, |
---|
| 750 | + priv->plat->clk_ptp_rate, |
---|
| 751 | + xmac, &sec_inc); |
---|
| 752 | + temp = div_u64(1000000000ULL, sec_inc); |
---|
| 753 | + |
---|
| 754 | + /* Store sub second increment for later use */ |
---|
| 755 | + priv->sub_second_inc = sec_inc; |
---|
| 756 | + |
---|
| 757 | + /* calculate default added value: |
---|
| 758 | + * formula is : |
---|
| 759 | + * addend = (2^32)/freq_div_ratio; |
---|
| 760 | + * where, freq_div_ratio = 1e9ns/sec_inc |
---|
| 761 | + */ |
---|
| 762 | + temp = (u64)(temp << 32); |
---|
| 763 | + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); |
---|
| 764 | + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); |
---|
| 765 | + |
---|
| 766 | + /* initialize system time */ |
---|
| 767 | + ktime_get_real_ts64(&now); |
---|
| 768 | + |
---|
| 769 | + /* lower 32 bits of tv_sec are safe until y2106 */ |
---|
| 770 | + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); |
---|
| 771 | + |
---|
| 772 | + return 0; |
---|
| 773 | +} |
---|
| 774 | +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); |
---|
765 | 775 | |
---|
766 | 776 | /** |
---|
767 | 777 | * stmmac_init_ptp - init PTP |
---|
.. | .. |
---|
773 | 783 | static int stmmac_init_ptp(struct stmmac_priv *priv) |
---|
774 | 784 | { |
---|
775 | 785 | bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
---|
| 786 | + int ret; |
---|
776 | 787 | |
---|
777 | | - if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
---|
778 | | - return -EOPNOTSUPP; |
---|
| 788 | + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); |
---|
| 789 | + if (ret) |
---|
| 790 | + return ret; |
---|
779 | 791 | |
---|
780 | 792 | priv->adv_ts = 0; |
---|
781 | 793 | /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ |
---|
.. | .. |
---|
795 | 807 | priv->hwts_tx_en = 0; |
---|
796 | 808 | priv->hwts_rx_en = 0; |
---|
797 | 809 | |
---|
798 | | - stmmac_ptp_register(priv); |
---|
799 | | - |
---|
800 | 810 | return 0; |
---|
801 | 811 | } |
---|
802 | 812 | |
---|
803 | 813 | static void stmmac_release_ptp(struct stmmac_priv *priv) |
---|
804 | 814 | { |
---|
805 | | - if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP)) |
---|
806 | | - clk_disable_unprepare(priv->plat->clk_ptp_ref); |
---|
| 815 | + clk_disable_unprepare(priv->plat->clk_ptp_ref); |
---|
807 | 816 | stmmac_ptp_unregister(priv); |
---|
808 | 817 | } |
---|
809 | 818 | |
---|
810 | 819 | /** |
---|
811 | 820 | * stmmac_mac_flow_ctrl - Configure flow control in all queues |
---|
812 | 821 | * @priv: driver private structure |
---|
| 822 | + * @duplex: duplex passed to the next function |
---|
813 | 823 | * Description: It is used for configuring the flow control in all queues |
---|
814 | 824 | */ |
---|
815 | 825 | static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) |
---|
816 | 826 | { |
---|
817 | 827 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
---|
818 | 828 | |
---|
819 | | - stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, |
---|
820 | | - priv->pause, tx_cnt); |
---|
| 829 | + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl & priv->plat->flow_ctrl, |
---|
| 830 | + priv->pause, tx_cnt); |
---|
821 | 831 | } |
---|
822 | 832 | |
---|
823 | | -/** |
---|
824 | | - * stmmac_adjust_link - adjusts the link parameters |
---|
825 | | - * @dev: net device structure |
---|
826 | | - * Description: this is the helper called by the physical abstraction layer |
---|
827 | | - * drivers to communicate the phy link status. According the speed and duplex |
---|
828 | | - * this driver can invoke registered glue-logic as well. |
---|
829 | | - * It also invoke the eee initialization because it could happen when switch |
---|
830 | | - * on different networks (that are eee capable). |
---|
831 | | - */ |
---|
832 | | -static void stmmac_adjust_link(struct net_device *dev) |
---|
| 833 | +static void stmmac_validate(struct phylink_config *config, |
---|
| 834 | + unsigned long *supported, |
---|
| 835 | + struct phylink_link_state *state) |
---|
833 | 836 | { |
---|
834 | | - struct stmmac_priv *priv = netdev_priv(dev); |
---|
835 | | - struct phy_device *phydev = dev->phydev; |
---|
836 | | - bool new_state = false; |
---|
| 837 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
---|
| 838 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, }; |
---|
| 839 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
---|
| 840 | + int tx_cnt = priv->plat->tx_queues_to_use; |
---|
| 841 | + int max_speed = priv->plat->max_speed; |
---|
837 | 842 | |
---|
838 | | - if (!phydev) |
---|
839 | | - return; |
---|
| 843 | + phylink_set(mac_supported, 10baseT_Half); |
---|
| 844 | + phylink_set(mac_supported, 10baseT_Full); |
---|
| 845 | + phylink_set(mac_supported, 100baseT_Half); |
---|
| 846 | + phylink_set(mac_supported, 100baseT_Full); |
---|
| 847 | + phylink_set(mac_supported, 1000baseT_Half); |
---|
| 848 | + phylink_set(mac_supported, 1000baseT_Full); |
---|
| 849 | + phylink_set(mac_supported, 1000baseKX_Full); |
---|
| 850 | + phylink_set(mac_supported, 100baseT1_Full); |
---|
| 851 | + phylink_set(mac_supported, 1000baseT1_Full); |
---|
840 | 852 | |
---|
841 | | - mutex_lock(&priv->lock); |
---|
| 853 | + phylink_set(mac_supported, Autoneg); |
---|
| 854 | + phylink_set(mac_supported, Pause); |
---|
| 855 | + phylink_set(mac_supported, Asym_Pause); |
---|
| 856 | + phylink_set_port_modes(mac_supported); |
---|
842 | 857 | |
---|
843 | | - if (phydev->link) { |
---|
844 | | - u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); |
---|
845 | | - |
---|
846 | | - /* Now we make sure that we can be in full duplex mode. |
---|
847 | | - * If not, we operate in half-duplex mode. */ |
---|
848 | | - if (phydev->duplex != priv->oldduplex) { |
---|
849 | | - new_state = true; |
---|
850 | | - if (!phydev->duplex) |
---|
851 | | - ctrl &= ~priv->hw->link.duplex; |
---|
852 | | - else |
---|
853 | | - ctrl |= priv->hw->link.duplex; |
---|
854 | | - priv->oldduplex = phydev->duplex; |
---|
| 858 | + /* Cut down 1G if asked to */ |
---|
| 859 | + if ((max_speed > 0) && (max_speed < 1000)) { |
---|
| 860 | + phylink_set(mask, 1000baseT_Full); |
---|
| 861 | + phylink_set(mask, 1000baseX_Full); |
---|
| 862 | + } else if (priv->plat->has_xgmac) { |
---|
| 863 | + if (!max_speed || (max_speed >= 2500)) { |
---|
| 864 | + phylink_set(mac_supported, 2500baseT_Full); |
---|
| 865 | + phylink_set(mac_supported, 2500baseX_Full); |
---|
855 | 866 | } |
---|
856 | | - /* Flow Control operation */ |
---|
857 | | - if (phydev->pause) |
---|
858 | | - stmmac_mac_flow_ctrl(priv, phydev->duplex); |
---|
859 | | - |
---|
860 | | - if (phydev->speed != priv->speed) { |
---|
861 | | - new_state = true; |
---|
862 | | - ctrl &= ~priv->hw->link.speed_mask; |
---|
863 | | - switch (phydev->speed) { |
---|
864 | | - case SPEED_1000: |
---|
865 | | - ctrl |= priv->hw->link.speed1000; |
---|
866 | | - break; |
---|
867 | | - case SPEED_100: |
---|
868 | | - ctrl |= priv->hw->link.speed100; |
---|
869 | | - break; |
---|
870 | | - case SPEED_10: |
---|
871 | | - ctrl |= priv->hw->link.speed10; |
---|
872 | | - break; |
---|
873 | | - default: |
---|
874 | | - netif_warn(priv, link, priv->dev, |
---|
875 | | - "broken speed: %d\n", phydev->speed); |
---|
876 | | - phydev->speed = SPEED_UNKNOWN; |
---|
877 | | - break; |
---|
878 | | - } |
---|
879 | | - if (phydev->speed != SPEED_UNKNOWN) |
---|
880 | | - stmmac_hw_fix_mac_speed(priv); |
---|
881 | | - priv->speed = phydev->speed; |
---|
| 867 | + if (!max_speed || (max_speed >= 5000)) { |
---|
| 868 | + phylink_set(mac_supported, 5000baseT_Full); |
---|
882 | 869 | } |
---|
883 | | - |
---|
884 | | - writel(ctrl, priv->ioaddr + MAC_CTRL_REG); |
---|
885 | | - |
---|
886 | | - if (!priv->oldlink) { |
---|
887 | | - new_state = true; |
---|
888 | | - priv->oldlink = true; |
---|
| 870 | + if (!max_speed || (max_speed >= 10000)) { |
---|
| 871 | + phylink_set(mac_supported, 10000baseSR_Full); |
---|
| 872 | + phylink_set(mac_supported, 10000baseLR_Full); |
---|
| 873 | + phylink_set(mac_supported, 10000baseER_Full); |
---|
| 874 | + phylink_set(mac_supported, 10000baseLRM_Full); |
---|
| 875 | + phylink_set(mac_supported, 10000baseT_Full); |
---|
| 876 | + phylink_set(mac_supported, 10000baseKX4_Full); |
---|
| 877 | + phylink_set(mac_supported, 10000baseKR_Full); |
---|
889 | 878 | } |
---|
890 | | - } else if (priv->oldlink) { |
---|
891 | | - new_state = true; |
---|
892 | | - priv->oldlink = false; |
---|
893 | | - priv->speed = SPEED_UNKNOWN; |
---|
894 | | - priv->oldduplex = DUPLEX_UNKNOWN; |
---|
| 879 | + if (!max_speed || (max_speed >= 25000)) { |
---|
| 880 | + phylink_set(mac_supported, 25000baseCR_Full); |
---|
| 881 | + phylink_set(mac_supported, 25000baseKR_Full); |
---|
| 882 | + phylink_set(mac_supported, 25000baseSR_Full); |
---|
| 883 | + } |
---|
| 884 | + if (!max_speed || (max_speed >= 40000)) { |
---|
| 885 | + phylink_set(mac_supported, 40000baseKR4_Full); |
---|
| 886 | + phylink_set(mac_supported, 40000baseCR4_Full); |
---|
| 887 | + phylink_set(mac_supported, 40000baseSR4_Full); |
---|
| 888 | + phylink_set(mac_supported, 40000baseLR4_Full); |
---|
| 889 | + } |
---|
| 890 | + if (!max_speed || (max_speed >= 50000)) { |
---|
| 891 | + phylink_set(mac_supported, 50000baseCR2_Full); |
---|
| 892 | + phylink_set(mac_supported, 50000baseKR2_Full); |
---|
| 893 | + phylink_set(mac_supported, 50000baseSR2_Full); |
---|
| 894 | + phylink_set(mac_supported, 50000baseKR_Full); |
---|
| 895 | + phylink_set(mac_supported, 50000baseSR_Full); |
---|
| 896 | + phylink_set(mac_supported, 50000baseCR_Full); |
---|
| 897 | + phylink_set(mac_supported, 50000baseLR_ER_FR_Full); |
---|
| 898 | + phylink_set(mac_supported, 50000baseDR_Full); |
---|
| 899 | + } |
---|
| 900 | + if (!max_speed || (max_speed >= 100000)) { |
---|
| 901 | + phylink_set(mac_supported, 100000baseKR4_Full); |
---|
| 902 | + phylink_set(mac_supported, 100000baseSR4_Full); |
---|
| 903 | + phylink_set(mac_supported, 100000baseCR4_Full); |
---|
| 904 | + phylink_set(mac_supported, 100000baseLR4_ER4_Full); |
---|
| 905 | + phylink_set(mac_supported, 100000baseKR2_Full); |
---|
| 906 | + phylink_set(mac_supported, 100000baseSR2_Full); |
---|
| 907 | + phylink_set(mac_supported, 100000baseCR2_Full); |
---|
| 908 | + phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full); |
---|
| 909 | + phylink_set(mac_supported, 100000baseDR2_Full); |
---|
| 910 | + } |
---|
895 | 911 | } |
---|
896 | 912 | |
---|
897 | | - if (new_state && netif_msg_link(priv)) |
---|
898 | | - phy_print_status(phydev); |
---|
| 913 | + /* Half-Duplex can only work with single queue */ |
---|
| 914 | + if (tx_cnt > 1) { |
---|
| 915 | + phylink_set(mask, 10baseT_Half); |
---|
| 916 | + phylink_set(mask, 100baseT_Half); |
---|
| 917 | + phylink_set(mask, 1000baseT_Half); |
---|
| 918 | + } |
---|
899 | 919 | |
---|
900 | | - mutex_unlock(&priv->lock); |
---|
| 920 | + linkmode_and(supported, supported, mac_supported); |
---|
| 921 | + linkmode_andnot(supported, supported, mask); |
---|
901 | 922 | |
---|
902 | | - if (phydev->is_pseudo_fixed_link) |
---|
903 | | - /* Stop PHY layer to call the hook to adjust the link in case |
---|
904 | | - * of a switch is attached to the stmmac driver. |
---|
905 | | - */ |
---|
906 | | - phydev->irq = PHY_IGNORE_INTERRUPT; |
---|
907 | | - else |
---|
908 | | - /* At this stage, init the EEE if supported. |
---|
909 | | - * Never called in case of fixed_link. |
---|
910 | | - */ |
---|
911 | | - priv->eee_enabled = stmmac_eee_init(priv); |
---|
| 923 | + linkmode_and(state->advertising, state->advertising, mac_supported); |
---|
| 924 | + linkmode_andnot(state->advertising, state->advertising, mask); |
---|
| 925 | + |
---|
| 926 | + /* If PCS is supported, check which modes it supports. */ |
---|
| 927 | + stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state); |
---|
912 | 928 | } |
---|
| 929 | + |
---|
| 930 | +static void stmmac_mac_pcs_get_state(struct phylink_config *config, |
---|
| 931 | + struct phylink_link_state *state) |
---|
| 932 | +{ |
---|
| 933 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
---|
| 934 | + |
---|
| 935 | + state->link = 0; |
---|
| 936 | + stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state); |
---|
| 937 | +} |
---|
| 938 | + |
---|
| 939 | +static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, |
---|
| 940 | + const struct phylink_link_state *state) |
---|
| 941 | +{ |
---|
| 942 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
---|
| 943 | + |
---|
| 944 | + stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state); |
---|
| 945 | +} |
---|
| 946 | + |
---|
/* phylink .mac_an_restart callback: restarting autonegotiation is not
 * supported by this MAC, so this is intentionally a no-op.
 */
static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}
---|
| 951 | + |
---|
| 952 | +static void stmmac_mac_link_down(struct phylink_config *config, |
---|
| 953 | + unsigned int mode, phy_interface_t interface) |
---|
| 954 | +{ |
---|
| 955 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
---|
| 956 | + |
---|
| 957 | + stmmac_mac_set(priv, priv->ioaddr, false); |
---|
| 958 | + priv->eee_active = false; |
---|
| 959 | + priv->tx_lpi_enabled = false; |
---|
| 960 | + stmmac_eee_init(priv); |
---|
| 961 | + stmmac_set_eee_pls(priv, priv->hw, false); |
---|
| 962 | +} |
---|
| 963 | + |
---|
| 964 | +static void stmmac_mac_link_up(struct phylink_config *config, |
---|
| 965 | + struct phy_device *phy, |
---|
| 966 | + unsigned int mode, phy_interface_t interface, |
---|
| 967 | + int speed, int duplex, |
---|
| 968 | + bool tx_pause, bool rx_pause) |
---|
| 969 | +{ |
---|
| 970 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
---|
| 971 | + u32 ctrl; |
---|
| 972 | + |
---|
| 973 | + stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface); |
---|
| 974 | + |
---|
| 975 | + ctrl = readl(priv->ioaddr + MAC_CTRL_REG); |
---|
| 976 | + ctrl &= ~priv->hw->link.speed_mask; |
---|
| 977 | + |
---|
| 978 | + if (interface == PHY_INTERFACE_MODE_USXGMII) { |
---|
| 979 | + switch (speed) { |
---|
| 980 | + case SPEED_10000: |
---|
| 981 | + ctrl |= priv->hw->link.xgmii.speed10000; |
---|
| 982 | + break; |
---|
| 983 | + case SPEED_5000: |
---|
| 984 | + ctrl |= priv->hw->link.xgmii.speed5000; |
---|
| 985 | + break; |
---|
| 986 | + case SPEED_2500: |
---|
| 987 | + ctrl |= priv->hw->link.xgmii.speed2500; |
---|
| 988 | + break; |
---|
| 989 | + default: |
---|
| 990 | + return; |
---|
| 991 | + } |
---|
| 992 | + } else if (interface == PHY_INTERFACE_MODE_XLGMII) { |
---|
| 993 | + switch (speed) { |
---|
| 994 | + case SPEED_100000: |
---|
| 995 | + ctrl |= priv->hw->link.xlgmii.speed100000; |
---|
| 996 | + break; |
---|
| 997 | + case SPEED_50000: |
---|
| 998 | + ctrl |= priv->hw->link.xlgmii.speed50000; |
---|
| 999 | + break; |
---|
| 1000 | + case SPEED_40000: |
---|
| 1001 | + ctrl |= priv->hw->link.xlgmii.speed40000; |
---|
| 1002 | + break; |
---|
| 1003 | + case SPEED_25000: |
---|
| 1004 | + ctrl |= priv->hw->link.xlgmii.speed25000; |
---|
| 1005 | + break; |
---|
| 1006 | + case SPEED_10000: |
---|
| 1007 | + ctrl |= priv->hw->link.xgmii.speed10000; |
---|
| 1008 | + break; |
---|
| 1009 | + case SPEED_2500: |
---|
| 1010 | + ctrl |= priv->hw->link.speed2500; |
---|
| 1011 | + break; |
---|
| 1012 | + case SPEED_1000: |
---|
| 1013 | + ctrl |= priv->hw->link.speed1000; |
---|
| 1014 | + break; |
---|
| 1015 | + default: |
---|
| 1016 | + return; |
---|
| 1017 | + } |
---|
| 1018 | + } else { |
---|
| 1019 | + switch (speed) { |
---|
| 1020 | + case SPEED_2500: |
---|
| 1021 | + ctrl |= priv->hw->link.speed2500; |
---|
| 1022 | + break; |
---|
| 1023 | + case SPEED_1000: |
---|
| 1024 | + ctrl |= priv->hw->link.speed1000; |
---|
| 1025 | + break; |
---|
| 1026 | + case SPEED_100: |
---|
| 1027 | + ctrl |= priv->hw->link.speed100; |
---|
| 1028 | + break; |
---|
| 1029 | + case SPEED_10: |
---|
| 1030 | + ctrl |= priv->hw->link.speed10; |
---|
| 1031 | + break; |
---|
| 1032 | + default: |
---|
| 1033 | + return; |
---|
| 1034 | + } |
---|
| 1035 | + } |
---|
| 1036 | + |
---|
| 1037 | + priv->speed = speed; |
---|
| 1038 | + |
---|
| 1039 | + if (priv->plat->fix_mac_speed) |
---|
| 1040 | + priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); |
---|
| 1041 | + |
---|
| 1042 | + if (!duplex) |
---|
| 1043 | + ctrl &= ~priv->hw->link.duplex; |
---|
| 1044 | + else |
---|
| 1045 | + ctrl |= priv->hw->link.duplex; |
---|
| 1046 | + |
---|
| 1047 | + /* Flow Control operation */ |
---|
| 1048 | + if (rx_pause && tx_pause) |
---|
| 1049 | + priv->flow_ctrl = FLOW_AUTO; |
---|
| 1050 | + else if (rx_pause && !tx_pause) |
---|
| 1051 | + priv->flow_ctrl = FLOW_RX; |
---|
| 1052 | + else if (!rx_pause && tx_pause) |
---|
| 1053 | + priv->flow_ctrl = FLOW_TX; |
---|
| 1054 | + else |
---|
| 1055 | + priv->flow_ctrl = FLOW_OFF; |
---|
| 1056 | + |
---|
| 1057 | + stmmac_mac_flow_ctrl(priv, duplex); |
---|
| 1058 | + |
---|
| 1059 | + writel(ctrl, priv->ioaddr + MAC_CTRL_REG); |
---|
| 1060 | + |
---|
| 1061 | + stmmac_mac_set(priv, priv->ioaddr, true); |
---|
| 1062 | + if (phy && priv->dma_cap.eee) { |
---|
| 1063 | + priv->eee_active = phy_init_eee(phy, 1) >= 0; |
---|
| 1064 | + priv->eee_enabled = stmmac_eee_init(priv); |
---|
| 1065 | + priv->tx_lpi_enabled = priv->eee_enabled; |
---|
| 1066 | + stmmac_set_eee_pls(priv, priv->hw, true); |
---|
| 1067 | + } |
---|
| 1068 | +} |
---|
| 1069 | + |
---|
| 1070 | +static const struct phylink_mac_ops stmmac_phylink_mac_ops = { |
---|
| 1071 | + .validate = stmmac_validate, |
---|
| 1072 | + .mac_pcs_get_state = stmmac_mac_pcs_get_state, |
---|
| 1073 | + .mac_config = stmmac_mac_config, |
---|
| 1074 | + .mac_an_restart = stmmac_mac_an_restart, |
---|
| 1075 | + .mac_link_down = stmmac_mac_link_down, |
---|
| 1076 | + .mac_link_up = stmmac_mac_link_up, |
---|
| 1077 | +}; |
---|
913 | 1078 | |
---|
914 | 1079 | /** |
---|
915 | 1080 | * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported |
---|
.. | .. |
---|
936 | 1101 | } |
---|
937 | 1102 | } |
---|
938 | 1103 | |
---|
| 1104 | +#if 0 |
---|
| 1105 | +static void rtl8211F_led_control(struct phy_device *phydev) |
---|
| 1106 | +{ |
---|
| 1107 | + printk("ben debug:rtl8211F_led_control...1 \n"); |
---|
| 1108 | + |
---|
| 1109 | + if(!phydev) return; |
---|
| 1110 | + if(phydev->phy_id!=0x001cc916) return; /* only for 8211E*/ |
---|
| 1111 | + |
---|
| 1112 | + /*switch to extension page44*/ |
---|
| 1113 | + phy_write(phydev, 31, 0x0d04); |
---|
| 1114 | +//add hc 1000M --> orange |
---|
| 1115 | +// 100M --> green |
---|
| 1116 | + phy_write(phydev, 16, 0x6D02); |
---|
| 1117 | +//add hc 1000M&100M --> green |
---|
| 1118 | +// phy_write(phydev, 16, 0x6C0A); |
---|
| 1119 | + printk("ben debug:rtl8211F_led_control...2 \n"); |
---|
| 1120 | +} |
---|
| 1121 | +#endif |
---|
| 1122 | +#define RTL_8211F_PHY_ID 0x001cc916 |
---|
| 1123 | +#define RTL_8211F_PHY_ID_MASK 0x001fffff |
---|
| 1124 | +#define RTL_8211F_PAGE_SELECT 0x1f |
---|
| 1125 | +#define RTL_8211F_LCR_ADDR 0x10 |
---|
| 1126 | + |
---|
| 1127 | +#define GREEN_LED 0 |
---|
| 1128 | +#define YELLOW0_LED 1 |
---|
| 1129 | +#define YELLOW1_LED 2 |
---|
| 1130 | + |
---|
| 1131 | +static int rtl8211F_led_control(struct phy_device *phydev) |
---|
| 1132 | +{ |
---|
| 1133 | + unsigned int temp; |
---|
| 1134 | + |
---|
| 1135 | + printk("<<<<<<ben test led ctrl start... %s\n",__FUNCTION__); |
---|
| 1136 | + if(!phydev) return 0; |
---|
| 1137 | + if(phydev->phy_id!=0x001cc916) return 0; /* only for 8211E*/ |
---|
| 1138 | + |
---|
| 1139 | + phy_write(phydev, 31, 0xd04); |
---|
| 1140 | + temp = 0x02 << (5 * GREEN_LED); |
---|
| 1141 | + temp |= 0x08 << (5 * YELLOW0_LED); |
---|
| 1142 | + |
---|
| 1143 | + temp |= 0x1b << (5 * YELLOW1_LED); |
---|
| 1144 | + phy_write(phydev, 0x10, temp); |
---|
| 1145 | + |
---|
| 1146 | + temp = 1 << (YELLOW1_LED + 1); |
---|
| 1147 | + phy_write(phydev, 0x11, 0x00); |
---|
| 1148 | + phy_write(phydev, 31, 0); |
---|
| 1149 | + |
---|
| 1150 | + return 0; |
---|
| 1151 | +} |
---|
| 1152 | + |
---|
939 | 1153 | /** |
---|
940 | 1154 | * stmmac_init_phy - PHY initialization |
---|
941 | 1155 | * @dev: net device structure |
---|
.. | .. |
---|
947 | 1161 | static int stmmac_init_phy(struct net_device *dev) |
---|
948 | 1162 | { |
---|
949 | 1163 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
950 | | - u32 tx_cnt = priv->plat->tx_queues_to_use; |
---|
951 | | - struct phy_device *phydev; |
---|
952 | | - char phy_id_fmt[MII_BUS_ID_SIZE + 3]; |
---|
953 | | - char bus_id[MII_BUS_ID_SIZE]; |
---|
954 | | - int interface = priv->plat->interface; |
---|
955 | | - int max_speed = priv->plat->max_speed; |
---|
956 | | - priv->oldlink = false; |
---|
957 | | - priv->speed = SPEED_UNKNOWN; |
---|
958 | | - priv->oldduplex = DUPLEX_UNKNOWN; |
---|
| 1164 | + struct device_node *node; |
---|
| 1165 | + int ret; |
---|
959 | 1166 | |
---|
| 1167 | + |
---|
| 1168 | + printk("ben stmmac_init_phy .. \n"); |
---|
| 1169 | + mdelay(2000); |
---|
| 1170 | + printk("ben stmmac_init_phy delay .. \n"); |
---|
960 | 1171 | if (priv->plat->integrated_phy_power) |
---|
961 | | - priv->plat->integrated_phy_power(priv->plat->bsp_priv, true); |
---|
| 1172 | + ret = priv->plat->integrated_phy_power(priv->plat->bsp_priv, true); |
---|
962 | 1173 | |
---|
963 | | - if (priv->plat->phy_node) { |
---|
964 | | - phydev = of_phy_connect(dev, priv->plat->phy_node, |
---|
965 | | - &stmmac_adjust_link, 0, interface); |
---|
966 | | - } else { |
---|
967 | | - snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", |
---|
968 | | - priv->plat->bus_id); |
---|
| 1174 | + node = priv->plat->phylink_node; |
---|
969 | 1175 | |
---|
970 | | - snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, |
---|
971 | | - priv->plat->phy_addr); |
---|
972 | | - netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__, |
---|
973 | | - phy_id_fmt); |
---|
| 1176 | + if (node) |
---|
| 1177 | + { |
---|
| 1178 | + //printk("ben ttt.. \n"); |
---|
| 1179 | + ret = phylink_of_phy_connect(priv->phylink, node, 0); |
---|
| 1180 | + //printk("ben ttt:%d \n", ret); |
---|
| 1181 | + } |
---|
974 | 1182 | |
---|
975 | | - phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, |
---|
976 | | - interface); |
---|
977 | | - } |
---|
| 1183 | + /* Some DT bindings do not set-up the PHY handle. Let's try to |
---|
| 1184 | + * manually parse it |
---|
| 1185 | + */ |
---|
| 1186 | + //printk("ben:stmmac_init_phy..1 \n"); |
---|
| 1187 | + if (!node || ret) { |
---|
| 1188 | + //if (1) { |
---|
| 1189 | + int addr = priv->plat->phy_addr; |
---|
| 1190 | + struct phy_device *phydev; |
---|
978 | 1191 | |
---|
979 | | - if (IS_ERR_OR_NULL(phydev)) { |
---|
980 | | - netdev_err(priv->dev, "Could not attach to PHY\n"); |
---|
981 | | - if (!phydev) |
---|
| 1192 | + //printk("ben:stmmac_init_phy..2 \n"); |
---|
| 1193 | + phydev = mdiobus_get_phy(priv->mii, addr); |
---|
| 1194 | + if (!phydev) { |
---|
| 1195 | + netdev_err(priv->dev, "no phy at addr %d\n", addr); |
---|
982 | 1196 | return -ENODEV; |
---|
| 1197 | + } |
---|
983 | 1198 | |
---|
984 | | - return PTR_ERR(phydev); |
---|
| 1199 | + //rtl8211F_led_control(phydev); |
---|
| 1200 | + |
---|
| 1201 | + //printk("ben:stmmac_init_phy..3 \n"); |
---|
| 1202 | + ret = phylink_connect_phy(priv->phylink, phydev); |
---|
| 1203 | + //rtl8211F_led_control(phydev); |
---|
985 | 1204 | } |
---|
986 | 1205 | |
---|
987 | | - /* Stop Advertising 1000BASE Capability if interface is not GMII */ |
---|
988 | | - if ((interface == PHY_INTERFACE_MODE_MII) || |
---|
989 | | - (interface == PHY_INTERFACE_MODE_RMII) || |
---|
990 | | - (max_speed < 1000 && max_speed > 0)) |
---|
991 | | - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | |
---|
992 | | - SUPPORTED_1000baseT_Full); |
---|
| 1206 | + if (!priv->plat->pmt) { |
---|
| 1207 | + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
---|
993 | 1208 | |
---|
994 | | - /* |
---|
995 | | - * Half-duplex mode not supported with multiqueue |
---|
996 | | - * half-duplex can only works with single queue |
---|
997 | | - */ |
---|
998 | | - if (tx_cnt > 1) |
---|
999 | | - phydev->supported &= ~(SUPPORTED_1000baseT_Half | |
---|
1000 | | - SUPPORTED_100baseT_Half | |
---|
1001 | | - SUPPORTED_10baseT_Half); |
---|
1002 | | - |
---|
1003 | | - /* |
---|
1004 | | - * Broken HW is sometimes missing the pull-up resistor on the |
---|
1005 | | - * MDIO line, which results in reads to non-existent devices returning |
---|
1006 | | - * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent |
---|
1007 | | - * device as well. |
---|
1008 | | - * Note: phydev->phy_id is the result of reading the UID PHY registers. |
---|
1009 | | - */ |
---|
1010 | | - if (!priv->plat->phy_node && phydev->phy_id == 0) { |
---|
1011 | | - phy_disconnect(phydev); |
---|
1012 | | - return -ENODEV; |
---|
| 1209 | + phylink_ethtool_get_wol(priv->phylink, &wol); |
---|
| 1210 | + device_set_wakeup_capable(priv->device, !!wol.supported); |
---|
1013 | 1211 | } |
---|
| 1212 | + return ret; |
---|
| 1213 | +} |
---|
1014 | 1214 | |
---|
1015 | | - /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid |
---|
1016 | | - * subsequent PHY polling, make sure we force a link transition if |
---|
1017 | | - * we have a UP/DOWN/UP transition |
---|
1018 | | - */ |
---|
1019 | | - if (phydev->is_pseudo_fixed_link) |
---|
1020 | | - phydev->irq = PHY_POLL; |
---|
| 1215 | +static int stmmac_phy_setup(struct stmmac_priv *priv) |
---|
| 1216 | +{ |
---|
| 1217 | + struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); |
---|
| 1218 | + int mode = priv->plat->phy_interface; |
---|
| 1219 | + struct phylink *phylink; |
---|
1021 | 1220 | |
---|
1022 | | - phy_attached_info(phydev); |
---|
| 1221 | + priv->phylink_config.dev = &priv->dev->dev; |
---|
| 1222 | + priv->phylink_config.type = PHYLINK_NETDEV; |
---|
| 1223 | + priv->phylink_config.pcs_poll = true; |
---|
| 1224 | + |
---|
| 1225 | + if (!fwnode) |
---|
| 1226 | + fwnode = dev_fwnode(priv->device); |
---|
| 1227 | + |
---|
| 1228 | + phylink = phylink_create(&priv->phylink_config, fwnode, |
---|
| 1229 | + mode, &stmmac_phylink_mac_ops); |
---|
| 1230 | + if (IS_ERR(phylink)) |
---|
| 1231 | + return PTR_ERR(phylink); |
---|
| 1232 | + |
---|
| 1233 | + priv->phylink = phylink; |
---|
1023 | 1234 | return 0; |
---|
1024 | 1235 | } |
---|
1025 | 1236 | |
---|
1026 | 1237 | static void stmmac_display_rx_rings(struct stmmac_priv *priv) |
---|
1027 | 1238 | { |
---|
1028 | 1239 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
---|
| 1240 | + unsigned int desc_size; |
---|
1029 | 1241 | void *head_rx; |
---|
1030 | 1242 | u32 queue; |
---|
1031 | 1243 | |
---|
.. | .. |
---|
1035 | 1247 | |
---|
1036 | 1248 | pr_info("\tRX Queue %u rings\n", queue); |
---|
1037 | 1249 | |
---|
1038 | | - if (priv->extend_desc) |
---|
| 1250 | + if (priv->extend_desc) { |
---|
1039 | 1251 | head_rx = (void *)rx_q->dma_erx; |
---|
1040 | | - else |
---|
| 1252 | + desc_size = sizeof(struct dma_extended_desc); |
---|
| 1253 | + } else { |
---|
1041 | 1254 | head_rx = (void *)rx_q->dma_rx; |
---|
| 1255 | + desc_size = sizeof(struct dma_desc); |
---|
| 1256 | + } |
---|
1042 | 1257 | |
---|
1043 | 1258 | /* Display RX ring */ |
---|
1044 | | - stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); |
---|
| 1259 | + stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, |
---|
| 1260 | + rx_q->dma_rx_phy, desc_size); |
---|
1045 | 1261 | } |
---|
1046 | 1262 | } |
---|
1047 | 1263 | |
---|
1048 | 1264 | static void stmmac_display_tx_rings(struct stmmac_priv *priv) |
---|
1049 | 1265 | { |
---|
1050 | 1266 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
---|
| 1267 | + unsigned int desc_size; |
---|
1051 | 1268 | void *head_tx; |
---|
1052 | 1269 | u32 queue; |
---|
1053 | 1270 | |
---|
.. | .. |
---|
1057 | 1274 | |
---|
1058 | 1275 | pr_info("\tTX Queue %d rings\n", queue); |
---|
1059 | 1276 | |
---|
1060 | | - if (priv->extend_desc) |
---|
| 1277 | + if (priv->extend_desc) { |
---|
1061 | 1278 | head_tx = (void *)tx_q->dma_etx; |
---|
1062 | | - else |
---|
| 1279 | + desc_size = sizeof(struct dma_extended_desc); |
---|
| 1280 | + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { |
---|
| 1281 | + head_tx = (void *)tx_q->dma_entx; |
---|
| 1282 | + desc_size = sizeof(struct dma_edesc); |
---|
| 1283 | + } else { |
---|
1063 | 1284 | head_tx = (void *)tx_q->dma_tx; |
---|
| 1285 | + desc_size = sizeof(struct dma_desc); |
---|
| 1286 | + } |
---|
1064 | 1287 | |
---|
1065 | | - stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); |
---|
| 1288 | + stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, |
---|
| 1289 | + tx_q->dma_tx_phy, desc_size); |
---|
1066 | 1290 | } |
---|
1067 | 1291 | } |
---|
1068 | 1292 | |
---|
.. | .. |
---|
1106 | 1330 | int i; |
---|
1107 | 1331 | |
---|
1108 | 1332 | /* Clear the RX descriptors */ |
---|
1109 | | - for (i = 0; i < DMA_RX_SIZE; i++) |
---|
| 1333 | + for (i = 0; i < priv->dma_rx_size; i++) |
---|
1110 | 1334 | if (priv->extend_desc) |
---|
1111 | 1335 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, |
---|
1112 | 1336 | priv->use_riwt, priv->mode, |
---|
1113 | | - (i == DMA_RX_SIZE - 1), |
---|
| 1337 | + (i == priv->dma_rx_size - 1), |
---|
1114 | 1338 | priv->dma_buf_sz); |
---|
1115 | 1339 | else |
---|
1116 | 1340 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], |
---|
1117 | 1341 | priv->use_riwt, priv->mode, |
---|
1118 | | - (i == DMA_RX_SIZE - 1), |
---|
| 1342 | + (i == priv->dma_rx_size - 1), |
---|
1119 | 1343 | priv->dma_buf_sz); |
---|
1120 | 1344 | } |
---|
1121 | 1345 | |
---|
.. | .. |
---|
1132 | 1356 | int i; |
---|
1133 | 1357 | |
---|
1134 | 1358 | /* Clear the TX descriptors */ |
---|
1135 | | - for (i = 0; i < DMA_TX_SIZE; i++) |
---|
| 1359 | + for (i = 0; i < priv->dma_tx_size; i++) { |
---|
| 1360 | + int last = (i == (priv->dma_tx_size - 1)); |
---|
| 1361 | + struct dma_desc *p; |
---|
| 1362 | + |
---|
1136 | 1363 | if (priv->extend_desc) |
---|
1137 | | - stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, |
---|
1138 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
---|
| 1364 | + p = &tx_q->dma_etx[i].basic; |
---|
| 1365 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 1366 | + p = &tx_q->dma_entx[i].basic; |
---|
1139 | 1367 | else |
---|
1140 | | - stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], |
---|
1141 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
---|
| 1368 | + p = &tx_q->dma_tx[i]; |
---|
| 1369 | + |
---|
| 1370 | + stmmac_init_tx_desc(priv, p, priv->mode, last); |
---|
| 1371 | + } |
---|
1142 | 1372 | } |
---|
1143 | 1373 | |
---|
1144 | 1374 | /** |
---|
.. | .. |
---|
1176 | 1406 | int i, gfp_t flags, u32 queue) |
---|
1177 | 1407 | { |
---|
1178 | 1408 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
---|
1179 | | - struct sk_buff *skb; |
---|
| 1409 | + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
---|
| 1410 | + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
---|
1180 | 1411 | |
---|
1181 | | - skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); |
---|
1182 | | - if (!skb) { |
---|
1183 | | - netdev_err(priv->dev, |
---|
1184 | | - "%s: Rx init fails; skb is NULL\n", __func__); |
---|
| 1412 | + if (priv->dma_cap.addr64 <= 32) |
---|
| 1413 | + gfp |= GFP_DMA32; |
---|
| 1414 | + |
---|
| 1415 | + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
---|
| 1416 | + if (!buf->page) |
---|
1185 | 1417 | return -ENOMEM; |
---|
1186 | | - } |
---|
1187 | | - rx_q->rx_skbuff[i] = skb; |
---|
1188 | | - rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
---|
1189 | | - priv->dma_buf_sz, |
---|
1190 | | - DMA_FROM_DEVICE); |
---|
1191 | | - if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) { |
---|
1192 | | - netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); |
---|
1193 | | - dev_kfree_skb_any(skb); |
---|
1194 | | - return -EINVAL; |
---|
| 1418 | + |
---|
| 1419 | + if (priv->sph) { |
---|
| 1420 | + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
---|
| 1421 | + if (!buf->sec_page) |
---|
| 1422 | + return -ENOMEM; |
---|
| 1423 | + |
---|
| 1424 | + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); |
---|
| 1425 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); |
---|
| 1426 | + } else { |
---|
| 1427 | + buf->sec_page = NULL; |
---|
| 1428 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); |
---|
1195 | 1429 | } |
---|
1196 | 1430 | |
---|
1197 | | - stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]); |
---|
1198 | | - |
---|
| 1431 | + buf->addr = page_pool_get_dma_addr(buf->page); |
---|
| 1432 | + stmmac_set_desc_addr(priv, p, buf->addr); |
---|
1199 | 1433 | if (priv->dma_buf_sz == BUF_SIZE_16KiB) |
---|
1200 | 1434 | stmmac_init_desc3(priv, p); |
---|
1201 | 1435 | |
---|
.. | .. |
---|
1211 | 1445 | static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) |
---|
1212 | 1446 | { |
---|
1213 | 1447 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
---|
| 1448 | + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
---|
1214 | 1449 | |
---|
1215 | | - if (rx_q->rx_skbuff[i]) { |
---|
1216 | | - dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i], |
---|
1217 | | - priv->dma_buf_sz, DMA_FROM_DEVICE); |
---|
1218 | | - dev_kfree_skb_any(rx_q->rx_skbuff[i]); |
---|
1219 | | - } |
---|
1220 | | - rx_q->rx_skbuff[i] = NULL; |
---|
| 1450 | + if (buf->page) |
---|
| 1451 | + page_pool_put_full_page(rx_q->page_pool, buf->page, false); |
---|
| 1452 | + buf->page = NULL; |
---|
| 1453 | + |
---|
| 1454 | + if (buf->sec_page) |
---|
| 1455 | + page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); |
---|
| 1456 | + buf->sec_page = NULL; |
---|
1221 | 1457 | } |
---|
1222 | 1458 | |
---|
1223 | 1459 | /** |
---|
.. | .. |
---|
1264 | 1500 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
1265 | 1501 | u32 rx_count = priv->plat->rx_queues_to_use; |
---|
1266 | 1502 | int ret = -ENOMEM; |
---|
1267 | | - int bfsize = 0; |
---|
1268 | 1503 | int queue; |
---|
1269 | 1504 | int i; |
---|
1270 | | - |
---|
1271 | | - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); |
---|
1272 | | - if (bfsize < 0) |
---|
1273 | | - bfsize = 0; |
---|
1274 | | - |
---|
1275 | | - if (bfsize < BUF_SIZE_16KiB) |
---|
1276 | | - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
---|
1277 | | - |
---|
1278 | | - priv->dma_buf_sz = bfsize; |
---|
1279 | 1505 | |
---|
1280 | 1506 | /* RX INITIALIZATION */ |
---|
1281 | 1507 | netif_dbg(priv, probe, priv->dev, |
---|
.. | .. |
---|
1288 | 1514 | "(%s) dma_rx_phy=0x%08x\n", __func__, |
---|
1289 | 1515 | (u32)rx_q->dma_rx_phy); |
---|
1290 | 1516 | |
---|
1291 | | - for (i = 0; i < DMA_RX_SIZE; i++) { |
---|
| 1517 | + stmmac_clear_rx_descriptors(priv, queue); |
---|
| 1518 | + |
---|
| 1519 | + for (i = 0; i < priv->dma_rx_size; i++) { |
---|
1292 | 1520 | struct dma_desc *p; |
---|
1293 | 1521 | |
---|
1294 | 1522 | if (priv->extend_desc) |
---|
.. | .. |
---|
1300 | 1528 | queue); |
---|
1301 | 1529 | if (ret) |
---|
1302 | 1530 | goto err_init_rx_buffers; |
---|
1303 | | - |
---|
1304 | | - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", |
---|
1305 | | - rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, |
---|
1306 | | - (unsigned int)rx_q->rx_skbuff_dma[i]); |
---|
1307 | 1531 | } |
---|
1308 | 1532 | |
---|
1309 | 1533 | rx_q->cur_rx = 0; |
---|
1310 | | - rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); |
---|
1311 | | - |
---|
1312 | | - stmmac_clear_rx_descriptors(priv, queue); |
---|
| 1534 | + rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); |
---|
1313 | 1535 | |
---|
1314 | 1536 | /* Setup the chained descriptor addresses */ |
---|
1315 | 1537 | if (priv->mode == STMMAC_CHAIN_MODE) { |
---|
1316 | 1538 | if (priv->extend_desc) |
---|
1317 | 1539 | stmmac_mode_init(priv, rx_q->dma_erx, |
---|
1318 | | - rx_q->dma_rx_phy, DMA_RX_SIZE, 1); |
---|
| 1540 | + rx_q->dma_rx_phy, |
---|
| 1541 | + priv->dma_rx_size, 1); |
---|
1319 | 1542 | else |
---|
1320 | 1543 | stmmac_mode_init(priv, rx_q->dma_rx, |
---|
1321 | | - rx_q->dma_rx_phy, DMA_RX_SIZE, 0); |
---|
| 1544 | + rx_q->dma_rx_phy, |
---|
| 1545 | + priv->dma_rx_size, 0); |
---|
1322 | 1546 | } |
---|
1323 | 1547 | } |
---|
1324 | | - |
---|
1325 | | - buf_sz = bfsize; |
---|
1326 | 1548 | |
---|
1327 | 1549 | return 0; |
---|
1328 | 1550 | |
---|
.. | .. |
---|
1334 | 1556 | if (queue == 0) |
---|
1335 | 1557 | break; |
---|
1336 | 1558 | |
---|
1337 | | - i = DMA_RX_SIZE; |
---|
| 1559 | + i = priv->dma_rx_size; |
---|
1338 | 1560 | queue--; |
---|
1339 | 1561 | } |
---|
1340 | 1562 | |
---|
.. | .. |
---|
1366 | 1588 | if (priv->mode == STMMAC_CHAIN_MODE) { |
---|
1367 | 1589 | if (priv->extend_desc) |
---|
1368 | 1590 | stmmac_mode_init(priv, tx_q->dma_etx, |
---|
1369 | | - tx_q->dma_tx_phy, DMA_TX_SIZE, 1); |
---|
1370 | | - else |
---|
| 1591 | + tx_q->dma_tx_phy, |
---|
| 1592 | + priv->dma_tx_size, 1); |
---|
| 1593 | + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) |
---|
1371 | 1594 | stmmac_mode_init(priv, tx_q->dma_tx, |
---|
1372 | | - tx_q->dma_tx_phy, DMA_TX_SIZE, 0); |
---|
| 1595 | + tx_q->dma_tx_phy, |
---|
| 1596 | + priv->dma_tx_size, 0); |
---|
1373 | 1597 | } |
---|
1374 | 1598 | |
---|
1375 | | - for (i = 0; i < DMA_TX_SIZE; i++) { |
---|
| 1599 | + for (i = 0; i < priv->dma_tx_size; i++) { |
---|
1376 | 1600 | struct dma_desc *p; |
---|
1377 | 1601 | if (priv->extend_desc) |
---|
1378 | 1602 | p = &((tx_q->dma_etx + i)->basic); |
---|
| 1603 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 1604 | + p = &((tx_q->dma_entx + i)->basic); |
---|
1379 | 1605 | else |
---|
1380 | 1606 | p = tx_q->dma_tx + i; |
---|
1381 | 1607 | |
---|
.. | .. |
---|
1434 | 1660 | { |
---|
1435 | 1661 | int i; |
---|
1436 | 1662 | |
---|
1437 | | - for (i = 0; i < DMA_RX_SIZE; i++) |
---|
| 1663 | + for (i = 0; i < priv->dma_rx_size; i++) |
---|
1438 | 1664 | stmmac_free_rx_buffer(priv, queue, i); |
---|
1439 | 1665 | } |
---|
1440 | 1666 | |
---|
.. | .. |
---|
1447 | 1673 | { |
---|
1448 | 1674 | int i; |
---|
1449 | 1675 | |
---|
1450 | | - for (i = 0; i < DMA_TX_SIZE; i++) |
---|
| 1676 | + for (i = 0; i < priv->dma_tx_size; i++) |
---|
1451 | 1677 | stmmac_free_tx_buffer(priv, queue, i); |
---|
1452 | 1678 | } |
---|
1453 | 1679 | |
---|
.. | .. |
---|
1482 | 1708 | |
---|
1483 | 1709 | /* Free DMA regions of consistent memory previously allocated */ |
---|
1484 | 1710 | if (!priv->extend_desc) |
---|
1485 | | - dma_free_coherent(priv->device, |
---|
1486 | | - DMA_RX_SIZE * sizeof(struct dma_desc), |
---|
| 1711 | + dma_free_coherent(priv->device, priv->dma_rx_size * |
---|
| 1712 | + sizeof(struct dma_desc), |
---|
1487 | 1713 | rx_q->dma_rx, rx_q->dma_rx_phy); |
---|
1488 | 1714 | else |
---|
1489 | | - dma_free_coherent(priv->device, DMA_RX_SIZE * |
---|
| 1715 | + dma_free_coherent(priv->device, priv->dma_rx_size * |
---|
1490 | 1716 | sizeof(struct dma_extended_desc), |
---|
1491 | 1717 | rx_q->dma_erx, rx_q->dma_rx_phy); |
---|
1492 | 1718 | |
---|
1493 | | - kfree(rx_q->rx_skbuff_dma); |
---|
1494 | | - kfree(rx_q->rx_skbuff); |
---|
| 1719 | + kfree(rx_q->buf_pool); |
---|
| 1720 | + if (rx_q->page_pool) |
---|
| 1721 | + page_pool_destroy(rx_q->page_pool); |
---|
1495 | 1722 | } |
---|
1496 | 1723 | } |
---|
1497 | 1724 | |
---|
.. | .. |
---|
1507 | 1734 | /* Free TX queue resources */ |
---|
1508 | 1735 | for (queue = 0; queue < tx_count; queue++) { |
---|
1509 | 1736 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
---|
| 1737 | + size_t size; |
---|
| 1738 | + void *addr; |
---|
1510 | 1739 | |
---|
1511 | 1740 | /* Release the DMA TX socket buffers */ |
---|
1512 | 1741 | dma_free_tx_skbufs(priv, queue); |
---|
1513 | 1742 | |
---|
1514 | | - /* Free DMA regions of consistent memory previously allocated */ |
---|
1515 | | - if (!priv->extend_desc) |
---|
1516 | | - dma_free_coherent(priv->device, |
---|
1517 | | - DMA_TX_SIZE * sizeof(struct dma_desc), |
---|
1518 | | - tx_q->dma_tx, tx_q->dma_tx_phy); |
---|
1519 | | - else |
---|
1520 | | - dma_free_coherent(priv->device, DMA_TX_SIZE * |
---|
1521 | | - sizeof(struct dma_extended_desc), |
---|
1522 | | - tx_q->dma_etx, tx_q->dma_tx_phy); |
---|
| 1743 | + if (priv->extend_desc) { |
---|
| 1744 | + size = sizeof(struct dma_extended_desc); |
---|
| 1745 | + addr = tx_q->dma_etx; |
---|
| 1746 | + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { |
---|
| 1747 | + size = sizeof(struct dma_edesc); |
---|
| 1748 | + addr = tx_q->dma_entx; |
---|
| 1749 | + } else { |
---|
| 1750 | + size = sizeof(struct dma_desc); |
---|
| 1751 | + addr = tx_q->dma_tx; |
---|
| 1752 | + } |
---|
| 1753 | + |
---|
| 1754 | + size *= priv->dma_tx_size; |
---|
| 1755 | + |
---|
| 1756 | + dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); |
---|
1523 | 1757 | |
---|
1524 | 1758 | kfree(tx_q->tx_skbuff_dma); |
---|
1525 | 1759 | kfree(tx_q->tx_skbuff); |
---|
.. | .. |
---|
1543 | 1777 | /* RX queues buffers and DMA */ |
---|
1544 | 1778 | for (queue = 0; queue < rx_count; queue++) { |
---|
1545 | 1779 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
---|
| 1780 | + struct page_pool_params pp_params = { 0 }; |
---|
| 1781 | + unsigned int num_pages; |
---|
1546 | 1782 | |
---|
1547 | 1783 | rx_q->queue_index = queue; |
---|
1548 | 1784 | rx_q->priv_data = priv; |
---|
1549 | 1785 | |
---|
1550 | | - rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, |
---|
1551 | | - sizeof(dma_addr_t), |
---|
1552 | | - GFP_KERNEL); |
---|
1553 | | - if (!rx_q->rx_skbuff_dma) |
---|
1554 | | - goto err_dma; |
---|
| 1786 | + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; |
---|
| 1787 | + pp_params.pool_size = priv->dma_rx_size; |
---|
| 1788 | + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); |
---|
| 1789 | + pp_params.order = ilog2(num_pages); |
---|
| 1790 | + pp_params.nid = dev_to_node(priv->device); |
---|
| 1791 | + pp_params.dev = priv->device; |
---|
| 1792 | + pp_params.dma_dir = DMA_FROM_DEVICE; |
---|
| 1793 | + pp_params.max_len = num_pages * PAGE_SIZE; |
---|
1555 | 1794 | |
---|
1556 | | - rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, |
---|
1557 | | - sizeof(struct sk_buff *), |
---|
1558 | | - GFP_KERNEL); |
---|
1559 | | - if (!rx_q->rx_skbuff) |
---|
| 1795 | + rx_q->page_pool = page_pool_create(&pp_params); |
---|
| 1796 | + if (IS_ERR(rx_q->page_pool)) { |
---|
| 1797 | + ret = PTR_ERR(rx_q->page_pool); |
---|
| 1798 | + rx_q->page_pool = NULL; |
---|
| 1799 | + goto err_dma; |
---|
| 1800 | + } |
---|
| 1801 | + |
---|
| 1802 | + rx_q->buf_pool = kcalloc(priv->dma_rx_size, |
---|
| 1803 | + sizeof(*rx_q->buf_pool), |
---|
| 1804 | + GFP_KERNEL); |
---|
| 1805 | + if (!rx_q->buf_pool) |
---|
1560 | 1806 | goto err_dma; |
---|
1561 | 1807 | |
---|
1562 | 1808 | if (priv->extend_desc) { |
---|
1563 | | - rx_q->dma_erx = dma_zalloc_coherent(priv->device, |
---|
1564 | | - DMA_RX_SIZE * |
---|
1565 | | - sizeof(struct |
---|
1566 | | - dma_extended_desc), |
---|
1567 | | - &rx_q->dma_rx_phy, |
---|
1568 | | - GFP_KERNEL); |
---|
| 1809 | + rx_q->dma_erx = dma_alloc_coherent(priv->device, |
---|
| 1810 | + priv->dma_rx_size * |
---|
| 1811 | + sizeof(struct dma_extended_desc), |
---|
| 1812 | + &rx_q->dma_rx_phy, |
---|
| 1813 | + GFP_KERNEL); |
---|
1569 | 1814 | if (!rx_q->dma_erx) |
---|
1570 | 1815 | goto err_dma; |
---|
1571 | 1816 | |
---|
1572 | 1817 | } else { |
---|
1573 | | - rx_q->dma_rx = dma_zalloc_coherent(priv->device, |
---|
1574 | | - DMA_RX_SIZE * |
---|
1575 | | - sizeof(struct |
---|
1576 | | - dma_desc), |
---|
1577 | | - &rx_q->dma_rx_phy, |
---|
1578 | | - GFP_KERNEL); |
---|
| 1818 | + rx_q->dma_rx = dma_alloc_coherent(priv->device, |
---|
| 1819 | + priv->dma_rx_size * |
---|
| 1820 | + sizeof(struct dma_desc), |
---|
| 1821 | + &rx_q->dma_rx_phy, |
---|
| 1822 | + GFP_KERNEL); |
---|
1579 | 1823 | if (!rx_q->dma_rx) |
---|
1580 | 1824 | goto err_dma; |
---|
1581 | 1825 | } |
---|
.. | .. |
---|
1606 | 1850 | /* TX queues buffers and DMA */ |
---|
1607 | 1851 | for (queue = 0; queue < tx_count; queue++) { |
---|
1608 | 1852 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
---|
| 1853 | + size_t size; |
---|
| 1854 | + void *addr; |
---|
1609 | 1855 | |
---|
1610 | 1856 | tx_q->queue_index = queue; |
---|
1611 | 1857 | tx_q->priv_data = priv; |
---|
1612 | 1858 | |
---|
1613 | | - tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, |
---|
1614 | | - sizeof(*tx_q->tx_skbuff_dma), |
---|
1615 | | - GFP_KERNEL); |
---|
| 1859 | + tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, |
---|
| 1860 | + sizeof(*tx_q->tx_skbuff_dma), |
---|
| 1861 | + GFP_KERNEL); |
---|
1616 | 1862 | if (!tx_q->tx_skbuff_dma) |
---|
1617 | 1863 | goto err_dma; |
---|
1618 | 1864 | |
---|
1619 | | - tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, |
---|
1620 | | - sizeof(struct sk_buff *), |
---|
1621 | | - GFP_KERNEL); |
---|
| 1865 | + tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, |
---|
| 1866 | + sizeof(struct sk_buff *), |
---|
| 1867 | + GFP_KERNEL); |
---|
1622 | 1868 | if (!tx_q->tx_skbuff) |
---|
1623 | 1869 | goto err_dma; |
---|
1624 | 1870 | |
---|
1625 | | - if (priv->extend_desc) { |
---|
1626 | | - tx_q->dma_etx = dma_zalloc_coherent(priv->device, |
---|
1627 | | - DMA_TX_SIZE * |
---|
1628 | | - sizeof(struct |
---|
1629 | | - dma_extended_desc), |
---|
1630 | | - &tx_q->dma_tx_phy, |
---|
1631 | | - GFP_KERNEL); |
---|
1632 | | - if (!tx_q->dma_etx) |
---|
1633 | | - goto err_dma; |
---|
1634 | | - } else { |
---|
1635 | | - tx_q->dma_tx = dma_zalloc_coherent(priv->device, |
---|
1636 | | - DMA_TX_SIZE * |
---|
1637 | | - sizeof(struct |
---|
1638 | | - dma_desc), |
---|
1639 | | - &tx_q->dma_tx_phy, |
---|
1640 | | - GFP_KERNEL); |
---|
1641 | | - if (!tx_q->dma_tx) |
---|
1642 | | - goto err_dma; |
---|
1643 | | - } |
---|
| 1871 | + if (priv->extend_desc) |
---|
| 1872 | + size = sizeof(struct dma_extended_desc); |
---|
| 1873 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 1874 | + size = sizeof(struct dma_edesc); |
---|
| 1875 | + else |
---|
| 1876 | + size = sizeof(struct dma_desc); |
---|
| 1877 | + |
---|
| 1878 | + size *= priv->dma_tx_size; |
---|
| 1879 | + |
---|
| 1880 | + addr = dma_alloc_coherent(priv->device, size, |
---|
| 1881 | + &tx_q->dma_tx_phy, GFP_KERNEL); |
---|
| 1882 | + if (!addr) |
---|
| 1883 | + goto err_dma; |
---|
| 1884 | + |
---|
| 1885 | + if (priv->extend_desc) |
---|
| 1886 | + tx_q->dma_etx = addr; |
---|
| 1887 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 1888 | + tx_q->dma_entx = addr; |
---|
| 1889 | + else |
---|
| 1890 | + tx_q->dma_tx = addr; |
---|
1644 | 1891 | } |
---|
1645 | 1892 | |
---|
1646 | 1893 | return 0; |
---|
1647 | 1894 | |
---|
1648 | 1895 | err_dma: |
---|
1649 | 1896 | free_dma_tx_desc_resources(priv); |
---|
1650 | | - |
---|
1651 | 1897 | return ret; |
---|
1652 | 1898 | } |
---|
1653 | 1899 | |
---|
.. | .. |
---|
1858 | 2104 | /** |
---|
1859 | 2105 | * stmmac_tx_clean - to manage the transmission completion |
---|
1860 | 2106 | * @priv: driver private structure |
---|
| 2107 | + * @budget: napi budget limiting this function's packet handling |
---|
1861 | 2108 | * @queue: TX queue index |
---|
1862 | 2109 | * Description: it reclaims the transmit resources after transmission completes. |
---|
1863 | 2110 | */ |
---|
.. | .. |
---|
1879 | 2126 | |
---|
1880 | 2127 | if (priv->extend_desc) |
---|
1881 | 2128 | p = (struct dma_desc *)(tx_q->dma_etx + entry); |
---|
| 2129 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 2130 | + p = &tx_q->dma_entx[entry].basic; |
---|
1882 | 2131 | else |
---|
1883 | 2132 | p = tx_q->dma_tx + entry; |
---|
1884 | 2133 | |
---|
.. | .. |
---|
1937 | 2186 | |
---|
1938 | 2187 | stmmac_release_tx_desc(priv, p, priv->mode); |
---|
1939 | 2188 | |
---|
1940 | | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
---|
| 2189 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); |
---|
1941 | 2190 | } |
---|
1942 | 2191 | tx_q->dirty_tx = entry; |
---|
1943 | 2192 | |
---|
.. | .. |
---|
1946 | 2195 | |
---|
1947 | 2196 | if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, |
---|
1948 | 2197 | queue))) && |
---|
1949 | | - stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { |
---|
| 2198 | + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { |
---|
1950 | 2199 | |
---|
1951 | 2200 | netif_dbg(priv, tx_done, priv->dev, |
---|
1952 | 2201 | "%s: restart transmit\n", __func__); |
---|
.. | .. |
---|
1955 | 2204 | |
---|
1956 | 2205 | if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { |
---|
1957 | 2206 | stmmac_enable_eee_mode(priv); |
---|
1958 | | - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
---|
| 2207 | + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); |
---|
1959 | 2208 | } |
---|
| 2209 | + |
---|
| 2210 | + /* We still have pending packets, let's call for a new scheduling */ |
---|
| 2211 | + if (tx_q->dirty_tx != tx_q->cur_tx) |
---|
| 2212 | + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); |
---|
1960 | 2213 | |
---|
1961 | 2214 | __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); |
---|
1962 | 2215 | |
---|
.. | .. |
---|
1973 | 2226 | static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) |
---|
1974 | 2227 | { |
---|
1975 | 2228 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
---|
1976 | | - int i; |
---|
1977 | 2229 | |
---|
1978 | 2230 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); |
---|
1979 | 2231 | |
---|
1980 | 2232 | stmmac_stop_tx_dma(priv, chan); |
---|
1981 | 2233 | dma_free_tx_skbufs(priv, chan); |
---|
1982 | | - for (i = 0; i < DMA_TX_SIZE; i++) |
---|
1983 | | - if (priv->extend_desc) |
---|
1984 | | - stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, |
---|
1985 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
---|
1986 | | - else |
---|
1987 | | - stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], |
---|
1988 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
---|
| 2234 | + stmmac_clear_tx_descriptors(priv, chan); |
---|
1989 | 2235 | tx_q->dirty_tx = 0; |
---|
1990 | 2236 | tx_q->cur_tx = 0; |
---|
1991 | 2237 | tx_q->mss = 0; |
---|
1992 | 2238 | netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); |
---|
| 2239 | + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
---|
| 2240 | + tx_q->dma_tx_phy, chan); |
---|
1993 | 2241 | stmmac_start_tx_dma(priv, chan); |
---|
1994 | 2242 | |
---|
1995 | 2243 | priv->dev->stats.tx_errors++; |
---|
.. | .. |
---|
2048 | 2296 | int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, |
---|
2049 | 2297 | &priv->xstats, chan); |
---|
2050 | 2298 | struct stmmac_channel *ch = &priv->channel[chan]; |
---|
2051 | | - bool needs_work = false; |
---|
| 2299 | + unsigned long flags; |
---|
2052 | 2300 | |
---|
2053 | | - if ((status & handle_rx) && ch->has_rx) { |
---|
2054 | | - needs_work = true; |
---|
2055 | | - } else { |
---|
2056 | | - status &= ~handle_rx; |
---|
| 2301 | + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { |
---|
| 2302 | + if (napi_schedule_prep(&ch->rx_napi)) { |
---|
| 2303 | + spin_lock_irqsave(&ch->lock, flags); |
---|
| 2304 | + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); |
---|
| 2305 | + spin_unlock_irqrestore(&ch->lock, flags); |
---|
| 2306 | + __napi_schedule(&ch->rx_napi); |
---|
| 2307 | + } |
---|
2057 | 2308 | } |
---|
2058 | 2309 | |
---|
2059 | | - if ((status & handle_tx) && ch->has_tx) { |
---|
2060 | | - needs_work = true; |
---|
2061 | | - } else { |
---|
2062 | | - status &= ~handle_tx; |
---|
2063 | | - } |
---|
2064 | | - |
---|
2065 | | - if (needs_work && napi_schedule_prep(&ch->napi)) { |
---|
2066 | | - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); |
---|
2067 | | - __napi_schedule(&ch->napi); |
---|
| 2310 | + if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { |
---|
| 2311 | + if (napi_schedule_prep(&ch->tx_napi)) { |
---|
| 2312 | + spin_lock_irqsave(&ch->lock, flags); |
---|
| 2313 | + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); |
---|
| 2314 | + spin_unlock_irqrestore(&ch->lock, flags); |
---|
| 2315 | + __napi_schedule(&ch->tx_napi); |
---|
| 2316 | + } |
---|
2068 | 2317 | } |
---|
2069 | 2318 | |
---|
2070 | 2319 | return status; |
---|
.. | .. |
---|
2127 | 2376 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | |
---|
2128 | 2377 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; |
---|
2129 | 2378 | |
---|
2130 | | - dwmac_mmc_intr_all_mask(priv->mmcaddr); |
---|
| 2379 | + stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); |
---|
2131 | 2380 | |
---|
2132 | 2381 | if (priv->dma_cap.rmon) { |
---|
2133 | | - dwmac_mmc_ctrl(priv->mmcaddr, mode); |
---|
| 2382 | + stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); |
---|
2134 | 2383 | memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); |
---|
2135 | 2384 | } else |
---|
2136 | 2385 | netdev_info(priv->dev, "No MAC Management Counters available\n"); |
---|
.. | .. |
---|
2159 | 2408 | */ |
---|
2160 | 2409 | static void stmmac_check_ether_addr(struct stmmac_priv *priv) |
---|
2161 | 2410 | { |
---|
2162 | | - if (!is_valid_ether_addr(priv->dev->dev_addr)) { |
---|
| 2411 | +// if (!is_valid_ether_addr(priv->dev->dev_addr)) { |
---|
| 2412 | + if (1) { |
---|
2163 | 2413 | stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); |
---|
2164 | 2414 | if (likely(priv->plat->get_eth_addr)) |
---|
2165 | 2415 | priv->plat->get_eth_addr(priv->plat->bsp_priv, |
---|
.. | .. |
---|
2222 | 2472 | rx_q->dma_rx_phy, chan); |
---|
2223 | 2473 | |
---|
2224 | 2474 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
---|
2225 | | - (DMA_RX_SIZE * sizeof(struct dma_desc)); |
---|
| 2475 | + (priv->dma_rx_size * |
---|
| 2476 | + sizeof(struct dma_desc)); |
---|
2226 | 2477 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, |
---|
2227 | 2478 | rx_q->rx_tail_addr, chan); |
---|
2228 | 2479 | } |
---|
.. | .. |
---|
2251 | 2502 | |
---|
2252 | 2503 | /** |
---|
2253 | 2504 | * stmmac_tx_timer - mitigation sw timer for tx. |
---|
2254 | | - * @data: data pointer |
---|
| 2505 | + * @t: data pointer |
---|
2255 | 2506 | * Description: |
---|
2256 | 2507 | * This is the timer handler to directly invoke the stmmac_tx_clean. |
---|
2257 | 2508 | */ |
---|
.. | .. |
---|
2263 | 2514 | |
---|
2264 | 2515 | ch = &priv->channel[tx_q->queue_index]; |
---|
2265 | 2516 | |
---|
2266 | | - if (likely(napi_schedule_prep(&ch->napi))) |
---|
2267 | | - __napi_schedule(&ch->napi); |
---|
| 2517 | + if (likely(napi_schedule_prep(&ch->tx_napi))) { |
---|
| 2518 | + unsigned long flags; |
---|
| 2519 | + |
---|
| 2520 | + spin_lock_irqsave(&ch->lock, flags); |
---|
| 2521 | + stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); |
---|
| 2522 | + spin_unlock_irqrestore(&ch->lock, flags); |
---|
| 2523 | + __napi_schedule(&ch->tx_napi); |
---|
| 2524 | + } |
---|
2268 | 2525 | } |
---|
2269 | 2526 | |
---|
2270 | 2527 | /** |
---|
2271 | | - * stmmac_init_tx_coalesce - init tx mitigation options. |
---|
| 2528 | + * stmmac_init_coalesce - init mitigation options. |
---|
2272 | 2529 | * @priv: driver private structure |
---|
2273 | 2530 | * Description: |
---|
2274 | | - * This inits the transmit coalesce parameters: i.e. timer rate, |
---|
| 2531 | + * This inits the coalesce parameters: i.e. timer rate, |
---|
2275 | 2532 | * timer handler and default threshold used for enabling the |
---|
2276 | 2533 | * interrupt on completion bit. |
---|
2277 | 2534 | */ |
---|
2278 | | -static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) |
---|
| 2535 | +static void stmmac_init_coalesce(struct stmmac_priv *priv) |
---|
2279 | 2536 | { |
---|
2280 | 2537 | u32 tx_channel_count = priv->plat->tx_queues_to_use; |
---|
2281 | 2538 | u32 chan; |
---|
2282 | 2539 | |
---|
2283 | 2540 | priv->tx_coal_frames = STMMAC_TX_FRAMES; |
---|
2284 | 2541 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; |
---|
| 2542 | + priv->rx_coal_frames = STMMAC_RX_FRAMES; |
---|
2285 | 2543 | |
---|
2286 | 2544 | for (chan = 0; chan < tx_channel_count; chan++) { |
---|
2287 | 2545 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
---|
.. | .. |
---|
2299 | 2557 | /* set TX ring length */ |
---|
2300 | 2558 | for (chan = 0; chan < tx_channels_count; chan++) |
---|
2301 | 2559 | stmmac_set_tx_ring_len(priv, priv->ioaddr, |
---|
2302 | | - (DMA_TX_SIZE - 1), chan); |
---|
| 2560 | + (priv->dma_tx_size - 1), chan); |
---|
2303 | 2561 | |
---|
2304 | 2562 | /* set RX ring length */ |
---|
2305 | 2563 | for (chan = 0; chan < rx_channels_count; chan++) |
---|
2306 | 2564 | stmmac_set_rx_ring_len(priv, priv->ioaddr, |
---|
2307 | | - (DMA_RX_SIZE - 1), chan); |
---|
| 2565 | + (priv->dma_rx_size - 1), chan); |
---|
2308 | 2566 | } |
---|
2309 | 2567 | |
---|
2310 | 2568 | /** |
---|
.. | .. |
---|
2428 | 2686 | } |
---|
2429 | 2687 | } |
---|
2430 | 2688 | |
---|
| 2689 | +static void stmmac_mac_config_rss(struct stmmac_priv *priv) |
---|
| 2690 | +{ |
---|
| 2691 | + if (!priv->dma_cap.rssen || !priv->plat->rss_en) { |
---|
| 2692 | + priv->rss.enable = false; |
---|
| 2693 | + return; |
---|
| 2694 | + } |
---|
| 2695 | + |
---|
| 2696 | + if (priv->dev->features & NETIF_F_RXHASH) |
---|
| 2697 | + priv->rss.enable = true; |
---|
| 2698 | + else |
---|
| 2699 | + priv->rss.enable = false; |
---|
| 2700 | + |
---|
| 2701 | + stmmac_rss_configure(priv, priv->hw, &priv->rss, |
---|
| 2702 | + priv->plat->rx_queues_to_use); |
---|
| 2703 | +} |
---|
| 2704 | + |
---|
2431 | 2705 | /** |
---|
2432 | 2706 | * stmmac_mtl_configuration - Configure MTL |
---|
2433 | 2707 | * @priv: driver private structure |
---|
.. | .. |
---|
2472 | 2746 | /* Set RX routing */ |
---|
2473 | 2747 | if (rx_queues_count > 1) |
---|
2474 | 2748 | stmmac_mac_config_rx_queues_routing(priv); |
---|
| 2749 | + |
---|
| 2750 | + /* Receive Side Scaling */ |
---|
| 2751 | + if (rx_queues_count > 1) |
---|
| 2752 | + stmmac_mac_config_rss(priv); |
---|
2475 | 2753 | } |
---|
2476 | 2754 | |
---|
2477 | 2755 | static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) |
---|
.. | .. |
---|
2487 | 2765 | /** |
---|
2488 | 2766 | * stmmac_hw_setup - setup mac in a usable state. |
---|
2489 | 2767 | * @dev : pointer to the device structure. |
---|
| 2768 | + * @ptp_register: register PTP if set |
---|
2490 | 2769 | * Description: |
---|
2491 | 2770 | * this is the main function to setup the HW in a usable state because the |
---|
2492 | 2771 | * dma engine is reset, the core registers are configured (e.g. AXI, |
---|
.. | .. |
---|
2496 | 2775 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
---|
2497 | 2776 | * file on failure. |
---|
2498 | 2777 | */ |
---|
2499 | | -static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) |
---|
| 2778 | +static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) |
---|
2500 | 2779 | { |
---|
2501 | 2780 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
2502 | 2781 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
---|
.. | .. |
---|
2552 | 2831 | |
---|
2553 | 2832 | stmmac_mmc_setup(priv); |
---|
2554 | 2833 | |
---|
2555 | | - if (IS_ENABLED(CONFIG_STMMAC_PTP) && init_ptp) { |
---|
| 2834 | + if (ptp_register) { |
---|
2556 | 2835 | ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
---|
2557 | 2836 | if (ret < 0) |
---|
2558 | | - netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); |
---|
2559 | | - |
---|
2560 | | - ret = stmmac_init_ptp(priv); |
---|
2561 | | - if (ret == -EOPNOTSUPP) |
---|
2562 | | - netdev_warn(priv->dev, "PTP not supported by HW\n"); |
---|
2563 | | - else if (ret) |
---|
2564 | | - netdev_warn(priv->dev, "PTP init failed\n"); |
---|
| 2837 | + netdev_warn(priv->dev, |
---|
| 2838 | + "failed to enable PTP reference clock: %pe\n", |
---|
| 2839 | + ERR_PTR(ret)); |
---|
2565 | 2840 | } |
---|
2566 | 2841 | |
---|
2567 | | - priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
---|
| 2842 | + ret = stmmac_init_ptp(priv); |
---|
| 2843 | + if (ret == -EOPNOTSUPP) |
---|
| 2844 | + netdev_warn(priv->dev, "PTP not supported by HW\n"); |
---|
| 2845 | + else if (ret) |
---|
| 2846 | + netdev_warn(priv->dev, "PTP init failed\n"); |
---|
| 2847 | + else if (ptp_register) |
---|
| 2848 | + stmmac_ptp_register(priv); |
---|
| 2849 | + |
---|
| 2850 | + priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; |
---|
| 2851 | + |
---|
| 2852 | + /* Convert the timer from msec to usec */ |
---|
| 2853 | + if (!priv->tx_lpi_timer) |
---|
| 2854 | + priv->tx_lpi_timer = eee_timer * 1000; |
---|
2568 | 2855 | |
---|
2569 | 2856 | if (priv->use_riwt) { |
---|
2570 | | - ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); |
---|
2571 | | - if (!ret) |
---|
2572 | | - priv->rx_riwt = MAX_DMA_RIWT; |
---|
| 2857 | + if (!priv->rx_riwt) |
---|
| 2858 | + priv->rx_riwt = DEF_DMA_RIWT; |
---|
| 2859 | + |
---|
| 2860 | + ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); |
---|
2573 | 2861 | } |
---|
2574 | 2862 | |
---|
2575 | 2863 | if (priv->hw->pcs) |
---|
2576 | | - stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); |
---|
| 2864 | + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); |
---|
2577 | 2865 | |
---|
2578 | 2866 | /* set TX and RX rings length */ |
---|
2579 | 2867 | stmmac_set_rings_length(priv); |
---|
2580 | 2868 | |
---|
2581 | 2869 | /* Enable TSO */ |
---|
2582 | 2870 | if (priv->tso) { |
---|
2583 | | - for (chan = 0; chan < tx_cnt; chan++) |
---|
| 2871 | + for (chan = 0; chan < tx_cnt; chan++) { |
---|
| 2872 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
---|
| 2873 | + |
---|
| 2874 | + /* TSO and TBS cannot co-exist */ |
---|
| 2875 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 2876 | + continue; |
---|
| 2877 | + |
---|
2584 | 2878 | stmmac_enable_tso(priv, priv->ioaddr, 1, chan); |
---|
| 2879 | + } |
---|
2585 | 2880 | } |
---|
| 2881 | + |
---|
| 2882 | + /* Enable Split Header */ |
---|
| 2883 | + if (priv->sph && priv->hw->rx_csum) { |
---|
| 2884 | + for (chan = 0; chan < rx_cnt; chan++) |
---|
| 2885 | + stmmac_enable_sph(priv, priv->ioaddr, 1, chan); |
---|
| 2886 | + } |
---|
| 2887 | + |
---|
| 2888 | + /* VLAN Tag Insertion */ |
---|
| 2889 | + if (priv->dma_cap.vlins) |
---|
| 2890 | + stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); |
---|
| 2891 | + |
---|
| 2892 | + /* TBS */ |
---|
| 2893 | + for (chan = 0; chan < tx_cnt; chan++) { |
---|
| 2894 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
---|
| 2895 | + int enable = tx_q->tbs & STMMAC_TBS_AVAIL; |
---|
| 2896 | + |
---|
| 2897 | + stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); |
---|
| 2898 | + } |
---|
| 2899 | + |
---|
| 2900 | + /* Configure real RX and TX queues */ |
---|
| 2901 | + netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); |
---|
| 2902 | + netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); |
---|
2586 | 2903 | |
---|
2587 | 2904 | /* Start the ball rolling... */ |
---|
2588 | 2905 | stmmac_start_all_dma(priv); |
---|
.. | .. |
---|
2594 | 2911 | { |
---|
2595 | 2912 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
2596 | 2913 | |
---|
2597 | | - if (IS_ENABLED(CONFIG_STMMAC_PTP)) |
---|
2598 | | - clk_disable_unprepare(priv->plat->clk_ptp_ref); |
---|
| 2914 | + clk_disable_unprepare(priv->plat->clk_ptp_ref); |
---|
2599 | 2915 | } |
---|
2600 | 2916 | |
---|
2601 | 2917 | /** |
---|
.. | .. |
---|
2610 | 2926 | static int stmmac_open(struct net_device *dev) |
---|
2611 | 2927 | { |
---|
2612 | 2928 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 2929 | + int bfsize = 0; |
---|
2613 | 2930 | u32 chan; |
---|
2614 | 2931 | int ret; |
---|
2615 | 2932 | |
---|
2616 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
---|
2617 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
---|
2618 | | - priv->hw->pcs != STMMAC_PCS_RTBI) { |
---|
| 2933 | + //printk("ben:stmmac_open.. \n"); |
---|
| 2934 | + ret = pm_runtime_get_sync(priv->device); |
---|
| 2935 | + if (ret < 0) { |
---|
| 2936 | + pm_runtime_put_noidle(priv->device); |
---|
| 2937 | + return ret; |
---|
| 2938 | + } |
---|
| 2939 | + |
---|
| 2940 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
---|
| 2941 | + priv->hw->pcs != STMMAC_PCS_RTBI && |
---|
| 2942 | + priv->hw->xpcs == NULL) { |
---|
2619 | 2943 | ret = stmmac_init_phy(dev); |
---|
2620 | 2944 | if (ret) { |
---|
2621 | 2945 | netdev_err(priv->dev, |
---|
2622 | 2946 | "%s: Cannot attach to PHY (error: %d)\n", |
---|
2623 | 2947 | __func__, ret); |
---|
2624 | | - return ret; |
---|
| 2948 | + goto init_phy_error; |
---|
2625 | 2949 | } |
---|
2626 | 2950 | } |
---|
2627 | 2951 | |
---|
.. | .. |
---|
2629 | 2953 | memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); |
---|
2630 | 2954 | priv->xstats.threshold = tc; |
---|
2631 | 2955 | |
---|
2632 | | - priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
---|
| 2956 | + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); |
---|
| 2957 | + if (bfsize < 0) |
---|
| 2958 | + bfsize = 0; |
---|
| 2959 | + |
---|
| 2960 | + if (bfsize < BUF_SIZE_16KiB) |
---|
| 2961 | + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
---|
| 2962 | + |
---|
| 2963 | + priv->dma_buf_sz = bfsize; |
---|
| 2964 | + buf_sz = bfsize; |
---|
| 2965 | + |
---|
2633 | 2966 | priv->rx_copybreak = STMMAC_RX_COPYBREAK; |
---|
| 2967 | + |
---|
| 2968 | + if (!priv->dma_tx_size) |
---|
| 2969 | + priv->dma_tx_size = priv->plat->dma_tx_size ? priv->plat->dma_tx_size : |
---|
| 2970 | + DMA_DEFAULT_TX_SIZE; |
---|
| 2971 | + |
---|
| 2972 | + if (!priv->dma_rx_size) |
---|
| 2973 | + priv->dma_rx_size = priv->plat->dma_rx_size ? priv->plat->dma_rx_size : |
---|
| 2974 | + DMA_DEFAULT_RX_SIZE; |
---|
| 2975 | + |
---|
| 2976 | + /* Earlier check for TBS */ |
---|
| 2977 | + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { |
---|
| 2978 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
---|
| 2979 | + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; |
---|
| 2980 | + |
---|
| 2981 | + /* Setup per-TXQ tbs flag before TX descriptor alloc */ |
---|
| 2982 | + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; |
---|
| 2983 | + } |
---|
2634 | 2984 | |
---|
2635 | 2985 | ret = alloc_dma_desc_resources(priv); |
---|
2636 | 2986 | if (ret < 0) { |
---|
.. | .. |
---|
2646 | 2996 | goto init_error; |
---|
2647 | 2997 | } |
---|
2648 | 2998 | |
---|
| 2999 | + if (priv->plat->serdes_powerup) { |
---|
| 3000 | + ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); |
---|
| 3001 | + if (ret < 0) { |
---|
| 3002 | + netdev_err(priv->dev, "%s: Serdes powerup failed\n", |
---|
| 3003 | + __func__); |
---|
| 3004 | + goto init_error; |
---|
| 3005 | + } |
---|
| 3006 | + } |
---|
| 3007 | + |
---|
| 3008 | + |
---|
| 3009 | + #if 1 |
---|
| 3010 | + printk("ben -------bootup add 2s delay time.\n"); |
---|
| 3011 | + mdelay(2500); |
---|
| 3012 | + #endif |
---|
| 3013 | + |
---|
2649 | 3014 | ret = stmmac_hw_setup(dev, true); |
---|
2650 | 3015 | if (ret < 0) { |
---|
2651 | 3016 | netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); |
---|
2652 | 3017 | goto init_error; |
---|
2653 | 3018 | } |
---|
2654 | 3019 | |
---|
2655 | | - stmmac_init_tx_coalesce(priv); |
---|
| 3020 | + stmmac_init_coalesce(priv); |
---|
2656 | 3021 | |
---|
2657 | | - if (dev->phydev) |
---|
2658 | | - phy_start(dev->phydev); |
---|
| 3022 | + phylink_start(priv->phylink); |
---|
| 3023 | + /* We may have called phylink_speed_down before */ |
---|
| 3024 | + phylink_speed_up(priv->phylink); |
---|
2659 | 3025 | |
---|
2660 | 3026 | /* Request the IRQ lines */ |
---|
2661 | 3027 | ret = request_irq(dev->irq, stmmac_interrupt, |
---|
.. | .. |
---|
2702 | 3068 | wolirq_error: |
---|
2703 | 3069 | free_irq(dev->irq, dev); |
---|
2704 | 3070 | irq_error: |
---|
2705 | | - if (dev->phydev) |
---|
2706 | | - phy_stop(dev->phydev); |
---|
| 3071 | + phylink_stop(priv->phylink); |
---|
2707 | 3072 | |
---|
2708 | 3073 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
---|
2709 | 3074 | del_timer_sync(&priv->tx_queue[chan].txtimer); |
---|
.. | .. |
---|
2712 | 3077 | init_error: |
---|
2713 | 3078 | free_dma_desc_resources(priv); |
---|
2714 | 3079 | dma_desc_error: |
---|
2715 | | - if (dev->phydev) |
---|
2716 | | - phy_disconnect(dev->phydev); |
---|
2717 | | - |
---|
| 3080 | + phylink_disconnect_phy(priv->phylink); |
---|
| 3081 | +init_phy_error: |
---|
| 3082 | + pm_runtime_put(priv->device); |
---|
2718 | 3083 | return ret; |
---|
2719 | 3084 | } |
---|
2720 | 3085 | |
---|
.. | .. |
---|
2729 | 3094 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
2730 | 3095 | u32 chan; |
---|
2731 | 3096 | |
---|
| 3097 | + if (device_may_wakeup(priv->device)) |
---|
| 3098 | + phylink_speed_down(priv->phylink, false); |
---|
2732 | 3099 | /* Stop and disconnect the PHY */ |
---|
2733 | | - if (dev->phydev) { |
---|
2734 | | - phy_stop(dev->phydev); |
---|
2735 | | - phy_disconnect(dev->phydev); |
---|
2736 | | - if (priv->plat->integrated_phy_power) |
---|
2737 | | - priv->plat->integrated_phy_power(priv->plat->bsp_priv, |
---|
2738 | | - false); |
---|
2739 | | - } |
---|
| 3100 | + phylink_stop(priv->phylink); |
---|
| 3101 | + phylink_disconnect_phy(priv->phylink); |
---|
| 3102 | + |
---|
| 3103 | + if (priv->plat->integrated_phy_power) |
---|
| 3104 | + priv->plat->integrated_phy_power(priv->plat->bsp_priv, false); |
---|
2740 | 3105 | |
---|
2741 | 3106 | stmmac_disable_all_queues(priv); |
---|
2742 | 3107 | |
---|
.. | .. |
---|
2764 | 3129 | /* Disable the MAC Rx/Tx */ |
---|
2765 | 3130 | stmmac_mac_set(priv, priv->ioaddr, false); |
---|
2766 | 3131 | |
---|
| 3132 | + /* Powerdown Serdes if there is */ |
---|
| 3133 | + if (priv->plat->serdes_powerdown) |
---|
| 3134 | + priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); |
---|
| 3135 | + |
---|
2767 | 3136 | netif_carrier_off(dev); |
---|
2768 | 3137 | |
---|
2769 | | - if (IS_ENABLED(CONFIG_STMMAC_PTP)) |
---|
2770 | | - stmmac_release_ptp(priv); |
---|
| 3138 | + stmmac_release_ptp(priv); |
---|
| 3139 | + |
---|
| 3140 | + pm_runtime_put(priv->device); |
---|
2771 | 3141 | |
---|
2772 | 3142 | return 0; |
---|
| 3143 | +} |
---|
| 3144 | + |
---|
| 3145 | +static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, |
---|
| 3146 | + struct stmmac_tx_queue *tx_q) |
---|
| 3147 | +{ |
---|
| 3148 | + u16 tag = 0x0, inner_tag = 0x0; |
---|
| 3149 | + u32 inner_type = 0x0; |
---|
| 3150 | + struct dma_desc *p; |
---|
| 3151 | + |
---|
| 3152 | + if (!priv->dma_cap.vlins) |
---|
| 3153 | + return false; |
---|
| 3154 | + if (!skb_vlan_tag_present(skb)) |
---|
| 3155 | + return false; |
---|
| 3156 | + if (skb->vlan_proto == htons(ETH_P_8021AD)) { |
---|
| 3157 | + inner_tag = skb_vlan_tag_get(skb); |
---|
| 3158 | + inner_type = STMMAC_VLAN_INSERT; |
---|
| 3159 | + } |
---|
| 3160 | + |
---|
| 3161 | + tag = skb_vlan_tag_get(skb); |
---|
| 3162 | + |
---|
| 3163 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3164 | + p = &tx_q->dma_entx[tx_q->cur_tx].basic; |
---|
| 3165 | + else |
---|
| 3166 | + p = &tx_q->dma_tx[tx_q->cur_tx]; |
---|
| 3167 | + |
---|
| 3168 | + if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) |
---|
| 3169 | + return false; |
---|
| 3170 | + |
---|
| 3171 | + stmmac_set_tx_owner(priv, p); |
---|
| 3172 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); |
---|
| 3173 | + return true; |
---|
2773 | 3174 | } |
---|
2774 | 3175 | |
---|
2775 | 3176 | /** |
---|
.. | .. |
---|
2777 | 3178 | * @priv: driver private structure |
---|
2778 | 3179 | * @des: buffer start address |
---|
2779 | 3180 | * @total_len: total length to fill in descriptors |
---|
2780 | | - * @last_segmant: condition for the last descriptor |
---|
| 3181 | + * @last_segment: condition for the last descriptor |
---|
2781 | 3182 | * @queue: TX queue index |
---|
2782 | 3183 | * Description: |
---|
2783 | 3184 | * This function fills descriptor and request new descriptors according to |
---|
2784 | 3185 | * buffer length to fill |
---|
2785 | 3186 | */ |
---|
2786 | | -static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, |
---|
| 3187 | +static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, |
---|
2787 | 3188 | int total_len, bool last_segment, u32 queue) |
---|
2788 | 3189 | { |
---|
2789 | 3190 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
---|
.. | .. |
---|
2794 | 3195 | tmp_len = total_len; |
---|
2795 | 3196 | |
---|
2796 | 3197 | while (tmp_len > 0) { |
---|
2797 | | - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
---|
2798 | | - WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
---|
2799 | | - desc = tx_q->dma_tx + tx_q->cur_tx; |
---|
| 3198 | + dma_addr_t curr_addr; |
---|
2800 | 3199 | |
---|
2801 | | - desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); |
---|
| 3200 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, |
---|
| 3201 | + priv->dma_tx_size); |
---|
| 3202 | + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
---|
| 3203 | + |
---|
| 3204 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3205 | + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
---|
| 3206 | + else |
---|
| 3207 | + desc = &tx_q->dma_tx[tx_q->cur_tx]; |
---|
| 3208 | + |
---|
| 3209 | + curr_addr = des + (total_len - tmp_len); |
---|
| 3210 | + if (priv->dma_cap.addr64 <= 32) |
---|
| 3211 | + desc->des0 = cpu_to_le32(curr_addr); |
---|
| 3212 | + else |
---|
| 3213 | + stmmac_set_desc_addr(priv, desc, curr_addr); |
---|
| 3214 | + |
---|
2802 | 3215 | buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? |
---|
2803 | 3216 | TSO_MAX_BUFF_SIZE : tmp_len; |
---|
2804 | 3217 | |
---|
.. | .. |
---|
2842 | 3255 | { |
---|
2843 | 3256 | struct dma_desc *desc, *first, *mss_desc = NULL; |
---|
2844 | 3257 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 3258 | + int desc_size, tmp_pay_len = 0, first_tx; |
---|
2845 | 3259 | int nfrags = skb_shinfo(skb)->nr_frags; |
---|
2846 | 3260 | u32 queue = skb_get_queue_mapping(skb); |
---|
2847 | | - unsigned int first_entry, des; |
---|
2848 | | - u8 proto_hdr_len, hdr; |
---|
| 3261 | + unsigned int first_entry, tx_packets; |
---|
2849 | 3262 | struct stmmac_tx_queue *tx_q; |
---|
2850 | | - int tmp_pay_len = 0; |
---|
| 3263 | + bool has_vlan, set_ic; |
---|
| 3264 | + u8 proto_hdr_len, hdr; |
---|
2851 | 3265 | u32 pay_len, mss; |
---|
| 3266 | + dma_addr_t des; |
---|
2852 | 3267 | int i; |
---|
2853 | 3268 | |
---|
2854 | 3269 | tx_q = &priv->tx_queue[queue]; |
---|
| 3270 | + first_tx = tx_q->cur_tx; |
---|
2855 | 3271 | |
---|
2856 | 3272 | /* Compute header lengths */ |
---|
2857 | 3273 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { |
---|
.. | .. |
---|
2882 | 3298 | |
---|
2883 | 3299 | /* set new MSS value if needed */ |
---|
2884 | 3300 | if (mss != tx_q->mss) { |
---|
2885 | | - mss_desc = tx_q->dma_tx + tx_q->cur_tx; |
---|
| 3301 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3302 | + mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
---|
| 3303 | + else |
---|
| 3304 | + mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; |
---|
| 3305 | + |
---|
2886 | 3306 | stmmac_set_mss(priv, mss_desc, mss); |
---|
2887 | 3307 | tx_q->mss = mss; |
---|
2888 | | - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
---|
| 3308 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, |
---|
| 3309 | + priv->dma_tx_size); |
---|
2889 | 3310 | WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
---|
2890 | 3311 | } |
---|
2891 | 3312 | |
---|
.. | .. |
---|
2896 | 3317 | skb->data_len); |
---|
2897 | 3318 | } |
---|
2898 | 3319 | |
---|
| 3320 | + /* Check if VLAN can be inserted by HW */ |
---|
| 3321 | + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); |
---|
| 3322 | + |
---|
2899 | 3323 | first_entry = tx_q->cur_tx; |
---|
2900 | 3324 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
---|
2901 | 3325 | |
---|
2902 | | - desc = tx_q->dma_tx + first_entry; |
---|
| 3326 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3327 | + desc = &tx_q->dma_entx[first_entry].basic; |
---|
| 3328 | + else |
---|
| 3329 | + desc = &tx_q->dma_tx[first_entry]; |
---|
2903 | 3330 | first = desc; |
---|
| 3331 | + |
---|
| 3332 | + if (has_vlan) |
---|
| 3333 | + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); |
---|
2904 | 3334 | |
---|
2905 | 3335 | /* first descriptor: fill Headers on Buf1 */ |
---|
2906 | 3336 | des = dma_map_single(priv->device, skb->data, skb_headlen(skb), |
---|
.. | .. |
---|
2911 | 3341 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
---|
2912 | 3342 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
---|
2913 | 3343 | |
---|
2914 | | - first->des0 = cpu_to_le32(des); |
---|
| 3344 | + if (priv->dma_cap.addr64 <= 32) { |
---|
| 3345 | + first->des0 = cpu_to_le32(des); |
---|
2915 | 3346 | |
---|
2916 | | - /* Fill start of payload in buff2 of first descriptor */ |
---|
2917 | | - if (pay_len) |
---|
2918 | | - first->des1 = cpu_to_le32(des + proto_hdr_len); |
---|
| 3347 | + /* Fill start of payload in buff2 of first descriptor */ |
---|
| 3348 | + if (pay_len) |
---|
| 3349 | + first->des1 = cpu_to_le32(des + proto_hdr_len); |
---|
2919 | 3350 | |
---|
2920 | | - /* If needed take extra descriptors to fill the remaining payload */ |
---|
2921 | | - tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; |
---|
| 3351 | + /* If needed take extra descriptors to fill the remaining payload */ |
---|
| 3352 | + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; |
---|
| 3353 | + } else { |
---|
| 3354 | + stmmac_set_desc_addr(priv, first, des); |
---|
| 3355 | + tmp_pay_len = pay_len; |
---|
| 3356 | + des += proto_hdr_len; |
---|
| 3357 | + pay_len = 0; |
---|
| 3358 | + } |
---|
2922 | 3359 | |
---|
2923 | 3360 | stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); |
---|
2924 | 3361 | |
---|
.. | .. |
---|
2945 | 3382 | /* Only the last descriptor gets to point to the skb. */ |
---|
2946 | 3383 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; |
---|
2947 | 3384 | |
---|
| 3385 | + /* Manage tx mitigation */ |
---|
| 3386 | + tx_packets = (tx_q->cur_tx + 1) - first_tx; |
---|
| 3387 | + tx_q->tx_count_frames += tx_packets; |
---|
| 3388 | + |
---|
| 3389 | + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) |
---|
| 3390 | + set_ic = true; |
---|
| 3391 | + else if (!priv->tx_coal_frames) |
---|
| 3392 | + set_ic = false; |
---|
| 3393 | + else if (tx_packets > priv->tx_coal_frames) |
---|
| 3394 | + set_ic = true; |
---|
| 3395 | + else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) |
---|
| 3396 | + set_ic = true; |
---|
| 3397 | + else |
---|
| 3398 | + set_ic = false; |
---|
| 3399 | + |
---|
| 3400 | + if (set_ic) { |
---|
| 3401 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3402 | + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
---|
| 3403 | + else |
---|
| 3404 | + desc = &tx_q->dma_tx[tx_q->cur_tx]; |
---|
| 3405 | + |
---|
| 3406 | + tx_q->tx_count_frames = 0; |
---|
| 3407 | + stmmac_set_tx_ic(priv, desc); |
---|
| 3408 | + priv->xstats.tx_set_ic_bit++; |
---|
| 3409 | + } |
---|
| 3410 | + |
---|
2948 | 3411 | /* We've used all descriptors we need for this skb, however, |
---|
2949 | 3412 | * advance cur_tx so that it references a fresh descriptor. |
---|
2950 | 3413 | * ndo_start_xmit will fill this descriptor the next time it's |
---|
2951 | 3414 | * called and stmmac_tx_clean may clean up to this descriptor. |
---|
2952 | 3415 | */ |
---|
2953 | | - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
---|
| 3416 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); |
---|
2954 | 3417 | |
---|
2955 | 3418 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
---|
2956 | 3419 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
---|
.. | .. |
---|
2962 | 3425 | priv->xstats.tx_tso_frames++; |
---|
2963 | 3426 | priv->xstats.tx_tso_nfrags += nfrags; |
---|
2964 | 3427 | |
---|
2965 | | - /* Manage tx mitigation */ |
---|
2966 | | - tx_q->tx_count_frames += nfrags + 1; |
---|
2967 | | - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && |
---|
2968 | | - !(priv->synopsys_id >= DWMAC_CORE_4_00 && |
---|
2969 | | - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
---|
2970 | | - priv->hwts_tx_en)) { |
---|
2971 | | - stmmac_tx_timer_arm(priv, queue); |
---|
2972 | | - } else { |
---|
2973 | | - tx_q->tx_count_frames = 0; |
---|
2974 | | - stmmac_set_tx_ic(priv, desc); |
---|
2975 | | - priv->xstats.tx_set_ic_bit++; |
---|
2976 | | - } |
---|
| 3428 | + if (priv->sarc_type) |
---|
| 3429 | + stmmac_set_desc_sarc(priv, first, priv->sarc_type); |
---|
2977 | 3430 | |
---|
2978 | 3431 | skb_tx_timestamp(skb); |
---|
2979 | 3432 | |
---|
.. | .. |
---|
3012 | 3465 | pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", |
---|
3013 | 3466 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
---|
3014 | 3467 | tx_q->cur_tx, first, nfrags); |
---|
3015 | | - |
---|
3016 | | - stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); |
---|
3017 | | - |
---|
3018 | 3468 | pr_info(">>> frame to be transmitted: "); |
---|
3019 | 3469 | print_pkt(skb->data, skb_headlen(skb)); |
---|
3020 | 3470 | } |
---|
3021 | 3471 | |
---|
3022 | 3472 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
---|
3023 | 3473 | |
---|
3024 | | - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); |
---|
| 3474 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3475 | + desc_size = sizeof(struct dma_edesc); |
---|
| 3476 | + else |
---|
| 3477 | + desc_size = sizeof(struct dma_desc); |
---|
| 3478 | + |
---|
| 3479 | + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); |
---|
3025 | 3480 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
---|
3026 | 3481 | stmmac_tx_timer_arm(priv, queue); |
---|
3027 | 3482 | |
---|
.. | .. |
---|
3044 | 3499 | */ |
---|
3045 | 3500 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) |
---|
3046 | 3501 | { |
---|
| 3502 | + unsigned int first_entry, tx_packets, enh_desc; |
---|
3047 | 3503 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
3048 | 3504 | unsigned int nopaged_len = skb_headlen(skb); |
---|
3049 | 3505 | int i, csum_insertion = 0, is_jumbo = 0; |
---|
3050 | 3506 | u32 queue = skb_get_queue_mapping(skb); |
---|
3051 | 3507 | int nfrags = skb_shinfo(skb)->nr_frags; |
---|
3052 | 3508 | int gso = skb_shinfo(skb)->gso_type; |
---|
3053 | | - int entry; |
---|
3054 | | - unsigned int first_entry; |
---|
| 3509 | + struct dma_edesc *tbs_desc = NULL; |
---|
| 3510 | + int entry, desc_size, first_tx; |
---|
3055 | 3511 | struct dma_desc *desc, *first; |
---|
3056 | 3512 | struct stmmac_tx_queue *tx_q; |
---|
3057 | | - unsigned int enh_desc; |
---|
3058 | | - unsigned int des; |
---|
| 3513 | + bool has_vlan, set_ic; |
---|
| 3514 | + dma_addr_t des; |
---|
3059 | 3515 | |
---|
3060 | 3516 | tx_q = &priv->tx_queue[queue]; |
---|
| 3517 | + first_tx = tx_q->cur_tx; |
---|
3061 | 3518 | |
---|
3062 | 3519 | if (priv->tx_path_in_lpi_mode) |
---|
3063 | 3520 | stmmac_disable_eee_mode(priv); |
---|
.. | .. |
---|
3082 | 3539 | return NETDEV_TX_BUSY; |
---|
3083 | 3540 | } |
---|
3084 | 3541 | |
---|
| 3542 | + /* Check if VLAN can be inserted by HW */ |
---|
| 3543 | + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); |
---|
| 3544 | + |
---|
3085 | 3545 | entry = tx_q->cur_tx; |
---|
3086 | 3546 | first_entry = entry; |
---|
3087 | 3547 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
---|
.. | .. |
---|
3090 | 3550 | |
---|
3091 | 3551 | if (likely(priv->extend_desc)) |
---|
3092 | 3552 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
---|
| 3553 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3554 | + desc = &tx_q->dma_entx[entry].basic; |
---|
3093 | 3555 | else |
---|
3094 | 3556 | desc = tx_q->dma_tx + entry; |
---|
3095 | 3557 | |
---|
3096 | 3558 | first = desc; |
---|
| 3559 | + |
---|
| 3560 | + if (has_vlan) |
---|
| 3561 | + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); |
---|
3097 | 3562 | |
---|
3098 | 3563 | enh_desc = priv->plat->enh_desc; |
---|
3099 | 3564 | /* To program the descriptors according to the size of the frame */ |
---|
.. | .. |
---|
3111 | 3576 | int len = skb_frag_size(frag); |
---|
3112 | 3577 | bool last_segment = (i == (nfrags - 1)); |
---|
3113 | 3578 | |
---|
3114 | | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
---|
| 3579 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); |
---|
3115 | 3580 | WARN_ON(tx_q->tx_skbuff[entry]); |
---|
3116 | 3581 | |
---|
3117 | 3582 | if (likely(priv->extend_desc)) |
---|
3118 | 3583 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
---|
| 3584 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3585 | + desc = &tx_q->dma_entx[entry].basic; |
---|
3119 | 3586 | else |
---|
3120 | 3587 | desc = tx_q->dma_tx + entry; |
---|
3121 | 3588 | |
---|
.. | .. |
---|
3140 | 3607 | /* Only the last descriptor gets to point to the skb. */ |
---|
3141 | 3608 | tx_q->tx_skbuff[entry] = skb; |
---|
3142 | 3609 | |
---|
| 3610 | + /* According to the coalesce parameter the IC bit for the latest |
---|
| 3611 | + * segment is reset and the timer re-started to clean the tx status. |
---|
| 3612 | + * This approach takes care about the fragments: desc is the first |
---|
| 3613 | + * element in case of no SG. |
---|
| 3614 | + */ |
---|
| 3615 | + tx_packets = (entry + 1) - first_tx; |
---|
| 3616 | + tx_q->tx_count_frames += tx_packets; |
---|
| 3617 | + |
---|
| 3618 | + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) |
---|
| 3619 | + set_ic = true; |
---|
| 3620 | + else if (!priv->tx_coal_frames) |
---|
| 3621 | + set_ic = false; |
---|
| 3622 | + else if (tx_packets > priv->tx_coal_frames) |
---|
| 3623 | + set_ic = true; |
---|
| 3624 | + else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) |
---|
| 3625 | + set_ic = true; |
---|
| 3626 | + else |
---|
| 3627 | + set_ic = false; |
---|
| 3628 | + |
---|
| 3629 | + if (set_ic) { |
---|
| 3630 | + if (likely(priv->extend_desc)) |
---|
| 3631 | + desc = &tx_q->dma_etx[entry].basic; |
---|
| 3632 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3633 | + desc = &tx_q->dma_entx[entry].basic; |
---|
| 3634 | + else |
---|
| 3635 | + desc = &tx_q->dma_tx[entry]; |
---|
| 3636 | + |
---|
| 3637 | + tx_q->tx_count_frames = 0; |
---|
| 3638 | + stmmac_set_tx_ic(priv, desc); |
---|
| 3639 | + priv->xstats.tx_set_ic_bit++; |
---|
| 3640 | + } |
---|
| 3641 | + |
---|
3143 | 3642 | /* We've used all descriptors we need for this skb, however, |
---|
3144 | 3643 | * advance cur_tx so that it references a fresh descriptor. |
---|
3145 | 3644 | * ndo_start_xmit will fill this descriptor the next time it's |
---|
3146 | 3645 | * called and stmmac_tx_clean may clean up to this descriptor. |
---|
3147 | 3646 | */ |
---|
3148 | | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
---|
| 3647 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); |
---|
3149 | 3648 | tx_q->cur_tx = entry; |
---|
3150 | 3649 | |
---|
3151 | 3650 | if (netif_msg_pktdata(priv)) { |
---|
3152 | | - void *tx_head; |
---|
3153 | | - |
---|
3154 | 3651 | netdev_dbg(priv->dev, |
---|
3155 | 3652 | "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", |
---|
3156 | 3653 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
---|
3157 | 3654 | entry, first, nfrags); |
---|
3158 | | - |
---|
3159 | | - if (priv->extend_desc) |
---|
3160 | | - tx_head = (void *)tx_q->dma_etx; |
---|
3161 | | - else |
---|
3162 | | - tx_head = (void *)tx_q->dma_tx; |
---|
3163 | | - |
---|
3164 | | - stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); |
---|
3165 | 3655 | |
---|
3166 | 3656 | netdev_dbg(priv->dev, ">>> frame to be transmitted: "); |
---|
3167 | 3657 | print_pkt(skb->data, skb->len); |
---|
.. | .. |
---|
3175 | 3665 | |
---|
3176 | 3666 | dev->stats.tx_bytes += skb->len; |
---|
3177 | 3667 | |
---|
3178 | | - /* According to the coalesce parameter the IC bit for the latest |
---|
3179 | | - * segment is reset and the timer re-started to clean the tx status. |
---|
3180 | | - * This approach takes care about the fragments: desc is the first |
---|
3181 | | - * element in case of no SG. |
---|
3182 | | - */ |
---|
3183 | | - tx_q->tx_count_frames += nfrags + 1; |
---|
3184 | | - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && |
---|
3185 | | - !(priv->synopsys_id >= DWMAC_CORE_4_00 && |
---|
3186 | | - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
---|
3187 | | - priv->hwts_tx_en)) { |
---|
3188 | | - stmmac_tx_timer_arm(priv, queue); |
---|
3189 | | - } else { |
---|
3190 | | - tx_q->tx_count_frames = 0; |
---|
3191 | | - stmmac_set_tx_ic(priv, desc); |
---|
3192 | | - priv->xstats.tx_set_ic_bit++; |
---|
3193 | | - } |
---|
| 3668 | + if (priv->sarc_type) |
---|
| 3669 | + stmmac_set_desc_sarc(priv, first, priv->sarc_type); |
---|
3194 | 3670 | |
---|
3195 | 3671 | skb_tx_timestamp(skb); |
---|
3196 | 3672 | |
---|
.. | .. |
---|
3222 | 3698 | |
---|
3223 | 3699 | /* Prepare the first descriptor setting the OWN bit too */ |
---|
3224 | 3700 | stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, |
---|
3225 | | - csum_insertion, priv->mode, 1, last_segment, |
---|
| 3701 | + csum_insertion, priv->mode, 0, last_segment, |
---|
3226 | 3702 | skb->len); |
---|
3227 | | - } else { |
---|
3228 | | - stmmac_set_tx_owner(priv, first); |
---|
3229 | 3703 | } |
---|
| 3704 | + |
---|
| 3705 | + if (tx_q->tbs & STMMAC_TBS_EN) { |
---|
| 3706 | + struct timespec64 ts = ns_to_timespec64(skb->tstamp); |
---|
| 3707 | + |
---|
| 3708 | + tbs_desc = &tx_q->dma_entx[first_entry]; |
---|
| 3709 | + stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); |
---|
| 3710 | + } |
---|
| 3711 | + |
---|
| 3712 | + stmmac_set_tx_owner(priv, first); |
---|
3230 | 3713 | |
---|
3231 | 3714 | /* The own bit must be the latest setting done when prepare the |
---|
3232 | 3715 | * descriptor and then barrier is needed to make sure that |
---|
.. | .. |
---|
3238 | 3721 | |
---|
3239 | 3722 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
---|
3240 | 3723 | |
---|
3241 | | - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); |
---|
| 3724 | + if (likely(priv->extend_desc)) |
---|
| 3725 | + desc_size = sizeof(struct dma_extended_desc); |
---|
| 3726 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
---|
| 3727 | + desc_size = sizeof(struct dma_edesc); |
---|
| 3728 | + else |
---|
| 3729 | + desc_size = sizeof(struct dma_desc); |
---|
| 3730 | + |
---|
| 3731 | + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); |
---|
3242 | 3732 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
---|
3243 | 3733 | stmmac_tx_timer_arm(priv, queue); |
---|
3244 | 3734 | |
---|
.. | .. |
---|
3272 | 3762 | } |
---|
3273 | 3763 | } |
---|
3274 | 3764 | |
---|
3275 | | - |
---|
3276 | | -static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) |
---|
3277 | | -{ |
---|
3278 | | - if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) |
---|
3279 | | - return 0; |
---|
3280 | | - |
---|
3281 | | - return 1; |
---|
3282 | | -} |
---|
3283 | | - |
---|
3284 | 3765 | /** |
---|
3285 | 3766 | * stmmac_rx_refill - refill used skb preallocated buffers |
---|
3286 | 3767 | * @priv: driver private structure |
---|
.. | .. |
---|
3291 | 3772 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
---|
3292 | 3773 | { |
---|
3293 | 3774 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
---|
3294 | | - int dirty = stmmac_rx_dirty(priv, queue); |
---|
| 3775 | + int len, dirty = stmmac_rx_dirty(priv, queue); |
---|
3295 | 3776 | unsigned int entry = rx_q->dirty_rx; |
---|
| 3777 | + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
---|
3296 | 3778 | |
---|
3297 | | - int bfsize = priv->dma_buf_sz; |
---|
| 3779 | + if (priv->dma_cap.addr64 <= 32) |
---|
| 3780 | + gfp |= GFP_DMA32; |
---|
| 3781 | + |
---|
| 3782 | + len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; |
---|
3298 | 3783 | |
---|
3299 | 3784 | while (dirty-- > 0) { |
---|
| 3785 | + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; |
---|
3300 | 3786 | struct dma_desc *p; |
---|
| 3787 | + bool use_rx_wd; |
---|
3301 | 3788 | |
---|
3302 | 3789 | if (priv->extend_desc) |
---|
3303 | 3790 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
---|
3304 | 3791 | else |
---|
3305 | 3792 | p = rx_q->dma_rx + entry; |
---|
3306 | 3793 | |
---|
3307 | | - if (likely(!rx_q->rx_skbuff[entry])) { |
---|
3308 | | - struct sk_buff *skb; |
---|
3309 | | - |
---|
3310 | | - skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); |
---|
3311 | | - if (unlikely(!skb)) { |
---|
3312 | | - /* so for a while no zero-copy! */ |
---|
3313 | | - rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; |
---|
3314 | | - if (unlikely(net_ratelimit())) |
---|
3315 | | - dev_err(priv->device, |
---|
3316 | | - "fail to alloc skb entry %d\n", |
---|
3317 | | - entry); |
---|
| 3794 | + if (!buf->page) { |
---|
| 3795 | + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
---|
| 3796 | + if (!buf->page) |
---|
3318 | 3797 | break; |
---|
3319 | | - } |
---|
3320 | | - |
---|
3321 | | - rx_q->rx_skbuff[entry] = skb; |
---|
3322 | | - rx_q->rx_skbuff_dma[entry] = |
---|
3323 | | - dma_map_single(priv->device, skb->data, bfsize, |
---|
3324 | | - DMA_FROM_DEVICE); |
---|
3325 | | - if (dma_mapping_error(priv->device, |
---|
3326 | | - rx_q->rx_skbuff_dma[entry])) { |
---|
3327 | | - netdev_err(priv->dev, "Rx DMA map failed\n"); |
---|
3328 | | - dev_kfree_skb(skb); |
---|
3329 | | - break; |
---|
3330 | | - } |
---|
3331 | | - |
---|
3332 | | - stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]); |
---|
3333 | | - stmmac_refill_desc3(priv, rx_q, p); |
---|
3334 | | - |
---|
3335 | | - if (rx_q->rx_zeroc_thresh > 0) |
---|
3336 | | - rx_q->rx_zeroc_thresh--; |
---|
3337 | | - |
---|
3338 | | - netif_dbg(priv, rx_status, priv->dev, |
---|
3339 | | - "refill entry #%d\n", entry); |
---|
3340 | 3798 | } |
---|
3341 | | - dma_wmb(); |
---|
3342 | 3799 | |
---|
3343 | | - stmmac_set_rx_owner(priv, p, priv->use_riwt); |
---|
| 3800 | + if (priv->sph && !buf->sec_page) { |
---|
| 3801 | + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
---|
| 3802 | + if (!buf->sec_page) |
---|
| 3803 | + break; |
---|
| 3804 | + |
---|
| 3805 | + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); |
---|
| 3806 | + } |
---|
| 3807 | + |
---|
| 3808 | + buf->addr = page_pool_get_dma_addr(buf->page); |
---|
| 3809 | + stmmac_set_desc_addr(priv, p, buf->addr); |
---|
| 3810 | + if (priv->sph) |
---|
| 3811 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); |
---|
| 3812 | + else |
---|
| 3813 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); |
---|
| 3814 | + stmmac_refill_desc3(priv, rx_q, p); |
---|
| 3815 | + |
---|
| 3816 | + rx_q->rx_count_frames++; |
---|
| 3817 | + rx_q->rx_count_frames += priv->rx_coal_frames; |
---|
| 3818 | + if (rx_q->rx_count_frames > priv->rx_coal_frames) |
---|
| 3819 | + rx_q->rx_count_frames = 0; |
---|
| 3820 | + |
---|
| 3821 | + use_rx_wd = !priv->rx_coal_frames; |
---|
| 3822 | + use_rx_wd |= rx_q->rx_count_frames > 0; |
---|
| 3823 | + if (!priv->use_riwt) |
---|
| 3824 | + use_rx_wd = false; |
---|
3344 | 3825 | |
---|
3345 | 3826 | dma_wmb(); |
---|
| 3827 | + stmmac_set_rx_owner(priv, p, use_rx_wd); |
---|
3346 | 3828 | |
---|
3347 | | - entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); |
---|
| 3829 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); |
---|
3348 | 3830 | } |
---|
3349 | 3831 | rx_q->dirty_rx = entry; |
---|
| 3832 | + rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
---|
| 3833 | + (rx_q->dirty_rx * sizeof(struct dma_desc)); |
---|
3350 | 3834 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); |
---|
| 3835 | +} |
---|
| 3836 | + |
---|
| 3837 | +static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, |
---|
| 3838 | + struct dma_desc *p, |
---|
| 3839 | + int status, unsigned int len) |
---|
| 3840 | +{ |
---|
| 3841 | + unsigned int plen = 0, hlen = 0; |
---|
| 3842 | + int coe = priv->hw->rx_csum; |
---|
| 3843 | + |
---|
| 3844 | + /* Not first descriptor, buffer is always zero */ |
---|
| 3845 | + if (priv->sph && len) |
---|
| 3846 | + return 0; |
---|
| 3847 | + |
---|
| 3848 | + /* First descriptor, get split header length */ |
---|
| 3849 | + stmmac_get_rx_header_len(priv, p, &hlen); |
---|
| 3850 | + if (priv->sph && hlen) { |
---|
| 3851 | + priv->xstats.rx_split_hdr_pkt_n++; |
---|
| 3852 | + return hlen; |
---|
| 3853 | + } |
---|
| 3854 | + |
---|
| 3855 | + /* First descriptor, not last descriptor and not split header */ |
---|
| 3856 | + if (status & rx_not_ls) |
---|
| 3857 | + return priv->dma_buf_sz; |
---|
| 3858 | + |
---|
| 3859 | + plen = stmmac_get_rx_frame_len(priv, p, coe); |
---|
| 3860 | + |
---|
| 3861 | + /* First descriptor and last descriptor and not split header */ |
---|
| 3862 | + return min_t(unsigned int, priv->dma_buf_sz, plen); |
---|
| 3863 | +} |
---|
| 3864 | + |
---|
| 3865 | +static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, |
---|
| 3866 | + struct dma_desc *p, |
---|
| 3867 | + int status, unsigned int len) |
---|
| 3868 | +{ |
---|
| 3869 | + int coe = priv->hw->rx_csum; |
---|
| 3870 | + unsigned int plen = 0; |
---|
| 3871 | + |
---|
| 3872 | + /* Not split header, buffer is not available */ |
---|
| 3873 | + if (!priv->sph) |
---|
| 3874 | + return 0; |
---|
| 3875 | + |
---|
| 3876 | + /* Not last descriptor */ |
---|
| 3877 | + if (status & rx_not_ls) |
---|
| 3878 | + return priv->dma_buf_sz; |
---|
| 3879 | + |
---|
| 3880 | + plen = stmmac_get_rx_frame_len(priv, p, coe); |
---|
| 3881 | + |
---|
| 3882 | + /* Last descriptor */ |
---|
| 3883 | + return plen - len; |
---|
3351 | 3884 | } |
---|
3352 | 3885 | |
---|
3353 | 3886 | /** |
---|
.. | .. |
---|
3362 | 3895 | { |
---|
3363 | 3896 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
---|
3364 | 3897 | struct stmmac_channel *ch = &priv->channel[queue]; |
---|
| 3898 | + unsigned int count = 0, error = 0, len = 0; |
---|
| 3899 | + int status = 0, coe = priv->hw->rx_csum; |
---|
3365 | 3900 | unsigned int next_entry = rx_q->cur_rx; |
---|
3366 | | - int coe = priv->hw->rx_csum; |
---|
3367 | | - unsigned int count = 0; |
---|
3368 | | - bool xmac; |
---|
3369 | | - |
---|
3370 | | - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
---|
| 3901 | + unsigned int desc_size; |
---|
| 3902 | + struct sk_buff *skb = NULL; |
---|
3371 | 3903 | |
---|
3372 | 3904 | if (netif_msg_rx_status(priv)) { |
---|
3373 | 3905 | void *rx_head; |
---|
3374 | 3906 | |
---|
3375 | 3907 | netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); |
---|
3376 | | - if (priv->extend_desc) |
---|
| 3908 | + if (priv->extend_desc) { |
---|
3377 | 3909 | rx_head = (void *)rx_q->dma_erx; |
---|
3378 | | - else |
---|
| 3910 | + desc_size = sizeof(struct dma_extended_desc); |
---|
| 3911 | + } else { |
---|
3379 | 3912 | rx_head = (void *)rx_q->dma_rx; |
---|
| 3913 | + desc_size = sizeof(struct dma_desc); |
---|
| 3914 | + } |
---|
3380 | 3915 | |
---|
3381 | | - stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); |
---|
| 3916 | + stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, |
---|
| 3917 | + rx_q->dma_rx_phy, desc_size); |
---|
3382 | 3918 | } |
---|
3383 | 3919 | while (count < limit) { |
---|
3384 | | - int entry, status; |
---|
3385 | | - struct dma_desc *p; |
---|
3386 | | - struct dma_desc *np; |
---|
| 3920 | + unsigned int buf1_len = 0, buf2_len = 0; |
---|
| 3921 | + enum pkt_hash_types hash_type; |
---|
| 3922 | + struct stmmac_rx_buffer *buf; |
---|
| 3923 | + struct dma_desc *np, *p; |
---|
| 3924 | + int entry; |
---|
| 3925 | + u32 hash; |
---|
3387 | 3926 | |
---|
| 3927 | + if (!count && rx_q->state_saved) { |
---|
| 3928 | + skb = rx_q->state.skb; |
---|
| 3929 | + error = rx_q->state.error; |
---|
| 3930 | + len = rx_q->state.len; |
---|
| 3931 | + } else { |
---|
| 3932 | + rx_q->state_saved = false; |
---|
| 3933 | + skb = NULL; |
---|
| 3934 | + error = 0; |
---|
| 3935 | + len = 0; |
---|
| 3936 | + } |
---|
| 3937 | + |
---|
| 3938 | + if ((count >= limit - 1) && limit > 1) |
---|
| 3939 | + break; |
---|
| 3940 | + |
---|
| 3941 | +read_again: |
---|
| 3942 | + buf1_len = 0; |
---|
| 3943 | + buf2_len = 0; |
---|
3388 | 3944 | entry = next_entry; |
---|
| 3945 | + buf = &rx_q->buf_pool[entry]; |
---|
3389 | 3946 | |
---|
3390 | 3947 | if (priv->extend_desc) |
---|
3391 | 3948 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
---|
.. | .. |
---|
3399 | 3956 | if (unlikely(status & dma_own)) |
---|
3400 | 3957 | break; |
---|
3401 | 3958 | |
---|
3402 | | - count++; |
---|
3403 | | - |
---|
3404 | | - rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); |
---|
| 3959 | + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, |
---|
| 3960 | + priv->dma_rx_size); |
---|
3405 | 3961 | next_entry = rx_q->cur_rx; |
---|
3406 | 3962 | |
---|
3407 | 3963 | if (priv->extend_desc) |
---|
.. | .. |
---|
3415 | 3971 | stmmac_rx_extended_status(priv, &priv->dev->stats, |
---|
3416 | 3972 | &priv->xstats, rx_q->dma_erx + entry); |
---|
3417 | 3973 | if (unlikely(status == discard_frame)) { |
---|
3418 | | - priv->dev->stats.rx_errors++; |
---|
3419 | | - if (priv->hwts_rx_en && !priv->extend_desc) { |
---|
3420 | | - /* DESC2 & DESC3 will be overwritten by device |
---|
3421 | | - * with timestamp value, hence reinitialize |
---|
3422 | | - * them in stmmac_rx_refill() function so that |
---|
3423 | | - * device can reuse it. |
---|
3424 | | - */ |
---|
3425 | | - dev_kfree_skb_any(rx_q->rx_skbuff[entry]); |
---|
3426 | | - rx_q->rx_skbuff[entry] = NULL; |
---|
3427 | | - dma_unmap_single(priv->device, |
---|
3428 | | - rx_q->rx_skbuff_dma[entry], |
---|
3429 | | - priv->dma_buf_sz, |
---|
3430 | | - DMA_FROM_DEVICE); |
---|
3431 | | - } |
---|
3432 | | - } else { |
---|
3433 | | - struct sk_buff *skb; |
---|
3434 | | - int frame_len; |
---|
3435 | | - unsigned int des; |
---|
3436 | | - |
---|
3437 | | - stmmac_get_desc_addr(priv, p, &des); |
---|
3438 | | - frame_len = stmmac_get_rx_frame_len(priv, p, coe); |
---|
3439 | | - |
---|
3440 | | - /* If frame length is greater than skb buffer size |
---|
3441 | | - * (preallocated during init) then the packet is |
---|
3442 | | - * ignored |
---|
3443 | | - */ |
---|
3444 | | - if (frame_len > priv->dma_buf_sz) { |
---|
3445 | | - if (net_ratelimit()) |
---|
3446 | | - netdev_err(priv->dev, |
---|
3447 | | - "len %d larger than size (%d)\n", |
---|
3448 | | - frame_len, priv->dma_buf_sz); |
---|
3449 | | - priv->dev->stats.rx_length_errors++; |
---|
3450 | | - continue; |
---|
3451 | | - } |
---|
3452 | | - |
---|
3453 | | - /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
---|
3454 | | - * Type frames (LLC/LLC-SNAP) |
---|
3455 | | - * |
---|
3456 | | - * llc_snap is never checked in GMAC >= 4, so this ACS |
---|
3457 | | - * feature is always disabled and packets need to be |
---|
3458 | | - * stripped manually. |
---|
3459 | | - */ |
---|
3460 | | - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
---|
3461 | | - unlikely(status != llc_snap)) |
---|
3462 | | - frame_len -= ETH_FCS_LEN; |
---|
3463 | | - |
---|
3464 | | - if (netif_msg_rx_status(priv)) { |
---|
3465 | | - netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", |
---|
3466 | | - p, entry, des); |
---|
3467 | | - netdev_dbg(priv->dev, "frame size %d, COE: %d\n", |
---|
3468 | | - frame_len, status); |
---|
3469 | | - } |
---|
3470 | | - |
---|
3471 | | - /* The zero-copy is always used for all the sizes |
---|
3472 | | - * in case of GMAC4 because it needs |
---|
3473 | | - * to refill the used descriptors, always. |
---|
3474 | | - */ |
---|
3475 | | - if (unlikely(!xmac && |
---|
3476 | | - ((frame_len < priv->rx_copybreak) || |
---|
3477 | | - stmmac_rx_threshold_count(rx_q)))) { |
---|
3478 | | - skb = netdev_alloc_skb_ip_align(priv->dev, |
---|
3479 | | - frame_len); |
---|
3480 | | - if (unlikely(!skb)) { |
---|
3481 | | - if (net_ratelimit()) |
---|
3482 | | - dev_warn(priv->device, |
---|
3483 | | - "packet dropped\n"); |
---|
3484 | | - priv->dev->stats.rx_dropped++; |
---|
3485 | | - continue; |
---|
3486 | | - } |
---|
3487 | | - |
---|
3488 | | - dma_sync_single_for_cpu(priv->device, |
---|
3489 | | - rx_q->rx_skbuff_dma |
---|
3490 | | - [entry], frame_len, |
---|
3491 | | - DMA_FROM_DEVICE); |
---|
3492 | | - skb_copy_to_linear_data(skb, |
---|
3493 | | - rx_q-> |
---|
3494 | | - rx_skbuff[entry]->data, |
---|
3495 | | - frame_len); |
---|
3496 | | - |
---|
3497 | | - skb_put(skb, frame_len); |
---|
3498 | | - dma_sync_single_for_device(priv->device, |
---|
3499 | | - rx_q->rx_skbuff_dma |
---|
3500 | | - [entry], frame_len, |
---|
3501 | | - DMA_FROM_DEVICE); |
---|
3502 | | - } else { |
---|
3503 | | - skb = rx_q->rx_skbuff[entry]; |
---|
3504 | | - if (unlikely(!skb)) { |
---|
3505 | | - if (net_ratelimit()) |
---|
3506 | | - netdev_err(priv->dev, |
---|
3507 | | - "%s: Inconsistent Rx chain\n", |
---|
3508 | | - priv->dev->name); |
---|
3509 | | - priv->dev->stats.rx_dropped++; |
---|
3510 | | - continue; |
---|
3511 | | - } |
---|
3512 | | - prefetch(skb->data - NET_IP_ALIGN); |
---|
3513 | | - rx_q->rx_skbuff[entry] = NULL; |
---|
3514 | | - rx_q->rx_zeroc_thresh++; |
---|
3515 | | - |
---|
3516 | | - skb_put(skb, frame_len); |
---|
3517 | | - dma_unmap_single(priv->device, |
---|
3518 | | - rx_q->rx_skbuff_dma[entry], |
---|
3519 | | - priv->dma_buf_sz, |
---|
3520 | | - DMA_FROM_DEVICE); |
---|
3521 | | - } |
---|
3522 | | - |
---|
3523 | | - if (netif_msg_pktdata(priv)) { |
---|
3524 | | - netdev_dbg(priv->dev, "frame received (%dbytes)", |
---|
3525 | | - frame_len); |
---|
3526 | | - print_pkt(skb->data, frame_len); |
---|
3527 | | - } |
---|
3528 | | - |
---|
3529 | | - stmmac_get_rx_hwtstamp(priv, p, np, skb); |
---|
3530 | | - |
---|
3531 | | - stmmac_rx_vlan(priv->dev, skb); |
---|
3532 | | - |
---|
3533 | | - skb->protocol = eth_type_trans(skb, priv->dev); |
---|
3534 | | - |
---|
3535 | | - if (unlikely(!coe)) |
---|
3536 | | - skb_checksum_none_assert(skb); |
---|
3537 | | - else |
---|
3538 | | - skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
3539 | | - |
---|
3540 | | - napi_gro_receive(&ch->napi, skb); |
---|
3541 | | - |
---|
3542 | | - priv->dev->stats.rx_packets++; |
---|
3543 | | - priv->dev->stats.rx_bytes += frame_len; |
---|
| 3974 | + page_pool_recycle_direct(rx_q->page_pool, buf->page); |
---|
| 3975 | + buf->page = NULL; |
---|
| 3976 | + error = 1; |
---|
| 3977 | + if (!priv->hwts_rx_en) |
---|
| 3978 | + priv->dev->stats.rx_errors++; |
---|
3544 | 3979 | } |
---|
| 3980 | + |
---|
| 3981 | + if (unlikely(error && (status & rx_not_ls))) |
---|
| 3982 | + goto read_again; |
---|
| 3983 | + if (unlikely(error)) { |
---|
| 3984 | + dev_kfree_skb(skb); |
---|
| 3985 | + skb = NULL; |
---|
| 3986 | + count++; |
---|
| 3987 | + continue; |
---|
| 3988 | + } |
---|
| 3989 | + |
---|
| 3990 | + /* Buffer is good. Go on. */ |
---|
| 3991 | + |
---|
| 3992 | + prefetch(page_address(buf->page)); |
---|
| 3993 | + if (buf->sec_page) |
---|
| 3994 | + prefetch(page_address(buf->sec_page)); |
---|
| 3995 | + |
---|
| 3996 | + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); |
---|
| 3997 | + len += buf1_len; |
---|
| 3998 | + buf2_len = stmmac_rx_buf2_len(priv, p, status, len); |
---|
| 3999 | + len += buf2_len; |
---|
| 4000 | + |
---|
| 4001 | + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
---|
| 4002 | + * Type frames (LLC/LLC-SNAP) |
---|
| 4003 | + * |
---|
| 4004 | + * llc_snap is never checked in GMAC >= 4, so this ACS |
---|
| 4005 | + * feature is always disabled and packets need to be |
---|
| 4006 | + * stripped manually. |
---|
| 4007 | + */ |
---|
| 4008 | + if (likely(!(status & rx_not_ls)) && |
---|
| 4009 | + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
---|
| 4010 | + unlikely(status != llc_snap))) { |
---|
| 4011 | + if (buf2_len) |
---|
| 4012 | + buf2_len -= ETH_FCS_LEN; |
---|
| 4013 | + else |
---|
| 4014 | + buf1_len -= ETH_FCS_LEN; |
---|
| 4015 | + |
---|
| 4016 | + len -= ETH_FCS_LEN; |
---|
| 4017 | + } |
---|
| 4018 | + |
---|
| 4019 | + if (!skb) { |
---|
| 4020 | + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); |
---|
| 4021 | + if (!skb) { |
---|
| 4022 | + priv->dev->stats.rx_dropped++; |
---|
| 4023 | + count++; |
---|
| 4024 | + goto drain_data; |
---|
| 4025 | + } |
---|
| 4026 | + |
---|
| 4027 | + dma_sync_single_for_cpu(priv->device, buf->addr, |
---|
| 4028 | + buf1_len, DMA_FROM_DEVICE); |
---|
| 4029 | + skb_copy_to_linear_data(skb, page_address(buf->page), |
---|
| 4030 | + buf1_len); |
---|
| 4031 | + skb_put(skb, buf1_len); |
---|
| 4032 | + |
---|
| 4033 | + /* Data payload copied into SKB, page ready for recycle */ |
---|
| 4034 | + page_pool_recycle_direct(rx_q->page_pool, buf->page); |
---|
| 4035 | + buf->page = NULL; |
---|
| 4036 | + } else if (buf1_len) { |
---|
| 4037 | + dma_sync_single_for_cpu(priv->device, buf->addr, |
---|
| 4038 | + buf1_len, DMA_FROM_DEVICE); |
---|
| 4039 | + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
---|
| 4040 | + buf->page, 0, buf1_len, |
---|
| 4041 | + priv->dma_buf_sz); |
---|
| 4042 | + |
---|
| 4043 | + /* Data payload appended into SKB */ |
---|
| 4044 | + page_pool_release_page(rx_q->page_pool, buf->page); |
---|
| 4045 | + buf->page = NULL; |
---|
| 4046 | + } |
---|
| 4047 | + |
---|
| 4048 | + if (buf2_len) { |
---|
| 4049 | + dma_sync_single_for_cpu(priv->device, buf->sec_addr, |
---|
| 4050 | + buf2_len, DMA_FROM_DEVICE); |
---|
| 4051 | + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
---|
| 4052 | + buf->sec_page, 0, buf2_len, |
---|
| 4053 | + priv->dma_buf_sz); |
---|
| 4054 | + |
---|
| 4055 | + /* Data payload appended into SKB */ |
---|
| 4056 | + page_pool_release_page(rx_q->page_pool, buf->sec_page); |
---|
| 4057 | + buf->sec_page = NULL; |
---|
| 4058 | + } |
---|
| 4059 | + |
---|
| 4060 | +drain_data: |
---|
| 4061 | + if (likely(status & rx_not_ls)) |
---|
| 4062 | + goto read_again; |
---|
| 4063 | + if (!skb) |
---|
| 4064 | + continue; |
---|
| 4065 | + |
---|
| 4066 | + /* Got entire packet into SKB. Finish it. */ |
---|
| 4067 | + |
---|
| 4068 | + stmmac_get_rx_hwtstamp(priv, p, np, skb); |
---|
| 4069 | + stmmac_rx_vlan(priv->dev, skb); |
---|
| 4070 | + skb->protocol = eth_type_trans(skb, priv->dev); |
---|
| 4071 | + |
---|
| 4072 | + if (unlikely(!coe)) |
---|
| 4073 | + skb_checksum_none_assert(skb); |
---|
| 4074 | + else |
---|
| 4075 | + skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
| 4076 | + |
---|
| 4077 | + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) |
---|
| 4078 | + skb_set_hash(skb, hash, hash_type); |
---|
| 4079 | + |
---|
| 4080 | + skb_record_rx_queue(skb, queue); |
---|
| 4081 | + napi_gro_receive(&ch->rx_napi, skb); |
---|
| 4082 | + skb = NULL; |
---|
| 4083 | + |
---|
| 4084 | + priv->dev->stats.rx_packets++; |
---|
| 4085 | + priv->dev->stats.rx_bytes += len; |
---|
| 4086 | + count++; |
---|
| 4087 | + } |
---|
| 4088 | + |
---|
| 4089 | + if (status & rx_not_ls || skb) { |
---|
| 4090 | + rx_q->state_saved = true; |
---|
| 4091 | + rx_q->state.skb = skb; |
---|
| 4092 | + rx_q->state.error = error; |
---|
| 4093 | + rx_q->state.len = len; |
---|
3545 | 4094 | } |
---|
3546 | 4095 | |
---|
3547 | 4096 | stmmac_rx_refill(priv, queue); |
---|
.. | .. |
---|
3551 | 4100 | return count; |
---|
3552 | 4101 | } |
---|
3553 | 4102 | |
---|
3554 | | -/** |
---|
3555 | | - * stmmac_poll - stmmac poll method (NAPI) |
---|
3556 | | - * @napi : pointer to the napi structure. |
---|
3557 | | - * @budget : maximum number of packets that the current CPU can receive from |
---|
3558 | | - * all interfaces. |
---|
3559 | | - * Description : |
---|
3560 | | - * To look at the incoming frames and clear the tx resources. |
---|
3561 | | - */ |
---|
3562 | | -static int stmmac_napi_poll(struct napi_struct *napi, int budget) |
---|
| 4103 | +static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) |
---|
3563 | 4104 | { |
---|
3564 | 4105 | struct stmmac_channel *ch = |
---|
3565 | | - container_of(napi, struct stmmac_channel, napi); |
---|
| 4106 | + container_of(napi, struct stmmac_channel, rx_napi); |
---|
3566 | 4107 | struct stmmac_priv *priv = ch->priv_data; |
---|
3567 | | - int work_done, rx_done = 0, tx_done = 0; |
---|
3568 | 4108 | u32 chan = ch->index; |
---|
| 4109 | + int work_done; |
---|
3569 | 4110 | |
---|
3570 | 4111 | priv->xstats.napi_poll++; |
---|
3571 | 4112 | |
---|
3572 | | - if (ch->has_tx) |
---|
3573 | | - tx_done = stmmac_tx_clean(priv, budget, chan); |
---|
3574 | | - if (ch->has_rx) |
---|
3575 | | - rx_done = stmmac_rx(priv, budget, chan); |
---|
| 4113 | + work_done = stmmac_rx(priv, budget, chan); |
---|
| 4114 | + if (work_done < budget && napi_complete_done(napi, work_done)) { |
---|
| 4115 | + unsigned long flags; |
---|
3576 | 4116 | |
---|
3577 | | - work_done = max(rx_done, tx_done); |
---|
| 4117 | + spin_lock_irqsave(&ch->lock, flags); |
---|
| 4118 | + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); |
---|
| 4119 | + spin_unlock_irqrestore(&ch->lock, flags); |
---|
| 4120 | + } |
---|
| 4121 | + |
---|
| 4122 | + return work_done; |
---|
| 4123 | +} |
---|
| 4124 | + |
---|
| 4125 | +static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) |
---|
| 4126 | +{ |
---|
| 4127 | + struct stmmac_channel *ch = |
---|
| 4128 | + container_of(napi, struct stmmac_channel, tx_napi); |
---|
| 4129 | + struct stmmac_priv *priv = ch->priv_data; |
---|
| 4130 | + u32 chan = ch->index; |
---|
| 4131 | + int work_done; |
---|
| 4132 | + |
---|
| 4133 | + priv->xstats.napi_poll++; |
---|
| 4134 | + |
---|
| 4135 | + work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); |
---|
3578 | 4136 | work_done = min(work_done, budget); |
---|
3579 | 4137 | |
---|
3580 | 4138 | if (work_done < budget && napi_complete_done(napi, work_done)) { |
---|
3581 | | - int stat; |
---|
| 4139 | + unsigned long flags; |
---|
3582 | 4140 | |
---|
3583 | | - stmmac_enable_dma_irq(priv, priv->ioaddr, chan); |
---|
3584 | | - stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, |
---|
3585 | | - &priv->xstats, chan); |
---|
3586 | | - if (stat && napi_reschedule(napi)) |
---|
3587 | | - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); |
---|
| 4141 | + spin_lock_irqsave(&ch->lock, flags); |
---|
| 4142 | + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); |
---|
| 4143 | + spin_unlock_irqrestore(&ch->lock, flags); |
---|
3588 | 4144 | } |
---|
3589 | 4145 | |
---|
3590 | 4146 | return work_done; |
---|
.. | .. |
---|
3593 | 4149 | /** |
---|
3594 | 4150 | * stmmac_tx_timeout |
---|
3595 | 4151 | * @dev : Pointer to net device structure |
---|
| 4152 | + * @txqueue: the index of the hanging transmit queue |
---|
3596 | 4153 | * Description: this function is called when a packet transmission fails to |
---|
3597 | 4154 | * complete within a reasonable time. The driver will mark the error in the |
---|
3598 | 4155 | * netdev structure and arrange for the device to be reset to a sane state |
---|
3599 | 4156 | * in order to transmit a new packet. |
---|
3600 | 4157 | */ |
---|
3601 | | -static void stmmac_tx_timeout(struct net_device *dev) |
---|
| 4158 | +static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) |
---|
3602 | 4159 | { |
---|
3603 | 4160 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
3604 | 4161 | |
---|
.. | .. |
---|
3695 | 4252 | netdev_features_t features) |
---|
3696 | 4253 | { |
---|
3697 | 4254 | struct stmmac_priv *priv = netdev_priv(netdev); |
---|
| 4255 | + bool sph_en; |
---|
| 4256 | + u32 chan; |
---|
3698 | 4257 | |
---|
3699 | 4258 | /* Keep the COE Type in case of csum is supporting */ |
---|
3700 | 4259 | if (features & NETIF_F_RXCSUM) |
---|
.. | .. |
---|
3705 | 4264 | * fixed in case of issue. |
---|
3706 | 4265 | */ |
---|
3707 | 4266 | stmmac_rx_ipc(priv, priv->hw); |
---|
| 4267 | + |
---|
| 4268 | + sph_en = (priv->hw->rx_csum > 0) && priv->sph; |
---|
| 4269 | + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) |
---|
| 4270 | + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); |
---|
3708 | 4271 | |
---|
3709 | 4272 | return 0; |
---|
3710 | 4273 | } |
---|
.. | .. |
---|
3798 | 4361 | */ |
---|
3799 | 4362 | static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
---|
3800 | 4363 | { |
---|
| 4364 | + struct stmmac_priv *priv = netdev_priv (dev); |
---|
3801 | 4365 | int ret = -EOPNOTSUPP; |
---|
3802 | 4366 | |
---|
3803 | 4367 | if (!netif_running(dev)) |
---|
.. | .. |
---|
3807 | 4371 | case SIOCGMIIPHY: |
---|
3808 | 4372 | case SIOCGMIIREG: |
---|
3809 | 4373 | case SIOCSMIIREG: |
---|
3810 | | - if (!dev->phydev) |
---|
3811 | | - return -EINVAL; |
---|
3812 | | - ret = phy_mii_ioctl(dev->phydev, rq, cmd); |
---|
| 4374 | + ret = phylink_mii_ioctl(priv->phylink, rq, cmd); |
---|
3813 | 4375 | break; |
---|
3814 | | -#ifdef CONFIG_STMMAC_PTP |
---|
3815 | 4376 | case SIOCSHWTSTAMP: |
---|
3816 | 4377 | ret = stmmac_hwtstamp_set(dev, rq); |
---|
3817 | 4378 | break; |
---|
3818 | 4379 | case SIOCGHWTSTAMP: |
---|
3819 | 4380 | ret = stmmac_hwtstamp_get(dev, rq); |
---|
3820 | 4381 | break; |
---|
3821 | | -#endif |
---|
3822 | 4382 | default: |
---|
3823 | 4383 | break; |
---|
3824 | 4384 | } |
---|
.. | .. |
---|
3832 | 4392 | struct stmmac_priv *priv = cb_priv; |
---|
3833 | 4393 | int ret = -EOPNOTSUPP; |
---|
3834 | 4394 | |
---|
| 4395 | + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) |
---|
| 4396 | + return ret; |
---|
| 4397 | + |
---|
3835 | 4398 | stmmac_disable_all_queues(priv); |
---|
3836 | 4399 | |
---|
3837 | 4400 | switch (type) { |
---|
3838 | 4401 | case TC_SETUP_CLSU32: |
---|
3839 | | - if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) |
---|
3840 | | - ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); |
---|
| 4402 | + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); |
---|
| 4403 | + break; |
---|
| 4404 | + case TC_SETUP_CLSFLOWER: |
---|
| 4405 | + ret = stmmac_tc_setup_cls(priv, priv, type_data); |
---|
3841 | 4406 | break; |
---|
3842 | 4407 | default: |
---|
3843 | 4408 | break; |
---|
.. | .. |
---|
3847 | 4412 | return ret; |
---|
3848 | 4413 | } |
---|
3849 | 4414 | |
---|
3850 | | -static int stmmac_setup_tc_block(struct stmmac_priv *priv, |
---|
3851 | | - struct tc_block_offload *f) |
---|
3852 | | -{ |
---|
3853 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
3854 | | - return -EOPNOTSUPP; |
---|
3855 | | - |
---|
3856 | | - switch (f->command) { |
---|
3857 | | - case TC_BLOCK_BIND: |
---|
3858 | | - return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, |
---|
3859 | | - priv, priv, f->extack); |
---|
3860 | | - case TC_BLOCK_UNBIND: |
---|
3861 | | - tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); |
---|
3862 | | - return 0; |
---|
3863 | | - default: |
---|
3864 | | - return -EOPNOTSUPP; |
---|
3865 | | - } |
---|
3866 | | -} |
---|
| 4415 | +static LIST_HEAD(stmmac_block_cb_list); |
---|
3867 | 4416 | |
---|
3868 | 4417 | static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, |
---|
3869 | 4418 | void *type_data) |
---|
.. | .. |
---|
3872 | 4421 | |
---|
3873 | 4422 | switch (type) { |
---|
3874 | 4423 | case TC_SETUP_BLOCK: |
---|
3875 | | - return stmmac_setup_tc_block(priv, type_data); |
---|
| 4424 | + return flow_block_cb_setup_simple(type_data, |
---|
| 4425 | + &stmmac_block_cb_list, |
---|
| 4426 | + stmmac_setup_tc_block_cb, |
---|
| 4427 | + priv, priv, true); |
---|
3876 | 4428 | case TC_SETUP_QDISC_CBS: |
---|
3877 | 4429 | return stmmac_tc_setup_cbs(priv, priv, type_data); |
---|
| 4430 | + case TC_SETUP_QDISC_TAPRIO: |
---|
| 4431 | + return stmmac_tc_setup_taprio(priv, priv, type_data); |
---|
| 4432 | + case TC_SETUP_QDISC_ETF: |
---|
| 4433 | + return stmmac_tc_setup_etf(priv, priv, type_data); |
---|
3878 | 4434 | default: |
---|
3879 | 4435 | return -EOPNOTSUPP; |
---|
3880 | 4436 | } |
---|
3881 | 4437 | } |
---|
3882 | 4438 | |
---|
3883 | 4439 | static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, |
---|
3884 | | - struct net_device *sb_dev, |
---|
3885 | | - select_queue_fallback_t fallback) |
---|
| 4440 | + struct net_device *sb_dev) |
---|
3886 | 4441 | { |
---|
3887 | 4442 | int gso = skb_shinfo(skb)->gso_type; |
---|
3888 | 4443 | |
---|
.. | .. |
---|
3896 | 4451 | return 0; |
---|
3897 | 4452 | } |
---|
3898 | 4453 | |
---|
3899 | | - return fallback(dev, skb, NULL) % dev->real_num_tx_queues; |
---|
| 4454 | + return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; |
---|
3900 | 4455 | } |
---|
3901 | 4456 | |
---|
3902 | 4457 | static int stmmac_set_mac_address(struct net_device *ndev, void *addr) |
---|
.. | .. |
---|
3904 | 4459 | struct stmmac_priv *priv = netdev_priv(ndev); |
---|
3905 | 4460 | int ret = 0; |
---|
3906 | 4461 | |
---|
| 4462 | + ret = pm_runtime_get_sync(priv->device); |
---|
| 4463 | + if (ret < 0) { |
---|
| 4464 | + pm_runtime_put_noidle(priv->device); |
---|
| 4465 | + return ret; |
---|
| 4466 | + } |
---|
| 4467 | + |
---|
3907 | 4468 | ret = eth_mac_addr(ndev, addr); |
---|
3908 | 4469 | if (ret) |
---|
3909 | | - return ret; |
---|
| 4470 | + goto set_mac_error; |
---|
3910 | 4471 | |
---|
3911 | 4472 | stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); |
---|
| 4473 | + |
---|
| 4474 | +set_mac_error: |
---|
| 4475 | + pm_runtime_put(priv->device); |
---|
3912 | 4476 | |
---|
3913 | 4477 | return ret; |
---|
3914 | 4478 | } |
---|
.. | .. |
---|
3917 | 4481 | static struct dentry *stmmac_fs_dir; |
---|
3918 | 4482 | |
---|
3919 | 4483 | static void sysfs_display_ring(void *head, int size, int extend_desc, |
---|
3920 | | - struct seq_file *seq) |
---|
| 4484 | + struct seq_file *seq, dma_addr_t dma_phy_addr) |
---|
3921 | 4485 | { |
---|
3922 | 4486 | int i; |
---|
3923 | 4487 | struct dma_extended_desc *ep = (struct dma_extended_desc *)head; |
---|
3924 | 4488 | struct dma_desc *p = (struct dma_desc *)head; |
---|
| 4489 | + dma_addr_t dma_addr; |
---|
3925 | 4490 | |
---|
3926 | 4491 | for (i = 0; i < size; i++) { |
---|
3927 | 4492 | if (extend_desc) { |
---|
3928 | | - seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
---|
3929 | | - i, (unsigned int)virt_to_phys(ep), |
---|
| 4493 | + dma_addr = dma_phy_addr + i * sizeof(*ep); |
---|
| 4494 | + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", |
---|
| 4495 | + i, &dma_addr, |
---|
3930 | 4496 | le32_to_cpu(ep->basic.des0), |
---|
3931 | 4497 | le32_to_cpu(ep->basic.des1), |
---|
3932 | 4498 | le32_to_cpu(ep->basic.des2), |
---|
3933 | 4499 | le32_to_cpu(ep->basic.des3)); |
---|
3934 | 4500 | ep++; |
---|
3935 | 4501 | } else { |
---|
3936 | | - seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
---|
3937 | | - i, (unsigned int)virt_to_phys(p), |
---|
| 4502 | + dma_addr = dma_phy_addr + i * sizeof(*p); |
---|
| 4503 | + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", |
---|
| 4504 | + i, &dma_addr, |
---|
3938 | 4505 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), |
---|
3939 | 4506 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); |
---|
3940 | 4507 | p++; |
---|
.. | .. |
---|
3943 | 4510 | } |
---|
3944 | 4511 | } |
---|
3945 | 4512 | |
---|
3946 | | -static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) |
---|
| 4513 | +static int stmmac_rings_status_show(struct seq_file *seq, void *v) |
---|
3947 | 4514 | { |
---|
3948 | 4515 | struct net_device *dev = seq->private; |
---|
3949 | 4516 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
.. | .. |
---|
3962 | 4529 | if (priv->extend_desc) { |
---|
3963 | 4530 | seq_printf(seq, "Extended descriptor ring:\n"); |
---|
3964 | 4531 | sysfs_display_ring((void *)rx_q->dma_erx, |
---|
3965 | | - DMA_RX_SIZE, 1, seq); |
---|
| 4532 | + priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); |
---|
3966 | 4533 | } else { |
---|
3967 | 4534 | seq_printf(seq, "Descriptor ring:\n"); |
---|
3968 | 4535 | sysfs_display_ring((void *)rx_q->dma_rx, |
---|
3969 | | - DMA_RX_SIZE, 0, seq); |
---|
| 4536 | + priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); |
---|
3970 | 4537 | } |
---|
3971 | 4538 | } |
---|
3972 | 4539 | |
---|
.. | .. |
---|
3978 | 4545 | if (priv->extend_desc) { |
---|
3979 | 4546 | seq_printf(seq, "Extended descriptor ring:\n"); |
---|
3980 | 4547 | sysfs_display_ring((void *)tx_q->dma_etx, |
---|
3981 | | - DMA_TX_SIZE, 1, seq); |
---|
3982 | | - } else { |
---|
| 4548 | + priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); |
---|
| 4549 | + } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { |
---|
3983 | 4550 | seq_printf(seq, "Descriptor ring:\n"); |
---|
3984 | 4551 | sysfs_display_ring((void *)tx_q->dma_tx, |
---|
3985 | | - DMA_TX_SIZE, 0, seq); |
---|
| 4552 | + priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); |
---|
3986 | 4553 | } |
---|
3987 | 4554 | } |
---|
3988 | 4555 | |
---|
3989 | 4556 | return 0; |
---|
3990 | 4557 | } |
---|
| 4558 | +DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); |
---|
3991 | 4559 | |
---|
3992 | | -static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) |
---|
3993 | | -{ |
---|
3994 | | - return single_open(file, stmmac_sysfs_ring_read, inode->i_private); |
---|
3995 | | -} |
---|
3996 | | - |
---|
3997 | | -/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */ |
---|
3998 | | - |
---|
3999 | | -static const struct file_operations stmmac_rings_status_fops = { |
---|
4000 | | - .owner = THIS_MODULE, |
---|
4001 | | - .open = stmmac_sysfs_ring_open, |
---|
4002 | | - .read = seq_read, |
---|
4003 | | - .llseek = seq_lseek, |
---|
4004 | | - .release = single_release, |
---|
4005 | | -}; |
---|
4006 | | - |
---|
4007 | | -static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) |
---|
| 4560 | +static int stmmac_dma_cap_show(struct seq_file *seq, void *v) |
---|
4008 | 4561 | { |
---|
4009 | 4562 | struct net_device *dev = seq->private; |
---|
4010 | 4563 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
.. | .. |
---|
4062 | 4615 | priv->dma_cap.number_rx_channel); |
---|
4063 | 4616 | seq_printf(seq, "\tNumber of Additional TX channel: %d\n", |
---|
4064 | 4617 | priv->dma_cap.number_tx_channel); |
---|
| 4618 | + seq_printf(seq, "\tNumber of Additional RX queues: %d\n", |
---|
| 4619 | + priv->dma_cap.number_rx_queues); |
---|
| 4620 | + seq_printf(seq, "\tNumber of Additional TX queues: %d\n", |
---|
| 4621 | + priv->dma_cap.number_tx_queues); |
---|
4065 | 4622 | seq_printf(seq, "\tEnhanced descriptors: %s\n", |
---|
4066 | 4623 | (priv->dma_cap.enh_desc) ? "Y" : "N"); |
---|
4067 | | - |
---|
| 4624 | + seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); |
---|
| 4625 | + seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); |
---|
| 4626 | + seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); |
---|
| 4627 | + seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); |
---|
| 4628 | + seq_printf(seq, "\tNumber of PPS Outputs: %d\n", |
---|
| 4629 | + priv->dma_cap.pps_out_num); |
---|
| 4630 | + seq_printf(seq, "\tSafety Features: %s\n", |
---|
| 4631 | + priv->dma_cap.asp ? "Y" : "N"); |
---|
| 4632 | + seq_printf(seq, "\tFlexible RX Parser: %s\n", |
---|
| 4633 | + priv->dma_cap.frpsel ? "Y" : "N"); |
---|
| 4634 | + seq_printf(seq, "\tEnhanced Addressing: %d\n", |
---|
| 4635 | + priv->dma_cap.addr64); |
---|
| 4636 | + seq_printf(seq, "\tReceive Side Scaling: %s\n", |
---|
| 4637 | + priv->dma_cap.rssen ? "Y" : "N"); |
---|
| 4638 | + seq_printf(seq, "\tVLAN Hash Filtering: %s\n", |
---|
| 4639 | + priv->dma_cap.vlhash ? "Y" : "N"); |
---|
| 4640 | + seq_printf(seq, "\tSplit Header: %s\n", |
---|
| 4641 | + priv->dma_cap.sphen ? "Y" : "N"); |
---|
| 4642 | + seq_printf(seq, "\tVLAN TX Insertion: %s\n", |
---|
| 4643 | + priv->dma_cap.vlins ? "Y" : "N"); |
---|
| 4644 | + seq_printf(seq, "\tDouble VLAN: %s\n", |
---|
| 4645 | + priv->dma_cap.dvlan ? "Y" : "N"); |
---|
| 4646 | + seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", |
---|
| 4647 | + priv->dma_cap.l3l4fnum); |
---|
| 4648 | + seq_printf(seq, "\tARP Offloading: %s\n", |
---|
| 4649 | + priv->dma_cap.arpoffsel ? "Y" : "N"); |
---|
| 4650 | + seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", |
---|
| 4651 | + priv->dma_cap.estsel ? "Y" : "N"); |
---|
| 4652 | + seq_printf(seq, "\tFrame Preemption (FPE): %s\n", |
---|
| 4653 | + priv->dma_cap.fpesel ? "Y" : "N"); |
---|
| 4654 | + seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", |
---|
| 4655 | + priv->dma_cap.tbssel ? "Y" : "N"); |
---|
4068 | 4656 | return 0; |
---|
4069 | 4657 | } |
---|
| 4658 | +DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); |
---|
4070 | 4659 | |
---|
4071 | | -static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) |
---|
| 4660 | +/* Use network device events to rename debugfs file entries. |
---|
| 4661 | + */ |
---|
| 4662 | +static int stmmac_device_event(struct notifier_block *unused, |
---|
| 4663 | + unsigned long event, void *ptr) |
---|
4072 | 4664 | { |
---|
4073 | | - return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); |
---|
| 4665 | + struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
---|
| 4666 | + struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 4667 | + |
---|
| 4668 | + if (dev->netdev_ops != &stmmac_netdev_ops) |
---|
| 4669 | + goto done; |
---|
| 4670 | + |
---|
| 4671 | + switch (event) { |
---|
| 4672 | + case NETDEV_CHANGENAME: |
---|
| 4673 | + if (priv->dbgfs_dir) |
---|
| 4674 | + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, |
---|
| 4675 | + priv->dbgfs_dir, |
---|
| 4676 | + stmmac_fs_dir, |
---|
| 4677 | + dev->name); |
---|
| 4678 | + break; |
---|
| 4679 | + } |
---|
| 4680 | +done: |
---|
| 4681 | + return NOTIFY_DONE; |
---|
4074 | 4682 | } |
---|
4075 | 4683 | |
---|
4076 | | -static const struct file_operations stmmac_dma_cap_fops = { |
---|
4077 | | - .owner = THIS_MODULE, |
---|
4078 | | - .open = stmmac_sysfs_dma_cap_open, |
---|
4079 | | - .read = seq_read, |
---|
4080 | | - .llseek = seq_lseek, |
---|
4081 | | - .release = single_release, |
---|
| 4684 | +static struct notifier_block stmmac_notifier = { |
---|
| 4685 | + .notifier_call = stmmac_device_event, |
---|
4082 | 4686 | }; |
---|
4083 | 4687 | |
---|
4084 | | -static int stmmac_init_fs(struct net_device *dev) |
---|
| 4688 | +static void stmmac_init_fs(struct net_device *dev) |
---|
4085 | 4689 | { |
---|
4086 | 4690 | struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 4691 | + |
---|
| 4692 | + rtnl_lock(); |
---|
4087 | 4693 | |
---|
4088 | 4694 | /* Create per netdev entries */ |
---|
4089 | 4695 | priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); |
---|
4090 | 4696 | |
---|
4091 | | - if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { |
---|
4092 | | - netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); |
---|
4093 | | - |
---|
4094 | | - return -ENOMEM; |
---|
4095 | | - } |
---|
4096 | | - |
---|
4097 | 4697 | /* Entry to report DMA RX/TX rings */ |
---|
4098 | | - priv->dbgfs_rings_status = |
---|
4099 | | - debugfs_create_file("descriptors_status", 0444, |
---|
4100 | | - priv->dbgfs_dir, dev, |
---|
4101 | | - &stmmac_rings_status_fops); |
---|
4102 | | - |
---|
4103 | | - if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { |
---|
4104 | | - netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); |
---|
4105 | | - debugfs_remove_recursive(priv->dbgfs_dir); |
---|
4106 | | - |
---|
4107 | | - return -ENOMEM; |
---|
4108 | | - } |
---|
| 4698 | + debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, |
---|
| 4699 | + &stmmac_rings_status_fops); |
---|
4109 | 4700 | |
---|
4110 | 4701 | /* Entry to report the DMA HW features */ |
---|
4111 | | - priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444, |
---|
4112 | | - priv->dbgfs_dir, |
---|
4113 | | - dev, &stmmac_dma_cap_fops); |
---|
| 4702 | + debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, |
---|
| 4703 | + &stmmac_dma_cap_fops); |
---|
4114 | 4704 | |
---|
4115 | | - if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { |
---|
4116 | | - netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); |
---|
4117 | | - debugfs_remove_recursive(priv->dbgfs_dir); |
---|
4118 | | - |
---|
4119 | | - return -ENOMEM; |
---|
4120 | | - } |
---|
4121 | | - |
---|
4122 | | - return 0; |
---|
| 4705 | + rtnl_unlock(); |
---|
4123 | 4706 | } |
---|
4124 | 4707 | |
---|
4125 | 4708 | static void stmmac_exit_fs(struct net_device *dev) |
---|
.. | .. |
---|
4129 | 4712 | debugfs_remove_recursive(priv->dbgfs_dir); |
---|
4130 | 4713 | } |
---|
4131 | 4714 | #endif /* CONFIG_DEBUG_FS */ |
---|
| 4715 | + |
---|
| 4716 | +static u32 stmmac_vid_crc32_le(__le16 vid_le) |
---|
| 4717 | +{ |
---|
| 4718 | + unsigned char *data = (unsigned char *)&vid_le; |
---|
| 4719 | + unsigned char data_byte = 0; |
---|
| 4720 | + u32 crc = ~0x0; |
---|
| 4721 | + u32 temp = 0; |
---|
| 4722 | + int i, bits; |
---|
| 4723 | + |
---|
| 4724 | + bits = get_bitmask_order(VLAN_VID_MASK); |
---|
| 4725 | + for (i = 0; i < bits; i++) { |
---|
| 4726 | + if ((i % 8) == 0) |
---|
| 4727 | + data_byte = data[i / 8]; |
---|
| 4728 | + |
---|
| 4729 | + temp = ((crc & 1) ^ data_byte) & 1; |
---|
| 4730 | + crc >>= 1; |
---|
| 4731 | + data_byte >>= 1; |
---|
| 4732 | + |
---|
| 4733 | + if (temp) |
---|
| 4734 | + crc ^= 0xedb88320; |
---|
| 4735 | + } |
---|
| 4736 | + |
---|
| 4737 | + return crc; |
---|
| 4738 | +} |
---|
| 4739 | + |
---|
| 4740 | +static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) |
---|
| 4741 | +{ |
---|
| 4742 | + u32 crc, hash = 0; |
---|
| 4743 | + __le16 pmatch = 0; |
---|
| 4744 | + int count = 0; |
---|
| 4745 | + u16 vid = 0; |
---|
| 4746 | + |
---|
| 4747 | + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { |
---|
| 4748 | + __le16 vid_le = cpu_to_le16(vid); |
---|
| 4749 | + crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; |
---|
| 4750 | + hash |= (1 << crc); |
---|
| 4751 | + count++; |
---|
| 4752 | + } |
---|
| 4753 | + |
---|
| 4754 | + if (!priv->dma_cap.vlhash) { |
---|
| 4755 | + if (count > 2) /* VID = 0 always passes filter */ |
---|
| 4756 | + return -EOPNOTSUPP; |
---|
| 4757 | + |
---|
| 4758 | + pmatch = cpu_to_le16(vid); |
---|
| 4759 | + hash = 0; |
---|
| 4760 | + } |
---|
| 4761 | + |
---|
| 4762 | + return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); |
---|
| 4763 | +} |
---|
| 4764 | + |
---|
| 4765 | +static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) |
---|
| 4766 | +{ |
---|
| 4767 | + struct stmmac_priv *priv = netdev_priv(ndev); |
---|
| 4768 | + bool is_double = false; |
---|
| 4769 | + int ret; |
---|
| 4770 | + |
---|
| 4771 | + if (be16_to_cpu(proto) == ETH_P_8021AD) |
---|
| 4772 | + is_double = true; |
---|
| 4773 | + |
---|
| 4774 | + set_bit(vid, priv->active_vlans); |
---|
| 4775 | + ret = stmmac_vlan_update(priv, is_double); |
---|
| 4776 | + if (ret) { |
---|
| 4777 | + clear_bit(vid, priv->active_vlans); |
---|
| 4778 | + return ret; |
---|
| 4779 | + } |
---|
| 4780 | + |
---|
| 4781 | + if (priv->hw->num_vlan) { |
---|
| 4782 | + ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); |
---|
| 4783 | + if (ret) |
---|
| 4784 | + return ret; |
---|
| 4785 | + } |
---|
| 4786 | + |
---|
| 4787 | + return 0; |
---|
| 4788 | +} |
---|
| 4789 | + |
---|
| 4790 | +static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) |
---|
| 4791 | +{ |
---|
| 4792 | + struct stmmac_priv *priv = netdev_priv(ndev); |
---|
| 4793 | + bool is_double = false; |
---|
| 4794 | + int ret; |
---|
| 4795 | + |
---|
| 4796 | + ret = pm_runtime_get_sync(priv->device); |
---|
| 4797 | + if (ret < 0) { |
---|
| 4798 | + pm_runtime_put_noidle(priv->device); |
---|
| 4799 | + return ret; |
---|
| 4800 | + } |
---|
| 4801 | + |
---|
| 4802 | + if (be16_to_cpu(proto) == ETH_P_8021AD) |
---|
| 4803 | + is_double = true; |
---|
| 4804 | + |
---|
| 4805 | + clear_bit(vid, priv->active_vlans); |
---|
| 4806 | + |
---|
| 4807 | + if (priv->hw->num_vlan) { |
---|
| 4808 | + ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); |
---|
| 4809 | + if (ret) |
---|
| 4810 | + goto del_vlan_error; |
---|
| 4811 | + } |
---|
| 4812 | + |
---|
| 4813 | + ret = stmmac_vlan_update(priv, is_double); |
---|
| 4814 | + |
---|
| 4815 | +del_vlan_error: |
---|
| 4816 | + pm_runtime_put(priv->device); |
---|
| 4817 | + |
---|
| 4818 | + return ret; |
---|
| 4819 | +} |
---|
4132 | 4820 | |
---|
4133 | 4821 | static const struct net_device_ops stmmac_netdev_ops = { |
---|
4134 | 4822 | .ndo_open = stmmac_open, |
---|
.. | .. |
---|
4146 | 4834 | .ndo_poll_controller = stmmac_poll_controller, |
---|
4147 | 4835 | #endif |
---|
4148 | 4836 | .ndo_set_mac_address = stmmac_set_mac_address, |
---|
| 4837 | + .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, |
---|
| 4838 | + .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, |
---|
4149 | 4839 | }; |
---|
4150 | 4840 | |
---|
4151 | 4841 | static void stmmac_reset_subtask(struct stmmac_priv *priv) |
---|
.. | .. |
---|
4164 | 4854 | |
---|
4165 | 4855 | set_bit(STMMAC_DOWN, &priv->state); |
---|
4166 | 4856 | dev_close(priv->dev); |
---|
4167 | | - dev_open(priv->dev); |
---|
| 4857 | + dev_open(priv->dev, NULL); |
---|
4168 | 4858 | clear_bit(STMMAC_DOWN, &priv->state); |
---|
4169 | 4859 | clear_bit(STMMAC_RESETING, &priv->state); |
---|
4170 | 4860 | rtnl_unlock(); |
---|
.. | .. |
---|
4214 | 4904 | priv->plat->enh_desc = priv->dma_cap.enh_desc; |
---|
4215 | 4905 | priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; |
---|
4216 | 4906 | priv->hw->pmt = priv->plat->pmt; |
---|
| 4907 | + if (priv->dma_cap.hash_tb_sz) { |
---|
| 4908 | + priv->hw->multicast_filter_bins = |
---|
| 4909 | + (BIT(priv->dma_cap.hash_tb_sz) << 5); |
---|
| 4910 | + priv->hw->mcast_bits_log2 = |
---|
| 4911 | + ilog2(priv->hw->multicast_filter_bins); |
---|
| 4912 | + } |
---|
4217 | 4913 | |
---|
4218 | 4914 | /* TXCOE doesn't work in thresh DMA mode */ |
---|
4219 | 4915 | if (priv->plat->force_thresh_dma_mode) |
---|
.. | .. |
---|
4250 | 4946 | if (priv->dma_cap.tsoen) |
---|
4251 | 4947 | dev_info(priv->device, "TSO supported\n"); |
---|
4252 | 4948 | |
---|
| 4949 | + priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; |
---|
| 4950 | + priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; |
---|
| 4951 | + |
---|
4253 | 4952 | /* Run HW quirks, if any */ |
---|
4254 | 4953 | if (priv->hwif_quirks) { |
---|
4255 | 4954 | ret = priv->hwif_quirks(priv); |
---|
.. | .. |
---|
4272 | 4971 | return 0; |
---|
4273 | 4972 | } |
---|
4274 | 4973 | |
---|
| 4974 | +static void stmmac_napi_add(struct net_device *dev) |
---|
| 4975 | +{ |
---|
| 4976 | + struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 4977 | + u32 queue, maxq; |
---|
| 4978 | + |
---|
| 4979 | + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
---|
| 4980 | + |
---|
| 4981 | + for (queue = 0; queue < maxq; queue++) { |
---|
| 4982 | + struct stmmac_channel *ch = &priv->channel[queue]; |
---|
| 4983 | + int rx_budget = ((priv->plat->dma_rx_size < NAPI_POLL_WEIGHT) && |
---|
| 4984 | + (priv->plat->dma_rx_size > 0)) ? |
---|
| 4985 | + priv->plat->dma_rx_size : NAPI_POLL_WEIGHT; |
---|
| 4986 | + int tx_budget = ((priv->plat->dma_tx_size < NAPI_POLL_WEIGHT) && |
---|
| 4987 | + (priv->plat->dma_tx_size > 0)) ? |
---|
| 4988 | + priv->plat->dma_tx_size : NAPI_POLL_WEIGHT; |
---|
| 4989 | + |
---|
| 4990 | + ch->priv_data = priv; |
---|
| 4991 | + ch->index = queue; |
---|
| 4992 | + spin_lock_init(&ch->lock); |
---|
| 4993 | + |
---|
| 4994 | + if (queue < priv->plat->rx_queues_to_use) { |
---|
| 4995 | + netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, |
---|
| 4996 | + rx_budget); |
---|
| 4997 | + } |
---|
| 4998 | + if (queue < priv->plat->tx_queues_to_use) { |
---|
| 4999 | + netif_tx_napi_add(dev, &ch->tx_napi, |
---|
| 5000 | + stmmac_napi_poll_tx, tx_budget); |
---|
| 5001 | + } |
---|
| 5002 | + } |
---|
| 5003 | +} |
---|
| 5004 | + |
---|
| 5005 | +static void stmmac_napi_del(struct net_device *dev) |
---|
| 5006 | +{ |
---|
| 5007 | + struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 5008 | + u32 queue, maxq; |
---|
| 5009 | + |
---|
| 5010 | + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
---|
| 5011 | + |
---|
| 5012 | + for (queue = 0; queue < maxq; queue++) { |
---|
| 5013 | + struct stmmac_channel *ch = &priv->channel[queue]; |
---|
| 5014 | + |
---|
| 5015 | + if (queue < priv->plat->rx_queues_to_use) |
---|
| 5016 | + netif_napi_del(&ch->rx_napi); |
---|
| 5017 | + if (queue < priv->plat->tx_queues_to_use) |
---|
| 5018 | + netif_napi_del(&ch->tx_napi); |
---|
| 5019 | + } |
---|
| 5020 | +} |
---|
| 5021 | + |
---|
| 5022 | +int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) |
---|
| 5023 | +{ |
---|
| 5024 | + struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 5025 | + int ret = 0; |
---|
| 5026 | + |
---|
| 5027 | + if (netif_running(dev)) |
---|
| 5028 | + stmmac_release(dev); |
---|
| 5029 | + |
---|
| 5030 | + stmmac_napi_del(dev); |
---|
| 5031 | + |
---|
| 5032 | + priv->plat->rx_queues_to_use = rx_cnt; |
---|
| 5033 | + priv->plat->tx_queues_to_use = tx_cnt; |
---|
| 5034 | + |
---|
| 5035 | + stmmac_napi_add(dev); |
---|
| 5036 | + |
---|
| 5037 | + if (netif_running(dev)) |
---|
| 5038 | + ret = stmmac_open(dev); |
---|
| 5039 | + |
---|
| 5040 | + return ret; |
---|
| 5041 | +} |
---|
| 5042 | + |
---|
| 5043 | +int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) |
---|
| 5044 | +{ |
---|
| 5045 | + struct stmmac_priv *priv = netdev_priv(dev); |
---|
| 5046 | + int ret = 0; |
---|
| 5047 | + |
---|
| 5048 | + if (netif_running(dev)) |
---|
| 5049 | + stmmac_release(dev); |
---|
| 5050 | + |
---|
| 5051 | + priv->dma_rx_size = rx_size; |
---|
| 5052 | + priv->dma_tx_size = tx_size; |
---|
| 5053 | + |
---|
| 5054 | + if (netif_running(dev)) |
---|
| 5055 | + ret = stmmac_open(dev); |
---|
| 5056 | + |
---|
| 5057 | + return ret; |
---|
| 5058 | +} |
---|
| 5059 | + |
---|
4275 | 5060 | /** |
---|
4276 | 5061 | * stmmac_dvr_probe |
---|
4277 | 5062 | * @device: device pointer |
---|
.. | .. |
---|
4288 | 5073 | { |
---|
4289 | 5074 | struct net_device *ndev = NULL; |
---|
4290 | 5075 | struct stmmac_priv *priv; |
---|
4291 | | - u32 queue, maxq; |
---|
4292 | | - int ret = 0; |
---|
| 5076 | + u32 rxq; |
---|
| 5077 | + int i, ret = 0; |
---|
4293 | 5078 | |
---|
4294 | | - ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
---|
4295 | | - MTL_MAX_TX_QUEUES, |
---|
4296 | | - MTL_MAX_RX_QUEUES); |
---|
| 5079 | + ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), |
---|
| 5080 | + MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); |
---|
4297 | 5081 | if (!ndev) |
---|
4298 | 5082 | return -ENOMEM; |
---|
4299 | 5083 | |
---|
.. | .. |
---|
4313 | 5097 | priv->wol_irq = res->wol_irq; |
---|
4314 | 5098 | priv->lpi_irq = res->lpi_irq; |
---|
4315 | 5099 | |
---|
4316 | | - if (res->mac) |
---|
| 5100 | + if (!IS_ERR_OR_NULL(res->mac)) |
---|
4317 | 5101 | memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); |
---|
4318 | 5102 | |
---|
4319 | 5103 | dev_set_drvdata(device, priv->dev); |
---|
.. | .. |
---|
4325 | 5109 | priv->wq = create_singlethread_workqueue("stmmac_wq"); |
---|
4326 | 5110 | if (!priv->wq) { |
---|
4327 | 5111 | dev_err(priv->device, "failed to create workqueue\n"); |
---|
4328 | | - ret = -ENOMEM; |
---|
4329 | | - goto error_wq; |
---|
| 5112 | + return -ENOMEM; |
---|
4330 | 5113 | } |
---|
4331 | 5114 | |
---|
4332 | 5115 | INIT_WORK(&priv->service_task, stmmac_service_task); |
---|
.. | .. |
---|
4354 | 5137 | |
---|
4355 | 5138 | stmmac_check_ether_addr(priv); |
---|
4356 | 5139 | |
---|
4357 | | - /* Configure real RX and TX queues */ |
---|
4358 | | - netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); |
---|
4359 | | - netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); |
---|
4360 | | - |
---|
4361 | 5140 | ndev->netdev_ops = &stmmac_netdev_ops; |
---|
4362 | 5141 | |
---|
4363 | 5142 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
---|
.. | .. |
---|
4375 | 5154 | priv->tso = true; |
---|
4376 | 5155 | dev_info(priv->device, "TSO feature enabled\n"); |
---|
4377 | 5156 | } |
---|
| 5157 | + |
---|
| 5158 | + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { |
---|
| 5159 | + ndev->hw_features |= NETIF_F_GRO; |
---|
| 5160 | + if (!priv->plat->sph_disable) { |
---|
| 5161 | + priv->sph = true; |
---|
| 5162 | + dev_info(priv->device, "SPH feature enabled\n"); |
---|
| 5163 | + } |
---|
| 5164 | + } |
---|
| 5165 | + |
---|
| 5166 | + /* The current IP register MAC_HW_Feature1[ADDR64] only define |
---|
| 5167 | + * 32/40/64 bit width, but some SOC support others like i.MX8MP |
---|
| 5168 | + * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. |
---|
| 5169 | + * So overwrite dma_cap.addr64 according to HW real design. |
---|
| 5170 | + */ |
---|
| 5171 | + if (priv->plat->addr64) |
---|
| 5172 | + priv->dma_cap.addr64 = priv->plat->addr64; |
---|
| 5173 | + |
---|
| 5174 | + if (priv->dma_cap.addr64) { |
---|
| 5175 | + ret = dma_set_mask_and_coherent(device, |
---|
| 5176 | + DMA_BIT_MASK(priv->dma_cap.addr64)); |
---|
| 5177 | + if (!ret) { |
---|
| 5178 | + dev_info(priv->device, "Using %d bits DMA width\n", |
---|
| 5179 | + priv->dma_cap.addr64); |
---|
| 5180 | + |
---|
| 5181 | + /* |
---|
| 5182 | + * If more than 32 bits can be addressed, make sure to |
---|
| 5183 | + * enable enhanced addressing mode. |
---|
| 5184 | + */ |
---|
| 5185 | + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) |
---|
| 5186 | + priv->plat->dma_cfg->eame = true; |
---|
| 5187 | + } else { |
---|
| 5188 | + ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); |
---|
| 5189 | + if (ret) { |
---|
| 5190 | + dev_err(priv->device, "Failed to set DMA Mask\n"); |
---|
| 5191 | + goto error_hw_init; |
---|
| 5192 | + } |
---|
| 5193 | + |
---|
| 5194 | + priv->dma_cap.addr64 = 32; |
---|
| 5195 | + } |
---|
| 5196 | + } |
---|
| 5197 | + |
---|
4378 | 5198 | ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; |
---|
4379 | 5199 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); |
---|
4380 | 5200 | #ifdef STMMAC_VLAN_TAG_USED |
---|
4381 | 5201 | /* Both mac100 and gmac support receive VLAN tag detection */ |
---|
4382 | 5202 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; |
---|
| 5203 | + if (priv->dma_cap.vlhash) { |
---|
| 5204 | + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
---|
| 5205 | + ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; |
---|
| 5206 | + } |
---|
| 5207 | + if (priv->dma_cap.vlins) { |
---|
| 5208 | + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; |
---|
| 5209 | + if (priv->dma_cap.dvlan) |
---|
| 5210 | + ndev->features |= NETIF_F_HW_VLAN_STAG_TX; |
---|
| 5211 | + } |
---|
4383 | 5212 | #endif |
---|
4384 | 5213 | priv->msg_enable = netif_msg_init(debug, default_msg_level); |
---|
4385 | 5214 | |
---|
| 5215 | + /* Initialize RSS */ |
---|
| 5216 | + rxq = priv->plat->rx_queues_to_use; |
---|
| 5217 | + netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); |
---|
| 5218 | + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) |
---|
| 5219 | + priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); |
---|
| 5220 | + |
---|
| 5221 | + if (priv->dma_cap.rssen && priv->plat->rss_en) |
---|
| 5222 | + ndev->features |= NETIF_F_RXHASH; |
---|
| 5223 | + |
---|
4386 | 5224 | /* MTU range: 46 - hw-specific max */ |
---|
4387 | 5225 | ndev->min_mtu = ETH_ZLEN - ETH_HLEN; |
---|
4388 | | - if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) |
---|
4389 | | - ndev->max_mtu = JUMBO_LEN; |
---|
4390 | | - else if (priv->plat->has_xgmac) |
---|
| 5226 | + if (priv->plat->has_xgmac) |
---|
4391 | 5227 | ndev->max_mtu = XGMAC_JUMBO_LEN; |
---|
| 5228 | + else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) |
---|
| 5229 | + ndev->max_mtu = JUMBO_LEN; |
---|
4392 | 5230 | else |
---|
4393 | 5231 | ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); |
---|
4394 | 5232 | /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu |
---|
.. | .. |
---|
4406 | 5244 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ |
---|
4407 | 5245 | |
---|
4408 | 5246 | /* Setup channels NAPI */ |
---|
4409 | | - maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
---|
4410 | | - |
---|
4411 | | - for (queue = 0; queue < maxq; queue++) { |
---|
4412 | | - struct stmmac_channel *ch = &priv->channel[queue]; |
---|
4413 | | - |
---|
4414 | | - ch->priv_data = priv; |
---|
4415 | | - ch->index = queue; |
---|
4416 | | - |
---|
4417 | | - if (queue < priv->plat->rx_queues_to_use) |
---|
4418 | | - ch->has_rx = true; |
---|
4419 | | - if (queue < priv->plat->tx_queues_to_use) |
---|
4420 | | - ch->has_tx = true; |
---|
4421 | | - |
---|
4422 | | - netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, |
---|
4423 | | - NAPI_POLL_WEIGHT); |
---|
4424 | | - } |
---|
| 5247 | + stmmac_napi_add(ndev); |
---|
4425 | 5248 | |
---|
4426 | 5249 | mutex_init(&priv->lock); |
---|
4427 | 5250 | |
---|
.. | .. |
---|
4431 | 5254 | * set the MDC clock dynamically according to the csr actual |
---|
4432 | 5255 | * clock input. |
---|
4433 | 5256 | */ |
---|
4434 | | - if (!priv->plat->clk_csr) |
---|
4435 | | - stmmac_clk_csr_set(priv); |
---|
4436 | | - else |
---|
| 5257 | + if (priv->plat->clk_csr >= 0) |
---|
4437 | 5258 | priv->clk_csr = priv->plat->clk_csr; |
---|
| 5259 | + else |
---|
| 5260 | + stmmac_clk_csr_set(priv); |
---|
4438 | 5261 | |
---|
4439 | 5262 | stmmac_check_pcs_mode(priv); |
---|
4440 | 5263 | |
---|
4441 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
---|
4442 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
---|
| 5264 | + pm_runtime_get_noresume(device); |
---|
| 5265 | + pm_runtime_set_active(device); |
---|
| 5266 | + pm_runtime_enable(device); |
---|
| 5267 | + |
---|
| 5268 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
---|
4443 | 5269 | priv->hw->pcs != STMMAC_PCS_RTBI) { |
---|
4444 | 5270 | /* MDIO bus Registration */ |
---|
4445 | 5271 | ret = stmmac_mdio_register(ndev); |
---|
.. | .. |
---|
4451 | 5277 | } |
---|
4452 | 5278 | } |
---|
4453 | 5279 | |
---|
| 5280 | + ret = stmmac_phy_setup(priv); |
---|
| 5281 | + if (ret) { |
---|
| 5282 | + netdev_err(ndev, "failed to setup phy (%d)\n", ret); |
---|
| 5283 | + goto error_phy_setup; |
---|
| 5284 | + } |
---|
| 5285 | + |
---|
4454 | 5286 | ret = register_netdev(ndev); |
---|
4455 | 5287 | if (ret) { |
---|
4456 | 5288 | dev_err(priv->device, "%s: ERROR %i registering the device\n", |
---|
.. | .. |
---|
4459 | 5291 | } |
---|
4460 | 5292 | |
---|
4461 | 5293 | #ifdef CONFIG_DEBUG_FS |
---|
4462 | | - ret = stmmac_init_fs(ndev); |
---|
4463 | | - if (ret < 0) |
---|
4464 | | - netdev_warn(priv->dev, "%s: failed debugFS registration\n", |
---|
4465 | | - __func__); |
---|
| 5294 | + stmmac_init_fs(ndev); |
---|
4466 | 5295 | #endif |
---|
| 5296 | + |
---|
| 5297 | + /* Let pm_runtime_put() disable the clocks. |
---|
| 5298 | + * If CONFIG_PM is not enabled, the clocks will stay powered. |
---|
| 5299 | + */ |
---|
| 5300 | + pm_runtime_put(device); |
---|
| 5301 | + |
---|
| 5302 | + //add |
---|
| 5303 | + phy_register_fixup_for_uid(RTL_8211F_PHY_ID, RTL_8211F_PHY_ID_MASK, rtl8211F_led_control); |
---|
4467 | 5304 | |
---|
4468 | 5305 | return ret; |
---|
4469 | 5306 | |
---|
4470 | 5307 | error_netdev_register: |
---|
4471 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
---|
4472 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
---|
| 5308 | + phylink_destroy(priv->phylink); |
---|
| 5309 | +error_phy_setup: |
---|
| 5310 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
---|
4473 | 5311 | priv->hw->pcs != STMMAC_PCS_RTBI) |
---|
4474 | 5312 | stmmac_mdio_unregister(ndev); |
---|
4475 | 5313 | error_mdio_register: |
---|
4476 | | - for (queue = 0; queue < maxq; queue++) { |
---|
4477 | | - struct stmmac_channel *ch = &priv->channel[queue]; |
---|
4478 | | - |
---|
4479 | | - netif_napi_del(&ch->napi); |
---|
4480 | | - } |
---|
| 5314 | + stmmac_napi_del(ndev); |
---|
4481 | 5315 | error_hw_init: |
---|
4482 | 5316 | destroy_workqueue(priv->wq); |
---|
4483 | | -error_wq: |
---|
4484 | | - free_netdev(ndev); |
---|
4485 | 5317 | |
---|
4486 | 5318 | return ret; |
---|
4487 | 5319 | } |
---|
.. | .. |
---|
4500 | 5332 | |
---|
4501 | 5333 | netdev_info(priv->dev, "%s: removing driver", __func__); |
---|
4502 | 5334 | |
---|
4503 | | -#ifdef CONFIG_DEBUG_FS |
---|
4504 | | - stmmac_exit_fs(ndev); |
---|
4505 | | -#endif |
---|
4506 | 5335 | stmmac_stop_all_dma(priv); |
---|
4507 | | - |
---|
4508 | 5336 | stmmac_mac_set(priv, priv->ioaddr, false); |
---|
4509 | 5337 | netif_carrier_off(ndev); |
---|
4510 | 5338 | unregister_netdev(ndev); |
---|
| 5339 | + |
---|
| 5340 | + /* Serdes power down needs to happen after VLAN filter |
---|
| 5341 | + * is deleted that is triggered by unregister_netdev(). |
---|
| 5342 | + */ |
---|
| 5343 | + if (priv->plat->serdes_powerdown) |
---|
| 5344 | + priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); |
---|
| 5345 | + |
---|
| 5346 | +#ifdef CONFIG_DEBUG_FS |
---|
| 5347 | + stmmac_exit_fs(ndev); |
---|
| 5348 | +#endif |
---|
| 5349 | + phylink_destroy(priv->phylink); |
---|
4511 | 5350 | if (priv->plat->stmmac_rst) |
---|
4512 | 5351 | reset_control_assert(priv->plat->stmmac_rst); |
---|
4513 | | - clk_disable_unprepare(priv->plat->pclk); |
---|
4514 | | - clk_disable_unprepare(priv->plat->stmmac_clk); |
---|
4515 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
---|
4516 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
---|
| 5352 | + pm_runtime_put(dev); |
---|
| 5353 | + pm_runtime_disable(dev); |
---|
| 5354 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
---|
4517 | 5355 | priv->hw->pcs != STMMAC_PCS_RTBI) |
---|
4518 | 5356 | stmmac_mdio_unregister(ndev); |
---|
4519 | 5357 | destroy_workqueue(priv->wq); |
---|
4520 | 5358 | mutex_destroy(&priv->lock); |
---|
4521 | | - free_netdev(ndev); |
---|
4522 | 5359 | |
---|
4523 | 5360 | return 0; |
---|
4524 | 5361 | } |
---|
.. | .. |
---|
4540 | 5377 | if (!ndev || !netif_running(ndev)) |
---|
4541 | 5378 | return 0; |
---|
4542 | 5379 | |
---|
4543 | | - if (ndev->phydev) |
---|
4544 | | - phy_stop(ndev->phydev); |
---|
| 5380 | + phylink_mac_change(priv->phylink, false); |
---|
4545 | 5381 | |
---|
4546 | 5382 | mutex_lock(&priv->lock); |
---|
4547 | 5383 | |
---|
.. | .. |
---|
4560 | 5396 | /* Stop TX/RX DMA */ |
---|
4561 | 5397 | stmmac_stop_all_dma(priv); |
---|
4562 | 5398 | |
---|
| 5399 | + if (priv->plat->serdes_powerdown) |
---|
| 5400 | + priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); |
---|
| 5401 | + |
---|
4563 | 5402 | /* Enable Power down mode by programming the PMT regs */ |
---|
4564 | | - if (device_may_wakeup(priv->device)) { |
---|
| 5403 | + if (device_may_wakeup(priv->device) && priv->plat->pmt) { |
---|
4565 | 5404 | stmmac_pmt(priv, priv->hw, priv->wolopts); |
---|
4566 | 5405 | priv->irq_wake = 1; |
---|
4567 | 5406 | } else { |
---|
| 5407 | + mutex_unlock(&priv->lock); |
---|
| 5408 | + rtnl_lock(); |
---|
| 5409 | + if (device_may_wakeup(priv->device)) |
---|
| 5410 | + phylink_speed_down(priv->phylink, false); |
---|
4568 | 5411 | if (priv->plat->integrated_phy_power) |
---|
4569 | 5412 | priv->plat->integrated_phy_power(priv->plat->bsp_priv, |
---|
4570 | 5413 | false); |
---|
| 5414 | + phylink_stop(priv->phylink); |
---|
| 5415 | + rtnl_unlock(); |
---|
| 5416 | + mutex_lock(&priv->lock); |
---|
| 5417 | + |
---|
4571 | 5418 | stmmac_mac_set(priv, priv->ioaddr, false); |
---|
4572 | 5419 | pinctrl_pm_select_sleep_state(priv->device); |
---|
4573 | | - /* Disable clock in case of PWM is off */ |
---|
4574 | | - if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP)) |
---|
4575 | | - clk_disable_unprepare(priv->plat->clk_ptp_ref); |
---|
4576 | | - clk_disable_unprepare(priv->plat->pclk); |
---|
4577 | | - clk_disable_unprepare(priv->plat->stmmac_clk); |
---|
4578 | 5420 | } |
---|
4579 | 5421 | mutex_unlock(&priv->lock); |
---|
4580 | 5422 | |
---|
4581 | | - priv->oldlink = false; |
---|
4582 | 5423 | priv->speed = SPEED_UNKNOWN; |
---|
4583 | | - priv->oldduplex = DUPLEX_UNKNOWN; |
---|
4584 | 5424 | return 0; |
---|
4585 | 5425 | } |
---|
4586 | 5426 | EXPORT_SYMBOL_GPL(stmmac_suspend); |
---|
4587 | 5427 | |
---|
4588 | 5428 | /** |
---|
4589 | 5429 | * stmmac_reset_queues_param - reset queue parameters |
---|
4590 | | - * @dev: device pointer |
---|
| 5430 | + * @priv: device pointer |
---|
4591 | 5431 | */ |
---|
4592 | 5432 | static void stmmac_reset_queues_param(struct stmmac_priv *priv) |
---|
4593 | 5433 | { |
---|
.. | .. |
---|
4623 | 5463 | { |
---|
4624 | 5464 | struct net_device *ndev = dev_get_drvdata(dev); |
---|
4625 | 5465 | struct stmmac_priv *priv = netdev_priv(ndev); |
---|
| 5466 | + int ret; |
---|
4626 | 5467 | |
---|
4627 | 5468 | if (!netif_running(ndev)) |
---|
4628 | 5469 | return 0; |
---|
.. | .. |
---|
4633 | 5474 | * this bit because it can generate problems while resuming |
---|
4634 | 5475 | * from another devices (e.g. serial console). |
---|
4635 | 5476 | */ |
---|
4636 | | - if (device_may_wakeup(priv->device)) { |
---|
| 5477 | + if (device_may_wakeup(priv->device) && priv->plat->pmt) { |
---|
4637 | 5478 | mutex_lock(&priv->lock); |
---|
4638 | 5479 | stmmac_pmt(priv, priv->hw, 0); |
---|
4639 | 5480 | mutex_unlock(&priv->lock); |
---|
4640 | 5481 | priv->irq_wake = 0; |
---|
4641 | 5482 | } else { |
---|
4642 | 5483 | pinctrl_pm_select_default_state(priv->device); |
---|
4643 | | - /* enable the clk previously disabled */ |
---|
4644 | | - clk_prepare_enable(priv->plat->stmmac_clk); |
---|
4645 | | - clk_prepare_enable(priv->plat->pclk); |
---|
4646 | | - if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP)) |
---|
4647 | | - clk_prepare_enable(priv->plat->clk_ptp_ref); |
---|
4648 | 5484 | /* reset the phy so that it's ready */ |
---|
4649 | 5485 | if (priv->mii) |
---|
4650 | 5486 | stmmac_mdio_reset(priv->mii); |
---|
.. | .. |
---|
4653 | 5489 | true); |
---|
4654 | 5490 | } |
---|
4655 | 5491 | |
---|
| 5492 | + if (priv->plat->serdes_powerup) { |
---|
| 5493 | + ret = priv->plat->serdes_powerup(ndev, |
---|
| 5494 | + priv->plat->bsp_priv); |
---|
| 5495 | + |
---|
| 5496 | + if (ret < 0) |
---|
| 5497 | + return ret; |
---|
| 5498 | + } |
---|
| 5499 | + |
---|
| 5500 | + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { |
---|
| 5501 | + rtnl_lock(); |
---|
| 5502 | + phylink_start(priv->phylink); |
---|
| 5503 | + /* We may have called phylink_speed_down before */ |
---|
| 5504 | + phylink_speed_up(priv->phylink); |
---|
| 5505 | + rtnl_unlock(); |
---|
| 5506 | + } |
---|
| 5507 | + |
---|
| 5508 | + rtnl_lock(); |
---|
4656 | 5509 | mutex_lock(&priv->lock); |
---|
4657 | 5510 | |
---|
4658 | 5511 | stmmac_reset_queues_param(priv); |
---|
.. | .. |
---|
4660 | 5513 | stmmac_free_tx_skbufs(priv); |
---|
4661 | 5514 | stmmac_clear_descriptors(priv); |
---|
4662 | 5515 | |
---|
| 5516 | +#if 1 |
---|
| 5517 | + printk("ben -------resume add 2s delay time.\n"); |
---|
| 5518 | + mdelay(2000); |
---|
| 5519 | + |
---|
| 5520 | +#endif |
---|
| 5521 | + |
---|
4663 | 5522 | stmmac_hw_setup(ndev, false); |
---|
4664 | | - stmmac_init_tx_coalesce(priv); |
---|
| 5523 | + stmmac_init_coalesce(priv); |
---|
4665 | 5524 | stmmac_set_rx_mode(ndev); |
---|
| 5525 | + |
---|
| 5526 | + stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); |
---|
4666 | 5527 | |
---|
4667 | 5528 | stmmac_enable_all_queues(priv); |
---|
4668 | 5529 | |
---|
4669 | | - netif_device_attach(ndev); |
---|
4670 | | - |
---|
4671 | 5530 | mutex_unlock(&priv->lock); |
---|
| 5531 | + rtnl_unlock(); |
---|
4672 | 5532 | |
---|
4673 | | - if (ndev->phydev) |
---|
4674 | | - phy_start(ndev->phydev); |
---|
| 5533 | + phylink_mac_change(priv->phylink, true); |
---|
| 5534 | + |
---|
| 5535 | + netif_device_attach(ndev); |
---|
4675 | 5536 | |
---|
4676 | 5537 | return 0; |
---|
4677 | 5538 | } |
---|
.. | .. |
---|
4683 | 5544 | char *opt; |
---|
4684 | 5545 | |
---|
4685 | 5546 | if (!str || !*str) |
---|
4686 | | - return -EINVAL; |
---|
| 5547 | + return 1; |
---|
4687 | 5548 | while ((opt = strsep(&str, ",")) != NULL) { |
---|
4688 | 5549 | if (!strncmp(opt, "debug:", 6)) { |
---|
4689 | 5550 | if (kstrtoint(opt + 6, 0, &debug)) |
---|
.. | .. |
---|
4714 | 5575 | goto err; |
---|
4715 | 5576 | } |
---|
4716 | 5577 | } |
---|
4717 | | - return 0; |
---|
| 5578 | + return 1; |
---|
4718 | 5579 | |
---|
4719 | 5580 | err: |
---|
4720 | 5581 | pr_err("%s: ERROR broken module parameter conversion", __func__); |
---|
4721 | | - return -EINVAL; |
---|
| 5582 | + return 1; |
---|
4722 | 5583 | } |
---|
4723 | 5584 | |
---|
4724 | 5585 | __setup("stmmaceth=", stmmac_cmdline_opt); |
---|
.. | .. |
---|
4728 | 5589 | { |
---|
4729 | 5590 | #ifdef CONFIG_DEBUG_FS |
---|
4730 | 5591 | /* Create debugfs main directory if it doesn't exist yet */ |
---|
4731 | | - if (!stmmac_fs_dir) { |
---|
| 5592 | + if (!stmmac_fs_dir) |
---|
4732 | 5593 | stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); |
---|
4733 | | - |
---|
4734 | | - if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { |
---|
4735 | | - pr_err("ERROR %s, debugfs create directory failed\n", |
---|
4736 | | - STMMAC_RESOURCE_NAME); |
---|
4737 | | - |
---|
4738 | | - return -ENOMEM; |
---|
4739 | | - } |
---|
4740 | | - } |
---|
| 5594 | + register_netdevice_notifier(&stmmac_notifier); |
---|
4741 | 5595 | #endif |
---|
4742 | 5596 | |
---|
4743 | 5597 | return 0; |
---|
.. | .. |
---|
4746 | 5600 | static void __exit stmmac_exit(void) |
---|
4747 | 5601 | { |
---|
4748 | 5602 | #ifdef CONFIG_DEBUG_FS |
---|
| 5603 | + unregister_netdevice_notifier(&stmmac_notifier); |
---|
4749 | 5604 | debugfs_remove_recursive(stmmac_fs_dir); |
---|
4750 | 5605 | #endif |
---|
4751 | 5606 | } |
---|