| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /******************************************************************************* |
|---|
| 2 | 3 | This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. |
|---|
| 3 | 4 | ST Ethernet IPs are built around a Synopsys IP Core. |
|---|
| 4 | 5 | |
|---|
| 5 | 6 | Copyright(C) 2007-2011 STMicroelectronics Ltd |
|---|
| 6 | 7 | |
|---|
| 7 | | - This program is free software; you can redistribute it and/or modify it |
|---|
| 8 | | - under the terms and conditions of the GNU General Public License, |
|---|
| 9 | | - version 2, as published by the Free Software Foundation. |
|---|
| 10 | | - |
|---|
| 11 | | - This program is distributed in the hope it will be useful, but WITHOUT |
|---|
| 12 | | - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|---|
| 13 | | - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
|---|
| 14 | | - more details. |
|---|
| 15 | | - |
|---|
| 16 | | - The full GNU General Public License is included in this distribution in |
|---|
| 17 | | - the file called "COPYING". |
|---|
| 18 | 8 | |
|---|
| 19 | 9 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
|---|
| 20 | 10 | |
|---|
| .. | .. |
|---|
| 38 | 28 | #include <linux/if_vlan.h> |
|---|
| 39 | 29 | #include <linux/dma-mapping.h> |
|---|
| 40 | 30 | #include <linux/slab.h> |
|---|
| 31 | +#include <linux/pm_runtime.h> |
|---|
| 41 | 32 | #include <linux/prefetch.h> |
|---|
| 42 | 33 | #include <linux/pinctrl/consumer.h> |
|---|
| 43 | 34 | #ifdef CONFIG_DEBUG_FS |
|---|
| .. | .. |
|---|
| 45 | 36 | #include <linux/seq_file.h> |
|---|
| 46 | 37 | #endif /* CONFIG_DEBUG_FS */ |
|---|
| 47 | 38 | #include <linux/net_tstamp.h> |
|---|
| 39 | +#include <linux/phylink.h> |
|---|
| 48 | 40 | #include <linux/udp.h> |
|---|
| 49 | 41 | #include <net/pkt_cls.h> |
|---|
| 50 | 42 | #include "stmmac_ptp.h" |
|---|
| .. | .. |
|---|
| 54 | 46 | #include "dwmac1000.h" |
|---|
| 55 | 47 | #include "dwxgmac2.h" |
|---|
| 56 | 48 | #include "hwif.h" |
|---|
| 49 | + |
|---|
| 50 | +/* As long as the interface is active, we keep the timestamping counter enabled |
|---|
| 51 | + * with fine resolution and binary rollover. This avoid non-monotonic behavior |
|---|
| 52 | + * (clock jumps) when changing timestamping settings at runtime. |
|---|
| 53 | + */ |
|---|
| 54 | +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ |
|---|
| 55 | + PTP_TCR_TSCTRLSSR) |
|---|
| 57 | 56 | |
|---|
| 58 | 57 | #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) |
|---|
| 59 | 58 | #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) |
|---|
| .. | .. |
|---|
| 72 | 71 | module_param(phyaddr, int, 0444); |
|---|
| 73 | 72 | MODULE_PARM_DESC(phyaddr, "Physical device address"); |
|---|
| 74 | 73 | |
|---|
| 75 | | -#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) |
|---|
| 76 | | -#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) |
|---|
| 74 | +#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) |
|---|
| 75 | +#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) |
|---|
| 77 | 76 | |
|---|
| 78 | | -static int flow_ctrl = FLOW_OFF; |
|---|
| 77 | +static int flow_ctrl = FLOW_AUTO; |
|---|
| 79 | 78 | module_param(flow_ctrl, int, 0644); |
|---|
| 80 | 79 | MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); |
|---|
| 81 | 80 | |
|---|
| .. | .. |
|---|
| 103 | 102 | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; |
|---|
| 104 | 103 | module_param(eee_timer, int, 0644); |
|---|
| 105 | 104 | MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); |
|---|
| 106 | | -#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) |
|---|
| 105 | +#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) |
|---|
| 107 | 106 | |
|---|
| 108 | 107 | /* By default the driver will use the ring mode to manage tx and rx descriptors, |
|---|
| 109 | 108 | * but allow user to force to use the chain instead of the ring |
|---|
| .. | .. |
|---|
| 115 | 114 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); |
|---|
| 116 | 115 | |
|---|
| 117 | 116 | #ifdef CONFIG_DEBUG_FS |
|---|
| 118 | | -static int stmmac_init_fs(struct net_device *dev); |
|---|
| 117 | +static const struct net_device_ops stmmac_netdev_ops; |
|---|
| 118 | +static void stmmac_init_fs(struct net_device *dev); |
|---|
| 119 | 119 | static void stmmac_exit_fs(struct net_device *dev); |
|---|
| 120 | 120 | #endif |
|---|
| 121 | 121 | |
|---|
| 122 | 122 | #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) |
|---|
| 123 | + |
|---|
| 124 | +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) |
|---|
| 125 | +{ |
|---|
| 126 | + int ret = 0; |
|---|
| 127 | + |
|---|
| 128 | + if (enabled) { |
|---|
| 129 | + ret = clk_prepare_enable(priv->plat->stmmac_clk); |
|---|
| 130 | + if (ret) |
|---|
| 131 | + return ret; |
|---|
| 132 | + ret = clk_prepare_enable(priv->plat->pclk); |
|---|
| 133 | + if (ret) { |
|---|
| 134 | + clk_disable_unprepare(priv->plat->stmmac_clk); |
|---|
| 135 | + return ret; |
|---|
| 136 | + } |
|---|
| 137 | + } else { |
|---|
| 138 | + clk_disable_unprepare(priv->plat->stmmac_clk); |
|---|
| 139 | + clk_disable_unprepare(priv->plat->pclk); |
|---|
| 140 | + } |
|---|
| 141 | + |
|---|
| 142 | + return ret; |
|---|
| 143 | +} |
|---|
| 144 | +EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); |
|---|
| 123 | 145 | |
|---|
| 124 | 146 | /** |
|---|
| 125 | 147 | * stmmac_verify_args - verify the driver parameters. |
|---|
| .. | .. |
|---|
| 156 | 178 | for (queue = 0; queue < maxq; queue++) { |
|---|
| 157 | 179 | struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 158 | 180 | |
|---|
| 159 | | - napi_disable(&ch->napi); |
|---|
| 181 | + if (queue < rx_queues_cnt) |
|---|
| 182 | + napi_disable(&ch->rx_napi); |
|---|
| 183 | + if (queue < tx_queues_cnt) |
|---|
| 184 | + napi_disable(&ch->tx_napi); |
|---|
| 160 | 185 | } |
|---|
| 161 | 186 | } |
|---|
| 162 | 187 | |
|---|
| .. | .. |
|---|
| 174 | 199 | for (queue = 0; queue < maxq; queue++) { |
|---|
| 175 | 200 | struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 176 | 201 | |
|---|
| 177 | | - napi_enable(&ch->napi); |
|---|
| 202 | + if (queue < rx_queues_cnt) |
|---|
| 203 | + napi_enable(&ch->rx_napi); |
|---|
| 204 | + if (queue < tx_queues_cnt) |
|---|
| 205 | + napi_enable(&ch->tx_napi); |
|---|
| 178 | 206 | } |
|---|
| 179 | 207 | } |
|---|
| 180 | 208 | |
|---|
| .. | .. |
|---|
| 273 | 301 | if (tx_q->dirty_tx > tx_q->cur_tx) |
|---|
| 274 | 302 | avail = tx_q->dirty_tx - tx_q->cur_tx - 1; |
|---|
| 275 | 303 | else |
|---|
| 276 | | - avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; |
|---|
| 304 | + avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; |
|---|
| 277 | 305 | |
|---|
| 278 | 306 | return avail; |
|---|
| 279 | 307 | } |
|---|
| .. | .. |
|---|
| 291 | 319 | if (rx_q->dirty_rx <= rx_q->cur_rx) |
|---|
| 292 | 320 | dirty = rx_q->cur_rx - rx_q->dirty_rx; |
|---|
| 293 | 321 | else |
|---|
| 294 | | - dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; |
|---|
| 322 | + dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; |
|---|
| 295 | 323 | |
|---|
| 296 | 324 | return dirty; |
|---|
| 297 | | -} |
|---|
| 298 | | - |
|---|
| 299 | | -/** |
|---|
| 300 | | - * stmmac_hw_fix_mac_speed - callback for speed selection |
|---|
| 301 | | - * @priv: driver private structure |
|---|
| 302 | | - * Description: on some platforms (e.g. ST), some HW system configuration |
|---|
| 303 | | - * registers have to be set according to the link speed negotiated. |
|---|
| 304 | | - */ |
|---|
| 305 | | -static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) |
|---|
| 306 | | -{ |
|---|
| 307 | | - struct net_device *ndev = priv->dev; |
|---|
| 308 | | - struct phy_device *phydev = ndev->phydev; |
|---|
| 309 | | - |
|---|
| 310 | | - if (likely(priv->plat->fix_mac_speed)) |
|---|
| 311 | | - priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); |
|---|
| 312 | 325 | } |
|---|
| 313 | 326 | |
|---|
| 314 | 327 | /** |
|---|
| .. | .. |
|---|
| 351 | 364 | |
|---|
| 352 | 365 | /** |
|---|
| 353 | 366 | * stmmac_eee_ctrl_timer - EEE TX SW timer. |
|---|
| 354 | | - * @arg : data hook |
|---|
| 367 | + * @t: timer_list struct containing private info |
|---|
| 355 | 368 | * Description: |
|---|
| 356 | 369 | * if there is no data transfer and if we are not in LPI state, |
|---|
| 357 | 370 | * then MAC Transmitter can be moved to LPI state. |
|---|
| .. | .. |
|---|
| 361 | 374 | struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); |
|---|
| 362 | 375 | |
|---|
| 363 | 376 | stmmac_enable_eee_mode(priv); |
|---|
| 364 | | - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
|---|
| 377 | + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); |
|---|
| 365 | 378 | } |
|---|
| 366 | 379 | |
|---|
| 367 | 380 | /** |
|---|
| .. | .. |
|---|
| 374 | 387 | */ |
|---|
| 375 | 388 | bool stmmac_eee_init(struct stmmac_priv *priv) |
|---|
| 376 | 389 | { |
|---|
| 377 | | - struct net_device *ndev = priv->dev; |
|---|
| 378 | | - int interface = priv->plat->interface; |
|---|
| 379 | | - bool ret = false; |
|---|
| 380 | | - |
|---|
| 381 | | - if ((interface != PHY_INTERFACE_MODE_MII) && |
|---|
| 382 | | - (interface != PHY_INTERFACE_MODE_GMII) && |
|---|
| 383 | | - !phy_interface_mode_is_rgmii(interface)) |
|---|
| 384 | | - goto out; |
|---|
| 390 | + int eee_tw_timer = priv->eee_tw_timer; |
|---|
| 385 | 391 | |
|---|
| 386 | 392 | /* Using PCS we cannot dial with the phy registers at this stage |
|---|
| 387 | 393 | * so we do not support extra feature like EEE. |
|---|
| 388 | 394 | */ |
|---|
| 389 | | - if ((priv->hw->pcs == STMMAC_PCS_RGMII) || |
|---|
| 390 | | - (priv->hw->pcs == STMMAC_PCS_TBI) || |
|---|
| 391 | | - (priv->hw->pcs == STMMAC_PCS_RTBI)) |
|---|
| 392 | | - goto out; |
|---|
| 395 | + if (priv->hw->pcs == STMMAC_PCS_TBI || |
|---|
| 396 | + priv->hw->pcs == STMMAC_PCS_RTBI) |
|---|
| 397 | + return false; |
|---|
| 393 | 398 | |
|---|
| 394 | | - /* MAC core supports the EEE feature. */ |
|---|
| 395 | | - if (priv->dma_cap.eee) { |
|---|
| 396 | | - int tx_lpi_timer = priv->tx_lpi_timer; |
|---|
| 399 | + /* Check if MAC core supports the EEE feature. */ |
|---|
| 400 | + if (!priv->dma_cap.eee) |
|---|
| 401 | + return false; |
|---|
| 397 | 402 | |
|---|
| 398 | | - /* Check if the PHY supports EEE */ |
|---|
| 399 | | - if (phy_init_eee(ndev->phydev, 1)) { |
|---|
| 400 | | - /* To manage at run-time if the EEE cannot be supported |
|---|
| 401 | | - * anymore (for example because the lp caps have been |
|---|
| 402 | | - * changed). |
|---|
| 403 | | - * In that case the driver disable own timers. |
|---|
| 404 | | - */ |
|---|
| 405 | | - mutex_lock(&priv->lock); |
|---|
| 406 | | - if (priv->eee_active) { |
|---|
| 407 | | - netdev_dbg(priv->dev, "disable EEE\n"); |
|---|
| 408 | | - del_timer_sync(&priv->eee_ctrl_timer); |
|---|
| 409 | | - stmmac_set_eee_timer(priv, priv->hw, 0, |
|---|
| 410 | | - tx_lpi_timer); |
|---|
| 411 | | - } |
|---|
| 412 | | - priv->eee_active = 0; |
|---|
| 413 | | - mutex_unlock(&priv->lock); |
|---|
| 414 | | - goto out; |
|---|
| 403 | + mutex_lock(&priv->lock); |
|---|
| 404 | + |
|---|
| 405 | + /* Check if it needs to be deactivated */ |
|---|
| 406 | + if (!priv->eee_active) { |
|---|
| 407 | + if (priv->eee_enabled) { |
|---|
| 408 | + netdev_dbg(priv->dev, "disable EEE\n"); |
|---|
| 409 | + del_timer_sync(&priv->eee_ctrl_timer); |
|---|
| 410 | + stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); |
|---|
| 415 | 411 | } |
|---|
| 416 | | - /* Activate the EEE and start timers */ |
|---|
| 417 | | - mutex_lock(&priv->lock); |
|---|
| 418 | | - if (!priv->eee_active) { |
|---|
| 419 | | - priv->eee_active = 1; |
|---|
| 420 | | - timer_setup(&priv->eee_ctrl_timer, |
|---|
| 421 | | - stmmac_eee_ctrl_timer, 0); |
|---|
| 422 | | - mod_timer(&priv->eee_ctrl_timer, |
|---|
| 423 | | - STMMAC_LPI_T(eee_timer)); |
|---|
| 424 | | - |
|---|
| 425 | | - stmmac_set_eee_timer(priv, priv->hw, |
|---|
| 426 | | - STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); |
|---|
| 427 | | - } |
|---|
| 428 | | - /* Set HW EEE according to the speed */ |
|---|
| 429 | | - stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); |
|---|
| 430 | | - |
|---|
| 431 | | - ret = true; |
|---|
| 432 | 412 | mutex_unlock(&priv->lock); |
|---|
| 433 | | - |
|---|
| 434 | | - netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); |
|---|
| 413 | + return false; |
|---|
| 435 | 414 | } |
|---|
| 436 | | -out: |
|---|
| 437 | | - return ret; |
|---|
| 415 | + |
|---|
| 416 | + if (priv->eee_active && !priv->eee_enabled) { |
|---|
| 417 | + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); |
|---|
| 418 | + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, |
|---|
| 419 | + eee_tw_timer); |
|---|
| 420 | + } |
|---|
| 421 | + |
|---|
| 422 | + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); |
|---|
| 423 | + |
|---|
| 424 | + mutex_unlock(&priv->lock); |
|---|
| 425 | + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); |
|---|
| 426 | + return true; |
|---|
| 438 | 427 | } |
|---|
| 439 | 428 | |
|---|
| 440 | 429 | /* stmmac_get_tx_hwtstamp - get HW TX timestamps |
|---|
| .. | .. |
|---|
| 449 | 438 | struct dma_desc *p, struct sk_buff *skb) |
|---|
| 450 | 439 | { |
|---|
| 451 | 440 | struct skb_shared_hwtstamps shhwtstamp; |
|---|
| 441 | + bool found = false; |
|---|
| 452 | 442 | u64 ns = 0; |
|---|
| 453 | 443 | |
|---|
| 454 | 444 | if (!priv->hwts_tx_en) |
|---|
| .. | .. |
|---|
| 460 | 450 | |
|---|
| 461 | 451 | /* check tx tstamp status */ |
|---|
| 462 | 452 | if (stmmac_get_tx_timestamp_status(priv, p)) { |
|---|
| 463 | | - /* get the valid tstamp */ |
|---|
| 464 | 453 | stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); |
|---|
| 454 | + found = true; |
|---|
| 455 | + } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { |
|---|
| 456 | + found = true; |
|---|
| 457 | + } |
|---|
| 465 | 458 | |
|---|
| 459 | + if (found) { |
|---|
| 466 | 460 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
|---|
| 467 | 461 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
|---|
| 468 | 462 | |
|---|
| .. | .. |
|---|
| 470 | 464 | /* pass tstamp to stack */ |
|---|
| 471 | 465 | skb_tstamp_tx(skb, &shhwtstamp); |
|---|
| 472 | 466 | } |
|---|
| 473 | | - |
|---|
| 474 | | - return; |
|---|
| 475 | 467 | } |
|---|
| 476 | 468 | |
|---|
| 477 | 469 | /* stmmac_get_rx_hwtstamp - get HW RX timestamps |
|---|
| .. | .. |
|---|
| 508 | 500 | } |
|---|
| 509 | 501 | } |
|---|
| 510 | 502 | |
|---|
| 511 | | -#ifdef CONFIG_STMMAC_PTP |
|---|
| 512 | 503 | /** |
|---|
| 513 | 504 | * stmmac_hwtstamp_set - control hardware timestamping. |
|---|
| 514 | 505 | * @dev: device pointer. |
|---|
| .. | .. |
|---|
| 524 | 515 | { |
|---|
| 525 | 516 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 526 | 517 | struct hwtstamp_config config; |
|---|
| 527 | | - struct timespec64 now; |
|---|
| 528 | | - u64 temp = 0; |
|---|
| 529 | 518 | u32 ptp_v2 = 0; |
|---|
| 530 | 519 | u32 tstamp_all = 0; |
|---|
| 531 | 520 | u32 ptp_over_ipv4_udp = 0; |
|---|
| .. | .. |
|---|
| 534 | 523 | u32 snap_type_sel = 0; |
|---|
| 535 | 524 | u32 ts_master_en = 0; |
|---|
| 536 | 525 | u32 ts_event_en = 0; |
|---|
| 537 | | - u32 sec_inc = 0; |
|---|
| 538 | | - u32 value = 0; |
|---|
| 539 | | - bool xmac; |
|---|
| 540 | | - |
|---|
| 541 | | - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
|---|
| 542 | 526 | |
|---|
| 543 | 527 | if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { |
|---|
| 544 | 528 | netdev_alert(priv->dev, "No support for HW time stamping\n"); |
|---|
| .. | .. |
|---|
| 644 | 628 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
|---|
| 645 | 629 | ptp_v2 = PTP_TCR_TSVER2ENA; |
|---|
| 646 | 630 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
|---|
| 647 | | - ts_event_en = PTP_TCR_TSEVNTENA; |
|---|
| 631 | + if (priv->synopsys_id < DWMAC_CORE_4_10) |
|---|
| 632 | + ts_event_en = PTP_TCR_TSEVNTENA; |
|---|
| 648 | 633 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
|---|
| 649 | 634 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
|---|
| 650 | 635 | ptp_over_ethernet = PTP_TCR_TSIPENA; |
|---|
| .. | .. |
|---|
| 699 | 684 | priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); |
|---|
| 700 | 685 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; |
|---|
| 701 | 686 | |
|---|
| 702 | | - if (!priv->hwts_tx_en && !priv->hwts_rx_en) |
|---|
| 703 | | - stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); |
|---|
| 704 | | - else { |
|---|
| 705 | | - value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | |
|---|
| 706 | | - tstamp_all | ptp_v2 | ptp_over_ethernet | |
|---|
| 707 | | - ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | |
|---|
| 708 | | - ts_master_en | snap_type_sel); |
|---|
| 709 | | - stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); |
|---|
| 687 | + priv->systime_flags = STMMAC_HWTS_ACTIVE; |
|---|
| 710 | 688 | |
|---|
| 711 | | - /* program Sub Second Increment reg */ |
|---|
| 712 | | - stmmac_config_sub_second_increment(priv, |
|---|
| 713 | | - priv->ptpaddr, priv->plat->clk_ptp_rate, |
|---|
| 714 | | - xmac, &sec_inc); |
|---|
| 715 | | - temp = div_u64(1000000000ULL, sec_inc); |
|---|
| 716 | | - |
|---|
| 717 | | - /* Store sub second increment and flags for later use */ |
|---|
| 718 | | - priv->sub_second_inc = sec_inc; |
|---|
| 719 | | - priv->systime_flags = value; |
|---|
| 720 | | - |
|---|
| 721 | | - /* calculate default added value: |
|---|
| 722 | | - * formula is : |
|---|
| 723 | | - * addend = (2^32)/freq_div_ratio; |
|---|
| 724 | | - * where, freq_div_ratio = 1e9ns/sec_inc |
|---|
| 725 | | - */ |
|---|
| 726 | | - temp = (u64)(temp << 32); |
|---|
| 727 | | - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); |
|---|
| 728 | | - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); |
|---|
| 729 | | - |
|---|
| 730 | | - /* initialize system time */ |
|---|
| 731 | | - ktime_get_real_ts64(&now); |
|---|
| 732 | | - |
|---|
| 733 | | - /* lower 32 bits of tv_sec are safe until y2106 */ |
|---|
| 734 | | - stmmac_init_systime(priv, priv->ptpaddr, |
|---|
| 735 | | - (u32)now.tv_sec, now.tv_nsec); |
|---|
| 689 | + if (priv->hwts_tx_en || priv->hwts_rx_en) { |
|---|
| 690 | + priv->systime_flags |= tstamp_all | ptp_v2 | |
|---|
| 691 | + ptp_over_ethernet | ptp_over_ipv6_udp | |
|---|
| 692 | + ptp_over_ipv4_udp | ts_event_en | |
|---|
| 693 | + ts_master_en | snap_type_sel; |
|---|
| 736 | 694 | } |
|---|
| 695 | + |
|---|
| 696 | + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); |
|---|
| 737 | 697 | |
|---|
| 738 | 698 | memcpy(&priv->tstamp_config, &config, sizeof(config)); |
|---|
| 739 | 699 | |
|---|
| .. | .. |
|---|
| 748 | 708 | * a proprietary structure used to pass information to the driver. |
|---|
| 749 | 709 | * Description: |
|---|
| 750 | 710 | * This function obtain the current hardware timestamping settings |
|---|
| 751 | | - as requested. |
|---|
| 711 | + * as requested. |
|---|
| 752 | 712 | */ |
|---|
| 753 | 713 | static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) |
|---|
| 754 | 714 | { |
|---|
| .. | .. |
|---|
| 761 | 721 | return copy_to_user(ifr->ifr_data, config, |
|---|
| 762 | 722 | sizeof(*config)) ? -EFAULT : 0; |
|---|
| 763 | 723 | } |
|---|
| 764 | | -#endif /* CONFIG_STMMAC_PTP */ |
|---|
| 724 | + |
|---|
| 725 | +/** |
|---|
| 726 | + * stmmac_init_tstamp_counter - init hardware timestamping counter |
|---|
| 727 | + * @priv: driver private structure |
|---|
| 728 | + * @systime_flags: timestamping flags |
|---|
| 729 | + * Description: |
|---|
| 730 | + * Initialize hardware counter for packet timestamping. |
|---|
| 731 | + * This is valid as long as the interface is open and not suspended. |
|---|
| 732 | + * Will be rerun after resuming from suspend, case in which the timestamping |
|---|
| 733 | + * flags updated by stmmac_hwtstamp_set() also need to be restored. |
|---|
| 734 | + */ |
|---|
| 735 | +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) |
|---|
| 736 | +{ |
|---|
| 737 | + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
|---|
| 738 | + struct timespec64 now; |
|---|
| 739 | + u32 sec_inc = 0; |
|---|
| 740 | + u64 temp = 0; |
|---|
| 741 | + |
|---|
| 742 | + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
|---|
| 743 | + return -EOPNOTSUPP; |
|---|
| 744 | + |
|---|
| 745 | + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); |
|---|
| 746 | + priv->systime_flags = systime_flags; |
|---|
| 747 | + |
|---|
| 748 | + /* program Sub Second Increment reg */ |
|---|
| 749 | + stmmac_config_sub_second_increment(priv, priv->ptpaddr, |
|---|
| 750 | + priv->plat->clk_ptp_rate, |
|---|
| 751 | + xmac, &sec_inc); |
|---|
| 752 | + temp = div_u64(1000000000ULL, sec_inc); |
|---|
| 753 | + |
|---|
| 754 | + /* Store sub second increment for later use */ |
|---|
| 755 | + priv->sub_second_inc = sec_inc; |
|---|
| 756 | + |
|---|
| 757 | + /* calculate default added value: |
|---|
| 758 | + * formula is : |
|---|
| 759 | + * addend = (2^32)/freq_div_ratio; |
|---|
| 760 | + * where, freq_div_ratio = 1e9ns/sec_inc |
|---|
| 761 | + */ |
|---|
| 762 | + temp = (u64)(temp << 32); |
|---|
| 763 | + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); |
|---|
| 764 | + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); |
|---|
| 765 | + |
|---|
| 766 | + /* initialize system time */ |
|---|
| 767 | + ktime_get_real_ts64(&now); |
|---|
| 768 | + |
|---|
| 769 | + /* lower 32 bits of tv_sec are safe until y2106 */ |
|---|
| 770 | + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); |
|---|
| 771 | + |
|---|
| 772 | + return 0; |
|---|
| 773 | +} |
|---|
| 774 | +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); |
|---|
| 765 | 775 | |
|---|
| 766 | 776 | /** |
|---|
| 767 | 777 | * stmmac_init_ptp - init PTP |
|---|
| .. | .. |
|---|
| 773 | 783 | static int stmmac_init_ptp(struct stmmac_priv *priv) |
|---|
| 774 | 784 | { |
|---|
| 775 | 785 | bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
|---|
| 786 | + int ret; |
|---|
| 776 | 787 | |
|---|
| 777 | | - if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
|---|
| 778 | | - return -EOPNOTSUPP; |
|---|
| 788 | + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); |
|---|
| 789 | + if (ret) |
|---|
| 790 | + return ret; |
|---|
| 779 | 791 | |
|---|
| 780 | 792 | priv->adv_ts = 0; |
|---|
| 781 | 793 | /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ |
|---|
| .. | .. |
|---|
| 795 | 807 | priv->hwts_tx_en = 0; |
|---|
| 796 | 808 | priv->hwts_rx_en = 0; |
|---|
| 797 | 809 | |
|---|
| 798 | | - stmmac_ptp_register(priv); |
|---|
| 799 | | - |
|---|
| 800 | 810 | return 0; |
|---|
| 801 | 811 | } |
|---|
| 802 | 812 | |
|---|
| 803 | 813 | static void stmmac_release_ptp(struct stmmac_priv *priv) |
|---|
| 804 | 814 | { |
|---|
| 805 | | - if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP)) |
|---|
| 806 | | - clk_disable_unprepare(priv->plat->clk_ptp_ref); |
|---|
| 815 | + clk_disable_unprepare(priv->plat->clk_ptp_ref); |
|---|
| 807 | 816 | stmmac_ptp_unregister(priv); |
|---|
| 808 | 817 | } |
|---|
| 809 | 818 | |
|---|
| 810 | 819 | /** |
|---|
| 811 | 820 | * stmmac_mac_flow_ctrl - Configure flow control in all queues |
|---|
| 812 | 821 | * @priv: driver private structure |
|---|
| 822 | + * @duplex: duplex passed to the next function |
|---|
| 813 | 823 | * Description: It is used for configuring the flow control in all queues |
|---|
| 814 | 824 | */ |
|---|
| 815 | 825 | static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) |
|---|
| 816 | 826 | { |
|---|
| 817 | 827 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
|---|
| 818 | 828 | |
|---|
| 819 | | - stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, |
|---|
| 820 | | - priv->pause, tx_cnt); |
|---|
| 829 | + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl & priv->plat->flow_ctrl, |
|---|
| 830 | + priv->pause, tx_cnt); |
|---|
| 821 | 831 | } |
|---|
| 822 | 832 | |
|---|
| 823 | | -/** |
|---|
| 824 | | - * stmmac_adjust_link - adjusts the link parameters |
|---|
| 825 | | - * @dev: net device structure |
|---|
| 826 | | - * Description: this is the helper called by the physical abstraction layer |
|---|
| 827 | | - * drivers to communicate the phy link status. According the speed and duplex |
|---|
| 828 | | - * this driver can invoke registered glue-logic as well. |
|---|
| 829 | | - * It also invoke the eee initialization because it could happen when switch |
|---|
| 830 | | - * on different networks (that are eee capable). |
|---|
| 831 | | - */ |
|---|
| 832 | | -static void stmmac_adjust_link(struct net_device *dev) |
|---|
| 833 | +static void stmmac_validate(struct phylink_config *config, |
|---|
| 834 | + unsigned long *supported, |
|---|
| 835 | + struct phylink_link_state *state) |
|---|
| 833 | 836 | { |
|---|
| 834 | | - struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 835 | | - struct phy_device *phydev = dev->phydev; |
|---|
| 836 | | - bool new_state = false; |
|---|
| 837 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
|---|
| 838 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, }; |
|---|
| 839 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
|---|
| 840 | + int tx_cnt = priv->plat->tx_queues_to_use; |
|---|
| 841 | + int max_speed = priv->plat->max_speed; |
|---|
| 837 | 842 | |
|---|
| 838 | | - if (!phydev) |
|---|
| 839 | | - return; |
|---|
| 843 | + phylink_set(mac_supported, 10baseT_Half); |
|---|
| 844 | + phylink_set(mac_supported, 10baseT_Full); |
|---|
| 845 | + phylink_set(mac_supported, 100baseT_Half); |
|---|
| 846 | + phylink_set(mac_supported, 100baseT_Full); |
|---|
| 847 | + phylink_set(mac_supported, 1000baseT_Half); |
|---|
| 848 | + phylink_set(mac_supported, 1000baseT_Full); |
|---|
| 849 | + phylink_set(mac_supported, 1000baseKX_Full); |
|---|
| 850 | + phylink_set(mac_supported, 100baseT1_Full); |
|---|
| 851 | + phylink_set(mac_supported, 1000baseT1_Full); |
|---|
| 840 | 852 | |
|---|
| 841 | | - mutex_lock(&priv->lock); |
|---|
| 853 | + phylink_set(mac_supported, Autoneg); |
|---|
| 854 | + phylink_set(mac_supported, Pause); |
|---|
| 855 | + phylink_set(mac_supported, Asym_Pause); |
|---|
| 856 | + phylink_set_port_modes(mac_supported); |
|---|
| 842 | 857 | |
|---|
| 843 | | - if (phydev->link) { |
|---|
| 844 | | - u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); |
|---|
| 845 | | - |
|---|
| 846 | | - /* Now we make sure that we can be in full duplex mode. |
|---|
| 847 | | - * If not, we operate in half-duplex mode. */ |
|---|
| 848 | | - if (phydev->duplex != priv->oldduplex) { |
|---|
| 849 | | - new_state = true; |
|---|
| 850 | | - if (!phydev->duplex) |
|---|
| 851 | | - ctrl &= ~priv->hw->link.duplex; |
|---|
| 852 | | - else |
|---|
| 853 | | - ctrl |= priv->hw->link.duplex; |
|---|
| 854 | | - priv->oldduplex = phydev->duplex; |
|---|
| 858 | + /* Cut down 1G if asked to */ |
|---|
| 859 | + if ((max_speed > 0) && (max_speed < 1000)) { |
|---|
| 860 | + phylink_set(mask, 1000baseT_Full); |
|---|
| 861 | + phylink_set(mask, 1000baseX_Full); |
|---|
| 862 | + } else if (priv->plat->has_xgmac) { |
|---|
| 863 | + if (!max_speed || (max_speed >= 2500)) { |
|---|
| 864 | + phylink_set(mac_supported, 2500baseT_Full); |
|---|
| 865 | + phylink_set(mac_supported, 2500baseX_Full); |
|---|
| 855 | 866 | } |
|---|
| 856 | | - /* Flow Control operation */ |
|---|
| 857 | | - if (phydev->pause) |
|---|
| 858 | | - stmmac_mac_flow_ctrl(priv, phydev->duplex); |
|---|
| 859 | | - |
|---|
| 860 | | - if (phydev->speed != priv->speed) { |
|---|
| 861 | | - new_state = true; |
|---|
| 862 | | - ctrl &= ~priv->hw->link.speed_mask; |
|---|
| 863 | | - switch (phydev->speed) { |
|---|
| 864 | | - case SPEED_1000: |
|---|
| 865 | | - ctrl |= priv->hw->link.speed1000; |
|---|
| 866 | | - break; |
|---|
| 867 | | - case SPEED_100: |
|---|
| 868 | | - ctrl |= priv->hw->link.speed100; |
|---|
| 869 | | - break; |
|---|
| 870 | | - case SPEED_10: |
|---|
| 871 | | - ctrl |= priv->hw->link.speed10; |
|---|
| 872 | | - break; |
|---|
| 873 | | - default: |
|---|
| 874 | | - netif_warn(priv, link, priv->dev, |
|---|
| 875 | | - "broken speed: %d\n", phydev->speed); |
|---|
| 876 | | - phydev->speed = SPEED_UNKNOWN; |
|---|
| 877 | | - break; |
|---|
| 878 | | - } |
|---|
| 879 | | - if (phydev->speed != SPEED_UNKNOWN) |
|---|
| 880 | | - stmmac_hw_fix_mac_speed(priv); |
|---|
| 881 | | - priv->speed = phydev->speed; |
|---|
| 867 | + if (!max_speed || (max_speed >= 5000)) { |
|---|
| 868 | + phylink_set(mac_supported, 5000baseT_Full); |
|---|
| 882 | 869 | } |
|---|
| 883 | | - |
|---|
| 884 | | - writel(ctrl, priv->ioaddr + MAC_CTRL_REG); |
|---|
| 885 | | - |
|---|
| 886 | | - if (!priv->oldlink) { |
|---|
| 887 | | - new_state = true; |
|---|
| 888 | | - priv->oldlink = true; |
|---|
| 870 | + if (!max_speed || (max_speed >= 10000)) { |
|---|
| 871 | + phylink_set(mac_supported, 10000baseSR_Full); |
|---|
| 872 | + phylink_set(mac_supported, 10000baseLR_Full); |
|---|
| 873 | + phylink_set(mac_supported, 10000baseER_Full); |
|---|
| 874 | + phylink_set(mac_supported, 10000baseLRM_Full); |
|---|
| 875 | + phylink_set(mac_supported, 10000baseT_Full); |
|---|
| 876 | + phylink_set(mac_supported, 10000baseKX4_Full); |
|---|
| 877 | + phylink_set(mac_supported, 10000baseKR_Full); |
|---|
| 889 | 878 | } |
|---|
| 890 | | - } else if (priv->oldlink) { |
|---|
| 891 | | - new_state = true; |
|---|
| 892 | | - priv->oldlink = false; |
|---|
| 893 | | - priv->speed = SPEED_UNKNOWN; |
|---|
| 894 | | - priv->oldduplex = DUPLEX_UNKNOWN; |
|---|
| 879 | + if (!max_speed || (max_speed >= 25000)) { |
|---|
| 880 | + phylink_set(mac_supported, 25000baseCR_Full); |
|---|
| 881 | + phylink_set(mac_supported, 25000baseKR_Full); |
|---|
| 882 | + phylink_set(mac_supported, 25000baseSR_Full); |
|---|
| 883 | + } |
|---|
| 884 | + if (!max_speed || (max_speed >= 40000)) { |
|---|
| 885 | + phylink_set(mac_supported, 40000baseKR4_Full); |
|---|
| 886 | + phylink_set(mac_supported, 40000baseCR4_Full); |
|---|
| 887 | + phylink_set(mac_supported, 40000baseSR4_Full); |
|---|
| 888 | + phylink_set(mac_supported, 40000baseLR4_Full); |
|---|
| 889 | + } |
|---|
| 890 | + if (!max_speed || (max_speed >= 50000)) { |
|---|
| 891 | + phylink_set(mac_supported, 50000baseCR2_Full); |
|---|
| 892 | + phylink_set(mac_supported, 50000baseKR2_Full); |
|---|
| 893 | + phylink_set(mac_supported, 50000baseSR2_Full); |
|---|
| 894 | + phylink_set(mac_supported, 50000baseKR_Full); |
|---|
| 895 | + phylink_set(mac_supported, 50000baseSR_Full); |
|---|
| 896 | + phylink_set(mac_supported, 50000baseCR_Full); |
|---|
| 897 | + phylink_set(mac_supported, 50000baseLR_ER_FR_Full); |
|---|
| 898 | + phylink_set(mac_supported, 50000baseDR_Full); |
|---|
| 899 | + } |
|---|
| 900 | + if (!max_speed || (max_speed >= 100000)) { |
|---|
| 901 | + phylink_set(mac_supported, 100000baseKR4_Full); |
|---|
| 902 | + phylink_set(mac_supported, 100000baseSR4_Full); |
|---|
| 903 | + phylink_set(mac_supported, 100000baseCR4_Full); |
|---|
| 904 | + phylink_set(mac_supported, 100000baseLR4_ER4_Full); |
|---|
| 905 | + phylink_set(mac_supported, 100000baseKR2_Full); |
|---|
| 906 | + phylink_set(mac_supported, 100000baseSR2_Full); |
|---|
| 907 | + phylink_set(mac_supported, 100000baseCR2_Full); |
|---|
| 908 | + phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full); |
|---|
| 909 | + phylink_set(mac_supported, 100000baseDR2_Full); |
|---|
| 910 | + } |
|---|
| 895 | 911 | } |
|---|
| 896 | 912 | |
|---|
| 897 | | - if (new_state && netif_msg_link(priv)) |
|---|
| 898 | | - phy_print_status(phydev); |
|---|
| 913 | + /* Half-Duplex can only work with single queue */ |
|---|
| 914 | + if (tx_cnt > 1) { |
|---|
| 915 | + phylink_set(mask, 10baseT_Half); |
|---|
| 916 | + phylink_set(mask, 100baseT_Half); |
|---|
| 917 | + phylink_set(mask, 1000baseT_Half); |
|---|
| 918 | + } |
|---|
| 899 | 919 | |
|---|
| 900 | | - mutex_unlock(&priv->lock); |
|---|
| 920 | + linkmode_and(supported, supported, mac_supported); |
|---|
| 921 | + linkmode_andnot(supported, supported, mask); |
|---|
| 901 | 922 | |
|---|
| 902 | | - if (phydev->is_pseudo_fixed_link) |
|---|
| 903 | | - /* Stop PHY layer to call the hook to adjust the link in case |
|---|
| 904 | | - * of a switch is attached to the stmmac driver. |
|---|
| 905 | | - */ |
|---|
| 906 | | - phydev->irq = PHY_IGNORE_INTERRUPT; |
|---|
| 907 | | - else |
|---|
| 908 | | - /* At this stage, init the EEE if supported. |
|---|
| 909 | | - * Never called in case of fixed_link. |
|---|
| 910 | | - */ |
|---|
| 911 | | - priv->eee_enabled = stmmac_eee_init(priv); |
|---|
| 923 | + linkmode_and(state->advertising, state->advertising, mac_supported); |
|---|
| 924 | + linkmode_andnot(state->advertising, state->advertising, mask); |
|---|
| 925 | + |
|---|
| 926 | + /* If PCS is supported, check which modes it supports. */ |
|---|
| 927 | + stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state); |
|---|
| 912 | 928 | } |
|---|
| 929 | + |
|---|
| 930 | +static void stmmac_mac_pcs_get_state(struct phylink_config *config, |
|---|
| 931 | + struct phylink_link_state *state) |
|---|
| 932 | +{ |
|---|
| 933 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
|---|
| 934 | + |
|---|
| 935 | + state->link = 0; |
|---|
| 936 | + stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state); |
|---|
| 937 | +} |
|---|
| 938 | + |
|---|
| 939 | +static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, |
|---|
| 940 | + const struct phylink_link_state *state) |
|---|
| 941 | +{ |
|---|
| 942 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
|---|
| 943 | + |
|---|
| 944 | + stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state); |
|---|
| 945 | +} |
|---|
| 946 | + |
|---|
/* Autonegotiation restart is not supported by this MAC; the callback
 * exists only to satisfy the phylink_mac_ops contract.
 */
static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}
|---|
| 951 | + |
|---|
| 952 | +static void stmmac_mac_link_down(struct phylink_config *config, |
|---|
| 953 | + unsigned int mode, phy_interface_t interface) |
|---|
| 954 | +{ |
|---|
| 955 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
|---|
| 956 | + |
|---|
| 957 | + stmmac_mac_set(priv, priv->ioaddr, false); |
|---|
| 958 | + priv->eee_active = false; |
|---|
| 959 | + priv->tx_lpi_enabled = false; |
|---|
| 960 | + stmmac_eee_init(priv); |
|---|
| 961 | + stmmac_set_eee_pls(priv, priv->hw, false); |
|---|
| 962 | +} |
|---|
| 963 | + |
|---|
| 964 | +static void stmmac_mac_link_up(struct phylink_config *config, |
|---|
| 965 | + struct phy_device *phy, |
|---|
| 966 | + unsigned int mode, phy_interface_t interface, |
|---|
| 967 | + int speed, int duplex, |
|---|
| 968 | + bool tx_pause, bool rx_pause) |
|---|
| 969 | +{ |
|---|
| 970 | + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
|---|
| 971 | + u32 ctrl; |
|---|
| 972 | + |
|---|
| 973 | + stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface); |
|---|
| 974 | + |
|---|
| 975 | + ctrl = readl(priv->ioaddr + MAC_CTRL_REG); |
|---|
| 976 | + ctrl &= ~priv->hw->link.speed_mask; |
|---|
| 977 | + |
|---|
| 978 | + if (interface == PHY_INTERFACE_MODE_USXGMII) { |
|---|
| 979 | + switch (speed) { |
|---|
| 980 | + case SPEED_10000: |
|---|
| 981 | + ctrl |= priv->hw->link.xgmii.speed10000; |
|---|
| 982 | + break; |
|---|
| 983 | + case SPEED_5000: |
|---|
| 984 | + ctrl |= priv->hw->link.xgmii.speed5000; |
|---|
| 985 | + break; |
|---|
| 986 | + case SPEED_2500: |
|---|
| 987 | + ctrl |= priv->hw->link.xgmii.speed2500; |
|---|
| 988 | + break; |
|---|
| 989 | + default: |
|---|
| 990 | + return; |
|---|
| 991 | + } |
|---|
| 992 | + } else if (interface == PHY_INTERFACE_MODE_XLGMII) { |
|---|
| 993 | + switch (speed) { |
|---|
| 994 | + case SPEED_100000: |
|---|
| 995 | + ctrl |= priv->hw->link.xlgmii.speed100000; |
|---|
| 996 | + break; |
|---|
| 997 | + case SPEED_50000: |
|---|
| 998 | + ctrl |= priv->hw->link.xlgmii.speed50000; |
|---|
| 999 | + break; |
|---|
| 1000 | + case SPEED_40000: |
|---|
| 1001 | + ctrl |= priv->hw->link.xlgmii.speed40000; |
|---|
| 1002 | + break; |
|---|
| 1003 | + case SPEED_25000: |
|---|
| 1004 | + ctrl |= priv->hw->link.xlgmii.speed25000; |
|---|
| 1005 | + break; |
|---|
| 1006 | + case SPEED_10000: |
|---|
| 1007 | + ctrl |= priv->hw->link.xgmii.speed10000; |
|---|
| 1008 | + break; |
|---|
| 1009 | + case SPEED_2500: |
|---|
| 1010 | + ctrl |= priv->hw->link.speed2500; |
|---|
| 1011 | + break; |
|---|
| 1012 | + case SPEED_1000: |
|---|
| 1013 | + ctrl |= priv->hw->link.speed1000; |
|---|
| 1014 | + break; |
|---|
| 1015 | + default: |
|---|
| 1016 | + return; |
|---|
| 1017 | + } |
|---|
| 1018 | + } else { |
|---|
| 1019 | + switch (speed) { |
|---|
| 1020 | + case SPEED_2500: |
|---|
| 1021 | + ctrl |= priv->hw->link.speed2500; |
|---|
| 1022 | + break; |
|---|
| 1023 | + case SPEED_1000: |
|---|
| 1024 | + ctrl |= priv->hw->link.speed1000; |
|---|
| 1025 | + break; |
|---|
| 1026 | + case SPEED_100: |
|---|
| 1027 | + ctrl |= priv->hw->link.speed100; |
|---|
| 1028 | + break; |
|---|
| 1029 | + case SPEED_10: |
|---|
| 1030 | + ctrl |= priv->hw->link.speed10; |
|---|
| 1031 | + break; |
|---|
| 1032 | + default: |
|---|
| 1033 | + return; |
|---|
| 1034 | + } |
|---|
| 1035 | + } |
|---|
| 1036 | + |
|---|
| 1037 | + priv->speed = speed; |
|---|
| 1038 | + |
|---|
| 1039 | + if (priv->plat->fix_mac_speed) |
|---|
| 1040 | + priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); |
|---|
| 1041 | + |
|---|
| 1042 | + if (!duplex) |
|---|
| 1043 | + ctrl &= ~priv->hw->link.duplex; |
|---|
| 1044 | + else |
|---|
| 1045 | + ctrl |= priv->hw->link.duplex; |
|---|
| 1046 | + |
|---|
| 1047 | + /* Flow Control operation */ |
|---|
| 1048 | + if (rx_pause && tx_pause) |
|---|
| 1049 | + priv->flow_ctrl = FLOW_AUTO; |
|---|
| 1050 | + else if (rx_pause && !tx_pause) |
|---|
| 1051 | + priv->flow_ctrl = FLOW_RX; |
|---|
| 1052 | + else if (!rx_pause && tx_pause) |
|---|
| 1053 | + priv->flow_ctrl = FLOW_TX; |
|---|
| 1054 | + else |
|---|
| 1055 | + priv->flow_ctrl = FLOW_OFF; |
|---|
| 1056 | + |
|---|
| 1057 | + stmmac_mac_flow_ctrl(priv, duplex); |
|---|
| 1058 | + |
|---|
| 1059 | + writel(ctrl, priv->ioaddr + MAC_CTRL_REG); |
|---|
| 1060 | + |
|---|
| 1061 | + stmmac_mac_set(priv, priv->ioaddr, true); |
|---|
| 1062 | + if (phy && priv->dma_cap.eee) { |
|---|
| 1063 | + priv->eee_active = phy_init_eee(phy, 1) >= 0; |
|---|
| 1064 | + priv->eee_enabled = stmmac_eee_init(priv); |
|---|
| 1065 | + priv->tx_lpi_enabled = priv->eee_enabled; |
|---|
| 1066 | + stmmac_set_eee_pls(priv, priv->hw, true); |
|---|
| 1067 | + } |
|---|
| 1068 | +} |
|---|
| 1069 | + |
|---|
| 1070 | +static const struct phylink_mac_ops stmmac_phylink_mac_ops = { |
|---|
| 1071 | + .validate = stmmac_validate, |
|---|
| 1072 | + .mac_pcs_get_state = stmmac_mac_pcs_get_state, |
|---|
| 1073 | + .mac_config = stmmac_mac_config, |
|---|
| 1074 | + .mac_an_restart = stmmac_mac_an_restart, |
|---|
| 1075 | + .mac_link_down = stmmac_mac_link_down, |
|---|
| 1076 | + .mac_link_up = stmmac_mac_link_up, |
|---|
| 1077 | +}; |
|---|
| 913 | 1078 | |
|---|
| 914 | 1079 | /** |
|---|
| 915 | 1080 | * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported |
|---|
| .. | .. |
|---|
| 936 | 1101 | } |
|---|
| 937 | 1102 | } |
|---|
| 938 | 1103 | |
|---|
| 1104 | +static void rtl8211F_led_control(struct phy_device *phydev) |
|---|
| 1105 | +{ |
|---|
| 1106 | + printk("ben debug:rtl8211F_led_control...1 \n"); |
|---|
| 1107 | + |
|---|
| 1108 | + if(!phydev) return; |
|---|
| 1109 | + if(phydev->phy_id!=0x001cc916) return; /* only for 8211E*/ |
|---|
| 1110 | + |
|---|
| 1111 | + /*switch to extension page44*/ |
|---|
| 1112 | + phy_write(phydev, 31, 0x0d04); |
|---|
| 1113 | +//add hc 1000M --> orange |
|---|
| 1114 | +// 100M --> green |
|---|
| 1115 | + phy_write(phydev, 16, 0x6D02); |
|---|
| 1116 | +//add hc 1000M&100M --> green |
|---|
| 1117 | +// phy_write(phydev, 16, 0x6C0A); |
|---|
| 1118 | + printk("ben debug:rtl8211F_led_control...2 \n"); |
|---|
| 1119 | +} |
|---|
| 1120 | + |
|---|
| 939 | 1121 | /** |
|---|
| 940 | 1122 | * stmmac_init_phy - PHY initialization |
|---|
| 941 | 1123 | * @dev: net device structure |
|---|
| .. | .. |
|---|
| 947 | 1129 | static int stmmac_init_phy(struct net_device *dev) |
|---|
| 948 | 1130 | { |
|---|
| 949 | 1131 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 950 | | - u32 tx_cnt = priv->plat->tx_queues_to_use; |
|---|
| 951 | | - struct phy_device *phydev; |
|---|
| 952 | | - char phy_id_fmt[MII_BUS_ID_SIZE + 3]; |
|---|
| 953 | | - char bus_id[MII_BUS_ID_SIZE]; |
|---|
| 954 | | - int interface = priv->plat->interface; |
|---|
| 955 | | - int max_speed = priv->plat->max_speed; |
|---|
| 956 | | - priv->oldlink = false; |
|---|
| 957 | | - priv->speed = SPEED_UNKNOWN; |
|---|
| 958 | | - priv->oldduplex = DUPLEX_UNKNOWN; |
|---|
| 1132 | + struct device_node *node; |
|---|
| 1133 | + int ret; |
|---|
| 959 | 1134 | |
|---|
| 960 | 1135 | if (priv->plat->integrated_phy_power) |
|---|
| 961 | | - priv->plat->integrated_phy_power(priv->plat->bsp_priv, true); |
|---|
| 1136 | + ret = priv->plat->integrated_phy_power(priv->plat->bsp_priv, true); |
|---|
| 962 | 1137 | |
|---|
| 963 | | - if (priv->plat->phy_node) { |
|---|
| 964 | | - phydev = of_phy_connect(dev, priv->plat->phy_node, |
|---|
| 965 | | - &stmmac_adjust_link, 0, interface); |
|---|
| 966 | | - } else { |
|---|
| 967 | | - snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", |
|---|
| 968 | | - priv->plat->bus_id); |
|---|
| 1138 | + node = priv->plat->phylink_node; |
|---|
| 969 | 1139 | |
|---|
| 970 | | - snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, |
|---|
| 971 | | - priv->plat->phy_addr); |
|---|
| 972 | | - netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__, |
|---|
| 973 | | - phy_id_fmt); |
|---|
| 1140 | + if (node) |
|---|
| 1141 | + ret = phylink_of_phy_connect(priv->phylink, node, 0); |
|---|
| 974 | 1142 | |
|---|
| 975 | | - phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, |
|---|
| 976 | | - interface); |
|---|
| 977 | | - } |
|---|
| 1143 | + /* Some DT bindings do not set-up the PHY handle. Let's try to |
|---|
| 1144 | + * manually parse it |
|---|
| 1145 | + */ |
|---|
| 1146 | + if (!node || ret) { |
|---|
| 1147 | + int addr = priv->plat->phy_addr; |
|---|
| 1148 | + struct phy_device *phydev; |
|---|
| 978 | 1149 | |
|---|
| 979 | | - if (IS_ERR_OR_NULL(phydev)) { |
|---|
| 980 | | - netdev_err(priv->dev, "Could not attach to PHY\n"); |
|---|
| 981 | | - if (!phydev) |
|---|
| 1150 | + phydev = mdiobus_get_phy(priv->mii, addr); |
|---|
| 1151 | + if (!phydev) { |
|---|
| 1152 | + netdev_err(priv->dev, "no phy at addr %d\n", addr); |
|---|
| 982 | 1153 | return -ENODEV; |
|---|
| 1154 | + } |
|---|
| 983 | 1155 | |
|---|
| 984 | | - return PTR_ERR(phydev); |
|---|
| 1156 | + rtl8211F_led_control(phydev); |
|---|
| 1157 | + ret = phylink_connect_phy(priv->phylink, phydev); |
|---|
| 985 | 1158 | } |
|---|
| 986 | 1159 | |
|---|
| 987 | | - /* Stop Advertising 1000BASE Capability if interface is not GMII */ |
|---|
| 988 | | - if ((interface == PHY_INTERFACE_MODE_MII) || |
|---|
| 989 | | - (interface == PHY_INTERFACE_MODE_RMII) || |
|---|
| 990 | | - (max_speed < 1000 && max_speed > 0)) |
|---|
| 991 | | - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | |
|---|
| 992 | | - SUPPORTED_1000baseT_Full); |
|---|
| 1160 | + if (!priv->plat->pmt) { |
|---|
| 1161 | + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
|---|
| 993 | 1162 | |
|---|
| 994 | | - /* |
|---|
| 995 | | - * Half-duplex mode not supported with multiqueue |
|---|
| 996 | | - * half-duplex can only works with single queue |
|---|
| 997 | | - */ |
|---|
| 998 | | - if (tx_cnt > 1) |
|---|
| 999 | | - phydev->supported &= ~(SUPPORTED_1000baseT_Half | |
|---|
| 1000 | | - SUPPORTED_100baseT_Half | |
|---|
| 1001 | | - SUPPORTED_10baseT_Half); |
|---|
| 1002 | | - |
|---|
| 1003 | | - /* |
|---|
| 1004 | | - * Broken HW is sometimes missing the pull-up resistor on the |
|---|
| 1005 | | - * MDIO line, which results in reads to non-existent devices returning |
|---|
| 1006 | | - * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent |
|---|
| 1007 | | - * device as well. |
|---|
| 1008 | | - * Note: phydev->phy_id is the result of reading the UID PHY registers. |
|---|
| 1009 | | - */ |
|---|
| 1010 | | - if (!priv->plat->phy_node && phydev->phy_id == 0) { |
|---|
| 1011 | | - phy_disconnect(phydev); |
|---|
| 1012 | | - return -ENODEV; |
|---|
| 1163 | + phylink_ethtool_get_wol(priv->phylink, &wol); |
|---|
| 1164 | + device_set_wakeup_capable(priv->device, !!wol.supported); |
|---|
| 1013 | 1165 | } |
|---|
| 1166 | + return ret; |
|---|
| 1167 | +} |
|---|
| 1014 | 1168 | |
|---|
| 1015 | | - /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid |
|---|
| 1016 | | - * subsequent PHY polling, make sure we force a link transition if |
|---|
| 1017 | | - * we have a UP/DOWN/UP transition |
|---|
| 1018 | | - */ |
|---|
| 1019 | | - if (phydev->is_pseudo_fixed_link) |
|---|
| 1020 | | - phydev->irq = PHY_POLL; |
|---|
| 1169 | +static int stmmac_phy_setup(struct stmmac_priv *priv) |
|---|
| 1170 | +{ |
|---|
| 1171 | + struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); |
|---|
| 1172 | + int mode = priv->plat->phy_interface; |
|---|
| 1173 | + struct phylink *phylink; |
|---|
| 1021 | 1174 | |
|---|
| 1022 | | - phy_attached_info(phydev); |
|---|
| 1175 | + priv->phylink_config.dev = &priv->dev->dev; |
|---|
| 1176 | + priv->phylink_config.type = PHYLINK_NETDEV; |
|---|
| 1177 | + priv->phylink_config.pcs_poll = true; |
|---|
| 1178 | + |
|---|
| 1179 | + if (!fwnode) |
|---|
| 1180 | + fwnode = dev_fwnode(priv->device); |
|---|
| 1181 | + |
|---|
| 1182 | + phylink = phylink_create(&priv->phylink_config, fwnode, |
|---|
| 1183 | + mode, &stmmac_phylink_mac_ops); |
|---|
| 1184 | + if (IS_ERR(phylink)) |
|---|
| 1185 | + return PTR_ERR(phylink); |
|---|
| 1186 | + |
|---|
| 1187 | + priv->phylink = phylink; |
|---|
| 1023 | 1188 | return 0; |
|---|
| 1024 | 1189 | } |
|---|
| 1025 | 1190 | |
|---|
| 1026 | 1191 | static void stmmac_display_rx_rings(struct stmmac_priv *priv) |
|---|
| 1027 | 1192 | { |
|---|
| 1028 | 1193 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
|---|
| 1194 | + unsigned int desc_size; |
|---|
| 1029 | 1195 | void *head_rx; |
|---|
| 1030 | 1196 | u32 queue; |
|---|
| 1031 | 1197 | |
|---|
| .. | .. |
|---|
| 1035 | 1201 | |
|---|
| 1036 | 1202 | pr_info("\tRX Queue %u rings\n", queue); |
|---|
| 1037 | 1203 | |
|---|
| 1038 | | - if (priv->extend_desc) |
|---|
| 1204 | + if (priv->extend_desc) { |
|---|
| 1039 | 1205 | head_rx = (void *)rx_q->dma_erx; |
|---|
| 1040 | | - else |
|---|
| 1206 | + desc_size = sizeof(struct dma_extended_desc); |
|---|
| 1207 | + } else { |
|---|
| 1041 | 1208 | head_rx = (void *)rx_q->dma_rx; |
|---|
| 1209 | + desc_size = sizeof(struct dma_desc); |
|---|
| 1210 | + } |
|---|
| 1042 | 1211 | |
|---|
| 1043 | 1212 | /* Display RX ring */ |
|---|
| 1044 | | - stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); |
|---|
| 1213 | + stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, |
|---|
| 1214 | + rx_q->dma_rx_phy, desc_size); |
|---|
| 1045 | 1215 | } |
|---|
| 1046 | 1216 | } |
|---|
| 1047 | 1217 | |
|---|
| 1048 | 1218 | static void stmmac_display_tx_rings(struct stmmac_priv *priv) |
|---|
| 1049 | 1219 | { |
|---|
| 1050 | 1220 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
|---|
| 1221 | + unsigned int desc_size; |
|---|
| 1051 | 1222 | void *head_tx; |
|---|
| 1052 | 1223 | u32 queue; |
|---|
| 1053 | 1224 | |
|---|
| .. | .. |
|---|
| 1057 | 1228 | |
|---|
| 1058 | 1229 | pr_info("\tTX Queue %d rings\n", queue); |
|---|
| 1059 | 1230 | |
|---|
| 1060 | | - if (priv->extend_desc) |
|---|
| 1231 | + if (priv->extend_desc) { |
|---|
| 1061 | 1232 | head_tx = (void *)tx_q->dma_etx; |
|---|
| 1062 | | - else |
|---|
| 1233 | + desc_size = sizeof(struct dma_extended_desc); |
|---|
| 1234 | + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { |
|---|
| 1235 | + head_tx = (void *)tx_q->dma_entx; |
|---|
| 1236 | + desc_size = sizeof(struct dma_edesc); |
|---|
| 1237 | + } else { |
|---|
| 1063 | 1238 | head_tx = (void *)tx_q->dma_tx; |
|---|
| 1239 | + desc_size = sizeof(struct dma_desc); |
|---|
| 1240 | + } |
|---|
| 1064 | 1241 | |
|---|
| 1065 | | - stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); |
|---|
| 1242 | + stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, |
|---|
| 1243 | + tx_q->dma_tx_phy, desc_size); |
|---|
| 1066 | 1244 | } |
|---|
| 1067 | 1245 | } |
|---|
| 1068 | 1246 | |
|---|
| .. | .. |
|---|
| 1106 | 1284 | int i; |
|---|
| 1107 | 1285 | |
|---|
| 1108 | 1286 | /* Clear the RX descriptors */ |
|---|
| 1109 | | - for (i = 0; i < DMA_RX_SIZE; i++) |
|---|
| 1287 | + for (i = 0; i < priv->dma_rx_size; i++) |
|---|
| 1110 | 1288 | if (priv->extend_desc) |
|---|
| 1111 | 1289 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, |
|---|
| 1112 | 1290 | priv->use_riwt, priv->mode, |
|---|
| 1113 | | - (i == DMA_RX_SIZE - 1), |
|---|
| 1291 | + (i == priv->dma_rx_size - 1), |
|---|
| 1114 | 1292 | priv->dma_buf_sz); |
|---|
| 1115 | 1293 | else |
|---|
| 1116 | 1294 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], |
|---|
| 1117 | 1295 | priv->use_riwt, priv->mode, |
|---|
| 1118 | | - (i == DMA_RX_SIZE - 1), |
|---|
| 1296 | + (i == priv->dma_rx_size - 1), |
|---|
| 1119 | 1297 | priv->dma_buf_sz); |
|---|
| 1120 | 1298 | } |
|---|
| 1121 | 1299 | |
|---|
| .. | .. |
|---|
| 1132 | 1310 | int i; |
|---|
| 1133 | 1311 | |
|---|
| 1134 | 1312 | /* Clear the TX descriptors */ |
|---|
| 1135 | | - for (i = 0; i < DMA_TX_SIZE; i++) |
|---|
| 1313 | + for (i = 0; i < priv->dma_tx_size; i++) { |
|---|
| 1314 | + int last = (i == (priv->dma_tx_size - 1)); |
|---|
| 1315 | + struct dma_desc *p; |
|---|
| 1316 | + |
|---|
| 1136 | 1317 | if (priv->extend_desc) |
|---|
| 1137 | | - stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, |
|---|
| 1138 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
|---|
| 1318 | + p = &tx_q->dma_etx[i].basic; |
|---|
| 1319 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 1320 | + p = &tx_q->dma_entx[i].basic; |
|---|
| 1139 | 1321 | else |
|---|
| 1140 | | - stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], |
|---|
| 1141 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
|---|
| 1322 | + p = &tx_q->dma_tx[i]; |
|---|
| 1323 | + |
|---|
| 1324 | + stmmac_init_tx_desc(priv, p, priv->mode, last); |
|---|
| 1325 | + } |
|---|
| 1142 | 1326 | } |
|---|
| 1143 | 1327 | |
|---|
| 1144 | 1328 | /** |
|---|
| .. | .. |
|---|
| 1176 | 1360 | int i, gfp_t flags, u32 queue) |
|---|
| 1177 | 1361 | { |
|---|
| 1178 | 1362 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
|---|
| 1179 | | - struct sk_buff *skb; |
|---|
| 1363 | + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
|---|
| 1364 | + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
|---|
| 1180 | 1365 | |
|---|
| 1181 | | - skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); |
|---|
| 1182 | | - if (!skb) { |
|---|
| 1183 | | - netdev_err(priv->dev, |
|---|
| 1184 | | - "%s: Rx init fails; skb is NULL\n", __func__); |
|---|
| 1366 | + if (priv->dma_cap.addr64 <= 32) |
|---|
| 1367 | + gfp |= GFP_DMA32; |
|---|
| 1368 | + |
|---|
| 1369 | + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
|---|
| 1370 | + if (!buf->page) |
|---|
| 1185 | 1371 | return -ENOMEM; |
|---|
| 1186 | | - } |
|---|
| 1187 | | - rx_q->rx_skbuff[i] = skb; |
|---|
| 1188 | | - rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
|---|
| 1189 | | - priv->dma_buf_sz, |
|---|
| 1190 | | - DMA_FROM_DEVICE); |
|---|
| 1191 | | - if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) { |
|---|
| 1192 | | - netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); |
|---|
| 1193 | | - dev_kfree_skb_any(skb); |
|---|
| 1194 | | - return -EINVAL; |
|---|
| 1372 | + |
|---|
| 1373 | + if (priv->sph) { |
|---|
| 1374 | + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
|---|
| 1375 | + if (!buf->sec_page) |
|---|
| 1376 | + return -ENOMEM; |
|---|
| 1377 | + |
|---|
| 1378 | + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); |
|---|
| 1379 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); |
|---|
| 1380 | + } else { |
|---|
| 1381 | + buf->sec_page = NULL; |
|---|
| 1382 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); |
|---|
| 1195 | 1383 | } |
|---|
| 1196 | 1384 | |
|---|
| 1197 | | - stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]); |
|---|
| 1198 | | - |
|---|
| 1385 | + buf->addr = page_pool_get_dma_addr(buf->page); |
|---|
| 1386 | + stmmac_set_desc_addr(priv, p, buf->addr); |
|---|
| 1199 | 1387 | if (priv->dma_buf_sz == BUF_SIZE_16KiB) |
|---|
| 1200 | 1388 | stmmac_init_desc3(priv, p); |
|---|
| 1201 | 1389 | |
|---|
| .. | .. |
|---|
| 1211 | 1399 | static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) |
|---|
| 1212 | 1400 | { |
|---|
| 1213 | 1401 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
|---|
| 1402 | + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
|---|
| 1214 | 1403 | |
|---|
| 1215 | | - if (rx_q->rx_skbuff[i]) { |
|---|
| 1216 | | - dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i], |
|---|
| 1217 | | - priv->dma_buf_sz, DMA_FROM_DEVICE); |
|---|
| 1218 | | - dev_kfree_skb_any(rx_q->rx_skbuff[i]); |
|---|
| 1219 | | - } |
|---|
| 1220 | | - rx_q->rx_skbuff[i] = NULL; |
|---|
| 1404 | + if (buf->page) |
|---|
| 1405 | + page_pool_put_full_page(rx_q->page_pool, buf->page, false); |
|---|
| 1406 | + buf->page = NULL; |
|---|
| 1407 | + |
|---|
| 1408 | + if (buf->sec_page) |
|---|
| 1409 | + page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); |
|---|
| 1410 | + buf->sec_page = NULL; |
|---|
| 1221 | 1411 | } |
|---|
| 1222 | 1412 | |
|---|
| 1223 | 1413 | /** |
|---|
| .. | .. |
|---|
| 1264 | 1454 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 1265 | 1455 | u32 rx_count = priv->plat->rx_queues_to_use; |
|---|
| 1266 | 1456 | int ret = -ENOMEM; |
|---|
| 1267 | | - int bfsize = 0; |
|---|
| 1268 | 1457 | int queue; |
|---|
| 1269 | 1458 | int i; |
|---|
| 1270 | | - |
|---|
| 1271 | | - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); |
|---|
| 1272 | | - if (bfsize < 0) |
|---|
| 1273 | | - bfsize = 0; |
|---|
| 1274 | | - |
|---|
| 1275 | | - if (bfsize < BUF_SIZE_16KiB) |
|---|
| 1276 | | - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
|---|
| 1277 | | - |
|---|
| 1278 | | - priv->dma_buf_sz = bfsize; |
|---|
| 1279 | 1459 | |
|---|
| 1280 | 1460 | /* RX INITIALIZATION */ |
|---|
| 1281 | 1461 | netif_dbg(priv, probe, priv->dev, |
|---|
| .. | .. |
|---|
| 1288 | 1468 | "(%s) dma_rx_phy=0x%08x\n", __func__, |
|---|
| 1289 | 1469 | (u32)rx_q->dma_rx_phy); |
|---|
| 1290 | 1470 | |
|---|
| 1291 | | - for (i = 0; i < DMA_RX_SIZE; i++) { |
|---|
| 1471 | + stmmac_clear_rx_descriptors(priv, queue); |
|---|
| 1472 | + |
|---|
| 1473 | + for (i = 0; i < priv->dma_rx_size; i++) { |
|---|
| 1292 | 1474 | struct dma_desc *p; |
|---|
| 1293 | 1475 | |
|---|
| 1294 | 1476 | if (priv->extend_desc) |
|---|
| .. | .. |
|---|
| 1300 | 1482 | queue); |
|---|
| 1301 | 1483 | if (ret) |
|---|
| 1302 | 1484 | goto err_init_rx_buffers; |
|---|
| 1303 | | - |
|---|
| 1304 | | - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", |
|---|
| 1305 | | - rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, |
|---|
| 1306 | | - (unsigned int)rx_q->rx_skbuff_dma[i]); |
|---|
| 1307 | 1485 | } |
|---|
| 1308 | 1486 | |
|---|
| 1309 | 1487 | rx_q->cur_rx = 0; |
|---|
| 1310 | | - rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); |
|---|
| 1311 | | - |
|---|
| 1312 | | - stmmac_clear_rx_descriptors(priv, queue); |
|---|
| 1488 | + rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); |
|---|
| 1313 | 1489 | |
|---|
| 1314 | 1490 | /* Setup the chained descriptor addresses */ |
|---|
| 1315 | 1491 | if (priv->mode == STMMAC_CHAIN_MODE) { |
|---|
| 1316 | 1492 | if (priv->extend_desc) |
|---|
| 1317 | 1493 | stmmac_mode_init(priv, rx_q->dma_erx, |
|---|
| 1318 | | - rx_q->dma_rx_phy, DMA_RX_SIZE, 1); |
|---|
| 1494 | + rx_q->dma_rx_phy, |
|---|
| 1495 | + priv->dma_rx_size, 1); |
|---|
| 1319 | 1496 | else |
|---|
| 1320 | 1497 | stmmac_mode_init(priv, rx_q->dma_rx, |
|---|
| 1321 | | - rx_q->dma_rx_phy, DMA_RX_SIZE, 0); |
|---|
| 1498 | + rx_q->dma_rx_phy, |
|---|
| 1499 | + priv->dma_rx_size, 0); |
|---|
| 1322 | 1500 | } |
|---|
| 1323 | 1501 | } |
|---|
| 1324 | | - |
|---|
| 1325 | | - buf_sz = bfsize; |
|---|
| 1326 | 1502 | |
|---|
| 1327 | 1503 | return 0; |
|---|
| 1328 | 1504 | |
|---|
| .. | .. |
|---|
| 1334 | 1510 | if (queue == 0) |
|---|
| 1335 | 1511 | break; |
|---|
| 1336 | 1512 | |
|---|
| 1337 | | - i = DMA_RX_SIZE; |
|---|
| 1513 | + i = priv->dma_rx_size; |
|---|
| 1338 | 1514 | queue--; |
|---|
| 1339 | 1515 | } |
|---|
| 1340 | 1516 | |
|---|
| .. | .. |
|---|
| 1366 | 1542 | if (priv->mode == STMMAC_CHAIN_MODE) { |
|---|
| 1367 | 1543 | if (priv->extend_desc) |
|---|
| 1368 | 1544 | stmmac_mode_init(priv, tx_q->dma_etx, |
|---|
| 1369 | | - tx_q->dma_tx_phy, DMA_TX_SIZE, 1); |
|---|
| 1370 | | - else |
|---|
| 1545 | + tx_q->dma_tx_phy, |
|---|
| 1546 | + priv->dma_tx_size, 1); |
|---|
| 1547 | + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) |
|---|
| 1371 | 1548 | stmmac_mode_init(priv, tx_q->dma_tx, |
|---|
| 1372 | | - tx_q->dma_tx_phy, DMA_TX_SIZE, 0); |
|---|
| 1549 | + tx_q->dma_tx_phy, |
|---|
| 1550 | + priv->dma_tx_size, 0); |
|---|
| 1373 | 1551 | } |
|---|
| 1374 | 1552 | |
|---|
| 1375 | | - for (i = 0; i < DMA_TX_SIZE; i++) { |
|---|
| 1553 | + for (i = 0; i < priv->dma_tx_size; i++) { |
|---|
| 1376 | 1554 | struct dma_desc *p; |
|---|
| 1377 | 1555 | if (priv->extend_desc) |
|---|
| 1378 | 1556 | p = &((tx_q->dma_etx + i)->basic); |
|---|
| 1557 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 1558 | + p = &((tx_q->dma_entx + i)->basic); |
|---|
| 1379 | 1559 | else |
|---|
| 1380 | 1560 | p = tx_q->dma_tx + i; |
|---|
| 1381 | 1561 | |
|---|
| .. | .. |
|---|
| 1434 | 1614 | { |
|---|
| 1435 | 1615 | int i; |
|---|
| 1436 | 1616 | |
|---|
| 1437 | | - for (i = 0; i < DMA_RX_SIZE; i++) |
|---|
| 1617 | + for (i = 0; i < priv->dma_rx_size; i++) |
|---|
| 1438 | 1618 | stmmac_free_rx_buffer(priv, queue, i); |
|---|
| 1439 | 1619 | } |
|---|
| 1440 | 1620 | |
|---|
| .. | .. |
|---|
| 1447 | 1627 | { |
|---|
| 1448 | 1628 | int i; |
|---|
| 1449 | 1629 | |
|---|
| 1450 | | - for (i = 0; i < DMA_TX_SIZE; i++) |
|---|
| 1630 | + for (i = 0; i < priv->dma_tx_size; i++) |
|---|
| 1451 | 1631 | stmmac_free_tx_buffer(priv, queue, i); |
|---|
| 1452 | 1632 | } |
|---|
| 1453 | 1633 | |
|---|
| .. | .. |
|---|
| 1482 | 1662 | |
|---|
| 1483 | 1663 | /* Free DMA regions of consistent memory previously allocated */ |
|---|
| 1484 | 1664 | if (!priv->extend_desc) |
|---|
| 1485 | | - dma_free_coherent(priv->device, |
|---|
| 1486 | | - DMA_RX_SIZE * sizeof(struct dma_desc), |
|---|
| 1665 | + dma_free_coherent(priv->device, priv->dma_rx_size * |
|---|
| 1666 | + sizeof(struct dma_desc), |
|---|
| 1487 | 1667 | rx_q->dma_rx, rx_q->dma_rx_phy); |
|---|
| 1488 | 1668 | else |
|---|
| 1489 | | - dma_free_coherent(priv->device, DMA_RX_SIZE * |
|---|
| 1669 | + dma_free_coherent(priv->device, priv->dma_rx_size * |
|---|
| 1490 | 1670 | sizeof(struct dma_extended_desc), |
|---|
| 1491 | 1671 | rx_q->dma_erx, rx_q->dma_rx_phy); |
|---|
| 1492 | 1672 | |
|---|
| 1493 | | - kfree(rx_q->rx_skbuff_dma); |
|---|
| 1494 | | - kfree(rx_q->rx_skbuff); |
|---|
| 1673 | + kfree(rx_q->buf_pool); |
|---|
| 1674 | + if (rx_q->page_pool) |
|---|
| 1675 | + page_pool_destroy(rx_q->page_pool); |
|---|
| 1495 | 1676 | } |
|---|
| 1496 | 1677 | } |
|---|
| 1497 | 1678 | |
|---|
| .. | .. |
|---|
| 1507 | 1688 | /* Free TX queue resources */ |
|---|
| 1508 | 1689 | for (queue = 0; queue < tx_count; queue++) { |
|---|
| 1509 | 1690 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
|---|
| 1691 | + size_t size; |
|---|
| 1692 | + void *addr; |
|---|
| 1510 | 1693 | |
|---|
| 1511 | 1694 | /* Release the DMA TX socket buffers */ |
|---|
| 1512 | 1695 | dma_free_tx_skbufs(priv, queue); |
|---|
| 1513 | 1696 | |
|---|
| 1514 | | - /* Free DMA regions of consistent memory previously allocated */ |
|---|
| 1515 | | - if (!priv->extend_desc) |
|---|
| 1516 | | - dma_free_coherent(priv->device, |
|---|
| 1517 | | - DMA_TX_SIZE * sizeof(struct dma_desc), |
|---|
| 1518 | | - tx_q->dma_tx, tx_q->dma_tx_phy); |
|---|
| 1519 | | - else |
|---|
| 1520 | | - dma_free_coherent(priv->device, DMA_TX_SIZE * |
|---|
| 1521 | | - sizeof(struct dma_extended_desc), |
|---|
| 1522 | | - tx_q->dma_etx, tx_q->dma_tx_phy); |
|---|
| 1697 | + if (priv->extend_desc) { |
|---|
| 1698 | + size = sizeof(struct dma_extended_desc); |
|---|
| 1699 | + addr = tx_q->dma_etx; |
|---|
| 1700 | + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { |
|---|
| 1701 | + size = sizeof(struct dma_edesc); |
|---|
| 1702 | + addr = tx_q->dma_entx; |
|---|
| 1703 | + } else { |
|---|
| 1704 | + size = sizeof(struct dma_desc); |
|---|
| 1705 | + addr = tx_q->dma_tx; |
|---|
| 1706 | + } |
|---|
| 1707 | + |
|---|
| 1708 | + size *= priv->dma_tx_size; |
|---|
| 1709 | + |
|---|
| 1710 | + dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); |
|---|
| 1523 | 1711 | |
|---|
| 1524 | 1712 | kfree(tx_q->tx_skbuff_dma); |
|---|
| 1525 | 1713 | kfree(tx_q->tx_skbuff); |
|---|
| .. | .. |
|---|
| 1543 | 1731 | /* RX queues buffers and DMA */ |
|---|
| 1544 | 1732 | for (queue = 0; queue < rx_count; queue++) { |
|---|
| 1545 | 1733 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
|---|
| 1734 | + struct page_pool_params pp_params = { 0 }; |
|---|
| 1735 | + unsigned int num_pages; |
|---|
| 1546 | 1736 | |
|---|
| 1547 | 1737 | rx_q->queue_index = queue; |
|---|
| 1548 | 1738 | rx_q->priv_data = priv; |
|---|
| 1549 | 1739 | |
|---|
| 1550 | | - rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, |
|---|
| 1551 | | - sizeof(dma_addr_t), |
|---|
| 1552 | | - GFP_KERNEL); |
|---|
| 1553 | | - if (!rx_q->rx_skbuff_dma) |
|---|
| 1554 | | - goto err_dma; |
|---|
| 1740 | + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; |
|---|
| 1741 | + pp_params.pool_size = priv->dma_rx_size; |
|---|
| 1742 | + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); |
|---|
| 1743 | + pp_params.order = ilog2(num_pages); |
|---|
| 1744 | + pp_params.nid = dev_to_node(priv->device); |
|---|
| 1745 | + pp_params.dev = priv->device; |
|---|
| 1746 | + pp_params.dma_dir = DMA_FROM_DEVICE; |
|---|
| 1747 | + pp_params.max_len = num_pages * PAGE_SIZE; |
|---|
| 1555 | 1748 | |
|---|
| 1556 | | - rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, |
|---|
| 1557 | | - sizeof(struct sk_buff *), |
|---|
| 1558 | | - GFP_KERNEL); |
|---|
| 1559 | | - if (!rx_q->rx_skbuff) |
|---|
| 1749 | + rx_q->page_pool = page_pool_create(&pp_params); |
|---|
| 1750 | + if (IS_ERR(rx_q->page_pool)) { |
|---|
| 1751 | + ret = PTR_ERR(rx_q->page_pool); |
|---|
| 1752 | + rx_q->page_pool = NULL; |
|---|
| 1753 | + goto err_dma; |
|---|
| 1754 | + } |
|---|
| 1755 | + |
|---|
| 1756 | + rx_q->buf_pool = kcalloc(priv->dma_rx_size, |
|---|
| 1757 | + sizeof(*rx_q->buf_pool), |
|---|
| 1758 | + GFP_KERNEL); |
|---|
| 1759 | + if (!rx_q->buf_pool) |
|---|
| 1560 | 1760 | goto err_dma; |
|---|
| 1561 | 1761 | |
|---|
| 1562 | 1762 | if (priv->extend_desc) { |
|---|
| 1563 | | - rx_q->dma_erx = dma_zalloc_coherent(priv->device, |
|---|
| 1564 | | - DMA_RX_SIZE * |
|---|
| 1565 | | - sizeof(struct |
|---|
| 1566 | | - dma_extended_desc), |
|---|
| 1567 | | - &rx_q->dma_rx_phy, |
|---|
| 1568 | | - GFP_KERNEL); |
|---|
| 1763 | + rx_q->dma_erx = dma_alloc_coherent(priv->device, |
|---|
| 1764 | + priv->dma_rx_size * |
|---|
| 1765 | + sizeof(struct dma_extended_desc), |
|---|
| 1766 | + &rx_q->dma_rx_phy, |
|---|
| 1767 | + GFP_KERNEL); |
|---|
| 1569 | 1768 | if (!rx_q->dma_erx) |
|---|
| 1570 | 1769 | goto err_dma; |
|---|
| 1571 | 1770 | |
|---|
| 1572 | 1771 | } else { |
|---|
| 1573 | | - rx_q->dma_rx = dma_zalloc_coherent(priv->device, |
|---|
| 1574 | | - DMA_RX_SIZE * |
|---|
| 1575 | | - sizeof(struct |
|---|
| 1576 | | - dma_desc), |
|---|
| 1577 | | - &rx_q->dma_rx_phy, |
|---|
| 1578 | | - GFP_KERNEL); |
|---|
| 1772 | + rx_q->dma_rx = dma_alloc_coherent(priv->device, |
|---|
| 1773 | + priv->dma_rx_size * |
|---|
| 1774 | + sizeof(struct dma_desc), |
|---|
| 1775 | + &rx_q->dma_rx_phy, |
|---|
| 1776 | + GFP_KERNEL); |
|---|
| 1579 | 1777 | if (!rx_q->dma_rx) |
|---|
| 1580 | 1778 | goto err_dma; |
|---|
| 1581 | 1779 | } |
|---|
| .. | .. |
|---|
| 1606 | 1804 | /* TX queues buffers and DMA */ |
|---|
| 1607 | 1805 | for (queue = 0; queue < tx_count; queue++) { |
|---|
| 1608 | 1806 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
|---|
| 1807 | + size_t size; |
|---|
| 1808 | + void *addr; |
|---|
| 1609 | 1809 | |
|---|
| 1610 | 1810 | tx_q->queue_index = queue; |
|---|
| 1611 | 1811 | tx_q->priv_data = priv; |
|---|
| 1612 | 1812 | |
|---|
| 1613 | | - tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, |
|---|
| 1614 | | - sizeof(*tx_q->tx_skbuff_dma), |
|---|
| 1615 | | - GFP_KERNEL); |
|---|
| 1813 | + tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, |
|---|
| 1814 | + sizeof(*tx_q->tx_skbuff_dma), |
|---|
| 1815 | + GFP_KERNEL); |
|---|
| 1616 | 1816 | if (!tx_q->tx_skbuff_dma) |
|---|
| 1617 | 1817 | goto err_dma; |
|---|
| 1618 | 1818 | |
|---|
| 1619 | | - tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, |
|---|
| 1620 | | - sizeof(struct sk_buff *), |
|---|
| 1621 | | - GFP_KERNEL); |
|---|
| 1819 | + tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, |
|---|
| 1820 | + sizeof(struct sk_buff *), |
|---|
| 1821 | + GFP_KERNEL); |
|---|
| 1622 | 1822 | if (!tx_q->tx_skbuff) |
|---|
| 1623 | 1823 | goto err_dma; |
|---|
| 1624 | 1824 | |
|---|
| 1625 | | - if (priv->extend_desc) { |
|---|
| 1626 | | - tx_q->dma_etx = dma_zalloc_coherent(priv->device, |
|---|
| 1627 | | - DMA_TX_SIZE * |
|---|
| 1628 | | - sizeof(struct |
|---|
| 1629 | | - dma_extended_desc), |
|---|
| 1630 | | - &tx_q->dma_tx_phy, |
|---|
| 1631 | | - GFP_KERNEL); |
|---|
| 1632 | | - if (!tx_q->dma_etx) |
|---|
| 1633 | | - goto err_dma; |
|---|
| 1634 | | - } else { |
|---|
| 1635 | | - tx_q->dma_tx = dma_zalloc_coherent(priv->device, |
|---|
| 1636 | | - DMA_TX_SIZE * |
|---|
| 1637 | | - sizeof(struct |
|---|
| 1638 | | - dma_desc), |
|---|
| 1639 | | - &tx_q->dma_tx_phy, |
|---|
| 1640 | | - GFP_KERNEL); |
|---|
| 1641 | | - if (!tx_q->dma_tx) |
|---|
| 1642 | | - goto err_dma; |
|---|
| 1643 | | - } |
|---|
| 1825 | + if (priv->extend_desc) |
|---|
| 1826 | + size = sizeof(struct dma_extended_desc); |
|---|
| 1827 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 1828 | + size = sizeof(struct dma_edesc); |
|---|
| 1829 | + else |
|---|
| 1830 | + size = sizeof(struct dma_desc); |
|---|
| 1831 | + |
|---|
| 1832 | + size *= priv->dma_tx_size; |
|---|
| 1833 | + |
|---|
| 1834 | + addr = dma_alloc_coherent(priv->device, size, |
|---|
| 1835 | + &tx_q->dma_tx_phy, GFP_KERNEL); |
|---|
| 1836 | + if (!addr) |
|---|
| 1837 | + goto err_dma; |
|---|
| 1838 | + |
|---|
| 1839 | + if (priv->extend_desc) |
|---|
| 1840 | + tx_q->dma_etx = addr; |
|---|
| 1841 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 1842 | + tx_q->dma_entx = addr; |
|---|
| 1843 | + else |
|---|
| 1844 | + tx_q->dma_tx = addr; |
|---|
| 1644 | 1845 | } |
|---|
| 1645 | 1846 | |
|---|
| 1646 | 1847 | return 0; |
|---|
| 1647 | 1848 | |
|---|
| 1648 | 1849 | err_dma: |
|---|
| 1649 | 1850 | free_dma_tx_desc_resources(priv); |
|---|
| 1650 | | - |
|---|
| 1651 | 1851 | return ret; |
|---|
| 1652 | 1852 | } |
|---|
| 1653 | 1853 | |
|---|
| .. | .. |
|---|
| 1858 | 2058 | /** |
|---|
| 1859 | 2059 | * stmmac_tx_clean - to manage the transmission completion |
|---|
| 1860 | 2060 | * @priv: driver private structure |
|---|
| 2061 | + * @budget: napi budget limiting this functions packet handling |
|---|
| 1861 | 2062 | * @queue: TX queue index |
|---|
| 1862 | 2063 | * Description: it reclaims the transmit resources after transmission completes. |
|---|
| 1863 | 2064 | */ |
|---|
| .. | .. |
|---|
| 1879 | 2080 | |
|---|
| 1880 | 2081 | if (priv->extend_desc) |
|---|
| 1881 | 2082 | p = (struct dma_desc *)(tx_q->dma_etx + entry); |
|---|
| 2083 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 2084 | + p = &tx_q->dma_entx[entry].basic; |
|---|
| 1882 | 2085 | else |
|---|
| 1883 | 2086 | p = tx_q->dma_tx + entry; |
|---|
| 1884 | 2087 | |
|---|
| .. | .. |
|---|
| 1937 | 2140 | |
|---|
| 1938 | 2141 | stmmac_release_tx_desc(priv, p, priv->mode); |
|---|
| 1939 | 2142 | |
|---|
| 1940 | | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
|---|
| 2143 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); |
|---|
| 1941 | 2144 | } |
|---|
| 1942 | 2145 | tx_q->dirty_tx = entry; |
|---|
| 1943 | 2146 | |
|---|
| .. | .. |
|---|
| 1946 | 2149 | |
|---|
| 1947 | 2150 | if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, |
|---|
| 1948 | 2151 | queue))) && |
|---|
| 1949 | | - stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { |
|---|
| 2152 | + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { |
|---|
| 1950 | 2153 | |
|---|
| 1951 | 2154 | netif_dbg(priv, tx_done, priv->dev, |
|---|
| 1952 | 2155 | "%s: restart transmit\n", __func__); |
|---|
| .. | .. |
|---|
| 1955 | 2158 | |
|---|
| 1956 | 2159 | if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { |
|---|
| 1957 | 2160 | stmmac_enable_eee_mode(priv); |
|---|
| 1958 | | - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
|---|
| 2161 | + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); |
|---|
| 1959 | 2162 | } |
|---|
| 2163 | + |
|---|
| 2164 | + /* We still have pending packets, let's call for a new scheduling */ |
|---|
| 2165 | + if (tx_q->dirty_tx != tx_q->cur_tx) |
|---|
| 2166 | + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); |
|---|
| 1960 | 2167 | |
|---|
| 1961 | 2168 | __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); |
|---|
| 1962 | 2169 | |
|---|
| .. | .. |
|---|
| 1973 | 2180 | static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) |
|---|
| 1974 | 2181 | { |
|---|
| 1975 | 2182 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
|---|
| 1976 | | - int i; |
|---|
| 1977 | 2183 | |
|---|
| 1978 | 2184 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); |
|---|
| 1979 | 2185 | |
|---|
| 1980 | 2186 | stmmac_stop_tx_dma(priv, chan); |
|---|
| 1981 | 2187 | dma_free_tx_skbufs(priv, chan); |
|---|
| 1982 | | - for (i = 0; i < DMA_TX_SIZE; i++) |
|---|
| 1983 | | - if (priv->extend_desc) |
|---|
| 1984 | | - stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, |
|---|
| 1985 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
|---|
| 1986 | | - else |
|---|
| 1987 | | - stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], |
|---|
| 1988 | | - priv->mode, (i == DMA_TX_SIZE - 1)); |
|---|
| 2188 | + stmmac_clear_tx_descriptors(priv, chan); |
|---|
| 1989 | 2189 | tx_q->dirty_tx = 0; |
|---|
| 1990 | 2190 | tx_q->cur_tx = 0; |
|---|
| 1991 | 2191 | tx_q->mss = 0; |
|---|
| 1992 | 2192 | netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); |
|---|
| 2193 | + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
|---|
| 2194 | + tx_q->dma_tx_phy, chan); |
|---|
| 1993 | 2195 | stmmac_start_tx_dma(priv, chan); |
|---|
| 1994 | 2196 | |
|---|
| 1995 | 2197 | priv->dev->stats.tx_errors++; |
|---|
| .. | .. |
|---|
| 2048 | 2250 | int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, |
|---|
| 2049 | 2251 | &priv->xstats, chan); |
|---|
| 2050 | 2252 | struct stmmac_channel *ch = &priv->channel[chan]; |
|---|
| 2051 | | - bool needs_work = false; |
|---|
| 2253 | + unsigned long flags; |
|---|
| 2052 | 2254 | |
|---|
| 2053 | | - if ((status & handle_rx) && ch->has_rx) { |
|---|
| 2054 | | - needs_work = true; |
|---|
| 2055 | | - } else { |
|---|
| 2056 | | - status &= ~handle_rx; |
|---|
| 2255 | + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { |
|---|
| 2256 | + if (napi_schedule_prep(&ch->rx_napi)) { |
|---|
| 2257 | + spin_lock_irqsave(&ch->lock, flags); |
|---|
| 2258 | + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); |
|---|
| 2259 | + spin_unlock_irqrestore(&ch->lock, flags); |
|---|
| 2260 | + __napi_schedule(&ch->rx_napi); |
|---|
| 2261 | + } |
|---|
| 2057 | 2262 | } |
|---|
| 2058 | 2263 | |
|---|
| 2059 | | - if ((status & handle_tx) && ch->has_tx) { |
|---|
| 2060 | | - needs_work = true; |
|---|
| 2061 | | - } else { |
|---|
| 2062 | | - status &= ~handle_tx; |
|---|
| 2063 | | - } |
|---|
| 2064 | | - |
|---|
| 2065 | | - if (needs_work && napi_schedule_prep(&ch->napi)) { |
|---|
| 2066 | | - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); |
|---|
| 2067 | | - __napi_schedule(&ch->napi); |
|---|
| 2264 | + if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { |
|---|
| 2265 | + if (napi_schedule_prep(&ch->tx_napi)) { |
|---|
| 2266 | + spin_lock_irqsave(&ch->lock, flags); |
|---|
| 2267 | + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); |
|---|
| 2268 | + spin_unlock_irqrestore(&ch->lock, flags); |
|---|
| 2269 | + __napi_schedule(&ch->tx_napi); |
|---|
| 2270 | + } |
|---|
| 2068 | 2271 | } |
|---|
| 2069 | 2272 | |
|---|
| 2070 | 2273 | return status; |
|---|
| .. | .. |
|---|
| 2127 | 2330 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | |
|---|
| 2128 | 2331 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; |
|---|
| 2129 | 2332 | |
|---|
| 2130 | | - dwmac_mmc_intr_all_mask(priv->mmcaddr); |
|---|
| 2333 | + stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); |
|---|
| 2131 | 2334 | |
|---|
| 2132 | 2335 | if (priv->dma_cap.rmon) { |
|---|
| 2133 | | - dwmac_mmc_ctrl(priv->mmcaddr, mode); |
|---|
| 2336 | + stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); |
|---|
| 2134 | 2337 | memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); |
|---|
| 2135 | 2338 | } else |
|---|
| 2136 | 2339 | netdev_info(priv->dev, "No MAC Management Counters available\n"); |
|---|
| .. | .. |
|---|
| 2159 | 2362 | */ |
|---|
| 2160 | 2363 | static void stmmac_check_ether_addr(struct stmmac_priv *priv) |
|---|
| 2161 | 2364 | { |
|---|
| 2162 | 2365 | 	if (!is_valid_ether_addr(priv->dev->dev_addr)) { /* FIXME(review): previous revision forced this branch with "if (1)", unconditionally overwriting any user-assigned MAC; the validity guard is restored here */
|---|
| 2163 | 2367 | stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); |
|---|
| 2164 | 2368 | if (likely(priv->plat->get_eth_addr)) |
|---|
| 2165 | 2369 | priv->plat->get_eth_addr(priv->plat->bsp_priv, |
|---|
| .. | .. |
|---|
| 2222 | 2426 | rx_q->dma_rx_phy, chan); |
|---|
| 2223 | 2427 | |
|---|
| 2224 | 2428 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
|---|
| 2225 | | - (DMA_RX_SIZE * sizeof(struct dma_desc)); |
|---|
| 2429 | + (priv->dma_rx_size * |
|---|
| 2430 | + sizeof(struct dma_desc)); |
|---|
| 2226 | 2431 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, |
|---|
| 2227 | 2432 | rx_q->rx_tail_addr, chan); |
|---|
| 2228 | 2433 | } |
|---|
| .. | .. |
|---|
| 2251 | 2456 | |
|---|
| 2252 | 2457 | /** |
|---|
| 2253 | 2458 | * stmmac_tx_timer - mitigation sw timer for tx. |
|---|
| 2254 | | - * @data: data pointer |
|---|
| 2459 | + * @t: data pointer |
|---|
| 2255 | 2460 | * Description: |
|---|
| 2256 | 2461 | * This is the timer handler to directly invoke the stmmac_tx_clean. |
|---|
| 2257 | 2462 | */ |
|---|
| .. | .. |
|---|
| 2263 | 2468 | |
|---|
| 2264 | 2469 | ch = &priv->channel[tx_q->queue_index]; |
|---|
| 2265 | 2470 | |
|---|
| 2266 | | - if (likely(napi_schedule_prep(&ch->napi))) |
|---|
| 2267 | | - __napi_schedule(&ch->napi); |
|---|
| 2471 | + if (likely(napi_schedule_prep(&ch->tx_napi))) { |
|---|
| 2472 | + unsigned long flags; |
|---|
| 2473 | + |
|---|
| 2474 | + spin_lock_irqsave(&ch->lock, flags); |
|---|
| 2475 | + stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); |
|---|
| 2476 | + spin_unlock_irqrestore(&ch->lock, flags); |
|---|
| 2477 | + __napi_schedule(&ch->tx_napi); |
|---|
| 2478 | + } |
|---|
| 2268 | 2479 | } |
|---|
| 2269 | 2480 | |
|---|
| 2270 | 2481 | /** |
|---|
| 2271 | | - * stmmac_init_tx_coalesce - init tx mitigation options. |
|---|
| 2482 | + * stmmac_init_coalesce - init mitigation options. |
|---|
| 2272 | 2483 | * @priv: driver private structure |
|---|
| 2273 | 2484 | * Description: |
|---|
| 2274 | | - * This inits the transmit coalesce parameters: i.e. timer rate, |
|---|
| 2485 | + * This inits the coalesce parameters: i.e. timer rate, |
|---|
| 2275 | 2486 | * timer handler and default threshold used for enabling the |
|---|
| 2276 | 2487 | * interrupt on completion bit. |
|---|
| 2277 | 2488 | */ |
|---|
| 2278 | | -static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) |
|---|
| 2489 | +static void stmmac_init_coalesce(struct stmmac_priv *priv) |
|---|
| 2279 | 2490 | { |
|---|
| 2280 | 2491 | u32 tx_channel_count = priv->plat->tx_queues_to_use; |
|---|
| 2281 | 2492 | u32 chan; |
|---|
| 2282 | 2493 | |
|---|
| 2283 | 2494 | priv->tx_coal_frames = STMMAC_TX_FRAMES; |
|---|
| 2284 | 2495 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; |
|---|
| 2496 | + priv->rx_coal_frames = STMMAC_RX_FRAMES; |
|---|
| 2285 | 2497 | |
|---|
| 2286 | 2498 | for (chan = 0; chan < tx_channel_count; chan++) { |
|---|
| 2287 | 2499 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
|---|
| .. | .. |
|---|
| 2299 | 2511 | /* set TX ring length */ |
|---|
| 2300 | 2512 | for (chan = 0; chan < tx_channels_count; chan++) |
|---|
| 2301 | 2513 | stmmac_set_tx_ring_len(priv, priv->ioaddr, |
|---|
| 2302 | | - (DMA_TX_SIZE - 1), chan); |
|---|
| 2514 | + (priv->dma_tx_size - 1), chan); |
|---|
| 2303 | 2515 | |
|---|
| 2304 | 2516 | /* set RX ring length */ |
|---|
| 2305 | 2517 | for (chan = 0; chan < rx_channels_count; chan++) |
|---|
| 2306 | 2518 | stmmac_set_rx_ring_len(priv, priv->ioaddr, |
|---|
| 2307 | | - (DMA_RX_SIZE - 1), chan); |
|---|
| 2519 | + (priv->dma_rx_size - 1), chan); |
|---|
| 2308 | 2520 | } |
|---|
| 2309 | 2521 | |
|---|
| 2310 | 2522 | /** |
|---|
| .. | .. |
|---|
| 2428 | 2640 | } |
|---|
| 2429 | 2641 | } |
|---|
| 2430 | 2642 | |
|---|
| 2643 | +static void stmmac_mac_config_rss(struct stmmac_priv *priv) |
|---|
| 2644 | +{ |
|---|
| 2645 | + if (!priv->dma_cap.rssen || !priv->plat->rss_en) { |
|---|
| 2646 | + priv->rss.enable = false; |
|---|
| 2647 | + return; |
|---|
| 2648 | + } |
|---|
| 2649 | + |
|---|
| 2650 | + if (priv->dev->features & NETIF_F_RXHASH) |
|---|
| 2651 | + priv->rss.enable = true; |
|---|
| 2652 | + else |
|---|
| 2653 | + priv->rss.enable = false; |
|---|
| 2654 | + |
|---|
| 2655 | + stmmac_rss_configure(priv, priv->hw, &priv->rss, |
|---|
| 2656 | + priv->plat->rx_queues_to_use); |
|---|
| 2657 | +} |
|---|
| 2658 | + |
|---|
| 2431 | 2659 | /** |
|---|
| 2432 | 2660 | * stmmac_mtl_configuration - Configure MTL |
|---|
| 2433 | 2661 | * @priv: driver private structure |
|---|
| .. | .. |
|---|
| 2472 | 2700 | /* Set RX routing */ |
|---|
| 2473 | 2701 | if (rx_queues_count > 1) |
|---|
| 2474 | 2702 | stmmac_mac_config_rx_queues_routing(priv); |
|---|
| 2703 | + |
|---|
| 2704 | + /* Receive Side Scaling */ |
|---|
| 2705 | + if (rx_queues_count > 1) |
|---|
| 2706 | + stmmac_mac_config_rss(priv); |
|---|
| 2475 | 2707 | } |
|---|
| 2476 | 2708 | |
|---|
| 2477 | 2709 | static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) |
|---|
| .. | .. |
|---|
| 2487 | 2719 | /** |
|---|
| 2488 | 2720 | * stmmac_hw_setup - setup mac in a usable state. |
|---|
| 2489 | 2721 | * @dev : pointer to the device structure. |
|---|
| 2722 | + * @ptp_register: register PTP if set |
|---|
| 2490 | 2723 | * Description: |
|---|
| 2491 | 2724 | * this is the main function to setup the HW in a usable state because the |
|---|
| 2492 | 2725 | * dma engine is reset, the core registers are configured (e.g. AXI, |
|---|
| .. | .. |
|---|
| 2496 | 2729 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
|---|
| 2497 | 2730 | * file on failure. |
|---|
| 2498 | 2731 | */ |
|---|
| 2499 | | -static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) |
|---|
| 2732 | +static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) |
|---|
| 2500 | 2733 | { |
|---|
| 2501 | 2734 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 2502 | 2735 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
|---|
| .. | .. |
|---|
| 2552 | 2785 | |
|---|
| 2553 | 2786 | stmmac_mmc_setup(priv); |
|---|
| 2554 | 2787 | |
|---|
| 2555 | | - if (IS_ENABLED(CONFIG_STMMAC_PTP) && init_ptp) { |
|---|
| 2788 | + if (ptp_register) { |
|---|
| 2556 | 2789 | ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
|---|
| 2557 | 2790 | if (ret < 0) |
|---|
| 2558 | | - netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); |
|---|
| 2559 | | - |
|---|
| 2560 | | - ret = stmmac_init_ptp(priv); |
|---|
| 2561 | | - if (ret == -EOPNOTSUPP) |
|---|
| 2562 | | - netdev_warn(priv->dev, "PTP not supported by HW\n"); |
|---|
| 2563 | | - else if (ret) |
|---|
| 2564 | | - netdev_warn(priv->dev, "PTP init failed\n"); |
|---|
| 2791 | + netdev_warn(priv->dev, |
|---|
| 2792 | + "failed to enable PTP reference clock: %pe\n", |
|---|
| 2793 | + ERR_PTR(ret)); |
|---|
| 2565 | 2794 | } |
|---|
| 2566 | 2795 | |
|---|
| 2567 | | - priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
|---|
| 2796 | + ret = stmmac_init_ptp(priv); |
|---|
| 2797 | + if (ret == -EOPNOTSUPP) |
|---|
| 2798 | + netdev_warn(priv->dev, "PTP not supported by HW\n"); |
|---|
| 2799 | + else if (ret) |
|---|
| 2800 | + netdev_warn(priv->dev, "PTP init failed\n"); |
|---|
| 2801 | + else if (ptp_register) |
|---|
| 2802 | + stmmac_ptp_register(priv); |
|---|
| 2803 | + |
|---|
| 2804 | + priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; |
|---|
| 2805 | + |
|---|
| 2806 | + /* Convert the timer from msec to usec */ |
|---|
| 2807 | + if (!priv->tx_lpi_timer) |
|---|
| 2808 | + priv->tx_lpi_timer = eee_timer * 1000; |
|---|
| 2568 | 2809 | |
|---|
| 2569 | 2810 | if (priv->use_riwt) { |
|---|
| 2570 | | - ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); |
|---|
| 2571 | | - if (!ret) |
|---|
| 2572 | | - priv->rx_riwt = MAX_DMA_RIWT; |
|---|
| 2811 | + if (!priv->rx_riwt) |
|---|
| 2812 | + priv->rx_riwt = DEF_DMA_RIWT; |
|---|
| 2813 | + |
|---|
| 2814 | + ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); |
|---|
| 2573 | 2815 | } |
|---|
| 2574 | 2816 | |
|---|
| 2575 | 2817 | if (priv->hw->pcs) |
|---|
| 2576 | | - stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); |
|---|
| 2818 | + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); |
|---|
| 2577 | 2819 | |
|---|
| 2578 | 2820 | /* set TX and RX rings length */ |
|---|
| 2579 | 2821 | stmmac_set_rings_length(priv); |
|---|
| 2580 | 2822 | |
|---|
| 2581 | 2823 | /* Enable TSO */ |
|---|
| 2582 | 2824 | if (priv->tso) { |
|---|
| 2583 | | - for (chan = 0; chan < tx_cnt; chan++) |
|---|
| 2825 | + for (chan = 0; chan < tx_cnt; chan++) { |
|---|
| 2826 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
|---|
| 2827 | + |
|---|
| 2828 | + /* TSO and TBS cannot co-exist */ |
|---|
| 2829 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 2830 | + continue; |
|---|
| 2831 | + |
|---|
| 2584 | 2832 | stmmac_enable_tso(priv, priv->ioaddr, 1, chan); |
|---|
| 2833 | + } |
|---|
| 2585 | 2834 | } |
|---|
| 2835 | + |
|---|
| 2836 | + /* Enable Split Header */ |
|---|
| 2837 | + if (priv->sph && priv->hw->rx_csum) { |
|---|
| 2838 | + for (chan = 0; chan < rx_cnt; chan++) |
|---|
| 2839 | + stmmac_enable_sph(priv, priv->ioaddr, 1, chan); |
|---|
| 2840 | + } |
|---|
| 2841 | + |
|---|
| 2842 | + /* VLAN Tag Insertion */ |
|---|
| 2843 | + if (priv->dma_cap.vlins) |
|---|
| 2844 | + stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); |
|---|
| 2845 | + |
|---|
| 2846 | + /* TBS */ |
|---|
| 2847 | + for (chan = 0; chan < tx_cnt; chan++) { |
|---|
| 2848 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
|---|
| 2849 | + int enable = tx_q->tbs & STMMAC_TBS_AVAIL; |
|---|
| 2850 | + |
|---|
| 2851 | + stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); |
|---|
| 2852 | + } |
|---|
| 2853 | + |
|---|
| 2854 | + /* Configure real RX and TX queues */ |
|---|
| 2855 | + netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); |
|---|
| 2856 | + netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); |
|---|
| 2586 | 2857 | |
|---|
| 2587 | 2858 | /* Start the ball rolling... */ |
|---|
| 2588 | 2859 | stmmac_start_all_dma(priv); |
|---|
| .. | .. |
|---|
| 2594 | 2865 | { |
|---|
| 2595 | 2866 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 2596 | 2867 | |
|---|
| 2597 | | - if (IS_ENABLED(CONFIG_STMMAC_PTP)) |
|---|
| 2598 | | - clk_disable_unprepare(priv->plat->clk_ptp_ref); |
|---|
| 2868 | + clk_disable_unprepare(priv->plat->clk_ptp_ref); |
|---|
| 2599 | 2869 | } |
|---|
| 2600 | 2870 | |
|---|
| 2601 | 2871 | /** |
|---|
| .. | .. |
|---|
| 2610 | 2880 | static int stmmac_open(struct net_device *dev) |
|---|
| 2611 | 2881 | { |
|---|
| 2612 | 2882 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 2883 | + int bfsize = 0; |
|---|
| 2613 | 2884 | u32 chan; |
|---|
| 2614 | 2885 | int ret; |
|---|
| 2615 | 2886 | |
|---|
| 2616 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
|---|
| 2617 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 2618 | | - priv->hw->pcs != STMMAC_PCS_RTBI) { |
|---|
| 2887 | + ret = pm_runtime_get_sync(priv->device); |
|---|
| 2888 | + if (ret < 0) { |
|---|
| 2889 | + pm_runtime_put_noidle(priv->device); |
|---|
| 2890 | + return ret; |
|---|
| 2891 | + } |
|---|
| 2892 | + |
|---|
| 2893 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 2894 | + priv->hw->pcs != STMMAC_PCS_RTBI && |
|---|
| 2895 | + priv->hw->xpcs == NULL) { |
|---|
| 2619 | 2896 | ret = stmmac_init_phy(dev); |
|---|
| 2620 | 2897 | if (ret) { |
|---|
| 2621 | 2898 | netdev_err(priv->dev, |
|---|
| 2622 | 2899 | "%s: Cannot attach to PHY (error: %d)\n", |
|---|
| 2623 | 2900 | __func__, ret); |
|---|
| 2624 | | - return ret; |
|---|
| 2901 | + goto init_phy_error; |
|---|
| 2625 | 2902 | } |
|---|
| 2626 | 2903 | } |
|---|
| 2627 | 2904 | |
|---|
| .. | .. |
|---|
| 2629 | 2906 | memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); |
|---|
| 2630 | 2907 | priv->xstats.threshold = tc; |
|---|
| 2631 | 2908 | |
|---|
| 2632 | | - priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
|---|
| 2909 | + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); |
|---|
| 2910 | + if (bfsize < 0) |
|---|
| 2911 | + bfsize = 0; |
|---|
| 2912 | + |
|---|
| 2913 | + if (bfsize < BUF_SIZE_16KiB) |
|---|
| 2914 | + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
|---|
| 2915 | + |
|---|
| 2916 | + priv->dma_buf_sz = bfsize; |
|---|
| 2917 | + buf_sz = bfsize; |
|---|
| 2918 | + |
|---|
| 2633 | 2919 | priv->rx_copybreak = STMMAC_RX_COPYBREAK; |
|---|
| 2920 | + |
|---|
| 2921 | + if (!priv->dma_tx_size) |
|---|
| 2922 | + priv->dma_tx_size = priv->plat->dma_tx_size ? priv->plat->dma_tx_size : |
|---|
| 2923 | + DMA_DEFAULT_TX_SIZE; |
|---|
| 2924 | + |
|---|
| 2925 | + if (!priv->dma_rx_size) |
|---|
| 2926 | + priv->dma_rx_size = priv->plat->dma_rx_size ? priv->plat->dma_rx_size : |
|---|
| 2927 | + DMA_DEFAULT_RX_SIZE; |
|---|
| 2928 | + |
|---|
| 2929 | + /* Earlier check for TBS */ |
|---|
| 2930 | + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { |
|---|
| 2931 | + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
|---|
| 2932 | + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; |
|---|
| 2933 | + |
|---|
| 2934 | + /* Setup per-TXQ tbs flag before TX descriptor alloc */ |
|---|
| 2935 | + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; |
|---|
| 2936 | + } |
|---|
| 2634 | 2937 | |
|---|
| 2635 | 2938 | ret = alloc_dma_desc_resources(priv); |
|---|
| 2636 | 2939 | if (ret < 0) { |
|---|
| .. | .. |
|---|
| 2646 | 2949 | goto init_error; |
|---|
| 2647 | 2950 | } |
|---|
| 2648 | 2951 | |
|---|
| 2952 | + if (priv->plat->serdes_powerup) { |
|---|
| 2953 | + ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); |
|---|
| 2954 | + if (ret < 0) { |
|---|
| 2955 | + netdev_err(priv->dev, "%s: Serdes powerup failed\n", |
|---|
| 2956 | + __func__); |
|---|
| 2957 | + goto init_error; |
|---|
| 2958 | + } |
|---|
| 2959 | + } |
|---|
| 2960 | + |
|---|
| 2649 | 2961 | ret = stmmac_hw_setup(dev, true); |
|---|
| 2650 | 2962 | if (ret < 0) { |
|---|
| 2651 | 2963 | netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); |
|---|
| 2652 | 2964 | goto init_error; |
|---|
| 2653 | 2965 | } |
|---|
| 2654 | 2966 | |
|---|
| 2655 | | - stmmac_init_tx_coalesce(priv); |
|---|
| 2967 | + stmmac_init_coalesce(priv); |
|---|
| 2656 | 2968 | |
|---|
| 2657 | | - if (dev->phydev) |
|---|
| 2658 | | - phy_start(dev->phydev); |
|---|
| 2969 | + phylink_start(priv->phylink); |
|---|
| 2970 | + /* We may have called phylink_speed_down before */ |
|---|
| 2971 | + phylink_speed_up(priv->phylink); |
|---|
| 2659 | 2972 | |
|---|
| 2660 | 2973 | /* Request the IRQ lines */ |
|---|
| 2661 | 2974 | ret = request_irq(dev->irq, stmmac_interrupt, |
|---|
| .. | .. |
|---|
| 2702 | 3015 | wolirq_error: |
|---|
| 2703 | 3016 | free_irq(dev->irq, dev); |
|---|
| 2704 | 3017 | irq_error: |
|---|
| 2705 | | - if (dev->phydev) |
|---|
| 2706 | | - phy_stop(dev->phydev); |
|---|
| 3018 | + phylink_stop(priv->phylink); |
|---|
| 2707 | 3019 | |
|---|
| 2708 | 3020 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
|---|
| 2709 | 3021 | del_timer_sync(&priv->tx_queue[chan].txtimer); |
|---|
| .. | .. |
|---|
| 2712 | 3024 | init_error: |
|---|
| 2713 | 3025 | free_dma_desc_resources(priv); |
|---|
| 2714 | 3026 | dma_desc_error: |
|---|
| 2715 | | - if (dev->phydev) |
|---|
| 2716 | | - phy_disconnect(dev->phydev); |
|---|
| 2717 | | - |
|---|
| 3027 | + phylink_disconnect_phy(priv->phylink); |
|---|
| 3028 | +init_phy_error: |
|---|
| 3029 | + pm_runtime_put(priv->device); |
|---|
| 2718 | 3030 | return ret; |
|---|
| 2719 | 3031 | } |
|---|
| 2720 | 3032 | |
|---|
| .. | .. |
|---|
| 2729 | 3041 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 2730 | 3042 | u32 chan; |
|---|
| 2731 | 3043 | |
|---|
| 3044 | + if (device_may_wakeup(priv->device)) |
|---|
| 3045 | + phylink_speed_down(priv->phylink, false); |
|---|
| 2732 | 3046 | /* Stop and disconnect the PHY */ |
|---|
| 2733 | | - if (dev->phydev) { |
|---|
| 2734 | | - phy_stop(dev->phydev); |
|---|
| 2735 | | - phy_disconnect(dev->phydev); |
|---|
| 2736 | | - if (priv->plat->integrated_phy_power) |
|---|
| 2737 | | - priv->plat->integrated_phy_power(priv->plat->bsp_priv, |
|---|
| 2738 | | - false); |
|---|
| 2739 | | - } |
|---|
| 3047 | + phylink_stop(priv->phylink); |
|---|
| 3048 | + phylink_disconnect_phy(priv->phylink); |
|---|
| 3049 | + |
|---|
| 3050 | + if (priv->plat->integrated_phy_power) |
|---|
| 3051 | + priv->plat->integrated_phy_power(priv->plat->bsp_priv, false); |
|---|
| 2740 | 3052 | |
|---|
| 2741 | 3053 | stmmac_disable_all_queues(priv); |
|---|
| 2742 | 3054 | |
|---|
| .. | .. |
|---|
| 2764 | 3076 | /* Disable the MAC Rx/Tx */ |
|---|
| 2765 | 3077 | stmmac_mac_set(priv, priv->ioaddr, false); |
|---|
| 2766 | 3078 | |
|---|
| 3079 | + /* Powerdown Serdes if there is */ |
|---|
| 3080 | + if (priv->plat->serdes_powerdown) |
|---|
| 3081 | + priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); |
|---|
| 3082 | + |
|---|
| 2767 | 3083 | netif_carrier_off(dev); |
|---|
| 2768 | 3084 | |
|---|
| 2769 | | - if (IS_ENABLED(CONFIG_STMMAC_PTP)) |
|---|
| 2770 | | - stmmac_release_ptp(priv); |
|---|
| 3085 | + stmmac_release_ptp(priv); |
|---|
| 3086 | + |
|---|
| 3087 | + pm_runtime_put(priv->device); |
|---|
| 2771 | 3088 | |
|---|
| 2772 | 3089 | return 0; |
|---|
| 3090 | +} |
|---|
| 3091 | + |
|---|
| 3092 | +static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, |
|---|
| 3093 | + struct stmmac_tx_queue *tx_q) |
|---|
| 3094 | +{ |
|---|
| 3095 | + u16 tag = 0x0, inner_tag = 0x0; |
|---|
| 3096 | + u32 inner_type = 0x0; |
|---|
| 3097 | + struct dma_desc *p; |
|---|
| 3098 | + |
|---|
| 3099 | + if (!priv->dma_cap.vlins) |
|---|
| 3100 | + return false; |
|---|
| 3101 | + if (!skb_vlan_tag_present(skb)) |
|---|
| 3102 | + return false; |
|---|
| 3103 | + if (skb->vlan_proto == htons(ETH_P_8021AD)) { |
|---|
| 3104 | + inner_tag = skb_vlan_tag_get(skb); |
|---|
| 3105 | + inner_type = STMMAC_VLAN_INSERT; |
|---|
| 3106 | + } |
|---|
| 3107 | + |
|---|
| 3108 | + tag = skb_vlan_tag_get(skb); |
|---|
| 3109 | + |
|---|
| 3110 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3111 | + p = &tx_q->dma_entx[tx_q->cur_tx].basic; |
|---|
| 3112 | + else |
|---|
| 3113 | + p = &tx_q->dma_tx[tx_q->cur_tx]; |
|---|
| 3114 | + |
|---|
| 3115 | + if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) |
|---|
| 3116 | + return false; |
|---|
| 3117 | + |
|---|
| 3118 | + stmmac_set_tx_owner(priv, p); |
|---|
| 3119 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); |
|---|
| 3120 | + return true; |
|---|
| 2773 | 3121 | } |
|---|
| 2774 | 3122 | |
|---|
| 2775 | 3123 | /** |
|---|
| .. | .. |
|---|
| 2777 | 3125 | * @priv: driver private structure |
|---|
| 2778 | 3126 | * @des: buffer start address |
|---|
| 2779 | 3127 | * @total_len: total length to fill in descriptors |
|---|
| 2780 | | - * @last_segmant: condition for the last descriptor |
|---|
| 3128 | + * @last_segment: condition for the last descriptor |
|---|
| 2781 | 3129 | * @queue: TX queue index |
|---|
| 2782 | 3130 | * Description: |
|---|
| 2783 | 3131 | * This function fills descriptor and request new descriptors according to |
|---|
| 2784 | 3132 | * buffer length to fill |
|---|
| 2785 | 3133 | */ |
|---|
| 2786 | | -static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, |
|---|
| 3134 | +static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, |
|---|
| 2787 | 3135 | int total_len, bool last_segment, u32 queue) |
|---|
| 2788 | 3136 | { |
|---|
| 2789 | 3137 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
|---|
| .. | .. |
|---|
| 2794 | 3142 | tmp_len = total_len; |
|---|
| 2795 | 3143 | |
|---|
| 2796 | 3144 | while (tmp_len > 0) { |
|---|
| 2797 | | - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
|---|
| 2798 | | - WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
|---|
| 2799 | | - desc = tx_q->dma_tx + tx_q->cur_tx; |
|---|
| 3145 | + dma_addr_t curr_addr; |
|---|
| 2800 | 3146 | |
|---|
| 2801 | | - desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); |
|---|
| 3147 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, |
|---|
| 3148 | + priv->dma_tx_size); |
|---|
| 3149 | + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
|---|
| 3150 | + |
|---|
| 3151 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3152 | + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
|---|
| 3153 | + else |
|---|
| 3154 | + desc = &tx_q->dma_tx[tx_q->cur_tx]; |
|---|
| 3155 | + |
|---|
| 3156 | + curr_addr = des + (total_len - tmp_len); |
|---|
| 3157 | + if (priv->dma_cap.addr64 <= 32) |
|---|
| 3158 | + desc->des0 = cpu_to_le32(curr_addr); |
|---|
| 3159 | + else |
|---|
| 3160 | + stmmac_set_desc_addr(priv, desc, curr_addr); |
|---|
| 3161 | + |
|---|
| 2802 | 3162 | buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? |
|---|
| 2803 | 3163 | TSO_MAX_BUFF_SIZE : tmp_len; |
|---|
| 2804 | 3164 | |
|---|
| .. | .. |
|---|
| 2842 | 3202 | { |
|---|
| 2843 | 3203 | struct dma_desc *desc, *first, *mss_desc = NULL; |
|---|
| 2844 | 3204 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 3205 | + int desc_size, tmp_pay_len = 0, first_tx; |
|---|
| 2845 | 3206 | int nfrags = skb_shinfo(skb)->nr_frags; |
|---|
| 2846 | 3207 | u32 queue = skb_get_queue_mapping(skb); |
|---|
| 2847 | | - unsigned int first_entry, des; |
|---|
| 2848 | | - u8 proto_hdr_len, hdr; |
|---|
| 3208 | + unsigned int first_entry, tx_packets; |
|---|
| 2849 | 3209 | struct stmmac_tx_queue *tx_q; |
|---|
| 2850 | | - int tmp_pay_len = 0; |
|---|
| 3210 | + bool has_vlan, set_ic; |
|---|
| 3211 | + u8 proto_hdr_len, hdr; |
|---|
| 2851 | 3212 | u32 pay_len, mss; |
|---|
| 3213 | + dma_addr_t des; |
|---|
| 2852 | 3214 | int i; |
|---|
| 2853 | 3215 | |
|---|
| 2854 | 3216 | tx_q = &priv->tx_queue[queue]; |
|---|
| 3217 | + first_tx = tx_q->cur_tx; |
|---|
| 2855 | 3218 | |
|---|
| 2856 | 3219 | /* Compute header lengths */ |
|---|
| 2857 | 3220 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { |
|---|
| .. | .. |
|---|
| 2882 | 3245 | |
|---|
| 2883 | 3246 | /* set new MSS value if needed */ |
|---|
| 2884 | 3247 | if (mss != tx_q->mss) { |
|---|
| 2885 | | - mss_desc = tx_q->dma_tx + tx_q->cur_tx; |
|---|
| 3248 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3249 | + mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
|---|
| 3250 | + else |
|---|
| 3251 | + mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; |
|---|
| 3252 | + |
|---|
| 2886 | 3253 | stmmac_set_mss(priv, mss_desc, mss); |
|---|
| 2887 | 3254 | tx_q->mss = mss; |
|---|
| 2888 | | - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
|---|
| 3255 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, |
|---|
| 3256 | + priv->dma_tx_size); |
|---|
| 2889 | 3257 | WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
|---|
| 2890 | 3258 | } |
|---|
| 2891 | 3259 | |
|---|
| .. | .. |
|---|
| 2896 | 3264 | skb->data_len); |
|---|
| 2897 | 3265 | } |
|---|
| 2898 | 3266 | |
|---|
| 3267 | + /* Check if VLAN can be inserted by HW */ |
|---|
| 3268 | + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); |
|---|
| 3269 | + |
|---|
| 2899 | 3270 | first_entry = tx_q->cur_tx; |
|---|
| 2900 | 3271 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
|---|
| 2901 | 3272 | |
|---|
| 2902 | | - desc = tx_q->dma_tx + first_entry; |
|---|
| 3273 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3274 | + desc = &tx_q->dma_entx[first_entry].basic; |
|---|
| 3275 | + else |
|---|
| 3276 | + desc = &tx_q->dma_tx[first_entry]; |
|---|
| 2903 | 3277 | first = desc; |
|---|
| 3278 | + |
|---|
| 3279 | + if (has_vlan) |
|---|
| 3280 | + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); |
|---|
| 2904 | 3281 | |
|---|
| 2905 | 3282 | /* first descriptor: fill Headers on Buf1 */ |
|---|
| 2906 | 3283 | des = dma_map_single(priv->device, skb->data, skb_headlen(skb), |
|---|
| .. | .. |
|---|
| 2911 | 3288 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
|---|
| 2912 | 3289 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
|---|
| 2913 | 3290 | |
|---|
| 2914 | | - first->des0 = cpu_to_le32(des); |
|---|
| 3291 | + if (priv->dma_cap.addr64 <= 32) { |
|---|
| 3292 | + first->des0 = cpu_to_le32(des); |
|---|
| 2915 | 3293 | |
|---|
| 2916 | | - /* Fill start of payload in buff2 of first descriptor */ |
|---|
| 2917 | | - if (pay_len) |
|---|
| 2918 | | - first->des1 = cpu_to_le32(des + proto_hdr_len); |
|---|
| 3294 | + /* Fill start of payload in buff2 of first descriptor */ |
|---|
| 3295 | + if (pay_len) |
|---|
| 3296 | + first->des1 = cpu_to_le32(des + proto_hdr_len); |
|---|
| 2919 | 3297 | |
|---|
| 2920 | | - /* If needed take extra descriptors to fill the remaining payload */ |
|---|
| 2921 | | - tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; |
|---|
| 3298 | + /* If needed take extra descriptors to fill the remaining payload */ |
|---|
| 3299 | + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; |
|---|
| 3300 | + } else { |
|---|
| 3301 | + stmmac_set_desc_addr(priv, first, des); |
|---|
| 3302 | + tmp_pay_len = pay_len; |
|---|
| 3303 | + des += proto_hdr_len; |
|---|
| 3304 | + pay_len = 0; |
|---|
| 3305 | + } |
|---|
| 2922 | 3306 | |
|---|
| 2923 | 3307 | stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); |
|---|
| 2924 | 3308 | |
|---|
| .. | .. |
|---|
| 2945 | 3329 | /* Only the last descriptor gets to point to the skb. */ |
|---|
| 2946 | 3330 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; |
|---|
| 2947 | 3331 | |
|---|
| 3332 | + /* Manage tx mitigation */ |
|---|
| 3333 | + tx_packets = (tx_q->cur_tx + 1) - first_tx; |
|---|
| 3334 | + tx_q->tx_count_frames += tx_packets; |
|---|
| 3335 | + |
|---|
| 3336 | + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) |
|---|
| 3337 | + set_ic = true; |
|---|
| 3338 | + else if (!priv->tx_coal_frames) |
|---|
| 3339 | + set_ic = false; |
|---|
| 3340 | + else if (tx_packets > priv->tx_coal_frames) |
|---|
| 3341 | + set_ic = true; |
|---|
| 3342 | + else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) |
|---|
| 3343 | + set_ic = true; |
|---|
| 3344 | + else |
|---|
| 3345 | + set_ic = false; |
|---|
| 3346 | + |
|---|
| 3347 | + if (set_ic) { |
|---|
| 3348 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3349 | + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
|---|
| 3350 | + else |
|---|
| 3351 | + desc = &tx_q->dma_tx[tx_q->cur_tx]; |
|---|
| 3352 | + |
|---|
| 3353 | + tx_q->tx_count_frames = 0; |
|---|
| 3354 | + stmmac_set_tx_ic(priv, desc); |
|---|
| 3355 | + priv->xstats.tx_set_ic_bit++; |
|---|
| 3356 | + } |
|---|
| 3357 | + |
|---|
| 2948 | 3358 | /* We've used all descriptors we need for this skb, however, |
|---|
| 2949 | 3359 | * advance cur_tx so that it references a fresh descriptor. |
|---|
| 2950 | 3360 | * ndo_start_xmit will fill this descriptor the next time it's |
|---|
| 2951 | 3361 | * called and stmmac_tx_clean may clean up to this descriptor. |
|---|
| 2952 | 3362 | */ |
|---|
| 2953 | | - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
|---|
| 3363 | + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); |
|---|
| 2954 | 3364 | |
|---|
| 2955 | 3365 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
|---|
| 2956 | 3366 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
|---|
| .. | .. |
|---|
| 2962 | 3372 | priv->xstats.tx_tso_frames++; |
|---|
| 2963 | 3373 | priv->xstats.tx_tso_nfrags += nfrags; |
|---|
| 2964 | 3374 | |
|---|
| 2965 | | - /* Manage tx mitigation */ |
|---|
| 2966 | | - tx_q->tx_count_frames += nfrags + 1; |
|---|
| 2967 | | - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && |
|---|
| 2968 | | - !(priv->synopsys_id >= DWMAC_CORE_4_00 && |
|---|
| 2969 | | - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
|---|
| 2970 | | - priv->hwts_tx_en)) { |
|---|
| 2971 | | - stmmac_tx_timer_arm(priv, queue); |
|---|
| 2972 | | - } else { |
|---|
| 2973 | | - tx_q->tx_count_frames = 0; |
|---|
| 2974 | | - stmmac_set_tx_ic(priv, desc); |
|---|
| 2975 | | - priv->xstats.tx_set_ic_bit++; |
|---|
| 2976 | | - } |
|---|
| 3375 | + if (priv->sarc_type) |
|---|
| 3376 | + stmmac_set_desc_sarc(priv, first, priv->sarc_type); |
|---|
| 2977 | 3377 | |
|---|
| 2978 | 3378 | skb_tx_timestamp(skb); |
|---|
| 2979 | 3379 | |
|---|
| .. | .. |
|---|
| 3012 | 3412 | pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", |
|---|
| 3013 | 3413 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
|---|
| 3014 | 3414 | tx_q->cur_tx, first, nfrags); |
|---|
| 3015 | | - |
|---|
| 3016 | | - stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); |
|---|
| 3017 | | - |
|---|
| 3018 | 3415 | pr_info(">>> frame to be transmitted: "); |
|---|
| 3019 | 3416 | print_pkt(skb->data, skb_headlen(skb)); |
|---|
| 3020 | 3417 | } |
|---|
| 3021 | 3418 | |
|---|
| 3022 | 3419 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
|---|
| 3023 | 3420 | |
|---|
| 3024 | | - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); |
|---|
| 3421 | + if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3422 | + desc_size = sizeof(struct dma_edesc); |
|---|
| 3423 | + else |
|---|
| 3424 | + desc_size = sizeof(struct dma_desc); |
|---|
| 3425 | + |
|---|
| 3426 | + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); |
|---|
| 3025 | 3427 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
|---|
| 3026 | 3428 | stmmac_tx_timer_arm(priv, queue); |
|---|
| 3027 | 3429 | |
|---|
| .. | .. |
|---|
| 3044 | 3446 | */ |
|---|
| 3045 | 3447 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) |
|---|
| 3046 | 3448 | { |
|---|
| 3449 | + unsigned int first_entry, tx_packets, enh_desc; |
|---|
| 3047 | 3450 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 3048 | 3451 | unsigned int nopaged_len = skb_headlen(skb); |
|---|
| 3049 | 3452 | int i, csum_insertion = 0, is_jumbo = 0; |
|---|
| 3050 | 3453 | u32 queue = skb_get_queue_mapping(skb); |
|---|
| 3051 | 3454 | int nfrags = skb_shinfo(skb)->nr_frags; |
|---|
| 3052 | 3455 | int gso = skb_shinfo(skb)->gso_type; |
|---|
| 3053 | | - int entry; |
|---|
| 3054 | | - unsigned int first_entry; |
|---|
| 3456 | + struct dma_edesc *tbs_desc = NULL; |
|---|
| 3457 | + int entry, desc_size, first_tx; |
|---|
| 3055 | 3458 | struct dma_desc *desc, *first; |
|---|
| 3056 | 3459 | struct stmmac_tx_queue *tx_q; |
|---|
| 3057 | | - unsigned int enh_desc; |
|---|
| 3058 | | - unsigned int des; |
|---|
| 3460 | + bool has_vlan, set_ic; |
|---|
| 3461 | + dma_addr_t des; |
|---|
| 3059 | 3462 | |
|---|
| 3060 | 3463 | tx_q = &priv->tx_queue[queue]; |
|---|
| 3464 | + first_tx = tx_q->cur_tx; |
|---|
| 3061 | 3465 | |
|---|
| 3062 | 3466 | if (priv->tx_path_in_lpi_mode) |
|---|
| 3063 | 3467 | stmmac_disable_eee_mode(priv); |
|---|
| .. | .. |
|---|
| 3082 | 3486 | return NETDEV_TX_BUSY; |
|---|
| 3083 | 3487 | } |
|---|
| 3084 | 3488 | |
|---|
| 3489 | + /* Check if VLAN can be inserted by HW */ |
|---|
| 3490 | + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); |
|---|
| 3491 | + |
|---|
| 3085 | 3492 | entry = tx_q->cur_tx; |
|---|
| 3086 | 3493 | first_entry = entry; |
|---|
| 3087 | 3494 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
|---|
| .. | .. |
|---|
| 3090 | 3497 | |
|---|
| 3091 | 3498 | if (likely(priv->extend_desc)) |
|---|
| 3092 | 3499 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
|---|
| 3500 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3501 | + desc = &tx_q->dma_entx[entry].basic; |
|---|
| 3093 | 3502 | else |
|---|
| 3094 | 3503 | desc = tx_q->dma_tx + entry; |
|---|
| 3095 | 3504 | |
|---|
| 3096 | 3505 | first = desc; |
|---|
| 3506 | + |
|---|
| 3507 | + if (has_vlan) |
|---|
| 3508 | + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); |
|---|
| 3097 | 3509 | |
|---|
| 3098 | 3510 | enh_desc = priv->plat->enh_desc; |
|---|
| 3099 | 3511 | /* To program the descriptors according to the size of the frame */ |
|---|
| .. | .. |
|---|
| 3111 | 3523 | int len = skb_frag_size(frag); |
|---|
| 3112 | 3524 | bool last_segment = (i == (nfrags - 1)); |
|---|
| 3113 | 3525 | |
|---|
| 3114 | | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
|---|
| 3526 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); |
|---|
| 3115 | 3527 | WARN_ON(tx_q->tx_skbuff[entry]); |
|---|
| 3116 | 3528 | |
|---|
| 3117 | 3529 | if (likely(priv->extend_desc)) |
|---|
| 3118 | 3530 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
|---|
| 3531 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3532 | + desc = &tx_q->dma_entx[entry].basic; |
|---|
| 3119 | 3533 | else |
|---|
| 3120 | 3534 | desc = tx_q->dma_tx + entry; |
|---|
| 3121 | 3535 | |
|---|
| .. | .. |
|---|
| 3140 | 3554 | /* Only the last descriptor gets to point to the skb. */ |
|---|
| 3141 | 3555 | tx_q->tx_skbuff[entry] = skb; |
|---|
| 3142 | 3556 | |
|---|
| 3557 | + /* According to the coalesce parameter the IC bit for the latest |
|---|
| 3558 | + * segment is reset and the timer re-started to clean the tx status. |
|---|
| 3559 | + * This approach takes care about the fragments: desc is the first |
|---|
| 3560 | + * element in case of no SG. |
|---|
| 3561 | + */ |
|---|
| 3562 | + tx_packets = (entry + 1) - first_tx; |
|---|
| 3563 | + tx_q->tx_count_frames += tx_packets; |
|---|
| 3564 | + |
|---|
| 3565 | + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) |
|---|
| 3566 | + set_ic = true; |
|---|
| 3567 | + else if (!priv->tx_coal_frames) |
|---|
| 3568 | + set_ic = false; |
|---|
| 3569 | + else if (tx_packets > priv->tx_coal_frames) |
|---|
| 3570 | + set_ic = true; |
|---|
| 3571 | + else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) |
|---|
| 3572 | + set_ic = true; |
|---|
| 3573 | + else |
|---|
| 3574 | + set_ic = false; |
|---|
| 3575 | + |
|---|
| 3576 | + if (set_ic) { |
|---|
| 3577 | + if (likely(priv->extend_desc)) |
|---|
| 3578 | + desc = &tx_q->dma_etx[entry].basic; |
|---|
| 3579 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3580 | + desc = &tx_q->dma_entx[entry].basic; |
|---|
| 3581 | + else |
|---|
| 3582 | + desc = &tx_q->dma_tx[entry]; |
|---|
| 3583 | + |
|---|
| 3584 | + tx_q->tx_count_frames = 0; |
|---|
| 3585 | + stmmac_set_tx_ic(priv, desc); |
|---|
| 3586 | + priv->xstats.tx_set_ic_bit++; |
|---|
| 3587 | + } |
|---|
| 3588 | + |
|---|
| 3143 | 3589 | /* We've used all descriptors we need for this skb, however, |
|---|
| 3144 | 3590 | * advance cur_tx so that it references a fresh descriptor. |
|---|
| 3145 | 3591 | * ndo_start_xmit will fill this descriptor the next time it's |
|---|
| 3146 | 3592 | * called and stmmac_tx_clean may clean up to this descriptor. |
|---|
| 3147 | 3593 | */ |
|---|
| 3148 | | - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
|---|
| 3594 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); |
|---|
| 3149 | 3595 | tx_q->cur_tx = entry; |
|---|
| 3150 | 3596 | |
|---|
| 3151 | 3597 | if (netif_msg_pktdata(priv)) { |
|---|
| 3152 | | - void *tx_head; |
|---|
| 3153 | | - |
|---|
| 3154 | 3598 | netdev_dbg(priv->dev, |
|---|
| 3155 | 3599 | "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", |
|---|
| 3156 | 3600 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
|---|
| 3157 | 3601 | entry, first, nfrags); |
|---|
| 3158 | | - |
|---|
| 3159 | | - if (priv->extend_desc) |
|---|
| 3160 | | - tx_head = (void *)tx_q->dma_etx; |
|---|
| 3161 | | - else |
|---|
| 3162 | | - tx_head = (void *)tx_q->dma_tx; |
|---|
| 3163 | | - |
|---|
| 3164 | | - stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); |
|---|
| 3165 | 3602 | |
|---|
| 3166 | 3603 | netdev_dbg(priv->dev, ">>> frame to be transmitted: "); |
|---|
| 3167 | 3604 | print_pkt(skb->data, skb->len); |
|---|
| .. | .. |
|---|
| 3175 | 3612 | |
|---|
| 3176 | 3613 | dev->stats.tx_bytes += skb->len; |
|---|
| 3177 | 3614 | |
|---|
| 3178 | | - /* According to the coalesce parameter the IC bit for the latest |
|---|
| 3179 | | - * segment is reset and the timer re-started to clean the tx status. |
|---|
| 3180 | | - * This approach takes care about the fragments: desc is the first |
|---|
| 3181 | | - * element in case of no SG. |
|---|
| 3182 | | - */ |
|---|
| 3183 | | - tx_q->tx_count_frames += nfrags + 1; |
|---|
| 3184 | | - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && |
|---|
| 3185 | | - !(priv->synopsys_id >= DWMAC_CORE_4_00 && |
|---|
| 3186 | | - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
|---|
| 3187 | | - priv->hwts_tx_en)) { |
|---|
| 3188 | | - stmmac_tx_timer_arm(priv, queue); |
|---|
| 3189 | | - } else { |
|---|
| 3190 | | - tx_q->tx_count_frames = 0; |
|---|
| 3191 | | - stmmac_set_tx_ic(priv, desc); |
|---|
| 3192 | | - priv->xstats.tx_set_ic_bit++; |
|---|
| 3193 | | - } |
|---|
| 3615 | + if (priv->sarc_type) |
|---|
| 3616 | + stmmac_set_desc_sarc(priv, first, priv->sarc_type); |
|---|
| 3194 | 3617 | |
|---|
| 3195 | 3618 | skb_tx_timestamp(skb); |
|---|
| 3196 | 3619 | |
|---|
| .. | .. |
|---|
| 3222 | 3645 | |
|---|
| 3223 | 3646 | /* Prepare the first descriptor setting the OWN bit too */ |
|---|
| 3224 | 3647 | stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, |
|---|
| 3225 | | - csum_insertion, priv->mode, 1, last_segment, |
|---|
| 3648 | + csum_insertion, priv->mode, 0, last_segment, |
|---|
| 3226 | 3649 | skb->len); |
|---|
| 3227 | | - } else { |
|---|
| 3228 | | - stmmac_set_tx_owner(priv, first); |
|---|
| 3229 | 3650 | } |
|---|
| 3651 | + |
|---|
| 3652 | + if (tx_q->tbs & STMMAC_TBS_EN) { |
|---|
| 3653 | + struct timespec64 ts = ns_to_timespec64(skb->tstamp); |
|---|
| 3654 | + |
|---|
| 3655 | + tbs_desc = &tx_q->dma_entx[first_entry]; |
|---|
| 3656 | + stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); |
|---|
| 3657 | + } |
|---|
| 3658 | + |
|---|
| 3659 | + stmmac_set_tx_owner(priv, first); |
|---|
| 3230 | 3660 | |
|---|
| 3231 | 3661 | /* The own bit must be the latest setting done when prepare the |
|---|
| 3232 | 3662 | * descriptor and then barrier is needed to make sure that |
|---|
| .. | .. |
|---|
| 3238 | 3668 | |
|---|
| 3239 | 3669 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
|---|
| 3240 | 3670 | |
|---|
| 3241 | | - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); |
|---|
| 3671 | + if (likely(priv->extend_desc)) |
|---|
| 3672 | + desc_size = sizeof(struct dma_extended_desc); |
|---|
| 3673 | + else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
|---|
| 3674 | + desc_size = sizeof(struct dma_edesc); |
|---|
| 3675 | + else |
|---|
| 3676 | + desc_size = sizeof(struct dma_desc); |
|---|
| 3677 | + |
|---|
| 3678 | + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); |
|---|
| 3242 | 3679 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
|---|
| 3243 | 3680 | stmmac_tx_timer_arm(priv, queue); |
|---|
| 3244 | 3681 | |
|---|
| .. | .. |
|---|
| 3272 | 3709 | } |
|---|
| 3273 | 3710 | } |
|---|
| 3274 | 3711 | |
|---|
| 3275 | | - |
|---|
| 3276 | | -static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) |
|---|
| 3277 | | -{ |
|---|
| 3278 | | - if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) |
|---|
| 3279 | | - return 0; |
|---|
| 3280 | | - |
|---|
| 3281 | | - return 1; |
|---|
| 3282 | | -} |
|---|
| 3283 | | - |
|---|
| 3284 | 3712 | /** |
|---|
| 3285 | 3713 | * stmmac_rx_refill - refill used skb preallocated buffers |
|---|
| 3286 | 3714 | * @priv: driver private structure |
|---|
| .. | .. |
|---|
| 3291 | 3719 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
|---|
| 3292 | 3720 | { |
|---|
| 3293 | 3721 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
|---|
| 3294 | | - int dirty = stmmac_rx_dirty(priv, queue); |
|---|
| 3722 | + int len, dirty = stmmac_rx_dirty(priv, queue); |
|---|
| 3295 | 3723 | unsigned int entry = rx_q->dirty_rx; |
|---|
| 3724 | + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
|---|
| 3296 | 3725 | |
|---|
| 3297 | | - int bfsize = priv->dma_buf_sz; |
|---|
| 3726 | + if (priv->dma_cap.addr64 <= 32) |
|---|
| 3727 | + gfp |= GFP_DMA32; |
|---|
| 3728 | + |
|---|
| 3729 | + len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; |
|---|
| 3298 | 3730 | |
|---|
| 3299 | 3731 | while (dirty-- > 0) { |
|---|
| 3732 | + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; |
|---|
| 3300 | 3733 | struct dma_desc *p; |
|---|
| 3734 | + bool use_rx_wd; |
|---|
| 3301 | 3735 | |
|---|
| 3302 | 3736 | if (priv->extend_desc) |
|---|
| 3303 | 3737 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
|---|
| 3304 | 3738 | else |
|---|
| 3305 | 3739 | p = rx_q->dma_rx + entry; |
|---|
| 3306 | 3740 | |
|---|
| 3307 | | - if (likely(!rx_q->rx_skbuff[entry])) { |
|---|
| 3308 | | - struct sk_buff *skb; |
|---|
| 3309 | | - |
|---|
| 3310 | | - skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); |
|---|
| 3311 | | - if (unlikely(!skb)) { |
|---|
| 3312 | | - /* so for a while no zero-copy! */ |
|---|
| 3313 | | - rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; |
|---|
| 3314 | | - if (unlikely(net_ratelimit())) |
|---|
| 3315 | | - dev_err(priv->device, |
|---|
| 3316 | | - "fail to alloc skb entry %d\n", |
|---|
| 3317 | | - entry); |
|---|
| 3741 | + if (!buf->page) { |
|---|
| 3742 | + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
|---|
| 3743 | + if (!buf->page) |
|---|
| 3318 | 3744 | break; |
|---|
| 3319 | | - } |
|---|
| 3320 | | - |
|---|
| 3321 | | - rx_q->rx_skbuff[entry] = skb; |
|---|
| 3322 | | - rx_q->rx_skbuff_dma[entry] = |
|---|
| 3323 | | - dma_map_single(priv->device, skb->data, bfsize, |
|---|
| 3324 | | - DMA_FROM_DEVICE); |
|---|
| 3325 | | - if (dma_mapping_error(priv->device, |
|---|
| 3326 | | - rx_q->rx_skbuff_dma[entry])) { |
|---|
| 3327 | | - netdev_err(priv->dev, "Rx DMA map failed\n"); |
|---|
| 3328 | | - dev_kfree_skb(skb); |
|---|
| 3329 | | - break; |
|---|
| 3330 | | - } |
|---|
| 3331 | | - |
|---|
| 3332 | | - stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]); |
|---|
| 3333 | | - stmmac_refill_desc3(priv, rx_q, p); |
|---|
| 3334 | | - |
|---|
| 3335 | | - if (rx_q->rx_zeroc_thresh > 0) |
|---|
| 3336 | | - rx_q->rx_zeroc_thresh--; |
|---|
| 3337 | | - |
|---|
| 3338 | | - netif_dbg(priv, rx_status, priv->dev, |
|---|
| 3339 | | - "refill entry #%d\n", entry); |
|---|
| 3340 | 3745 | } |
|---|
| 3341 | | - dma_wmb(); |
|---|
| 3342 | 3746 | |
|---|
| 3343 | | - stmmac_set_rx_owner(priv, p, priv->use_riwt); |
|---|
| 3747 | + if (priv->sph && !buf->sec_page) { |
|---|
| 3748 | + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); |
|---|
| 3749 | + if (!buf->sec_page) |
|---|
| 3750 | + break; |
|---|
| 3751 | + |
|---|
| 3752 | + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); |
|---|
| 3753 | + } |
|---|
| 3754 | + |
|---|
| 3755 | + buf->addr = page_pool_get_dma_addr(buf->page); |
|---|
| 3756 | + stmmac_set_desc_addr(priv, p, buf->addr); |
|---|
| 3757 | + if (priv->sph) |
|---|
| 3758 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); |
|---|
| 3759 | + else |
|---|
| 3760 | + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); |
|---|
| 3761 | + stmmac_refill_desc3(priv, rx_q, p); |
|---|
| 3762 | + |
|---|
| 3763 | + rx_q->rx_count_frames++; |
|---|
| 3764 | + rx_q->rx_count_frames += priv->rx_coal_frames; |
|---|
| 3765 | + if (rx_q->rx_count_frames > priv->rx_coal_frames) |
|---|
| 3766 | + rx_q->rx_count_frames = 0; |
|---|
| 3767 | + |
|---|
| 3768 | + use_rx_wd = !priv->rx_coal_frames; |
|---|
| 3769 | + use_rx_wd |= rx_q->rx_count_frames > 0; |
|---|
| 3770 | + if (!priv->use_riwt) |
|---|
| 3771 | + use_rx_wd = false; |
|---|
| 3344 | 3772 | |
|---|
| 3345 | 3773 | dma_wmb(); |
|---|
| 3774 | + stmmac_set_rx_owner(priv, p, use_rx_wd); |
|---|
| 3346 | 3775 | |
|---|
| 3347 | | - entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); |
|---|
| 3776 | + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); |
|---|
| 3348 | 3777 | } |
|---|
| 3349 | 3778 | rx_q->dirty_rx = entry; |
|---|
| 3779 | + rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
|---|
| 3780 | + (rx_q->dirty_rx * sizeof(struct dma_desc)); |
|---|
| 3350 | 3781 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); |
|---|
| 3782 | +} |
|---|
| 3783 | + |
|---|
| 3784 | +static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, |
|---|
| 3785 | + struct dma_desc *p, |
|---|
| 3786 | + int status, unsigned int len) |
|---|
| 3787 | +{ |
|---|
| 3788 | + unsigned int plen = 0, hlen = 0; |
|---|
| 3789 | + int coe = priv->hw->rx_csum; |
|---|
| 3790 | + |
|---|
| 3791 | + /* Not first descriptor, buffer is always zero */ |
|---|
| 3792 | + if (priv->sph && len) |
|---|
| 3793 | + return 0; |
|---|
| 3794 | + |
|---|
| 3795 | + /* First descriptor, get split header length */ |
|---|
| 3796 | + stmmac_get_rx_header_len(priv, p, &hlen); |
|---|
| 3797 | + if (priv->sph && hlen) { |
|---|
| 3798 | + priv->xstats.rx_split_hdr_pkt_n++; |
|---|
| 3799 | + return hlen; |
|---|
| 3800 | + } |
|---|
| 3801 | + |
|---|
| 3802 | + /* First descriptor, not last descriptor and not split header */ |
|---|
| 3803 | + if (status & rx_not_ls) |
|---|
| 3804 | + return priv->dma_buf_sz; |
|---|
| 3805 | + |
|---|
| 3806 | + plen = stmmac_get_rx_frame_len(priv, p, coe); |
|---|
| 3807 | + |
|---|
| 3808 | + /* First descriptor and last descriptor and not split header */ |
|---|
| 3809 | + return min_t(unsigned int, priv->dma_buf_sz, plen); |
|---|
| 3810 | +} |
|---|
| 3811 | + |
|---|
| 3812 | +static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, |
|---|
| 3813 | + struct dma_desc *p, |
|---|
| 3814 | + int status, unsigned int len) |
|---|
| 3815 | +{ |
|---|
| 3816 | + int coe = priv->hw->rx_csum; |
|---|
| 3817 | + unsigned int plen = 0; |
|---|
| 3818 | + |
|---|
| 3819 | + /* Not split header, buffer is not available */ |
|---|
| 3820 | + if (!priv->sph) |
|---|
| 3821 | + return 0; |
|---|
| 3822 | + |
|---|
| 3823 | + /* Not last descriptor */ |
|---|
| 3824 | + if (status & rx_not_ls) |
|---|
| 3825 | + return priv->dma_buf_sz; |
|---|
| 3826 | + |
|---|
| 3827 | + plen = stmmac_get_rx_frame_len(priv, p, coe); |
|---|
| 3828 | + |
|---|
| 3829 | + /* Last descriptor */ |
|---|
| 3830 | + return plen - len; |
|---|
| 3351 | 3831 | } |
|---|
| 3352 | 3832 | |
|---|
| 3353 | 3833 | /** |
|---|
| .. | .. |
|---|
| 3362 | 3842 | { |
|---|
| 3363 | 3843 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
|---|
| 3364 | 3844 | struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 3845 | + unsigned int count = 0, error = 0, len = 0; |
|---|
| 3846 | + int status = 0, coe = priv->hw->rx_csum; |
|---|
| 3365 | 3847 | unsigned int next_entry = rx_q->cur_rx; |
|---|
| 3366 | | - int coe = priv->hw->rx_csum; |
|---|
| 3367 | | - unsigned int count = 0; |
|---|
| 3368 | | - bool xmac; |
|---|
| 3369 | | - |
|---|
| 3370 | | - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
|---|
| 3848 | + unsigned int desc_size; |
|---|
| 3849 | + struct sk_buff *skb = NULL; |
|---|
| 3371 | 3850 | |
|---|
| 3372 | 3851 | if (netif_msg_rx_status(priv)) { |
|---|
| 3373 | 3852 | void *rx_head; |
|---|
| 3374 | 3853 | |
|---|
| 3375 | 3854 | netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); |
|---|
| 3376 | | - if (priv->extend_desc) |
|---|
| 3855 | + if (priv->extend_desc) { |
|---|
| 3377 | 3856 | rx_head = (void *)rx_q->dma_erx; |
|---|
| 3378 | | - else |
|---|
| 3857 | + desc_size = sizeof(struct dma_extended_desc); |
|---|
| 3858 | + } else { |
|---|
| 3379 | 3859 | rx_head = (void *)rx_q->dma_rx; |
|---|
| 3860 | + desc_size = sizeof(struct dma_desc); |
|---|
| 3861 | + } |
|---|
| 3380 | 3862 | |
|---|
| 3381 | | - stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); |
|---|
| 3863 | + stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, |
|---|
| 3864 | + rx_q->dma_rx_phy, desc_size); |
|---|
| 3382 | 3865 | } |
|---|
| 3383 | 3866 | while (count < limit) { |
|---|
| 3384 | | - int entry, status; |
|---|
| 3385 | | - struct dma_desc *p; |
|---|
| 3386 | | - struct dma_desc *np; |
|---|
| 3867 | + unsigned int buf1_len = 0, buf2_len = 0; |
|---|
| 3868 | + enum pkt_hash_types hash_type; |
|---|
| 3869 | + struct stmmac_rx_buffer *buf; |
|---|
| 3870 | + struct dma_desc *np, *p; |
|---|
| 3871 | + int entry; |
|---|
| 3872 | + u32 hash; |
|---|
| 3387 | 3873 | |
|---|
| 3874 | + if (!count && rx_q->state_saved) { |
|---|
| 3875 | + skb = rx_q->state.skb; |
|---|
| 3876 | + error = rx_q->state.error; |
|---|
| 3877 | + len = rx_q->state.len; |
|---|
| 3878 | + } else { |
|---|
| 3879 | + rx_q->state_saved = false; |
|---|
| 3880 | + skb = NULL; |
|---|
| 3881 | + error = 0; |
|---|
| 3882 | + len = 0; |
|---|
| 3883 | + } |
|---|
| 3884 | + |
|---|
| 3885 | + if ((count >= limit - 1) && limit > 1) |
|---|
| 3886 | + break; |
|---|
| 3887 | + |
|---|
| 3888 | +read_again: |
|---|
| 3889 | + buf1_len = 0; |
|---|
| 3890 | + buf2_len = 0; |
|---|
| 3388 | 3891 | entry = next_entry; |
|---|
| 3892 | + buf = &rx_q->buf_pool[entry]; |
|---|
| 3389 | 3893 | |
|---|
| 3390 | 3894 | if (priv->extend_desc) |
|---|
| 3391 | 3895 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
|---|
| .. | .. |
|---|
| 3399 | 3903 | if (unlikely(status & dma_own)) |
|---|
| 3400 | 3904 | break; |
|---|
| 3401 | 3905 | |
|---|
| 3402 | | - count++; |
|---|
| 3403 | | - |
|---|
| 3404 | | - rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); |
|---|
| 3906 | + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, |
|---|
| 3907 | + priv->dma_rx_size); |
|---|
| 3405 | 3908 | next_entry = rx_q->cur_rx; |
|---|
| 3406 | 3909 | |
|---|
| 3407 | 3910 | if (priv->extend_desc) |
|---|
| .. | .. |
|---|
| 3415 | 3918 | stmmac_rx_extended_status(priv, &priv->dev->stats, |
|---|
| 3416 | 3919 | &priv->xstats, rx_q->dma_erx + entry); |
|---|
| 3417 | 3920 | if (unlikely(status == discard_frame)) { |
|---|
| 3418 | | - priv->dev->stats.rx_errors++; |
|---|
| 3419 | | - if (priv->hwts_rx_en && !priv->extend_desc) { |
|---|
| 3420 | | - /* DESC2 & DESC3 will be overwritten by device |
|---|
| 3421 | | - * with timestamp value, hence reinitialize |
|---|
| 3422 | | - * them in stmmac_rx_refill() function so that |
|---|
| 3423 | | - * device can reuse it. |
|---|
| 3424 | | - */ |
|---|
| 3425 | | - dev_kfree_skb_any(rx_q->rx_skbuff[entry]); |
|---|
| 3426 | | - rx_q->rx_skbuff[entry] = NULL; |
|---|
| 3427 | | - dma_unmap_single(priv->device, |
|---|
| 3428 | | - rx_q->rx_skbuff_dma[entry], |
|---|
| 3429 | | - priv->dma_buf_sz, |
|---|
| 3430 | | - DMA_FROM_DEVICE); |
|---|
| 3431 | | - } |
|---|
| 3432 | | - } else { |
|---|
| 3433 | | - struct sk_buff *skb; |
|---|
| 3434 | | - int frame_len; |
|---|
| 3435 | | - unsigned int des; |
|---|
| 3436 | | - |
|---|
| 3437 | | - stmmac_get_desc_addr(priv, p, &des); |
|---|
| 3438 | | - frame_len = stmmac_get_rx_frame_len(priv, p, coe); |
|---|
| 3439 | | - |
|---|
| 3440 | | - /* If frame length is greater than skb buffer size |
|---|
| 3441 | | - * (preallocated during init) then the packet is |
|---|
| 3442 | | - * ignored |
|---|
| 3443 | | - */ |
|---|
| 3444 | | - if (frame_len > priv->dma_buf_sz) { |
|---|
| 3445 | | - if (net_ratelimit()) |
|---|
| 3446 | | - netdev_err(priv->dev, |
|---|
| 3447 | | - "len %d larger than size (%d)\n", |
|---|
| 3448 | | - frame_len, priv->dma_buf_sz); |
|---|
| 3449 | | - priv->dev->stats.rx_length_errors++; |
|---|
| 3450 | | - continue; |
|---|
| 3451 | | - } |
|---|
| 3452 | | - |
|---|
| 3453 | | - /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
|---|
| 3454 | | - * Type frames (LLC/LLC-SNAP) |
|---|
| 3455 | | - * |
|---|
| 3456 | | - * llc_snap is never checked in GMAC >= 4, so this ACS |
|---|
| 3457 | | - * feature is always disabled and packets need to be |
|---|
| 3458 | | - * stripped manually. |
|---|
| 3459 | | - */ |
|---|
| 3460 | | - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
|---|
| 3461 | | - unlikely(status != llc_snap)) |
|---|
| 3462 | | - frame_len -= ETH_FCS_LEN; |
|---|
| 3463 | | - |
|---|
| 3464 | | - if (netif_msg_rx_status(priv)) { |
|---|
| 3465 | | - netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", |
|---|
| 3466 | | - p, entry, des); |
|---|
| 3467 | | - netdev_dbg(priv->dev, "frame size %d, COE: %d\n", |
|---|
| 3468 | | - frame_len, status); |
|---|
| 3469 | | - } |
|---|
| 3470 | | - |
|---|
| 3471 | | - /* The zero-copy is always used for all the sizes |
|---|
| 3472 | | - * in case of GMAC4 because it needs |
|---|
| 3473 | | - * to refill the used descriptors, always. |
|---|
| 3474 | | - */ |
|---|
| 3475 | | - if (unlikely(!xmac && |
|---|
| 3476 | | - ((frame_len < priv->rx_copybreak) || |
|---|
| 3477 | | - stmmac_rx_threshold_count(rx_q)))) { |
|---|
| 3478 | | - skb = netdev_alloc_skb_ip_align(priv->dev, |
|---|
| 3479 | | - frame_len); |
|---|
| 3480 | | - if (unlikely(!skb)) { |
|---|
| 3481 | | - if (net_ratelimit()) |
|---|
| 3482 | | - dev_warn(priv->device, |
|---|
| 3483 | | - "packet dropped\n"); |
|---|
| 3484 | | - priv->dev->stats.rx_dropped++; |
|---|
| 3485 | | - continue; |
|---|
| 3486 | | - } |
|---|
| 3487 | | - |
|---|
| 3488 | | - dma_sync_single_for_cpu(priv->device, |
|---|
| 3489 | | - rx_q->rx_skbuff_dma |
|---|
| 3490 | | - [entry], frame_len, |
|---|
| 3491 | | - DMA_FROM_DEVICE); |
|---|
| 3492 | | - skb_copy_to_linear_data(skb, |
|---|
| 3493 | | - rx_q-> |
|---|
| 3494 | | - rx_skbuff[entry]->data, |
|---|
| 3495 | | - frame_len); |
|---|
| 3496 | | - |
|---|
| 3497 | | - skb_put(skb, frame_len); |
|---|
| 3498 | | - dma_sync_single_for_device(priv->device, |
|---|
| 3499 | | - rx_q->rx_skbuff_dma |
|---|
| 3500 | | - [entry], frame_len, |
|---|
| 3501 | | - DMA_FROM_DEVICE); |
|---|
| 3502 | | - } else { |
|---|
| 3503 | | - skb = rx_q->rx_skbuff[entry]; |
|---|
| 3504 | | - if (unlikely(!skb)) { |
|---|
| 3505 | | - if (net_ratelimit()) |
|---|
| 3506 | | - netdev_err(priv->dev, |
|---|
| 3507 | | - "%s: Inconsistent Rx chain\n", |
|---|
| 3508 | | - priv->dev->name); |
|---|
| 3509 | | - priv->dev->stats.rx_dropped++; |
|---|
| 3510 | | - continue; |
|---|
| 3511 | | - } |
|---|
| 3512 | | - prefetch(skb->data - NET_IP_ALIGN); |
|---|
| 3513 | | - rx_q->rx_skbuff[entry] = NULL; |
|---|
| 3514 | | - rx_q->rx_zeroc_thresh++; |
|---|
| 3515 | | - |
|---|
| 3516 | | - skb_put(skb, frame_len); |
|---|
| 3517 | | - dma_unmap_single(priv->device, |
|---|
| 3518 | | - rx_q->rx_skbuff_dma[entry], |
|---|
| 3519 | | - priv->dma_buf_sz, |
|---|
| 3520 | | - DMA_FROM_DEVICE); |
|---|
| 3521 | | - } |
|---|
| 3522 | | - |
|---|
| 3523 | | - if (netif_msg_pktdata(priv)) { |
|---|
| 3524 | | - netdev_dbg(priv->dev, "frame received (%dbytes)", |
|---|
| 3525 | | - frame_len); |
|---|
| 3526 | | - print_pkt(skb->data, frame_len); |
|---|
| 3527 | | - } |
|---|
| 3528 | | - |
|---|
| 3529 | | - stmmac_get_rx_hwtstamp(priv, p, np, skb); |
|---|
| 3530 | | - |
|---|
| 3531 | | - stmmac_rx_vlan(priv->dev, skb); |
|---|
| 3532 | | - |
|---|
| 3533 | | - skb->protocol = eth_type_trans(skb, priv->dev); |
|---|
| 3534 | | - |
|---|
| 3535 | | - if (unlikely(!coe)) |
|---|
| 3536 | | - skb_checksum_none_assert(skb); |
|---|
| 3537 | | - else |
|---|
| 3538 | | - skb->ip_summed = CHECKSUM_UNNECESSARY; |
|---|
| 3539 | | - |
|---|
| 3540 | | - napi_gro_receive(&ch->napi, skb); |
|---|
| 3541 | | - |
|---|
| 3542 | | - priv->dev->stats.rx_packets++; |
|---|
| 3543 | | - priv->dev->stats.rx_bytes += frame_len; |
|---|
| 3921 | + page_pool_recycle_direct(rx_q->page_pool, buf->page); |
|---|
| 3922 | + buf->page = NULL; |
|---|
| 3923 | + error = 1; |
|---|
| 3924 | + if (!priv->hwts_rx_en) |
|---|
| 3925 | + priv->dev->stats.rx_errors++; |
|---|
| 3544 | 3926 | } |
|---|
| 3927 | + |
|---|
| 3928 | + if (unlikely(error && (status & rx_not_ls))) |
|---|
| 3929 | + goto read_again; |
|---|
| 3930 | + if (unlikely(error)) { |
|---|
| 3931 | + dev_kfree_skb(skb); |
|---|
| 3932 | + skb = NULL; |
|---|
| 3933 | + count++; |
|---|
| 3934 | + continue; |
|---|
| 3935 | + } |
|---|
| 3936 | + |
|---|
| 3937 | + /* Buffer is good. Go on. */ |
|---|
| 3938 | + |
|---|
| 3939 | + prefetch(page_address(buf->page)); |
|---|
| 3940 | + if (buf->sec_page) |
|---|
| 3941 | + prefetch(page_address(buf->sec_page)); |
|---|
| 3942 | + |
|---|
| 3943 | + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); |
|---|
| 3944 | + len += buf1_len; |
|---|
| 3945 | + buf2_len = stmmac_rx_buf2_len(priv, p, status, len); |
|---|
| 3946 | + len += buf2_len; |
|---|
| 3947 | + |
|---|
| 3948 | + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
|---|
| 3949 | + * Type frames (LLC/LLC-SNAP) |
|---|
| 3950 | + * |
|---|
| 3951 | + * llc_snap is never checked in GMAC >= 4, so this ACS |
|---|
| 3952 | + * feature is always disabled and packets need to be |
|---|
| 3953 | + * stripped manually. |
|---|
| 3954 | + */ |
|---|
| 3955 | + if (likely(!(status & rx_not_ls)) && |
|---|
| 3956 | + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
|---|
| 3957 | + unlikely(status != llc_snap))) { |
|---|
| 3958 | + if (buf2_len) |
|---|
| 3959 | + buf2_len -= ETH_FCS_LEN; |
|---|
| 3960 | + else |
|---|
| 3961 | + buf1_len -= ETH_FCS_LEN; |
|---|
| 3962 | + |
|---|
| 3963 | + len -= ETH_FCS_LEN; |
|---|
| 3964 | + } |
|---|
| 3965 | + |
|---|
| 3966 | + if (!skb) { |
|---|
| 3967 | + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); |
|---|
| 3968 | + if (!skb) { |
|---|
| 3969 | + priv->dev->stats.rx_dropped++; |
|---|
| 3970 | + count++; |
|---|
| 3971 | + goto drain_data; |
|---|
| 3972 | + } |
|---|
| 3973 | + |
|---|
| 3974 | + dma_sync_single_for_cpu(priv->device, buf->addr, |
|---|
| 3975 | + buf1_len, DMA_FROM_DEVICE); |
|---|
| 3976 | + skb_copy_to_linear_data(skb, page_address(buf->page), |
|---|
| 3977 | + buf1_len); |
|---|
| 3978 | + skb_put(skb, buf1_len); |
|---|
| 3979 | + |
|---|
| 3980 | + /* Data payload copied into SKB, page ready for recycle */ |
|---|
| 3981 | + page_pool_recycle_direct(rx_q->page_pool, buf->page); |
|---|
| 3982 | + buf->page = NULL; |
|---|
| 3983 | + } else if (buf1_len) { |
|---|
| 3984 | + dma_sync_single_for_cpu(priv->device, buf->addr, |
|---|
| 3985 | + buf1_len, DMA_FROM_DEVICE); |
|---|
| 3986 | + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
|---|
| 3987 | + buf->page, 0, buf1_len, |
|---|
| 3988 | + priv->dma_buf_sz); |
|---|
| 3989 | + |
|---|
| 3990 | + /* Data payload appended into SKB */ |
|---|
| 3991 | + page_pool_release_page(rx_q->page_pool, buf->page); |
|---|
| 3992 | + buf->page = NULL; |
|---|
| 3993 | + } |
|---|
| 3994 | + |
|---|
| 3995 | + if (buf2_len) { |
|---|
| 3996 | + dma_sync_single_for_cpu(priv->device, buf->sec_addr, |
|---|
| 3997 | + buf2_len, DMA_FROM_DEVICE); |
|---|
| 3998 | + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
|---|
| 3999 | + buf->sec_page, 0, buf2_len, |
|---|
| 4000 | + priv->dma_buf_sz); |
|---|
| 4001 | + |
|---|
| 4002 | + /* Data payload appended into SKB */ |
|---|
| 4003 | + page_pool_release_page(rx_q->page_pool, buf->sec_page); |
|---|
| 4004 | + buf->sec_page = NULL; |
|---|
| 4005 | + } |
|---|
| 4006 | + |
|---|
| 4007 | +drain_data: |
|---|
| 4008 | + if (likely(status & rx_not_ls)) |
|---|
| 4009 | + goto read_again; |
|---|
| 4010 | + if (!skb) |
|---|
| 4011 | + continue; |
|---|
| 4012 | + |
|---|
| 4013 | + /* Got entire packet into SKB. Finish it. */ |
|---|
| 4014 | + |
|---|
| 4015 | + stmmac_get_rx_hwtstamp(priv, p, np, skb); |
|---|
| 4016 | + stmmac_rx_vlan(priv->dev, skb); |
|---|
| 4017 | + skb->protocol = eth_type_trans(skb, priv->dev); |
|---|
| 4018 | + |
|---|
| 4019 | + if (unlikely(!coe)) |
|---|
| 4020 | + skb_checksum_none_assert(skb); |
|---|
| 4021 | + else |
|---|
| 4022 | + skb->ip_summed = CHECKSUM_UNNECESSARY; |
|---|
| 4023 | + |
|---|
| 4024 | + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) |
|---|
| 4025 | + skb_set_hash(skb, hash, hash_type); |
|---|
| 4026 | + |
|---|
| 4027 | + skb_record_rx_queue(skb, queue); |
|---|
| 4028 | + napi_gro_receive(&ch->rx_napi, skb); |
|---|
| 4029 | + skb = NULL; |
|---|
| 4030 | + |
|---|
| 4031 | + priv->dev->stats.rx_packets++; |
|---|
| 4032 | + priv->dev->stats.rx_bytes += len; |
|---|
| 4033 | + count++; |
|---|
| 4034 | + } |
|---|
| 4035 | + |
|---|
| 4036 | + if (status & rx_not_ls || skb) { |
|---|
| 4037 | + rx_q->state_saved = true; |
|---|
| 4038 | + rx_q->state.skb = skb; |
|---|
| 4039 | + rx_q->state.error = error; |
|---|
| 4040 | + rx_q->state.len = len; |
|---|
| 3545 | 4041 | } |
|---|
| 3546 | 4042 | |
|---|
| 3547 | 4043 | stmmac_rx_refill(priv, queue); |
|---|
| .. | .. |
|---|
| 3551 | 4047 | return count; |
|---|
| 3552 | 4048 | } |
|---|
| 3553 | 4049 | |
|---|
| 3554 | | -/** |
|---|
| 3555 | | - * stmmac_poll - stmmac poll method (NAPI) |
|---|
| 3556 | | - * @napi : pointer to the napi structure. |
|---|
| 3557 | | - * @budget : maximum number of packets that the current CPU can receive from |
|---|
| 3558 | | - * all interfaces. |
|---|
| 3559 | | - * Description : |
|---|
| 3560 | | - * To look at the incoming frames and clear the tx resources. |
|---|
| 3561 | | - */ |
|---|
| 3562 | | -static int stmmac_napi_poll(struct napi_struct *napi, int budget) |
|---|
| 4050 | +static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) |
|---|
| 3563 | 4051 | { |
|---|
| 3564 | 4052 | struct stmmac_channel *ch = |
|---|
| 3565 | | - container_of(napi, struct stmmac_channel, napi); |
|---|
| 4053 | + container_of(napi, struct stmmac_channel, rx_napi); |
|---|
| 3566 | 4054 | struct stmmac_priv *priv = ch->priv_data; |
|---|
| 3567 | | - int work_done, rx_done = 0, tx_done = 0; |
|---|
| 3568 | 4055 | u32 chan = ch->index; |
|---|
| 4056 | + int work_done; |
|---|
| 3569 | 4057 | |
|---|
| 3570 | 4058 | priv->xstats.napi_poll++; |
|---|
| 3571 | 4059 | |
|---|
| 3572 | | - if (ch->has_tx) |
|---|
| 3573 | | - tx_done = stmmac_tx_clean(priv, budget, chan); |
|---|
| 3574 | | - if (ch->has_rx) |
|---|
| 3575 | | - rx_done = stmmac_rx(priv, budget, chan); |
|---|
| 4060 | + work_done = stmmac_rx(priv, budget, chan); |
|---|
| 4061 | + if (work_done < budget && napi_complete_done(napi, work_done)) { |
|---|
| 4062 | + unsigned long flags; |
|---|
| 3576 | 4063 | |
|---|
| 3577 | | - work_done = max(rx_done, tx_done); |
|---|
| 4064 | + spin_lock_irqsave(&ch->lock, flags); |
|---|
| 4065 | + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); |
|---|
| 4066 | + spin_unlock_irqrestore(&ch->lock, flags); |
|---|
| 4067 | + } |
|---|
| 4068 | + |
|---|
| 4069 | + return work_done; |
|---|
| 4070 | +} |
|---|
| 4071 | + |
|---|
| 4072 | +static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) |
|---|
| 4073 | +{ |
|---|
| 4074 | + struct stmmac_channel *ch = |
|---|
| 4075 | + container_of(napi, struct stmmac_channel, tx_napi); |
|---|
| 4076 | + struct stmmac_priv *priv = ch->priv_data; |
|---|
| 4077 | + u32 chan = ch->index; |
|---|
| 4078 | + int work_done; |
|---|
| 4079 | + |
|---|
| 4080 | + priv->xstats.napi_poll++; |
|---|
| 4081 | + |
|---|
| 4082 | + work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); |
|---|
| 3578 | 4083 | work_done = min(work_done, budget); |
|---|
| 3579 | 4084 | |
|---|
| 3580 | 4085 | if (work_done < budget && napi_complete_done(napi, work_done)) { |
|---|
| 3581 | | - int stat; |
|---|
| 4086 | + unsigned long flags; |
|---|
| 3582 | 4087 | |
|---|
| 3583 | | - stmmac_enable_dma_irq(priv, priv->ioaddr, chan); |
|---|
| 3584 | | - stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, |
|---|
| 3585 | | - &priv->xstats, chan); |
|---|
| 3586 | | - if (stat && napi_reschedule(napi)) |
|---|
| 3587 | | - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); |
|---|
| 4088 | + spin_lock_irqsave(&ch->lock, flags); |
|---|
| 4089 | + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); |
|---|
| 4090 | + spin_unlock_irqrestore(&ch->lock, flags); |
|---|
| 3588 | 4091 | } |
|---|
| 3589 | 4092 | |
|---|
| 3590 | 4093 | return work_done; |
|---|
| .. | .. |
|---|
| 3593 | 4096 | /** |
|---|
| 3594 | 4097 | * stmmac_tx_timeout |
|---|
| 3595 | 4098 | * @dev : Pointer to net device structure |
|---|
| 4099 | + * @txqueue: the index of the hanging transmit queue |
|---|
| 3596 | 4100 | * Description: this function is called when a packet transmission fails to |
|---|
| 3597 | 4101 | * complete within a reasonable time. The driver will mark the error in the |
|---|
| 3598 | 4102 | * netdev structure and arrange for the device to be reset to a sane state |
|---|
| 3599 | 4103 | * in order to transmit a new packet. |
|---|
| 3600 | 4104 | */ |
|---|
| 3601 | | -static void stmmac_tx_timeout(struct net_device *dev) |
|---|
| 4105 | +static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) |
|---|
| 3602 | 4106 | { |
|---|
| 3603 | 4107 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 3604 | 4108 | |
|---|
| .. | .. |
|---|
| 3695 | 4199 | netdev_features_t features) |
|---|
| 3696 | 4200 | { |
|---|
| 3697 | 4201 | struct stmmac_priv *priv = netdev_priv(netdev); |
|---|
| 4202 | + bool sph_en; |
|---|
| 4203 | + u32 chan; |
|---|
| 3698 | 4204 | |
|---|
| 3699 | 4205 | /* Keep the COE Type in case of csum is supporting */ |
|---|
| 3700 | 4206 | if (features & NETIF_F_RXCSUM) |
|---|
| .. | .. |
|---|
| 3705 | 4211 | * fixed in case of issue. |
|---|
| 3706 | 4212 | */ |
|---|
| 3707 | 4213 | stmmac_rx_ipc(priv, priv->hw); |
|---|
| 4214 | + |
|---|
| 4215 | + sph_en = (priv->hw->rx_csum > 0) && priv->sph; |
|---|
| 4216 | + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) |
|---|
| 4217 | + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); |
|---|
| 3708 | 4218 | |
|---|
| 3709 | 4219 | return 0; |
|---|
| 3710 | 4220 | } |
|---|
| .. | .. |
|---|
| 3798 | 4308 | */ |
|---|
| 3799 | 4309 | static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
|---|
| 3800 | 4310 | { |
|---|
| 4311 | + struct stmmac_priv *priv = netdev_priv (dev); |
|---|
| 3801 | 4312 | int ret = -EOPNOTSUPP; |
|---|
| 3802 | 4313 | |
|---|
| 3803 | 4314 | if (!netif_running(dev)) |
|---|
| .. | .. |
|---|
| 3807 | 4318 | case SIOCGMIIPHY: |
|---|
| 3808 | 4319 | case SIOCGMIIREG: |
|---|
| 3809 | 4320 | case SIOCSMIIREG: |
|---|
| 3810 | | - if (!dev->phydev) |
|---|
| 3811 | | - return -EINVAL; |
|---|
| 3812 | | - ret = phy_mii_ioctl(dev->phydev, rq, cmd); |
|---|
| 4321 | + ret = phylink_mii_ioctl(priv->phylink, rq, cmd); |
|---|
| 3813 | 4322 | break; |
|---|
| 3814 | | -#ifdef CONFIG_STMMAC_PTP |
|---|
| 3815 | 4323 | case SIOCSHWTSTAMP: |
|---|
| 3816 | 4324 | ret = stmmac_hwtstamp_set(dev, rq); |
|---|
| 3817 | 4325 | break; |
|---|
| 3818 | 4326 | case SIOCGHWTSTAMP: |
|---|
| 3819 | 4327 | ret = stmmac_hwtstamp_get(dev, rq); |
|---|
| 3820 | 4328 | break; |
|---|
| 3821 | | -#endif |
|---|
| 3822 | 4329 | default: |
|---|
| 3823 | 4330 | break; |
|---|
| 3824 | 4331 | } |
|---|
| .. | .. |
|---|
| 3832 | 4339 | struct stmmac_priv *priv = cb_priv; |
|---|
| 3833 | 4340 | int ret = -EOPNOTSUPP; |
|---|
| 3834 | 4341 | |
|---|
| 4342 | + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) |
|---|
| 4343 | + return ret; |
|---|
| 4344 | + |
|---|
| 3835 | 4345 | stmmac_disable_all_queues(priv); |
|---|
| 3836 | 4346 | |
|---|
| 3837 | 4347 | switch (type) { |
|---|
| 3838 | 4348 | case TC_SETUP_CLSU32: |
|---|
| 3839 | | - if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) |
|---|
| 3840 | | - ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); |
|---|
| 4349 | + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); |
|---|
| 4350 | + break; |
|---|
| 4351 | + case TC_SETUP_CLSFLOWER: |
|---|
| 4352 | + ret = stmmac_tc_setup_cls(priv, priv, type_data); |
|---|
| 3841 | 4353 | break; |
|---|
| 3842 | 4354 | default: |
|---|
| 3843 | 4355 | break; |
|---|
| .. | .. |
|---|
| 3847 | 4359 | return ret; |
|---|
| 3848 | 4360 | } |
|---|
| 3849 | 4361 | |
|---|
| 3850 | | -static int stmmac_setup_tc_block(struct stmmac_priv *priv, |
|---|
| 3851 | | - struct tc_block_offload *f) |
|---|
| 3852 | | -{ |
|---|
| 3853 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
|---|
| 3854 | | - return -EOPNOTSUPP; |
|---|
| 3855 | | - |
|---|
| 3856 | | - switch (f->command) { |
|---|
| 3857 | | - case TC_BLOCK_BIND: |
|---|
| 3858 | | - return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, |
|---|
| 3859 | | - priv, priv, f->extack); |
|---|
| 3860 | | - case TC_BLOCK_UNBIND: |
|---|
| 3861 | | - tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); |
|---|
| 3862 | | - return 0; |
|---|
| 3863 | | - default: |
|---|
| 3864 | | - return -EOPNOTSUPP; |
|---|
| 3865 | | - } |
|---|
| 3866 | | -} |
|---|
| 4362 | +static LIST_HEAD(stmmac_block_cb_list); |
|---|
| 3867 | 4363 | |
|---|
| 3868 | 4364 | static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, |
|---|
| 3869 | 4365 | void *type_data) |
|---|
| .. | .. |
|---|
| 3872 | 4368 | |
|---|
| 3873 | 4369 | switch (type) { |
|---|
| 3874 | 4370 | case TC_SETUP_BLOCK: |
|---|
| 3875 | | - return stmmac_setup_tc_block(priv, type_data); |
|---|
| 4371 | + return flow_block_cb_setup_simple(type_data, |
|---|
| 4372 | + &stmmac_block_cb_list, |
|---|
| 4373 | + stmmac_setup_tc_block_cb, |
|---|
| 4374 | + priv, priv, true); |
|---|
| 3876 | 4375 | case TC_SETUP_QDISC_CBS: |
|---|
| 3877 | 4376 | return stmmac_tc_setup_cbs(priv, priv, type_data); |
|---|
| 4377 | + case TC_SETUP_QDISC_TAPRIO: |
|---|
| 4378 | + return stmmac_tc_setup_taprio(priv, priv, type_data); |
|---|
| 4379 | + case TC_SETUP_QDISC_ETF: |
|---|
| 4380 | + return stmmac_tc_setup_etf(priv, priv, type_data); |
|---|
| 3878 | 4381 | default: |
|---|
| 3879 | 4382 | return -EOPNOTSUPP; |
|---|
| 3880 | 4383 | } |
|---|
| 3881 | 4384 | } |
|---|
| 3882 | 4385 | |
|---|
| 3883 | 4386 | static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, |
|---|
| 3884 | | - struct net_device *sb_dev, |
|---|
| 3885 | | - select_queue_fallback_t fallback) |
|---|
| 4387 | + struct net_device *sb_dev) |
|---|
| 3886 | 4388 | { |
|---|
| 3887 | 4389 | int gso = skb_shinfo(skb)->gso_type; |
|---|
| 3888 | 4390 | |
|---|
| .. | .. |
|---|
| 3896 | 4398 | return 0; |
|---|
| 3897 | 4399 | } |
|---|
| 3898 | 4400 | |
|---|
| 3899 | | - return fallback(dev, skb, NULL) % dev->real_num_tx_queues; |
|---|
| 4401 | + return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; |
|---|
| 3900 | 4402 | } |
|---|
| 3901 | 4403 | |
|---|
| 3902 | 4404 | static int stmmac_set_mac_address(struct net_device *ndev, void *addr) |
|---|
| .. | .. |
|---|
| 3904 | 4406 | struct stmmac_priv *priv = netdev_priv(ndev); |
|---|
| 3905 | 4407 | int ret = 0; |
|---|
| 3906 | 4408 | |
|---|
| 4409 | + ret = pm_runtime_get_sync(priv->device); |
|---|
| 4410 | + if (ret < 0) { |
|---|
| 4411 | + pm_runtime_put_noidle(priv->device); |
|---|
| 4412 | + return ret; |
|---|
| 4413 | + } |
|---|
| 4414 | + |
|---|
| 3907 | 4415 | ret = eth_mac_addr(ndev, addr); |
|---|
| 3908 | 4416 | if (ret) |
|---|
| 3909 | | - return ret; |
|---|
| 4417 | + goto set_mac_error; |
|---|
| 3910 | 4418 | |
|---|
| 3911 | 4419 | stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); |
|---|
| 4420 | + |
|---|
| 4421 | +set_mac_error: |
|---|
| 4422 | + pm_runtime_put(priv->device); |
|---|
| 3912 | 4423 | |
|---|
| 3913 | 4424 | return ret; |
|---|
| 3914 | 4425 | } |
|---|
| .. | .. |
|---|
| 3917 | 4428 | static struct dentry *stmmac_fs_dir; |
|---|
| 3918 | 4429 | |
|---|
| 3919 | 4430 | static void sysfs_display_ring(void *head, int size, int extend_desc, |
|---|
| 3920 | | - struct seq_file *seq) |
|---|
| 4431 | + struct seq_file *seq, dma_addr_t dma_phy_addr) |
|---|
| 3921 | 4432 | { |
|---|
| 3922 | 4433 | int i; |
|---|
| 3923 | 4434 | struct dma_extended_desc *ep = (struct dma_extended_desc *)head; |
|---|
| 3924 | 4435 | struct dma_desc *p = (struct dma_desc *)head; |
|---|
| 4436 | + dma_addr_t dma_addr; |
|---|
| 3925 | 4437 | |
|---|
| 3926 | 4438 | for (i = 0; i < size; i++) { |
|---|
| 3927 | 4439 | if (extend_desc) { |
|---|
| 3928 | | - seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
|---|
| 3929 | | - i, (unsigned int)virt_to_phys(ep), |
|---|
| 4440 | + dma_addr = dma_phy_addr + i * sizeof(*ep); |
|---|
| 4441 | + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", |
|---|
| 4442 | + i, &dma_addr, |
|---|
| 3930 | 4443 | le32_to_cpu(ep->basic.des0), |
|---|
| 3931 | 4444 | le32_to_cpu(ep->basic.des1), |
|---|
| 3932 | 4445 | le32_to_cpu(ep->basic.des2), |
|---|
| 3933 | 4446 | le32_to_cpu(ep->basic.des3)); |
|---|
| 3934 | 4447 | ep++; |
|---|
| 3935 | 4448 | } else { |
|---|
| 3936 | | - seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
|---|
| 3937 | | - i, (unsigned int)virt_to_phys(p), |
|---|
| 4449 | + dma_addr = dma_phy_addr + i * sizeof(*p); |
|---|
| 4450 | + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", |
|---|
| 4451 | + i, &dma_addr, |
|---|
| 3938 | 4452 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), |
|---|
| 3939 | 4453 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); |
|---|
| 3940 | 4454 | p++; |
|---|
| .. | .. |
|---|
| 3943 | 4457 | } |
|---|
| 3944 | 4458 | } |
|---|
| 3945 | 4459 | |
|---|
| 3946 | | -static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) |
|---|
| 4460 | +static int stmmac_rings_status_show(struct seq_file *seq, void *v) |
|---|
| 3947 | 4461 | { |
|---|
| 3948 | 4462 | struct net_device *dev = seq->private; |
|---|
| 3949 | 4463 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| .. | .. |
|---|
| 3962 | 4476 | if (priv->extend_desc) { |
|---|
| 3963 | 4477 | seq_printf(seq, "Extended descriptor ring:\n"); |
|---|
| 3964 | 4478 | sysfs_display_ring((void *)rx_q->dma_erx, |
|---|
| 3965 | | - DMA_RX_SIZE, 1, seq); |
|---|
| 4479 | + priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); |
|---|
| 3966 | 4480 | } else { |
|---|
| 3967 | 4481 | seq_printf(seq, "Descriptor ring:\n"); |
|---|
| 3968 | 4482 | sysfs_display_ring((void *)rx_q->dma_rx, |
|---|
| 3969 | | - DMA_RX_SIZE, 0, seq); |
|---|
| 4483 | + priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); |
|---|
| 3970 | 4484 | } |
|---|
| 3971 | 4485 | } |
|---|
| 3972 | 4486 | |
|---|
| .. | .. |
|---|
| 3978 | 4492 | if (priv->extend_desc) { |
|---|
| 3979 | 4493 | seq_printf(seq, "Extended descriptor ring:\n"); |
|---|
| 3980 | 4494 | sysfs_display_ring((void *)tx_q->dma_etx, |
|---|
| 3981 | | - DMA_TX_SIZE, 1, seq); |
|---|
| 3982 | | - } else { |
|---|
| 4495 | + priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); |
|---|
| 4496 | + } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { |
|---|
| 3983 | 4497 | seq_printf(seq, "Descriptor ring:\n"); |
|---|
| 3984 | 4498 | sysfs_display_ring((void *)tx_q->dma_tx, |
|---|
| 3985 | | - DMA_TX_SIZE, 0, seq); |
|---|
| 4499 | + priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); |
|---|
| 3986 | 4500 | } |
|---|
| 3987 | 4501 | } |
|---|
| 3988 | 4502 | |
|---|
| 3989 | 4503 | return 0; |
|---|
| 3990 | 4504 | } |
|---|
| 4505 | +DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); |
|---|
| 3991 | 4506 | |
|---|
| 3992 | | -static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) |
|---|
| 3993 | | -{ |
|---|
| 3994 | | - return single_open(file, stmmac_sysfs_ring_read, inode->i_private); |
|---|
| 3995 | | -} |
|---|
| 3996 | | - |
|---|
| 3997 | | -/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */ |
|---|
| 3998 | | - |
|---|
| 3999 | | -static const struct file_operations stmmac_rings_status_fops = { |
|---|
| 4000 | | - .owner = THIS_MODULE, |
|---|
| 4001 | | - .open = stmmac_sysfs_ring_open, |
|---|
| 4002 | | - .read = seq_read, |
|---|
| 4003 | | - .llseek = seq_lseek, |
|---|
| 4004 | | - .release = single_release, |
|---|
| 4005 | | -}; |
|---|
| 4006 | | - |
|---|
| 4007 | | -static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) |
|---|
| 4507 | +static int stmmac_dma_cap_show(struct seq_file *seq, void *v) |
|---|
| 4008 | 4508 | { |
|---|
| 4009 | 4509 | struct net_device *dev = seq->private; |
|---|
| 4010 | 4510 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| .. | .. |
|---|
| 4062 | 4562 | priv->dma_cap.number_rx_channel); |
|---|
| 4063 | 4563 | seq_printf(seq, "\tNumber of Additional TX channel: %d\n", |
|---|
| 4064 | 4564 | priv->dma_cap.number_tx_channel); |
|---|
| 4565 | + seq_printf(seq, "\tNumber of Additional RX queues: %d\n", |
|---|
| 4566 | + priv->dma_cap.number_rx_queues); |
|---|
| 4567 | + seq_printf(seq, "\tNumber of Additional TX queues: %d\n", |
|---|
| 4568 | + priv->dma_cap.number_tx_queues); |
|---|
| 4065 | 4569 | seq_printf(seq, "\tEnhanced descriptors: %s\n", |
|---|
| 4066 | 4570 | (priv->dma_cap.enh_desc) ? "Y" : "N"); |
|---|
| 4067 | | - |
|---|
| 4571 | + seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); |
|---|
| 4572 | + seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); |
|---|
| 4573 | + seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); |
|---|
| 4574 | + seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); |
|---|
| 4575 | + seq_printf(seq, "\tNumber of PPS Outputs: %d\n", |
|---|
| 4576 | + priv->dma_cap.pps_out_num); |
|---|
| 4577 | + seq_printf(seq, "\tSafety Features: %s\n", |
|---|
| 4578 | + priv->dma_cap.asp ? "Y" : "N"); |
|---|
| 4579 | + seq_printf(seq, "\tFlexible RX Parser: %s\n", |
|---|
| 4580 | + priv->dma_cap.frpsel ? "Y" : "N"); |
|---|
| 4581 | + seq_printf(seq, "\tEnhanced Addressing: %d\n", |
|---|
| 4582 | + priv->dma_cap.addr64); |
|---|
| 4583 | + seq_printf(seq, "\tReceive Side Scaling: %s\n", |
|---|
| 4584 | + priv->dma_cap.rssen ? "Y" : "N"); |
|---|
| 4585 | + seq_printf(seq, "\tVLAN Hash Filtering: %s\n", |
|---|
| 4586 | + priv->dma_cap.vlhash ? "Y" : "N"); |
|---|
| 4587 | + seq_printf(seq, "\tSplit Header: %s\n", |
|---|
| 4588 | + priv->dma_cap.sphen ? "Y" : "N"); |
|---|
| 4589 | + seq_printf(seq, "\tVLAN TX Insertion: %s\n", |
|---|
| 4590 | + priv->dma_cap.vlins ? "Y" : "N"); |
|---|
| 4591 | + seq_printf(seq, "\tDouble VLAN: %s\n", |
|---|
| 4592 | + priv->dma_cap.dvlan ? "Y" : "N"); |
|---|
| 4593 | + seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", |
|---|
| 4594 | + priv->dma_cap.l3l4fnum); |
|---|
| 4595 | + seq_printf(seq, "\tARP Offloading: %s\n", |
|---|
| 4596 | + priv->dma_cap.arpoffsel ? "Y" : "N"); |
|---|
| 4597 | + seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", |
|---|
| 4598 | + priv->dma_cap.estsel ? "Y" : "N"); |
|---|
| 4599 | + seq_printf(seq, "\tFrame Preemption (FPE): %s\n", |
|---|
| 4600 | + priv->dma_cap.fpesel ? "Y" : "N"); |
|---|
| 4601 | + seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", |
|---|
| 4602 | + priv->dma_cap.tbssel ? "Y" : "N"); |
|---|
| 4068 | 4603 | return 0; |
|---|
| 4069 | 4604 | } |
|---|
| 4605 | +DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); |
|---|
| 4070 | 4606 | |
|---|
| 4071 | | -static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) |
|---|
| 4607 | +/* Use network device events to rename debugfs file entries. |
|---|
| 4608 | + */ |
|---|
| 4609 | +static int stmmac_device_event(struct notifier_block *unused, |
|---|
| 4610 | + unsigned long event, void *ptr) |
|---|
| 4072 | 4611 | { |
|---|
| 4073 | | - return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); |
|---|
| 4612 | + struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
|---|
| 4613 | + struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 4614 | + |
|---|
| 4615 | + if (dev->netdev_ops != &stmmac_netdev_ops) |
|---|
| 4616 | + goto done; |
|---|
| 4617 | + |
|---|
| 4618 | + switch (event) { |
|---|
| 4619 | + case NETDEV_CHANGENAME: |
|---|
| 4620 | + if (priv->dbgfs_dir) |
|---|
| 4621 | + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, |
|---|
| 4622 | + priv->dbgfs_dir, |
|---|
| 4623 | + stmmac_fs_dir, |
|---|
| 4624 | + dev->name); |
|---|
| 4625 | + break; |
|---|
| 4626 | + } |
|---|
| 4627 | +done: |
|---|
| 4628 | + return NOTIFY_DONE; |
|---|
| 4074 | 4629 | } |
|---|
| 4075 | 4630 | |
|---|
| 4076 | | -static const struct file_operations stmmac_dma_cap_fops = { |
|---|
| 4077 | | - .owner = THIS_MODULE, |
|---|
| 4078 | | - .open = stmmac_sysfs_dma_cap_open, |
|---|
| 4079 | | - .read = seq_read, |
|---|
| 4080 | | - .llseek = seq_lseek, |
|---|
| 4081 | | - .release = single_release, |
|---|
| 4631 | +static struct notifier_block stmmac_notifier = { |
|---|
| 4632 | + .notifier_call = stmmac_device_event, |
|---|
| 4082 | 4633 | }; |
|---|
| 4083 | 4634 | |
|---|
| 4084 | | -static int stmmac_init_fs(struct net_device *dev) |
|---|
| 4635 | +static void stmmac_init_fs(struct net_device *dev) |
|---|
| 4085 | 4636 | { |
|---|
| 4086 | 4637 | struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 4638 | + |
|---|
| 4639 | + rtnl_lock(); |
|---|
| 4087 | 4640 | |
|---|
| 4088 | 4641 | /* Create per netdev entries */ |
|---|
| 4089 | 4642 | priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); |
|---|
| 4090 | 4643 | |
|---|
| 4091 | | - if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { |
|---|
| 4092 | | - netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); |
|---|
| 4093 | | - |
|---|
| 4094 | | - return -ENOMEM; |
|---|
| 4095 | | - } |
|---|
| 4096 | | - |
|---|
| 4097 | 4644 | /* Entry to report DMA RX/TX rings */ |
|---|
| 4098 | | - priv->dbgfs_rings_status = |
|---|
| 4099 | | - debugfs_create_file("descriptors_status", 0444, |
|---|
| 4100 | | - priv->dbgfs_dir, dev, |
|---|
| 4101 | | - &stmmac_rings_status_fops); |
|---|
| 4102 | | - |
|---|
| 4103 | | - if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { |
|---|
| 4104 | | - netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); |
|---|
| 4105 | | - debugfs_remove_recursive(priv->dbgfs_dir); |
|---|
| 4106 | | - |
|---|
| 4107 | | - return -ENOMEM; |
|---|
| 4108 | | - } |
|---|
| 4645 | + debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, |
|---|
| 4646 | + &stmmac_rings_status_fops); |
|---|
| 4109 | 4647 | |
|---|
| 4110 | 4648 | /* Entry to report the DMA HW features */ |
|---|
| 4111 | | - priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444, |
|---|
| 4112 | | - priv->dbgfs_dir, |
|---|
| 4113 | | - dev, &stmmac_dma_cap_fops); |
|---|
| 4649 | + debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, |
|---|
| 4650 | + &stmmac_dma_cap_fops); |
|---|
| 4114 | 4651 | |
|---|
| 4115 | | - if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { |
|---|
| 4116 | | - netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); |
|---|
| 4117 | | - debugfs_remove_recursive(priv->dbgfs_dir); |
|---|
| 4118 | | - |
|---|
| 4119 | | - return -ENOMEM; |
|---|
| 4120 | | - } |
|---|
| 4121 | | - |
|---|
| 4122 | | - return 0; |
|---|
| 4652 | + rtnl_unlock(); |
|---|
| 4123 | 4653 | } |
|---|
| 4124 | 4654 | |
|---|
| 4125 | 4655 | static void stmmac_exit_fs(struct net_device *dev) |
|---|
| .. | .. |
|---|
| 4129 | 4659 | debugfs_remove_recursive(priv->dbgfs_dir); |
|---|
| 4130 | 4660 | } |
|---|
| 4131 | 4661 | #endif /* CONFIG_DEBUG_FS */ |
|---|
| 4662 | + |
|---|
| 4663 | +static u32 stmmac_vid_crc32_le(__le16 vid_le) |
|---|
| 4664 | +{ |
|---|
| 4665 | + unsigned char *data = (unsigned char *)&vid_le; |
|---|
| 4666 | + unsigned char data_byte = 0; |
|---|
| 4667 | + u32 crc = ~0x0; |
|---|
| 4668 | + u32 temp = 0; |
|---|
| 4669 | + int i, bits; |
|---|
| 4670 | + |
|---|
| 4671 | + bits = get_bitmask_order(VLAN_VID_MASK); |
|---|
| 4672 | + for (i = 0; i < bits; i++) { |
|---|
| 4673 | + if ((i % 8) == 0) |
|---|
| 4674 | + data_byte = data[i / 8]; |
|---|
| 4675 | + |
|---|
| 4676 | + temp = ((crc & 1) ^ data_byte) & 1; |
|---|
| 4677 | + crc >>= 1; |
|---|
| 4678 | + data_byte >>= 1; |
|---|
| 4679 | + |
|---|
| 4680 | + if (temp) |
|---|
| 4681 | + crc ^= 0xedb88320; |
|---|
| 4682 | + } |
|---|
| 4683 | + |
|---|
| 4684 | + return crc; |
|---|
| 4685 | +} |
|---|
| 4686 | + |
|---|
| 4687 | +static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) |
|---|
| 4688 | +{ |
|---|
| 4689 | + u32 crc, hash = 0; |
|---|
| 4690 | + __le16 pmatch = 0; |
|---|
| 4691 | + int count = 0; |
|---|
| 4692 | + u16 vid = 0; |
|---|
| 4693 | + |
|---|
| 4694 | + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { |
|---|
| 4695 | + __le16 vid_le = cpu_to_le16(vid); |
|---|
| 4696 | + crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; |
|---|
| 4697 | + hash |= (1 << crc); |
|---|
| 4698 | + count++; |
|---|
| 4699 | + } |
|---|
| 4700 | + |
|---|
| 4701 | + if (!priv->dma_cap.vlhash) { |
|---|
| 4702 | + if (count > 2) /* VID = 0 always passes filter */ |
|---|
| 4703 | + return -EOPNOTSUPP; |
|---|
| 4704 | + |
|---|
| 4705 | + pmatch = cpu_to_le16(vid); |
|---|
| 4706 | + hash = 0; |
|---|
| 4707 | + } |
|---|
| 4708 | + |
|---|
| 4709 | + return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); |
|---|
| 4710 | +} |
|---|
| 4711 | + |
|---|
| 4712 | +static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) |
|---|
| 4713 | +{ |
|---|
| 4714 | + struct stmmac_priv *priv = netdev_priv(ndev); |
|---|
| 4715 | + bool is_double = false; |
|---|
| 4716 | + int ret; |
|---|
| 4717 | + |
|---|
| 4718 | + if (be16_to_cpu(proto) == ETH_P_8021AD) |
|---|
| 4719 | + is_double = true; |
|---|
| 4720 | + |
|---|
| 4721 | + set_bit(vid, priv->active_vlans); |
|---|
| 4722 | + ret = stmmac_vlan_update(priv, is_double); |
|---|
| 4723 | + if (ret) { |
|---|
| 4724 | + clear_bit(vid, priv->active_vlans); |
|---|
| 4725 | + return ret; |
|---|
| 4726 | + } |
|---|
| 4727 | + |
|---|
| 4728 | + if (priv->hw->num_vlan) { |
|---|
| 4729 | + ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); |
|---|
| 4730 | + if (ret) |
|---|
| 4731 | + return ret; |
|---|
| 4732 | + } |
|---|
| 4733 | + |
|---|
| 4734 | + return 0; |
|---|
| 4735 | +} |
|---|
| 4736 | + |
|---|
| 4737 | +static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) |
|---|
| 4738 | +{ |
|---|
| 4739 | + struct stmmac_priv *priv = netdev_priv(ndev); |
|---|
| 4740 | + bool is_double = false; |
|---|
| 4741 | + int ret; |
|---|
| 4742 | + |
|---|
| 4743 | + ret = pm_runtime_get_sync(priv->device); |
|---|
| 4744 | + if (ret < 0) { |
|---|
| 4745 | + pm_runtime_put_noidle(priv->device); |
|---|
| 4746 | + return ret; |
|---|
| 4747 | + } |
|---|
| 4748 | + |
|---|
| 4749 | + if (be16_to_cpu(proto) == ETH_P_8021AD) |
|---|
| 4750 | + is_double = true; |
|---|
| 4751 | + |
|---|
| 4752 | + clear_bit(vid, priv->active_vlans); |
|---|
| 4753 | + |
|---|
| 4754 | + if (priv->hw->num_vlan) { |
|---|
| 4755 | + ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); |
|---|
| 4756 | + if (ret) |
|---|
| 4757 | + goto del_vlan_error; |
|---|
| 4758 | + } |
|---|
| 4759 | + |
|---|
| 4760 | + ret = stmmac_vlan_update(priv, is_double); |
|---|
| 4761 | + |
|---|
| 4762 | +del_vlan_error: |
|---|
| 4763 | + pm_runtime_put(priv->device); |
|---|
| 4764 | + |
|---|
| 4765 | + return ret; |
|---|
| 4766 | +} |
|---|
| 4132 | 4767 | |
|---|
| 4133 | 4768 | static const struct net_device_ops stmmac_netdev_ops = { |
|---|
| 4134 | 4769 | .ndo_open = stmmac_open, |
|---|
| .. | .. |
|---|
| 4146 | 4781 | .ndo_poll_controller = stmmac_poll_controller, |
|---|
| 4147 | 4782 | #endif |
|---|
| 4148 | 4783 | .ndo_set_mac_address = stmmac_set_mac_address, |
|---|
| 4784 | + .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, |
|---|
| 4785 | + .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, |
|---|
| 4149 | 4786 | }; |
|---|
| 4150 | 4787 | |
|---|
| 4151 | 4788 | static void stmmac_reset_subtask(struct stmmac_priv *priv) |
|---|
| .. | .. |
|---|
| 4164 | 4801 | |
|---|
| 4165 | 4802 | set_bit(STMMAC_DOWN, &priv->state); |
|---|
| 4166 | 4803 | dev_close(priv->dev); |
|---|
| 4167 | | - dev_open(priv->dev); |
|---|
| 4804 | + dev_open(priv->dev, NULL); |
|---|
| 4168 | 4805 | clear_bit(STMMAC_DOWN, &priv->state); |
|---|
| 4169 | 4806 | clear_bit(STMMAC_RESETING, &priv->state); |
|---|
| 4170 | 4807 | rtnl_unlock(); |
|---|
| .. | .. |
|---|
| 4214 | 4851 | priv->plat->enh_desc = priv->dma_cap.enh_desc; |
|---|
| 4215 | 4852 | priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; |
|---|
| 4216 | 4853 | priv->hw->pmt = priv->plat->pmt; |
|---|
| 4854 | + if (priv->dma_cap.hash_tb_sz) { |
|---|
| 4855 | + priv->hw->multicast_filter_bins = |
|---|
| 4856 | + (BIT(priv->dma_cap.hash_tb_sz) << 5); |
|---|
| 4857 | + priv->hw->mcast_bits_log2 = |
|---|
| 4858 | + ilog2(priv->hw->multicast_filter_bins); |
|---|
| 4859 | + } |
|---|
| 4217 | 4860 | |
|---|
| 4218 | 4861 | /* TXCOE doesn't work in thresh DMA mode */ |
|---|
| 4219 | 4862 | if (priv->plat->force_thresh_dma_mode) |
|---|
| .. | .. |
|---|
| 4250 | 4893 | if (priv->dma_cap.tsoen) |
|---|
| 4251 | 4894 | dev_info(priv->device, "TSO supported\n"); |
|---|
| 4252 | 4895 | |
|---|
| 4896 | + priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; |
|---|
| 4897 | + priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; |
|---|
| 4898 | + |
|---|
| 4253 | 4899 | /* Run HW quirks, if any */ |
|---|
| 4254 | 4900 | if (priv->hwif_quirks) { |
|---|
| 4255 | 4901 | ret = priv->hwif_quirks(priv); |
|---|
| .. | .. |
|---|
| 4272 | 4918 | return 0; |
|---|
| 4273 | 4919 | } |
|---|
| 4274 | 4920 | |
|---|
| 4921 | +static void stmmac_napi_add(struct net_device *dev) |
|---|
| 4922 | +{ |
|---|
| 4923 | + struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 4924 | + u32 queue, maxq; |
|---|
| 4925 | + |
|---|
| 4926 | + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
|---|
| 4927 | + |
|---|
| 4928 | + for (queue = 0; queue < maxq; queue++) { |
|---|
| 4929 | + struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 4930 | + int rx_budget = ((priv->plat->dma_rx_size < NAPI_POLL_WEIGHT) && |
|---|
| 4931 | + (priv->plat->dma_rx_size > 0)) ? |
|---|
| 4932 | + priv->plat->dma_rx_size : NAPI_POLL_WEIGHT; |
|---|
| 4933 | + int tx_budget = ((priv->plat->dma_tx_size < NAPI_POLL_WEIGHT) && |
|---|
| 4934 | + (priv->plat->dma_tx_size > 0)) ? |
|---|
| 4935 | + priv->plat->dma_tx_size : NAPI_POLL_WEIGHT; |
|---|
| 4936 | + |
|---|
| 4937 | + ch->priv_data = priv; |
|---|
| 4938 | + ch->index = queue; |
|---|
| 4939 | + spin_lock_init(&ch->lock); |
|---|
| 4940 | + |
|---|
| 4941 | + if (queue < priv->plat->rx_queues_to_use) { |
|---|
| 4942 | + netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, |
|---|
| 4943 | + rx_budget); |
|---|
| 4944 | + } |
|---|
| 4945 | + if (queue < priv->plat->tx_queues_to_use) { |
|---|
| 4946 | + netif_tx_napi_add(dev, &ch->tx_napi, |
|---|
| 4947 | + stmmac_napi_poll_tx, tx_budget); |
|---|
| 4948 | + } |
|---|
| 4949 | + } |
|---|
| 4950 | +} |
|---|
| 4951 | + |
|---|
| 4952 | +static void stmmac_napi_del(struct net_device *dev) |
|---|
| 4953 | +{ |
|---|
| 4954 | + struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 4955 | + u32 queue, maxq; |
|---|
| 4956 | + |
|---|
| 4957 | + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
|---|
| 4958 | + |
|---|
| 4959 | + for (queue = 0; queue < maxq; queue++) { |
|---|
| 4960 | + struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 4961 | + |
|---|
| 4962 | + if (queue < priv->plat->rx_queues_to_use) |
|---|
| 4963 | + netif_napi_del(&ch->rx_napi); |
|---|
| 4964 | + if (queue < priv->plat->tx_queues_to_use) |
|---|
| 4965 | + netif_napi_del(&ch->tx_napi); |
|---|
| 4966 | + } |
|---|
| 4967 | +} |
|---|
| 4968 | + |
|---|
| 4969 | +int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) |
|---|
| 4970 | +{ |
|---|
| 4971 | + struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 4972 | + int ret = 0; |
|---|
| 4973 | + |
|---|
| 4974 | + if (netif_running(dev)) |
|---|
| 4975 | + stmmac_release(dev); |
|---|
| 4976 | + |
|---|
| 4977 | + stmmac_napi_del(dev); |
|---|
| 4978 | + |
|---|
| 4979 | + priv->plat->rx_queues_to_use = rx_cnt; |
|---|
| 4980 | + priv->plat->tx_queues_to_use = tx_cnt; |
|---|
| 4981 | + |
|---|
| 4982 | + stmmac_napi_add(dev); |
|---|
| 4983 | + |
|---|
| 4984 | + if (netif_running(dev)) |
|---|
| 4985 | + ret = stmmac_open(dev); |
|---|
| 4986 | + |
|---|
| 4987 | + return ret; |
|---|
| 4988 | +} |
|---|
| 4989 | + |
|---|
| 4990 | +int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) |
|---|
| 4991 | +{ |
|---|
| 4992 | + struct stmmac_priv *priv = netdev_priv(dev); |
|---|
| 4993 | + int ret = 0; |
|---|
| 4994 | + |
|---|
| 4995 | + if (netif_running(dev)) |
|---|
| 4996 | + stmmac_release(dev); |
|---|
| 4997 | + |
|---|
| 4998 | + priv->dma_rx_size = rx_size; |
|---|
| 4999 | + priv->dma_tx_size = tx_size; |
|---|
| 5000 | + |
|---|
| 5001 | + if (netif_running(dev)) |
|---|
| 5002 | + ret = stmmac_open(dev); |
|---|
| 5003 | + |
|---|
| 5004 | + return ret; |
|---|
| 5005 | +} |
|---|
| 5006 | + |
|---|
| 4275 | 5007 | /** |
|---|
| 4276 | 5008 | * stmmac_dvr_probe |
|---|
| 4277 | 5009 | * @device: device pointer |
|---|
| .. | .. |
|---|
| 4288 | 5020 | { |
|---|
| 4289 | 5021 | struct net_device *ndev = NULL; |
|---|
| 4290 | 5022 | struct stmmac_priv *priv; |
|---|
| 4291 | | - u32 queue, maxq; |
|---|
| 4292 | | - int ret = 0; |
|---|
| 5023 | + u32 rxq; |
|---|
| 5024 | + int i, ret = 0; |
|---|
| 4293 | 5025 | |
|---|
| 4294 | | - ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
|---|
| 4295 | | - MTL_MAX_TX_QUEUES, |
|---|
| 4296 | | - MTL_MAX_RX_QUEUES); |
|---|
| 5026 | + ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), |
|---|
| 5027 | + MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); |
|---|
| 4297 | 5028 | if (!ndev) |
|---|
| 4298 | 5029 | return -ENOMEM; |
|---|
| 4299 | 5030 | |
|---|
| .. | .. |
|---|
| 4313 | 5044 | priv->wol_irq = res->wol_irq; |
|---|
| 4314 | 5045 | priv->lpi_irq = res->lpi_irq; |
|---|
| 4315 | 5046 | |
|---|
| 4316 | | - if (res->mac) |
|---|
| 5047 | + if (!IS_ERR_OR_NULL(res->mac)) |
|---|
| 4317 | 5048 | memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); |
|---|
| 4318 | 5049 | |
|---|
| 4319 | 5050 | dev_set_drvdata(device, priv->dev); |
|---|
| .. | .. |
|---|
| 4325 | 5056 | priv->wq = create_singlethread_workqueue("stmmac_wq"); |
|---|
| 4326 | 5057 | if (!priv->wq) { |
|---|
| 4327 | 5058 | dev_err(priv->device, "failed to create workqueue\n"); |
|---|
| 4328 | | - ret = -ENOMEM; |
|---|
| 4329 | | - goto error_wq; |
|---|
| 5059 | + return -ENOMEM; |
|---|
| 4330 | 5060 | } |
|---|
| 4331 | 5061 | |
|---|
| 4332 | 5062 | INIT_WORK(&priv->service_task, stmmac_service_task); |
|---|
| .. | .. |
|---|
| 4354 | 5084 | |
|---|
| 4355 | 5085 | stmmac_check_ether_addr(priv); |
|---|
| 4356 | 5086 | |
|---|
| 4357 | | - /* Configure real RX and TX queues */ |
|---|
| 4358 | | - netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); |
|---|
| 4359 | | - netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); |
|---|
| 4360 | | - |
|---|
| 4361 | 5087 | ndev->netdev_ops = &stmmac_netdev_ops; |
|---|
| 4362 | 5088 | |
|---|
| 4363 | 5089 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
|---|
| .. | .. |
|---|
| 4375 | 5101 | priv->tso = true; |
|---|
| 4376 | 5102 | dev_info(priv->device, "TSO feature enabled\n"); |
|---|
| 4377 | 5103 | } |
|---|
| 5104 | + |
|---|
| 5105 | + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { |
|---|
| 5106 | + ndev->hw_features |= NETIF_F_GRO; |
|---|
| 5107 | + if (!priv->plat->sph_disable) { |
|---|
| 5108 | + priv->sph = true; |
|---|
| 5109 | + dev_info(priv->device, "SPH feature enabled\n"); |
|---|
| 5110 | + } |
|---|
| 5111 | + } |
|---|
| 5112 | + |
|---|
| 5113 | + /* The current IP register MAC_HW_Feature1[ADDR64] only define |
|---|
| 5114 | + * 32/40/64 bit width, but some SOC support others like i.MX8MP |
|---|
| 5115 | + * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. |
|---|
| 5116 | + * So overwrite dma_cap.addr64 according to HW real design. |
|---|
| 5117 | + */ |
|---|
| 5118 | + if (priv->plat->addr64) |
|---|
| 5119 | + priv->dma_cap.addr64 = priv->plat->addr64; |
|---|
| 5120 | + |
|---|
| 5121 | + if (priv->dma_cap.addr64) { |
|---|
| 5122 | + ret = dma_set_mask_and_coherent(device, |
|---|
| 5123 | + DMA_BIT_MASK(priv->dma_cap.addr64)); |
|---|
| 5124 | + if (!ret) { |
|---|
| 5125 | + dev_info(priv->device, "Using %d bits DMA width\n", |
|---|
| 5126 | + priv->dma_cap.addr64); |
|---|
| 5127 | + |
|---|
| 5128 | + /* |
|---|
| 5129 | + * If more than 32 bits can be addressed, make sure to |
|---|
| 5130 | + * enable enhanced addressing mode. |
|---|
| 5131 | + */ |
|---|
| 5132 | + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) |
|---|
| 5133 | + priv->plat->dma_cfg->eame = true; |
|---|
| 5134 | + } else { |
|---|
| 5135 | + ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); |
|---|
| 5136 | + if (ret) { |
|---|
| 5137 | + dev_err(priv->device, "Failed to set DMA Mask\n"); |
|---|
| 5138 | + goto error_hw_init; |
|---|
| 5139 | + } |
|---|
| 5140 | + |
|---|
| 5141 | + priv->dma_cap.addr64 = 32; |
|---|
| 5142 | + } |
|---|
| 5143 | + } |
|---|
| 5144 | + |
|---|
| 4378 | 5145 | ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; |
|---|
| 4379 | 5146 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); |
|---|
| 4380 | 5147 | #ifdef STMMAC_VLAN_TAG_USED |
|---|
| 4381 | 5148 | /* Both mac100 and gmac support receive VLAN tag detection */ |
|---|
| 4382 | 5149 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; |
|---|
| 5150 | + if (priv->dma_cap.vlhash) { |
|---|
| 5151 | + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
|---|
| 5152 | + ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; |
|---|
| 5153 | + } |
|---|
| 5154 | + if (priv->dma_cap.vlins) { |
|---|
| 5155 | + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; |
|---|
| 5156 | + if (priv->dma_cap.dvlan) |
|---|
| 5157 | + ndev->features |= NETIF_F_HW_VLAN_STAG_TX; |
|---|
| 5158 | + } |
|---|
| 4383 | 5159 | #endif |
|---|
| 4384 | 5160 | priv->msg_enable = netif_msg_init(debug, default_msg_level); |
|---|
| 4385 | 5161 | |
|---|
| 5162 | + /* Initialize RSS */ |
|---|
| 5163 | + rxq = priv->plat->rx_queues_to_use; |
|---|
| 5164 | + netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); |
|---|
| 5165 | + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) |
|---|
| 5166 | + priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); |
|---|
| 5167 | + |
|---|
| 5168 | + if (priv->dma_cap.rssen && priv->plat->rss_en) |
|---|
| 5169 | + ndev->features |= NETIF_F_RXHASH; |
|---|
| 5170 | + |
|---|
| 4386 | 5171 | /* MTU range: 46 - hw-specific max */ |
|---|
| 4387 | 5172 | ndev->min_mtu = ETH_ZLEN - ETH_HLEN; |
|---|
| 4388 | | - if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) |
|---|
| 4389 | | - ndev->max_mtu = JUMBO_LEN; |
|---|
| 4390 | | - else if (priv->plat->has_xgmac) |
|---|
| 5173 | + if (priv->plat->has_xgmac) |
|---|
| 4391 | 5174 | ndev->max_mtu = XGMAC_JUMBO_LEN; |
|---|
| 5175 | + else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) |
|---|
| 5176 | + ndev->max_mtu = JUMBO_LEN; |
|---|
| 4392 | 5177 | else |
|---|
| 4393 | 5178 | ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); |
|---|
| 4394 | 5179 | /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu |
|---|
| .. | .. |
|---|
| 4406 | 5191 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ |
|---|
| 4407 | 5192 | |
|---|
| 4408 | 5193 | /* Setup channels NAPI */ |
|---|
| 4409 | | - maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
|---|
| 4410 | | - |
|---|
| 4411 | | - for (queue = 0; queue < maxq; queue++) { |
|---|
| 4412 | | - struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 4413 | | - |
|---|
| 4414 | | - ch->priv_data = priv; |
|---|
| 4415 | | - ch->index = queue; |
|---|
| 4416 | | - |
|---|
| 4417 | | - if (queue < priv->plat->rx_queues_to_use) |
|---|
| 4418 | | - ch->has_rx = true; |
|---|
| 4419 | | - if (queue < priv->plat->tx_queues_to_use) |
|---|
| 4420 | | - ch->has_tx = true; |
|---|
| 4421 | | - |
|---|
| 4422 | | - netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, |
|---|
| 4423 | | - NAPI_POLL_WEIGHT); |
|---|
| 4424 | | - } |
|---|
| 5194 | + stmmac_napi_add(ndev); |
|---|
| 4425 | 5195 | |
|---|
| 4426 | 5196 | mutex_init(&priv->lock); |
|---|
| 4427 | 5197 | |
|---|
| .. | .. |
|---|
| 4431 | 5201 | * set the MDC clock dynamically according to the csr actual |
|---|
| 4432 | 5202 | * clock input. |
|---|
| 4433 | 5203 | */ |
|---|
| 4434 | | - if (!priv->plat->clk_csr) |
|---|
| 4435 | | - stmmac_clk_csr_set(priv); |
|---|
| 4436 | | - else |
|---|
| 5204 | + if (priv->plat->clk_csr >= 0) |
|---|
| 4437 | 5205 | priv->clk_csr = priv->plat->clk_csr; |
|---|
| 5206 | + else |
|---|
| 5207 | + stmmac_clk_csr_set(priv); |
|---|
| 4438 | 5208 | |
|---|
| 4439 | 5209 | stmmac_check_pcs_mode(priv); |
|---|
| 4440 | 5210 | |
|---|
| 4441 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
|---|
| 4442 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 5211 | + pm_runtime_get_noresume(device); |
|---|
| 5212 | + pm_runtime_set_active(device); |
|---|
| 5213 | + pm_runtime_enable(device); |
|---|
| 5214 | + |
|---|
| 5215 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 4443 | 5216 | priv->hw->pcs != STMMAC_PCS_RTBI) { |
|---|
| 4444 | 5217 | /* MDIO bus Registration */ |
|---|
| 4445 | 5218 | ret = stmmac_mdio_register(ndev); |
|---|
| .. | .. |
|---|
| 4451 | 5224 | } |
|---|
| 4452 | 5225 | } |
|---|
| 4453 | 5226 | |
|---|
| 5227 | + ret = stmmac_phy_setup(priv); |
|---|
| 5228 | + if (ret) { |
|---|
| 5229 | + netdev_err(ndev, "failed to setup phy (%d)\n", ret); |
|---|
| 5230 | + goto error_phy_setup; |
|---|
| 5231 | + } |
|---|
| 5232 | + |
|---|
| 4454 | 5233 | ret = register_netdev(ndev); |
|---|
| 4455 | 5234 | if (ret) { |
|---|
| 4456 | 5235 | dev_err(priv->device, "%s: ERROR %i registering the device\n", |
|---|
| .. | .. |
|---|
| 4459 | 5238 | } |
|---|
| 4460 | 5239 | |
|---|
| 4461 | 5240 | #ifdef CONFIG_DEBUG_FS |
|---|
| 4462 | | - ret = stmmac_init_fs(ndev); |
|---|
| 4463 | | - if (ret < 0) |
|---|
| 4464 | | - netdev_warn(priv->dev, "%s: failed debugFS registration\n", |
|---|
| 4465 | | - __func__); |
|---|
| 5241 | + stmmac_init_fs(ndev); |
|---|
| 4466 | 5242 | #endif |
|---|
| 5243 | + |
|---|
| 5244 | + /* Let pm_runtime_put() disable the clocks. |
|---|
| 5245 | + * If CONFIG_PM is not enabled, the clocks will stay powered. |
|---|
| 5246 | + */ |
|---|
| 5247 | + pm_runtime_put(device); |
|---|
| 4467 | 5248 | |
|---|
| 4468 | 5249 | return ret; |
|---|
| 4469 | 5250 | |
|---|
| 4470 | 5251 | error_netdev_register: |
|---|
| 4471 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
|---|
| 4472 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 5252 | + phylink_destroy(priv->phylink); |
|---|
| 5253 | +error_phy_setup: |
|---|
| 5254 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 4473 | 5255 | priv->hw->pcs != STMMAC_PCS_RTBI) |
|---|
| 4474 | 5256 | stmmac_mdio_unregister(ndev); |
|---|
| 4475 | 5257 | error_mdio_register: |
|---|
| 4476 | | - for (queue = 0; queue < maxq; queue++) { |
|---|
| 4477 | | - struct stmmac_channel *ch = &priv->channel[queue]; |
|---|
| 4478 | | - |
|---|
| 4479 | | - netif_napi_del(&ch->napi); |
|---|
| 4480 | | - } |
|---|
| 5258 | + stmmac_napi_del(ndev); |
|---|
| 4481 | 5259 | error_hw_init: |
|---|
| 4482 | 5260 | destroy_workqueue(priv->wq); |
|---|
| 4483 | | -error_wq: |
|---|
| 4484 | | - free_netdev(ndev); |
|---|
| 4485 | 5261 | |
|---|
| 4486 | 5262 | return ret; |
|---|
| 4487 | 5263 | } |
|---|
| .. | .. |
|---|
| 4500 | 5276 | |
|---|
| 4501 | 5277 | netdev_info(priv->dev, "%s: removing driver", __func__); |
|---|
| 4502 | 5278 | |
|---|
| 4503 | | -#ifdef CONFIG_DEBUG_FS |
|---|
| 4504 | | - stmmac_exit_fs(ndev); |
|---|
| 4505 | | -#endif |
|---|
| 4506 | 5279 | stmmac_stop_all_dma(priv); |
|---|
| 4507 | | - |
|---|
| 4508 | 5280 | stmmac_mac_set(priv, priv->ioaddr, false); |
|---|
| 4509 | 5281 | netif_carrier_off(ndev); |
|---|
| 4510 | 5282 | unregister_netdev(ndev); |
|---|
| 5283 | + |
|---|
| 5284 | + /* Serdes power down needs to happen after VLAN filter |
|---|
| 5285 | + * is deleted that is triggered by unregister_netdev(). |
|---|
| 5286 | + */ |
|---|
| 5287 | + if (priv->plat->serdes_powerdown) |
|---|
| 5288 | + priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); |
|---|
| 5289 | + |
|---|
| 5290 | +#ifdef CONFIG_DEBUG_FS |
|---|
| 5291 | + stmmac_exit_fs(ndev); |
|---|
| 5292 | +#endif |
|---|
| 5293 | + phylink_destroy(priv->phylink); |
|---|
| 4511 | 5294 | if (priv->plat->stmmac_rst) |
|---|
| 4512 | 5295 | reset_control_assert(priv->plat->stmmac_rst); |
|---|
| 4513 | | - clk_disable_unprepare(priv->plat->pclk); |
|---|
| 4514 | | - clk_disable_unprepare(priv->plat->stmmac_clk); |
|---|
| 4515 | | - if (priv->hw->pcs != STMMAC_PCS_RGMII && |
|---|
| 4516 | | - priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 5296 | + pm_runtime_put(dev); |
|---|
| 5297 | + pm_runtime_disable(dev); |
|---|
| 5298 | + if (priv->hw->pcs != STMMAC_PCS_TBI && |
|---|
| 4517 | 5299 | priv->hw->pcs != STMMAC_PCS_RTBI) |
|---|
| 4518 | 5300 | stmmac_mdio_unregister(ndev); |
|---|
| 4519 | 5301 | destroy_workqueue(priv->wq); |
|---|
| 4520 | 5302 | mutex_destroy(&priv->lock); |
|---|
| 4521 | | - free_netdev(ndev); |
|---|
| 4522 | 5303 | |
|---|
| 4523 | 5304 | return 0; |
|---|
| 4524 | 5305 | } |
|---|
| .. | .. |
|---|
| 4540 | 5321 | if (!ndev || !netif_running(ndev)) |
|---|
| 4541 | 5322 | return 0; |
|---|
| 4542 | 5323 | |
|---|
| 4543 | | - if (ndev->phydev) |
|---|
| 4544 | | - phy_stop(ndev->phydev); |
|---|
| 5324 | + phylink_mac_change(priv->phylink, false); |
|---|
| 4545 | 5325 | |
|---|
| 4546 | 5326 | mutex_lock(&priv->lock); |
|---|
| 4547 | 5327 | |
|---|
| .. | .. |
|---|
| 4560 | 5340 | /* Stop TX/RX DMA */ |
|---|
| 4561 | 5341 | stmmac_stop_all_dma(priv); |
|---|
| 4562 | 5342 | |
|---|
| 5343 | + if (priv->plat->serdes_powerdown) |
|---|
| 5344 | + priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); |
|---|
| 5345 | + |
|---|
| 4563 | 5346 | /* Enable Power down mode by programming the PMT regs */ |
|---|
| 4564 | | - if (device_may_wakeup(priv->device)) { |
|---|
| 5347 | + if (device_may_wakeup(priv->device) && priv->plat->pmt) { |
|---|
| 4565 | 5348 | stmmac_pmt(priv, priv->hw, priv->wolopts); |
|---|
| 4566 | 5349 | priv->irq_wake = 1; |
|---|
| 4567 | 5350 | } else { |
|---|
| 5351 | + mutex_unlock(&priv->lock); |
|---|
| 5352 | + rtnl_lock(); |
|---|
| 5353 | + if (device_may_wakeup(priv->device)) |
|---|
| 5354 | + phylink_speed_down(priv->phylink, false); |
|---|
| 4568 | 5355 | if (priv->plat->integrated_phy_power) |
|---|
| 4569 | 5356 | priv->plat->integrated_phy_power(priv->plat->bsp_priv, |
|---|
| 4570 | 5357 | false); |
|---|
| 5358 | + phylink_stop(priv->phylink); |
|---|
| 5359 | + rtnl_unlock(); |
|---|
| 5360 | + mutex_lock(&priv->lock); |
|---|
| 5361 | + |
|---|
| 4571 | 5362 | stmmac_mac_set(priv, priv->ioaddr, false); |
|---|
| 4572 | 5363 | pinctrl_pm_select_sleep_state(priv->device); |
|---|
| 4573 | | - /* Disable clock in case of PWM is off */ |
|---|
| 4574 | | - if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP)) |
|---|
| 4575 | | - clk_disable_unprepare(priv->plat->clk_ptp_ref); |
|---|
| 4576 | | - clk_disable_unprepare(priv->plat->pclk); |
|---|
| 4577 | | - clk_disable_unprepare(priv->plat->stmmac_clk); |
|---|
| 4578 | 5364 | } |
|---|
| 4579 | 5365 | mutex_unlock(&priv->lock); |
|---|
| 4580 | 5366 | |
|---|
| 4581 | | - priv->oldlink = false; |
|---|
| 4582 | 5367 | priv->speed = SPEED_UNKNOWN; |
|---|
| 4583 | | - priv->oldduplex = DUPLEX_UNKNOWN; |
|---|
| 4584 | 5368 | return 0; |
|---|
| 4585 | 5369 | } |
|---|
| 4586 | 5370 | EXPORT_SYMBOL_GPL(stmmac_suspend); |
|---|
| 4587 | 5371 | |
|---|
| 4588 | 5372 | /** |
|---|
| 4589 | 5373 | * stmmac_reset_queues_param - reset queue parameters |
|---|
| 4590 | | - * @dev: device pointer |
|---|
| 5374 | + * @priv: device pointer |
|---|
| 4591 | 5375 | */ |
|---|
| 4592 | 5376 | static void stmmac_reset_queues_param(struct stmmac_priv *priv) |
|---|
| 4593 | 5377 | { |
|---|
| .. | .. |
|---|
| 4623 | 5407 | { |
|---|
| 4624 | 5408 | struct net_device *ndev = dev_get_drvdata(dev); |
|---|
| 4625 | 5409 | struct stmmac_priv *priv = netdev_priv(ndev); |
|---|
| 5410 | + int ret; |
|---|
| 4626 | 5411 | |
|---|
| 4627 | 5412 | if (!netif_running(ndev)) |
|---|
| 4628 | 5413 | return 0; |
|---|
| .. | .. |
|---|
| 4633 | 5418 | * this bit because it can generate problems while resuming |
|---|
| 4634 | 5419 | * from another devices (e.g. serial console). |
|---|
| 4635 | 5420 | */ |
|---|
| 4636 | | - if (device_may_wakeup(priv->device)) { |
|---|
| 5421 | + if (device_may_wakeup(priv->device) && priv->plat->pmt) { |
|---|
| 4637 | 5422 | mutex_lock(&priv->lock); |
|---|
| 4638 | 5423 | stmmac_pmt(priv, priv->hw, 0); |
|---|
| 4639 | 5424 | mutex_unlock(&priv->lock); |
|---|
| 4640 | 5425 | priv->irq_wake = 0; |
|---|
| 4641 | 5426 | } else { |
|---|
| 4642 | 5427 | pinctrl_pm_select_default_state(priv->device); |
|---|
| 4643 | | - /* enable the clk previously disabled */ |
|---|
| 4644 | | - clk_prepare_enable(priv->plat->stmmac_clk); |
|---|
| 4645 | | - clk_prepare_enable(priv->plat->pclk); |
|---|
| 4646 | | - if (priv->plat->clk_ptp_ref && IS_ENABLED(CONFIG_STMMAC_PTP)) |
|---|
| 4647 | | - clk_prepare_enable(priv->plat->clk_ptp_ref); |
|---|
| 4648 | 5428 | /* reset the phy so that it's ready */ |
|---|
| 4649 | 5429 | if (priv->mii) |
|---|
| 4650 | 5430 | stmmac_mdio_reset(priv->mii); |
|---|
| .. | .. |
|---|
| 4653 | 5433 | true); |
|---|
| 4654 | 5434 | } |
|---|
| 4655 | 5435 | |
|---|
| 5436 | + if (priv->plat->serdes_powerup) { |
|---|
| 5437 | + ret = priv->plat->serdes_powerup(ndev, |
|---|
| 5438 | + priv->plat->bsp_priv); |
|---|
| 5439 | + |
|---|
| 5440 | + if (ret < 0) |
|---|
| 5441 | + return ret; |
|---|
| 5442 | + } |
|---|
| 5443 | + |
|---|
| 5444 | + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { |
|---|
| 5445 | + rtnl_lock(); |
|---|
| 5446 | + phylink_start(priv->phylink); |
|---|
| 5447 | + /* We may have called phylink_speed_down before */ |
|---|
| 5448 | + phylink_speed_up(priv->phylink); |
|---|
| 5449 | + rtnl_unlock(); |
|---|
| 5450 | + } |
|---|
| 5451 | + |
|---|
| 5452 | + rtnl_lock(); |
|---|
| 4656 | 5453 | mutex_lock(&priv->lock); |
|---|
| 4657 | 5454 | |
|---|
| 4658 | 5455 | stmmac_reset_queues_param(priv); |
|---|
| .. | .. |
|---|
| 4661 | 5458 | stmmac_clear_descriptors(priv); |
|---|
| 4662 | 5459 | |
|---|
| 4663 | 5460 | stmmac_hw_setup(ndev, false); |
|---|
| 4664 | | - stmmac_init_tx_coalesce(priv); |
|---|
| 5461 | + stmmac_init_coalesce(priv); |
|---|
| 4665 | 5462 | stmmac_set_rx_mode(ndev); |
|---|
| 5463 | + |
|---|
| 5464 | + stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); |
|---|
| 4666 | 5465 | |
|---|
| 4667 | 5466 | stmmac_enable_all_queues(priv); |
|---|
| 4668 | 5467 | |
|---|
| 4669 | | - netif_device_attach(ndev); |
|---|
| 4670 | | - |
|---|
| 4671 | 5468 | mutex_unlock(&priv->lock); |
|---|
| 5469 | + rtnl_unlock(); |
|---|
| 4672 | 5470 | |
|---|
| 4673 | | - if (ndev->phydev) |
|---|
| 4674 | | - phy_start(ndev->phydev); |
|---|
| 5471 | + phylink_mac_change(priv->phylink, true); |
|---|
| 5472 | + |
|---|
| 5473 | + netif_device_attach(ndev); |
|---|
| 4675 | 5474 | |
|---|
| 4676 | 5475 | return 0; |
|---|
| 4677 | 5476 | } |
|---|
| .. | .. |
|---|
| 4683 | 5482 | char *opt; |
|---|
| 4684 | 5483 | |
|---|
| 4685 | 5484 | if (!str || !*str) |
|---|
| 4686 | | - return -EINVAL; |
|---|
| 5485 | + return 1; |
|---|
| 4687 | 5486 | while ((opt = strsep(&str, ",")) != NULL) { |
|---|
| 4688 | 5487 | if (!strncmp(opt, "debug:", 6)) { |
|---|
| 4689 | 5488 | if (kstrtoint(opt + 6, 0, &debug)) |
|---|
| .. | .. |
|---|
| 4714 | 5513 | goto err; |
|---|
| 4715 | 5514 | } |
|---|
| 4716 | 5515 | } |
|---|
| 4717 | | - return 0; |
|---|
| 5516 | + return 1; |
|---|
| 4718 | 5517 | |
|---|
| 4719 | 5518 | err: |
|---|
| 4720 | 5519 | pr_err("%s: ERROR broken module parameter conversion", __func__); |
|---|
| 4721 | | - return -EINVAL; |
|---|
| 5520 | + return 1; |
|---|
| 4722 | 5521 | } |
|---|
| 4723 | 5522 | |
|---|
| 4724 | 5523 | __setup("stmmaceth=", stmmac_cmdline_opt); |
|---|
| .. | .. |
|---|
| 4728 | 5527 | { |
|---|
| 4729 | 5528 | #ifdef CONFIG_DEBUG_FS |
|---|
| 4730 | 5529 | /* Create debugfs main directory if it doesn't exist yet */ |
|---|
| 4731 | | - if (!stmmac_fs_dir) { |
|---|
| 5530 | + if (!stmmac_fs_dir) |
|---|
| 4732 | 5531 | stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); |
|---|
| 4733 | | - |
|---|
| 4734 | | - if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { |
|---|
| 4735 | | - pr_err("ERROR %s, debugfs create directory failed\n", |
|---|
| 4736 | | - STMMAC_RESOURCE_NAME); |
|---|
| 4737 | | - |
|---|
| 4738 | | - return -ENOMEM; |
|---|
| 4739 | | - } |
|---|
| 4740 | | - } |
|---|
| 5532 | + register_netdevice_notifier(&stmmac_notifier); |
|---|
| 4741 | 5533 | #endif |
|---|
| 4742 | 5534 | |
|---|
| 4743 | 5535 | return 0; |
|---|
| .. | .. |
|---|
/* Module exit: undo stmmac_init() — unregister the netdevice notifier
 * and remove the driver's debugfs root (both exist only with
 * CONFIG_DEBUG_FS).
 */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
|---|