From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 08 Dec 2023 10:40:48 +0000
Subject: [PATCH] Remove rt
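
Remove the RT-specific local changes from the DPAA Ethernet driver and
realign dpaa_eth.c with the current upstream driver. Summarized from
the diff below, the resync:

- adds the FMan erratum A050385 workaround: 256-byte alignment for Rx
  buffer data and Tx S/G fragments, copying outgoing skbs into a
  freshly aligned linear buffer when they violate the constraints
- uses a single page-backed buffer pool per interface instead of the
  previous netdev_alloc_frag() based pools
- maps Rx/Tx buffers through the FMan port devices (rx_dma_dev,
  tx_dma_dev) instead of the platform parent device
- defers probing until the QMan/BMan devices and their portals have
  probed
- falls back to a random MAC address when none is provisioned
- extends the Tx drain delays in ndo_stop to msleep(200)
- converts buffer pool refcounting from atomic_t to refcount_t
- adopts current kernel APIs: two-argument ndo_tx_timeout, linkmode
  PHY masks, skb_frag_off()/skb_frag_size(), fallthrough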
---
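Note: with CONFIG_DPAA_ERRATUM_A050385 enabled, the Tx path copies an
skb into a fresh aligned buffer only when its layout violates the
erratum's rules. As a review aid, those rules can be restated as a
standalone predicate; this is an illustrative sketch only (the helper
name and the A050385_ALIGN constant are made up here), mirroring the
checks in dpaa_a050385_wa() in the diff below:

#include <linux/kernel.h>
#include <linux/skbuff.h>

#define A050385_ALIGN 256

/* Hypothetical helper: true if @skb may be transmitted as-is. */
static bool a050385_layout_ok(const struct sk_buff *skb)
{
	int i;

	/* the start of the data must be 256-byte aligned */
	if (!IS_ALIGNED((unsigned long)skb->data, A050385_ALIGN))
		return false;

	/* a linear skb only needs the aligned start */
	if (!skb_is_nonlinear(skb))
		return true;

	/* the linear part of a nonlinear skb must be a 256-byte multiple */
	if (!IS_ALIGNED(skb_headlen(skb), A050385_ALIGN))
		return false;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* every fragment must start on a 256-byte boundary */
		if (!IS_ALIGNED(skb_frag_off(frag), A050385_ALIGN))
			return false;

		/* all but the last fragment must have 256-byte sizes */
		if (i < skb_shinfo(skb)->nr_frags - 1 &&
		    !IS_ALIGNED(skb_frag_size(frag), A050385_ALIGN))
			return false;
	}

	return true;
}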
kernel/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 574 +++++++++++++++++++++++++++++++++++---------------------
1 file changed, 359 insertions(+), 215 deletions(-)
diff --git a/kernel/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/kernel/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4b21ae2..cb7c028 100644
--- a/kernel/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/kernel/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,4 +1,5 @@
/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -51,9 +52,9 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
+#include <linux/phy_fixed.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
-
#include "fman.h"
#include "fman_port.h"
#include "mac.h"
@@ -86,7 +87,7 @@
#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFUP | \
- NETIF_MSG_IFDOWN)
+ NETIF_MSG_IFDOWN | NETIF_MSG_HW)
#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
@@ -123,7 +124,22 @@
#define FSL_QMAN_MAX_OAL 127
/* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+ DPAA_A050385_ALIGN : 16)
+#else
#define DPAA_FD_DATA_ALIGNMENT 16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
/* The DPAA requires 256 bytes reserved and mapped for the SGT */
#define DPAA_SGT_SIZE 256
@@ -158,8 +174,18 @@
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
-#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
dpaa_rx_extra_headroom)
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
+#else
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
+#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -178,31 +204,14 @@
/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
-/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
- size_t res = DPAA_BP_RAW_SIZE / 4;
- u8 i;
- for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
- res *= 2;
- return res;
-}
-
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+ ~(DPAA_A050385_ALIGN - 1))
+#else
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
static int dpaa_max_frm;
@@ -255,8 +264,20 @@
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ if (is_valid_ether_addr(mac_addr)) {
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ eth_hw_addr_random(net_dev);
+ err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "Failed to set random MAC address\n");
+ return -EINVAL;
+ }
+ dev_info(dev, "Using random MAC address: %pM\n",
+ net_dev->dev_addr);
+ }
net_dev->ethtool_ops = &dpaa_ethtool_ops;
@@ -288,7 +309,7 @@
/* Allow the Fman (Tx) port to process in-flight frames before we
* try switching it off.
*/
- usleep_range(5000, 10000);
+ msleep(200);
err = mac_dev->stop(mac_dev);
if (err < 0)
@@ -305,10 +326,12 @@
phy_disconnect(net_dev->phydev);
net_dev->phydev = NULL;
+ msleep(200);
+
return err;
}
-static void dpaa_tx_timeout(struct net_device *net_dev)
+static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct dpaa_percpu_priv *percpu_priv;
const struct dpaa_priv *priv;
@@ -485,7 +508,7 @@
static bool dpaa_bpid2pool_use(int bpid)
{
if (dpaa_bpid2pool(bpid)) {
- atomic_inc(&dpaa_bp_array[bpid]->refs);
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
return true;
}
@@ -496,7 +519,7 @@
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
dpaa_bp_array[bpid] = dpaa_bp;
- atomic_set(&dpaa_bp->refs, 1);
+ refcount_set(&dpaa_bp->refs, 1);
}
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +607,7 @@
if (!bp)
return;
- if (!atomic_dec_and_test(&bp->refs))
+ if (!refcount_dec_and_test(&bp->refs))
return;
if (bp->free_buf_cb)
@@ -596,10 +619,7 @@
static void dpaa_bps_free(struct dpaa_priv *priv)
{
- int i;
-
- for (i = 0; i < DPAA_BPS_NUM; i++)
- dpaa_bp_free(priv->dpaa_bps[i]);
+ dpaa_bp_free(priv->dpaa_bp);
}
/* Use multiple WQs for FQ assignment:
@@ -773,16 +793,17 @@
qman_release_pool(rx_pool_channel);
}
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{
u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
const cpumask_t *cpus = qman_affine_cpus();
struct qman_portal *portal;
int cpu;
- for_each_cpu(cpu, cpus) {
+ for_each_cpu_and(cpu, cpus, cpu_online_mask) {
portal = qman_get_affine_portal(cpu);
qman_p_static_dequeue_add(portal, pool);
+ qman_start_using_portal(portal, dev);
}
}
@@ -896,12 +917,12 @@
u16 channels[NR_CPUS];
struct dpaa_fq *fq;
- for_each_cpu(cpu, affine_cpus)
+ for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
channels[num_portals++] = qman_affine_channel(cpu);
if (num_portals == 0)
dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
+ "No Qman software (affine) channels found\n");
/* Initialize each FQ in the list */
list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -929,7 +950,7 @@
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- /* fall through */
+ fallthrough;
case FQ_TYPE_TX_CONFIRM:
dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
break;
@@ -1197,15 +1218,15 @@
return err;
}
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+ struct dpaa_fq *errq,
struct dpaa_fq *defq, struct dpaa_fq *pcdq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
struct fman_port_params params;
- int i, err;
+ int err;
memset(&params, 0, sizeof(params));
memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1214,7 +1235,7 @@
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
buf_prefix_content.pass_time_stamp = true;
- buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+ buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
rx_p->err_fqid = errq->fqid;
@@ -1224,12 +1245,9 @@
rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
}
- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
- }
+ rx_p->ext_buf_pools.num_of_pools_used = 1;
+ rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
err = fman_port_config(port, &params);
if (err) {
@@ -1252,7 +1270,7 @@
}
static int dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
+ struct dpaa_bp *bp,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
@@ -1266,7 +1284,7 @@
if (err)
return err;
- err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
port_fqs->rx_defq, port_fqs->rx_pcdq,
&buf_layout[RX]);
@@ -1280,7 +1298,7 @@
err = bman_release(dpaa_bp->pool, bmb, cnt);
/* Should never occur, address anyway to avoid leaking the buffers */
- if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+ if (WARN_ON(err) && dpaa_bp->free_buf_cb)
while (cnt-- > 0)
dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
@@ -1335,15 +1353,16 @@
vaddr = phys_to_virt(qm_fd_addr(fd));
sgt = vaddr + qm_fd_get_offset(fd);
- dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
dpaa_release_sgt_members(sgt);
- addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dpaa_bp->dev, addr)) {
- dev_err(dpaa_bp->dev, "DMA mapping failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+ virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+ netdev_err(net_dev, "DMA mapping failed\n");
return;
}
bm_buffer_set64(&bmb, addr);
@@ -1396,7 +1415,7 @@
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
struct sk_buff *skb,
struct qm_fd *fd,
- char *parse_results)
+ void *parse_results)
{
struct fman_prs_result *parse_result;
u16 ethertype = ntohs(skb->protocol);
@@ -1488,25 +1507,24 @@
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
- struct device *dev = dpaa_bp->dev;
+ struct net_device *net_dev = dpaa_bp->priv->net_dev;
struct bm_buffer bmb[8];
dma_addr_t addr;
- void *new_buf;
+ struct page *p;
u8 i;
for (i = 0; i < 8; i++) {
- new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
- if (unlikely(!new_buf)) {
- dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
- dpaa_bp->raw_size);
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
goto release_previous_buffs;
}
- new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
- addr = dma_map_single(dev, new_buf,
- dpaa_bp->size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dpaa_bp->dev, "DMA map failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+ addr))) {
+ netdev_err(net_dev, "DMA map failed\n");
goto release_previous_buffs;
}
@@ -1581,17 +1599,16 @@
{
struct dpaa_bp *dpaa_bp;
int *countptr;
- int res, i;
+ int res;
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bp = priv->dpaa_bps[i];
- if (!dpaa_bp)
- return -EINVAL;
- countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- }
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
+
return 0;
}
@@ -1614,47 +1631,48 @@
struct device *dev = priv->net_dev->dev.parent;
struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr = phys_to_virt(addr);
const struct qm_sg_entry *sgt;
- struct sk_buff **skbh, *skb;
- int nr_frags, i;
+ struct sk_buff *skb;
u64 ns;
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- skb = *skbh;
+ int i;
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr,
- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
- dma_dir);
+ dma_unmap_page(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);
/* The sgt buffer has been allocated with netdev_alloc_frag(),
* it's from lowmem.
*/
- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
/* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i <= nr_frags; i++) {
+ for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
- dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
qm_sg_entry_get_len(&sgt[i]), dma_dir);
}
} else {
- dma_unmap_single(dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ priv->tx_headroom + qm_fd_get_length(fd),
+ dma_dir);
}
+
+ skb = *(struct sk_buff **)vaddr;
/* DMA unmapping is required before accessing the HW provided info */
if (ts && priv->tx_tstamp &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
&ns)) {
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
@@ -1664,8 +1682,8 @@
}
if (qm_fd_get_format(fd) == qm_fd_sg)
- /* Free the page frag that we allocated on Tx */
- skb_free_frag(phys_to_virt(addr));
+ /* Free the page that we allocated on Tx for the SGT */
+ free_pages((unsigned long)vaddr, 0);
return skb;
}
@@ -1686,6 +1704,8 @@
*/
return CHECKSUM_NONE;
}
+
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
@@ -1709,10 +1729,8 @@
skb = build_skb(vaddr, dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb)) {
- WARN_ONCE(1, "Build skb failure on Rx\n");
+ if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
goto free_buffer;
- }
WARN_ON(fd_off != priv->rx_headroom);
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));
@@ -1722,7 +1740,7 @@
return skb;
free_buffer:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1746,7 +1764,7 @@
int page_offset;
unsigned int sz;
int *count_ptr;
- int i;
+ int i, j;
vaddr = phys_to_virt(addr);
WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1760,22 +1778,21 @@
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- SMP_CACHE_BYTES));
+ WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
+
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
/* We may use multiple Rx pools */
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (!dpaa_bp)
goto free_buffers;
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
- DMA_FROM_DEVICE);
if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
skb = build_skb(sg_vaddr, sz);
- if (WARN_ON(unlikely(!skb)))
+ if (WARN_ON(!skb))
goto free_buffers;
skb->ip_summed = rx_csum_offload(priv, fd);
@@ -1813,7 +1830,9 @@
skb_add_rx_frag(skb, i - 1, head_page, frag_off,
frag_len, dpaa_bp->size);
}
+
/* Update the pool count for the current {cpu x bpool} */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
(*count_ptr)--;
if (qm_sg_entry_is_final(&sgt[i]))
@@ -1822,35 +1841,34 @@
WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
/* free the SG table buffer */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return skb;
free_buffers:
- /* compensate sw bpool counter changes */
- for (i--; i >= 0; i--) {
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)++;
- }
- }
/* free all the SG entries */
- for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
- sg_addr = qm_sg_addr(&sgt[i]);
+ for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+ sg_addr = qm_sg_addr(&sgt[j]);
sg_vaddr = phys_to_virt(sg_addr);
- skb_free_frag(sg_vaddr);
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)--;
+ /* all pages 0..i were unmapped */
+ if (j > i)
+ dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ free_pages((unsigned long)sg_vaddr, 0);
+ /* counters 0..i-1 were decremented */
+ if (j >= i) {
+ dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
}
- if (qm_sg_entry_is_final(&sgt[i]))
+ if (qm_sg_entry_is_final(&sgt[j]))
break;
}
/* free the SGT fragment */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1860,9 +1878,8 @@
int *offset)
{
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
+ unsigned char *buff_start;
struct sk_buff **skbh;
dma_addr_t addr;
int err;
@@ -1871,10 +1888,10 @@
* available, so just use that for offset.
*/
fd->bpid = FSL_DPAA_BPID_INV;
- buffer_start = skb->data - priv->tx_headroom;
+ buff_start = skb->data - priv->tx_headroom;
dma_dir = DMA_TO_DEVICE;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
/* Enable L3/L4 hardware checksum computation.
@@ -1883,7 +1900,7 @@
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1896,9 +1913,9 @@
fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ priv->tx_headroom + skb->len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
return -EINVAL;
@@ -1914,24 +1931,22 @@
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
const int nr_frags = skb_shinfo(skb)->nr_frags;
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
struct qm_sg_entry *sgt;
struct sk_buff **skbh;
- int i, j, err, sz;
- void *buffer_start;
+ void *buff_start;
skb_frag_t *frag;
dma_addr_t addr;
size_t frag_len;
- void *sgt_buf;
+ struct page *p;
+ int i, j, err;
- /* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
- sgt_buf = netdev_alloc_frag(sz);
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
- sz);
+ /* get a page to store the SGTable */
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
return -ENOMEM;
}
+ buff_start = page_address(p);
/* Enable L3/L4 hardware checksum computation.
*
@@ -1939,7 +1954,7 @@
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1948,15 +1963,15 @@
}
/* SGT[0] is used by the linear part */
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
frag_len = skb_headlen(skb);
qm_sg_entry_set_len(&sgt[0], frag_len);
sgt[0].bpid = FSL_DPAA_BPID_INV;
sgt[0].offset = 0;
- addr = dma_map_single(dev, skb->data,
+ addr = dma_map_single(priv->tx_dma_dev, skb->data,
skb_headlen(skb), dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg0_map_failed;
}
@@ -1965,12 +1980,12 @@
/* populate the rest of SGT entries */
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- frag_len = frag->size;
+ frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dev, frag, 0,
+ addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
frag_len, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg_map_failed;
}
@@ -1986,17 +2001,17 @@
/* Set the final bit in the last used entry of the SGT */
qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
+ /* set fd offset to priv->tx_headroom */
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
/* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
- addr = dma_map_single(dev, buffer_start,
- priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ addr = dma_map_page(priv->tx_dma_dev, p, 0,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sgt_map_failed;
}
@@ -2010,11 +2025,11 @@
sgt_map_failed:
sg_map_failed:
for (j = 0; j < i; j++)
- dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
- skb_free_frag(sgt_buf);
+ free_pages((unsigned long)buff_start, 0);
return err;
}
@@ -2050,6 +2065,83 @@
return 0;
}
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct sk_buff *new_skb, *skb = *s;
+ unsigned char *start, i;
+
+ /* check linear buffer alignment */
+ if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* linear buffers just need to have an aligned start */
+ if (!skb_is_nonlinear(skb))
+ return 0;
+
+ /* linear data size for nonlinear skbs needs to be aligned */
+ if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /* all fragments need to have aligned start addresses */
+ if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* all but last fragment need to have aligned sizes */
+ if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+ (i < skb_shinfo(skb)->nr_frags - 1))
+ goto workaround;
+ }
+
+ return 0;
+
+workaround:
+ /* copy all the skb content into a new linear buffer */
+ new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+ priv->tx_headroom);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+ skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+ /* Workaround for DPAA_A050385 requires data start to be aligned */
+ start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+ if (start - new_skb->data)
+ skb_reserve(new_skb, start - new_skb->data);
+
+ skb_put(new_skb, skb->len);
+ skb_copy_bits(skb, 0, new_skb->data, skb->len);
+ skb_copy_header(new_skb, skb);
+ new_skb->dev = skb->dev;
+
+ /* Copy relevant timestamp info from the old skb to the new */
+ if (priv->tx_tstamp) {
+ skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
+ skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
+ skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
+ if (skb->sk)
+ skb_set_owner_w(new_skb, skb->sk);
+ }
+
+ /* We move the headroom when we align it so we have to reset the
+ * network and transport header offsets relative to the new data
+ * pointer. The checksum offload relies on these offsets.
+ */
+ skb_set_network_header(new_skb, skb_network_offset(skb));
+ skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+ dev_kfree_skb(skb);
+ *s = new_skb;
+
+ return 0;
+}
+#endif
static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
@@ -2096,6 +2188,14 @@
nonlinear = skb_is_nonlinear(skb);
}
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa(net_dev, &skb))
+ goto enomem;
+ nonlinear = skb_is_nonlinear(skb);
+ }
+#endif
if (nonlinear) {
/* Just create a S/G fd based on the skb */
@@ -2181,7 +2281,6 @@
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-
} else if (np->down) {
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
@@ -2312,11 +2411,8 @@
return qman_cb_dqrr_consume;
}
- dpaa_bp = dpaa_bpid2pool(fd->bpid);
- if (!dpaa_bp)
- return qman_cb_dqrr_consume;
-
- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
/* prefetch the first 64 bytes of the frame or the SGT start */
vaddr = phys_to_virt(addr);
@@ -2455,7 +2551,7 @@
struct dpaa_percpu_priv *percpu_priv;
int i;
- for_each_possible_cpu(i) {
+ for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
percpu_priv->np.down = 0;
@@ -2468,7 +2564,7 @@
struct dpaa_percpu_priv *percpu_priv;
int i;
- for_each_possible_cpu(i) {
+ for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
percpu_priv->np.down = 1;
@@ -2486,8 +2582,12 @@
mac_dev->adjust_link(mac_dev);
}
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA 0x03a1b400
+
static int dpaa_phy_init(struct net_device *net_dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
@@ -2503,10 +2603,16 @@
return -ENODEV;
}
- /* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
- phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- phy_dev->advertising = phy_dev->supported;
+ /* Unless the PHY is capable of rate adaptation */
+ if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+ ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ /* remove any features not supported by the controller */
+ ethtool_convert_legacy_u32_to_link_mode(mask,
+ mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ }
+
+ phy_support_asym_pause(phy_dev);
mac_dev->phy_dev = phy_dev;
net_dev->phydev = phy_dev;
@@ -2627,6 +2733,7 @@
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
@@ -2668,7 +2775,8 @@
{
dma_addr_t addr = bm_buf_addr(bmb);
- dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
skb_free_frag(phys_to_virt(addr));
}
@@ -2745,9 +2853,8 @@
return err;
}
-static const struct of_device_id dpaa_match[];
-
-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
{
u16 headroom;
@@ -2761,29 +2868,56 @@
*
* Also make sure the headroom is a multiple of data_align bytes
*/
- headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
- DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
- return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ if (port == RX)
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ else
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
{
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
struct net_device *net_dev = NULL;
+ struct dpaa_bp *dpaa_bp = NULL;
struct dpaa_fq *dpaa_fq, *tmp;
struct dpaa_priv *priv = NULL;
struct fm_port_fqs port_fqs;
struct mac_device *mac_dev;
- int err = 0, i, channel;
+ int err = 0, channel;
struct device *dev;
- /* device used for DMA mapping */
- dev = pdev->dev.parent;
- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (err) {
- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ dev = &pdev->dev;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+ err = bman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to bman portals probe error\n");
+ return -ENODEV;
+ }
+ err = qman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to qman portals probe error\n");
+ return -ENODEV;
}
/* Allocate this early, so we can store relevant information in
@@ -2796,7 +2930,7 @@
}
/* Do this here, so we can be verbose early */
- SET_NETDEV_DEV(net_dev, dev);
+ SET_NETDEV_DEV(net_dev, dev->parent);
dev_set_drvdata(dev, net_dev);
priv = netdev_priv(net_dev);
@@ -2806,8 +2940,20 @@
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
- dev_err(dev, "dpaa_mac_dev_get() failed\n");
+ netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
+ goto free_netdev;
+ }
+
+ /* Devices used for DMA mapping */
+ priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+ priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+ err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+ if (!err)
+ err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+ DMA_BIT_MASK(40));
+ if (err) {
+ netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
goto free_netdev;
}
@@ -2827,23 +2973,21 @@
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* bp init */
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bps[i] = dpaa_bp_alloc(dev);
- if (IS_ERR(dpaa_bps[i])) {
- err = PTR_ERR(dpaa_bps[i]);
- goto free_dpaa_bps;
- }
- /* the raw size of the buffers used for reception */
- dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
- /* avoid runtime computations by keeping the usable size here */
- dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
- dpaa_bps[i]->dev = dev;
-
- err = dpaa_bp_alloc_pool(dpaa_bps[i]);
- if (err < 0)
- goto free_dpaa_bps;
- priv->dpaa_bps[i] = dpaa_bps[i];
+ dpaa_bp = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bp)) {
+ err = PTR_ERR(dpaa_bp);
+ goto free_dpaa_bps;
}
+ /* the raw size of the buffers used for reception */
+ dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+ dpaa_bp->priv = priv;
+
+ err = dpaa_bp_alloc_pool(dpaa_bp);
+ if (err < 0)
+ goto free_dpaa_bps;
+ priv->dpaa_bp = dpaa_bp;
INIT_LIST_HEAD(&priv->dpaa_fq_list);
@@ -2869,7 +3013,7 @@
/* Walk the CPUs with affine portals
* and add this pool channel to each's dequeue mask.
*/
- dpaa_eth_add_channel(priv->channel);
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
@@ -2897,11 +3041,11 @@
goto free_dpaa_fqs;
}
- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
/* All real interfaces need their ports initialized */
- err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
goto free_dpaa_fqs;
@@ -2960,7 +3104,7 @@
struct device *dev;
int err;
- dev = pdev->dev.parent;
+ dev = &pdev->dev;
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
--