forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,4 +1,5 @@
 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -51,9 +52,9 @@
 #include <linux/percpu.h>
 #include <linux/dma-mapping.h>
 #include <linux/sort.h>
+#include <linux/phy_fixed.h>
 #include <soc/fsl/bman.h>
 #include <soc/fsl/qman.h>
-
 #include "fman.h"
 #include "fman_port.h"
 #include "mac.h"
@@ -86,7 +87,7 @@

 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
- NETIF_MSG_IFDOWN)
+ NETIF_MSG_IFDOWN | NETIF_MSG_HW)

 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000
 /* Ingress congestion threshold on FMan ports
@@ -123,7 +124,22 @@
 #define FSL_QMAN_MAX_OAL 127

 /* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+ DPAA_A050385_ALIGN : 16)
+#else
 #define DPAA_FD_DATA_ALIGNMENT 16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif

 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
 #define DPAA_SGT_SIZE 256
@@ -158,8 +174,18 @@
 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
 #define DPAA_TIME_STAMP_SIZE 8
 #define DPAA_HASH_RESULTS_SIZE 8
-#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
 dpaa_rx_extra_headroom)
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
+#else
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
+#endif

 #define DPAA_ETH_PCD_RXQ_NUM 128

@@ -178,31 +204,14 @@
 /* All the dpa bps in use at any moment */
 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

-/* The raw buffer size must be cacheline aligned */
 #define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
- size_t res = DPAA_BP_RAW_SIZE / 4;
- u8 i;

- for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
- res *= 2;
- return res;
-}
-
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+ ~(DPAA_A050385_ALIGN - 1))
+#else
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif

 static int dpaa_max_frm;

@@ -255,8 +264,20 @@
 net_dev->features |= net_dev->hw_features;
 net_dev->vlan_features = net_dev->features;

- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ if (is_valid_ether_addr(mac_addr)) {
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ eth_hw_addr_random(net_dev);
+ err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "Failed to set random MAC address\n");
+ return -EINVAL;
+ }
+ dev_info(dev, "Using random MAC address: %pM\n",
+ net_dev->dev_addr);
+ }

 net_dev->ethtool_ops = &dpaa_ethtool_ops;

@@ -288,7 +309,7 @@
 /* Allow the Fman (Tx) port to process in-flight frames before we
 * try switching it off.
 */
- usleep_range(5000, 10000);
+ msleep(200);

 err = mac_dev->stop(mac_dev);
 if (err < 0)
@@ -305,10 +326,12 @@
 phy_disconnect(net_dev->phydev);
 net_dev->phydev = NULL;

+ msleep(200);
+
 return err;
 }

-static void dpaa_tx_timeout(struct net_device *net_dev)
+static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
 {
 struct dpaa_percpu_priv *percpu_priv;
 const struct dpaa_priv *priv;
@@ -485,7 +508,7 @@
 static bool dpaa_bpid2pool_use(int bpid)
 {
 if (dpaa_bpid2pool(bpid)) {
- atomic_inc(&dpaa_bp_array[bpid]->refs);
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
 return true;
 }

@@ -496,7 +519,7 @@
 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
 {
 dpaa_bp_array[bpid] = dpaa_bp;
- atomic_set(&dpaa_bp->refs, 1);
+ refcount_set(&dpaa_bp->refs, 1);
 }

 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +607,7 @@
 if (!bp)
 return;

- if (!atomic_dec_and_test(&bp->refs))
+ if (!refcount_dec_and_test(&bp->refs))
 return;

 if (bp->free_buf_cb)
@@ -596,10 +619,7 @@

 static void dpaa_bps_free(struct dpaa_priv *priv)
 {
- int i;
-
- for (i = 0; i < DPAA_BPS_NUM; i++)
- dpaa_bp_free(priv->dpaa_bps[i]);
+ dpaa_bp_free(priv->dpaa_bp);
 }

 /* Use multiple WQs for FQ assignment:
@@ -773,16 +793,17 @@
 qman_release_pool(rx_pool_channel);
 }

-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 {
 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
 const cpumask_t *cpus = qman_affine_cpus();
 struct qman_portal *portal;
 int cpu;

- for_each_cpu(cpu, cpus) {
+ for_each_cpu_and(cpu, cpus, cpu_online_mask) {
 portal = qman_get_affine_portal(cpu);
 qman_p_static_dequeue_add(portal, pool);
+ qman_start_using_portal(portal, dev);
 }
 }

@@ -896,12 +917,12 @@
 u16 channels[NR_CPUS];
 struct dpaa_fq *fq;

- for_each_cpu(cpu, affine_cpus)
+ for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
 channels[num_portals++] = qman_affine_channel(cpu);

 if (num_portals == 0)
 dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
+ "No Qman software (affine) channels found\n");

 /* Initialize each FQ in the list */
 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -929,7 +950,7 @@
 break;
 case FQ_TYPE_TX_CONF_MQ:
 priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- /* fall through */
+ fallthrough;
 case FQ_TYPE_TX_CONFIRM:
 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
 break;
@@ -1197,15 +1218,15 @@
 return err;
 }

-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+ struct dpaa_fq *errq,
 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
 struct dpaa_buffer_layout *buf_layout)
 {
 struct fman_buffer_prefix_content buf_prefix_content;
 struct fman_port_rx_params *rx_p;
 struct fman_port_params params;
- int i, err;
+ int err;

 memset(&params, 0, sizeof(params));
 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1214,7 +1235,7 @@
 buf_prefix_content.pass_prs_result = true;
 buf_prefix_content.pass_hash_result = true;
 buf_prefix_content.pass_time_stamp = true;
- buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+ buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;

 rx_p = &params.specific_params.rx_params;
 rx_p->err_fqid = errq->fqid;
@@ -1224,12 +1245,9 @@
 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
 }

- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
- }
+ rx_p->ext_buf_pools.num_of_pools_used = 1;
+ rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;

 err = fman_port_config(port, &params);
 if (err) {
@@ -1252,7 +1270,7 @@
 }

 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
+ struct dpaa_bp *bp,
 struct fm_port_fqs *port_fqs,
 struct dpaa_buffer_layout *buf_layout,
 struct device *dev)
@@ -1266,7 +1284,7 @@
 if (err)
 return err;

- err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
 port_fqs->rx_defq, port_fqs->rx_pcdq,
 &buf_layout[RX]);

@@ -1280,7 +1298,7 @@

 err = bman_release(dpaa_bp->pool, bmb, cnt);
 /* Should never occur, address anyway to avoid leaking the buffers */
- if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+ if (WARN_ON(err) && dpaa_bp->free_buf_cb)
 while (cnt-- > 0)
 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

@@ -1335,15 +1353,16 @@
 vaddr = phys_to_virt(qm_fd_addr(fd));
 sgt = vaddr + qm_fd_get_offset(fd);

- dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);

 dpaa_release_sgt_members(sgt);

- addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dpaa_bp->dev, addr)) {
- dev_err(dpaa_bp->dev, "DMA mapping failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+ virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+ netdev_err(net_dev, "DMA mapping failed\n");
 return;
 }
 bm_buffer_set64(&bmb, addr);
@@ -1396,7 +1415,7 @@
 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
 struct sk_buff *skb,
 struct qm_fd *fd,
- char *parse_results)
+ void *parse_results)
 {
 struct fman_prs_result *parse_result;
 u16 ethertype = ntohs(skb->protocol);
@@ -1488,25 +1507,24 @@

 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 {
- struct device *dev = dpaa_bp->dev;
+ struct net_device *net_dev = dpaa_bp->priv->net_dev;
 struct bm_buffer bmb[8];
 dma_addr_t addr;
- void *new_buf;
+ struct page *p;
 u8 i;

 for (i = 0; i < 8; i++) {
- new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
- if (unlikely(!new_buf)) {
- dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
- dpaa_bp->raw_size);
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
 goto release_previous_buffs;
 }
- new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);

- addr = dma_map_single(dev, new_buf,
- dpaa_bp->size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dpaa_bp->dev, "DMA map failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+ addr))) {
+ netdev_err(net_dev, "DMA map failed\n");
 goto release_previous_buffs;
 }

@@ -1581,17 +1599,16 @@
 {
 struct dpaa_bp *dpaa_bp;
 int *countptr;
- int res, i;
+ int res;

- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bp = priv->dpaa_bps[i];
- if (!dpaa_bp)
- return -EINVAL;
- countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- }
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
+
 return 0;
 }

@@ -1614,47 +1631,48 @@
 struct device *dev = priv->net_dev->dev.parent;
 struct skb_shared_hwtstamps shhwtstamps;
 dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr = phys_to_virt(addr);
 const struct qm_sg_entry *sgt;
- struct sk_buff **skbh, *skb;
- int nr_frags, i;
+ struct sk_buff *skb;
 u64 ns;
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- skb = *skbh;
+ int i;

 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr,
- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
- dma_dir);
+ dma_unmap_page(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);

 /* The sgt buffer has been allocated with netdev_alloc_frag(),
 * it's from lowmem.
 */
- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);

 /* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
 qm_sg_entry_get_len(&sgt[0]), dma_dir);

 /* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i <= nr_frags; i++) {
+ for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

- dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
 qm_sg_entry_get_len(&sgt[i]), dma_dir);
 }
 } else {
- dma_unmap_single(dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ priv->tx_headroom + qm_fd_get_length(fd),
+ dma_dir);
 }
+
+ skb = *(struct sk_buff **)vaddr;

 /* DMA unmapping is required before accessing the HW provided info */
 if (ts && priv->tx_tstamp &&
 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 memset(&shhwtstamps, 0, sizeof(shhwtstamps));

- if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
 &ns)) {
 shhwtstamps.hwtstamp = ns_to_ktime(ns);
 skb_tstamp_tx(skb, &shhwtstamps);
@@ -1664,8 +1682,8 @@
 }

 if (qm_fd_get_format(fd) == qm_fd_sg)
- /* Free the page frag that we allocated on Tx */
- skb_free_frag(phys_to_virt(addr));
+ /* Free the page that we allocated on Tx for the SGT */
+ free_pages((unsigned long)vaddr, 0);

 return skb;
 }
@@ -1686,6 +1704,8 @@
 */
 return CHECKSUM_NONE;
 }
+
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))

 /* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
@@ -1709,10 +1729,8 @@

 skb = build_skb(vaddr, dpaa_bp->size +
 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb)) {
- WARN_ONCE(1, "Build skb failure on Rx\n");
+ if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
 goto free_buffer;
- }
 WARN_ON(fd_off != priv->rx_headroom);
 skb_reserve(skb, fd_off);
 skb_put(skb, qm_fd_get_length(fd));
@@ -1722,7 +1740,7 @@
 return skb;

 free_buffer:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
 return NULL;
 }

@@ -1746,7 +1764,7 @@
 int page_offset;
 unsigned int sz;
 int *count_ptr;
- int i;
+ int i, j;

 vaddr = phys_to_virt(addr);
 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1760,22 +1778,21 @@

 sg_addr = qm_sg_addr(&sgt[i]);
 sg_vaddr = phys_to_virt(sg_addr);
- WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- SMP_CACHE_BYTES));
+ WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
+
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);

 /* We may use multiple Rx pools */
 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 if (!dpaa_bp)
 goto free_buffers;

- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
- DMA_FROM_DEVICE);
 if (!skb) {
 sz = dpaa_bp->size +
 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 skb = build_skb(sg_vaddr, sz);
- if (WARN_ON(unlikely(!skb)))
+ if (WARN_ON(!skb))
 goto free_buffers;

 skb->ip_summed = rx_csum_offload(priv, fd);
@@ -1813,7 +1830,9 @@
 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
 frag_len, dpaa_bp->size);
 }
+
 /* Update the pool count for the current {cpu x bpool} */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 (*count_ptr)--;

 if (qm_sg_entry_is_final(&sgt[i]))
@@ -1822,35 +1841,34 @@
 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");

 /* free the SG table buffer */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);

 return skb;

 free_buffers:
- /* compensate sw bpool counter changes */
- for (i--; i >= 0; i--) {
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)++;
- }
- }
 /* free all the SG entries */
- for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
- sg_addr = qm_sg_addr(&sgt[i]);
+ for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+ sg_addr = qm_sg_addr(&sgt[j]);
 sg_vaddr = phys_to_virt(sg_addr);
- skb_free_frag(sg_vaddr);
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)--;
+ /* all pages 0..i were unmaped */
+ if (j > i)
+ dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ free_pages((unsigned long)sg_vaddr, 0);
+ /* counters 0..i-1 were decremented */
+ if (j >= i) {
+ dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
 }

- if (qm_sg_entry_is_final(&sgt[i]))
+ if (qm_sg_entry_is_final(&sgt[j]))
 break;
 }
 /* free the SGT fragment */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);

 return NULL;
 }
@@ -1860,9 +1878,8 @@
 int *offset)
 {
 struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
 enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
+ unsigned char *buff_start;
 struct sk_buff **skbh;
 dma_addr_t addr;
 int err;
@@ -1871,10 +1888,10 @@
 * available, so just use that for offset.
 */
 fd->bpid = FSL_DPAA_BPID_INV;
- buffer_start = skb->data - priv->tx_headroom;
+ buff_start = skb->data - priv->tx_headroom;
 dma_dir = DMA_TO_DEVICE;

- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
 *skbh = skb;

 /* Enable L3/L4 hardware checksum computation.
@@ -1883,7 +1900,7 @@
 * need to write into the skb.
 */
 err = dpaa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
 if (unlikely(err < 0)) {
 if (net_ratelimit())
 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1896,9 +1913,9 @@
 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);

 /* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ priv->tx_headroom + skb->len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 if (net_ratelimit())
 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
 return -EINVAL;
@@ -1914,24 +1931,22 @@
 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
 const int nr_frags = skb_shinfo(skb)->nr_frags;
 struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
 struct qm_sg_entry *sgt;
 struct sk_buff **skbh;
- int i, j, err, sz;
- void *buffer_start;
+ void *buff_start;
 skb_frag_t *frag;
 dma_addr_t addr;
 size_t frag_len;
- void *sgt_buf;
+ struct page *p;
+ int i, j, err;

- /* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
- sgt_buf = netdev_alloc_frag(sz);
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
- sz);
+ /* get a page to store the SGTable */
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
 return -ENOMEM;
 }
+ buff_start = page_address(p);

 /* Enable L3/L4 hardware checksum computation.
 *
@@ -1939,7 +1954,7 @@
 * need to write into the skb.
 */
 err = dpaa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
 if (unlikely(err < 0)) {
 if (net_ratelimit())
 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1948,15 +1963,15 @@
 }

 /* SGT[0] is used by the linear part */
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
 frag_len = skb_headlen(skb);
 qm_sg_entry_set_len(&sgt[0], frag_len);
 sgt[0].bpid = FSL_DPAA_BPID_INV;
 sgt[0].offset = 0;
- addr = dma_map_single(dev, skb->data,
+ addr = dma_map_single(priv->tx_dma_dev, skb->data,
 skb_headlen(skb), dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
 err = -EINVAL;
 goto sg0_map_failed;
 }
@@ -1965,12 +1980,12 @@
 /* populate the rest of SGT entries */
 for (i = 0; i < nr_frags; i++) {
 frag = &skb_shinfo(skb)->frags[i];
- frag_len = frag->size;
+ frag_len = skb_frag_size(frag);
 WARN_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dev, frag, 0,
+ addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
 frag_len, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
 err = -EINVAL;
 goto sg_map_failed;
 }
@@ -1986,17 +2001,17 @@
 /* Set the final bit in the last used entry of the SGT */
 qm_sg_entry_set_f(&sgt[nr_frags], frag_len);

+ /* set fd offset to priv->tx_headroom */
 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);

 /* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
 *skbh = skb;

- addr = dma_map_single(dev, buffer_start,
- priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ addr = dma_map_page(priv->tx_dma_dev, p, 0,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
 err = -EINVAL;
 goto sgt_map_failed;
 }
@@ -2010,11 +2025,11 @@
 sgt_map_failed:
 sg_map_failed:
 for (j = 0; j < i; j++)
- dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
 qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
- skb_free_frag(sgt_buf);
+ free_pages((unsigned long)buff_start, 0);

 return err;
 }
@@ -2050,6 +2065,83 @@

 return 0;
 }
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct sk_buff *new_skb, *skb = *s;
+ unsigned char *start, i;
+
+ /* check linear buffer alignment */
+ if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* linear buffers just need to have an aligned start */
+ if (!skb_is_nonlinear(skb))
+ return 0;
+
+ /* linear data size for nonlinear skbs needs to be aligned */
+ if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /* all fragments need to have aligned start addresses */
+ if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* all but last fragment need to have aligned sizes */
+ if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+ (i < skb_shinfo(skb)->nr_frags - 1))
+ goto workaround;
+ }
+
+ return 0;
+
+workaround:
+ /* copy all the skb content into a new linear buffer */
+ new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+ priv->tx_headroom);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+ skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+ /* Workaround for DPAA_A050385 requires data start to be aligned */
+ start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+ if (start - new_skb->data)
+ skb_reserve(new_skb, start - new_skb->data);
+
+ skb_put(new_skb, skb->len);
+ skb_copy_bits(skb, 0, new_skb->data, skb->len);
+ skb_copy_header(new_skb, skb);
+ new_skb->dev = skb->dev;
+
+ /* Copy relevant timestamp info from the old skb to the new */
+ if (priv->tx_tstamp) {
+ skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
+ skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
+ skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
+ if (skb->sk)
+ skb_set_owner_w(new_skb, skb->sk);
+ }
+
+ /* We move the headroom when we align it so we have to reset the
+ * network and transport header offsets relative to the new data
+ * pointer. The checksum offload relies on these offsets.
+ */
+ skb_set_network_header(new_skb, skb_network_offset(skb));
+ skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+ dev_kfree_skb(skb);
+ *s = new_skb;
+
+ return 0;
+}
+#endif

 static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
@@ -2096,6 +2188,14 @@

 nonlinear = skb_is_nonlinear(skb);
 }
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa(net_dev, &skb))
+ goto enomem;
+ nonlinear = skb_is_nonlinear(skb);
+ }
+#endif

 if (nonlinear) {
 /* Just create a S/G fd based on the skb */
@@ -2181,7 +2281,6 @@
 if (cleaned < budget) {
 napi_complete_done(napi, cleaned);
 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-
 } else if (np->down) {
 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
 }
@@ -2312,11 +2411,8 @@
 return qman_cb_dqrr_consume;
 }

- dpaa_bp = dpaa_bpid2pool(fd->bpid);
- if (!dpaa_bp)
- return qman_cb_dqrr_consume;
-
- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);

 /* prefetch the first 64 bytes of the frame or the SGT start */
 vaddr = phys_to_virt(addr);
@@ -2455,7 +2551,7 @@
 struct dpaa_percpu_priv *percpu_priv;
 int i;

- for_each_possible_cpu(i) {
+ for_each_online_cpu(i) {
 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

 percpu_priv->np.down = 0;
@@ -2468,7 +2564,7 @@
 struct dpaa_percpu_priv *percpu_priv;
 int i;

- for_each_possible_cpu(i) {
+ for_each_online_cpu(i) {
 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

 percpu_priv->np.down = 1;
@@ -2486,8 +2582,12 @@
 mac_dev->adjust_link(mac_dev);
 }

+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA 0x03a1b400
+
 static int dpaa_phy_init(struct net_device *net_dev)
 {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 struct mac_device *mac_dev;
 struct phy_device *phy_dev;
 struct dpaa_priv *priv;
@@ -2503,10 +2603,16 @@
 return -ENODEV;
 }

- /* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
- phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- phy_dev->advertising = phy_dev->supported;
+ /* Unless the PHY is capable of rate adaptation */
+ if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+ ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ /* remove any features not supported by the controller */
+ ethtool_convert_legacy_u32_to_link_mode(mask,
+ mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ }
+
+ phy_support_asym_pause(phy_dev);

 mac_dev->phy_dev = phy_dev;
 net_dev->phydev = phy_dev;
@@ -2627,6 +2733,7 @@
 .ndo_stop = dpaa_eth_stop,
 .ndo_tx_timeout = dpaa_tx_timeout,
 .ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_change_carrier = fixed_phy_change_carrier,
 .ndo_set_mac_address = dpaa_set_mac_address,
 .ndo_validate_addr = eth_validate_addr,
 .ndo_set_rx_mode = dpaa_set_rx_mode,
@@ -2668,7 +2775,8 @@
 {
 dma_addr_t addr = bm_buf_addr(bmb);

- dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);

 skb_free_frag(phys_to_virt(addr));
 }
@@ -2745,9 +2853,8 @@
 return err;
 }

-static const struct of_device_id dpaa_match[];
-
-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
 {
 u16 headroom;

@@ -2761,29 +2868,56 @@
 *
 *
 * Also make sure the headroom is a multiple of data_align bytes
- headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
- DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);

- return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ if (port == RX)
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ else
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
 }

 static int dpaa_eth_probe(struct platform_device *pdev)
 {
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
 struct net_device *net_dev = NULL;
+ struct dpaa_bp *dpaa_bp = NULL;
 struct dpaa_fq *dpaa_fq, *tmp;
 struct dpaa_priv *priv = NULL;
 struct fm_port_fqs port_fqs;
 struct mac_device *mac_dev;
- int err = 0, i, channel;
+ int err = 0, channel;
 struct device *dev;

- /* device used for DMA mapping */
- dev = pdev->dev.parent;
- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (err) {
- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ dev = &pdev->dev;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+ err = bman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to bman portals probe error\n");
+ return -ENODEV;
+ }
+ err = qman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to qman portals probe error\n");
+ return -ENODEV;
 }

 /* Allocate this early, so we can store relevant information in
@@ -2796,7 +2930,7 @@
 }

 /* Do this here, so we can be verbose early */
- SET_NETDEV_DEV(net_dev, dev);
+ SET_NETDEV_DEV(net_dev, dev->parent);
 dev_set_drvdata(dev, net_dev);

 priv = netdev_priv(net_dev);
@@ -2806,8 +2940,20 @@

 mac_dev = dpaa_mac_dev_get(pdev);
 if (IS_ERR(mac_dev)) {
- dev_err(dev, "dpaa_mac_dev_get() failed\n");
+ netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
 err = PTR_ERR(mac_dev);
+ goto free_netdev;
+ }
+
+ /* Devices used for DMA mapping */
+ priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+ priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+ err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+ if (!err)
+ err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+ DMA_BIT_MASK(40));
+ if (err) {
+ netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
 goto free_netdev;
 }

@@ -2827,23 +2973,21 @@
 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

 /* bp init */
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bps[i] = dpaa_bp_alloc(dev);
- if (IS_ERR(dpaa_bps[i])) {
- err = PTR_ERR(dpaa_bps[i]);
- goto free_dpaa_bps;
- }
- /* the raw size of the buffers used for reception */
- dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
- /* avoid runtime computations by keeping the usable size here */
- dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
- dpaa_bps[i]->dev = dev;
-
- err = dpaa_bp_alloc_pool(dpaa_bps[i]);
- if (err < 0)
- goto free_dpaa_bps;
- priv->dpaa_bps[i] = dpaa_bps[i];
+ dpaa_bp = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bp)) {
+ err = PTR_ERR(dpaa_bp);
+ goto free_dpaa_bps;
 }
+ /* the raw size of the buffers used for reception */
+ dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+ dpaa_bp->priv = priv;
+
+ err = dpaa_bp_alloc_pool(dpaa_bp);
+ if (err < 0)
+ goto free_dpaa_bps;
+ priv->dpaa_bp = dpaa_bp;

 INIT_LIST_HEAD(&priv->dpaa_fq_list);

@@ -2869,7 +3013,7 @@
 /* Walk the CPUs with affine portals
 * and add this pool channel to each's dequeue mask.
 */
- dpaa_eth_add_channel(priv->channel);
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);

 dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);

@@ -2897,11 +3041,11 @@
 goto free_dpaa_fqs;
 }

- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);

 /* All real interfaces need their ports initialized */
- err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
 &priv->buf_layout[0], dev);
 if (err)
 goto free_dpaa_fqs;
@@ -2960,7 +3104,7 @@
 struct device *dev;
 int err;

- dev = pdev->dev.parent;
+ dev = &pdev->dev;
 net_dev = dev_get_drvdata(dev);

 priv = netdev_priv(net_dev);