2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
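Note: the hunks below replace the driver's per-CPU register accessors with per-thread ones, keyed by a new mvpp2_cpu_to_thread() helper that folds CPU ids onto the PPv2's limited software threads with a simple modulo. A minimal standalone sketch of that mapping (illustrative only, not part of the diff; the thread-count value here is an assumption):

	#include <stdio.h>

	#define NTHREADS 8	/* assumed thread count, for the sketch only */

	/* Same arithmetic as the mvpp2_cpu_to_thread() helper added below */
	static unsigned int cpu_to_thread(unsigned int nthreads, unsigned int cpu)
	{
		return cpu % nthreads;
	}

	int main(void)
	{
		/* With 16 CPUs and 8 threads, CPUs 0 and 8 share thread 0, etc. */
		for (unsigned int cpu = 0; cpu < 16; cpu++)
			printf("cpu %2u -> thread %u\n", cpu,
			       cpu_to_thread(NTHREADS, cpu));
		return 0;
	}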
@@ -28,6 +28,7 @@
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <linux/phy/phy.h>
+#include <linux/ptp_classify.h>
 #include <linux/clk.h>
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
@@ -36,6 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
+#include <linux/bpf_trace.h>

 #include "mvpp2.h"
 #include "mvpp2_prs.h"
@@ -56,10 +58,7 @@
 /* The prototype is added here to be used in start_dev when using ACPI. This
  * will be removed once phylink is used for all modes (dt+ACPI).
  */
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
-			     const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
-			      phy_interface_t interface, struct phy_device *phy);
+static void mvpp2_acpi_start(struct mvpp2_port *port);

 /* Queue modes */
 #define MVPP2_QDIST_SINGLE_MODE	0
@@ -82,13 +81,37 @@
 	return readl(priv->swth_base[0] + offset);
 }

-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
 {
 	return readl_relaxed(priv->swth_base[0] + offset);
 }
+
+static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+	return cpu % priv->nthreads;
+}
+
+static struct page_pool *
+mvpp2_create_page_pool(struct device *dev, int num, int len,
+		       enum dma_data_direction dma_dir)
+{
+	struct page_pool_params pp_params = {
+		/* internal DMA mapping in page_pool */
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = num,
+		.nid = NUMA_NO_NODE,
+		.dev = dev,
+		.dma_dir = dma_dir,
+		.offset = MVPP2_SKB_HEADROOM,
+		.max_len = len,
+	};
+
+	return page_pool_create(&pp_params);
+}
+
 /* These accessors should be used to access:
  *
- * - per-CPU registers, where each CPU has its own copy of the
+ * - per-thread registers, where each thread has its own copy of the
  *   register.
  *
  *   MVPP2_BM_VIRT_ALLOC_REG
@@ -104,8 +127,8 @@
  *   MVPP2_TXQ_SENT_REG
  *   MVPP2_RXQ_NUM_REG
  *
- * - global registers that must be accessed through a specific CPU
- *   window, because they are related to an access to a per-CPU
+ * - global registers that must be accessed through a specific thread
+ *   window, because they are related to an access to a per-thread
  *   register
  *
  *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
@@ -122,28 +145,28 @@
  *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
  *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
  */
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
 {
-	writel(data, priv->swth_base[cpu] + offset);
+	writel(data, priv->swth_base[thread] + offset);
 }

-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
 {
-	return readl(priv->swth_base[cpu] + offset);
+	return readl(priv->swth_base[thread] + offset);
 }

-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
 {
-	writel_relaxed(data, priv->swth_base[cpu] + offset);
+	writel_relaxed(data, priv->swth_base[thread] + offset);
 }

-static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
 {
-	return readl_relaxed(priv->swth_base[cpu] + offset);
+	return readl_relaxed(priv->swth_base[thread] + offset);
 }

 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
@@ -272,18 +295,43 @@

 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
-			      struct sk_buff *skb,
-			      struct mvpp2_tx_desc *tx_desc)
+			      void *data,
+			      struct mvpp2_tx_desc *tx_desc,
+			      enum mvpp2_tx_buf_type buf_type)
 {
 	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
-	tx_buf->skb = skb;
+	tx_buf->type = buf_type;
+	if (buf_type == MVPP2_TYPE_SKB)
+		tx_buf->skb = data;
+	else
+		tx_buf->xdpf = data;
 	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
 	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
 	txq_pcpu->txq_put_index++;
 	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
+}
+
+/* Get number of maximum RXQ */
+static int mvpp2_get_nrxqs(struct mvpp2 *priv)
+{
+	unsigned int nrxqs;
+
+	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+		return 1;
+
+	/* According to the PPv2.2 datasheet and our experiments on
+	 * PPv2.1, RX queues have an allocation granularity of 4 (when
+	 * more than a single one on PPv2.2).
+	 * Round up to nearest multiple of 4.
+	 */
+	nrxqs = (num_possible_cpus() + 3) & ~0x3;
+	if (nrxqs > MVPP2_PORT_MAX_RXQ)
+		nrxqs = MVPP2_PORT_MAX_RXQ;
+
+	return nrxqs;
 }

 /* Get number of physical egress port */
@@ -298,17 +346,25 @@
 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
 }

-static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
+/* Returns a struct page if page_pool is set, otherwise a buffer */
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
+			      struct page_pool *page_pool)
 {
+	if (page_pool)
+		return page_pool_dev_alloc_pages(page_pool);
+
 	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
-	else
-		return kmalloc(pool->frag_size, GFP_ATOMIC);
+
+	return kmalloc(pool->frag_size, GFP_ATOMIC);
 }

-static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
+			    struct page_pool *page_pool, void *data)
 {
-	if (likely(pool->frag_size <= PAGE_SIZE))
+	if (page_pool)
+		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
+	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
 	else
		kfree(data);
@@ -317,8 +373,7 @@
 /* Buffer Manager configuration routines */

 /* Create pool */
-static int mvpp2_bm_pool_create(struct platform_device *pdev,
-				struct mvpp2 *priv,
+static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
 {
 	u32 val;
@@ -337,7 +392,7 @@
 	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

-	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
 	if (!bm_pool->virt_addr)
@@ -345,9 +400,9 @@

 	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
-		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
-		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
 	}
@@ -385,17 +440,17 @@
				dma_addr_t *dma_addr,
				phys_addr_t *phys_addr)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

-	*dma_addr = mvpp2_percpu_read(priv, cpu,
+	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
-	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

 	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

-		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
@@ -414,6 +469,7 @@
 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
 {
+	struct page_pool *pp = NULL;
 	int i;

 	if (buf_num > bm_pool->buf_num) {
@@ -421,6 +477,9 @@
			  bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
 	}
+
+	if (priv->percpu_pools)
+		pp = priv->page_pool[bm_pool->id];

 	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
@@ -430,14 +489,15 @@
		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

-		dma_unmap_single(dev, buf_dma_addr,
-				 bm_pool->buf_size, DMA_FROM_DEVICE);
+		if (!pp)
+			dma_unmap_single(dev, buf_dma_addr,
+					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

-		mvpp2_frag_free(bm_pool, data);
+		mvpp2_frag_free(bm_pool, pp, data);
 	}

 	/* Update BM driver with number of buffers removed from pool */
@@ -462,15 +522,14 @@
 }

 /* Cleanup pool */
-static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
-				 struct mvpp2 *priv,
+static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
 {
 	int buf_num;
 	u32 val;

 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
-	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

 	/* Check buffer counters after free */
 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
@@ -484,24 +543,31 @@
 	val |= MVPP2_BM_STOP_MASK;
 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

-	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+	if (priv->percpu_pools) {
+		page_pool_destroy(priv->page_pool[bm_pool->id]);
+		priv->page_pool[bm_pool->id] = NULL;
+	}
+
+	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
 	return 0;
 }

-static int mvpp2_bm_pools_init(struct platform_device *pdev,
-			       struct mvpp2 *priv)
+static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
 {
-	int i, err, size;
+	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
 	struct mvpp2_bm_pool *bm_pool;
+
+	if (priv->percpu_pools)
+		poolnum = mvpp2_get_nrxqs(priv) * 2;

 	/* Create all pools with maximum size */
 	size = MVPP2_BM_POOL_SIZE_MAX;
-	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
-		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
@@ -509,17 +575,53 @@
 	return 0;

 err_unroll_pools:
-	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
 	for (i = i - 1; i >= 0; i--)
-		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
 	return err;
 }

-static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
 {
-	int i, err;
+	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
+	struct mvpp2_port *port;

-	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+	if (priv->percpu_pools) {
+		for (i = 0; i < priv->port_count; i++) {
+			port = priv->port_list[i];
+			if (port->xdp_prog) {
+				dma_dir = DMA_BIDIRECTIONAL;
+				break;
+			}
+		}
+
+		poolnum = mvpp2_get_nrxqs(priv) * 2;
+		for (i = 0; i < poolnum; i++) {
+			/* the pool in use */
+			int pn = i / (poolnum / 2);
+
+			priv->page_pool[i] =
+				mvpp2_create_page_pool(dev,
+						       mvpp2_pools[pn].buf_num,
+						       mvpp2_pools[pn].pkt_size,
+						       dma_dir);
+			if (IS_ERR(priv->page_pool[i])) {
+				int j;
+
+				for (j = 0; j < i; j++) {
+					page_pool_destroy(priv->page_pool[j]);
+					priv->page_pool[j] = NULL;
+				}
+				return PTR_ERR(priv->page_pool[i]);
+			}
+		}
+	}
+
+	dev_info(dev, "using %d %s buffers\n", poolnum,
+		 priv->percpu_pools ? "per-cpu" : "shared");
+
+	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
@@ -527,12 +629,12 @@
 	}

 	/* Allocate and initialize BM pools */
-	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
 	if (!priv->bm_pools)
		return -ENOMEM;

-	err = mvpp2_bm_pools_init(pdev, priv);
+	err = mvpp2_bm_pools_init(dev, priv);
 	if (err < 0)
		return err;
 	return 0;
@@ -597,23 +699,31 @@

 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
+			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
 {
 	dma_addr_t dma_addr;
+	struct page *page;
 	void *data;

-	data = mvpp2_frag_alloc(bm_pool);
+	data = mvpp2_frag_alloc(bm_pool, page_pool);
 	if (!data)
		return NULL;

-	dma_addr = dma_map_single(port->dev->dev.parent, data,
-				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
-				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
-		mvpp2_frag_free(bm_pool, data);
-		return NULL;
+	if (page_pool) {
+		page = (struct page *)data;
+		dma_addr = page_pool_get_dma_addr(page);
+		data = page_to_virt(page);
+	} else {
+		dma_addr = dma_map_single(port->dev->dev.parent, data,
+					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+					  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+			mvpp2_frag_free(bm_pool, NULL, data);
+			return NULL;
+		}
 	}
 	*buf_dma_addr = dma_addr;
 	*buf_phys_addr = virt_to_phys(data);
@@ -626,7 +736,11 @@
			   dma_addr_t buf_dma_addr,
			   phys_addr_t buf_phys_addr)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+	unsigned long flags = 0;
+
+	if (test_bit(thread, &port->priv->lock_map))
+		spin_lock_irqsave(&port->bm_lock[thread], flags);

 	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;
@@ -640,7 +754,7 @@
			<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
			MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

-		mvpp2_percpu_write_relaxed(port->priv, cpu,
+		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
 	}

@@ -649,10 +763,13 @@
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
-	mvpp2_percpu_write_relaxed(port->priv, cpu,
+	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
-	mvpp2_percpu_write_relaxed(port->priv, cpu,
+	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+
+	if (test_bit(thread, &port->priv->lock_map))
+		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

 	put_cpu();
 }
@@ -664,7 +781,15 @@
 	int i, buf_size, total_size;
 	dma_addr_t dma_addr;
 	phys_addr_t phys_addr;
+	struct page_pool *pp = NULL;
 	void *buf;
+
+	if (port->priv->percpu_pools &&
+	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+		netdev_err(port->dev,
+			   "attempted to use jumbo frames with per-cpu pools");
+		return 0;
+	}

 	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
 	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
@@ -677,8 +802,10 @@
		return 0;
 	}

+	if (port->priv->percpu_pools)
+		pp = port->priv->page_pool[bm_pool->id];
 	for (i = 0; i < buf_num; i++) {
-		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;
@@ -709,7 +836,64 @@
 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
 	int num;

-	if (pool >= MVPP2_BM_POOLS_NUM) {
+	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
+	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
+		netdev_err(port->dev, "Invalid pool %d\n", pool);
+		return NULL;
+	}
+
+	/* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't being used yet
+	 */
+	if (new_pool->pkt_size == 0) {
+		int pkts_num;
+
+		/* Set default buffer number or free all the buffers in case
+		 * the pool is not empty
+		 */
+		pkts_num = new_pool->buf_num;
+		if (pkts_num == 0) {
+			if (port->priv->percpu_pools) {
+				if (pool < port->nrxqs)
+					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
+				else
+					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
+			} else {
+				pkts_num = mvpp2_pools[pool].buf_num;
+			}
+		} else {
+			mvpp2_bm_bufs_free(port->dev->dev.parent,
+					   port->priv, new_pool, pkts_num);
+		}
+
+		new_pool->pkt_size = pkt_size;
+		new_pool->frag_size =
+			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+			MVPP2_SKB_SHINFO_SIZE;
+
+		/* Allocate buffers for this pool */
+		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+		if (num != pkts_num) {
+			WARN(1, "pool %d: %d of %d allocated\n",
+			     new_pool->id, num, pkts_num);
+			return NULL;
+		}
+	}
+
+	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+	return new_pool;
+}
+
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
+			 unsigned int pool, int pkt_size)
+{
+	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+	int num;
+
+	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
 	}
@@ -725,7 +909,7 @@
	 */
 	pkts_num = new_pool->buf_num;
 	if (pkts_num == 0)
-		pkts_num = mvpp2_pools[pool].buf_num;
+		pkts_num = mvpp2_pools[type].buf_num;
 	else
		mvpp2_bm_bufs_free(port->dev->dev.parent,
				   port->priv, new_pool, pkts_num);
@@ -750,11 +934,11 @@
 	return new_pool;
 }

-/* Initialize pools for swf */
-static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+/* Initialize pools for swf, shared buffers variant */
+static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
 {
-	int rxq;
 	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+	int rxq;

 	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
@@ -798,11 +982,74 @@
 	return 0;
 }

+/* Initialize pools for swf, percpu buffers variant */
+static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
+{
+	struct mvpp2_bm_pool *bm_pool;
+	int i;
+
+	for (i = 0; i < port->nrxqs; i++) {
+		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
+						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
+		if (!bm_pool)
+			return -ENOMEM;
+
+		bm_pool->port_map |= BIT(port->id);
+		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
+	}
+
+	for (i = 0; i < port->nrxqs; i++) {
+		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
+						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
+		if (!bm_pool)
+			return -ENOMEM;
+
+		bm_pool->port_map |= BIT(port->id);
+		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
+	}
+
+	port->pool_long = NULL;
+	port->pool_short = NULL;
+
+	return 0;
+}
+
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+	if (port->priv->percpu_pools)
+		return mvpp2_swf_bm_pool_init_percpu(port);
+	else
+		return mvpp2_swf_bm_pool_init_shared(port);
+}
+
+static void mvpp2_set_hw_csum(struct mvpp2_port *port,
+			      enum mvpp2_bm_pool_log_num new_long_pool)
+{
+	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	/* Update L4 checksum when jumbo enable/disable on port.
+	 * Only port 0 supports hardware checksum offload due to
+	 * the Tx FIFO size limitation.
+	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
+	 * has 7 bits, so the maximum L3 offset is 128.
+	 */
+	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+		port->dev->features &= ~csums;
+		port->dev->hw_features &= ~csums;
+	} else {
+		port->dev->features |= csums;
+		port->dev->hw_features |= csums;
+	}
+}
+
 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 	enum mvpp2_bm_pool_log_num new_long_pool;
 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+	if (port->priv->percpu_pools)
+		goto out_set;

 	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
@@ -830,17 +1077,10 @@
 	/* Add port to new short & long pool */
 	mvpp2_swf_bm_pool_init(port);

-	/* Update L4 checksum when jumbo enable/disable on port */
-	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
-		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		dev->hw_features &= ~(NETIF_F_IP_CSUM |
-				      NETIF_F_IPV6_CSUM);
-	} else {
-		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-	}
+	mvpp2_set_hw_csum(port, new_long_pool);
 	}

+out_set:
 	dev->mtu = mtu;
 	dev->wanted_features = dev->features;

@@ -886,7 +1126,7 @@
		       MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
 }

-/* Mask the current CPU's Rx/Tx interrupts
+/* Mask the current thread's Rx/Tx interrupts
  * Called by on_each_cpu(), guaranteed to run with migration disabled,
  * using smp_processor_id() is OK.
  */
@@ -894,11 +1134,16 @@
 {
 	struct mvpp2_port *port = arg;

-	mvpp2_percpu_write(port->priv, smp_processor_id(),
+	/* If the thread isn't used, don't do anything */
+	if (smp_processor_id() > port->priv->nthreads)
+		return;
+
+	mvpp2_thread_write(port->priv,
+			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
 }

-/* Unmask the current CPU's Rx/Tx interrupts.
+/* Unmask the current thread's Rx/Tx interrupts.
  * Called by on_each_cpu(), guaranteed to run with migration disabled,
  * using smp_processor_id() is OK.
  */
@@ -907,12 +1152,17 @@
 	struct mvpp2_port *port = arg;
 	u32 val;

+	/* If the thread isn't used, don't do anything */
+	if (smp_processor_id() >= port->priv->nthreads)
+		return;
+
 	val = MVPP2_CAUSE_MISC_SUM_MASK |
	      MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
 	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

-	mvpp2_percpu_write(port->priv, smp_processor_id(),
+	mvpp2_thread_write(port->priv,
+			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
 }

@@ -936,12 +1186,39 @@
		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

-		mvpp2_percpu_write(port->priv, v->sw_thread_id,
+		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
 	}
 }

+/* Only GOP port 0 has an XLG MAC */
+static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
+{
+	return port->gop_id == 0;
+}
+
+static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
+{
+	return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
+}
+
 /* Port configuration routines */
+static bool mvpp2_is_xlg(phy_interface_t interface)
+{
+	return interface == PHY_INTERFACE_MODE_10GBASER ||
+	       interface == PHY_INTERFACE_MODE_XAUI;
+}
+
+static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
+{
+	u32 old, val;
+
+	old = val = readl(ptr);
+	val &= ~mask;
+	val |= set;
+	if (old != val)
+		writel(val, ptr);
+}

 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
 {
@@ -987,26 +1264,19 @@
 	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
 	u32 val;

-	/* XPCS */
 	val = readl(xpcs + MVPP22_XPCS_CFG0);
 	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
 	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
 	writel(val, xpcs + MVPP22_XPCS_CFG0);

-	/* MPCS */
 	val = readl(mpcs + MVPP22_MPCS_CTRL);
 	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
 	writel(val, mpcs + MVPP22_MPCS_CTRL);

 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
-	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
-		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
 	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
-	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-
-	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
-	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
 }

@@ -1023,7 +1293,7 @@
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		if (port->gop_id == 0)
+		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
@@ -1032,8 +1302,8 @@
 	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
-	case PHY_INTERFACE_MODE_10GKR:
-		if (port->gop_id != 0)
+	case PHY_INTERFACE_MODE_10GBASER:
+		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
@@ -1067,19 +1337,18 @@
 	u32 val;

 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+	    phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
 	}

-	if (port->gop_id == 0) {
+	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
-		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
@@ -1091,7 +1360,7 @@
 {
 	u32 val;

-	if (port->gop_id == 0) {
+	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
@@ -1099,9 +1368,8 @@
 	}

 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+	    phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -1112,19 +1380,27 @@
 {
 	u32 val;

-	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
+		     MVPP22_GMAC_INT_SUM_MASK_PTP,
+		     MVPP22_GMAC_INT_SUM_MASK_PTP);
+
+	if (port->phylink ||
+	    phy_interface_mode_is_rgmii(port->phy_interface) ||
+	    phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
 	}

-	if (port->gop_id == 0) {
+	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
+
+		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
+			     MVPP22_XLG_EXT_INT_MASK_PTP,
+			     MVPP22_XLG_EXT_INT_MASK_PTP);
 	}

 	mvpp22_gop_unmask_irq(port);
@@ -1142,28 +1418,13 @@
  */
 static int mvpp22_comphy_init(struct mvpp2_port *port)
 {
-	enum phy_mode mode;
 	int ret;

 	if (!port->comphy)
		return 0;

-	switch (port->phy_interface) {
-	case PHY_INTERFACE_MODE_SGMII:
-	case PHY_INTERFACE_MODE_1000BASEX:
-		mode = PHY_MODE_SGMII;
-		break;
-	case PHY_INTERFACE_MODE_2500BASEX:
-		mode = PHY_MODE_2500SGMII;
-		break;
-	case PHY_INTERFACE_MODE_10GKR:
-		mode = PHY_MODE_10GKR;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = phy_set_mode(port->comphy, mode);
+	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
+			       port->phy_interface);
 	if (ret)
		return ret;

@@ -1174,13 +1435,10 @@
 {
 	u32 val;

-	/* Only GOP port 0 has an XLG MAC */
-	if (port->gop_id == 0 &&
-	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
-	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+	if (mvpp2_port_supports_xlg(port) &&
+	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
-		val |= MVPP22_XLG_CTRL0_PORT_EN |
-		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
 	} else {
@@ -1195,22 +1453,16 @@
 {
 	u32 val;

-	/* Only GOP port 0 has an XLG MAC */
-	if (port->gop_id == 0 &&
-	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
-	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+	if (mvpp2_port_supports_xlg(port) &&
+	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-
-		/* Disable & reset should be done separately */
-		val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
-		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-	} else {
-		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
-		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
-		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
 	}
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
 }

 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -1236,15 +1488,24 @@
 	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

-	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+	if (phy_interface_mode_is_8023z(state->interface) ||
+	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
 	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
 }
+
+enum {
+	ETHTOOL_XDP_REDIRECT,
+	ETHTOOL_XDP_PASS,
+	ETHTOOL_XDP_DROP,
+	ETHTOOL_XDP_TX,
+	ETHTOOL_XDP_TX_ERR,
+	ETHTOOL_XDP_XMIT,
+	ETHTOOL_XDP_XMIT_ERR,
+};

 struct mvpp2_ethtool_counter {
 	unsigned int offset;
@@ -1264,6 +1525,17 @@
 	return val;
 }

+/* Some counters are accessed indirectly by first writing an index to
+ * MVPP2_CTRS_IDX. The index can represent various resources depending on the
+ * register we access, it can be a hit counter for some classification tables,
+ * a counter specific to a rxq, a txq or a buffer pool.
+ */
+static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
+{
+	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+	return mvpp2_read(priv, reg);
+}
+
 /* Due to the fact that software statistics and hardware statistics are, by
  * design, incremented at different moments in the chain of packet processing,
  * it is very likely that incoming packets could have been dropped after being
@@ -1273,7 +1545,7 @@
  * Hence, statistics gathered from userspace with ifconfig (software) and
  * ethtool (hardware) cannot be compared.
  */
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
 	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
 	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
 	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
@@ -1303,15 +1575,191 @@
 	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
 };

+static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
+	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
+	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
+	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
+	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
+	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
+	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
+	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
+	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
+	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
+	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
+	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
+	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
+	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
+	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
+	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
+	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
+	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
+	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
+	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
+	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
+	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
+	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
+};
+
+#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
+						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
+						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
+						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
+						 ARRAY_SIZE(mvpp2_ethtool_xdp))
+
 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
 {
-	if (sset == ETH_SS_STATS) {
-		int i;
+	struct mvpp2_port *port = netdev_priv(netdev);
+	int i, q;

-		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-			strscpy(data + i * ETH_GSTRING_LEN,
-				mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
+		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
+			ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
+		strscpy(data, mvpp2_ethtool_port_regs[i].string,
+			ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (q = 0; q < port->ntxqs; q++) {
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
+			snprintf(data, ETH_GSTRING_LEN,
+				 mvpp2_ethtool_txq_regs[i].string, q);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (q = 0; q < port->nrxqs; q++) {
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
+			snprintf(data, ETH_GSTRING_LEN,
+				 mvpp2_ethtool_rxq_regs[i].string,
+				 q);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
+		strscpy(data, mvpp2_ethtool_xdp[i].string,
+			ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+static void
+mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
+{
+	unsigned int start;
+	unsigned int cpu;
+
+	/* Gather XDP Statistics */
+	for_each_possible_cpu(cpu) {
+		struct mvpp2_pcpu_stats *cpu_stats;
+		u64 xdp_redirect;
+		u64 xdp_pass;
+		u64 xdp_drop;
+		u64 xdp_xmit;
+		u64 xdp_xmit_err;
+		u64 xdp_tx;
+		u64 xdp_tx_err;
+
+		cpu_stats = per_cpu_ptr(port->stats, cpu);
+		do {
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			xdp_redirect = cpu_stats->xdp_redirect;
+			xdp_pass = cpu_stats->xdp_pass;
+			xdp_drop = cpu_stats->xdp_drop;
+			xdp_xmit = cpu_stats->xdp_xmit;
+			xdp_xmit_err = cpu_stats->xdp_xmit_err;
+			xdp_tx = cpu_stats->xdp_tx;
+			xdp_tx_err = cpu_stats->xdp_tx_err;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+		xdp_stats->xdp_redirect += xdp_redirect;
+		xdp_stats->xdp_pass += xdp_pass;
+		xdp_stats->xdp_drop += xdp_drop;
+		xdp_stats->xdp_xmit += xdp_xmit;
+		xdp_stats->xdp_xmit_err += xdp_xmit_err;
+		xdp_stats->xdp_tx += xdp_tx;
+		xdp_stats->xdp_tx_err += xdp_tx_err;
+	}
+}
+
+static void mvpp2_read_stats(struct mvpp2_port *port)
+{
+	struct mvpp2_pcpu_stats xdp_stats = {};
+	const struct mvpp2_ethtool_counter *s;
+	u64 *pstats;
+	int i, q;
+
+	pstats = port->ethtool_stats;
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
+		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
+		*pstats++ += mvpp2_read(port->priv,
+					mvpp2_ethtool_port_regs[i].offset +
+					4 * port->id);
+
+	for (q = 0; q < port->ntxqs; q++)
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
+			*pstats++ += mvpp2_read_index(port->priv,
+						      MVPP22_CTRS_TX_CTR(port->id, q),
+						      mvpp2_ethtool_txq_regs[i].offset);
+
+	/* Rxqs are numbered from 0 from the user standpoint, but not from the
+	 * driver's. We need to add the port->first_rxq offset.
+	 */
+	for (q = 0; q < port->nrxqs; q++)
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
+			*pstats++ += mvpp2_read_index(port->priv,
+						      port->first_rxq + q,
+						      mvpp2_ethtool_rxq_regs[i].offset);
+
+	/* Gather XDP Statistics */
+	mvpp2_get_xdp_stats(port, &xdp_stats);
+
+	for (i = 0, s = mvpp2_ethtool_xdp;
+	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
+	     s++, i++) {
+		switch (s->offset) {
+		case ETHTOOL_XDP_REDIRECT:
+			*pstats++ = xdp_stats.xdp_redirect;
+			break;
+		case ETHTOOL_XDP_PASS:
+			*pstats++ = xdp_stats.xdp_pass;
+			break;
+		case ETHTOOL_XDP_DROP:
+			*pstats++ = xdp_stats.xdp_drop;
+			break;
+		case ETHTOOL_XDP_TX:
+			*pstats++ = xdp_stats.xdp_tx;
+			break;
+		case ETHTOOL_XDP_TX_ERR:
+			*pstats++ = xdp_stats.xdp_tx_err;
+			break;
+		case ETHTOOL_XDP_XMIT:
+			*pstats++ = xdp_stats.xdp_xmit;
+			break;
+		case ETHTOOL_XDP_XMIT_ERR:
+			*pstats++ = xdp_stats.xdp_xmit_err;
+			break;
+		}
 	}
 }
@@ -1320,14 +1768,10 @@
 	struct delayed_work *del_work = to_delayed_work(work);
 	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);
-	u64 *pstats;
-	int i;

 	mutex_lock(&port->gather_stats_lock);

-	pstats = port->ethtool_stats;
-	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+	mvpp2_read_stats(port);

 	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
@@ -1351,30 +1795,84 @@

 	mutex_lock(&port->gather_stats_lock);
 	memcpy(data, port->ethtool_stats,
-	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
+	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
 	mutex_unlock(&port->gather_stats_lock);
 }

 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
 {
+	struct mvpp2_port *port = netdev_priv(dev);
+
 	if (sset == ETH_SS_STATS)
-		return ARRAY_SIZE(mvpp2_ethtool_regs);
+		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

 	return -EOPNOTSUPP;
 }

-static void mvpp2_port_reset(struct mvpp2_port *port)
+static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
 {
 	u32 val;
-	unsigned int i;
-
-	/* Read the GOP statistics to reset the hardware counters */
-	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
+		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+	}
+}
+
+static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
+{
+	struct mvpp2 *priv = port->priv;
+	void __iomem *mpcs, *xpcs;
+	u32 val;
+
+	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+		return;
+
+	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
+	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+	val = readl(xpcs + MVPP22_XPCS_CFG0);
+	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+}
+
+static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
+{
+	struct mvpp2 *priv = port->priv;
+	void __iomem *mpcs, *xpcs;
+	u32 val;
+
+	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+		return;
+
+	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+	switch (port->phy_interface) {
+	case PHY_INTERFACE_MODE_10GBASER:
+		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
+		       MAC_CLK_RESET_SD_TX;
+		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+		break;
+	case PHY_INTERFACE_MODE_XAUI:
+	case PHY_INTERFACE_MODE_RXAUI:
+		val = readl(xpcs + MVPP22_XPCS_CFG0);
+		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+		break;
+	default:
+		break;
+	}
 }

 /* Change maximum receive size of the port */
@@ -1420,6 +1918,9 @@
 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+	/* Set TXQ scheduling to Round-Robin */
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

 	/* Close bandwidth for all queues */
 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
@@ -1618,7 +2119,8 @@
 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
 {
 	/* aggregated access - relevant TXQ number is written in TX desc */
-	mvpp2_percpu_write(port->priv, smp_processor_id(),
+	mvpp2_thread_write(port->priv,
+			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
 }
@@ -1628,14 +2130,15 @@
  * Called only from mvpp2_tx(), so migration is disabled, using
  * smp_processor_id() is OK.
  */
-static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
 {
 	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
-		int cpu = smp_processor_id();
-		u32 val = mvpp2_read_relaxed(priv,
-					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+		unsigned int thread =
+			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+		u32 val = mvpp2_read_relaxed(port->priv,
+					     MVPP2_AGGR_TXQ_STATUS_REG(thread));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

@@ -1651,16 +2154,17 @@
  * only by mvpp2_tx(), so migration is disabled, using
  * smp_processor_id() is OK.
  */
-static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
 {
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+	struct mvpp2 *priv = port->priv;
 	u32 val;
-	int cpu = smp_processor_id();

 	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
-	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

-	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

 	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
 }
@@ -1668,12 +2172,13 @@
 /* Check if there are enough reserved descriptors for transmission.
  * If not, request chunk of reserved descriptors and check again.
  */
-static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
 {
-	int req, cpu, desc_count;
+	int req, desc_count;
+	unsigned int thread;

 	if (txq_pcpu->reserved_num >= num)
		return 0;
@@ -1684,10 +2189,10 @@

 	desc_count = 0;
 	/* Compute total of used descriptors */
-	for_each_present_cpu(cpu) {
+	for (thread = 0; thread < port->priv->nthreads; thread++) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

-		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
 	}
@@ -1696,10 +2201,10 @@
 	desc_count += req;

 	if (desc_count >
-	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+	    (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

-	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);

 	/* OK, the descriptor could have been updated: check again. */
 	if (txq_pcpu->reserved_num < num)
@@ -1753,7 +2258,7 @@

 /* Get number of sent descriptors and decrement counter.
  * The number of sent descriptors is returned.
- * Per-CPU access
+ * Per-thread access
  *
  * Called only from mvpp2_txq_done(), called from mvpp2_tx()
  * (migration disabled) and from the TX completion tasklet (migration
@@ -1765,7 +2270,8 @@
 	u32 val;

 	/* Reading status reg resets transmitted descriptor counter */
-	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+	val = mvpp2_thread_read_relaxed(port->priv,
+					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
					MVPP2_TXQ_SENT_REG(txq->id));

 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
@@ -1780,10 +2286,15 @@
 	struct mvpp2_port *port = arg;
 	int queue;

+	/* If the thread isn't used, don't do anything */
+	if (smp_processor_id() >= port->priv->nthreads)
+		return;
+
 	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

-		mvpp2_percpu_read(port->priv, smp_processor_id(),
+		mvpp2_thread_read(port->priv,
+				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
				  MVPP2_TXQ_SENT_REG(id));
 	}
 }
@@ -1843,13 +2354,13 @@
 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());

 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

 	put_cpu();
@@ -1859,17 +2370,18 @@
 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
 {
-	int cpu = get_cpu();
+	unsigned int thread;
 	u32 val;

 	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

 	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
-
-	put_cpu();
+	/* PKT-coalescing registers are per-queue + per-thread */
+	for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+		mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+		mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+	}
 }

 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -1935,11 +2447,15 @@
 	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_get_index;

-	if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+	if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
+	    tx_buf->type != MVPP2_TYPE_XDP_TX)
		dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
				 tx_buf->size, DMA_TO_DEVICE);
-	if (tx_buf->skb)
+	if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
		dev_kfree_skb_any(tx_buf->skb);
+	else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
+		 tx_buf->type == MVPP2_TYPE_XDP_NDO)
+		xdp_return_frame(tx_buf->xdpf);

 	mvpp2_txq_inc_get(txq_pcpu);
 }
@@ -1968,7 +2484,7 @@
 	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
 	int tx_done;

-	if (txq_pcpu->cpu != smp_processor_id())
+	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

 	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
@@ -1984,7 +2500,7 @@
 }

 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
-				  int cpu)
+				  unsigned int thread)
 {
 	struct mvpp2_tx_queue *txq;
 	struct mvpp2_txq_pcpu *txq_pcpu;
@@ -1995,7 +2511,7 @@
		if (!txq)
			break;

-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
@@ -2011,15 +2527,15 @@

 /* Allocate and initialize descriptors for aggr TXQ */
 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
-			       struct mvpp2_tx_queue *aggr_txq, int cpu,
-			       struct mvpp2 *priv)
+			       struct mvpp2_tx_queue *aggr_txq,
+			       unsigned int thread, struct mvpp2 *priv)
 {
 	u32 txq_dma;

 	/* Allocate memory for TX descriptors */
-	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
-					      MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
-					      &aggr_txq->descs_dma, GFP_KERNEL);
+	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+					     &aggr_txq->descs_dma, GFP_KERNEL);
 	if (!aggr_txq->descs)
		return -ENOMEM;

 	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

 	/* Aggr TXQ no reset WA */
 	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
-						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+						 MVPP2_AGGR_TXQ_INDEX_REG(thread));

 	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
 	if (port->priv->hw_version == MVPP22)
		txq_dma = aggr_txq->descs_dma;
 	else
		txq_dma = aggr_txq->descs_dma >>
			  MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

-	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
-	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
		    MVPP2_AGGR_TXQ_SIZE);

 	return 0;
20492565 static int mvpp2_rxq_init(struct mvpp2_port *port,
20502566 struct mvpp2_rx_queue *rxq)
2051
-
20522567 {
2568
+ struct mvpp2 *priv = port->priv;
2569
+ unsigned int thread;
20532570 u32 rxq_dma;
2054
- int cpu;
2571
+ int err;
20552572
20562573 rxq->size = port->rx_ring_size;
20572574
....@@ -2068,19 +2585,19 @@
20682585 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
20692586
20702587 /* Set Rx descriptors queue starting address - indirect access */
2071
- cpu = get_cpu();
2072
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
2588
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2589
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
20732590 if (port->priv->hw_version == MVPP21)
20742591 rxq_dma = rxq->descs_dma;
20752592 else
20762593 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2077
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2078
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2079
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
2594
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2595
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2596
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
20802597 put_cpu();
20812598
20822599 /* Set Offset */
2083
- mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
2600
+ mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
20842601
20852602 /* Set coalescing pkts and time */
20862603 mvpp2_rx_pkts_coal_set(port, rxq);
....@@ -2089,7 +2606,43 @@
20892606 /* Add number of descriptors ready for receiving packets */
20902607 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
20912608
2609
+ if (priv->percpu_pools) {
2610
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq);
2611
+ if (err < 0)
2612
+ goto err_free_dma;
2613
+
2614
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq);
2615
+ if (err < 0)
2616
+ goto err_unregister_rxq_short;
2617
+
2618
+ /* Every RXQ has a pool for short and another for long packets */
2619
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2620
+ MEM_TYPE_PAGE_POOL,
2621
+ priv->page_pool[rxq->logic_rxq]);
2622
+ if (err < 0)
2623
+ goto err_unregister_rxq_long;
2624
+
2625
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2626
+ MEM_TYPE_PAGE_POOL,
2627
+ priv->page_pool[rxq->logic_rxq +
2628
+ port->nrxqs]);
2629
+ if (err < 0)
2630
+ goto err_unregister_mem_rxq_short;
2631
+ }
2632
+
20922633 return 0;
2634
+
2635
+err_unregister_mem_rxq_short:
2636
+ xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
2637
+err_unregister_rxq_long:
2638
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2639
+err_unregister_rxq_short:
2640
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2641
+err_free_dma:
2642
+ dma_free_coherent(port->dev->dev.parent,
2643
+ rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2644
+ rxq->descs, rxq->descs_dma);
2645
+ return err;
20932646 }
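
On failure, mvpp2_rxq_init() unwinds its registration steps in strict reverse order through a goto ladder. A self-contained sketch of the same idiom, with hypothetical setup_*/undo_* helpers in place of the xdp_rxq_info_reg()/xdp_rxq_info_unreg() pairs:

#include <errno.h>
#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for the
 * xdp_rxq_info registration calls above. */
static int setup_short(void) { return 0; }
static int setup_long(void)  { return 0; }
static int setup_mem(void)   { return -ENOMEM; }	/* force a failure */
static void undo_short(void) { puts("undo short"); }
static void undo_long(void)  { puts("undo long"); }

static int init_all(void)
{
	int err;

	err = setup_short();
	if (err)
		return err;

	err = setup_long();
	if (err)
		goto err_undo_short;

	err = setup_mem();
	if (err)
		goto err_undo_long;

	return 0;

	/* Teardown runs in exact reverse order of setup. */
err_undo_long:
	undo_long();
err_undo_short:
	undo_short();
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
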
20942647
20952648 /* Push packets received by the RXQ to BM pool */
....@@ -2121,7 +2674,13 @@
21212674 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
21222675 struct mvpp2_rx_queue *rxq)
21232676 {
2124
- int cpu;
2677
+ unsigned int thread;
2678
+
2679
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
2680
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2681
+
2682
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
2683
+ xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
21252684
21262685 mvpp2_rxq_drop_pkts(port, rxq);
21272686
....@@ -2140,10 +2699,10 @@
21402699 * free descriptor number
21412700 */
21422701 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2143
- cpu = get_cpu();
2144
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
2145
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
2146
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
2702
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2703
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2704
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
2705
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
21472706 put_cpu();
21482707 }
21492708
....@@ -2152,7 +2711,8 @@
21522711 struct mvpp2_tx_queue *txq)
21532712 {
21542713 u32 val;
2155
- int cpu, desc, desc_per_txq, tx_port_num;
2714
+ unsigned int thread;
2715
+ int desc, desc_per_txq, tx_port_num;
21562716 struct mvpp2_txq_pcpu *txq_pcpu;
21572717
21582718 txq->size = port->tx_ring_size;
....@@ -2167,18 +2727,18 @@
21672727 txq->last_desc = txq->size - 1;
21682728
21692729 /* Set Tx descriptors queue starting address - indirect access */
2170
- cpu = get_cpu();
2171
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
2172
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
2730
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2731
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2732
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
21732733 txq->descs_dma);
2174
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
2734
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
21752735 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
2176
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
2177
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
2736
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
2737
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
21782738 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
2179
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
2739
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
21802740 val &= ~MVPP2_TXQ_PENDING_MASK;
2181
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
2741
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
21822742
21832743 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
21842744 * for each existing TXQ.
....@@ -2189,7 +2749,7 @@
21892749 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
21902750 (txq->log_id * desc_per_txq);
21912751
2192
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
2752
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
21932753 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
21942754 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
21952755 put_cpu();
....@@ -2208,8 +2768,8 @@
22082768 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
22092769 val);
22102770
2211
- for_each_present_cpu(cpu) {
2212
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
2771
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
2772
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
22132773 txq_pcpu->size = txq->size;
22142774 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
22152775 sizeof(*txq_pcpu->buffs),
....@@ -2243,10 +2803,10 @@
22432803 struct mvpp2_tx_queue *txq)
22442804 {
22452805 struct mvpp2_txq_pcpu *txq_pcpu;
2246
- int cpu;
2806
+ unsigned int thread;
22472807
2248
- for_each_present_cpu(cpu) {
2249
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
2808
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
2809
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
22502810 kfree(txq_pcpu->buffs);
22512811
22522812 if (txq_pcpu->tso_headers)
....@@ -2272,10 +2832,10 @@
22722832 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
22732833
22742834 /* Set Tx descriptors queue starting address and size */
2275
- cpu = get_cpu();
2276
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
2277
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
2278
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
2835
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2836
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2837
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
2838
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
22792839 put_cpu();
22802840 }
22812841
....@@ -2283,14 +2843,14 @@
22832843 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
22842844 {
22852845 struct mvpp2_txq_pcpu *txq_pcpu;
2286
- int delay, pending, cpu;
2846
+ int delay, pending;
2847
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
22872848 u32 val;
22882849
2289
- cpu = get_cpu();
2290
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
2291
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
2850
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2851
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
22922852 val |= MVPP2_TXQ_DRAIN_EN_MASK;
2293
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
2853
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
22942854
22952855 /* The napi queue has been stopped so wait for all packets
22962856 * to be transmitted.
....@@ -2306,17 +2866,17 @@
23062866 mdelay(1);
23072867 delay++;
23082868
2309
- pending = mvpp2_percpu_read(port->priv, cpu,
2869
+ pending = mvpp2_thread_read(port->priv, thread,
23102870 MVPP2_TXQ_PENDING_REG);
23112871 pending &= MVPP2_TXQ_PENDING_MASK;
23122872 } while (pending);
23132873
23142874 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2315
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
2875
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
23162876 put_cpu();
23172877
2318
- for_each_present_cpu(cpu) {
2319
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
2878
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
2879
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
23202880
23212881 /* Release all packets */
23222882 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
....@@ -2390,6 +2950,10 @@
23902950 err = mvpp2_txq_init(port, txq);
23912951 if (err)
23922952 goto err_cleanup;
2953
+
2954
+ /* Assign this queue to a CPU */
2955
+ if (queue < num_possible_cpus())
2956
+ netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
23932957 }
23942958
23952959 if (port->has_tx_irqs) {
....@@ -2420,45 +2984,67 @@
24202984 return IRQ_HANDLED;
24212985 }
24222986
2423
-/* Per-port interrupt for link status changes */
2424
-static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
2987
+static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
24252988 {
2426
- struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
2427
- struct net_device *dev = port->dev;
2428
- bool event = false, link = false;
2429
- u32 val;
2989
+ struct skb_shared_hwtstamps shhwtstamps;
2990
+ struct mvpp2_hwtstamp_queue *queue;
2991
+ struct sk_buff *skb;
2992
+ void __iomem *ptp_q;
2993
+ unsigned int id;
2994
+ u32 r0, r1, r2;
24302995
2431
- mvpp22_gop_mask_irq(port);
2996
+ ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
2997
+ if (nq)
2998
+ ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
24322999
2433
- if (port->gop_id == 0 &&
2434
- port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
2435
- val = readl(port->base + MVPP22_XLG_INT_STAT);
2436
- if (val & MVPP22_XLG_INT_STAT_LINK) {
2437
- event = true;
2438
- val = readl(port->base + MVPP22_XLG_STATUS);
2439
- if (val & MVPP22_XLG_STATUS_LINK_UP)
2440
- link = true;
2441
- }
2442
- } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
2443
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
2444
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
2445
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
2446
- val = readl(port->base + MVPP22_GMAC_INT_STAT);
2447
- if (val & MVPP22_GMAC_INT_STAT_LINK) {
2448
- event = true;
2449
- val = readl(port->base + MVPP2_GMAC_STATUS0);
2450
- if (val & MVPP2_GMAC_STATUS0_LINK_UP)
2451
- link = true;
3000
+ queue = &port->tx_hwtstamp_queue[nq];
3001
+
3002
+ while (1) {
3003
+ r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3004
+ if (!r0)
3005
+ break;
3006
+
3007
+ r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3008
+ r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3009
+
3010
+ id = (r0 >> 1) & 31;
3011
+
3012
+ skb = queue->skb[id];
3013
+ queue->skb[id] = NULL;
3014
+ if (skb) {
3015
+ u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3016
+
3017
+ mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3018
+ skb_tstamp_tx(skb, &shhwtstamps);
3019
+ dev_kfree_skb_any(skb);
24523020 }
24533021 }
3022
+}
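
The hardware scatters the 32-bit TX timestamp across three 16-bit queue registers: r0[15:13] carries bits 2:0 (the rest of r0 holds the entry id and a valid bit), r1 carries bits 18:3, and r2 bits 31:19, which is why the loop computes r2 << 19 | r1 << 3 | r0 >> 13. A quick round-trip check of that reassembly (the register split here is synthesized, not read from hardware):

#include <stdint.h>
#include <stdio.h>

/* Reassemble the timestamp exactly as the ISR does. */
static uint32_t ptp_ts(uint16_t r0, uint16_t r1, uint16_t r2)
{
	return (uint32_t)r2 << 19 | (uint32_t)r1 << 3 | r0 >> 13;
}

int main(void)
{
	uint32_t ts = 0x12345678;

	/* Scatter ts into the three fields, then rebuild it:
	 * r0[15:13] = ts[2:0], r1 = ts[18:3], r2 = ts[31:19]. */
	uint16_t r0 = (ts & 0x7) << 13;
	uint16_t r1 = (ts >> 3) & 0xffff;
	uint16_t r2 = ts >> 19;

	printf("rebuilt: 0x%08x (match: %d)\n",
	       ptp_ts(r0, r1, r2), ptp_ts(r0, r1, r2) == ts);
	return 0;
}
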
3023
+
3024
+static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3025
+{
3026
+ void __iomem *ptp;
3027
+ u32 val;
3028
+
3029
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3030
+ val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3031
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3032
+ mvpp2_isr_handle_ptp_queue(port, 0);
3033
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3034
+ mvpp2_isr_handle_ptp_queue(port, 1);
3035
+}
3036
+
3037
+static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
3038
+{
3039
+ struct net_device *dev = port->dev;
24543040
24553041 if (port->phylink) {
24563042 phylink_mac_change(port->phylink, link);
2457
- goto handled;
3043
+ return;
24583044 }
24593045
2460
- if (!netif_running(dev) || !event)
2461
- goto handled;
3046
+ if (!netif_running(dev))
3047
+ return;
24623048
24633049 if (link) {
24643050 mvpp2_interrupts_enable(port);
....@@ -2475,52 +3061,98 @@
24753061
24763062 mvpp2_interrupts_disable(port);
24773063 }
3064
+}
24783065
2479
-handled:
3066
+static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3067
+{
3068
+ bool link;
3069
+ u32 val;
3070
+
3071
+ val = readl(port->base + MVPP22_XLG_INT_STAT);
3072
+ if (val & MVPP22_XLG_INT_STAT_LINK) {
3073
+ val = readl(port->base + MVPP22_XLG_STATUS);
3074
+ link = (val & MVPP22_XLG_STATUS_LINK_UP);
3075
+ mvpp2_isr_handle_link(port, link);
3076
+ }
3077
+}
3078
+
3079
+static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3080
+{
3081
+ bool link;
3082
+ u32 val;
3083
+
3084
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3085
+ phy_interface_mode_is_8023z(port->phy_interface) ||
3086
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3087
+ val = readl(port->base + MVPP22_GMAC_INT_STAT);
3088
+ if (val & MVPP22_GMAC_INT_STAT_LINK) {
3089
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
3090
+ link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3091
+ mvpp2_isr_handle_link(port, link);
3092
+ }
3093
+ }
3094
+}
3095
+
3096
+/* Per-port interrupt for link status changes */
3097
+static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3098
+{
3099
+ struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3100
+ u32 val;
3101
+
3102
+ mvpp22_gop_mask_irq(port);
3103
+
3104
+ if (mvpp2_port_supports_xlg(port) &&
3105
+ mvpp2_is_xlg(port->phy_interface)) {
3106
+ /* Check the external status register */
3107
+ val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3108
+ if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3109
+ mvpp2_isr_handle_xlg(port);
3110
+ if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3111
+ mvpp2_isr_handle_ptp(port);
3112
+ } else {
3113
+ /* If it's not the XLG, we must be using the GMAC.
3114
+ * Check the summary status.
3115
+ */
3116
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3117
+ if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3118
+ mvpp2_isr_handle_gmac_internal(port);
3119
+ if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3120
+ mvpp2_isr_handle_ptp(port);
3121
+ }
3122
+
24803123 mvpp22_gop_unmask_irq(port);
24813124 return IRQ_HANDLED;
24823125 }
24833126
2484
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
3127
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
24853128 {
2486
- ktime_t interval;
2487
-
2488
- if (!port_pcpu->timer_scheduled) {
2489
- port_pcpu->timer_scheduled = true;
2490
- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
2491
- hrtimer_start(&port_pcpu->tx_done_timer, interval,
2492
- HRTIMER_MODE_REL_PINNED);
2493
- }
2494
-}
2495
-
2496
-static void mvpp2_tx_proc_cb(unsigned long data)
2497
-{
2498
- struct net_device *dev = (struct net_device *)data;
2499
- struct mvpp2_port *port = netdev_priv(dev);
2500
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
3129
+ struct net_device *dev;
3130
+ struct mvpp2_port *port;
3131
+ struct mvpp2_port_pcpu *port_pcpu;
25013132 unsigned int tx_todo, cause;
25023133
3134
+ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3135
+ dev = port_pcpu->dev;
3136
+
25033137 if (!netif_running(dev))
2504
- return;
3138
+ return HRTIMER_NORESTART;
3139
+
25053140 port_pcpu->timer_scheduled = false;
3141
+ port = netdev_priv(dev);
25063142
25073143 /* Process all the Tx queues */
25083144 cause = (1 << port->ntxqs) - 1;
2509
- tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
3145
+ tx_todo = mvpp2_tx_done(port, cause,
3146
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
25103147
25113148 /* Set the timer in case not all the packets were processed */
2512
- if (tx_todo)
2513
- mvpp2_timer_set(port_pcpu);
2514
-}
3149
+ if (tx_todo && !port_pcpu->timer_scheduled) {
3150
+ port_pcpu->timer_scheduled = true;
3151
+ hrtimer_forward_now(&port_pcpu->tx_done_timer,
3152
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
25153153
2516
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
2517
-{
2518
- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
2519
- struct mvpp2_port_pcpu,
2520
- tx_done_timer);
2521
-
2522
- tasklet_schedule(&port_pcpu->tx_done_tasklet);
2523
-
3154
+ return HRTIMER_RESTART;
3155
+ }
25243156 return HRTIMER_NORESTART;
25253157 }
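
Instead of bouncing through a tasklet, the callback now re-arms itself: hrtimer_forward_now() pushes the expiry forward and HRTIMER_RESTART keeps the timer queued. A minimal kernel-side sketch of that self-rearming pattern (module context assumed; the period and tick count are made up):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define DEMO_PERIOD_NS	(1 * NSEC_PER_MSEC)	/* made-up period */

static struct hrtimer demo_timer;
static int demo_ticks = 3;			/* pretend work remains */

static enum hrtimer_restart demo_cb(struct hrtimer *t)
{
	if (--demo_ticks <= 0)
		return HRTIMER_NORESTART;

	/* Push the expiry forward from "now" and stay queued: the
	 * same rescheduling idiom as mvpp2_hr_timer_cb(). */
	hrtimer_forward_now(t, ns_to_ktime(DEMO_PERIOD_NS));
	return HRTIMER_RESTART;
}

static void demo_timer_start(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	demo_timer.function = demo_cb;
	hrtimer_start(&demo_timer, ns_to_ktime(DEMO_PERIOD_NS),
		      HRTIMER_MODE_REL_PINNED_SOFT);
}
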
25263158
....@@ -2569,17 +3201,17 @@
25693201 skb->ip_summed = CHECKSUM_NONE;
25703202 }
25713203
2572
-/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
3204
+/* Allocate a new skb and add it to BM pool */
25733205 static int mvpp2_rx_refill(struct mvpp2_port *port,
2574
- struct mvpp2_bm_pool *bm_pool, int pool)
3206
+ struct mvpp2_bm_pool *bm_pool,
3207
+ struct page_pool *page_pool, int pool)
25753208 {
25763209 dma_addr_t dma_addr;
25773210 phys_addr_t phys_addr;
25783211 void *buf;
25793212
2580
- /* No recycle or too many buffers are in use, so allocate a new skb */
2581
- buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
2582
- GFP_ATOMIC);
3213
+ buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3214
+ &dma_addr, &phys_addr, GFP_ATOMIC);
25833215 if (!buf)
25843216 return -ENOMEM;
25853217
....@@ -2620,15 +3252,280 @@
26203252 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
26213253 }
26223254
3255
+static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3256
+{
3257
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3258
+ struct mvpp2_tx_queue *aggr_txq;
3259
+ struct mvpp2_txq_pcpu *txq_pcpu;
3260
+ struct mvpp2_tx_queue *txq;
3261
+ struct netdev_queue *nq;
3262
+
3263
+ txq = port->txqs[txq_id];
3264
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3265
+ nq = netdev_get_tx_queue(port->dev, txq_id);
3266
+ aggr_txq = &port->priv->aggr_txqs[thread];
3267
+
3268
+ txq_pcpu->reserved_num -= nxmit;
3269
+ txq_pcpu->count += nxmit;
3270
+ aggr_txq->count += nxmit;
3271
+
3272
+ /* Enable transmit */
3273
+ wmb();
3274
+ mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3275
+
3276
+ if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3277
+ netif_tx_stop_queue(nq);
3278
+
3279
+ /* Finalize TX processing */
3280
+ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3281
+ mvpp2_txq_done(port, txq, txq_pcpu);
3282
+}
3283
+
3284
+static int
3285
+mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3286
+ struct xdp_frame *xdpf, bool dma_map)
3287
+{
3288
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3289
+ u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3290
+ MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3291
+ enum mvpp2_tx_buf_type buf_type;
3292
+ struct mvpp2_txq_pcpu *txq_pcpu;
3293
+ struct mvpp2_tx_queue *aggr_txq;
3294
+ struct mvpp2_tx_desc *tx_desc;
3295
+ struct mvpp2_tx_queue *txq;
3296
+ int ret = MVPP2_XDP_TX;
3297
+ dma_addr_t dma_addr;
3298
+
3299
+ txq = port->txqs[txq_id];
3300
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3301
+ aggr_txq = &port->priv->aggr_txqs[thread];
3302
+
3303
+ /* Check number of available descriptors */
3304
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3305
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3306
+ ret = MVPP2_XDP_DROPPED;
3307
+ goto out;
3308
+ }
3309
+
3310
+ /* Get a descriptor for the first part of the packet */
3311
+ tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3312
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3313
+ mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3314
+
3315
+ if (dma_map) {
3316
+ /* XDP_REDIRECT or AF_XDP */
3317
+ dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3318
+ xdpf->len, DMA_TO_DEVICE);
3319
+
3320
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3321
+ mvpp2_txq_desc_put(txq);
3322
+ ret = MVPP2_XDP_DROPPED;
3323
+ goto out;
3324
+ }
3325
+
3326
+ buf_type = MVPP2_TYPE_XDP_NDO;
3327
+ } else {
3328
+ /* XDP_TX */
3329
+ struct page *page = virt_to_page(xdpf->data);
3330
+
3331
+ dma_addr = page_pool_get_dma_addr(page) +
3332
+ sizeof(*xdpf) + xdpf->headroom;
3333
+ dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3334
+ xdpf->len, DMA_BIDIRECTIONAL);
3335
+
3336
+ buf_type = MVPP2_TYPE_XDP_TX;
3337
+ }
3338
+
3339
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3340
+
3341
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3342
+ mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3343
+
3344
+out:
3345
+ return ret;
3346
+}
3347
+
3348
+static int
3349
+mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3350
+{
3351
+ struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3352
+ struct xdp_frame *xdpf;
3353
+ u16 txq_id;
3354
+ int ret;
3355
+
3356
+ xdpf = xdp_convert_buff_to_frame(xdp);
3357
+ if (unlikely(!xdpf))
3358
+ return MVPP2_XDP_DROPPED;
3359
+
3360
+ /* The first half of the TX queues is used for XPS,
3361
+ * the second half for XDP_TX
3362
+ */
3363
+ txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3364
+
3365
+ ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3366
+ if (ret == MVPP2_XDP_TX) {
3367
+ u64_stats_update_begin(&stats->syncp);
3368
+ stats->tx_bytes += xdpf->len;
3369
+ stats->tx_packets++;
3370
+ stats->xdp_tx++;
3371
+ u64_stats_update_end(&stats->syncp);
3372
+
3373
+ mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3374
+ } else {
3375
+ u64_stats_update_begin(&stats->syncp);
3376
+ stats->xdp_tx_err++;
3377
+ u64_stats_update_end(&stats->syncp);
3378
+ }
3379
+
3380
+ return ret;
3381
+}
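
The queue split is plain arithmetic: each thread picks its XDP_TX queue from the second half of the TX queue range, leaving the first half to the XPS mapping set up in tx queue init. A small demo of the mapping, with ntxqs and nthreads chosen arbitrarily:

#include <stdio.h>

int main(void)
{
	unsigned int ntxqs = 8;		/* assumed port->ntxqs    */
	unsigned int nthreads = 4;	/* assumed priv->nthreads */
	unsigned int cpu;

	for (cpu = 0; cpu < 6; cpu++) {
		unsigned int thread = cpu % nthreads;	  /* mvpp2_cpu_to_thread() */
		unsigned int txq_id = thread + ntxqs / 2; /* second half: XDP_TX  */

		printf("cpu %u -> thread %u -> XDP txq %u\n",
		       cpu, thread, txq_id);
	}
	return 0;
}
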
3382
+
3383
+static int
3384
+mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3385
+ struct xdp_frame **frames, u32 flags)
3386
+{
3387
+ struct mvpp2_port *port = netdev_priv(dev);
3388
+ int i, nxmit_byte = 0, nxmit = num_frame;
3389
+ struct mvpp2_pcpu_stats *stats;
3390
+ u16 txq_id;
3391
+ u32 ret;
3392
+
3393
+ if (unlikely(test_bit(0, &port->state)))
3394
+ return -ENETDOWN;
3395
+
3396
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3397
+ return -EINVAL;
3398
+
3399
+ /* The first half of the TX queues is used for XPS,
3400
+ * the second half for XDP_TX
3401
+ */
3402
+ txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3403
+
3404
+ for (i = 0; i < num_frame; i++) {
3405
+ ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3406
+ if (ret == MVPP2_XDP_TX) {
3407
+ nxmit_byte += frames[i]->len;
3408
+ } else {
3409
+ xdp_return_frame_rx_napi(frames[i]);
3410
+ nxmit--;
3411
+ }
3412
+ }
3413
+
3414
+ if (likely(nxmit > 0))
3415
+ mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3416
+
3417
+ stats = this_cpu_ptr(port->stats);
3418
+ u64_stats_update_begin(&stats->syncp);
3419
+ stats->tx_bytes += nxmit_byte;
3420
+ stats->tx_packets += nxmit;
3421
+ stats->xdp_xmit += nxmit;
3422
+ stats->xdp_xmit_err += num_frame - nxmit;
3423
+ u64_stats_update_end(&stats->syncp);
3424
+
3425
+ return nxmit;
3426
+}
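
All the counters touched here sit between u64_stats_update_begin()/end() so that 64-bit statistics cannot tear on 32-bit machines; readers retry through the attached seqcount. A reader-side sketch under the same convention (kernel-side, with a hypothetical demo_stats struct):

#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counter block, in the style of
 * struct mvpp2_pcpu_stats. */
struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_stats_add(struct demo_stats *s, u64 len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Snapshot the counters; retry if a writer raced the read so a
 * 32-bit machine never sees a torn 64-bit value. */
static void demo_stats_read(struct demo_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
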
3427
+
3428
+static int
3429
+mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3430
+ struct bpf_prog *prog, struct xdp_buff *xdp,
3431
+ struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
3432
+{
3433
+ unsigned int len, sync, err;
3434
+ struct page *page;
3435
+ u32 ret, act;
3436
+
3437
+ len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3438
+ act = bpf_prog_run_xdp(prog, xdp);
3439
+
3440
+ /* Due to xdp_adjust_tail: the for_device DMA sync must cover the max length the CPU touched */
3441
+ sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3442
+ sync = max(sync, len);
3443
+
3444
+ switch (act) {
3445
+ case XDP_PASS:
3446
+ stats->xdp_pass++;
3447
+ ret = MVPP2_XDP_PASS;
3448
+ break;
3449
+ case XDP_REDIRECT:
3450
+ err = xdp_do_redirect(port->dev, xdp, prog);
3451
+ if (unlikely(err)) {
3452
+ ret = MVPP2_XDP_DROPPED;
3453
+ page = virt_to_head_page(xdp->data);
3454
+ page_pool_put_page(pp, page, sync, true);
3455
+ } else {
3456
+ ret = MVPP2_XDP_REDIR;
3457
+ stats->xdp_redirect++;
3458
+ }
3459
+ break;
3460
+ case XDP_TX:
3461
+ ret = mvpp2_xdp_xmit_back(port, xdp);
3462
+ if (ret != MVPP2_XDP_TX) {
3463
+ page = virt_to_head_page(xdp->data);
3464
+ page_pool_put_page(pp, page, sync, true);
3465
+ }
3466
+ break;
3467
+ default:
3468
+ bpf_warn_invalid_xdp_action(act);
3469
+ fallthrough;
3470
+ case XDP_ABORTED:
3471
+ trace_xdp_exception(port->dev, prog, act);
3472
+ fallthrough;
3473
+ case XDP_DROP:
3474
+ page = virt_to_head_page(xdp->data);
3475
+ page_pool_put_page(pp, page, sync, true);
3476
+ ret = MVPP2_XDP_DROPPED;
3477
+ stats->xdp_drop++;
3478
+ break;
3479
+ }
3480
+
3481
+ return ret;
3482
+}
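
Each arm of the switch corresponds to a verdict returned by the attached BPF program. A minimal XDP program that would exercise the XDP_PASS and XDP_DROP paths above (libbpf-style, built with clang -target bpf; dropping UDP is an arbitrary example policy):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_drop_udp(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end)
		return XDP_PASS;

	/* mvpp2_run_xdp() turns this into MVPP2_XDP_DROPPED and
	 * recycles the page straight back to the page pool. */
	return iph->protocol == IPPROTO_UDP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";
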
3483
+
3484
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3485
+ int pool, u32 rx_status)
3486
+{
3487
+ phys_addr_t phys_addr, phys_addr_next;
3488
+ dma_addr_t dma_addr, dma_addr_next;
3489
+ struct mvpp2_buff_hdr *buff_hdr;
3490
+
3491
+ phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3492
+ dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3493
+
3494
+ do {
3495
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3496
+
3497
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3498
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3499
+
3500
+ if (port->priv->hw_version >= MVPP22) {
3501
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3502
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3503
+ }
3504
+
3505
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3506
+
3507
+ phys_addr = phys_addr_next;
3508
+ dma_addr = dma_addr_next;
3509
+
3510
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3511
+}
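
When a frame spills over one buffer, the hardware hands back a chain of buffer headers, each naming its successor, terminated by an IS_LAST flag; the loop returns every link to the BM pool. A self-contained model of that walk, with plain pointers standing in for the phys/dma address pairs:

#include <stdbool.h>
#include <stdio.h>

#define HDR_IS_LAST	0x1

/* Hypothetical stand-in for struct mvpp2_buff_hdr: each buffer
 * starts with a header naming the next buffer in the chain. */
struct buff_hdr {
	struct buff_hdr *next;
	unsigned int info;
};

static int recycled;

static void pool_put(struct buff_hdr *b)
{
	(void)b;
	recycled++;		/* models mvpp2_bm_pool_put() */
}

static void buff_hdr_pool_put(struct buff_hdr *hdr)
{
	struct buff_hdr *next;
	bool last;

	/* Same shape as the driver's do/while: capture the link and
	 * the IS_LAST flag before releasing the current buffer. */
	do {
		next = hdr->next;
		last = hdr->info & HDR_IS_LAST;
		pool_put(hdr);
		hdr = next;
	} while (!last);
}

int main(void)
{
	struct buff_hdr c = { NULL, HDR_IS_LAST };
	struct buff_hdr b = { &c, 0 };
	struct buff_hdr a = { &b, 0 };

	buff_hdr_pool_put(&a);
	printf("recycled %d buffers\n", recycled);	/* prints 3 */
	return 0;
}
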
3512
+
26233513 /* Main rx processing */
26243514 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
26253515 int rx_todo, struct mvpp2_rx_queue *rxq)
26263516 {
26273517 struct net_device *dev = port->dev;
3518
+ struct mvpp2_pcpu_stats ps = {};
3519
+ enum dma_data_direction dma_dir;
3520
+ struct bpf_prog *xdp_prog;
3521
+ struct xdp_buff xdp;
26283522 int rx_received;
26293523 int rx_done = 0;
2630
- u32 rcvd_pkts = 0;
2631
- u32 rcvd_bytes = 0;
3524
+ u32 xdp_ret = 0;
3525
+
3526
+ rcu_read_lock();
3527
+
3528
+ xdp_prog = READ_ONCE(port->xdp_prog);
26323529
26333530 /* Get number of received packets and clamp the to-do */
26343531 rx_received = mvpp2_rxq_received(port, rxq->id);
....@@ -2638,12 +3535,13 @@
26383535 while (rx_done < rx_todo) {
26393536 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
26403537 struct mvpp2_bm_pool *bm_pool;
3538
+ struct page_pool *pp = NULL;
26413539 struct sk_buff *skb;
26423540 unsigned int frag_size;
26433541 dma_addr_t dma_addr;
26443542 phys_addr_t phys_addr;
2645
- u32 rx_status;
2646
- int pool, rx_bytes, err;
3543
+ u32 rx_status, timestamp;
3544
+ int pool, rx_bytes, err, ret;
26473545 void *data;
26483546
26493547 rx_done++;
....@@ -2658,24 +3556,65 @@
26583556 MVPP2_RXD_BM_POOL_ID_OFFS;
26593557 bm_pool = &port->priv->bm_pools[pool];
26603558
3559
+ if (port->priv->percpu_pools) {
3560
+ pp = port->priv->page_pool[pool];
3561
+ dma_dir = page_pool_get_dma_dir(pp);
3562
+ } else {
3563
+ dma_dir = DMA_FROM_DEVICE;
3564
+ }
3565
+
3566
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3567
+ rx_bytes + MVPP2_MH_SIZE,
3568
+ dma_dir);
3569
+
3570
+ /* Multi-buffer frames (buffer header set) are not supported: drop */
3571
+ if (rx_status & MVPP2_RXD_BUF_HDR)
3572
+ goto err_drop_frame;
3573
+
26613574 /* In case of an error, release the requested buffer pointer
26623575 * to the Buffer Manager. This request process is controlled
26633576 * by the hardware, and the information about the buffer is
26643577 * comprised by the RX descriptor.
26653578 */
2666
- if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
2667
-err_drop_frame:
2668
- dev->stats.rx_errors++;
2669
- mvpp2_rx_error(port, rx_desc);
2670
- /* Return the buffer to the pool */
2671
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2672
- continue;
2673
- }
3579
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3580
+ goto err_drop_frame;
3581
+
3582
+ /* Prefetch header */
3583
+ prefetch(data);
26743584
26753585 if (bm_pool->frag_size > PAGE_SIZE)
26763586 frag_size = 0;
26773587 else
26783588 frag_size = bm_pool->frag_size;
3589
+
3590
+ if (xdp_prog) {
3591
+ xdp.data_hard_start = data;
3592
+ xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
3593
+ xdp.data_end = xdp.data + rx_bytes;
3594
+ xdp.frame_sz = PAGE_SIZE;
3595
+
3596
+ if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3597
+ xdp.rxq = &rxq->xdp_rxq_short;
3598
+ else
3599
+ xdp.rxq = &rxq->xdp_rxq_long;
3600
+
3601
+ xdp_set_data_meta_invalid(&xdp);
3602
+
3603
+ ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
3604
+
3605
+ if (ret) {
3606
+ xdp_ret |= ret;
3607
+ err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3608
+ if (err) {
3609
+ netdev_err(port->dev, "failed to refill BM pools\n");
3610
+ goto err_drop_frame;
3611
+ }
3612
+
3613
+ ps.rx_packets++;
3614
+ ps.rx_bytes += rx_bytes;
3615
+ continue;
3616
+ }
3617
+ }
26793618
26803619 skb = build_skb(data, frag_size);
26813620 if (!skb) {
....@@ -2683,32 +3622,65 @@
26833622 goto err_drop_frame;
26843623 }
26853624
2686
- err = mvpp2_rx_refill(port, bm_pool, pool);
3625
+ /* If we have RX hardware timestamping enabled, grab the
3626
+ * timestamp from the queue and convert.
3627
+ */
3628
+ if (mvpp22_rx_hwtstamping(port)) {
3629
+ timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
3630
+ mvpp22_tai_tstamp(port->priv->tai, timestamp,
3631
+ skb_hwtstamps(skb));
3632
+ }
3633
+
3634
+ err = mvpp2_rx_refill(port, bm_pool, pp, pool);
26873635 if (err) {
26883636 netdev_err(port->dev, "failed to refill BM pools\n");
3637
+ dev_kfree_skb_any(skb);
26893638 goto err_drop_frame;
26903639 }
26913640
2692
- dma_unmap_single(dev->dev.parent, dma_addr,
2693
- bm_pool->buf_size, DMA_FROM_DEVICE);
3641
+ if (pp)
3642
+ page_pool_release_page(pp, virt_to_page(data));
3643
+ else
3644
+ dma_unmap_single_attrs(dev->dev.parent, dma_addr,
3645
+ bm_pool->buf_size, DMA_FROM_DEVICE,
3646
+ DMA_ATTR_SKIP_CPU_SYNC);
26943647
2695
- rcvd_pkts++;
2696
- rcvd_bytes += rx_bytes;
3648
+ ps.rx_packets++;
3649
+ ps.rx_bytes += rx_bytes;
26973650
2698
- skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
3651
+ skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
26993652 skb_put(skb, rx_bytes);
27003653 skb->protocol = eth_type_trans(skb, dev);
27013654 mvpp2_rx_csum(port, rx_status, skb);
27023655
27033656 napi_gro_receive(napi, skb);
3657
+ continue;
3658
+
3659
+err_drop_frame:
3660
+ dev->stats.rx_errors++;
3661
+ mvpp2_rx_error(port, rx_desc);
3662
+ /* Return the buffer to the pool */
3663
+ if (rx_status & MVPP2_RXD_BUF_HDR)
3664
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
3665
+ else
3666
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
27043667 }
27053668
2706
- if (rcvd_pkts) {
3669
+ rcu_read_unlock();
3670
+
3671
+ if (xdp_ret & MVPP2_XDP_REDIR)
3672
+ xdp_do_flush_map();
3673
+
3674
+ if (ps.rx_packets) {
27073675 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
27083676
27093677 u64_stats_update_begin(&stats->syncp);
2710
- stats->rx_packets += rcvd_pkts;
2711
- stats->rx_bytes += rcvd_bytes;
3678
+ stats->rx_packets += ps.rx_packets;
3679
+ stats->rx_bytes += ps.rx_bytes;
3680
+ /* xdp */
3681
+ stats->xdp_redirect += ps.xdp_redirect;
3682
+ stats->xdp_pass += ps.xdp_pass;
3683
+ stats->xdp_drop += ps.xdp_drop;
27123684 u64_stats_update_end(&stats->syncp);
27133685 }
27143686
....@@ -2723,7 +3695,8 @@
27233695 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
27243696 struct mvpp2_tx_desc *desc)
27253697 {
2726
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
3698
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3699
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
27273700
27283701 dma_addr_t buf_dma_addr =
27293702 mvpp2_txdesc_dma_addr_get(port, desc);
....@@ -2735,26 +3708,117 @@
27353708 mvpp2_txq_desc_put(txq);
27363709 }
27373710
3711
+static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
3712
+ struct mvpp2_tx_desc *desc)
3713
+{
3714
+ /* We only need to clear the low bits */
3715
+ if (port->priv->hw_version != MVPP21)
3716
+ desc->pp22.ptp_descriptor &=
3717
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
3718
+}
3719
+
3720
+static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
3721
+ struct mvpp2_tx_desc *tx_desc,
3722
+ struct sk_buff *skb)
3723
+{
3724
+ struct mvpp2_hwtstamp_queue *queue;
3725
+ unsigned int mtype, type, i;
3726
+ struct ptp_header *hdr;
3727
+ u64 ptpdesc;
3728
+
3729
+ if (port->priv->hw_version == MVPP21 ||
3730
+ port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
3731
+ return false;
3732
+
3733
+ type = ptp_classify_raw(skb);
3734
+ if (!type)
3735
+ return false;
3736
+
3737
+ hdr = ptp_parse_header(skb, type);
3738
+ if (!hdr)
3739
+ return false;
3740
+
3741
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3742
+
3743
+ ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
3744
+ MVPP22_PTP_ACTION_CAPTURE;
3745
+ queue = &port->tx_hwtstamp_queue[0];
3746
+
3747
+ switch (type & PTP_CLASS_VMASK) {
3748
+ case PTP_CLASS_V1:
3749
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
3750
+ break;
3751
+
3752
+ case PTP_CLASS_V2:
3753
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
3754
+ mtype = hdr->tsmt & 15;
3755
+ /* Direct PTP Sync messages to queue 1 */
3756
+ if (mtype == 0) {
3757
+ ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
3758
+ queue = &port->tx_hwtstamp_queue[1];
3759
+ }
3760
+ break;
3761
+ }
3762
+
3763
+ /* Take a reference on the skb and insert into our queue */
3764
+ i = queue->next;
3765
+ queue->next = (i + 1) & 31;
3766
+ if (queue->skb[i])
3767
+ dev_kfree_skb_any(queue->skb[i]);
3768
+ queue->skb[i] = skb_get(skb);
3769
+
3770
+ ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
3771
+
3772
+ /*
3773
+ * 3:0 - PTPAction
3774
+ * 6:4 - PTPPacketFormat
3775
+ * 7 - PTP_CF_WraparoundCheckEn
3776
+ * 9:8 - IngressTimestampSeconds[1:0]
3777
+ * 10 - Reserved
3778
+ * 11 - MACTimestampingEn
3779
+ * 17:12 - PTP_TimestampQueueEntryID[5:0]
3780
+ * 18 - PTPTimestampQueueSelect
3781
+ * 19 - UDPChecksumUpdateEn
3782
+ * 27:20 - TimestampOffset
3783
+ * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
3784
+ * NTPTs, Y.1731 - L3 to timestamp entry
3785
+ * 35:28 - UDP Checksum Offset
3786
+ *
3787
+ * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
3788
+ */
3789
+ tx_desc->pp22.ptp_descriptor &=
3790
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
3791
+ tx_desc->pp22.ptp_descriptor |=
3792
+ cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
3793
+ tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
3794
+ tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
3795
+
3796
+ return true;
3797
+}
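
The 36-bit ptpdesc value documented in the comment is split across two descriptor fields: bits 11:0 go into ptp_descriptor and bits 35:12 are parked in bits 63:40 of buf_dma_addr_ptp, above the DMA address. A quick demonstration of that split, assuming MVPP22_PTP_DESC_MASK_LOW covers bits 11:0:

#include <stdint.h>
#include <stdio.h>

#define PTP_DESC_MASK_LOW	0xfffULL	/* assumed: bits 11:0 */

int main(void)
{
	uint64_t ptpdesc = 0x123456789ULL;	/* 36-bit example value */

	/* Low 12 bits go into the 32-bit ptp_descriptor word... */
	uint32_t ptp_descriptor = ptpdesc & PTP_DESC_MASK_LOW;

	/* ...and bits 35:12 are stored at bits 63:40 of the combined
	 * buf_dma_addr_ptp field, above the 40-bit DMA address. */
	uint64_t buf_dma_addr_ptp = (ptpdesc >> 12) << 40;

	printf("desc low  = 0x%03x\n", ptp_descriptor);
	printf("desc high = 0x%016llx\n",
	       (unsigned long long)buf_dma_addr_ptp);
	return 0;
}
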
3798
+
27383799 /* Handle tx fragmentation processing */
27393800 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
27403801 struct mvpp2_tx_queue *aggr_txq,
27413802 struct mvpp2_tx_queue *txq)
27423803 {
2743
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
3804
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3805
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
27443806 struct mvpp2_tx_desc *tx_desc;
27453807 int i;
27463808 dma_addr_t buf_dma_addr;
27473809
27483810 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
27493811 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2750
- void *addr = page_address(frag->page.p) + frag->page_offset;
3812
+ void *addr = skb_frag_address(frag);
27513813
27523814 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3815
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
27533816 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
2754
- mvpp2_txdesc_size_set(port, tx_desc, frag->size);
3817
+ mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
27553818
27563819 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
2757
- frag->size, DMA_TO_DEVICE);
3820
+ skb_frag_size(frag),
3821
+ DMA_TO_DEVICE);
27583822 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
27593823 mvpp2_txq_desc_put(txq);
27603824 goto cleanup;
....@@ -2766,11 +3830,11 @@
27663830 /* Last descriptor */
27673831 mvpp2_txdesc_cmd_set(port, tx_desc,
27683832 MVPP2_TXD_L_DESC);
2769
- mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3833
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
27703834 } else {
27713835 /* Descriptor in the middle: Not First, Not Last */
27723836 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
2773
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3837
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
27743838 }
27753839 }
27763840
....@@ -2798,6 +3862,7 @@
27983862 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
27993863 dma_addr_t addr;
28003864
3865
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
28013866 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
28023867 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
28033868
....@@ -2808,7 +3873,7 @@
28083873 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
28093874 MVPP2_TXD_F_DESC |
28103875 MVPP2_TXD_PADDING_DISABLE);
2811
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3876
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
28123877 }
28133878
28143879 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
....@@ -2822,6 +3887,7 @@
28223887 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
28233888 dma_addr_t buf_dma_addr;
28243889
3890
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
28253891 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
28263892 mvpp2_txdesc_size_set(port, tx_desc, sz);
28273893
....@@ -2837,14 +3903,14 @@
28373903 if (!left) {
28383904 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
28393905 if (last) {
2840
- mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3906
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
28413907 return 0;
28423908 }
28433909 } else {
28443910 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
28453911 }
28463912
2847
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3913
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
28483914 return 0;
28493915 }
28503916
....@@ -2854,18 +3920,17 @@
28543920 struct mvpp2_txq_pcpu *txq_pcpu)
28553921 {
28563922 struct mvpp2_port *port = netdev_priv(dev);
3923
+ int hdr_sz, i, len, descs = 0;
28573924 struct tso_t tso;
2858
- int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
2859
- int i, len, descs = 0;
28603925
28613926 /* Check number of available descriptors */
2862
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
2863
- tso_count_descs(skb)) ||
2864
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
3927
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
3928
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
28653929 tso_count_descs(skb)))
28663930 return 0;
28673931
2868
- tso_start(skb, &tso);
3932
+ hdr_sz = tso_start(skb, &tso);
3933
+
28693934 len = skb->len - hdr_sz;
28703935 while (len > 0) {
28713936 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
....@@ -2908,14 +3973,21 @@
29083973 struct mvpp2_txq_pcpu *txq_pcpu;
29093974 struct mvpp2_tx_desc *tx_desc;
29103975 dma_addr_t buf_dma_addr;
3976
+ unsigned long flags = 0;
3977
+ unsigned int thread;
29113978 int frags = 0;
29123979 u16 txq_id;
29133980 u32 tx_cmd;
29143981
3982
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3983
+
29153984 txq_id = skb_get_queue_mapping(skb);
29163985 txq = port->txqs[txq_id];
2917
- txq_pcpu = this_cpu_ptr(txq->pcpu);
2918
- aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
3986
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3987
+ aggr_txq = &port->priv->aggr_txqs[thread];
3988
+
3989
+ if (test_bit(thread, &port->priv->lock_map))
3990
+ spin_lock_irqsave(&port->tx_lock[thread], flags);
29193991
29203992 if (skb_is_gso(skb)) {
29213993 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
....@@ -2924,15 +3996,17 @@
29243996 frags = skb_shinfo(skb)->nr_frags + 1;
29253997
29263998 /* Check number of available descriptors */
2927
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
2928
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
2929
- txq_pcpu, frags)) {
3999
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
4000
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
29304001 frags = 0;
29314002 goto out;
29324003 }
29334004
29344005 /* Get a descriptor for the first part of the packet */
29354006 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4007
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4008
+ !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4009
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
29364010 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
29374011 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
29384012
....@@ -2952,12 +4026,12 @@
29524026 /* First and Last descriptor */
29534027 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
29544028 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
2955
- mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
4029
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
29564030 } else {
29574031 /* First but not Last */
29584032 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
29594033 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
2960
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
4034
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
29614035
29624036 /* Continue with other skb fragments */
29634037 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
....@@ -2968,7 +4042,7 @@
29684042
29694043 out:
29704044 if (frags > 0) {
2971
- struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4045
+ struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
29724046 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
29734047
29744048 txq_pcpu->reserved_num -= frags;
....@@ -2998,10 +4072,18 @@
29984072 /* Set the timer in case not all frags were processed */
29994073 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
30004074 txq_pcpu->count > 0) {
3001
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4075
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
30024076
3003
- mvpp2_timer_set(port_pcpu);
4077
+ if (!port_pcpu->timer_scheduled) {
4078
+ port_pcpu->timer_scheduled = true;
4079
+ hrtimer_start(&port_pcpu->tx_done_timer,
4080
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4081
+ HRTIMER_MODE_REL_PINNED_SOFT);
4082
+ }
30044083 }
4084
+
4085
+ if (test_bit(thread, &port->priv->lock_map))
4086
+ spin_unlock_irqrestore(&port->tx_lock[thread], flags);
30054087
30064088 return NETDEV_TX_OK;
30074089 }
....@@ -3022,7 +4104,7 @@
30224104 int rx_done = 0;
30234105 struct mvpp2_port *port = netdev_priv(napi->dev);
30244106 struct mvpp2_queue_vector *qv;
3025
- int cpu = smp_processor_id();
4107
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
30264108
30274109 qv = container_of(napi, struct mvpp2_queue_vector, napi);
30284110
....@@ -3036,7 +4118,7 @@
30364118 *
30374119 * Each CPU has its own Rx/Tx cause register
30384120 */
3039
- cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
4121
+ cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
30404122 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
30414123
30424124 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
....@@ -3045,7 +4127,7 @@
30454127
30464128 /* Clear the cause register */
30474129 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
3048
- mvpp2_percpu_write(port->priv, cpu,
4130
+ mvpp2_thread_write(port->priv, thread,
30494131 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
30504132 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
30514133 }
....@@ -3097,19 +4179,25 @@
30974179 {
30984180 u32 ctrl3;
30994181
4182
+ /* Set the GMAC & XLG MAC in reset */
4183
+ mvpp2_mac_reset_assert(port);
4184
+
4185
+ /* Set the MPCS and XPCS in reset */
4186
+ mvpp22_pcs_reset_assert(port);
4187
+
31004188 /* comphy reconfiguration */
31014189 mvpp22_comphy_init(port);
31024190
31034191 /* gop reconfiguration */
31044192 mvpp22_gop_init(port);
31054193
3106
- /* Only GOP port 0 has an XLG MAC */
3107
- if (port->gop_id == 0) {
4194
+ mvpp22_pcs_reset_deassert(port);
4195
+
4196
+ if (mvpp2_port_supports_xlg(port)) {
31084197 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
31094198 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
31104199
3111
- if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
3112
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4200
+ if (mvpp2_is_xlg(port->phy_interface))
31134201 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
31144202 else
31154203 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
....@@ -3117,9 +4205,7 @@
31174205 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
31184206 }
31194207
3120
- if (port->gop_id == 0 &&
3121
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
3122
- port->phy_interface == PHY_INTERFACE_MODE_10GKR))
4208
+ if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
31234209 mvpp2_xlg_max_rx_size_set(port);
31244210 else
31254211 mvpp2_gmac_max_rx_size_set(port);
....@@ -3135,29 +4221,21 @@
31354221 for (i = 0; i < port->nqvecs; i++)
31364222 napi_enable(&port->qvecs[i].napi);
31374223
3138
- /* Enable interrupts on all CPUs */
4224
+ /* Enable interrupts on all threads */
31394225 mvpp2_interrupts_enable(port);
31404226
31414227 if (port->priv->hw_version == MVPP22)
31424228 mvpp22_mode_reconfigure(port);
31434229
31444230 if (port->phylink) {
3145
- netif_carrier_off(port->dev);
31464231 phylink_start(port->phylink);
31474232 } else {
3148
- /* Phylink isn't used as of now for ACPI, so the MAC has to be
3149
- * configured manually when the interface is started. This will
3150
- * be removed as soon as the phylink ACPI support lands in.
3151
- */
3152
- struct phylink_link_state state = {
3153
- .interface = port->phy_interface,
3154
- };
3155
- mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
3156
- mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
3157
- NULL);
4233
+ mvpp2_acpi_start(port);
31584234 }
31594235
31604236 netif_tx_start_all_queues(port->dev);
4237
+
4238
+ clear_bit(0, &port->state);
31614239 }
31624240
31634241 /* Set hw internals when stopping port */
....@@ -3165,7 +4243,9 @@
31654243 {
31664244 int i;
31674245
3168
- /* Disable interrupts on all CPUs */
4246
+ set_bit(0, &port->state);
4247
+
4248
+ /* Disable interrupts on all threads */
31694249 mvpp2_interrupts_disable(port);
31704250
31714251 for (i = 0; i < port->nqvecs; i++)
....@@ -3238,16 +4318,31 @@
32384318 for (i = 0; i < port->nqvecs; i++) {
32394319 struct mvpp2_queue_vector *qv = port->qvecs + i;
32404320
3241
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
4321
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4322
+ qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4323
+ if (!qv->mask) {
4324
+ err = -ENOMEM;
4325
+ goto err;
4326
+ }
4327
+
32424328 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4329
+ }
32434330
32444331 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
32454332 if (err)
32464333 goto err;
32474334
3248
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
3249
- irq_set_affinity_hint(qv->irq,
3250
- cpumask_of(qv->sw_thread_id));
4335
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4336
+ unsigned int cpu;
4337
+
4338
+ for_each_present_cpu(cpu) {
4339
+ if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4340
+ qv->sw_thread_id)
4341
+ cpumask_set_cpu(cpu, qv->mask);
4342
+ }
4343
+
4344
+ irq_set_affinity_hint(qv->irq, qv->mask);
4345
+ }
32514346 }
32524347
32534348 return 0;
....@@ -3256,6 +4351,8 @@
32564351 struct mvpp2_queue_vector *qv = port->qvecs + i;
32574352
32584353 irq_set_affinity_hint(qv->irq, NULL);
4354
+ kfree(qv->mask);
4355
+ qv->mask = NULL;
32594356 free_irq(qv->irq, qv);
32604357 }
32614358
....@@ -3270,6 +4367,8 @@
32704367 struct mvpp2_queue_vector *qv = port->qvecs + i;
32714368
32724369 irq_set_affinity_hint(qv->irq, NULL);
4370
+ kfree(qv->mask);
4371
+ qv->mask = NULL;
32734372 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
32744373 free_irq(qv->irq, qv);
32754374 }
....@@ -3341,12 +4440,13 @@
33414440 valid = true;
33424441 }
33434442
3344
- if (priv->hw_version == MVPP22 && port->link_irq) {
3345
- err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
4443
+ if (priv->hw_version == MVPP22 && port->port_irq) {
4444
+ err = request_irq(port->port_irq, mvpp2_port_isr, 0,
33464445 dev->name, port);
33474446 if (err) {
3348
- netdev_err(port->dev, "cannot request link IRQ %d\n",
3349
- port->link_irq);
4447
+ netdev_err(port->dev,
4448
+ "cannot request port link/ptp IRQ %d\n",
4449
+ port->port_irq);
33504450 goto err_free_irq;
33514451 }
33524452
....@@ -3357,7 +4457,7 @@
33574457
33584458 valid = true;
33594459 } else {
3360
- port->link_irq = 0;
4460
+ port->port_irq = 0;
33614461 }
33624462
33634463 if (!valid) {
....@@ -3392,33 +4492,35 @@
33924492 {
33934493 struct mvpp2_port *port = netdev_priv(dev);
33944494 struct mvpp2_port_pcpu *port_pcpu;
3395
- int cpu;
4495
+ unsigned int thread;
33964496
33974497 mvpp2_stop_dev(port);
33984498
3399
- /* Mask interrupts on all CPUs */
4499
+ /* Mask interrupts on all threads */
34004500 on_each_cpu(mvpp2_interrupts_mask, port, 1);
34014501 mvpp2_shared_interrupt_mask_unmask(port, true);
34024502
34034503 if (port->phylink)
34044504 phylink_disconnect_phy(port->phylink);
3405
- if (port->link_irq)
3406
- free_irq(port->link_irq, port);
4505
+ if (port->port_irq)
4506
+ free_irq(port->port_irq, port);
34074507
34084508 mvpp2_irqs_deinit(port);
34094509 if (!port->has_tx_irqs) {
3410
- for_each_present_cpu(cpu) {
3411
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
4510
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
4511
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
34124512
34134513 hrtimer_cancel(&port_pcpu->tx_done_timer);
34144514 port_pcpu->timer_scheduled = false;
3415
- tasklet_kill(&port_pcpu->tx_done_tasklet);
34164515 }
34174516 }
34184517 mvpp2_cleanup_rxqs(port);
34194518 mvpp2_cleanup_txqs(port);
34204519
34214520 cancel_delayed_work_sync(&port->stats_work);
4521
+
4522
+ mvpp2_mac_reset_assert(port);
4523
+ mvpp22_pcs_reset_assert(port);
34224524
34234525 return 0;
34244526 }
....@@ -3500,16 +4602,85 @@
35004602 return err;
35014603 }
35024604
4605
+/* Shut down all the ports, reconfigure the pools as percpu or shared,
4606
+ * then bring up again all ports.
4607
+ */
4608
+static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4609
+{
4610
+ int numbufs = MVPP2_BM_POOLS_NUM, i;
4611
+ struct mvpp2_port *port = NULL;
4612
+ bool status[MVPP2_MAX_PORTS];
4613
+
4614
+ for (i = 0; i < priv->port_count; i++) {
4615
+ port = priv->port_list[i];
4616
+ status[i] = netif_running(port->dev);
4617
+ if (status[i])
4618
+ mvpp2_stop(port->dev);
4619
+ }
4620
+
4621
+ /* nrxqs is the same for all ports */
4622
+ if (priv->percpu_pools)
4623
+ numbufs = port->nrxqs * 2;
4624
+
4625
+ for (i = 0; i < numbufs; i++)
4626
+ mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4627
+
4628
+ devm_kfree(port->dev->dev.parent, priv->bm_pools);
4629
+ priv->percpu_pools = percpu;
4630
+ mvpp2_bm_init(port->dev->dev.parent, priv);
4631
+
4632
+ for (i = 0; i < priv->port_count; i++) {
4633
+ port = priv->port_list[i];
4634
+ mvpp2_swf_bm_pool_init(port);
4635
+ if (status[i])
4636
+ mvpp2_open(port->dev);
4637
+ }
4638
+
4639
+ return 0;
4640
+}
4641
+
35034642 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
35044643 {
35054644 struct mvpp2_port *port = netdev_priv(dev);
35064645 bool running = netif_running(dev);
4646
+ struct mvpp2 *priv = port->priv;
35074647 int err;
35084648
35094649 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
35104650 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
35114651 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
35124652 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
4653
+ }
4654
+
4655
+ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
4656
+ netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
4657
+ mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
4658
+ return -EINVAL;
4659
+ }
4660
+
4661
+ if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
4662
+ if (priv->percpu_pools) {
4663
+ netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
4664
+ mvpp2_bm_switch_buffers(priv, false);
4665
+ }
4666
+ } else {
4667
+ bool jumbo = false;
4668
+ int i;
4669
+
4670
+ for (i = 0; i < priv->port_count; i++)
4671
+ if (priv->port_list[i] != port &&
4672
+ MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
4673
+ MVPP2_BM_LONG_PKT_SIZE) {
4674
+ jumbo = true;
4675
+ break;
4676
+ }
4677
+
4678
+ /* No port is using jumbo frames */
4679
+ if (!jumbo) {
4680
+ dev_info(port->dev->dev.parent,
4681
+ "all ports have a low MTU, switching to per-cpu buffers");
4682
+ mvpp2_bm_switch_buffers(priv, true);
4683
+ }
35134684 }
35144685
35154686 if (running)
....@@ -3533,12 +4704,39 @@
35334704 return err;
35344705 }
35354706
4707
+static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
4708
+{
4709
+ enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
4710
+ struct mvpp2 *priv = port->priv;
4711
+ int err = -1, i;
4712
+
4713
+ if (!priv->percpu_pools)
4714
+ return err;
4715
+
4716
+ if (!priv->page_pool[0])
4717
+ return -ENOMEM;
4718
+
4719
+ for (i = 0; i < priv->port_count; i++) {
4720
+ port = priv->port_list[i];
4721
+ if (port->xdp_prog) {
4722
+ dma_dir = DMA_BIDIRECTIONAL;
4723
+ break;
4724
+ }
4725
+ }
4726
+
4727
+ /* All pools are equal in terms of DMA direction */
4728
+ if (priv->page_pool[0]->p.dma_dir != dma_dir)
4729
+ err = mvpp2_bm_switch_buffers(priv, true);
4730
+
4731
+ return err;
4732
+}
4733
+
35364734 static void
35374735 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
35384736 {
35394737 struct mvpp2_port *port = netdev_priv(dev);
35404738 unsigned int start;
3541
- int cpu;
4739
+ unsigned int cpu;
35424740
35434741 for_each_possible_cpu(cpu) {
35444742 struct mvpp2_pcpu_stats *cpu_stats;
....@@ -3567,9 +4765,123 @@
35674765 stats->tx_dropped = dev->stats.tx_dropped;
35684766 }
35694767
4768
+static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
4769
+{
4770
+ struct hwtstamp_config config;
4771
+ void __iomem *ptp;
4772
+ u32 gcr, int_mask;
4773
+
4774
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4775
+ return -EFAULT;
4776
+
4777
+ if (config.flags)
4778
+ return -EINVAL;
4779
+
4780
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
4781
+ config.tx_type != HWTSTAMP_TX_ON)
4782
+ return -ERANGE;
4783
+
4784
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
4785
+
4786
+ int_mask = gcr = 0;
4787
+ if (config.tx_type != HWTSTAMP_TX_OFF) {
4788
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
4789
+ int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
4790
+ MVPP22_PTP_INT_MASK_QUEUE0;
4791
+ }
4792
+
4793
+ /* It seems we must also release the TX reset when enabling the TSU */
4794
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
4795
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
4796
+ MVPP22_PTP_GCR_TX_RESET;
4797
+
4798
+ if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
4799
+ mvpp22_tai_start(port->priv->tai);
4800
+
4801
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
4802
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
4803
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
4804
+ MVPP22_PTP_GCR_RX_RESET |
4805
+ MVPP22_PTP_GCR_TX_RESET |
4806
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
4807
+ port->rx_hwtstamp = true;
4808
+ } else {
4809
+ port->rx_hwtstamp = false;
4810
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
4811
+ MVPP22_PTP_GCR_RX_RESET |
4812
+ MVPP22_PTP_GCR_TX_RESET |
4813
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
4814
+ }
4815
+
4816
+ mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
4817
+ MVPP22_PTP_INT_MASK_QUEUE1 |
4818
+ MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
4819
+
4820
+ if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
4821
+ mvpp22_tai_stop(port->priv->tai);
4822
+
4823
+ port->tx_hwtstamp_type = config.tx_type;
4824
+
4825
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
4826
+ return -EFAULT;
4827
+
4828
+ return 0;
4829
+}
4830
+
4831
+static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
4832
+{
4833
+ struct hwtstamp_config config;
4834
+
4835
+ memset(&config, 0, sizeof(config));
4836
+
4837
+ config.tx_type = port->tx_hwtstamp_type;
4838
+ config.rx_filter = port->rx_hwtstamp ?
4839
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
4840
+
4841
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
4842
+ return -EFAULT;
4843
+
4844
+ return 0;
4845
+}
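
The pair above implements the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP interface from linux/net_tstamp.h. A userspace sketch of switching hardware timestamping on (the interface name is illustrative; note that the driver widens any requested RX filter to HWTSTAMP_FILTER_ALL and copies the granted configuration back):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			perror("SIOCSHWTSTAMP");
		else	/* mvpp2 reports HWTSTAMP_FILTER_ALL here */
			printf("granted rx_filter: %d\n", cfg.rx_filter);

		close(fd);
		return 0;
	}
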
4846
+
4847
+static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
4848
+ struct ethtool_ts_info *info)
4849
+{
4850
+ struct mvpp2_port *port = netdev_priv(dev);
4851
+
4852
+ if (!port->hwtstamp)
4853
+ return -EOPNOTSUPP;
4854
+
4855
+ info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
4856
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4857
+ SOF_TIMESTAMPING_RX_SOFTWARE |
4858
+ SOF_TIMESTAMPING_SOFTWARE |
4859
+ SOF_TIMESTAMPING_TX_HARDWARE |
4860
+ SOF_TIMESTAMPING_RX_HARDWARE |
4861
+ SOF_TIMESTAMPING_RAW_HARDWARE;
4862
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
4863
+ BIT(HWTSTAMP_TX_ON);
4864
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
4865
+ BIT(HWTSTAMP_FILTER_ALL);
4866
+
4867
+ return 0;
4868
+}
4869
+
35704870 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35714871 {
35724872 struct mvpp2_port *port = netdev_priv(dev);
4873
+
4874
+ switch (cmd) {
4875
+ case SIOCSHWTSTAMP:
4876
+ if (port->hwtstamp)
4877
+ return mvpp2_set_ts_config(port, ifr);
4878
+ break;
4879
+
4880
+ case SIOCGHWTSTAMP:
4881
+ if (port->hwtstamp)
4882
+ return mvpp2_get_ts_config(port, ifr);
4883
+ break;
4884
+ }
35734885
35744886 if (!port->phylink)
35754887 return -ENOTSUPP;
....@@ -3618,12 +4930,67 @@
36184930
36194931 if (changed & NETIF_F_RXHASH) {
36204932 if (features & NETIF_F_RXHASH)
3621
- mvpp22_rss_enable(port);
4933
+ mvpp22_port_rss_enable(port);
36224934 else
3623
- mvpp22_rss_disable(port);
4935
+ mvpp22_port_rss_disable(port);
36244936 }
36254937
36264938 return 0;
4939
+}
4940
+
4941
+static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
4942
+{
4943
+ struct bpf_prog *prog = bpf->prog, *old_prog;
4944
+ bool running = netif_running(port->dev);
4945
+ bool reset = !prog != !port->xdp_prog;
4946
+
4947
+ if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
4948
+ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
4949
+ return -EOPNOTSUPP;
4950
+ }
4951
+
4952
+ if (!port->priv->percpu_pools) {
4953
+ NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
4954
+ return -EOPNOTSUPP;
4955
+ }
4956
+
4957
+ if (port->ntxqs < num_possible_cpus() * 2) {
4958
+ NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
4959
+ return -EOPNOTSUPP;
4960
+ }
4961
+
4962
+ /* device is up and bpf is added/removed, must set up the RX queues */
4963
+ if (running && reset)
4964
+ mvpp2_stop(port->dev);
4965
+
4966
+ old_prog = xchg(&port->xdp_prog, prog);
4967
+ if (old_prog)
4968
+ bpf_prog_put(old_prog);
4969
+
4970
+ /* bpf is just replaced, RXQ and MTU are already set up */
4971
+ if (!reset)
4972
+ return 0;
4973
+
4974
+ /* device was up, restore the link */
4975
+ if (running)
4976
+ mvpp2_open(port->dev);
4977
+
4978
+ /* Check page pool DMA direction */
4979
+ mvpp2_check_pagepool_dma(port);
4980
+
4981
+ return 0;
4982
+}
4983
+
4984
+static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4985
+{
4986
+ struct mvpp2_port *port = netdev_priv(dev);
4987
+
4988
+ switch (xdp->command) {
4989
+ case XDP_SETUP_PROG:
4990
+ return mvpp2_xdp_setup(port, xdp);
4991
+ default:
4992
+ return -EINVAL;
4993
+ }
36274994 }
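
mvpp2_xdp() above only dispatches XDP_SETUP_PROG; the program itself is supplied from userspace. A minimal XDP program that this setup path would accept, as a sketch (file and section names are illustrative), built with clang -O2 -target bpf -c xdp_pass.c and attached with something like ip link set dev eth0 xdp obj xdp_pass.o sec xdp:

	/* xdp_pass.c: pass every frame on to the normal network stack */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_pass(struct xdp_md *ctx)
	{
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";

Attachment is still subject to the MTU, per-CPU pool and TX queue checks enforced in mvpp2_xdp_setup() above.
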
36284995
36294996 /* Ethtool methods */
....@@ -3814,7 +5181,7 @@
38145181 struct ethtool_rxnfc *info, u32 *rules)
38155182 {
38165183 struct mvpp2_port *port = netdev_priv(dev);
3817
- int ret = 0;
5184
+ int ret = 0, i, loc = 0;
38185185
38195186 if (!mvpp22_rss_is_supported())
38205187 return -EOPNOTSUPP;
....@@ -3825,6 +5192,18 @@
38255192 break;
38265193 case ETHTOOL_GRXRINGS:
38275194 info->data = port->nrxqs;
5195
+ break;
5196
+ case ETHTOOL_GRXCLSRLCNT:
5197
+ info->rule_cnt = port->n_rfs_rules;
5198
+ break;
5199
+ case ETHTOOL_GRXCLSRULE:
5200
+ ret = mvpp2_ethtool_cls_rule_get(port, info);
5201
+ break;
5202
+ case ETHTOOL_GRXCLSRLALL:
5203
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5204
+ if (port->rfs_rules[i])
5205
+ rules[loc++] = i;
5206
+ }
38285207 break;
38295208 default:
38305209 return -ENOTSUPP;
....@@ -3846,6 +5225,12 @@
38465225 case ETHTOOL_SRXFH:
38475226 ret = mvpp2_ethtool_rxfh_set(port, info);
38485227 break;
5228
+ case ETHTOOL_SRXCLSRLINS:
5229
+ ret = mvpp2_ethtool_cls_rule_ins(port, info);
5230
+ break;
5231
+ case ETHTOOL_SRXCLSRLDEL:
5232
+ ret = mvpp2_ethtool_cls_rule_del(port, info);
5233
+ break;
38495234 default:
38505235 return -EOPNOTSUPP;
38515236 }
....@@ -3861,24 +5246,25 @@
38615246 u8 *hfunc)
38625247 {
38635248 struct mvpp2_port *port = netdev_priv(dev);
5249
+ int ret = 0;
38645250
38655251 if (!mvpp22_rss_is_supported())
38665252 return -EOPNOTSUPP;
38675253
38685254 if (indir)
3869
- memcpy(indir, port->indir,
3870
- ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
5255
+ ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
38715256
38725257 if (hfunc)
38735258 *hfunc = ETH_RSS_HASH_CRC32;
38745259
3875
- return 0;
5260
+ return ret;
38765261 }
38775262
38785263 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
38795264 const u8 *key, const u8 hfunc)
38805265 {
38815266 struct mvpp2_port *port = netdev_priv(dev);
5267
+ int ret = 0;
38825268
38835269 if (!mvpp22_rss_is_supported())
38845270 return -EOPNOTSUPP;
....@@ -3889,15 +5275,60 @@
38895275 if (key)
38905276 return -EOPNOTSUPP;
38915277
3892
- if (indir) {
3893
- memcpy(port->indir, indir,
3894
- ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
3895
- mvpp22_rss_fill_table(port, port->id);
3896
- }
5278
+ if (indir)
5279
+ ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
38975280
3898
- return 0;
5281
+ return ret;
38995282 }
39005283
5284
+static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
5285
+ u8 *key, u8 *hfunc, u32 rss_context)
5286
+{
5287
+ struct mvpp2_port *port = netdev_priv(dev);
5288
+ int ret = 0;
5289
+
5290
+ if (!mvpp22_rss_is_supported())
5291
+ return -EOPNOTSUPP;
5292
+ if (rss_context >= MVPP22_N_RSS_TABLES)
5293
+ return -EINVAL;
5294
+
5295
+ if (hfunc)
5296
+ *hfunc = ETH_RSS_HASH_CRC32;
5297
+
5298
+ if (indir)
5299
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
5300
+
5301
+ return ret;
5302
+}
5303
+
5304
+static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
5305
+ const u32 *indir, const u8 *key,
5306
+ const u8 hfunc, u32 *rss_context,
5307
+ bool delete)
5308
+{
5309
+ struct mvpp2_port *port = netdev_priv(dev);
5310
+ int ret;
5311
+
5312
+ if (!mvpp22_rss_is_supported())
5313
+ return -EOPNOTSUPP;
5314
+
5315
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5316
+ return -EOPNOTSUPP;
5317
+
5318
+ if (key)
5319
+ return -EOPNOTSUPP;
5320
+
5321
+ if (delete)
5322
+ return mvpp22_port_rss_ctx_delete(port, *rss_context);
5323
+
5324
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5325
+ ret = mvpp22_port_rss_ctx_create(port, rss_context);
5326
+ if (ret)
5327
+ return ret;
5328
+ }
5329
+
5330
+ return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5331
+}
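
The two context callbacks above back ethtool's additional RSS contexts: ETH_RXFH_CONTEXT_ALLOC requests a fresh context and delete tears one down, typically driven from userspace with roughly ethtool -X eth0 context new and ethtool -X eth0 context 1 delete (exact syntax depends on the ethtool version). The underlying ioctl is ETHTOOL_GRSSH/ETHTOOL_SRSSH; a sketch of querying the RSS geometry these callbacks operate on (interface name illustrative; with both sizes left at zero the kernel returns sizes only):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_rxfh rxfh;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&rxfh, 0, sizeof(rxfh));
		rxfh.cmd = ETHTOOL_GRSSH;	/* indir_size/key_size == 0 */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&rxfh;

		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("indir entries: %u, key bytes: %u\n",
			       rxfh.indir_size, rxfh.key_size);
		return 0;
	}
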
39015332 /* Device ops */
39025333
39035334 static const struct net_device_ops mvpp2_netdev_ops = {
....@@ -3912,11 +5343,16 @@
39125343 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
39135344 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
39145345 .ndo_set_features = mvpp2_set_features,
5346
+ .ndo_bpf = mvpp2_xdp,
5347
+ .ndo_xdp_xmit = mvpp2_xdp_xmit,
39155348 };
39165349
39175350 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5351
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5352
+ ETHTOOL_COALESCE_MAX_FRAMES,
39185353 .nway_reset = mvpp2_ethtool_nway_reset,
39195354 .get_link = ethtool_op_get_link,
5355
+ .get_ts_info = mvpp2_ethtool_get_ts_info,
39205356 .set_coalesce = mvpp2_ethtool_set_coalesce,
39215357 .get_coalesce = mvpp2_ethtool_get_coalesce,
39225358 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
....@@ -3934,7 +5370,8 @@
39345370 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
39355371 .get_rxfh = mvpp2_ethtool_get_rxfh,
39365372 .set_rxfh = mvpp2_ethtool_set_rxfh,
3937
-
5373
+ .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
5374
+ .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
39385375 };
39395376
39405377 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
....@@ -3965,12 +5402,18 @@
39655402 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
39665403 struct device_node *port_node)
39675404 {
5405
+ struct mvpp2 *priv = port->priv;
39685406 struct mvpp2_queue_vector *v;
39695407 int i, ret;
39705408
3971
- port->nqvecs = num_possible_cpus();
3972
- if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
3973
- port->nqvecs += 1;
5409
+ switch (queue_mode) {
5410
+ case MVPP2_QDIST_SINGLE_MODE:
5411
+ port->nqvecs = priv->nthreads + 1;
5412
+ break;
5413
+ case MVPP2_QDIST_MULTI_MODE:
5414
+ port->nqvecs = priv->nthreads;
5415
+ break;
5416
+ }
39745417
39755418 for (i = 0; i < port->nqvecs; i++) {
39765419 char irqname[16];
....@@ -3982,17 +5425,22 @@
39825425 v->sw_thread_id = i;
39835426 v->sw_thread_mask = BIT(i);
39845427
3985
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5428
+ if (port->flags & MVPP2_F_DT_COMPAT)
5429
+ snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5430
+ else
5431
+ snprintf(irqname, sizeof(irqname), "hif%d", i);
39865432
39875433 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
3988
- v->first_rxq = i * MVPP2_DEFAULT_RXQ;
3989
- v->nrxqs = MVPP2_DEFAULT_RXQ;
5434
+ v->first_rxq = i;
5435
+ v->nrxqs = 1;
39905436 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
39915437 i == (port->nqvecs - 1)) {
39925438 v->first_rxq = 0;
39935439 v->nrxqs = port->nrxqs;
39945440 v->type = MVPP2_QUEUE_VECTOR_SHARED;
3995
- strncpy(irqname, "rx-shared", sizeof(irqname));
5441
+
5442
+ if (port->flags & MVPP2_F_DT_COMPAT)
5443
+ strncpy(irqname, "rx-shared", sizeof(irqname));
39965444 }
39975445
39985446 if (port_node)
....@@ -4069,20 +5517,32 @@
40695517 struct device *dev = port->dev->dev.parent;
40705518 struct mvpp2 *priv = port->priv;
40715519 struct mvpp2_txq_pcpu *txq_pcpu;
4072
- int queue, cpu, err;
5520
+ unsigned int thread;
5521
+ int queue, err, val;
40735522
40745523 /* Checks for hardware constraints */
40755524 if (port->first_rxq + port->nrxqs >
40765525 MVPP2_MAX_PORTS * priv->max_port_rxqs)
40775526 return -EINVAL;
40785527
4079
- if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
4080
- port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5528
+ if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
40815529 return -EINVAL;
40825530
40835531 /* Disable port */
40845532 mvpp2_egress_disable(port);
40855533 mvpp2_port_disable(port);
5534
+
5535
+ if (mvpp2_is_xlg(port->phy_interface)) {
5536
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5537
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5538
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5539
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5540
+ } else {
5541
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5542
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5543
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5544
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5545
+ }
40865546
40875547 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
40885548
....@@ -4113,9 +5573,9 @@
41135573 txq->id = queue_phy_id;
41145574 txq->log_id = queue;
41155575 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
4116
- for_each_present_cpu(cpu) {
4117
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4118
- txq_pcpu->cpu = cpu;
5576
+ for (thread = 0; thread < priv->nthreads; thread++) {
5577
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5578
+ txq_pcpu->thread = thread;
41195579 }
41205580
41215581 port->txqs[queue] = txq;
....@@ -4167,7 +5627,7 @@
41675627 mvpp2_cls_port_config(port);
41685628
41695629 if (mvpp22_rss_is_supported())
4170
- mvpp22_rss_port_init(port);
5630
+ mvpp22_port_rss_init(port);
41715631
41725632 /* Provide an initial Rx packet size */
41735633 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
....@@ -4176,6 +5636,11 @@
41765636 err = mvpp2_swf_bm_pool_init(port);
41775637 if (err)
41785638 goto err_free_percpu;
5639
+
5640
+ /* Clear all port stats */
5641
+ mvpp2_read_stats(port);
5642
+ memset(port->ethtool_stats, 0,
5643
+ MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
41795644
41805645 return 0;
41815646
....@@ -4188,24 +5653,51 @@
41885653 return err;
41895654 }
41905655
4191
-/* Checks if the port DT description has the TX interrupts
4192
- * described. On PPv2.1, there are no such interrupts. On PPv2.2,
4193
- * there are available, but we need to keep support for old DTs.
4194
- */
4195
-static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
4196
- struct device_node *port_node)
5656
+static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
5657
+ unsigned long *flags)
41975658 {
4198
- char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
4199
- "tx-cpu2", "tx-cpu3" };
4200
- int ret, i;
5659
+ char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
5660
+ "tx-cpu3" };
5661
+ int i;
5662
+
5663
+ for (i = 0; i < 5; i++)
5664
+ if (of_property_match_string(port_node, "interrupt-names",
5665
+ irqs[i]) < 0)
5666
+ return false;
5667
+
5668
+ *flags |= MVPP2_F_DT_COMPAT;
5669
+ return true;
5670
+}
5671
+
5672
+/* Checks if the port DT description has the required Tx interrupts:
5673
+ * - PPv2.1: there are no such interrupts.
5674
+ * - PPv2.2:
5675
+ * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0..3]
5676
+ * - The new ones have: "hifX" with X in [0..8]
5677
+ *
5678
+ * All those variants are supported to preserve backward compatibility.
5679
+ */
5680
+static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
5681
+ struct device_node *port_node,
5682
+ unsigned long *flags)
5683
+{
5684
+ char name[5];
5685
+ int i;
5686
+
5687
+ /* ACPI */
5688
+ if (!port_node)
5689
+ return true;
42015690
42025691 if (priv->hw_version == MVPP21)
42035692 return false;
42045693
4205
- for (i = 0; i < 5; i++) {
4206
- ret = of_property_match_string(port_node, "interrupt-names",
4207
- irqs[i]);
4208
- if (ret < 0)
5694
+ if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
5695
+ return true;
5696
+
5697
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5698
+ snprintf(name, 5, "hif%d", i);
5699
+ if (of_property_match_string(port_node, "interrupt-names",
5700
+ name) < 0)
42095701 return false;
42105702 }
42115703
....@@ -4239,80 +5731,20 @@
42395731 eth_hw_addr_random(dev);
42405732 }
42415733
4242
-static void mvpp2_phylink_validate(struct net_device *dev,
4243
- unsigned long *supported,
4244
- struct phylink_link_state *state)
5734
+static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
42455735 {
4246
- struct mvpp2_port *port = netdev_priv(dev);
4247
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
4248
-
4249
- /* Invalid combinations */
4250
- switch (state->interface) {
4251
- case PHY_INTERFACE_MODE_10GKR:
4252
- case PHY_INTERFACE_MODE_XAUI:
4253
- if (port->gop_id != 0)
4254
- goto empty_set;
4255
- break;
4256
- case PHY_INTERFACE_MODE_RGMII:
4257
- case PHY_INTERFACE_MODE_RGMII_ID:
4258
- case PHY_INTERFACE_MODE_RGMII_RXID:
4259
- case PHY_INTERFACE_MODE_RGMII_TXID:
4260
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
4261
- goto empty_set;
4262
- break;
4263
- default:
4264
- break;
4265
- }
4266
-
4267
- phylink_set(mask, Autoneg);
4268
- phylink_set_port_modes(mask);
4269
-
4270
- switch (state->interface) {
4271
- case PHY_INTERFACE_MODE_10GKR:
4272
- case PHY_INTERFACE_MODE_XAUI:
4273
- case PHY_INTERFACE_MODE_NA:
4274
- if (port->gop_id == 0) {
4275
- phylink_set(mask, 10000baseT_Full);
4276
- phylink_set(mask, 10000baseCR_Full);
4277
- phylink_set(mask, 10000baseSR_Full);
4278
- phylink_set(mask, 10000baseLR_Full);
4279
- phylink_set(mask, 10000baseLRM_Full);
4280
- phylink_set(mask, 10000baseER_Full);
4281
- phylink_set(mask, 10000baseKR_Full);
4282
- }
4283
- /* Fall-through */
4284
- case PHY_INTERFACE_MODE_RGMII:
4285
- case PHY_INTERFACE_MODE_RGMII_ID:
4286
- case PHY_INTERFACE_MODE_RGMII_RXID:
4287
- case PHY_INTERFACE_MODE_RGMII_TXID:
4288
- case PHY_INTERFACE_MODE_SGMII:
4289
- phylink_set(mask, 10baseT_Half);
4290
- phylink_set(mask, 10baseT_Full);
4291
- phylink_set(mask, 100baseT_Half);
4292
- phylink_set(mask, 100baseT_Full);
4293
- /* Fall-through */
4294
- case PHY_INTERFACE_MODE_1000BASEX:
4295
- case PHY_INTERFACE_MODE_2500BASEX:
4296
- phylink_set(mask, 1000baseT_Full);
4297
- phylink_set(mask, 1000baseX_Full);
4298
- phylink_set(mask, 2500baseX_Full);
4299
- break;
4300
- default:
4301
- goto empty_set;
4302
- }
4303
-
4304
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
4305
- bitmap_and(state->advertising, state->advertising, mask,
4306
- __ETHTOOL_LINK_MODE_MASK_NBITS);
4307
- return;
4308
-
4309
-empty_set:
4310
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
5736
+ return container_of(config, struct mvpp2_port, phylink_config);
43115737 }
43125738
4313
-static void mvpp22_xlg_link_state(struct mvpp2_port *port,
4314
- struct phylink_link_state *state)
5739
+static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
43155740 {
5741
+ return container_of(pcs, struct mvpp2_port, phylink_pcs);
5742
+}
5743
+
5744
+static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
5745
+ struct phylink_link_state *state)
5746
+{
5747
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
43165748 u32 val;
43175749
43185750 state->speed = SPEED_10000;
....@@ -4330,9 +5762,24 @@
43305762 state->pause |= MLO_PAUSE_RX;
43315763 }
43325764
4333
-static void mvpp2_gmac_link_state(struct mvpp2_port *port,
4334
- struct phylink_link_state *state)
5765
+static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
5766
+ unsigned int mode,
5767
+ phy_interface_t interface,
5768
+ const unsigned long *advertising,
5769
+ bool permit_pause_to_mac)
43355770 {
5771
+ return 0;
5772
+}
5773
+
5774
+static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
5775
+ .pcs_get_state = mvpp2_xlg_pcs_get_state,
5776
+ .pcs_config = mvpp2_xlg_pcs_config,
5777
+};
5778
+
5779
+static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
5780
+ struct phylink_link_state *state)
5781
+{
5782
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
43365783 u32 val;
43375784
43385785 val = readl(port->base + MVPP2_GMAC_STATUS0);
....@@ -4364,251 +5811,502 @@
43645811 state->pause |= MLO_PAUSE_TX;
43655812 }
43665813
4367
-static int mvpp2_phylink_mac_link_state(struct net_device *dev,
4368
- struct phylink_link_state *state)
5814
+static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
5815
+ phy_interface_t interface,
5816
+ const unsigned long *advertising,
5817
+ bool permit_pause_to_mac)
43695818 {
4370
- struct mvpp2_port *port = netdev_priv(dev);
5819
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5820
+ u32 mask, val, an, old_an, changed;
43715821
4372
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
4373
- u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
4374
- mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
5822
+ mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
5823
+ MVPP2_GMAC_IN_BAND_AUTONEG |
5824
+ MVPP2_GMAC_AN_SPEED_EN |
5825
+ MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5826
+ MVPP2_GMAC_AN_DUPLEX_EN;
43755827
4376
- if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
4377
- mvpp22_xlg_link_state(port, state);
4378
- return 1;
5828
+ if (phylink_autoneg_inband(mode)) {
5829
+ mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
5830
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
5831
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5832
+ val = MVPP2_GMAC_IN_BAND_AUTONEG;
5833
+
5834
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
5835
+ /* SGMII mode receives the speed and duplex from PHY */
5836
+ val |= MVPP2_GMAC_AN_SPEED_EN |
5837
+ MVPP2_GMAC_AN_DUPLEX_EN;
5838
+ } else {
5839
+ /* 802.3z mode has fixed speed and duplex */
5840
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
5841
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5842
+
5843
+ /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause
5844
+ * modes are resolved automatically by the hardware or set
5845
+ * manually via the MVPP22_GMAC_CTRL_4_REG bits.
5846
+ */
5847
+ if (permit_pause_to_mac)
5848
+ val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5849
+
5850
+ /* Configure advertisement bits */
5851
+ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
5852
+ if (phylink_test(advertising, Pause))
5853
+ val |= MVPP2_GMAC_FC_ADV_EN;
5854
+ if (phylink_test(advertising, Asym_Pause))
5855
+ val |= MVPP2_GMAC_FC_ADV_ASM_EN;
43795856 }
5857
+ } else {
5858
+ val = 0;
43805859 }
43815860
4382
- mvpp2_gmac_link_state(port, state);
4383
- return 1;
5861
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5862
+ an = (an & ~mask) | val;
5863
+ changed = an ^ old_an;
5864
+ if (changed)
5865
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5866
+
5867
+ /* We are only interested in the advertisement bits changing */
5868
+ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
43845869 }
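
mvpp2_gmac_pcs_config() deliberately returns only the advertisement bits that actually changed; a positive return is the cue for the phylink core to restart in-band autonegotiation. The mask/merge/xor idiom it is built on, as a standalone sketch with made-up register values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int old_an = 0x0005, mask = 0x000f, val = 0x0003;
		unsigned int an = (old_an & ~mask) | val;	/* 0x0003 */
		unsigned int changed = an ^ old_an;		/* 0x0006 */

		printf("an=%#x changed=%#x\n", an, changed);
		return 0;
	}
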
43855870
4386
-static void mvpp2_mac_an_restart(struct net_device *dev)
5871
+static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
43875872 {
4388
- struct mvpp2_port *port = netdev_priv(dev);
4389
- u32 val;
5873
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5874
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
43905875
4391
- if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
4392
- return;
5876
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
5877
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5878
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
5879
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5880
+}
43935881
4394
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4395
- /* The RESTART_AN bit is cleared by the h/w after restarting the AN
4396
- * process.
4397
- */
4398
- val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
4399
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5882
+static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
5883
+ .pcs_get_state = mvpp2_gmac_pcs_get_state,
5884
+ .pcs_config = mvpp2_gmac_pcs_config,
5885
+ .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
5886
+};
5887
+
5888
+static void mvpp2_phylink_validate(struct phylink_config *config,
5889
+ unsigned long *supported,
5890
+ struct phylink_link_state *state)
5891
+{
5892
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5893
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5894
+
5895
+ /* Invalid combinations */
5896
+ switch (state->interface) {
5897
+ case PHY_INTERFACE_MODE_10GBASER:
5898
+ case PHY_INTERFACE_MODE_XAUI:
5899
+ if (!mvpp2_port_supports_xlg(port))
5900
+ goto empty_set;
5901
+ break;
5902
+ case PHY_INTERFACE_MODE_RGMII:
5903
+ case PHY_INTERFACE_MODE_RGMII_ID:
5904
+ case PHY_INTERFACE_MODE_RGMII_RXID:
5905
+ case PHY_INTERFACE_MODE_RGMII_TXID:
5906
+ if (!mvpp2_port_supports_rgmii(port))
5907
+ goto empty_set;
5908
+ break;
5909
+ default:
5910
+ break;
5911
+ }
5912
+
5913
+ phylink_set(mask, Autoneg);
5914
+ phylink_set_port_modes(mask);
5915
+
5916
+ switch (state->interface) {
5917
+ case PHY_INTERFACE_MODE_10GBASER:
5918
+ case PHY_INTERFACE_MODE_XAUI:
5919
+ case PHY_INTERFACE_MODE_NA:
5920
+ if (mvpp2_port_supports_xlg(port)) {
5921
+ phylink_set(mask, 10000baseT_Full);
5922
+ phylink_set(mask, 10000baseCR_Full);
5923
+ phylink_set(mask, 10000baseSR_Full);
5924
+ phylink_set(mask, 10000baseLR_Full);
5925
+ phylink_set(mask, 10000baseLRM_Full);
5926
+ phylink_set(mask, 10000baseER_Full);
5927
+ phylink_set(mask, 10000baseKR_Full);
5928
+ }
5929
+ if (state->interface != PHY_INTERFACE_MODE_NA)
5930
+ break;
5931
+ fallthrough;
5932
+ case PHY_INTERFACE_MODE_RGMII:
5933
+ case PHY_INTERFACE_MODE_RGMII_ID:
5934
+ case PHY_INTERFACE_MODE_RGMII_RXID:
5935
+ case PHY_INTERFACE_MODE_RGMII_TXID:
5936
+ case PHY_INTERFACE_MODE_SGMII:
5937
+ phylink_set(mask, 10baseT_Half);
5938
+ phylink_set(mask, 10baseT_Full);
5939
+ phylink_set(mask, 100baseT_Half);
5940
+ phylink_set(mask, 100baseT_Full);
5941
+ phylink_set(mask, 1000baseT_Full);
5942
+ phylink_set(mask, 1000baseX_Full);
5943
+ if (state->interface != PHY_INTERFACE_MODE_NA)
5944
+ break;
5945
+ fallthrough;
5946
+ case PHY_INTERFACE_MODE_1000BASEX:
5947
+ case PHY_INTERFACE_MODE_2500BASEX:
5948
+ if (port->comphy ||
5949
+ state->interface != PHY_INTERFACE_MODE_2500BASEX) {
5950
+ phylink_set(mask, 1000baseT_Full);
5951
+ phylink_set(mask, 1000baseX_Full);
5952
+ }
5953
+ if (port->comphy ||
5954
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
5955
+ phylink_set(mask, 2500baseT_Full);
5956
+ phylink_set(mask, 2500baseX_Full);
5957
+ }
5958
+ break;
5959
+ default:
5960
+ goto empty_set;
5961
+ }
5962
+
5963
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
5964
+ bitmap_and(state->advertising, state->advertising, mask,
5965
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
5966
+
5967
+ phylink_helper_basex_speed(state);
5968
+ return;
5969
+
5970
+empty_set:
5971
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
44005972 }
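
mvpp2_phylink_validate() communicates purely by masking: it builds the set of link modes the selected interface supports, ANDs both the supported and advertising bitmaps against it, and zeroes them outright for an impossible combination. A toy model of that intersect-or-clear contract:

	#include <stdio.h>

	int main(void)
	{
		unsigned long supported = 0xff;  /* all modes the MAC offers */
		unsigned long iface_mask = 0x0c; /* modes this interface allows */

		supported &= iface_mask;	/* empty result == invalid combo */
		printf("link modes left: %#lx\n", supported);
		return 0;
	}
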
44015973
44025974 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
44035975 const struct phylink_link_state *state)
44045976 {
4405
- u32 ctrl0, ctrl4;
5977
+ u32 val;
44065978
4407
- ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
4408
- ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
5979
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5980
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS,
5981
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5982
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
5983
+ MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
5984
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
5985
+ MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
5986
+ MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
44095987
4410
- if (state->pause & MLO_PAUSE_TX)
4411
- ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
4412
- if (state->pause & MLO_PAUSE_RX)
4413
- ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4414
-
4415
- ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
4416
- MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
4417
- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
4418
-
4419
- writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
4420
- writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
5988
+ /* Wait for reset to deassert */
5989
+ do {
5990
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5991
+ } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
44215992 }
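
Note that the reset-deassert wait above spins with no upper bound. A bounded variant is possible with the kernel's iopoll helpers; a sketch only, not part of this patch, assuming a 1ms budget is acceptable and reusing the MVPP22_XLG_CTRL0_MAC_RESET_DIS define from mvpp2.h:

	#include <linux/iopoll.h>

	/* Poll CTRL0 until the MAC reset deasserts; 0 or -ETIMEDOUT. */
	static int xlg_wait_reset_deassert(void __iomem *ctrl0)
	{
		u32 val;

		return readl_poll_timeout(ctrl0, val,
					  val & MVPP22_XLG_CTRL0_MAC_RESET_DIS,
					  1, 1000);
	}
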
44225993
44235994 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
44245995 const struct phylink_link_state *state)
44255996 {
4426
- u32 an, ctrl0, ctrl2, ctrl4;
4427
- u32 old_ctrl2;
5997
+ u32 old_ctrl0, ctrl0;
5998
+ u32 old_ctrl2, ctrl2;
5999
+ u32 old_ctrl4, ctrl4;
44286000
4429
- an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4430
- ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4431
- ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4432
- ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6001
+ old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6002
+ old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6003
+ old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
44336004
4434
- old_ctrl2 = ctrl2;
4435
-
4436
- /* Force link down */
4437
- an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4438
- an |= MVPP2_GMAC_FORCE_LINK_DOWN;
4439
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4440
-
4441
- /* Set the GMAC in a reset state */
4442
- ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
4443
- writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
4444
-
4445
- an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
4446
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
4447
- MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4448
- MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
4449
- MVPP2_GMAC_FORCE_LINK_DOWN);
44506005 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4451
- ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
6006
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
44526007
4453
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
4454
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
4455
- /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
4456
- * they negotiate duplex: they are always operating with a fixed
4457
- * speed of 1000/2500Mbps in full duplex, so force 1000/2500
4458
- * speed and full duplex here.
4459
- */
4460
- ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
4461
- an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
4462
- MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4463
- } else if (!phy_interface_mode_is_rgmii(state->interface)) {
4464
- an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
4465
- }
4466
-
4467
- if (state->duplex)
4468
- an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4469
- if (phylink_test(state->advertising, Pause))
4470
- an |= MVPP2_GMAC_FC_ADV_EN;
4471
- if (phylink_test(state->advertising, Asym_Pause))
4472
- an |= MVPP2_GMAC_FC_ADV_ASM_EN;
4473
-
4474
- if (state->interface == PHY_INTERFACE_MODE_SGMII ||
4475
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
4476
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
4477
- an |= MVPP2_GMAC_IN_BAND_AUTONEG;
4478
- ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
4479
-
4480
- ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4481
- MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
6008
+ /* Configure port type */
6009
+ if (phy_interface_mode_is_8023z(state->interface)) {
6010
+ ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6011
+ ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
44826012 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
44836013 MVPP22_CTRL4_DP_CLK_SEL |
44846014 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4485
-
4486
- if (state->pause & MLO_PAUSE_TX)
4487
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
4488
- if (state->pause & MLO_PAUSE_RX)
4489
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
6015
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6016
+ ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6017
+ ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6018
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6019
+ MVPP22_CTRL4_DP_CLK_SEL |
6020
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
44906021 } else if (phy_interface_mode_is_rgmii(state->interface)) {
4491
- an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;
4492
-
4493
- if (state->speed == SPEED_1000)
4494
- an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4495
- else if (state->speed == SPEED_100)
4496
- an |= MVPP2_GMAC_CONFIG_MII_SPEED;
4497
-
44986022 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
44996023 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
45006024 MVPP22_CTRL4_SYNC_BYPASS_DIS |
45016025 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
45026026 }
45036027
4504
- writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
4505
- writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
4506
- writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
4507
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4508
-
4509
- if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
4510
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4511
- MVPP2_GMAC_PORT_RESET_MASK)
4512
- continue;
6028
+ /* Configure negotiation style */
6029
+ if (!phylink_autoneg_inband(mode)) {
6030
+ /* Phy or fixed speed - no in-band AN, nothing to do, leave the
6031
+ * configured speed, duplex and flow control as-is.
6032
+ */
6033
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6034
+ /* SGMII in-band mode receives the speed and duplex from
6035
+ * the PHY. Flow control information is not received.
+ */
6036
+ } else if (phy_interface_mode_is_8023z(state->interface)) {
6037
+ /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
6038
+ * they negotiate duplex: they are always operating with a fixed
6039
+ * speed of 1000/2500Mbps in full duplex, so force 1000/2500
6040
+ * speed and full duplex here.
6041
+ */
6042
+ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
45136043 }
6044
+
6045
+ if (old_ctrl0 != ctrl0)
6046
+ writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6047
+ if (old_ctrl2 != ctrl2)
6048
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6049
+ if (old_ctrl4 != ctrl4)
6050
+ writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
45146051 }
45156052
4516
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4517
- const struct phylink_link_state *state)
6053
+static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
6054
+ phy_interface_t interface)
45186055 {
4519
- struct mvpp2_port *port = netdev_priv(dev);
6056
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
45206057
45216058 /* Check for invalid configuration */
4522
- if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
4523
- netdev_err(dev, "Invalid mode on %s\n", dev->name);
4524
- return;
6059
+ if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6060
+ netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6061
+ return -EINVAL;
6062
+ }
6063
+
6064
+ if (port->phy_interface != interface ||
6065
+ phylink_autoneg_inband(mode)) {
6066
+ /* Force the link down when changing the interface or if in
6067
+ * in-band mode to ensure we do not change the configuration
6068
+ * while the hardware is indicating link is up. We force both
6069
+ * XLG and GMAC down to ensure that they're both in a known
6070
+ * state.
6071
+ */
6072
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6073
+ MVPP2_GMAC_FORCE_LINK_PASS |
6074
+ MVPP2_GMAC_FORCE_LINK_DOWN,
6075
+ MVPP2_GMAC_FORCE_LINK_DOWN);
6076
+
6077
+ if (mvpp2_port_supports_xlg(port))
6078
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6079
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6080
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6081
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
45256082 }
45266083
45276084 /* Make sure the port is disabled when reconfiguring the mode */
45286085 mvpp2_port_disable(port);
45296086
4530
- if (port->priv->hw_version == MVPP22 &&
4531
- port->phy_interface != state->interface) {
4532
- port->phy_interface = state->interface;
6087
+ if (port->phy_interface != interface) {
6088
+ /* Place GMAC into reset */
6089
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6090
+ MVPP2_GMAC_PORT_RESET_MASK,
6091
+ MVPP2_GMAC_PORT_RESET_MASK);
45336092
4534
- /* Reconfigure the serdes lanes */
4535
- phy_power_off(port->comphy);
4536
- mvpp22_mode_reconfigure(port);
6093
+ if (port->priv->hw_version == MVPP22) {
6094
+ mvpp22_gop_mask_irq(port);
6095
+
6096
+ phy_power_off(port->comphy);
6097
+ }
45376098 }
45386099
6100
+ /* Select the appropriate PCS operations depending on the
6101
+ * configured interface mode. We only switch to a mode that
6102
+ * has already passed the validate() checks.
6103
+ */
6104
+ if (mvpp2_is_xlg(interface))
6105
+ port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
6106
+ else
6107
+ port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
6108
+
6109
+ return 0;
6110
+}
6111
+
6112
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6113
+ phy_interface_t interface)
6114
+{
6115
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6116
+ int ret;
6117
+
6118
+ ret = mvpp2__mac_prepare(config, mode, interface);
6119
+ if (ret == 0)
6120
+ phylink_set_pcs(port->phylink, &port->phylink_pcs);
6121
+
6122
+ return ret;
6123
+}
6124
+
6125
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6126
+ const struct phylink_link_state *state)
6127
+{
6128
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6129
+
45396130 /* mac (re)configuration */
4540
- if (state->interface == PHY_INTERFACE_MODE_10GKR)
6131
+ if (mvpp2_is_xlg(state->interface))
45416132 mvpp2_xlg_config(port, mode, state);
45426133 else if (phy_interface_mode_is_rgmii(state->interface) ||
4543
- state->interface == PHY_INTERFACE_MODE_SGMII ||
4544
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
4545
- state->interface == PHY_INTERFACE_MODE_2500BASEX)
6134
+ phy_interface_mode_is_8023z(state->interface) ||
6135
+ state->interface == PHY_INTERFACE_MODE_SGMII)
45466136 mvpp2_gmac_config(port, mode, state);
45476137
45486138 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
45496139 mvpp2_port_loopback_set(port, state);
4550
-
4551
- mvpp2_port_enable(port);
45526140 }
45536141
4554
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
4555
- phy_interface_t interface, struct phy_device *phy)
6142
+static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6143
+ phy_interface_t interface)
45566144 {
4557
- struct mvpp2_port *port = netdev_priv(dev);
6145
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6146
+
6147
+ if (port->priv->hw_version == MVPP22 &&
6148
+ port->phy_interface != interface) {
6149
+ port->phy_interface = interface;
6150
+
6151
+ /* Reconfigure the serdes lanes */
6152
+ mvpp22_mode_reconfigure(port);
6153
+
6154
+ /* Unmask interrupts */
6155
+ mvpp22_gop_unmask_irq(port);
6156
+ }
6157
+
6158
+ if (!mvpp2_is_xlg(interface)) {
6159
+ /* Release GMAC reset and wait */
6160
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6161
+ MVPP2_GMAC_PORT_RESET_MASK, 0);
6162
+
6163
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6164
+ MVPP2_GMAC_PORT_RESET_MASK)
6165
+ continue;
6166
+ }
6167
+
6168
+ mvpp2_port_enable(port);
6169
+
6170
+ /* Allow the link to come up if in in-band mode, otherwise the
6171
+ * link is forced via mac_link_down()/mac_link_up()
6172
+ */
6173
+ if (phylink_autoneg_inband(mode)) {
6174
+ if (mvpp2_is_xlg(interface))
6175
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6176
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6177
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6178
+ else
6179
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6180
+ MVPP2_GMAC_FORCE_LINK_PASS |
6181
+ MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6182
+ }
6183
+
6184
+ return 0;
6185
+}
6186
+
6187
+static void mvpp2_mac_link_up(struct phylink_config *config,
6188
+ struct phy_device *phy,
6189
+ unsigned int mode, phy_interface_t interface,
6190
+ int speed, int duplex,
6191
+ bool tx_pause, bool rx_pause)
6192
+{
6193
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
45586194 u32 val;
45596195
4560
- if (!phylink_autoneg_inband(mode) &&
4561
- interface != PHY_INTERFACE_MODE_10GKR) {
4562
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4563
- val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
4564
- if (phy_interface_mode_is_rgmii(interface))
4565
- val |= MVPP2_GMAC_FORCE_LINK_PASS;
4566
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6196
+ if (mvpp2_is_xlg(interface)) {
6197
+ if (!phylink_autoneg_inband(mode)) {
6198
+ val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6199
+ if (tx_pause)
6200
+ val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6201
+ if (rx_pause)
6202
+ val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6203
+
6204
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6205
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6206
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6207
+ MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6208
+ MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6209
+ }
6210
+ } else {
6211
+ if (!phylink_autoneg_inband(mode)) {
6212
+ val = MVPP2_GMAC_FORCE_LINK_PASS;
6213
+
6214
+ if (speed == SPEED_1000 || speed == SPEED_2500)
6215
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6216
+ else if (speed == SPEED_100)
6217
+ val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6218
+
6219
+ if (duplex == DUPLEX_FULL)
6220
+ val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6221
+
6222
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6223
+ MVPP2_GMAC_FORCE_LINK_DOWN |
6224
+ MVPP2_GMAC_FORCE_LINK_PASS |
6225
+ MVPP2_GMAC_CONFIG_MII_SPEED |
6226
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
6227
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6228
+ }
6229
+
6230
+ /* We can always update the flow control enable bits;
6231
+ * these will only be effective if flow control AN
6232
+ * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6233
+ */
6234
+ val = 0;
6235
+ if (tx_pause)
6236
+ val |= MVPP22_CTRL4_TX_FC_EN;
6237
+ if (rx_pause)
6238
+ val |= MVPP22_CTRL4_RX_FC_EN;
6239
+
6240
+ mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6241
+ MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6242
+ val);
45676243 }
45686244
45696245 mvpp2_port_enable(port);
45706246
45716247 mvpp2_egress_enable(port);
45726248 mvpp2_ingress_enable(port);
4573
- netif_tx_wake_all_queues(dev);
6249
+ netif_tx_wake_all_queues(port->dev);
45746250 }
45756251
4576
-static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
4577
- phy_interface_t interface)
6252
+static void mvpp2_mac_link_down(struct phylink_config *config,
6253
+ unsigned int mode, phy_interface_t interface)
45786254 {
4579
- struct mvpp2_port *port = netdev_priv(dev);
6255
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
45806256 u32 val;
45816257
4582
- if (!phylink_autoneg_inband(mode) &&
4583
- interface != PHY_INTERFACE_MODE_10GKR) {
4584
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4585
- val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4586
- val |= MVPP2_GMAC_FORCE_LINK_DOWN;
4587
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6258
+ if (!phylink_autoneg_inband(mode)) {
6259
+ if (mvpp2_is_xlg(interface)) {
6260
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6261
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6262
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6263
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6264
+ } else {
6265
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6266
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6267
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6268
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6269
+ }
45886270 }
45896271
4590
- netif_tx_stop_all_queues(dev);
6272
+ netif_tx_stop_all_queues(port->dev);
45916273 mvpp2_egress_disable(port);
45926274 mvpp2_ingress_disable(port);
4593
-
4594
- /* When using link interrupts to notify phylink of a MAC state change,
4595
- * we do not want the port to be disabled (we want to receive further
4596
- * interrupts, to be notified when the port will have a link later).
4597
- */
4598
- if (!port->has_phy)
4599
- return;
46006275
46016276 mvpp2_port_disable(port);
46026277 }
46036278
46046279 static const struct phylink_mac_ops mvpp2_phylink_ops = {
46056280 .validate = mvpp2_phylink_validate,
4606
- .mac_link_state = mvpp2_phylink_mac_link_state,
4607
- .mac_an_restart = mvpp2_mac_an_restart,
6281
+ .mac_prepare = mvpp2_mac_prepare,
46086282 .mac_config = mvpp2_mac_config,
6283
+ .mac_finish = mvpp2_mac_finish,
46096284 .mac_link_up = mvpp2_mac_link_up,
46106285 .mac_link_down = mvpp2_mac_link_down,
46116286 };
6287
+
6288
+/* Work-around for ACPI */
6289
+static void mvpp2_acpi_start(struct mvpp2_port *port)
6290
+{
6291
+ /* Phylink isn't used for ACPI as of now, so the MAC has to be
6292
+ * configured manually when the interface is started. This will
6293
+ * be removed as soon as phylink gains ACPI support.
6294
+ */
6295
+ struct phylink_link_state state = {
6296
+ .interface = port->phy_interface,
6297
+ };
6298
+ mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6299
+ port->phy_interface);
6300
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6301
+ port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
6302
+ port->phy_interface,
6303
+ state.advertising, false);
6304
+ mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6305
+ port->phy_interface);
6306
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
6307
+ MLO_AN_INBAND, port->phy_interface,
6308
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
6309
+}
46126310
46136311 /* Ports initialization */
46146312 static int mvpp2_port_probe(struct platform_device *pdev,
....@@ -4619,32 +6317,26 @@
46196317 struct mvpp2_port *port;
46206318 struct mvpp2_port_pcpu *port_pcpu;
46216319 struct device_node *port_node = to_of_node(port_fwnode);
6320
+ netdev_features_t features;
46226321 struct net_device *dev;
4623
- struct resource *res;
46246322 struct phylink *phylink;
46256323 char *mac_from = "";
4626
- unsigned int ntxqs, nrxqs;
6324
+ unsigned int ntxqs, nrxqs, thread;
6325
+ unsigned long flags = 0;
46276326 bool has_tx_irqs;
46286327 u32 id;
4629
- int features;
46306328 int phy_mode;
4631
- int err, i, cpu;
6329
+ int err, i;
46326330
4633
- if (port_node) {
4634
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
4635
- } else {
4636
- has_tx_irqs = true;
4637
- queue_mode = MVPP2_QDIST_MULTI_MODE;
6331
+ has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6332
+ if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6333
+ dev_err(&pdev->dev,
6334
+ "not enough IRQs to support multi queue mode\n");
6335
+ return -EINVAL;
46386336 }
46396337
4640
- if (!has_tx_irqs)
4641
- queue_mode = MVPP2_QDIST_SINGLE_MODE;
4642
-
46436338 ntxqs = MVPP2_MAX_TXQ;
4644
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
4645
- nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
4646
- else
4647
- nrxqs = MVPP2_DEFAULT_RXQ;
6339
+ nrxqs = mvpp2_get_nrxqs(priv);
46486340
46496341 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
46506342 if (!dev)
....@@ -4656,6 +6348,15 @@
46566348 err = phy_mode;
46576349 goto err_free_netdev;
46586350 }
6351
+
6352
+ /*
6353
+ * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6354
+ * Existing usage of 10GBASE-KR is not correct; no backplane
6355
+ * negotiation is done, and this driver does not actually support
6356
+ * 10GBASE-KR.
6357
+ */
6358
+ if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6359
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
46596360
46606361 if (port_node) {
46616362 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
....@@ -4687,22 +6388,23 @@
46876388 port->nrxqs = nrxqs;
46886389 port->priv = priv;
46896390 port->has_tx_irqs = has_tx_irqs;
6391
+ port->flags = flags;
46906392
46916393 err = mvpp2_queue_vectors_init(port, port_node);
46926394 if (err)
46936395 goto err_free_netdev;
46946396
46956397 if (port_node)
4696
- port->link_irq = of_irq_get_byname(port_node, "link");
6398
+ port->port_irq = of_irq_get_byname(port_node, "link");
46976399 else
4698
- port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
4699
- if (port->link_irq == -EPROBE_DEFER) {
6400
+ port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6401
+ if (port->port_irq == -EPROBE_DEFER) {
47006402 err = -EPROBE_DEFER;
47016403 goto err_deinit_qvecs;
47026404 }
4703
- if (port->link_irq <= 0)
6405
+ if (port->port_irq <= 0)
47046406 /* the link irq is optional */
4705
- port->link_irq = 0;
6407
+ port->port_irq = 0;
47066408
47076409 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
47086410 port->flags |= MVPP2_F_LOOPBACK;
....@@ -4718,8 +6420,7 @@
47186420 port->comphy = comphy;
47196421
47206422 if (priv->hw_version == MVPP21) {
4721
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
4722
- port->base = devm_ioremap_resource(&pdev->dev, res);
6423
+ port->base = devm_platform_ioremap_resource(pdev, 2 + id);
47236424 if (IS_ERR(port->base)) {
47246425 err = PTR_ERR(port->base);
47256426 goto err_free_irq;
....@@ -4740,6 +6441,12 @@
47406441 port->stats_base = port->priv->iface_base +
47416442 MVPP22_MIB_COUNTERS_OFFSET +
47426443 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6444
+
6445
+ /* We may want a property to describe whether we should use
6446
+ * MAC hardware timestamping.
6447
+ */
6448
+ if (priv->tai)
6449
+ port->hwtstamp = true;
47436450 }
47446451
47456452 /* Alloc per-cpu and ethtool stats */
....@@ -4750,7 +6457,7 @@
47506457 }
47516458
47526459 port->ethtool_stats = devm_kcalloc(&pdev->dev,
4753
- ARRAY_SIZE(mvpp2_ethtool_regs),
6460
+ MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
47546461 sizeof(u64), GFP_KERNEL);
47556462 if (!port->ethtool_stats) {
47566463 err = -ENOMEM;
....@@ -4774,7 +6481,8 @@
47746481
47756482 mvpp2_port_periodic_xon_disable(port);
47766483
4777
- mvpp2_port_reset(port);
6484
+ mvpp2_mac_reset_assert(port);
6485
+ mvpp22_pcs_reset_assert(port);
47786486
47796487 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
47806488 if (!port->pcpu) {
....@@ -4783,17 +6491,14 @@
47836491 }
47846492
47856493 if (!port->has_tx_irqs) {
4786
- for_each_present_cpu(cpu) {
4787
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6494
+ for (thread = 0; thread < priv->nthreads; thread++) {
6495
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
47886496
47896497 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
4790
- HRTIMER_MODE_REL_PINNED);
6498
+ HRTIMER_MODE_REL_PINNED_SOFT);
47916499 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
47926500 port_pcpu->timer_scheduled = false;
4793
-
4794
- tasklet_init(&port_pcpu->tx_done_tasklet,
4795
- mvpp2_tx_proc_cb,
4796
- (unsigned long)dev);
6501
+ port_pcpu->dev = dev;
47976502 }
47986503 }
47996504
....@@ -4803,13 +6508,13 @@
48036508 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
48046509 NETIF_F_HW_VLAN_CTAG_FILTER;
48056510
4806
- if (mvpp22_rss_is_supported())
6511
+ if (mvpp22_rss_is_supported()) {
48076512 dev->hw_features |= NETIF_F_RXHASH;
4808
-
4809
- if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
4810
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4811
- dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6513
+ dev->features |= NETIF_F_NTUPLE;
48126514 }
6515
+
6516
+ if (!port->priv->percpu_pools)
6517
+ mvpp2_set_hw_csum(port, port->pool_long->id);
48136518
48146519 dev->vlan_features |= features;
48156520 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
....@@ -4823,8 +6528,11 @@
48236528
48246529 /* Phylink isn't used w/ ACPI as of now */
48256530 if (port_node) {
4826
- phylink = phylink_create(dev, port_fwnode, phy_mode,
4827
- &mvpp2_phylink_ops);
6531
+ port->phylink_config.dev = &dev->dev;
6532
+ port->phylink_config.type = PHYLINK_NETDEV;
6533
+
6534
+ phylink = phylink_create(&port->phylink_config, port_fwnode,
6535
+ phy_mode, &mvpp2_phylink_ops);
48286536 if (IS_ERR(phylink)) {
48296537 err = PTR_ERR(phylink);
48306538 goto err_free_port_pcpu;
....@@ -4832,6 +6540,16 @@
48326540 port->phylink = phylink;
48336541 } else {
48346542 port->phylink = NULL;
6543
+ }
6544
+
6545
+ /* Cycle the comphy to power it down, saving 270mW per port -
6546
+ * don't worry about an error powering it up. When the comphy
6547
+ * driver does this, we can remove this code.
6548
+ */
6549
+ if (port->comphy) {
6550
+ err = mvpp22_comphy_init(port);
6551
+ if (err == 0)
6552
+ phy_power_off(port->comphy);
48356553 }
48366554
48376555 err = register_netdev(dev);
....@@ -4856,8 +6574,8 @@
48566574 err_free_stats:
48576575 free_percpu(port->stats);
48586576 err_free_irq:
4859
- if (port->link_irq)
4860
- irq_dispose_mapping(port->link_irq);
6577
+ if (port->port_irq)
6578
+ irq_dispose_mapping(port->port_irq);
48616579 err_deinit_qvecs:
48626580 mvpp2_queue_vectors_deinit(port);
48636581 err_free_netdev:
....@@ -4878,8 +6596,8 @@
48786596 for (i = 0; i < port->ntxqs; i++)
48796597 free_percpu(port->txqs[i]->pcpu);
48806598 mvpp2_queue_vectors_deinit(port);
4881
- if (port->link_irq)
4882
- irq_dispose_mapping(port->link_irq);
6599
+ if (port->port_irq)
6600
+ irq_dispose_mapping(port->port_irq);
48836601 free_netdev(port->dev);
48846602 }
48856603
....@@ -5068,13 +6786,13 @@
50686786 }
50696787
50706788 /* Allocate and initialize aggregated TXQs */
5071
- priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6789
+ priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
50726790 sizeof(*priv->aggr_txqs),
50736791 GFP_KERNEL);
50746792 if (!priv->aggr_txqs)
50756793 return -ENOMEM;
50766794
5077
- for_each_present_cpu(i) {
6795
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
50786796 priv->aggr_txqs[i].id = i;
50796797 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
50806798 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
....@@ -5098,7 +6816,7 @@
50986816 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
50996817
51006818 /* Buffer Manager initialization */
5101
- err = mvpp2_bm_init(pdev, priv);
6819
+ err = mvpp2_bm_init(&pdev->dev, priv);
51026820 if (err < 0)
51036821 return err;
51046822
....@@ -5121,7 +6839,7 @@
51216839 struct mvpp2 *priv;
51226840 struct resource *res;
51236841 void __iomem *base;
5124
- int i;
6842
+ int i, shared;
51256843 int err;
51266844
51276845 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
....@@ -5145,14 +6863,12 @@
51456863 if (priv->hw_version == MVPP21)
51466864 queue_mode = MVPP2_QDIST_SINGLE_MODE;
51476865
5148
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5149
- base = devm_ioremap_resource(&pdev->dev, res);
6866
+ base = devm_platform_ioremap_resource(pdev, 0);
51506867 if (IS_ERR(base))
51516868 return PTR_ERR(base);
51526869
51536870 if (priv->hw_version == MVPP21) {
5154
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
5155
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6871
+ priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
51566872 if (IS_ERR(priv->lms_base))
51576873 return PTR_ERR(priv->lms_base);
51586874 } else {
....@@ -5190,7 +6906,20 @@
51906906 priv->sysctrl_base = NULL;
51916907 }
51926908
6909
+ if (priv->hw_version == MVPP22 &&
6910
+ mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
6911
+ priv->percpu_pools = 1;
6912
+
51936913 mvpp2_setup_bm_pool();
6914
+
6915
+
6916
+ priv->nthreads = min_t(unsigned int, num_present_cpus(),
6917
+ MVPP2_MAX_THREADS);
6918
+
6919
+ shared = num_present_cpus() - priv->nthreads;
6920
+ if (shared > 0)
6921
+ bitmap_set(&priv->lock_map, 0,
6922
+ min_t(int, shared, MVPP2_MAX_THREADS));
51946923
51956924 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
51966925 u32 addr_space_sz;
....@@ -5284,6 +7013,10 @@
52847013 goto err_axi_clk;
52857014 }
52867015
7016
+ err = mvpp22_tai_probe(&pdev->dev, priv);
7017
+ if (err < 0)
7018
+ goto err_axi_clk;
7019
+
52877020 /* Initialize ports */
52887021 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
52897022 err = mvpp2_port_probe(pdev, port_fwnode, priv);
....@@ -5346,8 +7079,8 @@
53467079 {
53477080 struct mvpp2 *priv = platform_get_drvdata(pdev);
53487081 struct fwnode_handle *fwnode = pdev->dev.fwnode;
7082
+ int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
53497083 struct fwnode_handle *port_fwnode;
5350
- int i = 0;
53517084
53527085 mvpp2_dbgfs_cleanup(priv);
53537086
....@@ -5361,13 +7094,16 @@
53617094
53627095 destroy_workqueue(priv->stats_queue);
53637096
5364
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
7097
+ if (priv->percpu_pools)
7098
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
7099
+
7100
+ for (i = 0; i < poolnum; i++) {
53657101 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
53667102
5367
- mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
7103
+ mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
53687104 }
53697105
5370
- for_each_present_cpu(i) {
7106
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
53717107 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
53727108
53737109 dma_free_coherent(&pdev->dev,
....@@ -5401,11 +7137,13 @@
54017137 };
54027138 MODULE_DEVICE_TABLE(of, mvpp2_match);
54037139
7140
+#ifdef CONFIG_ACPI
54047141 static const struct acpi_device_id mvpp2_acpi_match[] = {
54057142 { "MRVL0110", MVPP22 },
54067143 { },
54077144 };
54087145 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
7146
+#endif
54097147
54107148 static struct platform_driver mvpp2_driver = {
54117149 .probe = mvpp2_probe,
....@@ -5417,7 +7155,18 @@
54177155 },
54187156 };
54197157
5420
-module_platform_driver(mvpp2_driver);
7158
+static int __init mvpp2_driver_init(void)
7159
+{
7160
+ return platform_driver_register(&mvpp2_driver);
7161
+}
7162
+module_init(mvpp2_driver_init);
7163
+
7164
+static void __exit mvpp2_driver_exit(void)
7165
+{
7166
+ platform_driver_unregister(&mvpp2_driver);
7167
+ mvpp2_dbgfs_exit();
7168
+}
7169
+module_exit(mvpp2_driver_exit);
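
The open-coded init/exit pair replaces module_platform_driver() solely so that unloading the module can also run mvpp2_dbgfs_exit() after the platform driver is unregistered; registration behaviour is otherwise unchanged.
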
54217170
54227171 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
54237172 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");