.. | .. |
---|
28 | 28 | #include <linux/phy.h> |
---|
29 | 29 | #include <linux/phylink.h> |
---|
30 | 30 | #include <linux/phy/phy.h> |
---|
| 31 | +#include <linux/ptp_classify.h> |
---|
31 | 32 | #include <linux/clk.h> |
---|
32 | 33 | #include <linux/hrtimer.h> |
---|
33 | 34 | #include <linux/ktime.h> |
---|
.. | .. |
---|
36 | 37 | #include <net/ip.h> |
---|
37 | 38 | #include <net/ipv6.h> |
---|
38 | 39 | #include <net/tso.h> |
---|
| 40 | +#include <linux/bpf_trace.h> |
---|
39 | 41 | |
---|
40 | 42 | #include "mvpp2.h" |
---|
41 | 43 | #include "mvpp2_prs.h" |
---|
.. | .. |
---|
56 | 58 | /* The prototype is added here to be used in start_dev when using ACPI. This |
---|
57 | 59 | * will be removed once phylink is used for all modes (dt+ACPI). |
---|
58 | 60 | */ |
---|
59 | | -static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, |
---|
60 | | - const struct phylink_link_state *state); |
---|
61 | | -static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, |
---|
62 | | - phy_interface_t interface, struct phy_device *phy); |
---|
| 61 | +static void mvpp2_acpi_start(struct mvpp2_port *port); |
---|
63 | 62 | |
---|
64 | 63 | /* Queue modes */ |
---|
65 | 64 | #define MVPP2_QDIST_SINGLE_MODE 0 |
---|
.. | .. |
---|
82 | 81 | return readl(priv->swth_base[0] + offset); |
---|
83 | 82 | } |
---|
84 | 83 | |
---|
85 | | -u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) |
---|
| 84 | +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) |
---|
86 | 85 | { |
---|
87 | 86 | return readl_relaxed(priv->swth_base[0] + offset); |
---|
88 | 87 | } |
---|
| 88 | + |
---|
| 89 | +static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) |
---|
| 90 | +{ |
---|
| 91 | + return cpu % priv->nthreads; |
---|
| 92 | +} |
---|
| 93 | + |
---|
| 94 | +static struct page_pool * |
---|
| 95 | +mvpp2_create_page_pool(struct device *dev, int num, int len, |
---|
| 96 | + enum dma_data_direction dma_dir) |
---|
| 97 | +{ |
---|
| 98 | + struct page_pool_params pp_params = { |
---|
| 99 | + /* internal DMA mapping in page_pool */ |
---|
| 100 | + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, |
---|
| 101 | + .pool_size = num, |
---|
| 102 | + .nid = NUMA_NO_NODE, |
---|
| 103 | + .dev = dev, |
---|
| 104 | + .dma_dir = dma_dir, |
---|
| 105 | + .offset = MVPP2_SKB_HEADROOM, |
---|
| 106 | + .max_len = len, |
---|
| 107 | + }; |
---|
| 108 | + |
---|
| 109 | + return page_pool_create(&pp_params); |
---|
| 110 | +} |
---|
| 111 | + |
---|
89 | 112 | /* These accessors should be used to access: |
---|
90 | 113 | * |
---|
91 | | - * - per-CPU registers, where each CPU has its own copy of the |
---|
| 114 | + * - per-thread registers, where each thread has its own copy of the |
---|
92 | 115 | * register. |
---|
93 | 116 | * |
---|
94 | 117 | * MVPP2_BM_VIRT_ALLOC_REG |
---|
.. | .. |
---|
104 | 127 | * MVPP2_TXQ_SENT_REG |
---|
105 | 128 | * MVPP2_RXQ_NUM_REG |
---|
106 | 129 | * |
---|
107 | | - * - global registers that must be accessed through a specific CPU |
---|
108 | | - * window, because they are related to an access to a per-CPU |
---|
| 130 | + * - global registers that must be accessed through a specific thread |
---|
| 131 | + * window, because they are related to an access to a per-thread |
---|
109 | 132 | * register |
---|
110 | 133 | * |
---|
111 | 134 | * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) |
---|
.. | .. |
---|
122 | 145 | * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) |
---|
123 | 146 | * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) |
---|
124 | 147 | */ |
---|
125 | | -void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, |
---|
| 148 | +static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, |
---|
126 | 149 | u32 offset, u32 data) |
---|
127 | 150 | { |
---|
128 | | - writel(data, priv->swth_base[cpu] + offset); |
---|
| 151 | + writel(data, priv->swth_base[thread] + offset); |
---|
129 | 152 | } |
---|
130 | 153 | |
---|
131 | | -u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, |
---|
| 154 | +static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, |
---|
132 | 155 | u32 offset) |
---|
133 | 156 | { |
---|
134 | | - return readl(priv->swth_base[cpu] + offset); |
---|
| 157 | + return readl(priv->swth_base[thread] + offset); |
---|
135 | 158 | } |
---|
136 | 159 | |
---|
137 | | -void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, |
---|
| 160 | +static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, |
---|
138 | 161 | u32 offset, u32 data) |
---|
139 | 162 | { |
---|
140 | | - writel_relaxed(data, priv->swth_base[cpu] + offset); |
---|
| 163 | + writel_relaxed(data, priv->swth_base[thread] + offset); |
---|
141 | 164 | } |
---|
142 | 165 | |
---|
143 | | -static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, |
---|
| 166 | +static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, |
---|
144 | 167 | u32 offset) |
---|
145 | 168 | { |
---|
146 | | - return readl_relaxed(priv->swth_base[cpu] + offset); |
---|
| 169 | + return readl_relaxed(priv->swth_base[thread] + offset); |
---|
147 | 170 | } |
---|
148 | 171 | |
---|
149 | 172 | static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, |
---|
.. | .. |
---|
272 | 295 | |
---|
273 | 296 | static void mvpp2_txq_inc_put(struct mvpp2_port *port, |
---|
274 | 297 | struct mvpp2_txq_pcpu *txq_pcpu, |
---|
275 | | - struct sk_buff *skb, |
---|
276 | | - struct mvpp2_tx_desc *tx_desc) |
---|
| 298 | + void *data, |
---|
| 299 | + struct mvpp2_tx_desc *tx_desc, |
---|
| 300 | + enum mvpp2_tx_buf_type buf_type) |
---|
277 | 301 | { |
---|
278 | 302 | struct mvpp2_txq_pcpu_buf *tx_buf = |
---|
279 | 303 | txq_pcpu->buffs + txq_pcpu->txq_put_index; |
---|
280 | | - tx_buf->skb = skb; |
---|
| 304 | + tx_buf->type = buf_type; |
---|
| 305 | + if (buf_type == MVPP2_TYPE_SKB) |
---|
| 306 | + tx_buf->skb = data; |
---|
| 307 | + else |
---|
| 308 | + tx_buf->xdpf = data; |
---|
281 | 309 | tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); |
---|
282 | 310 | tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + |
---|
283 | 311 | mvpp2_txdesc_offset_get(port, tx_desc); |
---|
284 | 312 | txq_pcpu->txq_put_index++; |
---|
285 | 313 | if (txq_pcpu->txq_put_index == txq_pcpu->size) |
---|
286 | 314 | txq_pcpu->txq_put_index = 0; |
---|
| 315 | +} |
---|
| 316 | + |
---|
| 317 | +/* Get number of maximum RXQ */ |
---|
| 318 | +static int mvpp2_get_nrxqs(struct mvpp2 *priv) |
---|
| 319 | +{ |
---|
| 320 | + unsigned int nrxqs; |
---|
| 321 | + |
---|
| 322 | + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) |
---|
| 323 | + return 1; |
---|
| 324 | + |
---|
| 325 | + /* According to the PPv2.2 datasheet and our experiments on |
---|
| 326 | + * PPv2.1, RX queues have an allocation granularity of 4 (when |
---|
| 327 | + * more than a single one on PPv2.2). |
---|
| 328 | + * Round up to nearest multiple of 4. |
---|
| 329 | + */ |
---|
| 330 | + nrxqs = (num_possible_cpus() + 3) & ~0x3; |
---|
| 331 | + if (nrxqs > MVPP2_PORT_MAX_RXQ) |
---|
| 332 | + nrxqs = MVPP2_PORT_MAX_RXQ; |
---|
| 333 | + |
---|
| 334 | + return nrxqs; |
---|
287 | 335 | } |
---|
288 | 336 | |
---|
289 | 337 | /* Get number of physical egress port */ |
---|
.. | .. |
---|
298 | 346 | return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; |
---|
299 | 347 | } |
---|
300 | 348 | |
---|
301 | | -static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) |
---|
| 349 | +/* Returns a struct page if page_pool is set, otherwise a buffer */ |
---|
| 350 | +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool, |
---|
| 351 | + struct page_pool *page_pool) |
---|
302 | 352 | { |
---|
| 353 | + if (page_pool) |
---|
| 354 | + return page_pool_dev_alloc_pages(page_pool); |
---|
| 355 | + |
---|
303 | 356 | if (likely(pool->frag_size <= PAGE_SIZE)) |
---|
304 | 357 | return netdev_alloc_frag(pool->frag_size); |
---|
305 | | - else |
---|
306 | | - return kmalloc(pool->frag_size, GFP_ATOMIC); |
---|
| 358 | + |
---|
| 359 | + return kmalloc(pool->frag_size, GFP_ATOMIC); |
---|
307 | 360 | } |
---|
308 | 361 | |
---|
309 | | -static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) |
---|
| 362 | +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, |
---|
| 363 | + struct page_pool *page_pool, void *data) |
---|
310 | 364 | { |
---|
311 | | - if (likely(pool->frag_size <= PAGE_SIZE)) |
---|
| 365 | + if (page_pool) |
---|
| 366 | + page_pool_put_full_page(page_pool, virt_to_head_page(data), false); |
---|
| 367 | + else if (likely(pool->frag_size <= PAGE_SIZE)) |
---|
312 | 368 | skb_free_frag(data); |
---|
313 | 369 | else |
---|
314 | 370 | kfree(data); |
---|
.. | .. |
---|
317 | 373 | /* Buffer Manager configuration routines */ |
---|
318 | 374 | |
---|
319 | 375 | /* Create pool */ |
---|
320 | | -static int mvpp2_bm_pool_create(struct platform_device *pdev, |
---|
321 | | - struct mvpp2 *priv, |
---|
| 376 | +static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv, |
---|
322 | 377 | struct mvpp2_bm_pool *bm_pool, int size) |
---|
323 | 378 | { |
---|
324 | 379 | u32 val; |
---|
.. | .. |
---|
337 | 392 | else |
---|
338 | 393 | bm_pool->size_bytes = 2 * sizeof(u64) * size; |
---|
339 | 394 | |
---|
340 | | - bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, |
---|
| 395 | + bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes, |
---|
341 | 396 | &bm_pool->dma_addr, |
---|
342 | 397 | GFP_KERNEL); |
---|
343 | 398 | if (!bm_pool->virt_addr) |
---|
.. | .. |
---|
345 | 400 | |
---|
346 | 401 | if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, |
---|
347 | 402 | MVPP2_BM_POOL_PTR_ALIGN)) { |
---|
348 | | - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, |
---|
| 403 | + dma_free_coherent(dev, bm_pool->size_bytes, |
---|
349 | 404 | bm_pool->virt_addr, bm_pool->dma_addr); |
---|
350 | | - dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", |
---|
| 405 | + dev_err(dev, "BM pool %d is not %d bytes aligned\n", |
---|
351 | 406 | bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); |
---|
352 | 407 | return -ENOMEM; |
---|
353 | 408 | } |
---|
.. | .. |
---|
385 | 440 | dma_addr_t *dma_addr, |
---|
386 | 441 | phys_addr_t *phys_addr) |
---|
387 | 442 | { |
---|
388 | | - int cpu = get_cpu(); |
---|
| 443 | + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); |
---|
389 | 444 | |
---|
390 | | - *dma_addr = mvpp2_percpu_read(priv, cpu, |
---|
| 445 | + *dma_addr = mvpp2_thread_read(priv, thread, |
---|
391 | 446 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); |
---|
392 | | - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); |
---|
| 447 | + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG); |
---|
393 | 448 | |
---|
394 | 449 | if (priv->hw_version == MVPP22) { |
---|
395 | 450 | u32 val; |
---|
396 | 451 | u32 dma_addr_highbits, phys_addr_highbits; |
---|
397 | 452 | |
---|
398 | | - val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); |
---|
| 453 | + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); |
---|
399 | 454 | dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); |
---|
400 | 455 | phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> |
---|
401 | 456 | MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; |
---|
.. | .. |
---|
414 | 469 | static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, |
---|
415 | 470 | struct mvpp2_bm_pool *bm_pool, int buf_num) |
---|
416 | 471 | { |
---|
| 472 | + struct page_pool *pp = NULL; |
---|
417 | 473 | int i; |
---|
418 | 474 | |
---|
419 | 475 | if (buf_num > bm_pool->buf_num) { |
---|
.. | .. |
---|
421 | 477 | bm_pool->id, buf_num); |
---|
422 | 478 | buf_num = bm_pool->buf_num; |
---|
423 | 479 | } |
---|
| 480 | + |
---|
| 481 | + if (priv->percpu_pools) |
---|
| 482 | + pp = priv->page_pool[bm_pool->id]; |
---|
424 | 483 | |
---|
425 | 484 | for (i = 0; i < buf_num; i++) { |
---|
426 | 485 | dma_addr_t buf_dma_addr; |
---|
.. | .. |
---|
430 | 489 | mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, |
---|
431 | 490 | &buf_dma_addr, &buf_phys_addr); |
---|
432 | 491 | |
---|
433 | | - dma_unmap_single(dev, buf_dma_addr, |
---|
434 | | - bm_pool->buf_size, DMA_FROM_DEVICE); |
---|
| 492 | + if (!pp) |
---|
| 493 | + dma_unmap_single(dev, buf_dma_addr, |
---|
| 494 | + bm_pool->buf_size, DMA_FROM_DEVICE); |
---|
435 | 495 | |
---|
436 | 496 | data = (void *)phys_to_virt(buf_phys_addr); |
---|
437 | 497 | if (!data) |
---|
438 | 498 | break; |
---|
439 | 499 | |
---|
440 | | - mvpp2_frag_free(bm_pool, data); |
---|
| 500 | + mvpp2_frag_free(bm_pool, pp, data); |
---|
441 | 501 | } |
---|
442 | 502 | |
---|
443 | 503 | /* Update BM driver with number of buffers removed from pool */ |
---|
.. | .. |
---|
462 | 522 | } |
---|
463 | 523 | |
---|
464 | 524 | /* Cleanup pool */ |
---|
465 | | -static int mvpp2_bm_pool_destroy(struct platform_device *pdev, |
---|
466 | | - struct mvpp2 *priv, |
---|
| 525 | +static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, |
---|
467 | 526 | struct mvpp2_bm_pool *bm_pool) |
---|
468 | 527 | { |
---|
469 | 528 | int buf_num; |
---|
470 | 529 | u32 val; |
---|
471 | 530 | |
---|
472 | 531 | buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); |
---|
473 | | - mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); |
---|
| 532 | + mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); |
---|
474 | 533 | |
---|
475 | 534 | /* Check buffer counters after free */ |
---|
476 | 535 | buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); |
---|
.. | .. |
---|
484 | 543 | val |= MVPP2_BM_STOP_MASK; |
---|
485 | 544 | mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); |
---|
486 | 545 | |
---|
487 | | - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, |
---|
| 546 | + if (priv->percpu_pools) { |
---|
| 547 | + page_pool_destroy(priv->page_pool[bm_pool->id]); |
---|
| 548 | + priv->page_pool[bm_pool->id] = NULL; |
---|
| 549 | + } |
---|
| 550 | + |
---|
| 551 | + dma_free_coherent(dev, bm_pool->size_bytes, |
---|
488 | 552 | bm_pool->virt_addr, |
---|
489 | 553 | bm_pool->dma_addr); |
---|
490 | 554 | return 0; |
---|
491 | 555 | } |
---|
492 | 556 | |
---|
493 | | -static int mvpp2_bm_pools_init(struct platform_device *pdev, |
---|
494 | | - struct mvpp2 *priv) |
---|
| 557 | +static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv) |
---|
495 | 558 | { |
---|
496 | | - int i, err, size; |
---|
| 559 | + int i, err, size, poolnum = MVPP2_BM_POOLS_NUM; |
---|
497 | 560 | struct mvpp2_bm_pool *bm_pool; |
---|
| 561 | + |
---|
| 562 | + if (priv->percpu_pools) |
---|
| 563 | + poolnum = mvpp2_get_nrxqs(priv) * 2; |
---|
498 | 564 | |
---|
499 | 565 | /* Create all pools with maximum size */ |
---|
500 | 566 | size = MVPP2_BM_POOL_SIZE_MAX; |
---|
501 | | - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { |
---|
| 567 | + for (i = 0; i < poolnum; i++) { |
---|
502 | 568 | bm_pool = &priv->bm_pools[i]; |
---|
503 | 569 | bm_pool->id = i; |
---|
504 | | - err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); |
---|
| 570 | + err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); |
---|
505 | 571 | if (err) |
---|
506 | 572 | goto err_unroll_pools; |
---|
507 | 573 | mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); |
---|
.. | .. |
---|
509 | 575 | return 0; |
---|
510 | 576 | |
---|
511 | 577 | err_unroll_pools: |
---|
512 | | - dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); |
---|
| 578 | + dev_err(dev, "failed to create BM pool %d, size %d\n", i, size); |
---|
513 | 579 | for (i = i - 1; i >= 0; i--) |
---|
514 | | - mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); |
---|
| 580 | + mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); |
---|
515 | 581 | return err; |
---|
516 | 582 | } |
---|
517 | 583 | |
---|
518 | | -static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) |
---|
| 584 | +static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) |
---|
519 | 585 | { |
---|
520 | | - int i, err; |
---|
| 586 | + enum dma_data_direction dma_dir = DMA_FROM_DEVICE; |
---|
| 587 | + int i, err, poolnum = MVPP2_BM_POOLS_NUM; |
---|
| 588 | + struct mvpp2_port *port; |
---|
521 | 589 | |
---|
522 | | - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { |
---|
| 590 | + if (priv->percpu_pools) { |
---|
| 591 | + for (i = 0; i < priv->port_count; i++) { |
---|
| 592 | + port = priv->port_list[i]; |
---|
| 593 | + if (port->xdp_prog) { |
---|
| 594 | + dma_dir = DMA_BIDIRECTIONAL; |
---|
| 595 | + break; |
---|
| 596 | + } |
---|
| 597 | + } |
---|
| 598 | + |
---|
| 599 | + poolnum = mvpp2_get_nrxqs(priv) * 2; |
---|
| 600 | + for (i = 0; i < poolnum; i++) { |
---|
| 601 | + /* the pool in use */ |
---|
| 602 | + int pn = i / (poolnum / 2); |
---|
| 603 | + |
---|
| 604 | + priv->page_pool[i] = |
---|
| 605 | + mvpp2_create_page_pool(dev, |
---|
| 606 | + mvpp2_pools[pn].buf_num, |
---|
| 607 | + mvpp2_pools[pn].pkt_size, |
---|
| 608 | + dma_dir); |
---|
| 609 | + if (IS_ERR(priv->page_pool[i])) { |
---|
| 610 | + int j; |
---|
| 611 | + |
---|
| 612 | + for (j = 0; j < i; j++) { |
---|
| 613 | + page_pool_destroy(priv->page_pool[j]); |
---|
| 614 | + priv->page_pool[j] = NULL; |
---|
| 615 | + } |
---|
| 616 | + return PTR_ERR(priv->page_pool[i]); |
---|
| 617 | + } |
---|
| 618 | + } |
---|
| 619 | + } |
---|
| 620 | + |
---|
| 621 | + dev_info(dev, "using %d %s buffers\n", poolnum, |
---|
| 622 | + priv->percpu_pools ? "per-cpu" : "shared"); |
---|
| 623 | + |
---|
| 624 | + for (i = 0; i < poolnum; i++) { |
---|
523 | 625 | /* Mask BM all interrupts */ |
---|
524 | 626 | mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); |
---|
525 | 627 | /* Clear BM cause register */ |
---|
.. | .. |
---|
527 | 629 | } |
---|
528 | 630 | |
---|
529 | 631 | /* Allocate and initialize BM pools */ |
---|
530 | | - priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, |
---|
| 632 | + priv->bm_pools = devm_kcalloc(dev, poolnum, |
---|
531 | 633 | sizeof(*priv->bm_pools), GFP_KERNEL); |
---|
532 | 634 | if (!priv->bm_pools) |
---|
533 | 635 | return -ENOMEM; |
---|
534 | 636 | |
---|
535 | | - err = mvpp2_bm_pools_init(pdev, priv); |
---|
| 637 | + err = mvpp2_bm_pools_init(dev, priv); |
---|
536 | 638 | if (err < 0) |
---|
537 | 639 | return err; |
---|
538 | 640 | return 0; |
---|
.. | .. |
---|
597 | 699 | |
---|
598 | 700 | static void *mvpp2_buf_alloc(struct mvpp2_port *port, |
---|
599 | 701 | struct mvpp2_bm_pool *bm_pool, |
---|
| 702 | + struct page_pool *page_pool, |
---|
600 | 703 | dma_addr_t *buf_dma_addr, |
---|
601 | 704 | phys_addr_t *buf_phys_addr, |
---|
602 | 705 | gfp_t gfp_mask) |
---|
603 | 706 | { |
---|
604 | 707 | dma_addr_t dma_addr; |
---|
| 708 | + struct page *page; |
---|
605 | 709 | void *data; |
---|
606 | 710 | |
---|
607 | | - data = mvpp2_frag_alloc(bm_pool); |
---|
| 711 | + data = mvpp2_frag_alloc(bm_pool, page_pool); |
---|
608 | 712 | if (!data) |
---|
609 | 713 | return NULL; |
---|
610 | 714 | |
---|
611 | | - dma_addr = dma_map_single(port->dev->dev.parent, data, |
---|
612 | | - MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), |
---|
613 | | - DMA_FROM_DEVICE); |
---|
614 | | - if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { |
---|
615 | | - mvpp2_frag_free(bm_pool, data); |
---|
616 | | - return NULL; |
---|
| 715 | + if (page_pool) { |
---|
| 716 | + page = (struct page *)data; |
---|
| 717 | + dma_addr = page_pool_get_dma_addr(page); |
---|
| 718 | + data = page_to_virt(page); |
---|
| 719 | + } else { |
---|
| 720 | + dma_addr = dma_map_single(port->dev->dev.parent, data, |
---|
| 721 | + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), |
---|
| 722 | + DMA_FROM_DEVICE); |
---|
| 723 | + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { |
---|
| 724 | + mvpp2_frag_free(bm_pool, NULL, data); |
---|
| 725 | + return NULL; |
---|
| 726 | + } |
---|
617 | 727 | } |
---|
618 | 728 | *buf_dma_addr = dma_addr; |
---|
619 | 729 | *buf_phys_addr = virt_to_phys(data); |
---|
.. | .. |
---|
626 | 736 | dma_addr_t buf_dma_addr, |
---|
627 | 737 | phys_addr_t buf_phys_addr) |
---|
628 | 738 | { |
---|
629 | | - int cpu = get_cpu(); |
---|
| 739 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
| 740 | + unsigned long flags = 0; |
---|
| 741 | + |
---|
| 742 | + if (test_bit(thread, &port->priv->lock_map)) |
---|
| 743 | + spin_lock_irqsave(&port->bm_lock[thread], flags); |
---|
630 | 744 | |
---|
631 | 745 | if (port->priv->hw_version == MVPP22) { |
---|
632 | 746 | u32 val = 0; |
---|
.. | .. |
---|
640 | 754 | << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & |
---|
641 | 755 | MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; |
---|
642 | 756 | |
---|
643 | | - mvpp2_percpu_write_relaxed(port->priv, cpu, |
---|
| 757 | + mvpp2_thread_write_relaxed(port->priv, thread, |
---|
644 | 758 | MVPP22_BM_ADDR_HIGH_RLS_REG, val); |
---|
645 | 759 | } |
---|
646 | 760 | |
---|
.. | .. |
---|
649 | 763 | * descriptor. Instead of storing the virtual address, we |
---|
650 | 764 | * store the physical address |
---|
651 | 765 | */ |
---|
652 | | - mvpp2_percpu_write_relaxed(port->priv, cpu, |
---|
| 766 | + mvpp2_thread_write_relaxed(port->priv, thread, |
---|
653 | 767 | MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); |
---|
654 | | - mvpp2_percpu_write_relaxed(port->priv, cpu, |
---|
| 768 | + mvpp2_thread_write_relaxed(port->priv, thread, |
---|
655 | 769 | MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); |
---|
| 770 | + |
---|
| 771 | + if (test_bit(thread, &port->priv->lock_map)) |
---|
| 772 | + spin_unlock_irqrestore(&port->bm_lock[thread], flags); |
---|
656 | 773 | |
---|
657 | 774 | put_cpu(); |
---|
658 | 775 | } |
---|
.. | .. |
---|
664 | 781 | int i, buf_size, total_size; |
---|
665 | 782 | dma_addr_t dma_addr; |
---|
666 | 783 | phys_addr_t phys_addr; |
---|
| 784 | + struct page_pool *pp = NULL; |
---|
667 | 785 | void *buf; |
---|
| 786 | + |
---|
| 787 | + if (port->priv->percpu_pools && |
---|
| 788 | + bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { |
---|
| 789 | + netdev_err(port->dev, |
---|
| 790 | + "attempted to use jumbo frames with per-cpu pools"); |
---|
| 791 | + return 0; |
---|
| 792 | + } |
---|
668 | 793 | |
---|
669 | 794 | buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); |
---|
670 | 795 | total_size = MVPP2_RX_TOTAL_SIZE(buf_size); |
---|
.. | .. |
---|
677 | 802 | return 0; |
---|
678 | 803 | } |
---|
679 | 804 | |
---|
| 805 | + if (port->priv->percpu_pools) |
---|
| 806 | + pp = port->priv->page_pool[bm_pool->id]; |
---|
680 | 807 | for (i = 0; i < buf_num; i++) { |
---|
681 | | - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, |
---|
| 808 | + buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr, |
---|
682 | 809 | &phys_addr, GFP_KERNEL); |
---|
683 | 810 | if (!buf) |
---|
684 | 811 | break; |
---|
.. | .. |
---|
709 | 836 | struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; |
---|
710 | 837 | int num; |
---|
711 | 838 | |
---|
712 | | - if (pool >= MVPP2_BM_POOLS_NUM) { |
---|
| 839 | + if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) || |
---|
| 840 | + (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) { |
---|
| 841 | + netdev_err(port->dev, "Invalid pool %d\n", pool); |
---|
| 842 | + return NULL; |
---|
| 843 | + } |
---|
| 844 | + |
---|
| 845 | + /* Allocate buffers in case BM pool is used as long pool, but packet |
---|
| 846 | + * size doesn't match MTU or BM pool hasn't being used yet |
---|
| 847 | + */ |
---|
| 848 | + if (new_pool->pkt_size == 0) { |
---|
| 849 | + int pkts_num; |
---|
| 850 | + |
---|
| 851 | + /* Set default buffer number or free all the buffers in case |
---|
| 852 | + * the pool is not empty |
---|
| 853 | + */ |
---|
| 854 | + pkts_num = new_pool->buf_num; |
---|
| 855 | + if (pkts_num == 0) { |
---|
| 856 | + if (port->priv->percpu_pools) { |
---|
| 857 | + if (pool < port->nrxqs) |
---|
| 858 | + pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num; |
---|
| 859 | + else |
---|
| 860 | + pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num; |
---|
| 861 | + } else { |
---|
| 862 | + pkts_num = mvpp2_pools[pool].buf_num; |
---|
| 863 | + } |
---|
| 864 | + } else { |
---|
| 865 | + mvpp2_bm_bufs_free(port->dev->dev.parent, |
---|
| 866 | + port->priv, new_pool, pkts_num); |
---|
| 867 | + } |
---|
| 868 | + |
---|
| 869 | + new_pool->pkt_size = pkt_size; |
---|
| 870 | + new_pool->frag_size = |
---|
| 871 | + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + |
---|
| 872 | + MVPP2_SKB_SHINFO_SIZE; |
---|
| 873 | + |
---|
| 874 | + /* Allocate buffers for this pool */ |
---|
| 875 | + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); |
---|
| 876 | + if (num != pkts_num) { |
---|
| 877 | + WARN(1, "pool %d: %d of %d allocated\n", |
---|
| 878 | + new_pool->id, num, pkts_num); |
---|
| 879 | + return NULL; |
---|
| 880 | + } |
---|
| 881 | + } |
---|
| 882 | + |
---|
| 883 | + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, |
---|
| 884 | + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); |
---|
| 885 | + |
---|
| 886 | + return new_pool; |
---|
| 887 | +} |
---|
| 888 | + |
---|
| 889 | +static struct mvpp2_bm_pool * |
---|
| 890 | +mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, |
---|
| 891 | + unsigned int pool, int pkt_size) |
---|
| 892 | +{ |
---|
| 893 | + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; |
---|
| 894 | + int num; |
---|
| 895 | + |
---|
| 896 | + if (pool > port->nrxqs * 2) { |
---|
713 | 897 | netdev_err(port->dev, "Invalid pool %d\n", pool); |
---|
714 | 898 | return NULL; |
---|
715 | 899 | } |
---|
.. | .. |
---|
725 | 909 | */ |
---|
726 | 910 | pkts_num = new_pool->buf_num; |
---|
727 | 911 | if (pkts_num == 0) |
---|
728 | | - pkts_num = mvpp2_pools[pool].buf_num; |
---|
| 912 | + pkts_num = mvpp2_pools[type].buf_num; |
---|
729 | 913 | else |
---|
730 | 914 | mvpp2_bm_bufs_free(port->dev->dev.parent, |
---|
731 | 915 | port->priv, new_pool, pkts_num); |
---|
.. | .. |
---|
750 | 934 | return new_pool; |
---|
751 | 935 | } |
---|
752 | 936 | |
---|
753 | | -/* Initialize pools for swf */ |
---|
754 | | -static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) |
---|
| 937 | +/* Initialize pools for swf, shared buffers variant */ |
---|
| 938 | +static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) |
---|
755 | 939 | { |
---|
756 | | - int rxq; |
---|
757 | 940 | enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; |
---|
| 941 | + int rxq; |
---|
758 | 942 | |
---|
759 | 943 | /* If port pkt_size is higher than 1518B: |
---|
760 | 944 | * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool |
---|
.. | .. |
---|
798 | 982 | return 0; |
---|
799 | 983 | } |
---|
800 | 984 | |
---|
| 985 | +/* Initialize pools for swf, percpu buffers variant */ |
---|
| 986 | +static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) |
---|
| 987 | +{ |
---|
| 988 | + struct mvpp2_bm_pool *bm_pool; |
---|
| 989 | + int i; |
---|
| 990 | + |
---|
| 991 | + for (i = 0; i < port->nrxqs; i++) { |
---|
| 992 | + bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, |
---|
| 993 | + mvpp2_pools[MVPP2_BM_SHORT].pkt_size); |
---|
| 994 | + if (!bm_pool) |
---|
| 995 | + return -ENOMEM; |
---|
| 996 | + |
---|
| 997 | + bm_pool->port_map |= BIT(port->id); |
---|
| 998 | + mvpp2_rxq_short_pool_set(port, i, bm_pool->id); |
---|
| 999 | + } |
---|
| 1000 | + |
---|
| 1001 | + for (i = 0; i < port->nrxqs; i++) { |
---|
| 1002 | + bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, |
---|
| 1003 | + mvpp2_pools[MVPP2_BM_LONG].pkt_size); |
---|
| 1004 | + if (!bm_pool) |
---|
| 1005 | + return -ENOMEM; |
---|
| 1006 | + |
---|
| 1007 | + bm_pool->port_map |= BIT(port->id); |
---|
| 1008 | + mvpp2_rxq_long_pool_set(port, i, bm_pool->id); |
---|
| 1009 | + } |
---|
| 1010 | + |
---|
| 1011 | + port->pool_long = NULL; |
---|
| 1012 | + port->pool_short = NULL; |
---|
| 1013 | + |
---|
| 1014 | + return 0; |
---|
| 1015 | +} |
---|
| 1016 | + |
---|
| 1017 | +static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) |
---|
| 1018 | +{ |
---|
| 1019 | + if (port->priv->percpu_pools) |
---|
| 1020 | + return mvpp2_swf_bm_pool_init_percpu(port); |
---|
| 1021 | + else |
---|
| 1022 | + return mvpp2_swf_bm_pool_init_shared(port); |
---|
| 1023 | +} |
---|
| 1024 | + |
---|
| 1025 | +static void mvpp2_set_hw_csum(struct mvpp2_port *port, |
---|
| 1026 | + enum mvpp2_bm_pool_log_num new_long_pool) |
---|
| 1027 | +{ |
---|
| 1028 | + const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
---|
| 1029 | + |
---|
| 1030 | + /* Update L4 checksum when jumbo enable/disable on port. |
---|
| 1031 | + * Only port 0 supports hardware checksum offload due to |
---|
| 1032 | + * the Tx FIFO size limitation. |
---|
| 1033 | + * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor |
---|
| 1034 | + * has 7 bits, so the maximum L3 offset is 128. |
---|
| 1035 | + */ |
---|
| 1036 | + if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { |
---|
| 1037 | + port->dev->features &= ~csums; |
---|
| 1038 | + port->dev->hw_features &= ~csums; |
---|
| 1039 | + } else { |
---|
| 1040 | + port->dev->features |= csums; |
---|
| 1041 | + port->dev->hw_features |= csums; |
---|
| 1042 | + } |
---|
| 1043 | +} |
---|
| 1044 | + |
---|
801 | 1045 | static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) |
---|
802 | 1046 | { |
---|
803 | 1047 | struct mvpp2_port *port = netdev_priv(dev); |
---|
804 | 1048 | enum mvpp2_bm_pool_log_num new_long_pool; |
---|
805 | 1049 | int pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
---|
| 1050 | + |
---|
| 1051 | + if (port->priv->percpu_pools) |
---|
| 1052 | + goto out_set; |
---|
806 | 1053 | |
---|
807 | 1054 | /* If port MTU is higher than 1518B: |
---|
808 | 1055 | * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool |
---|
.. | .. |
---|
830 | 1077 | /* Add port to new short & long pool */ |
---|
831 | 1078 | mvpp2_swf_bm_pool_init(port); |
---|
832 | 1079 | |
---|
833 | | - /* Update L4 checksum when jumbo enable/disable on port */ |
---|
834 | | - if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { |
---|
835 | | - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
---|
836 | | - dev->hw_features &= ~(NETIF_F_IP_CSUM | |
---|
837 | | - NETIF_F_IPV6_CSUM); |
---|
838 | | - } else { |
---|
839 | | - dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
---|
840 | | - dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
---|
841 | | - } |
---|
| 1080 | + mvpp2_set_hw_csum(port, new_long_pool); |
---|
842 | 1081 | } |
---|
843 | 1082 | |
---|
| 1083 | +out_set: |
---|
844 | 1084 | dev->mtu = mtu; |
---|
845 | 1085 | dev->wanted_features = dev->features; |
---|
846 | 1086 | |
---|
.. | .. |
---|
886 | 1126 | MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); |
---|
887 | 1127 | } |
---|
888 | 1128 | |
---|
889 | | -/* Mask the current CPU's Rx/Tx interrupts |
---|
| 1129 | +/* Mask the current thread's Rx/Tx interrupts |
---|
890 | 1130 | * Called by on_each_cpu(), guaranteed to run with migration disabled, |
---|
891 | 1131 | * using smp_processor_id() is OK. |
---|
892 | 1132 | */ |
---|
.. | .. |
---|
894 | 1134 | { |
---|
895 | 1135 | struct mvpp2_port *port = arg; |
---|
896 | 1136 | |
---|
897 | | - mvpp2_percpu_write(port->priv, smp_processor_id(), |
---|
| 1137 | + /* If the thread isn't used, don't do anything */ |
---|
| 1138 | + if (smp_processor_id() > port->priv->nthreads) |
---|
| 1139 | + return; |
---|
| 1140 | + |
---|
| 1141 | + mvpp2_thread_write(port->priv, |
---|
| 1142 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
---|
898 | 1143 | MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); |
---|
899 | 1144 | } |
---|
900 | 1145 | |
---|
901 | | -/* Unmask the current CPU's Rx/Tx interrupts. |
---|
| 1146 | +/* Unmask the current thread's Rx/Tx interrupts. |
---|
902 | 1147 | * Called by on_each_cpu(), guaranteed to run with migration disabled, |
---|
903 | 1148 | * using smp_processor_id() is OK. |
---|
904 | 1149 | */ |
---|
.. | .. |
---|
907 | 1152 | struct mvpp2_port *port = arg; |
---|
908 | 1153 | u32 val; |
---|
909 | 1154 | |
---|
| 1155 | + /* If the thread isn't used, don't do anything */ |
---|
| 1156 | + if (smp_processor_id() >= port->priv->nthreads) |
---|
| 1157 | + return; |
---|
| 1158 | + |
---|
910 | 1159 | val = MVPP2_CAUSE_MISC_SUM_MASK | |
---|
911 | 1160 | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); |
---|
912 | 1161 | if (port->has_tx_irqs) |
---|
913 | 1162 | val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
---|
914 | 1163 | |
---|
915 | | - mvpp2_percpu_write(port->priv, smp_processor_id(), |
---|
| 1164 | + mvpp2_thread_write(port->priv, |
---|
| 1165 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
---|
916 | 1166 | MVPP2_ISR_RX_TX_MASK_REG(port->id), val); |
---|
917 | 1167 | } |
---|
918 | 1168 | |
---|
.. | .. |
---|
936 | 1186 | if (v->type != MVPP2_QUEUE_VECTOR_SHARED) |
---|
937 | 1187 | continue; |
---|
938 | 1188 | |
---|
939 | | - mvpp2_percpu_write(port->priv, v->sw_thread_id, |
---|
| 1189 | + mvpp2_thread_write(port->priv, v->sw_thread_id, |
---|
940 | 1190 | MVPP2_ISR_RX_TX_MASK_REG(port->id), val); |
---|
941 | 1191 | } |
---|
942 | 1192 | } |
---|
943 | 1193 | |
---|
| 1194 | +/* Only GOP port 0 has an XLG MAC */ |
---|
| 1195 | +static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) |
---|
| 1196 | +{ |
---|
| 1197 | + return port->gop_id == 0; |
---|
| 1198 | +} |
---|
| 1199 | + |
---|
| 1200 | +static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) |
---|
| 1201 | +{ |
---|
| 1202 | + return !(port->priv->hw_version == MVPP22 && port->gop_id == 0); |
---|
| 1203 | +} |
---|
| 1204 | + |
---|
944 | 1205 | /* Port configuration routines */ |
---|
| 1206 | +static bool mvpp2_is_xlg(phy_interface_t interface) |
---|
| 1207 | +{ |
---|
| 1208 | + return interface == PHY_INTERFACE_MODE_10GBASER || |
---|
| 1209 | + interface == PHY_INTERFACE_MODE_XAUI; |
---|
| 1210 | +} |
---|
| 1211 | + |
---|
| 1212 | +static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set) |
---|
| 1213 | +{ |
---|
| 1214 | + u32 old, val; |
---|
| 1215 | + |
---|
| 1216 | + old = val = readl(ptr); |
---|
| 1217 | + val &= ~mask; |
---|
| 1218 | + val |= set; |
---|
| 1219 | + if (old != val) |
---|
| 1220 | + writel(val, ptr); |
---|
| 1221 | +} |
---|
945 | 1222 | |
---|
946 | 1223 | static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) |
---|
947 | 1224 | { |
---|
.. | .. |
---|
987 | 1264 | void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); |
---|
988 | 1265 | u32 val; |
---|
989 | 1266 | |
---|
990 | | - /* XPCS */ |
---|
991 | 1267 | val = readl(xpcs + MVPP22_XPCS_CFG0); |
---|
992 | 1268 | val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | |
---|
993 | 1269 | MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); |
---|
994 | 1270 | val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); |
---|
995 | 1271 | writel(val, xpcs + MVPP22_XPCS_CFG0); |
---|
996 | 1272 | |
---|
997 | | - /* MPCS */ |
---|
998 | 1273 | val = readl(mpcs + MVPP22_MPCS_CTRL); |
---|
999 | 1274 | val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; |
---|
1000 | 1275 | writel(val, mpcs + MVPP22_MPCS_CTRL); |
---|
1001 | 1276 | |
---|
1002 | 1277 | val = readl(mpcs + MVPP22_MPCS_CLK_RESET); |
---|
1003 | | - val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | |
---|
1004 | | - MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); |
---|
| 1278 | + val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7); |
---|
1005 | 1279 | val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); |
---|
1006 | | - writel(val, mpcs + MVPP22_MPCS_CLK_RESET); |
---|
1007 | | - |
---|
1008 | | - val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; |
---|
1009 | | - val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; |
---|
1010 | 1280 | writel(val, mpcs + MVPP22_MPCS_CLK_RESET); |
---|
1011 | 1281 | } |
---|
1012 | 1282 | |
---|
.. | .. |
---|
1023 | 1293 | case PHY_INTERFACE_MODE_RGMII_ID: |
---|
1024 | 1294 | case PHY_INTERFACE_MODE_RGMII_RXID: |
---|
1025 | 1295 | case PHY_INTERFACE_MODE_RGMII_TXID: |
---|
1026 | | - if (port->gop_id == 0) |
---|
| 1296 | + if (!mvpp2_port_supports_rgmii(port)) |
---|
1027 | 1297 | goto invalid_conf; |
---|
1028 | 1298 | mvpp22_gop_init_rgmii(port); |
---|
1029 | 1299 | break; |
---|
.. | .. |
---|
1032 | 1302 | case PHY_INTERFACE_MODE_2500BASEX: |
---|
1033 | 1303 | mvpp22_gop_init_sgmii(port); |
---|
1034 | 1304 | break; |
---|
1035 | | - case PHY_INTERFACE_MODE_10GKR: |
---|
1036 | | - if (port->gop_id != 0) |
---|
| 1305 | + case PHY_INTERFACE_MODE_10GBASER: |
---|
| 1306 | + if (!mvpp2_port_supports_xlg(port)) |
---|
1037 | 1307 | goto invalid_conf; |
---|
1038 | 1308 | mvpp22_gop_init_10gkr(port); |
---|
1039 | 1309 | break; |
---|
.. | .. |
---|
1067 | 1337 | u32 val; |
---|
1068 | 1338 | |
---|
1069 | 1339 | if (phy_interface_mode_is_rgmii(port->phy_interface) || |
---|
1070 | | - port->phy_interface == PHY_INTERFACE_MODE_SGMII || |
---|
1071 | | - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
1072 | | - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
| 1340 | + phy_interface_mode_is_8023z(port->phy_interface) || |
---|
| 1341 | + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
---|
1073 | 1342 | /* Enable the GMAC link status irq for this port */ |
---|
1074 | 1343 | val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); |
---|
1075 | 1344 | val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; |
---|
1076 | 1345 | writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); |
---|
1077 | 1346 | } |
---|
1078 | 1347 | |
---|
1079 | | - if (port->gop_id == 0) { |
---|
| 1348 | + if (mvpp2_port_supports_xlg(port)) { |
---|
1080 | 1349 | /* Enable the XLG/GIG irqs for this port */ |
---|
1081 | 1350 | val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); |
---|
1082 | | - if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) |
---|
| 1351 | + if (mvpp2_is_xlg(port->phy_interface)) |
---|
1083 | 1352 | val |= MVPP22_XLG_EXT_INT_MASK_XLG; |
---|
1084 | 1353 | else |
---|
1085 | 1354 | val |= MVPP22_XLG_EXT_INT_MASK_GIG; |
---|
.. | .. |
---|
1091 | 1360 | { |
---|
1092 | 1361 | u32 val; |
---|
1093 | 1362 | |
---|
1094 | | - if (port->gop_id == 0) { |
---|
| 1363 | + if (mvpp2_port_supports_xlg(port)) { |
---|
1095 | 1364 | val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); |
---|
1096 | 1365 | val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | |
---|
1097 | 1366 | MVPP22_XLG_EXT_INT_MASK_GIG); |
---|
.. | .. |
---|
1099 | 1368 | } |
---|
1100 | 1369 | |
---|
1101 | 1370 | if (phy_interface_mode_is_rgmii(port->phy_interface) || |
---|
1102 | | - port->phy_interface == PHY_INTERFACE_MODE_SGMII || |
---|
1103 | | - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
1104 | | - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
| 1371 | + phy_interface_mode_is_8023z(port->phy_interface) || |
---|
| 1372 | + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
---|
1105 | 1373 | val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); |
---|
1106 | 1374 | val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; |
---|
1107 | 1375 | writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); |
---|
.. | .. |
---|
1112 | 1380 | { |
---|
1113 | 1381 | u32 val; |
---|
1114 | 1382 | |
---|
1115 | | - if (phy_interface_mode_is_rgmii(port->phy_interface) || |
---|
1116 | | - port->phy_interface == PHY_INTERFACE_MODE_SGMII || |
---|
1117 | | - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
1118 | | - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
| 1383 | + mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK, |
---|
| 1384 | + MVPP22_GMAC_INT_SUM_MASK_PTP, |
---|
| 1385 | + MVPP22_GMAC_INT_SUM_MASK_PTP); |
---|
| 1386 | + |
---|
| 1387 | + if (port->phylink || |
---|
| 1388 | + phy_interface_mode_is_rgmii(port->phy_interface) || |
---|
| 1389 | + phy_interface_mode_is_8023z(port->phy_interface) || |
---|
| 1390 | + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
---|
1119 | 1391 | val = readl(port->base + MVPP22_GMAC_INT_MASK); |
---|
1120 | 1392 | val |= MVPP22_GMAC_INT_MASK_LINK_STAT; |
---|
1121 | 1393 | writel(val, port->base + MVPP22_GMAC_INT_MASK); |
---|
1122 | 1394 | } |
---|
1123 | 1395 | |
---|
1124 | | - if (port->gop_id == 0) { |
---|
| 1396 | + if (mvpp2_port_supports_xlg(port)) { |
---|
1125 | 1397 | val = readl(port->base + MVPP22_XLG_INT_MASK); |
---|
1126 | 1398 | val |= MVPP22_XLG_INT_MASK_LINK; |
---|
1127 | 1399 | writel(val, port->base + MVPP22_XLG_INT_MASK); |
---|
| 1400 | + |
---|
| 1401 | + mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK, |
---|
| 1402 | + MVPP22_XLG_EXT_INT_MASK_PTP, |
---|
| 1403 | + MVPP22_XLG_EXT_INT_MASK_PTP); |
---|
1128 | 1404 | } |
---|
1129 | 1405 | |
---|
1130 | 1406 | mvpp22_gop_unmask_irq(port); |
---|
.. | .. |
---|
1142 | 1418 | */ |
---|
1143 | 1419 | static int mvpp22_comphy_init(struct mvpp2_port *port) |
---|
1144 | 1420 | { |
---|
1145 | | - enum phy_mode mode; |
---|
1146 | 1421 | int ret; |
---|
1147 | 1422 | |
---|
1148 | 1423 | if (!port->comphy) |
---|
1149 | 1424 | return 0; |
---|
1150 | 1425 | |
---|
1151 | | - switch (port->phy_interface) { |
---|
1152 | | - case PHY_INTERFACE_MODE_SGMII: |
---|
1153 | | - case PHY_INTERFACE_MODE_1000BASEX: |
---|
1154 | | - mode = PHY_MODE_SGMII; |
---|
1155 | | - break; |
---|
1156 | | - case PHY_INTERFACE_MODE_2500BASEX: |
---|
1157 | | - mode = PHY_MODE_2500SGMII; |
---|
1158 | | - break; |
---|
1159 | | - case PHY_INTERFACE_MODE_10GKR: |
---|
1160 | | - mode = PHY_MODE_10GKR; |
---|
1161 | | - break; |
---|
1162 | | - default: |
---|
1163 | | - return -EINVAL; |
---|
1164 | | - } |
---|
1165 | | - |
---|
1166 | | - ret = phy_set_mode(port->comphy, mode); |
---|
| 1426 | + ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, |
---|
| 1427 | + port->phy_interface); |
---|
1167 | 1428 | if (ret) |
---|
1168 | 1429 | return ret; |
---|
1169 | 1430 | |
---|
.. | .. |
---|
1174 | 1435 | { |
---|
1175 | 1436 | u32 val; |
---|
1176 | 1437 | |
---|
1177 | | - /* Only GOP port 0 has an XLG MAC */ |
---|
1178 | | - if (port->gop_id == 0 && |
---|
1179 | | - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || |
---|
1180 | | - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { |
---|
| 1438 | + if (mvpp2_port_supports_xlg(port) && |
---|
| 1439 | + mvpp2_is_xlg(port->phy_interface)) { |
---|
1181 | 1440 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
---|
1182 | | - val |= MVPP22_XLG_CTRL0_PORT_EN | |
---|
1183 | | - MVPP22_XLG_CTRL0_MAC_RESET_DIS; |
---|
| 1441 | + val |= MVPP22_XLG_CTRL0_PORT_EN; |
---|
1184 | 1442 | val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; |
---|
1185 | 1443 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); |
---|
1186 | 1444 | } else { |
---|
.. | .. |
---|
1195 | 1453 | { |
---|
1196 | 1454 | u32 val; |
---|
1197 | 1455 | |
---|
1198 | | - /* Only GOP port 0 has an XLG MAC */ |
---|
1199 | | - if (port->gop_id == 0 && |
---|
1200 | | - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || |
---|
1201 | | - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { |
---|
| 1456 | + if (mvpp2_port_supports_xlg(port) && |
---|
| 1457 | + mvpp2_is_xlg(port->phy_interface)) { |
---|
1202 | 1458 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
---|
1203 | 1459 | val &= ~MVPP22_XLG_CTRL0_PORT_EN; |
---|
1204 | 1460 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); |
---|
1205 | | - |
---|
1206 | | - /* Disable & reset should be done separately */ |
---|
1207 | | - val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; |
---|
1208 | | - writel(val, port->base + MVPP22_XLG_CTRL0_REG); |
---|
1209 | | - } else { |
---|
1210 | | - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
1211 | | - val &= ~(MVPP2_GMAC_PORT_EN_MASK); |
---|
1212 | | - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
1213 | 1461 | } |
---|
| 1462 | + |
---|
| 1463 | + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
| 1464 | + val &= ~(MVPP2_GMAC_PORT_EN_MASK); |
---|
| 1465 | + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
1214 | 1466 | } |
---|
1215 | 1467 | |
---|
1216 | 1468 | /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ |
---|
.. | .. |
---|
1236 | 1488 | else |
---|
1237 | 1489 | val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; |
---|
1238 | 1490 | |
---|
1239 | | - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || |
---|
1240 | | - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
1241 | | - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) |
---|
| 1491 | + if (phy_interface_mode_is_8023z(state->interface) || |
---|
| 1492 | + state->interface == PHY_INTERFACE_MODE_SGMII) |
---|
1242 | 1493 | val |= MVPP2_GMAC_PCS_LB_EN_MASK; |
---|
1243 | 1494 | else |
---|
1244 | 1495 | val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; |
---|
1245 | 1496 | |
---|
1246 | 1497 | writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); |
---|
1247 | 1498 | } |
---|
| 1499 | + |
---|
/* Indices of the software-maintained XDP counters. They are stored in the
 * "offset" field of the mvpp2_ethtool_xdp[] descriptors (instead of a
 * register offset) and dispatched on in mvpp2_read_stats().
 */
enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};
---|
1248 | 1509 | |
---|
1249 | 1510 | struct mvpp2_ethtool_counter { |
---|
1250 | 1511 | unsigned int offset; |
---|
.. | .. |
---|
1264 | 1525 | return val; |
---|
1265 | 1526 | } |
---|
1266 | 1527 | |
---|
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 *
 * NOTE(review): the index write and the counter read are two separate
 * accesses with no locking here - presumably all callers serialize on the
 * stats mutex; confirm before adding new call sites.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
---|
| 1538 | + |
---|
1267 | 1539 | /* Due to the fact that software statistics and hardware statistics are, by |
---|
1268 | 1540 | * design, incremented at different moments in the chain of packet processing, |
---|
1269 | 1541 | * it is very likely that incoming packets could have been dropped after being |
---|
.. | .. |
---|
1273 | 1545 | * Hence, statistics gathered from userspace with ifconfig (software) and |
---|
1274 | 1546 | * ethtool (hardware) cannot be compared. |
---|
1275 | 1547 | */ |
---|
1276 | | -static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { |
---|
| 1548 | +static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = { |
---|
1277 | 1549 | { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, |
---|
1278 | 1550 | { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, |
---|
1279 | 1551 | { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, |
---|
.. | .. |
---|
1303 | 1575 | { MVPP2_MIB_LATE_COLLISION, "late_collision" }, |
---|
1304 | 1576 | }; |
---|
1305 | 1577 | |
---|
/* Per-port drop counters; the registers are laid out with a 4-byte stride
 * per port (mvpp2_read_stats() adds 4 * port->id to each offset).
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};
---|
| 1582 | + |
---|
/* Per-txq counters, read through the indirect MVPP2_CTRS_IDX window; the %d
 * in each name is filled in with the txq number by
 * mvpp2_ethtool_get_strings().
 *
 * NOTE(review): "euqueue" below looks like a typo for "enqueue", but these
 * strings are user-visible ethtool stat names - renaming could break
 * existing tooling, so the string is kept as-is; confirm before changing.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};
---|
| 1594 | + |
---|
/* Per-rxq counters, read through the indirect MVPP2_CTRS_IDX window using
 * the global rxq number (port->first_rxq + q) as the index; the %d in each
 * name is the user-visible (0-based) rxq number.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};
---|
| 1601 | + |
---|
/* Software XDP counters: here "offset" holds an ETHTOOL_XDP_* tag rather
 * than a register offset; mvpp2_read_stats() dispatches on it to pick the
 * matching field of the aggregated per-CPU stats.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};
---|
| 1611 | + |
---|
/* Total number of ethtool statistics exposed by a port with the given queue
 * counts; must stay in sync with the names emitted by
 * mvpp2_ethtool_get_strings() and the values filled by mvpp2_read_stats().
 */
#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
						 ARRAY_SIZE(mvpp2_ethtool_xdp))
---|
| 1617 | + |
---|
1306 | 1618 | static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, |
---|
1307 | 1619 | u8 *data) |
---|
1308 | 1620 | { |
---|
1309 | | - if (sset == ETH_SS_STATS) { |
---|
1310 | | - int i; |
---|
| 1621 | + struct mvpp2_port *port = netdev_priv(netdev); |
---|
| 1622 | + int i, q; |
---|
1311 | 1623 | |
---|
1312 | | - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) |
---|
1313 | | - strscpy(data + i * ETH_GSTRING_LEN, |
---|
1314 | | - mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); |
---|
| 1624 | + if (sset != ETH_SS_STATS) |
---|
| 1625 | + return; |
---|
| 1626 | + |
---|
| 1627 | + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { |
---|
| 1628 | + strscpy(data, mvpp2_ethtool_mib_regs[i].string, |
---|
| 1629 | + ETH_GSTRING_LEN); |
---|
| 1630 | + data += ETH_GSTRING_LEN; |
---|
| 1631 | + } |
---|
| 1632 | + |
---|
| 1633 | + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { |
---|
| 1634 | + strscpy(data, mvpp2_ethtool_port_regs[i].string, |
---|
| 1635 | + ETH_GSTRING_LEN); |
---|
| 1636 | + data += ETH_GSTRING_LEN; |
---|
| 1637 | + } |
---|
| 1638 | + |
---|
| 1639 | + for (q = 0; q < port->ntxqs; q++) { |
---|
| 1640 | + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { |
---|
| 1641 | + snprintf(data, ETH_GSTRING_LEN, |
---|
| 1642 | + mvpp2_ethtool_txq_regs[i].string, q); |
---|
| 1643 | + data += ETH_GSTRING_LEN; |
---|
| 1644 | + } |
---|
| 1645 | + } |
---|
| 1646 | + |
---|
| 1647 | + for (q = 0; q < port->nrxqs; q++) { |
---|
| 1648 | + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { |
---|
| 1649 | + snprintf(data, ETH_GSTRING_LEN, |
---|
| 1650 | + mvpp2_ethtool_rxq_regs[i].string, |
---|
| 1651 | + q); |
---|
| 1652 | + data += ETH_GSTRING_LEN; |
---|
| 1653 | + } |
---|
| 1654 | + } |
---|
| 1655 | + |
---|
| 1656 | + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { |
---|
| 1657 | + strscpy(data, mvpp2_ethtool_xdp[i].string, |
---|
| 1658 | + ETH_GSTRING_LEN); |
---|
| 1659 | + data += ETH_GSTRING_LEN; |
---|
| 1660 | + } |
---|
| 1661 | +} |
---|
| 1662 | + |
---|
/* Sum the per-CPU XDP counters of @port into @xdp_stats.
 *
 * Each CPU's counters are copied out inside a u64_stats fetch/retry loop so
 * that a coherent snapshot of all seven values is read even while that CPU
 * is updating them.
 */
static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{
	unsigned int start;
	unsigned int cpu;

	/* Gather XDP Statistics */
	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 xdp_redirect;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_xmit_err;
		u64 xdp_tx;
		u64 xdp_tx_err;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			/* Snapshot under the writer's seqcount; retried on race */
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			xdp_redirect = cpu_stats->xdp_redirect;
			xdp_pass = cpu_stats->xdp_pass;
			xdp_drop = cpu_stats->xdp_drop;
			xdp_xmit = cpu_stats->xdp_xmit;
			xdp_xmit_err = cpu_stats->xdp_xmit_err;
			xdp_tx = cpu_stats->xdp_tx;
			xdp_tx_err = cpu_stats->xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		xdp_stats->xdp_redirect += xdp_redirect;
		xdp_stats->xdp_pass += xdp_pass;
		xdp_stats->xdp_drop += xdp_drop;
		xdp_stats->xdp_xmit += xdp_xmit;
		xdp_stats->xdp_xmit_err += xdp_xmit_err;
		xdp_stats->xdp_tx += xdp_tx;
		xdp_stats->xdp_tx_err += xdp_tx_err;
	}
}
---|
| 1701 | + |
---|
/* Refresh the cumulative ethtool statistics buffer of @port.
 *
 * Values are written into port->ethtool_stats in exactly the order the
 * names are emitted by mvpp2_ethtool_get_strings(): MIB counters, per-port
 * drop counters, per-txq counters, per-rxq counters, then the XDP counters.
 *
 * Hardware counters are accumulated with "+=" (they appear to be
 * clear-on-read - see mvpp2_read_count(); confirm against the datasheet),
 * while the XDP counters are absolute software totals and are stored
 * with "=".
 */
static void mvpp2_read_stats(struct mvpp2_port *port)
{
	struct mvpp2_pcpu_stats xdp_stats = {};
	const struct mvpp2_ethtool_counter *s;
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	/* Per-port drop registers are laid out with a 4-byte stride per port */
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);

	/* Gather XDP Statistics */
	mvpp2_get_xdp_stats(port, &xdp_stats);

	for (i = 0, s = mvpp2_ethtool_xdp;
	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
	     s++, i++) {
		/* "offset" is an ETHTOOL_XDP_* tag here, not a register */
		switch (s->offset) {
		case ETHTOOL_XDP_REDIRECT:
			*pstats++ = xdp_stats.xdp_redirect;
			break;
		case ETHTOOL_XDP_PASS:
			*pstats++ = xdp_stats.xdp_pass;
			break;
		case ETHTOOL_XDP_DROP:
			*pstats++ = xdp_stats.xdp_drop;
			break;
		case ETHTOOL_XDP_TX:
			*pstats++ = xdp_stats.xdp_tx;
			break;
		case ETHTOOL_XDP_TX_ERR:
			*pstats++ = xdp_stats.xdp_tx_err;
			break;
		case ETHTOOL_XDP_XMIT:
			*pstats++ = xdp_stats.xdp_xmit;
			break;
		case ETHTOOL_XDP_XMIT_ERR:
			*pstats++ = xdp_stats.xdp_xmit_err;
			break;
		}
	}
}
---|
1317 | 1765 | |
---|
.. | .. |
---|
1320 | 1768 | struct delayed_work *del_work = to_delayed_work(work); |
---|
1321 | 1769 | struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, |
---|
1322 | 1770 | stats_work); |
---|
1323 | | - u64 *pstats; |
---|
1324 | | - int i; |
---|
1325 | 1771 | |
---|
1326 | 1772 | mutex_lock(&port->gather_stats_lock); |
---|
1327 | 1773 | |
---|
1328 | | - pstats = port->ethtool_stats; |
---|
1329 | | - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) |
---|
1330 | | - *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); |
---|
| 1774 | + mvpp2_read_stats(port); |
---|
1331 | 1775 | |
---|
1332 | 1776 | /* No need to read again the counters right after this function if it |
---|
1333 | 1777 | * was called asynchronously by the user (ie. use of ethtool). |
---|
.. | .. |
---|
1351 | 1795 | |
---|
1352 | 1796 | mutex_lock(&port->gather_stats_lock); |
---|
1353 | 1797 | memcpy(data, port->ethtool_stats, |
---|
1354 | | - sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); |
---|
| 1798 | + sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); |
---|
1355 | 1799 | mutex_unlock(&port->gather_stats_lock); |
---|
1356 | 1800 | } |
---|
1357 | 1801 | |
---|
1358 | 1802 | static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) |
---|
1359 | 1803 | { |
---|
| 1804 | + struct mvpp2_port *port = netdev_priv(dev); |
---|
| 1805 | + |
---|
1360 | 1806 | if (sset == ETH_SS_STATS) |
---|
1361 | | - return ARRAY_SIZE(mvpp2_ethtool_regs); |
---|
| 1807 | + return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); |
---|
1362 | 1808 | |
---|
1363 | 1809 | return -EOPNOTSUPP; |
---|
1364 | 1810 | } |
---|
1365 | 1811 | |
---|
/* Put the port's MAC(s) in reset.
 *
 * The GMAC reset is always asserted; on PPv2.2 GOP port 0 (the only GOP
 * port with an XLG MAC) the XLG reset is additionally asserted by clearing
 * its RESET_DIS bit.
 */
static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}
---|
| 1826 | + |
---|
/* Hold the MPCS and XPCS blocks of PPv2.2 GOP port 0 in reset.
 * No-op for other hardware versions / GOP ports.
 */
static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	/* Assert the MAC and serdes RX/TX clock resets on the MPCS */
	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	/* Assert the XPCS reset by clearing its RESET_DIS bit */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}
---|
| 1847 | + |
---|
/* Release from reset the PCS block used by the current PHY interface mode
 * of PPv2.2 GOP port 0: the MPCS for 10GBASE-R, the XPCS for XAUI/RXAUI.
 * Other modes use neither, so nothing is deasserted for them.
 * No-op for other hardware versions / GOP ports.
 */
static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GBASER:
		/* Release the MAC and serdes clock resets on the MPCS */
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		/* Release the XPCS reset via its RESET_DIS bit */
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}
---|
1379 | 1877 | |
---|
1380 | 1878 | /* Change maximum receive size of the port */ |
---|
.. | .. |
---|
1420 | 1918 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, |
---|
1421 | 1919 | tx_port_num); |
---|
1422 | 1920 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); |
---|
| 1921 | + |
---|
| 1922 | + /* Set TXQ scheduling to Round-Robin */ |
---|
| 1923 | + mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); |
---|
1423 | 1924 | |
---|
1424 | 1925 | /* Close bandwidth for all queues */ |
---|
1425 | 1926 | for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) |
---|
.. | .. |
---|
1618 | 2119 | static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) |
---|
1619 | 2120 | { |
---|
1620 | 2121 | /* aggregated access - relevant TXQ number is written in TX desc */ |
---|
1621 | | - mvpp2_percpu_write(port->priv, smp_processor_id(), |
---|
| 2122 | + mvpp2_thread_write(port->priv, |
---|
| 2123 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
---|
1622 | 2124 | MVPP2_AGGR_TXQ_UPDATE_REG, pending); |
---|
1623 | 2125 | } |
---|
1624 | 2126 | |
---|
.. | .. |
---|
1628 | 2130 | * Called only from mvpp2_tx(), so migration is disabled, using |
---|
1629 | 2131 | * smp_processor_id() is OK. |
---|
1630 | 2132 | */ |
---|
1631 | | -static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, |
---|
| 2133 | +static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, |
---|
1632 | 2134 | struct mvpp2_tx_queue *aggr_txq, int num) |
---|
1633 | 2135 | { |
---|
1634 | 2136 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { |
---|
1635 | 2137 | /* Update number of occupied aggregated Tx descriptors */ |
---|
1636 | | - int cpu = smp_processor_id(); |
---|
1637 | | - u32 val = mvpp2_read_relaxed(priv, |
---|
1638 | | - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); |
---|
| 2138 | + unsigned int thread = |
---|
| 2139 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 2140 | + u32 val = mvpp2_read_relaxed(port->priv, |
---|
| 2141 | + MVPP2_AGGR_TXQ_STATUS_REG(thread)); |
---|
1639 | 2142 | |
---|
1640 | 2143 | aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; |
---|
1641 | 2144 | |
---|
.. | .. |
---|
1651 | 2154 | * only by mvpp2_tx(), so migration is disabled, using |
---|
1652 | 2155 | * smp_processor_id() is OK. |
---|
1653 | 2156 | */ |
---|
1654 | | -static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, |
---|
| 2157 | +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, |
---|
1655 | 2158 | struct mvpp2_tx_queue *txq, int num) |
---|
1656 | 2159 | { |
---|
| 2160 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 2161 | + struct mvpp2 *priv = port->priv; |
---|
1657 | 2162 | u32 val; |
---|
1658 | | - int cpu = smp_processor_id(); |
---|
1659 | 2163 | |
---|
1660 | 2164 | val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; |
---|
1661 | | - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); |
---|
| 2165 | + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); |
---|
1662 | 2166 | |
---|
1663 | | - val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); |
---|
| 2167 | + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); |
---|
1664 | 2168 | |
---|
1665 | 2169 | return val & MVPP2_TXQ_RSVD_RSLT_MASK; |
---|
1666 | 2170 | } |
---|
.. | .. |
---|
1668 | 2172 | /* Check if there are enough reserved descriptors for transmission. |
---|
1669 | 2173 | * If not, request chunk of reserved descriptors and check again. |
---|
1670 | 2174 | */ |
---|
1671 | | -static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, |
---|
| 2175 | +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, |
---|
1672 | 2176 | struct mvpp2_tx_queue *txq, |
---|
1673 | 2177 | struct mvpp2_txq_pcpu *txq_pcpu, |
---|
1674 | 2178 | int num) |
---|
1675 | 2179 | { |
---|
1676 | | - int req, cpu, desc_count; |
---|
| 2180 | + int req, desc_count; |
---|
| 2181 | + unsigned int thread; |
---|
1677 | 2182 | |
---|
1678 | 2183 | if (txq_pcpu->reserved_num >= num) |
---|
1679 | 2184 | return 0; |
---|
.. | .. |
---|
1684 | 2189 | |
---|
1685 | 2190 | desc_count = 0; |
---|
1686 | 2191 | /* Compute total of used descriptors */ |
---|
1687 | | - for_each_present_cpu(cpu) { |
---|
| 2192 | + for (thread = 0; thread < port->priv->nthreads; thread++) { |
---|
1688 | 2193 | struct mvpp2_txq_pcpu *txq_pcpu_aux; |
---|
1689 | 2194 | |
---|
1690 | | - txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); |
---|
| 2195 | + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); |
---|
1691 | 2196 | desc_count += txq_pcpu_aux->count; |
---|
1692 | 2197 | desc_count += txq_pcpu_aux->reserved_num; |
---|
1693 | 2198 | } |
---|
.. | .. |
---|
1696 | 2201 | desc_count += req; |
---|
1697 | 2202 | |
---|
1698 | 2203 | if (desc_count > |
---|
1699 | | - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) |
---|
| 2204 | + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) |
---|
1700 | 2205 | return -ENOMEM; |
---|
1701 | 2206 | |
---|
1702 | | - txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); |
---|
| 2207 | + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); |
---|
1703 | 2208 | |
---|
1704 | 2209 | /* OK, the descriptor could have been updated: check again. */ |
---|
1705 | 2210 | if (txq_pcpu->reserved_num < num) |
---|
.. | .. |
---|
1753 | 2258 | |
---|
1754 | 2259 | /* Get number of sent descriptors and decrement counter. |
---|
1755 | 2260 | * The number of sent descriptors is returned. |
---|
1756 | | - * Per-CPU access |
---|
| 2261 | + * Per-thread access |
---|
1757 | 2262 | * |
---|
1758 | 2263 | * Called only from mvpp2_txq_done(), called from mvpp2_tx() |
---|
1759 | 2264 | * (migration disabled) and from the TX completion tasklet (migration |
---|
.. | .. |
---|
1765 | 2270 | u32 val; |
---|
1766 | 2271 | |
---|
1767 | 2272 | /* Reading status reg resets transmitted descriptor counter */ |
---|
1768 | | - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), |
---|
| 2273 | + val = mvpp2_thread_read_relaxed(port->priv, |
---|
| 2274 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
---|
1769 | 2275 | MVPP2_TXQ_SENT_REG(txq->id)); |
---|
1770 | 2276 | |
---|
1771 | 2277 | return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> |
---|
.. | .. |
---|
1780 | 2286 | struct mvpp2_port *port = arg; |
---|
1781 | 2287 | int queue; |
---|
1782 | 2288 | |
---|
| 2289 | + /* If the thread isn't used, don't do anything */ |
---|
| 2290 | + if (smp_processor_id() >= port->priv->nthreads) |
---|
| 2291 | + return; |
---|
| 2292 | + |
---|
1783 | 2293 | for (queue = 0; queue < port->ntxqs; queue++) { |
---|
1784 | 2294 | int id = port->txqs[queue]->id; |
---|
1785 | 2295 | |
---|
1786 | | - mvpp2_percpu_read(port->priv, smp_processor_id(), |
---|
| 2296 | + mvpp2_thread_read(port->priv, |
---|
| 2297 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
---|
1787 | 2298 | MVPP2_TXQ_SENT_REG(id)); |
---|
1788 | 2299 | } |
---|
1789 | 2300 | } |
---|
.. | .. |
---|
1843 | 2354 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, |
---|
1844 | 2355 | struct mvpp2_rx_queue *rxq) |
---|
1845 | 2356 | { |
---|
1846 | | - int cpu = get_cpu(); |
---|
| 2357 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
1847 | 2358 | |
---|
1848 | 2359 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
---|
1849 | 2360 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; |
---|
1850 | 2361 | |
---|
1851 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
---|
1852 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, |
---|
| 2362 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
---|
| 2363 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, |
---|
1853 | 2364 | rxq->pkts_coal); |
---|
1854 | 2365 | |
---|
1855 | 2366 | put_cpu(); |
---|
.. | .. |
---|
1859 | 2370 | static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, |
---|
1860 | 2371 | struct mvpp2_tx_queue *txq) |
---|
1861 | 2372 | { |
---|
1862 | | - int cpu = get_cpu(); |
---|
| 2373 | + unsigned int thread; |
---|
1863 | 2374 | u32 val; |
---|
1864 | 2375 | |
---|
1865 | 2376 | if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) |
---|
1866 | 2377 | txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; |
---|
1867 | 2378 | |
---|
1868 | 2379 | val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); |
---|
1869 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
---|
1870 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); |
---|
1871 | | - |
---|
1872 | | - put_cpu(); |
---|
| 2380 | + /* PKT-coalescing registers are per-queue + per-thread */ |
---|
| 2381 | + for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { |
---|
| 2382 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
---|
| 2383 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); |
---|
| 2384 | + } |
---|
1873 | 2385 | } |
---|
1874 | 2386 | |
---|
1875 | 2387 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
---|
.. | .. |
---|
1935 | 2447 | struct mvpp2_txq_pcpu_buf *tx_buf = |
---|
1936 | 2448 | txq_pcpu->buffs + txq_pcpu->txq_get_index; |
---|
1937 | 2449 | |
---|
1938 | | - if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) |
---|
| 2450 | + if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && |
---|
| 2451 | + tx_buf->type != MVPP2_TYPE_XDP_TX) |
---|
1939 | 2452 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, |
---|
1940 | 2453 | tx_buf->size, DMA_TO_DEVICE); |
---|
1941 | | - if (tx_buf->skb) |
---|
| 2454 | + if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) |
---|
1942 | 2455 | dev_kfree_skb_any(tx_buf->skb); |
---|
| 2456 | + else if (tx_buf->type == MVPP2_TYPE_XDP_TX || |
---|
| 2457 | + tx_buf->type == MVPP2_TYPE_XDP_NDO) |
---|
| 2458 | + xdp_return_frame(tx_buf->xdpf); |
---|
1943 | 2459 | |
---|
1944 | 2460 | mvpp2_txq_inc_get(txq_pcpu); |
---|
1945 | 2461 | } |
---|
.. | .. |
---|
1968 | 2484 | struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); |
---|
1969 | 2485 | int tx_done; |
---|
1970 | 2486 | |
---|
1971 | | - if (txq_pcpu->cpu != smp_processor_id()) |
---|
| 2487 | + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) |
---|
1972 | 2488 | netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); |
---|
1973 | 2489 | |
---|
1974 | 2490 | tx_done = mvpp2_txq_sent_desc_proc(port, txq); |
---|
.. | .. |
---|
1984 | 2500 | } |
---|
1985 | 2501 | |
---|
1986 | 2502 | static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, |
---|
1987 | | - int cpu) |
---|
| 2503 | + unsigned int thread) |
---|
1988 | 2504 | { |
---|
1989 | 2505 | struct mvpp2_tx_queue *txq; |
---|
1990 | 2506 | struct mvpp2_txq_pcpu *txq_pcpu; |
---|
.. | .. |
---|
1995 | 2511 | if (!txq) |
---|
1996 | 2512 | break; |
---|
1997 | 2513 | |
---|
1998 | | - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
---|
| 2514 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
1999 | 2515 | |
---|
2000 | 2516 | if (txq_pcpu->count) { |
---|
2001 | 2517 | mvpp2_txq_done(port, txq, txq_pcpu); |
---|
.. | .. |
---|
2011 | 2527 | |
---|
2012 | 2528 | /* Allocate and initialize descriptors for aggr TXQ */ |
---|
2013 | 2529 | static int mvpp2_aggr_txq_init(struct platform_device *pdev, |
---|
2014 | | - struct mvpp2_tx_queue *aggr_txq, int cpu, |
---|
2015 | | - struct mvpp2 *priv) |
---|
| 2530 | + struct mvpp2_tx_queue *aggr_txq, |
---|
| 2531 | + unsigned int thread, struct mvpp2 *priv) |
---|
2016 | 2532 | { |
---|
2017 | 2533 | u32 txq_dma; |
---|
2018 | 2534 | |
---|
2019 | 2535 | /* Allocate memory for TX descriptors */ |
---|
2020 | | - aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, |
---|
2021 | | - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, |
---|
2022 | | - &aggr_txq->descs_dma, GFP_KERNEL); |
---|
| 2536 | + aggr_txq->descs = dma_alloc_coherent(&pdev->dev, |
---|
| 2537 | + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, |
---|
| 2538 | + &aggr_txq->descs_dma, GFP_KERNEL); |
---|
2023 | 2539 | if (!aggr_txq->descs) |
---|
2024 | 2540 | return -ENOMEM; |
---|
2025 | 2541 | |
---|
.. | .. |
---|
2027 | 2543 | |
---|
2028 | 2544 | /* Aggr TXQ no reset WA */ |
---|
2029 | 2545 | aggr_txq->next_desc_to_proc = mvpp2_read(priv, |
---|
2030 | | - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); |
---|
| 2546 | + MVPP2_AGGR_TXQ_INDEX_REG(thread)); |
---|
2031 | 2547 | |
---|
2032 | 2548 | /* Set Tx descriptors queue starting address indirect |
---|
2033 | 2549 | * access |
---|
.. | .. |
---|
2038 | 2554 | txq_dma = aggr_txq->descs_dma >> |
---|
2039 | 2555 | MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; |
---|
2040 | 2556 | |
---|
2041 | | - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); |
---|
2042 | | - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), |
---|
| 2557 | + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); |
---|
| 2558 | + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), |
---|
2043 | 2559 | MVPP2_AGGR_TXQ_SIZE); |
---|
2044 | 2560 | |
---|
2045 | 2561 | return 0; |
---|
.. | .. |
---|
2048 | 2564 | /* Create a specified Rx queue */ |
---|
2049 | 2565 | static int mvpp2_rxq_init(struct mvpp2_port *port, |
---|
2050 | 2566 | struct mvpp2_rx_queue *rxq) |
---|
2051 | | - |
---|
2052 | 2567 | { |
---|
| 2568 | + struct mvpp2 *priv = port->priv; |
---|
| 2569 | + unsigned int thread; |
---|
2053 | 2570 | u32 rxq_dma; |
---|
2054 | | - int cpu; |
---|
| 2571 | + int err; |
---|
2055 | 2572 | |
---|
2056 | 2573 | rxq->size = port->rx_ring_size; |
---|
2057 | 2574 | |
---|
.. | .. |
---|
2068 | 2585 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); |
---|
2069 | 2586 | |
---|
2070 | 2587 | /* Set Rx descriptors queue starting address - indirect access */ |
---|
2071 | | - cpu = get_cpu(); |
---|
2072 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
---|
| 2588 | + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
| 2589 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
---|
2073 | 2590 | if (port->priv->hw_version == MVPP21) |
---|
2074 | 2591 | rxq_dma = rxq->descs_dma; |
---|
2075 | 2592 | else |
---|
2076 | 2593 | rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; |
---|
2077 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
---|
2078 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); |
---|
2079 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); |
---|
| 2594 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
---|
| 2595 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); |
---|
| 2596 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); |
---|
2080 | 2597 | put_cpu(); |
---|
2081 | 2598 | |
---|
2082 | 2599 | /* Set Offset */ |
---|
2083 | | - mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); |
---|
| 2600 | + mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); |
---|
2084 | 2601 | |
---|
2085 | 2602 | /* Set coalescing pkts and time */ |
---|
2086 | 2603 | mvpp2_rx_pkts_coal_set(port, rxq); |
---|
.. | .. |
---|
2089 | 2606 | /* Add number of descriptors ready for receiving packets */ |
---|
2090 | 2607 | mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); |
---|
2091 | 2608 | |
---|
| 2609 | + if (priv->percpu_pools) { |
---|
| 2610 | + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq); |
---|
| 2611 | + if (err < 0) |
---|
| 2612 | + goto err_free_dma; |
---|
| 2613 | + |
---|
| 2614 | + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq); |
---|
| 2615 | + if (err < 0) |
---|
| 2616 | + goto err_unregister_rxq_short; |
---|
| 2617 | + |
---|
| 2618 | + /* Every RXQ has a pool for short and another for long packets */ |
---|
| 2619 | + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, |
---|
| 2620 | + MEM_TYPE_PAGE_POOL, |
---|
| 2621 | + priv->page_pool[rxq->logic_rxq]); |
---|
| 2622 | + if (err < 0) |
---|
| 2623 | + goto err_unregister_rxq_long; |
---|
| 2624 | + |
---|
| 2625 | + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, |
---|
| 2626 | + MEM_TYPE_PAGE_POOL, |
---|
| 2627 | + priv->page_pool[rxq->logic_rxq + |
---|
| 2628 | + port->nrxqs]); |
---|
| 2629 | + if (err < 0) |
---|
| 2630 | + goto err_unregister_mem_rxq_short; |
---|
| 2631 | + } |
---|
| 2632 | + |
---|
2092 | 2633 | return 0; |
---|
| 2634 | + |
---|
| 2635 | +err_unregister_mem_rxq_short: |
---|
| 2636 | + xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); |
---|
| 2637 | +err_unregister_rxq_long: |
---|
| 2638 | + xdp_rxq_info_unreg(&rxq->xdp_rxq_long); |
---|
| 2639 | +err_unregister_rxq_short: |
---|
| 2640 | + xdp_rxq_info_unreg(&rxq->xdp_rxq_short); |
---|
| 2641 | +err_free_dma: |
---|
| 2642 | + dma_free_coherent(port->dev->dev.parent, |
---|
| 2643 | + rxq->size * MVPP2_DESC_ALIGNED_SIZE, |
---|
| 2644 | + rxq->descs, rxq->descs_dma); |
---|
| 2645 | + return err; |
---|
2093 | 2646 | } |
---|
2094 | 2647 | |
---|
2095 | 2648 | /* Push packets received by the RXQ to BM pool */ |
---|
.. | .. |
---|
2121 | 2674 | static void mvpp2_rxq_deinit(struct mvpp2_port *port, |
---|
2122 | 2675 | struct mvpp2_rx_queue *rxq) |
---|
2123 | 2676 | { |
---|
2124 | | - int cpu; |
---|
| 2677 | + unsigned int thread; |
---|
| 2678 | + |
---|
| 2679 | + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) |
---|
| 2680 | + xdp_rxq_info_unreg(&rxq->xdp_rxq_short); |
---|
| 2681 | + |
---|
| 2682 | + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) |
---|
| 2683 | + xdp_rxq_info_unreg(&rxq->xdp_rxq_long); |
---|
2125 | 2684 | |
---|
2126 | 2685 | mvpp2_rxq_drop_pkts(port, rxq); |
---|
2127 | 2686 | |
---|
.. | .. |
---|
2140 | 2699 | * free descriptor number |
---|
2141 | 2700 | */ |
---|
2142 | 2701 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); |
---|
2143 | | - cpu = get_cpu(); |
---|
2144 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
---|
2145 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); |
---|
2146 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); |
---|
| 2702 | + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
| 2703 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
---|
| 2704 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); |
---|
| 2705 | + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); |
---|
2147 | 2706 | put_cpu(); |
---|
2148 | 2707 | } |
---|
2149 | 2708 | |
---|
.. | .. |
---|
2152 | 2711 | struct mvpp2_tx_queue *txq) |
---|
2153 | 2712 | { |
---|
2154 | 2713 | u32 val; |
---|
2155 | | - int cpu, desc, desc_per_txq, tx_port_num; |
---|
| 2714 | + unsigned int thread; |
---|
| 2715 | + int desc, desc_per_txq, tx_port_num; |
---|
2156 | 2716 | struct mvpp2_txq_pcpu *txq_pcpu; |
---|
2157 | 2717 | |
---|
2158 | 2718 | txq->size = port->tx_ring_size; |
---|
.. | .. |
---|
2167 | 2727 | txq->last_desc = txq->size - 1; |
---|
2168 | 2728 | |
---|
2169 | 2729 | /* Set Tx descriptors queue starting address - indirect access */ |
---|
2170 | | - cpu = get_cpu(); |
---|
2171 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
---|
2172 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, |
---|
| 2730 | + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
| 2731 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
---|
| 2732 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, |
---|
2173 | 2733 | txq->descs_dma); |
---|
2174 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, |
---|
| 2734 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, |
---|
2175 | 2735 | txq->size & MVPP2_TXQ_DESC_SIZE_MASK); |
---|
2176 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); |
---|
2177 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, |
---|
| 2736 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); |
---|
| 2737 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, |
---|
2178 | 2738 | txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); |
---|
2179 | | - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); |
---|
| 2739 | + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); |
---|
2180 | 2740 | val &= ~MVPP2_TXQ_PENDING_MASK; |
---|
2181 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); |
---|
| 2741 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); |
---|
2182 | 2742 | |
---|
2183 | 2743 | /* Calculate base address in prefetch buffer. We reserve 16 descriptors |
---|
2184 | 2744 | * for each existing TXQ. |
---|
.. | .. |
---|
2189 | 2749 | desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + |
---|
2190 | 2750 | (txq->log_id * desc_per_txq); |
---|
2191 | 2751 | |
---|
2192 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, |
---|
| 2752 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, |
---|
2193 | 2753 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | |
---|
2194 | 2754 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); |
---|
2195 | 2755 | put_cpu(); |
---|
.. | .. |
---|
2208 | 2768 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), |
---|
2209 | 2769 | val); |
---|
2210 | 2770 | |
---|
2211 | | - for_each_present_cpu(cpu) { |
---|
2212 | | - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
---|
| 2771 | + for (thread = 0; thread < port->priv->nthreads; thread++) { |
---|
| 2772 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
2213 | 2773 | txq_pcpu->size = txq->size; |
---|
2214 | 2774 | txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, |
---|
2215 | 2775 | sizeof(*txq_pcpu->buffs), |
---|
.. | .. |
---|
2243 | 2803 | struct mvpp2_tx_queue *txq) |
---|
2244 | 2804 | { |
---|
2245 | 2805 | struct mvpp2_txq_pcpu *txq_pcpu; |
---|
2246 | | - int cpu; |
---|
| 2806 | + unsigned int thread; |
---|
2247 | 2807 | |
---|
2248 | | - for_each_present_cpu(cpu) { |
---|
2249 | | - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
---|
| 2808 | + for (thread = 0; thread < port->priv->nthreads; thread++) { |
---|
| 2809 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
2250 | 2810 | kfree(txq_pcpu->buffs); |
---|
2251 | 2811 | |
---|
2252 | 2812 | if (txq_pcpu->tso_headers) |
---|
.. | .. |
---|
2272 | 2832 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); |
---|
2273 | 2833 | |
---|
2274 | 2834 | /* Set Tx descriptors queue starting address and size */ |
---|
2275 | | - cpu = get_cpu(); |
---|
2276 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
---|
2277 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); |
---|
2278 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); |
---|
| 2835 | + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
| 2836 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
---|
| 2837 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); |
---|
| 2838 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); |
---|
2279 | 2839 | put_cpu(); |
---|
2280 | 2840 | } |
---|
2281 | 2841 | |
---|
.. | .. |
---|
2283 | 2843 | static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) |
---|
2284 | 2844 | { |
---|
2285 | 2845 | struct mvpp2_txq_pcpu *txq_pcpu; |
---|
2286 | | - int delay, pending, cpu; |
---|
| 2846 | + int delay, pending; |
---|
| 2847 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
---|
2287 | 2848 | u32 val; |
---|
2288 | 2849 | |
---|
2289 | | - cpu = get_cpu(); |
---|
2290 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
---|
2291 | | - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); |
---|
| 2850 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
---|
| 2851 | + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); |
---|
2292 | 2852 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
---|
2293 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); |
---|
| 2853 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); |
---|
2294 | 2854 | |
---|
2295 | 2855 | /* The napi queue has been stopped so wait for all packets |
---|
2296 | 2856 | * to be transmitted. |
---|
.. | .. |
---|
2306 | 2866 | mdelay(1); |
---|
2307 | 2867 | delay++; |
---|
2308 | 2868 | |
---|
2309 | | - pending = mvpp2_percpu_read(port->priv, cpu, |
---|
| 2869 | + pending = mvpp2_thread_read(port->priv, thread, |
---|
2310 | 2870 | MVPP2_TXQ_PENDING_REG); |
---|
2311 | 2871 | pending &= MVPP2_TXQ_PENDING_MASK; |
---|
2312 | 2872 | } while (pending); |
---|
2313 | 2873 | |
---|
2314 | 2874 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; |
---|
2315 | | - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); |
---|
| 2875 | + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); |
---|
2316 | 2876 | put_cpu(); |
---|
2317 | 2877 | |
---|
2318 | | - for_each_present_cpu(cpu) { |
---|
2319 | | - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
---|
| 2878 | + for (thread = 0; thread < port->priv->nthreads; thread++) { |
---|
| 2879 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
2320 | 2880 | |
---|
2321 | 2881 | /* Release all packets */ |
---|
2322 | 2882 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); |
---|
.. | .. |
---|
2390 | 2950 | err = mvpp2_txq_init(port, txq); |
---|
2391 | 2951 | if (err) |
---|
2392 | 2952 | goto err_cleanup; |
---|
| 2953 | + |
---|
| 2954 | + /* Assign this queue to a CPU */ |
---|
| 2955 | + if (queue < num_possible_cpus()) |
---|
| 2956 | + netif_set_xps_queue(port->dev, cpumask_of(queue), queue); |
---|
2393 | 2957 | } |
---|
2394 | 2958 | |
---|
2395 | 2959 | if (port->has_tx_irqs) { |
---|
.. | .. |
---|
2420 | 2984 | return IRQ_HANDLED; |
---|
2421 | 2985 | } |
---|
2422 | 2986 | |
---|
2423 | | -/* Per-port interrupt for link status changes */ |
---|
2424 | | -static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) |
---|
| 2987 | +static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) |
---|
2425 | 2988 | { |
---|
2426 | | - struct mvpp2_port *port = (struct mvpp2_port *)dev_id; |
---|
2427 | | - struct net_device *dev = port->dev; |
---|
2428 | | - bool event = false, link = false; |
---|
2429 | | - u32 val; |
---|
| 2989 | + struct skb_shared_hwtstamps shhwtstamps; |
---|
| 2990 | + struct mvpp2_hwtstamp_queue *queue; |
---|
| 2991 | + struct sk_buff *skb; |
---|
| 2992 | + void __iomem *ptp_q; |
---|
| 2993 | + unsigned int id; |
---|
| 2994 | + u32 r0, r1, r2; |
---|
2430 | 2995 | |
---|
2431 | | - mvpp22_gop_mask_irq(port); |
---|
| 2996 | + ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); |
---|
| 2997 | + if (nq) |
---|
| 2998 | + ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; |
---|
2432 | 2999 | |
---|
2433 | | - if (port->gop_id == 0 && |
---|
2434 | | - port->phy_interface == PHY_INTERFACE_MODE_10GKR) { |
---|
2435 | | - val = readl(port->base + MVPP22_XLG_INT_STAT); |
---|
2436 | | - if (val & MVPP22_XLG_INT_STAT_LINK) { |
---|
2437 | | - event = true; |
---|
2438 | | - val = readl(port->base + MVPP22_XLG_STATUS); |
---|
2439 | | - if (val & MVPP22_XLG_STATUS_LINK_UP) |
---|
2440 | | - link = true; |
---|
2441 | | - } |
---|
2442 | | - } else if (phy_interface_mode_is_rgmii(port->phy_interface) || |
---|
2443 | | - port->phy_interface == PHY_INTERFACE_MODE_SGMII || |
---|
2444 | | - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
2445 | | - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
2446 | | - val = readl(port->base + MVPP22_GMAC_INT_STAT); |
---|
2447 | | - if (val & MVPP22_GMAC_INT_STAT_LINK) { |
---|
2448 | | - event = true; |
---|
2449 | | - val = readl(port->base + MVPP2_GMAC_STATUS0); |
---|
2450 | | - if (val & MVPP2_GMAC_STATUS0_LINK_UP) |
---|
2451 | | - link = true; |
---|
| 3000 | + queue = &port->tx_hwtstamp_queue[nq]; |
---|
| 3001 | + |
---|
| 3002 | + while (1) { |
---|
| 3003 | + r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; |
---|
| 3004 | + if (!r0) |
---|
| 3005 | + break; |
---|
| 3006 | + |
---|
| 3007 | + r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; |
---|
| 3008 | + r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff; |
---|
| 3009 | + |
---|
| 3010 | + id = (r0 >> 1) & 31; |
---|
| 3011 | + |
---|
| 3012 | + skb = queue->skb[id]; |
---|
| 3013 | + queue->skb[id] = NULL; |
---|
| 3014 | + if (skb) { |
---|
| 3015 | + u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; |
---|
| 3016 | + |
---|
| 3017 | + mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); |
---|
| 3018 | + skb_tstamp_tx(skb, &shhwtstamps); |
---|
| 3019 | + dev_kfree_skb_any(skb); |
---|
2452 | 3020 | } |
---|
2453 | 3021 | } |
---|
| 3022 | +} |
---|
| 3023 | + |
---|
| 3024 | +static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) |
---|
| 3025 | +{ |
---|
| 3026 | + void __iomem *ptp; |
---|
| 3027 | + u32 val; |
---|
| 3028 | + |
---|
| 3029 | + ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); |
---|
| 3030 | + val = readl(ptp + MVPP22_PTP_INT_CAUSE); |
---|
| 3031 | + if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) |
---|
| 3032 | + mvpp2_isr_handle_ptp_queue(port, 0); |
---|
| 3033 | + if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) |
---|
| 3034 | + mvpp2_isr_handle_ptp_queue(port, 1); |
---|
| 3035 | +} |
---|
| 3036 | + |
---|
| 3037 | +static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) |
---|
| 3038 | +{ |
---|
| 3039 | + struct net_device *dev = port->dev; |
---|
2454 | 3040 | |
---|
2455 | 3041 | if (port->phylink) { |
---|
2456 | 3042 | phylink_mac_change(port->phylink, link); |
---|
2457 | | - goto handled; |
---|
| 3043 | + return; |
---|
2458 | 3044 | } |
---|
2459 | 3045 | |
---|
2460 | | - if (!netif_running(dev) || !event) |
---|
2461 | | - goto handled; |
---|
| 3046 | + if (!netif_running(dev)) |
---|
| 3047 | + return; |
---|
2462 | 3048 | |
---|
2463 | 3049 | if (link) { |
---|
2464 | 3050 | mvpp2_interrupts_enable(port); |
---|
.. | .. |
---|
2475 | 3061 | |
---|
2476 | 3062 | mvpp2_interrupts_disable(port); |
---|
2477 | 3063 | } |
---|
| 3064 | +} |
---|
2478 | 3065 | |
---|
2479 | | -handled: |
---|
| 3066 | +static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) |
---|
| 3067 | +{ |
---|
| 3068 | + bool link; |
---|
| 3069 | + u32 val; |
---|
| 3070 | + |
---|
| 3071 | + val = readl(port->base + MVPP22_XLG_INT_STAT); |
---|
| 3072 | + if (val & MVPP22_XLG_INT_STAT_LINK) { |
---|
| 3073 | + val = readl(port->base + MVPP22_XLG_STATUS); |
---|
| 3074 | + link = (val & MVPP22_XLG_STATUS_LINK_UP); |
---|
| 3075 | + mvpp2_isr_handle_link(port, link); |
---|
| 3076 | + } |
---|
| 3077 | +} |
---|
| 3078 | + |
---|
| 3079 | +static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) |
---|
| 3080 | +{ |
---|
| 3081 | + bool link; |
---|
| 3082 | + u32 val; |
---|
| 3083 | + |
---|
| 3084 | + if (phy_interface_mode_is_rgmii(port->phy_interface) || |
---|
| 3085 | + phy_interface_mode_is_8023z(port->phy_interface) || |
---|
| 3086 | + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { |
---|
| 3087 | + val = readl(port->base + MVPP22_GMAC_INT_STAT); |
---|
| 3088 | + if (val & MVPP22_GMAC_INT_STAT_LINK) { |
---|
| 3089 | + val = readl(port->base + MVPP2_GMAC_STATUS0); |
---|
| 3090 | + link = (val & MVPP2_GMAC_STATUS0_LINK_UP); |
---|
| 3091 | + mvpp2_isr_handle_link(port, link); |
---|
| 3092 | + } |
---|
| 3093 | + } |
---|
| 3094 | +} |
---|
| 3095 | + |
---|
| 3096 | +/* Per-port interrupt for link status changes */ |
---|
| 3097 | +static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) |
---|
| 3098 | +{ |
---|
| 3099 | + struct mvpp2_port *port = (struct mvpp2_port *)dev_id; |
---|
| 3100 | + u32 val; |
---|
| 3101 | + |
---|
| 3102 | + mvpp22_gop_mask_irq(port); |
---|
| 3103 | + |
---|
| 3104 | + if (mvpp2_port_supports_xlg(port) && |
---|
| 3105 | + mvpp2_is_xlg(port->phy_interface)) { |
---|
| 3106 | + /* Check the external status register */ |
---|
| 3107 | + val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); |
---|
| 3108 | + if (val & MVPP22_XLG_EXT_INT_STAT_XLG) |
---|
| 3109 | + mvpp2_isr_handle_xlg(port); |
---|
| 3110 | + if (val & MVPP22_XLG_EXT_INT_STAT_PTP) |
---|
| 3111 | + mvpp2_isr_handle_ptp(port); |
---|
| 3112 | + } else { |
---|
| 3113 | + /* If it's not the XLG, we must be using the GMAC. |
---|
| 3114 | + * Check the summary status. |
---|
| 3115 | + */ |
---|
| 3116 | + val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); |
---|
| 3117 | + if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) |
---|
| 3118 | + mvpp2_isr_handle_gmac_internal(port); |
---|
| 3119 | + if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) |
---|
| 3120 | + mvpp2_isr_handle_ptp(port); |
---|
| 3121 | + } |
---|
| 3122 | + |
---|
2480 | 3123 | mvpp22_gop_unmask_irq(port); |
---|
2481 | 3124 | return IRQ_HANDLED; |
---|
2482 | 3125 | } |
---|
2483 | 3126 | |
---|
2484 | | -static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) |
---|
| 3127 | +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) |
---|
2485 | 3128 | { |
---|
2486 | | - ktime_t interval; |
---|
2487 | | - |
---|
2488 | | - if (!port_pcpu->timer_scheduled) { |
---|
2489 | | - port_pcpu->timer_scheduled = true; |
---|
2490 | | - interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; |
---|
2491 | | - hrtimer_start(&port_pcpu->tx_done_timer, interval, |
---|
2492 | | - HRTIMER_MODE_REL_PINNED); |
---|
2493 | | - } |
---|
2494 | | -} |
---|
2495 | | - |
---|
2496 | | -static void mvpp2_tx_proc_cb(unsigned long data) |
---|
2497 | | -{ |
---|
2498 | | - struct net_device *dev = (struct net_device *)data; |
---|
2499 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
2500 | | - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); |
---|
| 3129 | + struct net_device *dev; |
---|
| 3130 | + struct mvpp2_port *port; |
---|
| 3131 | + struct mvpp2_port_pcpu *port_pcpu; |
---|
2501 | 3132 | unsigned int tx_todo, cause; |
---|
2502 | 3133 | |
---|
| 3134 | + port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); |
---|
| 3135 | + dev = port_pcpu->dev; |
---|
| 3136 | + |
---|
2503 | 3137 | if (!netif_running(dev)) |
---|
2504 | | - return; |
---|
| 3138 | + return HRTIMER_NORESTART; |
---|
| 3139 | + |
---|
2505 | 3140 | port_pcpu->timer_scheduled = false; |
---|
| 3141 | + port = netdev_priv(dev); |
---|
2506 | 3142 | |
---|
2507 | 3143 | /* Process all the Tx queues */ |
---|
2508 | 3144 | cause = (1 << port->ntxqs) - 1; |
---|
2509 | | - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); |
---|
| 3145 | + tx_todo = mvpp2_tx_done(port, cause, |
---|
| 3146 | + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); |
---|
2510 | 3147 | |
---|
2511 | 3148 | /* Set the timer in case not all the packets were processed */ |
---|
2512 | | - if (tx_todo) |
---|
2513 | | - mvpp2_timer_set(port_pcpu); |
---|
2514 | | -} |
---|
| 3149 | + if (tx_todo && !port_pcpu->timer_scheduled) { |
---|
| 3150 | + port_pcpu->timer_scheduled = true; |
---|
| 3151 | + hrtimer_forward_now(&port_pcpu->tx_done_timer, |
---|
| 3152 | + MVPP2_TXDONE_HRTIMER_PERIOD_NS); |
---|
2515 | 3153 | |
---|
2516 | | -static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) |
---|
2517 | | -{ |
---|
2518 | | - struct mvpp2_port_pcpu *port_pcpu = container_of(timer, |
---|
2519 | | - struct mvpp2_port_pcpu, |
---|
2520 | | - tx_done_timer); |
---|
2521 | | - |
---|
2522 | | - tasklet_schedule(&port_pcpu->tx_done_tasklet); |
---|
2523 | | - |
---|
| 3154 | + return HRTIMER_RESTART; |
---|
| 3155 | + } |
---|
2524 | 3156 | return HRTIMER_NORESTART; |
---|
2525 | 3157 | } |
---|
2526 | 3158 | |
---|
.. | .. |
---|
2569 | 3201 | skb->ip_summed = CHECKSUM_NONE; |
---|
2570 | 3202 | } |
---|
2571 | 3203 | |
---|
2572 | | -/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ |
---|
| 3204 | +/* Allocate a new skb and add it to BM pool */ |
---|
2573 | 3205 | static int mvpp2_rx_refill(struct mvpp2_port *port, |
---|
2574 | | - struct mvpp2_bm_pool *bm_pool, int pool) |
---|
| 3206 | + struct mvpp2_bm_pool *bm_pool, |
---|
| 3207 | + struct page_pool *page_pool, int pool) |
---|
2575 | 3208 | { |
---|
2576 | 3209 | dma_addr_t dma_addr; |
---|
2577 | 3210 | phys_addr_t phys_addr; |
---|
2578 | 3211 | void *buf; |
---|
2579 | 3212 | |
---|
2580 | | - /* No recycle or too many buffers are in use, so allocate a new skb */ |
---|
2581 | | - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, |
---|
2582 | | - GFP_ATOMIC); |
---|
| 3213 | + buf = mvpp2_buf_alloc(port, bm_pool, page_pool, |
---|
| 3214 | + &dma_addr, &phys_addr, GFP_ATOMIC); |
---|
2583 | 3215 | if (!buf) |
---|
2584 | 3216 | return -ENOMEM; |
---|
2585 | 3217 | |
---|
.. | .. |
---|
2620 | 3252 | return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; |
---|
2621 | 3253 | } |
---|
2622 | 3254 | |
---|
| 3255 | +static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) |
---|
| 3256 | +{ |
---|
| 3257 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 3258 | + struct mvpp2_tx_queue *aggr_txq; |
---|
| 3259 | + struct mvpp2_txq_pcpu *txq_pcpu; |
---|
| 3260 | + struct mvpp2_tx_queue *txq; |
---|
| 3261 | + struct netdev_queue *nq; |
---|
| 3262 | + |
---|
| 3263 | + txq = port->txqs[txq_id]; |
---|
| 3264 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
| 3265 | + nq = netdev_get_tx_queue(port->dev, txq_id); |
---|
| 3266 | + aggr_txq = &port->priv->aggr_txqs[thread]; |
---|
| 3267 | + |
---|
| 3268 | + txq_pcpu->reserved_num -= nxmit; |
---|
| 3269 | + txq_pcpu->count += nxmit; |
---|
| 3270 | + aggr_txq->count += nxmit; |
---|
| 3271 | + |
---|
| 3272 | + /* Enable transmit */ |
---|
| 3273 | + wmb(); |
---|
| 3274 | + mvpp2_aggr_txq_pend_desc_add(port, nxmit); |
---|
| 3275 | + |
---|
| 3276 | + if (txq_pcpu->count >= txq_pcpu->stop_threshold) |
---|
| 3277 | + netif_tx_stop_queue(nq); |
---|
| 3278 | + |
---|
| 3279 | + /* Finalize TX processing */ |
---|
| 3280 | + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) |
---|
| 3281 | + mvpp2_txq_done(port, txq, txq_pcpu); |
---|
| 3282 | +} |
---|
| 3283 | + |
---|
| 3284 | +static int |
---|
| 3285 | +mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, |
---|
| 3286 | + struct xdp_frame *xdpf, bool dma_map) |
---|
| 3287 | +{ |
---|
| 3288 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 3289 | + u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | |
---|
| 3290 | + MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; |
---|
| 3291 | + enum mvpp2_tx_buf_type buf_type; |
---|
| 3292 | + struct mvpp2_txq_pcpu *txq_pcpu; |
---|
| 3293 | + struct mvpp2_tx_queue *aggr_txq; |
---|
| 3294 | + struct mvpp2_tx_desc *tx_desc; |
---|
| 3295 | + struct mvpp2_tx_queue *txq; |
---|
| 3296 | + int ret = MVPP2_XDP_TX; |
---|
| 3297 | + dma_addr_t dma_addr; |
---|
| 3298 | + |
---|
| 3299 | + txq = port->txqs[txq_id]; |
---|
| 3300 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
| 3301 | + aggr_txq = &port->priv->aggr_txqs[thread]; |
---|
| 3302 | + |
---|
| 3303 | + /* Check number of available descriptors */ |
---|
| 3304 | + if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || |
---|
| 3305 | + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { |
---|
| 3306 | + ret = MVPP2_XDP_DROPPED; |
---|
| 3307 | + goto out; |
---|
| 3308 | + } |
---|
| 3309 | + |
---|
| 3310 | + /* Get a descriptor for the first part of the packet */ |
---|
| 3311 | + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); |
---|
| 3312 | + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
---|
| 3313 | + mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); |
---|
| 3314 | + |
---|
| 3315 | + if (dma_map) { |
---|
| 3316 | + /* XDP_REDIRECT or AF_XDP */ |
---|
| 3317 | + dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, |
---|
| 3318 | + xdpf->len, DMA_TO_DEVICE); |
---|
| 3319 | + |
---|
| 3320 | + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { |
---|
| 3321 | + mvpp2_txq_desc_put(txq); |
---|
| 3322 | + ret = MVPP2_XDP_DROPPED; |
---|
| 3323 | + goto out; |
---|
| 3324 | + } |
---|
| 3325 | + |
---|
| 3326 | + buf_type = MVPP2_TYPE_XDP_NDO; |
---|
| 3327 | + } else { |
---|
| 3328 | + /* XDP_TX */ |
---|
| 3329 | + struct page *page = virt_to_page(xdpf->data); |
---|
| 3330 | + |
---|
| 3331 | + dma_addr = page_pool_get_dma_addr(page) + |
---|
| 3332 | + sizeof(*xdpf) + xdpf->headroom; |
---|
| 3333 | + dma_sync_single_for_device(port->dev->dev.parent, dma_addr, |
---|
| 3334 | + xdpf->len, DMA_BIDIRECTIONAL); |
---|
| 3335 | + |
---|
| 3336 | + buf_type = MVPP2_TYPE_XDP_TX; |
---|
| 3337 | + } |
---|
| 3338 | + |
---|
| 3339 | + mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); |
---|
| 3340 | + |
---|
| 3341 | + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
---|
| 3342 | + mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); |
---|
| 3343 | + |
---|
| 3344 | +out: |
---|
| 3345 | + return ret; |
---|
| 3346 | +} |
---|
| 3347 | + |
---|
| 3348 | +static int |
---|
| 3349 | +mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) |
---|
| 3350 | +{ |
---|
| 3351 | + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
---|
| 3352 | + struct xdp_frame *xdpf; |
---|
| 3353 | + u16 txq_id; |
---|
| 3354 | + int ret; |
---|
| 3355 | + |
---|
| 3356 | + xdpf = xdp_convert_buff_to_frame(xdp); |
---|
| 3357 | + if (unlikely(!xdpf)) |
---|
| 3358 | + return MVPP2_XDP_DROPPED; |
---|
| 3359 | + |
---|
| 3360 | + /* The first of the TX queues are used for XPS, |
---|
| 3361 | + * the second half for XDP_TX |
---|
| 3362 | + */ |
---|
| 3363 | + txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); |
---|
| 3364 | + |
---|
| 3365 | + ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); |
---|
| 3366 | + if (ret == MVPP2_XDP_TX) { |
---|
| 3367 | + u64_stats_update_begin(&stats->syncp); |
---|
| 3368 | + stats->tx_bytes += xdpf->len; |
---|
| 3369 | + stats->tx_packets++; |
---|
| 3370 | + stats->xdp_tx++; |
---|
| 3371 | + u64_stats_update_end(&stats->syncp); |
---|
| 3372 | + |
---|
| 3373 | + mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); |
---|
| 3374 | + } else { |
---|
| 3375 | + u64_stats_update_begin(&stats->syncp); |
---|
| 3376 | + stats->xdp_tx_err++; |
---|
| 3377 | + u64_stats_update_end(&stats->syncp); |
---|
| 3378 | + } |
---|
| 3379 | + |
---|
| 3380 | + return ret; |
---|
| 3381 | +} |
---|
| 3382 | + |
---|
| 3383 | +static int |
---|
| 3384 | +mvpp2_xdp_xmit(struct net_device *dev, int num_frame, |
---|
| 3385 | + struct xdp_frame **frames, u32 flags) |
---|
| 3386 | +{ |
---|
| 3387 | + struct mvpp2_port *port = netdev_priv(dev); |
---|
| 3388 | + int i, nxmit_byte = 0, nxmit = num_frame; |
---|
| 3389 | + struct mvpp2_pcpu_stats *stats; |
---|
| 3390 | + u16 txq_id; |
---|
| 3391 | + u32 ret; |
---|
| 3392 | + |
---|
| 3393 | + if (unlikely(test_bit(0, &port->state))) |
---|
| 3394 | + return -ENETDOWN; |
---|
| 3395 | + |
---|
| 3396 | + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
---|
| 3397 | + return -EINVAL; |
---|
| 3398 | + |
---|
| 3399 | + /* The first of the TX queues are used for XPS, |
---|
| 3400 | + * the second half for XDP_TX |
---|
| 3401 | + */ |
---|
| 3402 | + txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); |
---|
| 3403 | + |
---|
| 3404 | + for (i = 0; i < num_frame; i++) { |
---|
| 3405 | + ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); |
---|
| 3406 | + if (ret == MVPP2_XDP_TX) { |
---|
| 3407 | + nxmit_byte += frames[i]->len; |
---|
| 3408 | + } else { |
---|
| 3409 | + xdp_return_frame_rx_napi(frames[i]); |
---|
| 3410 | + nxmit--; |
---|
| 3411 | + } |
---|
| 3412 | + } |
---|
| 3413 | + |
---|
| 3414 | + if (likely(nxmit > 0)) |
---|
| 3415 | + mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); |
---|
| 3416 | + |
---|
| 3417 | + stats = this_cpu_ptr(port->stats); |
---|
| 3418 | + u64_stats_update_begin(&stats->syncp); |
---|
| 3419 | + stats->tx_bytes += nxmit_byte; |
---|
| 3420 | + stats->tx_packets += nxmit; |
---|
| 3421 | + stats->xdp_xmit += nxmit; |
---|
| 3422 | + stats->xdp_xmit_err += num_frame - nxmit; |
---|
| 3423 | + u64_stats_update_end(&stats->syncp); |
---|
| 3424 | + |
---|
| 3425 | + return nxmit; |
---|
| 3426 | +} |
---|
| 3427 | + |
---|
| 3428 | +static int |
---|
| 3429 | +mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, |
---|
| 3430 | + struct bpf_prog *prog, struct xdp_buff *xdp, |
---|
| 3431 | + struct page_pool *pp, struct mvpp2_pcpu_stats *stats) |
---|
| 3432 | +{ |
---|
| 3433 | + unsigned int len, sync, err; |
---|
| 3434 | + struct page *page; |
---|
| 3435 | + u32 ret, act; |
---|
| 3436 | + |
---|
| 3437 | + len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; |
---|
| 3438 | + act = bpf_prog_run_xdp(prog, xdp); |
---|
| 3439 | + |
---|
| 3440 | + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ |
---|
| 3441 | + sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; |
---|
| 3442 | + sync = max(sync, len); |
---|
| 3443 | + |
---|
| 3444 | + switch (act) { |
---|
| 3445 | + case XDP_PASS: |
---|
| 3446 | + stats->xdp_pass++; |
---|
| 3447 | + ret = MVPP2_XDP_PASS; |
---|
| 3448 | + break; |
---|
| 3449 | + case XDP_REDIRECT: |
---|
| 3450 | + err = xdp_do_redirect(port->dev, xdp, prog); |
---|
| 3451 | + if (unlikely(err)) { |
---|
| 3452 | + ret = MVPP2_XDP_DROPPED; |
---|
| 3453 | + page = virt_to_head_page(xdp->data); |
---|
| 3454 | + page_pool_put_page(pp, page, sync, true); |
---|
| 3455 | + } else { |
---|
| 3456 | + ret = MVPP2_XDP_REDIR; |
---|
| 3457 | + stats->xdp_redirect++; |
---|
| 3458 | + } |
---|
| 3459 | + break; |
---|
| 3460 | + case XDP_TX: |
---|
| 3461 | + ret = mvpp2_xdp_xmit_back(port, xdp); |
---|
| 3462 | + if (ret != MVPP2_XDP_TX) { |
---|
| 3463 | + page = virt_to_head_page(xdp->data); |
---|
| 3464 | + page_pool_put_page(pp, page, sync, true); |
---|
| 3465 | + } |
---|
| 3466 | + break; |
---|
| 3467 | + default: |
---|
| 3468 | + bpf_warn_invalid_xdp_action(act); |
---|
| 3469 | + fallthrough; |
---|
| 3470 | + case XDP_ABORTED: |
---|
| 3471 | + trace_xdp_exception(port->dev, prog, act); |
---|
| 3472 | + fallthrough; |
---|
| 3473 | + case XDP_DROP: |
---|
| 3474 | + page = virt_to_head_page(xdp->data); |
---|
| 3475 | + page_pool_put_page(pp, page, sync, true); |
---|
| 3476 | + ret = MVPP2_XDP_DROPPED; |
---|
| 3477 | + stats->xdp_drop++; |
---|
| 3478 | + break; |
---|
| 3479 | + } |
---|
| 3480 | + |
---|
| 3481 | + return ret; |
---|
| 3482 | +} |
---|
| 3483 | + |
---|
| 3484 | +static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc, |
---|
| 3485 | + int pool, u32 rx_status) |
---|
| 3486 | +{ |
---|
| 3487 | + phys_addr_t phys_addr, phys_addr_next; |
---|
| 3488 | + dma_addr_t dma_addr, dma_addr_next; |
---|
| 3489 | + struct mvpp2_buff_hdr *buff_hdr; |
---|
| 3490 | + |
---|
| 3491 | + phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); |
---|
| 3492 | + dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); |
---|
| 3493 | + |
---|
| 3494 | + do { |
---|
| 3495 | + buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr); |
---|
| 3496 | + |
---|
| 3497 | + phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr); |
---|
| 3498 | + dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr); |
---|
| 3499 | + |
---|
| 3500 | + if (port->priv->hw_version >= MVPP22) { |
---|
| 3501 | + phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32); |
---|
| 3502 | + dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32); |
---|
| 3503 | + } |
---|
| 3504 | + |
---|
| 3505 | + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
---|
| 3506 | + |
---|
| 3507 | + phys_addr = phys_addr_next; |
---|
| 3508 | + dma_addr = dma_addr_next; |
---|
| 3509 | + |
---|
| 3510 | + } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info))); |
---|
| 3511 | +} |
---|
| 3512 | + |
---|
2623 | 3513 | /* Main rx processing */ |
---|
2624 | 3514 | static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, |
---|
2625 | 3515 | int rx_todo, struct mvpp2_rx_queue *rxq) |
---|
2626 | 3516 | { |
---|
2627 | 3517 | struct net_device *dev = port->dev; |
---|
| 3518 | + struct mvpp2_pcpu_stats ps = {}; |
---|
| 3519 | + enum dma_data_direction dma_dir; |
---|
| 3520 | + struct bpf_prog *xdp_prog; |
---|
| 3521 | + struct xdp_buff xdp; |
---|
2628 | 3522 | int rx_received; |
---|
2629 | 3523 | int rx_done = 0; |
---|
2630 | | - u32 rcvd_pkts = 0; |
---|
2631 | | - u32 rcvd_bytes = 0; |
---|
| 3524 | + u32 xdp_ret = 0; |
---|
| 3525 | + |
---|
| 3526 | + rcu_read_lock(); |
---|
| 3527 | + |
---|
| 3528 | + xdp_prog = READ_ONCE(port->xdp_prog); |
---|
2632 | 3529 | |
---|
2633 | 3530 | /* Get number of received packets and clamp the to-do */ |
---|
2634 | 3531 | rx_received = mvpp2_rxq_received(port, rxq->id); |
---|
.. | .. |
---|
2638 | 3535 | while (rx_done < rx_todo) { |
---|
2639 | 3536 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
---|
2640 | 3537 | struct mvpp2_bm_pool *bm_pool; |
---|
| 3538 | + struct page_pool *pp = NULL; |
---|
2641 | 3539 | struct sk_buff *skb; |
---|
2642 | 3540 | unsigned int frag_size; |
---|
2643 | 3541 | dma_addr_t dma_addr; |
---|
2644 | 3542 | phys_addr_t phys_addr; |
---|
2645 | | - u32 rx_status; |
---|
2646 | | - int pool, rx_bytes, err; |
---|
| 3543 | + u32 rx_status, timestamp; |
---|
| 3544 | + int pool, rx_bytes, err, ret; |
---|
2647 | 3545 | void *data; |
---|
2648 | 3546 | |
---|
2649 | 3547 | rx_done++; |
---|
.. | .. |
---|
2658 | 3556 | MVPP2_RXD_BM_POOL_ID_OFFS; |
---|
2659 | 3557 | bm_pool = &port->priv->bm_pools[pool]; |
---|
2660 | 3558 | |
---|
| 3559 | + if (port->priv->percpu_pools) { |
---|
| 3560 | + pp = port->priv->page_pool[pool]; |
---|
| 3561 | + dma_dir = page_pool_get_dma_dir(pp); |
---|
| 3562 | + } else { |
---|
| 3563 | + dma_dir = DMA_FROM_DEVICE; |
---|
| 3564 | + } |
---|
| 3565 | + |
---|
| 3566 | + dma_sync_single_for_cpu(dev->dev.parent, dma_addr, |
---|
| 3567 | + rx_bytes + MVPP2_MH_SIZE, |
---|
| 3568 | + dma_dir); |
---|
| 3569 | + |
---|
| 3570 | + /* Buffer header not supported */ |
---|
| 3571 | + if (rx_status & MVPP2_RXD_BUF_HDR) |
---|
| 3572 | + goto err_drop_frame; |
---|
| 3573 | + |
---|
2661 | 3574 | /* In case of an error, release the requested buffer pointer |
---|
2662 | 3575 | * to the Buffer Manager. This request process is controlled |
---|
2663 | 3576 | * by the hardware, and the information about the buffer is |
---|
2664 | 3577 | * comprised by the RX descriptor. |
---|
2665 | 3578 | */ |
---|
2666 | | - if (rx_status & MVPP2_RXD_ERR_SUMMARY) { |
---|
2667 | | -err_drop_frame: |
---|
2668 | | - dev->stats.rx_errors++; |
---|
2669 | | - mvpp2_rx_error(port, rx_desc); |
---|
2670 | | - /* Return the buffer to the pool */ |
---|
2671 | | - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
---|
2672 | | - continue; |
---|
2673 | | - } |
---|
| 3579 | + if (rx_status & MVPP2_RXD_ERR_SUMMARY) |
---|
| 3580 | + goto err_drop_frame; |
---|
| 3581 | + |
---|
| 3582 | + /* Prefetch header */ |
---|
| 3583 | + prefetch(data); |
---|
2674 | 3584 | |
---|
2675 | 3585 | if (bm_pool->frag_size > PAGE_SIZE) |
---|
2676 | 3586 | frag_size = 0; |
---|
2677 | 3587 | else |
---|
2678 | 3588 | frag_size = bm_pool->frag_size; |
---|
| 3589 | + |
---|
| 3590 | + if (xdp_prog) { |
---|
| 3591 | + xdp.data_hard_start = data; |
---|
| 3592 | + xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM; |
---|
| 3593 | + xdp.data_end = xdp.data + rx_bytes; |
---|
| 3594 | + xdp.frame_sz = PAGE_SIZE; |
---|
| 3595 | + |
---|
| 3596 | + if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) |
---|
| 3597 | + xdp.rxq = &rxq->xdp_rxq_short; |
---|
| 3598 | + else |
---|
| 3599 | + xdp.rxq = &rxq->xdp_rxq_long; |
---|
| 3600 | + |
---|
| 3601 | + xdp_set_data_meta_invalid(&xdp); |
---|
| 3602 | + |
---|
| 3603 | + ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps); |
---|
| 3604 | + |
---|
| 3605 | + if (ret) { |
---|
| 3606 | + xdp_ret |= ret; |
---|
| 3607 | + err = mvpp2_rx_refill(port, bm_pool, pp, pool); |
---|
| 3608 | + if (err) { |
---|
| 3609 | + netdev_err(port->dev, "failed to refill BM pools\n"); |
---|
| 3610 | + goto err_drop_frame; |
---|
| 3611 | + } |
---|
| 3612 | + |
---|
| 3613 | + ps.rx_packets++; |
---|
| 3614 | + ps.rx_bytes += rx_bytes; |
---|
| 3615 | + continue; |
---|
| 3616 | + } |
---|
| 3617 | + } |
---|
2679 | 3618 | |
---|
2680 | 3619 | skb = build_skb(data, frag_size); |
---|
2681 | 3620 | if (!skb) { |
---|
.. | .. |
---|
2683 | 3622 | goto err_drop_frame; |
---|
2684 | 3623 | } |
---|
2685 | 3624 | |
---|
2686 | | - err = mvpp2_rx_refill(port, bm_pool, pool); |
---|
| 3625 | + /* If we have RX hardware timestamping enabled, grab the |
---|
| 3626 | + * timestamp from the queue and convert. |
---|
| 3627 | + */ |
---|
| 3628 | + if (mvpp22_rx_hwtstamping(port)) { |
---|
| 3629 | + timestamp = le32_to_cpu(rx_desc->pp22.timestamp); |
---|
| 3630 | + mvpp22_tai_tstamp(port->priv->tai, timestamp, |
---|
| 3631 | + skb_hwtstamps(skb)); |
---|
| 3632 | + } |
---|
| 3633 | + |
---|
| 3634 | + err = mvpp2_rx_refill(port, bm_pool, pp, pool); |
---|
2687 | 3635 | if (err) { |
---|
2688 | 3636 | netdev_err(port->dev, "failed to refill BM pools\n"); |
---|
| 3637 | + dev_kfree_skb_any(skb); |
---|
2689 | 3638 | goto err_drop_frame; |
---|
2690 | 3639 | } |
---|
2691 | 3640 | |
---|
2692 | | - dma_unmap_single(dev->dev.parent, dma_addr, |
---|
2693 | | - bm_pool->buf_size, DMA_FROM_DEVICE); |
---|
| 3641 | + if (pp) |
---|
| 3642 | + page_pool_release_page(pp, virt_to_page(data)); |
---|
| 3643 | + else |
---|
| 3644 | + dma_unmap_single_attrs(dev->dev.parent, dma_addr, |
---|
| 3645 | + bm_pool->buf_size, DMA_FROM_DEVICE, |
---|
| 3646 | + DMA_ATTR_SKIP_CPU_SYNC); |
---|
2694 | 3647 | |
---|
2695 | | - rcvd_pkts++; |
---|
2696 | | - rcvd_bytes += rx_bytes; |
---|
| 3648 | + ps.rx_packets++; |
---|
| 3649 | + ps.rx_bytes += rx_bytes; |
---|
2697 | 3650 | |
---|
2698 | | - skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); |
---|
| 3651 | + skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); |
---|
2699 | 3652 | skb_put(skb, rx_bytes); |
---|
2700 | 3653 | skb->protocol = eth_type_trans(skb, dev); |
---|
2701 | 3654 | mvpp2_rx_csum(port, rx_status, skb); |
---|
2702 | 3655 | |
---|
2703 | 3656 | napi_gro_receive(napi, skb); |
---|
| 3657 | + continue; |
---|
| 3658 | + |
---|
| 3659 | +err_drop_frame: |
---|
| 3660 | + dev->stats.rx_errors++; |
---|
| 3661 | + mvpp2_rx_error(port, rx_desc); |
---|
| 3662 | + /* Return the buffer to the pool */ |
---|
| 3663 | + if (rx_status & MVPP2_RXD_BUF_HDR) |
---|
| 3664 | + mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); |
---|
| 3665 | + else |
---|
| 3666 | + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
---|
2704 | 3667 | } |
---|
2705 | 3668 | |
---|
2706 | | - if (rcvd_pkts) { |
---|
| 3669 | + rcu_read_unlock(); |
---|
| 3670 | + |
---|
| 3671 | + if (xdp_ret & MVPP2_XDP_REDIR) |
---|
| 3672 | + xdp_do_flush_map(); |
---|
| 3673 | + |
---|
| 3674 | + if (ps.rx_packets) { |
---|
2707 | 3675 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
---|
2708 | 3676 | |
---|
2709 | 3677 | u64_stats_update_begin(&stats->syncp); |
---|
2710 | | - stats->rx_packets += rcvd_pkts; |
---|
2711 | | - stats->rx_bytes += rcvd_bytes; |
---|
| 3678 | + stats->rx_packets += ps.rx_packets; |
---|
| 3679 | + stats->rx_bytes += ps.rx_bytes; |
---|
| 3680 | + /* xdp */ |
---|
| 3681 | + stats->xdp_redirect += ps.xdp_redirect; |
---|
| 3682 | + stats->xdp_pass += ps.xdp_pass; |
---|
| 3683 | + stats->xdp_drop += ps.xdp_drop; |
---|
2712 | 3684 | u64_stats_update_end(&stats->syncp); |
---|
2713 | 3685 | } |
---|
2714 | 3686 | |
---|
.. | .. |
---|
2723 | 3695 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
---|
2724 | 3696 | struct mvpp2_tx_desc *desc) |
---|
2725 | 3697 | { |
---|
2726 | | - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); |
---|
| 3698 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 3699 | + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
2727 | 3700 | |
---|
2728 | 3701 | dma_addr_t buf_dma_addr = |
---|
2729 | 3702 | mvpp2_txdesc_dma_addr_get(port, desc); |
---|
.. | .. |
---|
2735 | 3708 | mvpp2_txq_desc_put(txq); |
---|
2736 | 3709 | } |
---|
2737 | 3710 | |
---|
| 3711 | +static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, |
---|
| 3712 | + struct mvpp2_tx_desc *desc) |
---|
| 3713 | +{ |
---|
| 3714 | + /* We only need to clear the low bits */ |
---|
| 3715 | + if (port->priv->hw_version != MVPP21) |
---|
| 3716 | + desc->pp22.ptp_descriptor &= |
---|
| 3717 | + cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); |
---|
| 3718 | +} |
---|
| 3719 | + |
---|
| 3720 | +static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, |
---|
| 3721 | + struct mvpp2_tx_desc *tx_desc, |
---|
| 3722 | + struct sk_buff *skb) |
---|
| 3723 | +{ |
---|
| 3724 | + struct mvpp2_hwtstamp_queue *queue; |
---|
| 3725 | + unsigned int mtype, type, i; |
---|
| 3726 | + struct ptp_header *hdr; |
---|
| 3727 | + u64 ptpdesc; |
---|
| 3728 | + |
---|
| 3729 | + if (port->priv->hw_version == MVPP21 || |
---|
| 3730 | + port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) |
---|
| 3731 | + return false; |
---|
| 3732 | + |
---|
| 3733 | + type = ptp_classify_raw(skb); |
---|
| 3734 | + if (!type) |
---|
| 3735 | + return false; |
---|
| 3736 | + |
---|
| 3737 | + hdr = ptp_parse_header(skb, type); |
---|
| 3738 | + if (!hdr) |
---|
| 3739 | + return false; |
---|
| 3740 | + |
---|
| 3741 | + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
---|
| 3742 | + |
---|
| 3743 | + ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | |
---|
| 3744 | + MVPP22_PTP_ACTION_CAPTURE; |
---|
| 3745 | + queue = &port->tx_hwtstamp_queue[0]; |
---|
| 3746 | + |
---|
| 3747 | + switch (type & PTP_CLASS_VMASK) { |
---|
| 3748 | + case PTP_CLASS_V1: |
---|
| 3749 | + ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); |
---|
| 3750 | + break; |
---|
| 3751 | + |
---|
| 3752 | + case PTP_CLASS_V2: |
---|
| 3753 | + ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); |
---|
| 3754 | + mtype = hdr->tsmt & 15; |
---|
| 3755 | + /* Direct PTP Sync messages to queue 1 */ |
---|
| 3756 | + if (mtype == 0) { |
---|
| 3757 | + ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; |
---|
| 3758 | + queue = &port->tx_hwtstamp_queue[1]; |
---|
| 3759 | + } |
---|
| 3760 | + break; |
---|
| 3761 | + } |
---|
| 3762 | + |
---|
| 3763 | + /* Take a reference on the skb and insert into our queue */ |
---|
| 3764 | + i = queue->next; |
---|
| 3765 | + queue->next = (i + 1) & 31; |
---|
| 3766 | + if (queue->skb[i]) |
---|
| 3767 | + dev_kfree_skb_any(queue->skb[i]); |
---|
| 3768 | + queue->skb[i] = skb_get(skb); |
---|
| 3769 | + |
---|
| 3770 | + ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); |
---|
| 3771 | + |
---|
| 3772 | + /* |
---|
| 3773 | + * 3:0 - PTPAction |
---|
| 3774 | + * 6:4 - PTPPacketFormat |
---|
| 3775 | + * 7 - PTP_CF_WraparoundCheckEn |
---|
| 3776 | + * 9:8 - IngressTimestampSeconds[1:0] |
---|
| 3777 | + * 10 - Reserved |
---|
| 3778 | + * 11 - MACTimestampingEn |
---|
| 3779 | + * 17:12 - PTP_TimestampQueueEntryID[5:0] |
---|
| 3780 | + * 18 - PTPTimestampQueueSelect |
---|
| 3781 | + * 19 - UDPChecksumUpdateEn |
---|
| 3782 | + * 27:20 - TimestampOffset |
---|
| 3783 | + * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header |
---|
| 3784 | + * NTPTs, Y.1731 - L3 to timestamp entry |
---|
| 3785 | + * 35:28 - UDP Checksum Offset |
---|
| 3786 | + * |
---|
| 3787 | + * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) |
---|
| 3788 | + */ |
---|
| 3789 | + tx_desc->pp22.ptp_descriptor &= |
---|
| 3790 | + cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); |
---|
| 3791 | + tx_desc->pp22.ptp_descriptor |= |
---|
| 3792 | + cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); |
---|
| 3793 | + tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); |
---|
| 3794 | + tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); |
---|
| 3795 | + |
---|
| 3796 | + return true; |
---|
| 3797 | +} |
---|
| 3798 | + |
---|
2738 | 3799 | /* Handle tx fragmentation processing */ |
---|
2739 | 3800 | static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, |
---|
2740 | 3801 | struct mvpp2_tx_queue *aggr_txq, |
---|
2741 | 3802 | struct mvpp2_tx_queue *txq) |
---|
2742 | 3803 | { |
---|
2743 | | - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); |
---|
| 3804 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 3805 | + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
2744 | 3806 | struct mvpp2_tx_desc *tx_desc; |
---|
2745 | 3807 | int i; |
---|
2746 | 3808 | dma_addr_t buf_dma_addr; |
---|
2747 | 3809 | |
---|
2748 | 3810 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
---|
2749 | 3811 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
---|
2750 | | - void *addr = page_address(frag->page.p) + frag->page_offset; |
---|
| 3812 | + void *addr = skb_frag_address(frag); |
---|
2751 | 3813 | |
---|
2752 | 3814 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); |
---|
| 3815 | + mvpp2_txdesc_clear_ptp(port, tx_desc); |
---|
2753 | 3816 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
---|
2754 | | - mvpp2_txdesc_size_set(port, tx_desc, frag->size); |
---|
| 3817 | + mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); |
---|
2755 | 3818 | |
---|
2756 | 3819 | buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, |
---|
2757 | | - frag->size, DMA_TO_DEVICE); |
---|
| 3820 | + skb_frag_size(frag), |
---|
| 3821 | + DMA_TO_DEVICE); |
---|
2758 | 3822 | if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { |
---|
2759 | 3823 | mvpp2_txq_desc_put(txq); |
---|
2760 | 3824 | goto cleanup; |
---|
.. | .. |
---|
2766 | 3830 | /* Last descriptor */ |
---|
2767 | 3831 | mvpp2_txdesc_cmd_set(port, tx_desc, |
---|
2768 | 3832 | MVPP2_TXD_L_DESC); |
---|
2769 | | - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); |
---|
| 3833 | + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); |
---|
2770 | 3834 | } else { |
---|
2771 | 3835 | /* Descriptor in the middle: Not First, Not Last */ |
---|
2772 | 3836 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); |
---|
2773 | | - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); |
---|
| 3837 | + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
---|
2774 | 3838 | } |
---|
2775 | 3839 | } |
---|
2776 | 3840 | |
---|
.. | .. |
---|
2798 | 3862 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); |
---|
2799 | 3863 | dma_addr_t addr; |
---|
2800 | 3864 | |
---|
| 3865 | + mvpp2_txdesc_clear_ptp(port, tx_desc); |
---|
2801 | 3866 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
---|
2802 | 3867 | mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); |
---|
2803 | 3868 | |
---|
.. | .. |
---|
2808 | 3873 | mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | |
---|
2809 | 3874 | MVPP2_TXD_F_DESC | |
---|
2810 | 3875 | MVPP2_TXD_PADDING_DISABLE); |
---|
2811 | | - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); |
---|
| 3876 | + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
---|
2812 | 3877 | } |
---|
2813 | 3878 | |
---|
2814 | 3879 | static inline int mvpp2_tso_put_data(struct sk_buff *skb, |
---|
.. | .. |
---|
2822 | 3887 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); |
---|
2823 | 3888 | dma_addr_t buf_dma_addr; |
---|
2824 | 3889 | |
---|
| 3890 | + mvpp2_txdesc_clear_ptp(port, tx_desc); |
---|
2825 | 3891 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
---|
2826 | 3892 | mvpp2_txdesc_size_set(port, tx_desc, sz); |
---|
2827 | 3893 | |
---|
.. | .. |
---|
2837 | 3903 | if (!left) { |
---|
2838 | 3904 | mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); |
---|
2839 | 3905 | if (last) { |
---|
2840 | | - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); |
---|
| 3906 | + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); |
---|
2841 | 3907 | return 0; |
---|
2842 | 3908 | } |
---|
2843 | 3909 | } else { |
---|
2844 | 3910 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); |
---|
2845 | 3911 | } |
---|
2846 | 3912 | |
---|
2847 | | - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); |
---|
| 3913 | + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
---|
2848 | 3914 | return 0; |
---|
2849 | 3915 | } |
---|
2850 | 3916 | |
---|
.. | .. |
---|
2854 | 3920 | struct mvpp2_txq_pcpu *txq_pcpu) |
---|
2855 | 3921 | { |
---|
2856 | 3922 | struct mvpp2_port *port = netdev_priv(dev); |
---|
| 3923 | + int hdr_sz, i, len, descs = 0; |
---|
2857 | 3924 | struct tso_t tso; |
---|
2858 | | - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); |
---|
2859 | | - int i, len, descs = 0; |
---|
2860 | 3925 | |
---|
2861 | 3926 | /* Check number of available descriptors */ |
---|
2862 | | - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, |
---|
2863 | | - tso_count_descs(skb)) || |
---|
2864 | | - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, |
---|
| 3927 | + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || |
---|
| 3928 | + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, |
---|
2865 | 3929 | tso_count_descs(skb))) |
---|
2866 | 3930 | return 0; |
---|
2867 | 3931 | |
---|
2868 | | - tso_start(skb, &tso); |
---|
| 3932 | + hdr_sz = tso_start(skb, &tso); |
---|
| 3933 | + |
---|
2869 | 3934 | len = skb->len - hdr_sz; |
---|
2870 | 3935 | while (len > 0) { |
---|
2871 | 3936 | int left = min_t(int, skb_shinfo(skb)->gso_size, len); |
---|
.. | .. |
---|
2908 | 3973 | struct mvpp2_txq_pcpu *txq_pcpu; |
---|
2909 | 3974 | struct mvpp2_tx_desc *tx_desc; |
---|
2910 | 3975 | dma_addr_t buf_dma_addr; |
---|
| 3976 | + unsigned long flags = 0; |
---|
| 3977 | + unsigned int thread; |
---|
2911 | 3978 | int frags = 0; |
---|
2912 | 3979 | u16 txq_id; |
---|
2913 | 3980 | u32 tx_cmd; |
---|
2914 | 3981 | |
---|
| 3982 | + thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
| 3983 | + |
---|
2915 | 3984 | txq_id = skb_get_queue_mapping(skb); |
---|
2916 | 3985 | txq = port->txqs[txq_id]; |
---|
2917 | | - txq_pcpu = this_cpu_ptr(txq->pcpu); |
---|
2918 | | - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; |
---|
| 3986 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
| 3987 | + aggr_txq = &port->priv->aggr_txqs[thread]; |
---|
| 3988 | + |
---|
| 3989 | + if (test_bit(thread, &port->priv->lock_map)) |
---|
| 3990 | + spin_lock_irqsave(&port->tx_lock[thread], flags); |
---|
2919 | 3991 | |
---|
2920 | 3992 | if (skb_is_gso(skb)) { |
---|
2921 | 3993 | frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); |
---|
.. | .. |
---|
2924 | 3996 | frags = skb_shinfo(skb)->nr_frags + 1; |
---|
2925 | 3997 | |
---|
2926 | 3998 | /* Check number of available descriptors */ |
---|
2927 | | - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || |
---|
2928 | | - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, |
---|
2929 | | - txq_pcpu, frags)) { |
---|
| 3999 | + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || |
---|
| 4000 | + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { |
---|
2930 | 4001 | frags = 0; |
---|
2931 | 4002 | goto out; |
---|
2932 | 4003 | } |
---|
2933 | 4004 | |
---|
2934 | 4005 | /* Get a descriptor for the first part of the packet */ |
---|
2935 | 4006 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); |
---|
| 4007 | + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || |
---|
| 4008 | + !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) |
---|
| 4009 | + mvpp2_txdesc_clear_ptp(port, tx_desc); |
---|
2936 | 4010 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
---|
2937 | 4011 | mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); |
---|
2938 | 4012 | |
---|
.. | .. |
---|
2952 | 4026 | /* First and Last descriptor */ |
---|
2953 | 4027 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; |
---|
2954 | 4028 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
---|
2955 | | - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); |
---|
| 4029 | + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); |
---|
2956 | 4030 | } else { |
---|
2957 | 4031 | /* First but not Last */ |
---|
2958 | 4032 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; |
---|
2959 | 4033 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
---|
2960 | | - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); |
---|
| 4034 | + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
---|
2961 | 4035 | |
---|
2962 | 4036 | /* Continue with other skb fragments */ |
---|
2963 | 4037 | if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { |
---|
.. | .. |
---|
2968 | 4042 | |
---|
2969 | 4043 | out: |
---|
2970 | 4044 | if (frags > 0) { |
---|
2971 | | - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
---|
| 4045 | + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); |
---|
2972 | 4046 | struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); |
---|
2973 | 4047 | |
---|
2974 | 4048 | txq_pcpu->reserved_num -= frags; |
---|
.. | .. |
---|
2998 | 4072 | /* Set the timer in case not all frags were processed */ |
---|
2999 | 4073 | if (!port->has_tx_irqs && txq_pcpu->count <= frags && |
---|
3000 | 4074 | txq_pcpu->count > 0) { |
---|
3001 | | - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); |
---|
| 4075 | + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); |
---|
3002 | 4076 | |
---|
3003 | | - mvpp2_timer_set(port_pcpu); |
---|
| 4077 | + if (!port_pcpu->timer_scheduled) { |
---|
| 4078 | + port_pcpu->timer_scheduled = true; |
---|
| 4079 | + hrtimer_start(&port_pcpu->tx_done_timer, |
---|
| 4080 | + MVPP2_TXDONE_HRTIMER_PERIOD_NS, |
---|
| 4081 | + HRTIMER_MODE_REL_PINNED_SOFT); |
---|
| 4082 | + } |
---|
3004 | 4083 | } |
---|
| 4084 | + |
---|
| 4085 | + if (test_bit(thread, &port->priv->lock_map)) |
---|
| 4086 | + spin_unlock_irqrestore(&port->tx_lock[thread], flags); |
---|
3005 | 4087 | |
---|
3006 | 4088 | return NETDEV_TX_OK; |
---|
3007 | 4089 | } |
---|
.. | .. |
---|
3022 | 4104 | int rx_done = 0; |
---|
3023 | 4105 | struct mvpp2_port *port = netdev_priv(napi->dev); |
---|
3024 | 4106 | struct mvpp2_queue_vector *qv; |
---|
3025 | | - int cpu = smp_processor_id(); |
---|
| 4107 | + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
---|
3026 | 4108 | |
---|
3027 | 4109 | qv = container_of(napi, struct mvpp2_queue_vector, napi); |
---|
3028 | 4110 | |
---|
.. | .. |
---|
3036 | 4118 | * |
---|
3037 | 4119 | * Each CPU has its own Rx/Tx cause register |
---|
3038 | 4120 | */ |
---|
3039 | | - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, |
---|
| 4121 | + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, |
---|
3040 | 4122 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); |
---|
3041 | 4123 | |
---|
3042 | 4124 | cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; |
---|
.. | .. |
---|
3045 | 4127 | |
---|
3046 | 4128 | /* Clear the cause register */ |
---|
3047 | 4129 | mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); |
---|
3048 | | - mvpp2_percpu_write(port->priv, cpu, |
---|
| 4130 | + mvpp2_thread_write(port->priv, thread, |
---|
3049 | 4131 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id), |
---|
3050 | 4132 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); |
---|
3051 | 4133 | } |
---|
.. | .. |
---|
3097 | 4179 | { |
---|
3098 | 4180 | u32 ctrl3; |
---|
3099 | 4181 | |
---|
| 4182 | + /* Set the GMAC & XLG MAC in reset */ |
---|
| 4183 | + mvpp2_mac_reset_assert(port); |
---|
| 4184 | + |
---|
| 4185 | + /* Set the MPCS and XPCS in reset */ |
---|
| 4186 | + mvpp22_pcs_reset_assert(port); |
---|
| 4187 | + |
---|
3100 | 4188 | /* comphy reconfiguration */ |
---|
3101 | 4189 | mvpp22_comphy_init(port); |
---|
3102 | 4190 | |
---|
3103 | 4191 | /* gop reconfiguration */ |
---|
3104 | 4192 | mvpp22_gop_init(port); |
---|
3105 | 4193 | |
---|
3106 | | - /* Only GOP port 0 has an XLG MAC */ |
---|
3107 | | - if (port->gop_id == 0) { |
---|
| 4194 | + mvpp22_pcs_reset_deassert(port); |
---|
| 4195 | + |
---|
| 4196 | + if (mvpp2_port_supports_xlg(port)) { |
---|
3108 | 4197 | ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); |
---|
3109 | 4198 | ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; |
---|
3110 | 4199 | |
---|
3111 | | - if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || |
---|
3112 | | - port->phy_interface == PHY_INTERFACE_MODE_10GKR) |
---|
| 4200 | + if (mvpp2_is_xlg(port->phy_interface)) |
---|
3113 | 4201 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; |
---|
3114 | 4202 | else |
---|
3115 | 4203 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; |
---|
.. | .. |
---|
3117 | 4205 | writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); |
---|
3118 | 4206 | } |
---|
3119 | 4207 | |
---|
3120 | | - if (port->gop_id == 0 && |
---|
3121 | | - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || |
---|
3122 | | - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) |
---|
| 4208 | + if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface)) |
---|
3123 | 4209 | mvpp2_xlg_max_rx_size_set(port); |
---|
3124 | 4210 | else |
---|
3125 | 4211 | mvpp2_gmac_max_rx_size_set(port); |
---|
.. | .. |
---|
3135 | 4221 | for (i = 0; i < port->nqvecs; i++) |
---|
3136 | 4222 | napi_enable(&port->qvecs[i].napi); |
---|
3137 | 4223 | |
---|
3138 | | - /* Enable interrupts on all CPUs */ |
---|
| 4224 | + /* Enable interrupts on all threads */ |
---|
3139 | 4225 | mvpp2_interrupts_enable(port); |
---|
3140 | 4226 | |
---|
3141 | 4227 | if (port->priv->hw_version == MVPP22) |
---|
3142 | 4228 | mvpp22_mode_reconfigure(port); |
---|
3143 | 4229 | |
---|
3144 | 4230 | if (port->phylink) { |
---|
3145 | | - netif_carrier_off(port->dev); |
---|
3146 | 4231 | phylink_start(port->phylink); |
---|
3147 | 4232 | } else { |
---|
3148 | | - /* Phylink isn't used as of now for ACPI, so the MAC has to be |
---|
3149 | | - * configured manually when the interface is started. This will |
---|
3150 | | - * be removed as soon as the phylink ACPI support lands in. |
---|
3151 | | - */ |
---|
3152 | | - struct phylink_link_state state = { |
---|
3153 | | - .interface = port->phy_interface, |
---|
3154 | | - }; |
---|
3155 | | - mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); |
---|
3156 | | - mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface, |
---|
3157 | | - NULL); |
---|
| 4233 | + mvpp2_acpi_start(port); |
---|
3158 | 4234 | } |
---|
3159 | 4235 | |
---|
3160 | 4236 | netif_tx_start_all_queues(port->dev); |
---|
| 4237 | + |
---|
| 4238 | + clear_bit(0, &port->state); |
---|
3161 | 4239 | } |
---|
3162 | 4240 | |
---|
3163 | 4241 | /* Set hw internals when stopping port */ |
---|
.. | .. |
---|
3165 | 4243 | { |
---|
3166 | 4244 | int i; |
---|
3167 | 4245 | |
---|
3168 | | - /* Disable interrupts on all CPUs */ |
---|
| 4246 | + set_bit(0, &port->state); |
---|
| 4247 | + |
---|
| 4248 | + /* Disable interrupts on all threads */ |
---|
3169 | 4249 | mvpp2_interrupts_disable(port); |
---|
3170 | 4250 | |
---|
3171 | 4251 | for (i = 0; i < port->nqvecs; i++) |
---|
.. | .. |
---|
3238 | 4318 | for (i = 0; i < port->nqvecs; i++) { |
---|
3239 | 4319 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
---|
3240 | 4320 | |
---|
3241 | | - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) |
---|
| 4321 | + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
---|
| 4322 | + qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); |
---|
| 4323 | + if (!qv->mask) { |
---|
| 4324 | + err = -ENOMEM; |
---|
| 4325 | + goto err; |
---|
| 4326 | + } |
---|
| 4327 | + |
---|
3242 | 4328 | irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); |
---|
| 4329 | + } |
---|
3243 | 4330 | |
---|
3244 | 4331 | err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); |
---|
3245 | 4332 | if (err) |
---|
3246 | 4333 | goto err; |
---|
3247 | 4334 | |
---|
3248 | | - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) |
---|
3249 | | - irq_set_affinity_hint(qv->irq, |
---|
3250 | | - cpumask_of(qv->sw_thread_id)); |
---|
| 4335 | + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
---|
| 4336 | + unsigned int cpu; |
---|
| 4337 | + |
---|
| 4338 | + for_each_present_cpu(cpu) { |
---|
| 4339 | + if (mvpp2_cpu_to_thread(port->priv, cpu) == |
---|
| 4340 | + qv->sw_thread_id) |
---|
| 4341 | + cpumask_set_cpu(cpu, qv->mask); |
---|
| 4342 | + } |
---|
| 4343 | + |
---|
| 4344 | + irq_set_affinity_hint(qv->irq, qv->mask); |
---|
| 4345 | + } |
---|
3251 | 4346 | } |
---|
3252 | 4347 | |
---|
3253 | 4348 | return 0; |
---|
.. | .. |
---|
3256 | 4351 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
---|
3257 | 4352 | |
---|
3258 | 4353 | irq_set_affinity_hint(qv->irq, NULL); |
---|
| 4354 | + kfree(qv->mask); |
---|
| 4355 | + qv->mask = NULL; |
---|
3259 | 4356 | free_irq(qv->irq, qv); |
---|
3260 | 4357 | } |
---|
3261 | 4358 | |
---|
.. | .. |
---|
3270 | 4367 | struct mvpp2_queue_vector *qv = port->qvecs + i; |
---|
3271 | 4368 | |
---|
3272 | 4369 | irq_set_affinity_hint(qv->irq, NULL); |
---|
| 4370 | + kfree(qv->mask); |
---|
| 4371 | + qv->mask = NULL; |
---|
3273 | 4372 | irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); |
---|
3274 | 4373 | free_irq(qv->irq, qv); |
---|
3275 | 4374 | } |
---|
.. | .. |
---|
3341 | 4440 | valid = true; |
---|
3342 | 4441 | } |
---|
3343 | 4442 | |
---|
3344 | | - if (priv->hw_version == MVPP22 && port->link_irq) { |
---|
3345 | | - err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, |
---|
| 4443 | + if (priv->hw_version == MVPP22 && port->port_irq) { |
---|
| 4444 | + err = request_irq(port->port_irq, mvpp2_port_isr, 0, |
---|
3346 | 4445 | dev->name, port); |
---|
3347 | 4446 | if (err) { |
---|
3348 | | - netdev_err(port->dev, "cannot request link IRQ %d\n", |
---|
3349 | | - port->link_irq); |
---|
| 4447 | + netdev_err(port->dev, |
---|
| 4448 | + "cannot request port link/ptp IRQ %d\n", |
---|
| 4449 | + port->port_irq); |
---|
3350 | 4450 | goto err_free_irq; |
---|
3351 | 4451 | } |
---|
3352 | 4452 | |
---|
.. | .. |
---|
3357 | 4457 | |
---|
3358 | 4458 | valid = true; |
---|
3359 | 4459 | } else { |
---|
3360 | | - port->link_irq = 0; |
---|
| 4460 | + port->port_irq = 0; |
---|
3361 | 4461 | } |
---|
3362 | 4462 | |
---|
3363 | 4463 | if (!valid) { |
---|
.. | .. |
---|
3392 | 4492 | { |
---|
3393 | 4493 | struct mvpp2_port *port = netdev_priv(dev); |
---|
3394 | 4494 | struct mvpp2_port_pcpu *port_pcpu; |
---|
3395 | | - int cpu; |
---|
| 4495 | + unsigned int thread; |
---|
3396 | 4496 | |
---|
3397 | 4497 | mvpp2_stop_dev(port); |
---|
3398 | 4498 | |
---|
3399 | | - /* Mask interrupts on all CPUs */ |
---|
| 4499 | + /* Mask interrupts on all threads */ |
---|
3400 | 4500 | on_each_cpu(mvpp2_interrupts_mask, port, 1); |
---|
3401 | 4501 | mvpp2_shared_interrupt_mask_unmask(port, true); |
---|
3402 | 4502 | |
---|
3403 | 4503 | if (port->phylink) |
---|
3404 | 4504 | phylink_disconnect_phy(port->phylink); |
---|
3405 | | - if (port->link_irq) |
---|
3406 | | - free_irq(port->link_irq, port); |
---|
| 4505 | + if (port->port_irq) |
---|
| 4506 | + free_irq(port->port_irq, port); |
---|
3407 | 4507 | |
---|
3408 | 4508 | mvpp2_irqs_deinit(port); |
---|
3409 | 4509 | if (!port->has_tx_irqs) { |
---|
3410 | | - for_each_present_cpu(cpu) { |
---|
3411 | | - port_pcpu = per_cpu_ptr(port->pcpu, cpu); |
---|
| 4510 | + for (thread = 0; thread < port->priv->nthreads; thread++) { |
---|
| 4511 | + port_pcpu = per_cpu_ptr(port->pcpu, thread); |
---|
3412 | 4512 | |
---|
3413 | 4513 | hrtimer_cancel(&port_pcpu->tx_done_timer); |
---|
3414 | 4514 | port_pcpu->timer_scheduled = false; |
---|
3415 | | - tasklet_kill(&port_pcpu->tx_done_tasklet); |
---|
3416 | 4515 | } |
---|
3417 | 4516 | } |
---|
3418 | 4517 | mvpp2_cleanup_rxqs(port); |
---|
3419 | 4518 | mvpp2_cleanup_txqs(port); |
---|
3420 | 4519 | |
---|
3421 | 4520 | cancel_delayed_work_sync(&port->stats_work); |
---|
| 4521 | + |
---|
| 4522 | + mvpp2_mac_reset_assert(port); |
---|
| 4523 | + mvpp22_pcs_reset_assert(port); |
---|
3422 | 4524 | |
---|
3423 | 4525 | return 0; |
---|
3424 | 4526 | } |
---|
.. | .. |
---|
3500 | 4602 | return err; |
---|
3501 | 4603 | } |
---|
3502 | 4604 | |
---|
| 4605 | +/* Shut down all the ports, reconfigure the pools as percpu or shared, |
---|
| 4606 | + * then bring up again all ports. |
---|
| 4607 | + */ |
---|
| 4608 | +static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) |
---|
| 4609 | +{ |
---|
| 4610 | + int numbufs = MVPP2_BM_POOLS_NUM, i; |
---|
| 4611 | + struct mvpp2_port *port = NULL; |
---|
| 4612 | + bool status[MVPP2_MAX_PORTS]; |
---|
| 4613 | + |
---|
| 4614 | + for (i = 0; i < priv->port_count; i++) { |
---|
| 4615 | + port = priv->port_list[i]; |
---|
| 4616 | + status[i] = netif_running(port->dev); |
---|
| 4617 | + if (status[i]) |
---|
| 4618 | + mvpp2_stop(port->dev); |
---|
| 4619 | + } |
---|
| 4620 | + |
---|
| 4621 | + /* nrxqs is the same for all ports */ |
---|
| 4622 | + if (priv->percpu_pools) |
---|
| 4623 | + numbufs = port->nrxqs * 2; |
---|
| 4624 | + |
---|
| 4625 | + for (i = 0; i < numbufs; i++) |
---|
| 4626 | + mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); |
---|
| 4627 | + |
---|
| 4628 | + devm_kfree(port->dev->dev.parent, priv->bm_pools); |
---|
| 4629 | + priv->percpu_pools = percpu; |
---|
| 4630 | + mvpp2_bm_init(port->dev->dev.parent, priv); |
---|
| 4631 | + |
---|
| 4632 | + for (i = 0; i < priv->port_count; i++) { |
---|
| 4633 | + port = priv->port_list[i]; |
---|
| 4634 | + mvpp2_swf_bm_pool_init(port); |
---|
| 4635 | + if (status[i]) |
---|
| 4636 | + mvpp2_open(port->dev); |
---|
| 4637 | + } |
---|
| 4638 | + |
---|
| 4639 | + return 0; |
---|
| 4640 | +} |
---|
| 4641 | + |
---|
3503 | 4642 | static int mvpp2_change_mtu(struct net_device *dev, int mtu) |
---|
3504 | 4643 | { |
---|
3505 | 4644 | struct mvpp2_port *port = netdev_priv(dev); |
---|
3506 | 4645 | bool running = netif_running(dev); |
---|
| 4646 | + struct mvpp2 *priv = port->priv; |
---|
3507 | 4647 | int err; |
---|
3508 | 4648 | |
---|
3509 | 4649 | if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { |
---|
3510 | 4650 | netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, |
---|
3511 | 4651 | ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); |
---|
3512 | 4652 | mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); |
---|
| 4653 | + } |
---|
| 4654 | + |
---|
| 4655 | + if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { |
---|
| 4656 | + netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", |
---|
| 4657 | + mtu, (int)MVPP2_MAX_RX_BUF_SIZE); |
---|
| 4658 | + return -EINVAL; |
---|
| 4659 | + } |
---|
| 4660 | + |
---|
| 4661 | + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { |
---|
| 4662 | + if (priv->percpu_pools) { |
---|
| 4663 | + netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); |
---|
| 4664 | + mvpp2_bm_switch_buffers(priv, false); |
---|
| 4665 | + } |
---|
| 4666 | + } else { |
---|
| 4667 | + bool jumbo = false; |
---|
| 4668 | + int i; |
---|
| 4669 | + |
---|
| 4670 | + for (i = 0; i < priv->port_count; i++) |
---|
| 4671 | + if (priv->port_list[i] != port && |
---|
| 4672 | + MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > |
---|
| 4673 | + MVPP2_BM_LONG_PKT_SIZE) { |
---|
| 4674 | + jumbo = true; |
---|
| 4675 | + break; |
---|
| 4676 | + } |
---|
| 4677 | + |
---|
| 4678 | + /* No port is using jumbo frames */ |
---|
| 4679 | + if (!jumbo) { |
---|
| 4680 | + dev_info(port->dev->dev.parent, |
---|
| 4681 | + "all ports have a low MTU, switching to per-cpu buffers"); |
---|
| 4682 | + mvpp2_bm_switch_buffers(priv, true); |
---|
| 4683 | + } |
---|
3513 | 4684 | } |
---|
3514 | 4685 | |
---|
3515 | 4686 | if (running) |
---|
.. | .. |
---|
3533 | 4704 | return err; |
---|
3534 | 4705 | } |
---|
3535 | 4706 | |
---|
| 4707 | +static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) |
---|
| 4708 | +{ |
---|
| 4709 | + enum dma_data_direction dma_dir = DMA_FROM_DEVICE; |
---|
| 4710 | + struct mvpp2 *priv = port->priv; |
---|
| 4711 | + int err = -1, i; |
---|
| 4712 | + |
---|
| 4713 | + if (!priv->percpu_pools) |
---|
| 4714 | + return err; |
---|
| 4715 | + |
---|
| 4716 | + if (!priv->page_pool[0]) |
---|
| 4717 | + return -ENOMEM; |
---|
| 4718 | + |
---|
| 4719 | + for (i = 0; i < priv->port_count; i++) { |
---|
| 4720 | + port = priv->port_list[i]; |
---|
| 4721 | + if (port->xdp_prog) { |
---|
| 4722 | + dma_dir = DMA_BIDIRECTIONAL; |
---|
| 4723 | + break; |
---|
| 4724 | + } |
---|
| 4725 | + } |
---|
| 4726 | + |
---|
| 4727 | + /* All pools are equal in terms of DMA direction */ |
---|
| 4728 | + if (priv->page_pool[0]->p.dma_dir != dma_dir) |
---|
| 4729 | + err = mvpp2_bm_switch_buffers(priv, true); |
---|
| 4730 | + |
---|
| 4731 | + return err; |
---|
| 4732 | +} |
---|
| 4733 | + |
---|
3536 | 4734 | static void |
---|
3537 | 4735 | mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
---|
3538 | 4736 | { |
---|
3539 | 4737 | struct mvpp2_port *port = netdev_priv(dev); |
---|
3540 | 4738 | unsigned int start; |
---|
3541 | | - int cpu; |
---|
| 4739 | + unsigned int cpu; |
---|
3542 | 4740 | |
---|
3543 | 4741 | for_each_possible_cpu(cpu) { |
---|
3544 | 4742 | struct mvpp2_pcpu_stats *cpu_stats; |
---|
.. | .. |
---|
3567 | 4765 | stats->tx_dropped = dev->stats.tx_dropped; |
---|
3568 | 4766 | } |
---|
3569 | 4767 | |
---|
| 4768 | +static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) |
---|
| 4769 | +{ |
---|
| 4770 | + struct hwtstamp_config config; |
---|
| 4771 | + void __iomem *ptp; |
---|
| 4772 | + u32 gcr, int_mask; |
---|
| 4773 | + |
---|
| 4774 | + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) |
---|
| 4775 | + return -EFAULT; |
---|
| 4776 | + |
---|
| 4777 | + if (config.flags) |
---|
| 4778 | + return -EINVAL; |
---|
| 4779 | + |
---|
| 4780 | + if (config.tx_type != HWTSTAMP_TX_OFF && |
---|
| 4781 | + config.tx_type != HWTSTAMP_TX_ON) |
---|
| 4782 | + return -ERANGE; |
---|
| 4783 | + |
---|
| 4784 | + ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); |
---|
| 4785 | + |
---|
| 4786 | + int_mask = gcr = 0; |
---|
| 4787 | + if (config.tx_type != HWTSTAMP_TX_OFF) { |
---|
| 4788 | + gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; |
---|
| 4789 | + int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | |
---|
| 4790 | + MVPP22_PTP_INT_MASK_QUEUE0; |
---|
| 4791 | + } |
---|
| 4792 | + |
---|
| 4793 | + /* It seems we must also release the TX reset when enabling the TSU */ |
---|
| 4794 | + if (config.rx_filter != HWTSTAMP_FILTER_NONE) |
---|
| 4795 | + gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | |
---|
| 4796 | + MVPP22_PTP_GCR_TX_RESET; |
---|
| 4797 | + |
---|
| 4798 | + if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) |
---|
| 4799 | + mvpp22_tai_start(port->priv->tai); |
---|
| 4800 | + |
---|
| 4801 | + if (config.rx_filter != HWTSTAMP_FILTER_NONE) { |
---|
| 4802 | + config.rx_filter = HWTSTAMP_FILTER_ALL; |
---|
| 4803 | + mvpp2_modify(ptp + MVPP22_PTP_GCR, |
---|
| 4804 | + MVPP22_PTP_GCR_RX_RESET | |
---|
| 4805 | + MVPP22_PTP_GCR_TX_RESET | |
---|
| 4806 | + MVPP22_PTP_GCR_TSU_ENABLE, gcr); |
---|
| 4807 | + port->rx_hwtstamp = true; |
---|
| 4808 | + } else { |
---|
| 4809 | + port->rx_hwtstamp = false; |
---|
| 4810 | + mvpp2_modify(ptp + MVPP22_PTP_GCR, |
---|
| 4811 | + MVPP22_PTP_GCR_RX_RESET | |
---|
| 4812 | + MVPP22_PTP_GCR_TX_RESET | |
---|
| 4813 | + MVPP22_PTP_GCR_TSU_ENABLE, gcr); |
---|
| 4814 | + } |
---|
| 4815 | + |
---|
| 4816 | + mvpp2_modify(ptp + MVPP22_PTP_INT_MASK, |
---|
| 4817 | + MVPP22_PTP_INT_MASK_QUEUE1 | |
---|
| 4818 | + MVPP22_PTP_INT_MASK_QUEUE0, int_mask); |
---|
| 4819 | + |
---|
| 4820 | + if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) |
---|
| 4821 | + mvpp22_tai_stop(port->priv->tai); |
---|
| 4822 | + |
---|
| 4823 | + port->tx_hwtstamp_type = config.tx_type; |
---|
| 4824 | + |
---|
| 4825 | + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) |
---|
| 4826 | + return -EFAULT; |
---|
| 4827 | + |
---|
| 4828 | + return 0; |
---|
| 4829 | +} |
---|
| 4830 | + |
---|
| 4831 | +static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) |
---|
| 4832 | +{ |
---|
| 4833 | + struct hwtstamp_config config; |
---|
| 4834 | + |
---|
| 4835 | + memset(&config, 0, sizeof(config)); |
---|
| 4836 | + |
---|
| 4837 | + config.tx_type = port->tx_hwtstamp_type; |
---|
| 4838 | + config.rx_filter = port->rx_hwtstamp ? |
---|
| 4839 | + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; |
---|
| 4840 | + |
---|
| 4841 | + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) |
---|
| 4842 | + return -EFAULT; |
---|
| 4843 | + |
---|
| 4844 | + return 0; |
---|
| 4845 | +} |
---|
| 4846 | + |
---|
| 4847 | +static int mvpp2_ethtool_get_ts_info(struct net_device *dev, |
---|
| 4848 | + struct ethtool_ts_info *info) |
---|
| 4849 | +{ |
---|
| 4850 | + struct mvpp2_port *port = netdev_priv(dev); |
---|
| 4851 | + |
---|
| 4852 | + if (!port->hwtstamp) |
---|
| 4853 | + return -EOPNOTSUPP; |
---|
| 4854 | + |
---|
| 4855 | + info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); |
---|
| 4856 | + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
---|
| 4857 | + SOF_TIMESTAMPING_RX_SOFTWARE | |
---|
| 4858 | + SOF_TIMESTAMPING_SOFTWARE | |
---|
| 4859 | + SOF_TIMESTAMPING_TX_HARDWARE | |
---|
| 4860 | + SOF_TIMESTAMPING_RX_HARDWARE | |
---|
| 4861 | + SOF_TIMESTAMPING_RAW_HARDWARE; |
---|
| 4862 | + info->tx_types = BIT(HWTSTAMP_TX_OFF) | |
---|
| 4863 | + BIT(HWTSTAMP_TX_ON); |
---|
| 4864 | + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | |
---|
| 4865 | + BIT(HWTSTAMP_FILTER_ALL); |
---|
| 4866 | + |
---|
| 4867 | + return 0; |
---|
| 4868 | +} |
---|
| 4869 | + |
---|
3570 | 4870 | static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
---|
3571 | 4871 | { |
---|
3572 | 4872 | struct mvpp2_port *port = netdev_priv(dev); |
---|
| 4873 | + |
---|
| 4874 | + switch (cmd) { |
---|
| 4875 | + case SIOCSHWTSTAMP: |
---|
| 4876 | + if (port->hwtstamp) |
---|
| 4877 | + return mvpp2_set_ts_config(port, ifr); |
---|
| 4878 | + break; |
---|
| 4879 | + |
---|
| 4880 | + case SIOCGHWTSTAMP: |
---|
| 4881 | + if (port->hwtstamp) |
---|
| 4882 | + return mvpp2_get_ts_config(port, ifr); |
---|
| 4883 | + break; |
---|
| 4884 | + } |
---|
3573 | 4885 | |
---|
3574 | 4886 | if (!port->phylink) |
---|
3575 | 4887 | return -ENOTSUPP; |
---|
.. | .. |
---|
3618 | 4930 | |
---|
3619 | 4931 | if (changed & NETIF_F_RXHASH) { |
---|
3620 | 4932 | if (features & NETIF_F_RXHASH) |
---|
3621 | | - mvpp22_rss_enable(port); |
---|
| 4933 | + mvpp22_port_rss_enable(port); |
---|
3622 | 4934 | else |
---|
3623 | | - mvpp22_rss_disable(port); |
---|
| 4935 | + mvpp22_port_rss_disable(port); |
---|
3624 | 4936 | } |
---|
3625 | 4937 | |
---|
3626 | 4938 | return 0; |
---|
| 4939 | +} |
---|
| 4940 | + |
---|
| 4941 | +static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) |
---|
| 4942 | +{ |
---|
| 4943 | + struct bpf_prog *prog = bpf->prog, *old_prog; |
---|
| 4944 | + bool running = netif_running(port->dev); |
---|
| 4945 | + bool reset = !prog != !port->xdp_prog; |
---|
| 4946 | + |
---|
| 4947 | + if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { |
---|
| 4948 | + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); |
---|
| 4949 | + return -EOPNOTSUPP; |
---|
| 4950 | + } |
---|
| 4951 | + |
---|
| 4952 | + if (!port->priv->percpu_pools) { |
---|
| 4953 | + NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); |
---|
| 4954 | + return -EOPNOTSUPP; |
---|
| 4955 | + } |
---|
| 4956 | + |
---|
| 4957 | + if (port->ntxqs < num_possible_cpus() * 2) { |
---|
| 4958 | + NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); |
---|
| 4959 | + return -EOPNOTSUPP; |
---|
| 4960 | + } |
---|
| 4961 | + |
---|
| 4962 | + /* device is up and bpf is added/removed, must setup the RX queues */ |
---|
| 4963 | + if (running && reset) |
---|
| 4964 | + mvpp2_stop(port->dev); |
---|
| 4965 | + |
---|
| 4966 | + old_prog = xchg(&port->xdp_prog, prog); |
---|
| 4967 | + if (old_prog) |
---|
| 4968 | + bpf_prog_put(old_prog); |
---|
| 4969 | + |
---|
| 4970 | + /* bpf is just replaced, RXQ and MTU are already setup */ |
---|
| 4971 | + if (!reset) |
---|
| 4972 | + return 0; |
---|
| 4973 | + |
---|
| 4974 | + /* device was up, restore the link */ |
---|
| 4975 | + if (running) |
---|
| 4976 | + mvpp2_open(port->dev); |
---|
| 4977 | + |
---|
| 4978 | + /* Check Page Pool DMA Direction */ |
---|
| 4979 | + mvpp2_check_pagepool_dma(port); |
---|
| 4980 | + |
---|
| 4981 | + return 0; |
---|
| 4982 | +} |
---|
| 4983 | + |
---|
| 4984 | +static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
---|
| 4985 | +{ |
---|
| 4986 | + struct mvpp2_port *port = netdev_priv(dev); |
---|
| 4987 | + |
---|
| 4988 | + switch (xdp->command) { |
---|
| 4989 | + case XDP_SETUP_PROG: |
---|
| 4990 | + return mvpp2_xdp_setup(port, xdp); |
---|
| 4991 | + default: |
---|
| 4992 | + return -EINVAL; |
---|
| 4993 | + } |
---|
3627 | 4994 | } |
---|
3628 | 4995 | |
---|
3629 | 4996 | /* Ethtool methods */ |
---|
.. | .. |
---|
3814 | 5181 | struct ethtool_rxnfc *info, u32 *rules) |
---|
3815 | 5182 | { |
---|
3816 | 5183 | struct mvpp2_port *port = netdev_priv(dev); |
---|
3817 | | - int ret = 0; |
---|
| 5184 | + int ret = 0, i, loc = 0; |
---|
3818 | 5185 | |
---|
3819 | 5186 | if (!mvpp22_rss_is_supported()) |
---|
3820 | 5187 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
3825 | 5192 | break; |
---|
3826 | 5193 | case ETHTOOL_GRXRINGS: |
---|
3827 | 5194 | info->data = port->nrxqs; |
---|
| 5195 | + break; |
---|
| 5196 | + case ETHTOOL_GRXCLSRLCNT: |
---|
| 5197 | + info->rule_cnt = port->n_rfs_rules; |
---|
| 5198 | + break; |
---|
| 5199 | + case ETHTOOL_GRXCLSRULE: |
---|
| 5200 | + ret = mvpp2_ethtool_cls_rule_get(port, info); |
---|
| 5201 | + break; |
---|
| 5202 | + case ETHTOOL_GRXCLSRLALL: |
---|
| 5203 | + for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { |
---|
| 5204 | + if (port->rfs_rules[i]) |
---|
| 5205 | + rules[loc++] = i; |
---|
| 5206 | + } |
---|
3828 | 5207 | break; |
---|
3829 | 5208 | default: |
---|
3830 | 5209 | return -ENOTSUPP; |
---|
.. | .. |
---|
3846 | 5225 | case ETHTOOL_SRXFH: |
---|
3847 | 5226 | ret = mvpp2_ethtool_rxfh_set(port, info); |
---|
3848 | 5227 | break; |
---|
| 5228 | + case ETHTOOL_SRXCLSRLINS: |
---|
| 5229 | + ret = mvpp2_ethtool_cls_rule_ins(port, info); |
---|
| 5230 | + break; |
---|
| 5231 | + case ETHTOOL_SRXCLSRLDEL: |
---|
| 5232 | + ret = mvpp2_ethtool_cls_rule_del(port, info); |
---|
| 5233 | + break; |
---|
3849 | 5234 | default: |
---|
3850 | 5235 | return -EOPNOTSUPP; |
---|
3851 | 5236 | } |
---|
.. | .. |
---|
3861 | 5246 | u8 *hfunc) |
---|
3862 | 5247 | { |
---|
3863 | 5248 | struct mvpp2_port *port = netdev_priv(dev); |
---|
| 5249 | + int ret = 0; |
---|
3864 | 5250 | |
---|
3865 | 5251 | if (!mvpp22_rss_is_supported()) |
---|
3866 | 5252 | return -EOPNOTSUPP; |
---|
3867 | 5253 | |
---|
3868 | 5254 | if (indir) |
---|
3869 | | - memcpy(indir, port->indir, |
---|
3870 | | - ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); |
---|
| 5255 | + ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir); |
---|
3871 | 5256 | |
---|
3872 | 5257 | if (hfunc) |
---|
3873 | 5258 | *hfunc = ETH_RSS_HASH_CRC32; |
---|
3874 | 5259 | |
---|
3875 | | - return 0; |
---|
| 5260 | + return ret; |
---|
3876 | 5261 | } |
---|
3877 | 5262 | |
---|
3878 | 5263 | static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, |
---|
3879 | 5264 | const u8 *key, const u8 hfunc) |
---|
3880 | 5265 | { |
---|
3881 | 5266 | struct mvpp2_port *port = netdev_priv(dev); |
---|
| 5267 | + int ret = 0; |
---|
3882 | 5268 | |
---|
3883 | 5269 | if (!mvpp22_rss_is_supported()) |
---|
3884 | 5270 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
3889 | 5275 | if (key) |
---|
3890 | 5276 | return -EOPNOTSUPP; |
---|
3891 | 5277 | |
---|
3892 | | - if (indir) { |
---|
3893 | | - memcpy(port->indir, indir, |
---|
3894 | | - ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); |
---|
3895 | | - mvpp22_rss_fill_table(port, port->id); |
---|
3896 | | - } |
---|
| 5278 | + if (indir) |
---|
| 5279 | + ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir); |
---|
3897 | 5280 | |
---|
3898 | | - return 0; |
---|
| 5281 | + return ret; |
---|
3899 | 5282 | } |
---|
3900 | 5283 | |
---|
| 5284 | +static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, |
---|
| 5285 | + u8 *key, u8 *hfunc, u32 rss_context) |
---|
| 5286 | +{ |
---|
| 5287 | + struct mvpp2_port *port = netdev_priv(dev); |
---|
| 5288 | + int ret = 0; |
---|
| 5289 | + |
---|
| 5290 | + if (!mvpp22_rss_is_supported()) |
---|
| 5291 | + return -EOPNOTSUPP; |
---|
| 5292 | + if (rss_context >= MVPP22_N_RSS_TABLES) |
---|
| 5293 | + return -EINVAL; |
---|
| 5294 | + |
---|
| 5295 | + if (hfunc) |
---|
| 5296 | + *hfunc = ETH_RSS_HASH_CRC32; |
---|
| 5297 | + |
---|
| 5298 | + if (indir) |
---|
| 5299 | + ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir); |
---|
| 5300 | + |
---|
| 5301 | + return ret; |
---|
| 5302 | +} |
---|
| 5303 | + |
---|
| 5304 | +static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, |
---|
| 5305 | + const u32 *indir, const u8 *key, |
---|
| 5306 | + const u8 hfunc, u32 *rss_context, |
---|
| 5307 | + bool delete) |
---|
| 5308 | +{ |
---|
| 5309 | + struct mvpp2_port *port = netdev_priv(dev); |
---|
| 5310 | + int ret; |
---|
| 5311 | + |
---|
| 5312 | + if (!mvpp22_rss_is_supported()) |
---|
| 5313 | + return -EOPNOTSUPP; |
---|
| 5314 | + |
---|
| 5315 | + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) |
---|
| 5316 | + return -EOPNOTSUPP; |
---|
| 5317 | + |
---|
| 5318 | + if (key) |
---|
| 5319 | + return -EOPNOTSUPP; |
---|
| 5320 | + |
---|
| 5321 | + if (delete) |
---|
| 5322 | + return mvpp22_port_rss_ctx_delete(port, *rss_context); |
---|
| 5323 | + |
---|
| 5324 | + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { |
---|
| 5325 | + ret = mvpp22_port_rss_ctx_create(port, rss_context); |
---|
| 5326 | + if (ret) |
---|
| 5327 | + return ret; |
---|
| 5328 | + } |
---|
| 5329 | + |
---|
| 5330 | + return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir); |
---|
| 5331 | +} |
---|
3901 | 5332 | /* Device ops */ |
---|
3902 | 5333 | |
---|
3903 | 5334 | static const struct net_device_ops mvpp2_netdev_ops = { |
---|
.. | .. |
---|
3912 | 5343 | .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, |
---|
3913 | 5344 | .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, |
---|
3914 | 5345 | .ndo_set_features = mvpp2_set_features, |
---|
| 5346 | + .ndo_bpf = mvpp2_xdp, |
---|
| 5347 | + .ndo_xdp_xmit = mvpp2_xdp_xmit, |
---|
3915 | 5348 | }; |
---|
3916 | 5349 | |
---|
3917 | 5350 | static const struct ethtool_ops mvpp2_eth_tool_ops = { |
---|
| 5351 | + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
---|
| 5352 | + ETHTOOL_COALESCE_MAX_FRAMES, |
---|
3918 | 5353 | .nway_reset = mvpp2_ethtool_nway_reset, |
---|
3919 | 5354 | .get_link = ethtool_op_get_link, |
---|
| 5355 | + .get_ts_info = mvpp2_ethtool_get_ts_info, |
---|
3920 | 5356 | .set_coalesce = mvpp2_ethtool_set_coalesce, |
---|
3921 | 5357 | .get_coalesce = mvpp2_ethtool_get_coalesce, |
---|
3922 | 5358 | .get_drvinfo = mvpp2_ethtool_get_drvinfo, |
---|
.. | .. |
---|
3934 | 5370 | .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, |
---|
3935 | 5371 | .get_rxfh = mvpp2_ethtool_get_rxfh, |
---|
3936 | 5372 | .set_rxfh = mvpp2_ethtool_set_rxfh, |
---|
3937 | | - |
---|
| 5373 | + .get_rxfh_context = mvpp2_ethtool_get_rxfh_context, |
---|
| 5374 | + .set_rxfh_context = mvpp2_ethtool_set_rxfh_context, |
---|
3938 | 5375 | }; |
---|
3939 | 5376 | |
---|
3940 | 5377 | /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that |
---|
.. | .. |
---|
3965 | 5402 | static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, |
---|
3966 | 5403 | struct device_node *port_node) |
---|
3967 | 5404 | { |
---|
| 5405 | + struct mvpp2 *priv = port->priv; |
---|
3968 | 5406 | struct mvpp2_queue_vector *v; |
---|
3969 | 5407 | int i, ret; |
---|
3970 | 5408 | |
---|
3971 | | - port->nqvecs = num_possible_cpus(); |
---|
3972 | | - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) |
---|
3973 | | - port->nqvecs += 1; |
---|
| 5409 | + switch (queue_mode) { |
---|
| 5410 | + case MVPP2_QDIST_SINGLE_MODE: |
---|
| 5411 | + port->nqvecs = priv->nthreads + 1; |
---|
| 5412 | + break; |
---|
| 5413 | + case MVPP2_QDIST_MULTI_MODE: |
---|
| 5414 | + port->nqvecs = priv->nthreads; |
---|
| 5415 | + break; |
---|
| 5416 | + } |
---|
3974 | 5417 | |
---|
3975 | 5418 | for (i = 0; i < port->nqvecs; i++) { |
---|
3976 | 5419 | char irqname[16]; |
---|
.. | .. |
---|
3982 | 5425 | v->sw_thread_id = i; |
---|
3983 | 5426 | v->sw_thread_mask = BIT(i); |
---|
3984 | 5427 | |
---|
3985 | | - snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); |
---|
| 5428 | + if (port->flags & MVPP2_F_DT_COMPAT) |
---|
| 5429 | + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); |
---|
| 5430 | + else |
---|
| 5431 | + snprintf(irqname, sizeof(irqname), "hif%d", i); |
---|
3986 | 5432 | |
---|
3987 | 5433 | if (queue_mode == MVPP2_QDIST_MULTI_MODE) { |
---|
3988 | | - v->first_rxq = i * MVPP2_DEFAULT_RXQ; |
---|
3989 | | - v->nrxqs = MVPP2_DEFAULT_RXQ; |
---|
| 5434 | + v->first_rxq = i; |
---|
| 5435 | + v->nrxqs = 1; |
---|
3990 | 5436 | } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && |
---|
3991 | 5437 | i == (port->nqvecs - 1)) { |
---|
3992 | 5438 | v->first_rxq = 0; |
---|
3993 | 5439 | v->nrxqs = port->nrxqs; |
---|
3994 | 5440 | v->type = MVPP2_QUEUE_VECTOR_SHARED; |
---|
3995 | | - strncpy(irqname, "rx-shared", sizeof(irqname)); |
---|
| 5441 | + |
---|
| 5442 | + if (port->flags & MVPP2_F_DT_COMPAT) |
---|
| 5443 | + strncpy(irqname, "rx-shared", sizeof(irqname)); |
---|
3996 | 5444 | } |
---|
3997 | 5445 | |
---|
3998 | 5446 | if (port_node) |
---|
.. | .. |
---|
4069 | 5517 | struct device *dev = port->dev->dev.parent; |
---|
4070 | 5518 | struct mvpp2 *priv = port->priv; |
---|
4071 | 5519 | struct mvpp2_txq_pcpu *txq_pcpu; |
---|
4072 | | - int queue, cpu, err; |
---|
| 5520 | + unsigned int thread; |
---|
| 5521 | + int queue, err, val; |
---|
4073 | 5522 | |
---|
4074 | 5523 | /* Checks for hardware constraints */ |
---|
4075 | 5524 | if (port->first_rxq + port->nrxqs > |
---|
4076 | 5525 | MVPP2_MAX_PORTS * priv->max_port_rxqs) |
---|
4077 | 5526 | return -EINVAL; |
---|
4078 | 5527 | |
---|
4079 | | - if (port->nrxqs % MVPP2_DEFAULT_RXQ || |
---|
4080 | | - port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) |
---|
| 5528 | + if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) |
---|
4081 | 5529 | return -EINVAL; |
---|
4082 | 5530 | |
---|
4083 | 5531 | /* Disable port */ |
---|
4084 | 5532 | mvpp2_egress_disable(port); |
---|
4085 | 5533 | mvpp2_port_disable(port); |
---|
| 5534 | + |
---|
| 5535 | + if (mvpp2_is_xlg(port->phy_interface)) { |
---|
| 5536 | + val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
---|
| 5537 | + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
---|
| 5538 | + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; |
---|
| 5539 | + writel(val, port->base + MVPP22_XLG_CTRL0_REG); |
---|
| 5540 | + } else { |
---|
| 5541 | + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5542 | + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
---|
| 5543 | + val |= MVPP2_GMAC_FORCE_LINK_DOWN; |
---|
| 5544 | + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5545 | + } |
---|
4086 | 5546 | |
---|
4087 | 5547 | port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; |
---|
4088 | 5548 | |
---|
.. | .. |
---|
4113 | 5573 | txq->id = queue_phy_id; |
---|
4114 | 5574 | txq->log_id = queue; |
---|
4115 | 5575 | txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; |
---|
4116 | | - for_each_present_cpu(cpu) { |
---|
4117 | | - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
---|
4118 | | - txq_pcpu->cpu = cpu; |
---|
| 5576 | + for (thread = 0; thread < priv->nthreads; thread++) { |
---|
| 5577 | + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
---|
| 5578 | + txq_pcpu->thread = thread; |
---|
4119 | 5579 | } |
---|
4120 | 5580 | |
---|
4121 | 5581 | port->txqs[queue] = txq; |
---|
.. | .. |
---|
4167 | 5627 | mvpp2_cls_port_config(port); |
---|
4168 | 5628 | |
---|
4169 | 5629 | if (mvpp22_rss_is_supported()) |
---|
4170 | | - mvpp22_rss_port_init(port); |
---|
| 5630 | + mvpp22_port_rss_init(port); |
---|
4171 | 5631 | |
---|
4172 | 5632 | /* Provide an initial Rx packet size */ |
---|
4173 | 5633 | port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); |
---|
.. | .. |
---|
4176 | 5636 | err = mvpp2_swf_bm_pool_init(port); |
---|
4177 | 5637 | if (err) |
---|
4178 | 5638 | goto err_free_percpu; |
---|
| 5639 | + |
---|
| 5640 | + /* Clear all port stats */ |
---|
| 5641 | + mvpp2_read_stats(port); |
---|
| 5642 | + memset(port->ethtool_stats, 0, |
---|
| 5643 | + MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); |
---|
4179 | 5644 | |
---|
4180 | 5645 | return 0; |
---|
4181 | 5646 | |
---|
.. | .. |
---|
4188 | 5653 | return err; |
---|
4189 | 5654 | } |
---|
4190 | 5655 | |
---|
4191 | | -/* Checks if the port DT description has the TX interrupts |
---|
4192 | | - * described. On PPv2.1, there are no such interrupts. On PPv2.2, |
---|
4193 | | - * there are available, but we need to keep support for old DTs. |
---|
4194 | | - */ |
---|
4195 | | -static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, |
---|
4196 | | - struct device_node *port_node) |
---|
| 5656 | +static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, |
---|
| 5657 | + unsigned long *flags) |
---|
4197 | 5658 | { |
---|
4198 | | - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", |
---|
4199 | | - "tx-cpu2", "tx-cpu3" }; |
---|
4200 | | - int ret, i; |
---|
| 5659 | + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", |
---|
| 5660 | + "tx-cpu3" }; |
---|
| 5661 | + int i; |
---|
| 5662 | + |
---|
| 5663 | + for (i = 0; i < 5; i++) |
---|
| 5664 | + if (of_property_match_string(port_node, "interrupt-names", |
---|
| 5665 | + irqs[i]) < 0) |
---|
| 5666 | + return false; |
---|
| 5667 | + |
---|
| 5668 | + *flags |= MVPP2_F_DT_COMPAT; |
---|
| 5669 | + return true; |
---|
| 5670 | +} |
---|
| 5671 | + |
---|
| 5672 | +/* Checks if the port dt description has the required Tx interrupts: |
---|
| 5673 | + * - PPv2.1: there are no such interrupts. |
---|
| 5674 | + * - PPv2.2: |
---|
| 5675 | + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] |
---|
| 5676 | + * - The new ones have: "hifX" with X in [0..8] |
---|
| 5677 | + * |
---|
| 5678 | + * All those variants are supported to keep the backward compatibility. |
---|
| 5679 | + */ |
---|
| 5680 | +static bool mvpp2_port_has_irqs(struct mvpp2 *priv, |
---|
| 5681 | + struct device_node *port_node, |
---|
| 5682 | + unsigned long *flags) |
---|
| 5683 | +{ |
---|
| 5684 | + char name[5]; |
---|
| 5685 | + int i; |
---|
| 5686 | + |
---|
| 5687 | + /* ACPI */ |
---|
| 5688 | + if (!port_node) |
---|
| 5689 | + return true; |
---|
4201 | 5690 | |
---|
4202 | 5691 | if (priv->hw_version == MVPP21) |
---|
4203 | 5692 | return false; |
---|
4204 | 5693 | |
---|
4205 | | - for (i = 0; i < 5; i++) { |
---|
4206 | | - ret = of_property_match_string(port_node, "interrupt-names", |
---|
4207 | | - irqs[i]); |
---|
4208 | | - if (ret < 0) |
---|
| 5694 | + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) |
---|
| 5695 | + return true; |
---|
| 5696 | + |
---|
| 5697 | + for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
---|
| 5698 | + snprintf(name, 5, "hif%d", i); |
---|
| 5699 | + if (of_property_match_string(port_node, "interrupt-names", |
---|
| 5700 | + name) < 0) |
---|
4209 | 5701 | return false; |
---|
4210 | 5702 | } |
---|
4211 | 5703 | |
---|
.. | .. |
---|
4239 | 5731 | eth_hw_addr_random(dev); |
---|
4240 | 5732 | } |
---|
4241 | 5733 | |
---|
4242 | | -static void mvpp2_phylink_validate(struct net_device *dev, |
---|
4243 | | - unsigned long *supported, |
---|
4244 | | - struct phylink_link_state *state) |
---|
| 5734 | +static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) |
---|
4245 | 5735 | { |
---|
4246 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
4247 | | - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
---|
4248 | | - |
---|
4249 | | - /* Invalid combinations */ |
---|
4250 | | - switch (state->interface) { |
---|
4251 | | - case PHY_INTERFACE_MODE_10GKR: |
---|
4252 | | - case PHY_INTERFACE_MODE_XAUI: |
---|
4253 | | - if (port->gop_id != 0) |
---|
4254 | | - goto empty_set; |
---|
4255 | | - break; |
---|
4256 | | - case PHY_INTERFACE_MODE_RGMII: |
---|
4257 | | - case PHY_INTERFACE_MODE_RGMII_ID: |
---|
4258 | | - case PHY_INTERFACE_MODE_RGMII_RXID: |
---|
4259 | | - case PHY_INTERFACE_MODE_RGMII_TXID: |
---|
4260 | | - if (port->priv->hw_version == MVPP22 && port->gop_id == 0) |
---|
4261 | | - goto empty_set; |
---|
4262 | | - break; |
---|
4263 | | - default: |
---|
4264 | | - break; |
---|
4265 | | - } |
---|
4266 | | - |
---|
4267 | | - phylink_set(mask, Autoneg); |
---|
4268 | | - phylink_set_port_modes(mask); |
---|
4269 | | - |
---|
4270 | | - switch (state->interface) { |
---|
4271 | | - case PHY_INTERFACE_MODE_10GKR: |
---|
4272 | | - case PHY_INTERFACE_MODE_XAUI: |
---|
4273 | | - case PHY_INTERFACE_MODE_NA: |
---|
4274 | | - if (port->gop_id == 0) { |
---|
4275 | | - phylink_set(mask, 10000baseT_Full); |
---|
4276 | | - phylink_set(mask, 10000baseCR_Full); |
---|
4277 | | - phylink_set(mask, 10000baseSR_Full); |
---|
4278 | | - phylink_set(mask, 10000baseLR_Full); |
---|
4279 | | - phylink_set(mask, 10000baseLRM_Full); |
---|
4280 | | - phylink_set(mask, 10000baseER_Full); |
---|
4281 | | - phylink_set(mask, 10000baseKR_Full); |
---|
4282 | | - } |
---|
4283 | | - /* Fall-through */ |
---|
4284 | | - case PHY_INTERFACE_MODE_RGMII: |
---|
4285 | | - case PHY_INTERFACE_MODE_RGMII_ID: |
---|
4286 | | - case PHY_INTERFACE_MODE_RGMII_RXID: |
---|
4287 | | - case PHY_INTERFACE_MODE_RGMII_TXID: |
---|
4288 | | - case PHY_INTERFACE_MODE_SGMII: |
---|
4289 | | - phylink_set(mask, 10baseT_Half); |
---|
4290 | | - phylink_set(mask, 10baseT_Full); |
---|
4291 | | - phylink_set(mask, 100baseT_Half); |
---|
4292 | | - phylink_set(mask, 100baseT_Full); |
---|
4293 | | - /* Fall-through */ |
---|
4294 | | - case PHY_INTERFACE_MODE_1000BASEX: |
---|
4295 | | - case PHY_INTERFACE_MODE_2500BASEX: |
---|
4296 | | - phylink_set(mask, 1000baseT_Full); |
---|
4297 | | - phylink_set(mask, 1000baseX_Full); |
---|
4298 | | - phylink_set(mask, 2500baseX_Full); |
---|
4299 | | - break; |
---|
4300 | | - default: |
---|
4301 | | - goto empty_set; |
---|
4302 | | - } |
---|
4303 | | - |
---|
4304 | | - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
4305 | | - bitmap_and(state->advertising, state->advertising, mask, |
---|
4306 | | - __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
4307 | | - return; |
---|
4308 | | - |
---|
4309 | | -empty_set: |
---|
4310 | | - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 5736 | + return container_of(config, struct mvpp2_port, phylink_config); |
---|
4311 | 5737 | } |
---|
4312 | 5738 | |
---|
4313 | | -static void mvpp22_xlg_link_state(struct mvpp2_port *port, |
---|
4314 | | - struct phylink_link_state *state) |
---|
| 5739 | +static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs) |
---|
4315 | 5740 | { |
---|
| 5741 | + return container_of(pcs, struct mvpp2_port, phylink_pcs); |
---|
| 5742 | +} |
---|
| 5743 | + |
---|
| 5744 | +static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, |
---|
| 5745 | + struct phylink_link_state *state) |
---|
| 5746 | +{ |
---|
| 5747 | + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); |
---|
4316 | 5748 | u32 val; |
---|
4317 | 5749 | |
---|
4318 | 5750 | state->speed = SPEED_10000; |
---|
.. | .. |
---|
4330 | 5762 | state->pause |= MLO_PAUSE_RX; |
---|
4331 | 5763 | } |
---|
4332 | 5764 | |
---|
4333 | | -static void mvpp2_gmac_link_state(struct mvpp2_port *port, |
---|
4334 | | - struct phylink_link_state *state) |
---|
| 5765 | +static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, |
---|
| 5766 | + unsigned int mode, |
---|
| 5767 | + phy_interface_t interface, |
---|
| 5768 | + const unsigned long *advertising, |
---|
| 5769 | + bool permit_pause_to_mac) |
---|
4335 | 5770 | { |
---|
| 5771 | + return 0; |
---|
| 5772 | +} |
---|
| 5773 | + |
---|
| 5774 | +static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { |
---|
| 5775 | + .pcs_get_state = mvpp2_xlg_pcs_get_state, |
---|
| 5776 | + .pcs_config = mvpp2_xlg_pcs_config, |
---|
| 5777 | +}; |
---|
| 5778 | + |
---|
| 5779 | +static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, |
---|
| 5780 | + struct phylink_link_state *state) |
---|
| 5781 | +{ |
---|
| 5782 | + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); |
---|
4336 | 5783 | u32 val; |
---|
4337 | 5784 | |
---|
4338 | 5785 | val = readl(port->base + MVPP2_GMAC_STATUS0); |
---|
.. | .. |
---|
4364 | 5811 | state->pause |= MLO_PAUSE_TX; |
---|
4365 | 5812 | } |
---|
4366 | 5813 | |
---|
4367 | | -static int mvpp2_phylink_mac_link_state(struct net_device *dev, |
---|
4368 | | - struct phylink_link_state *state) |
---|
| 5814 | +static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, |
---|
| 5815 | + phy_interface_t interface, |
---|
| 5816 | + const unsigned long *advertising, |
---|
| 5817 | + bool permit_pause_to_mac) |
---|
4369 | 5818 | { |
---|
4370 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
| 5819 | + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); |
---|
| 5820 | + u32 mask, val, an, old_an, changed; |
---|
4371 | 5821 | |
---|
4372 | | - if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { |
---|
4373 | | - u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); |
---|
4374 | | - mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; |
---|
| 5822 | + mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | |
---|
| 5823 | + MVPP2_GMAC_IN_BAND_AUTONEG | |
---|
| 5824 | + MVPP2_GMAC_AN_SPEED_EN | |
---|
| 5825 | + MVPP2_GMAC_FLOW_CTRL_AUTONEG | |
---|
| 5826 | + MVPP2_GMAC_AN_DUPLEX_EN; |
---|
4375 | 5827 | |
---|
4376 | | - if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { |
---|
4377 | | - mvpp22_xlg_link_state(port, state); |
---|
4378 | | - return 1; |
---|
| 5828 | + if (phylink_autoneg_inband(mode)) { |
---|
| 5829 | + mask |= MVPP2_GMAC_CONFIG_MII_SPEED | |
---|
| 5830 | + MVPP2_GMAC_CONFIG_GMII_SPEED | |
---|
| 5831 | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
---|
| 5832 | + val = MVPP2_GMAC_IN_BAND_AUTONEG; |
---|
| 5833 | + |
---|
| 5834 | + if (interface == PHY_INTERFACE_MODE_SGMII) { |
---|
| 5835 | + /* SGMII mode receives the speed and duplex from PHY */ |
---|
| 5836 | + val |= MVPP2_GMAC_AN_SPEED_EN | |
---|
| 5837 | + MVPP2_GMAC_AN_DUPLEX_EN; |
---|
| 5838 | + } else { |
---|
| 5839 | + /* 802.3z mode has fixed speed and duplex */ |
---|
| 5840 | + val |= MVPP2_GMAC_CONFIG_GMII_SPEED | |
---|
| 5841 | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
---|
| 5842 | + |
---|
| 5843 | + /* The FLOW_CTRL_AUTONEG bit selects either the hardware |
---|
| 5844 | + * automatically or the bits in MVPP22_GMAC_CTRL_4_REG |
---|
| 5845 | + * manually controls the GMAC pause modes. |
---|
| 5846 | + */ |
---|
| 5847 | + if (permit_pause_to_mac) |
---|
| 5848 | + val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; |
---|
| 5849 | + |
---|
| 5850 | + /* Configure advertisement bits */ |
---|
| 5851 | + mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; |
---|
| 5852 | + if (phylink_test(advertising, Pause)) |
---|
| 5853 | + val |= MVPP2_GMAC_FC_ADV_EN; |
---|
| 5854 | + if (phylink_test(advertising, Asym_Pause)) |
---|
| 5855 | + val |= MVPP2_GMAC_FC_ADV_ASM_EN; |
---|
4379 | 5856 | } |
---|
| 5857 | + } else { |
---|
| 5858 | + val = 0; |
---|
4380 | 5859 | } |
---|
4381 | 5860 | |
---|
4382 | | - mvpp2_gmac_link_state(port, state); |
---|
4383 | | - return 1; |
---|
| 5861 | + old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5862 | + an = (an & ~mask) | val; |
---|
| 5863 | + changed = an ^ old_an; |
---|
| 5864 | + if (changed) |
---|
| 5865 | + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5866 | + |
---|
| 5867 | + /* We are only interested in the advertisement bits changing */ |
---|
| 5868 | + return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); |
---|
4384 | 5869 | } |
---|
4385 | 5870 | |
---|
4386 | | -static void mvpp2_mac_an_restart(struct net_device *dev) |
---|
| 5871 | +static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) |
---|
4387 | 5872 | { |
---|
4388 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
4389 | | - u32 val; |
---|
| 5873 | + struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); |
---|
| 5874 | + u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4390 | 5875 | |
---|
4391 | | - if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) |
---|
4392 | | - return; |
---|
| 5876 | + writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, |
---|
| 5877 | + port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5878 | + writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, |
---|
| 5879 | + port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5880 | +} |
---|
4393 | 5881 | |
---|
4394 | | - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4395 | | - /* The RESTART_AN bit is cleared by the h/w after restarting the AN |
---|
4396 | | - * process. |
---|
4397 | | - */ |
---|
4398 | | - val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; |
---|
4399 | | - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 5882 | +static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { |
---|
| 5883 | + .pcs_get_state = mvpp2_gmac_pcs_get_state, |
---|
| 5884 | + .pcs_config = mvpp2_gmac_pcs_config, |
---|
| 5885 | + .pcs_an_restart = mvpp2_gmac_pcs_an_restart, |
---|
| 5886 | +}; |
---|
| 5887 | + |
---|
| 5888 | +static void mvpp2_phylink_validate(struct phylink_config *config, |
---|
| 5889 | + unsigned long *supported, |
---|
| 5890 | + struct phylink_link_state *state) |
---|
| 5891 | +{ |
---|
| 5892 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
| 5893 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
---|
| 5894 | + |
---|
| 5895 | + /* Invalid combinations */ |
---|
| 5896 | + switch (state->interface) { |
---|
| 5897 | + case PHY_INTERFACE_MODE_10GBASER: |
---|
| 5898 | + case PHY_INTERFACE_MODE_XAUI: |
---|
| 5899 | + if (!mvpp2_port_supports_xlg(port)) |
---|
| 5900 | + goto empty_set; |
---|
| 5901 | + break; |
---|
| 5902 | + case PHY_INTERFACE_MODE_RGMII: |
---|
| 5903 | + case PHY_INTERFACE_MODE_RGMII_ID: |
---|
| 5904 | + case PHY_INTERFACE_MODE_RGMII_RXID: |
---|
| 5905 | + case PHY_INTERFACE_MODE_RGMII_TXID: |
---|
| 5906 | + if (!mvpp2_port_supports_rgmii(port)) |
---|
| 5907 | + goto empty_set; |
---|
| 5908 | + break; |
---|
| 5909 | + default: |
---|
| 5910 | + break; |
---|
| 5911 | + } |
---|
| 5912 | + |
---|
| 5913 | + phylink_set(mask, Autoneg); |
---|
| 5914 | + phylink_set_port_modes(mask); |
---|
| 5915 | + |
---|
| 5916 | + switch (state->interface) { |
---|
| 5917 | + case PHY_INTERFACE_MODE_10GBASER: |
---|
| 5918 | + case PHY_INTERFACE_MODE_XAUI: |
---|
| 5919 | + case PHY_INTERFACE_MODE_NA: |
---|
| 5920 | + if (mvpp2_port_supports_xlg(port)) { |
---|
| 5921 | + phylink_set(mask, 10000baseT_Full); |
---|
| 5922 | + phylink_set(mask, 10000baseCR_Full); |
---|
| 5923 | + phylink_set(mask, 10000baseSR_Full); |
---|
| 5924 | + phylink_set(mask, 10000baseLR_Full); |
---|
| 5925 | + phylink_set(mask, 10000baseLRM_Full); |
---|
| 5926 | + phylink_set(mask, 10000baseER_Full); |
---|
| 5927 | + phylink_set(mask, 10000baseKR_Full); |
---|
| 5928 | + } |
---|
| 5929 | + if (state->interface != PHY_INTERFACE_MODE_NA) |
---|
| 5930 | + break; |
---|
| 5931 | + fallthrough; |
---|
| 5932 | + case PHY_INTERFACE_MODE_RGMII: |
---|
| 5933 | + case PHY_INTERFACE_MODE_RGMII_ID: |
---|
| 5934 | + case PHY_INTERFACE_MODE_RGMII_RXID: |
---|
| 5935 | + case PHY_INTERFACE_MODE_RGMII_TXID: |
---|
| 5936 | + case PHY_INTERFACE_MODE_SGMII: |
---|
| 5937 | + phylink_set(mask, 10baseT_Half); |
---|
| 5938 | + phylink_set(mask, 10baseT_Full); |
---|
| 5939 | + phylink_set(mask, 100baseT_Half); |
---|
| 5940 | + phylink_set(mask, 100baseT_Full); |
---|
| 5941 | + phylink_set(mask, 1000baseT_Full); |
---|
| 5942 | + phylink_set(mask, 1000baseX_Full); |
---|
| 5943 | + if (state->interface != PHY_INTERFACE_MODE_NA) |
---|
| 5944 | + break; |
---|
| 5945 | + fallthrough; |
---|
| 5946 | + case PHY_INTERFACE_MODE_1000BASEX: |
---|
| 5947 | + case PHY_INTERFACE_MODE_2500BASEX: |
---|
| 5948 | + if (port->comphy || |
---|
| 5949 | + state->interface != PHY_INTERFACE_MODE_2500BASEX) { |
---|
| 5950 | + phylink_set(mask, 1000baseT_Full); |
---|
| 5951 | + phylink_set(mask, 1000baseX_Full); |
---|
| 5952 | + } |
---|
| 5953 | + if (port->comphy || |
---|
| 5954 | + state->interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
| 5955 | + phylink_set(mask, 2500baseT_Full); |
---|
| 5956 | + phylink_set(mask, 2500baseX_Full); |
---|
| 5957 | + } |
---|
| 5958 | + break; |
---|
| 5959 | + default: |
---|
| 5960 | + goto empty_set; |
---|
| 5961 | + } |
---|
| 5962 | + |
---|
| 5963 | + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 5964 | + bitmap_and(state->advertising, state->advertising, mask, |
---|
| 5965 | + __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
| 5966 | + |
---|
| 5967 | + phylink_helper_basex_speed(state); |
---|
| 5968 | + return; |
---|
| 5969 | + |
---|
| 5970 | +empty_set: |
---|
| 5971 | + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); |
---|
4400 | 5972 | } |
---|
4401 | 5973 | |
---|
4402 | 5974 | static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, |
---|
4403 | 5975 | const struct phylink_link_state *state) |
---|
4404 | 5976 | { |
---|
4405 | | - u32 ctrl0, ctrl4; |
---|
| 5977 | + u32 val; |
---|
4406 | 5978 | |
---|
4407 | | - ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); |
---|
4408 | | - ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); |
---|
| 5979 | + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, |
---|
| 5980 | + MVPP22_XLG_CTRL0_MAC_RESET_DIS, |
---|
| 5981 | + MVPP22_XLG_CTRL0_MAC_RESET_DIS); |
---|
| 5982 | + mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, |
---|
| 5983 | + MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | |
---|
| 5984 | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK | |
---|
| 5985 | + MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, |
---|
| 5986 | + MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); |
---|
4409 | 5987 | |
---|
4410 | | - if (state->pause & MLO_PAUSE_TX) |
---|
4411 | | - ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; |
---|
4412 | | - if (state->pause & MLO_PAUSE_RX) |
---|
4413 | | - ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; |
---|
4414 | | - |
---|
4415 | | - ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | |
---|
4416 | | - MVPP22_XLG_CTRL4_EN_IDLE_CHECK); |
---|
4417 | | - ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; |
---|
4418 | | - |
---|
4419 | | - writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); |
---|
4420 | | - writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); |
---|
| 5988 | + /* Wait for reset to deassert */ |
---|
| 5989 | + do { |
---|
| 5990 | + val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
---|
| 5991 | + } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); |
---|
4421 | 5992 | } |
---|
4422 | 5993 | |
---|
4423 | 5994 | static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, |
---|
4424 | 5995 | const struct phylink_link_state *state) |
---|
4425 | 5996 | { |
---|
4426 | | - u32 an, ctrl0, ctrl2, ctrl4; |
---|
4427 | | - u32 old_ctrl2; |
---|
| 5997 | + u32 old_ctrl0, ctrl0; |
---|
| 5998 | + u32 old_ctrl2, ctrl2; |
---|
| 5999 | + u32 old_ctrl4, ctrl4; |
---|
4428 | 6000 | |
---|
4429 | | - an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4430 | | - ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
4431 | | - ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); |
---|
4432 | | - ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); |
---|
| 6001 | + old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
| 6002 | + old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); |
---|
| 6003 | + old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); |
---|
4433 | 6004 | |
---|
4434 | | - old_ctrl2 = ctrl2; |
---|
4435 | | - |
---|
4436 | | - /* Force link down */ |
---|
4437 | | - an &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
---|
4438 | | - an |= MVPP2_GMAC_FORCE_LINK_DOWN; |
---|
4439 | | - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4440 | | - |
---|
4441 | | - /* Set the GMAC in a reset state */ |
---|
4442 | | - ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; |
---|
4443 | | - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); |
---|
4444 | | - |
---|
4445 | | - an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | |
---|
4446 | | - MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | |
---|
4447 | | - MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | |
---|
4448 | | - MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | |
---|
4449 | | - MVPP2_GMAC_FORCE_LINK_DOWN); |
---|
4450 | 6005 | ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; |
---|
4451 | | - ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); |
---|
| 6006 | + ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); |
---|
4452 | 6007 | |
---|
4453 | | - if (state->interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
4454 | | - state->interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
4455 | | - /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can |
---|
4456 | | - * they negotiate duplex: they are always operating with a fixed |
---|
4457 | | - * speed of 1000/2500Mbps in full duplex, so force 1000/2500 |
---|
4458 | | - * speed and full duplex here. |
---|
4459 | | - */ |
---|
4460 | | - ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; |
---|
4461 | | - an |= MVPP2_GMAC_CONFIG_GMII_SPEED | |
---|
4462 | | - MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
---|
4463 | | - } else if (!phy_interface_mode_is_rgmii(state->interface)) { |
---|
4464 | | - an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; |
---|
4465 | | - } |
---|
4466 | | - |
---|
4467 | | - if (state->duplex) |
---|
4468 | | - an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
---|
4469 | | - if (phylink_test(state->advertising, Pause)) |
---|
4470 | | - an |= MVPP2_GMAC_FC_ADV_EN; |
---|
4471 | | - if (phylink_test(state->advertising, Asym_Pause)) |
---|
4472 | | - an |= MVPP2_GMAC_FC_ADV_ASM_EN; |
---|
4473 | | - |
---|
4474 | | - if (state->interface == PHY_INTERFACE_MODE_SGMII || |
---|
4475 | | - state->interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
4476 | | - state->interface == PHY_INTERFACE_MODE_2500BASEX) { |
---|
4477 | | - an |= MVPP2_GMAC_IN_BAND_AUTONEG; |
---|
4478 | | - ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; |
---|
4479 | | - |
---|
4480 | | - ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | |
---|
4481 | | - MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); |
---|
| 6008 | + /* Configure port type */ |
---|
| 6009 | + if (phy_interface_mode_is_8023z(state->interface)) { |
---|
| 6010 | + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; |
---|
| 6011 | + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; |
---|
4482 | 6012 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | |
---|
4483 | 6013 | MVPP22_CTRL4_DP_CLK_SEL | |
---|
4484 | 6014 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; |
---|
4485 | | - |
---|
4486 | | - if (state->pause & MLO_PAUSE_TX) |
---|
4487 | | - ctrl4 |= MVPP22_CTRL4_TX_FC_EN; |
---|
4488 | | - if (state->pause & MLO_PAUSE_RX) |
---|
4489 | | - ctrl4 |= MVPP22_CTRL4_RX_FC_EN; |
---|
| 6015 | + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
---|
| 6016 | + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; |
---|
| 6017 | + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; |
---|
| 6018 | + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | |
---|
| 6019 | + MVPP22_CTRL4_DP_CLK_SEL | |
---|
| 6020 | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; |
---|
4490 | 6021 | } else if (phy_interface_mode_is_rgmii(state->interface)) { |
---|
4491 | | - an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; |
---|
4492 | | - |
---|
4493 | | - if (state->speed == SPEED_1000) |
---|
4494 | | - an |= MVPP2_GMAC_CONFIG_GMII_SPEED; |
---|
4495 | | - else if (state->speed == SPEED_100) |
---|
4496 | | - an |= MVPP2_GMAC_CONFIG_MII_SPEED; |
---|
4497 | | - |
---|
4498 | 6022 | ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; |
---|
4499 | 6023 | ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | |
---|
4500 | 6024 | MVPP22_CTRL4_SYNC_BYPASS_DIS | |
---|
4501 | 6025 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; |
---|
4502 | 6026 | } |
---|
4503 | 6027 | |
---|
4504 | | - writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
4505 | | - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); |
---|
4506 | | - writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); |
---|
4507 | | - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4508 | | - |
---|
4509 | | - if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { |
---|
4510 | | - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & |
---|
4511 | | - MVPP2_GMAC_PORT_RESET_MASK) |
---|
4512 | | - continue; |
---|
| 6028 | + /* Configure negotiation style */ |
---|
| 6029 | + if (!phylink_autoneg_inband(mode)) { |
---|
| 6030 | + /* Phy or fixed speed - no in-band AN, nothing to do, leave the |
---|
| 6031 | + * configured speed, duplex and flow control as-is. |
---|
| 6032 | + */ |
---|
| 6033 | + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
---|
| 6034 | + /* SGMII in-band mode receives the speed and duplex from |
---|
| 6035 | + * the PHY. Flow control information is not received. */ |
---|
| 6036 | + } else if (phy_interface_mode_is_8023z(state->interface)) { |
---|
| 6037 | + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can |
---|
| 6038 | + * they negotiate duplex: they are always operating with a fixed |
---|
| 6039 | + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 |
---|
| 6040 | + * speed and full duplex here. |
---|
| 6041 | + */ |
---|
| 6042 | + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; |
---|
4513 | 6043 | } |
---|
| 6044 | + |
---|
| 6045 | + if (old_ctrl0 != ctrl0) |
---|
| 6046 | + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); |
---|
| 6047 | + if (old_ctrl2 != ctrl2) |
---|
| 6048 | + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); |
---|
| 6049 | + if (old_ctrl4 != ctrl4) |
---|
| 6050 | + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); |
---|
4514 | 6051 | } |
---|
4515 | 6052 | |
---|
4516 | | -static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, |
---|
4517 | | - const struct phylink_link_state *state) |
---|
| 6053 | +static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, |
---|
| 6054 | + phy_interface_t interface) |
---|
4518 | 6055 | { |
---|
4519 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
| 6056 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
4520 | 6057 | |
---|
4521 | 6058 | /* Check for invalid configuration */ |
---|
4522 | | - if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { |
---|
4523 | | - netdev_err(dev, "Invalid mode on %s\n", dev->name); |
---|
4524 | | - return; |
---|
| 6059 | + if (mvpp2_is_xlg(interface) && port->gop_id != 0) { |
---|
| 6060 | + netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); |
---|
| 6061 | + return -EINVAL; |
---|
| 6062 | + } |
---|
| 6063 | + |
---|
| 6064 | + if (port->phy_interface != interface || |
---|
| 6065 | + phylink_autoneg_inband(mode)) { |
---|
| 6066 | + /* Force the link down when changing the interface or if in |
---|
| 6067 | + * in-band mode to ensure we do not change the configuration |
---|
| 6068 | + * while the hardware is indicating link is up. We force both |
---|
| 6069 | + * XLG and GMAC down to ensure that they're both in a known |
---|
| 6070 | + * state. |
---|
| 6071 | + */ |
---|
| 6072 | + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
---|
| 6073 | + MVPP2_GMAC_FORCE_LINK_PASS | |
---|
| 6074 | + MVPP2_GMAC_FORCE_LINK_DOWN, |
---|
| 6075 | + MVPP2_GMAC_FORCE_LINK_DOWN); |
---|
| 6076 | + |
---|
| 6077 | + if (mvpp2_port_supports_xlg(port)) |
---|
| 6078 | + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, |
---|
| 6079 | + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
---|
| 6080 | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, |
---|
| 6081 | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); |
---|
4525 | 6082 | } |
---|
4526 | 6083 | |
---|
4527 | 6084 | /* Make sure the port is disabled when reconfiguring the mode */ |
---|
4528 | 6085 | mvpp2_port_disable(port); |
---|
4529 | 6086 | |
---|
4530 | | - if (port->priv->hw_version == MVPP22 && |
---|
4531 | | - port->phy_interface != state->interface) { |
---|
4532 | | - port->phy_interface = state->interface; |
---|
| 6087 | + if (port->phy_interface != interface) { |
---|
| 6088 | + /* Place GMAC into reset */ |
---|
| 6089 | + mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, |
---|
| 6090 | + MVPP2_GMAC_PORT_RESET_MASK, |
---|
| 6091 | + MVPP2_GMAC_PORT_RESET_MASK); |
---|
4533 | 6092 | |
---|
4534 | | - /* Reconfigure the serdes lanes */ |
---|
4535 | | - phy_power_off(port->comphy); |
---|
4536 | | - mvpp22_mode_reconfigure(port); |
---|
| 6093 | + if (port->priv->hw_version == MVPP22) { |
---|
| 6094 | + mvpp22_gop_mask_irq(port); |
---|
| 6095 | + |
---|
| 6096 | + phy_power_off(port->comphy); |
---|
| 6097 | + } |
---|
4537 | 6098 | } |
---|
4538 | 6099 | |
---|
| 6100 | + /* Select the appropriate PCS operations depending on the |
---|
| 6101 | + * configured interface mode. We will only switch to a mode |
---|
| 6102 | + * that the validate() checks have already passed. |
---|
| 6103 | + */ |
---|
| 6104 | + if (mvpp2_is_xlg(interface)) |
---|
| 6105 | + port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops; |
---|
| 6106 | + else |
---|
| 6107 | + port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops; |
---|
| 6108 | + |
---|
| 6109 | + return 0; |
---|
| 6110 | +} |
---|
| 6111 | + |
---|
| 6112 | +static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, |
---|
| 6113 | + phy_interface_t interface) |
---|
| 6114 | +{ |
---|
| 6115 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
| 6116 | + int ret; |
---|
| 6117 | + |
---|
| 6118 | + ret = mvpp2__mac_prepare(config, mode, interface); |
---|
| 6119 | + if (ret == 0) |
---|
| 6120 | + phylink_set_pcs(port->phylink, &port->phylink_pcs); |
---|
| 6121 | + |
---|
| 6122 | + return ret; |
---|
| 6123 | +} |
---|
| 6124 | + |
---|
| 6125 | +static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, |
---|
| 6126 | + const struct phylink_link_state *state) |
---|
| 6127 | +{ |
---|
| 6128 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
| 6129 | + |
---|
4539 | 6130 | /* mac (re)configuration */ |
---|
4540 | | - if (state->interface == PHY_INTERFACE_MODE_10GKR) |
---|
| 6131 | + if (mvpp2_is_xlg(state->interface)) |
---|
4541 | 6132 | mvpp2_xlg_config(port, mode, state); |
---|
4542 | 6133 | else if (phy_interface_mode_is_rgmii(state->interface) || |
---|
4543 | | - state->interface == PHY_INTERFACE_MODE_SGMII || |
---|
4544 | | - state->interface == PHY_INTERFACE_MODE_1000BASEX || |
---|
4545 | | - state->interface == PHY_INTERFACE_MODE_2500BASEX) |
---|
| 6134 | + phy_interface_mode_is_8023z(state->interface) || |
---|
| 6135 | + state->interface == PHY_INTERFACE_MODE_SGMII) |
---|
4546 | 6136 | mvpp2_gmac_config(port, mode, state); |
---|
4547 | 6137 | |
---|
4548 | 6138 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) |
---|
4549 | 6139 | mvpp2_port_loopback_set(port, state); |
---|
4550 | | - |
---|
4551 | | - mvpp2_port_enable(port); |
---|
4552 | 6140 | } |
---|
4553 | 6141 | |
---|
4554 | | -static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, |
---|
4555 | | - phy_interface_t interface, struct phy_device *phy) |
---|
| 6142 | +static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, |
---|
| 6143 | + phy_interface_t interface) |
---|
4556 | 6144 | { |
---|
4557 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
| 6145 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
| 6146 | + |
---|
| 6147 | + if (port->priv->hw_version == MVPP22 && |
---|
| 6148 | + port->phy_interface != interface) { |
---|
| 6149 | + port->phy_interface = interface; |
---|
| 6150 | + |
---|
| 6151 | + /* Reconfigure the serdes lanes */ |
---|
| 6152 | + mvpp22_mode_reconfigure(port); |
---|
| 6153 | + |
---|
| 6154 | + /* Unmask interrupts */ |
---|
| 6155 | + mvpp22_gop_unmask_irq(port); |
---|
| 6156 | + } |
---|
| 6157 | + |
---|
| 6158 | + if (!mvpp2_is_xlg(interface)) { |
---|
| 6159 | + /* Release GMAC reset and wait */ |
---|
| 6160 | + mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, |
---|
| 6161 | + MVPP2_GMAC_PORT_RESET_MASK, 0); |
---|
| 6162 | + |
---|
| 6163 | + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & |
---|
| 6164 | + MVPP2_GMAC_PORT_RESET_MASK) |
---|
| 6165 | + continue; |
---|
| 6166 | + } |
---|
| 6167 | + |
---|
| 6168 | + mvpp2_port_enable(port); |
---|
| 6169 | + |
---|
| 6170 | + /* Allow the link to come up if in in-band mode, otherwise the |
---|
| 6171 | + * link is forced via mac_link_down()/mac_link_up() |
---|
| 6172 | + */ |
---|
| 6173 | + if (phylink_autoneg_inband(mode)) { |
---|
| 6174 | + if (mvpp2_is_xlg(interface)) |
---|
| 6175 | + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, |
---|
| 6176 | + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
---|
| 6177 | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); |
---|
| 6178 | + else |
---|
| 6179 | + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
---|
| 6180 | + MVPP2_GMAC_FORCE_LINK_PASS | |
---|
| 6181 | + MVPP2_GMAC_FORCE_LINK_DOWN, 0); |
---|
| 6182 | + } |
---|
| 6183 | + |
---|
| 6184 | + return 0; |
---|
| 6185 | +} |
---|
| 6186 | + |
---|
| 6187 | +static void mvpp2_mac_link_up(struct phylink_config *config, |
---|
| 6188 | + struct phy_device *phy, |
---|
| 6189 | + unsigned int mode, phy_interface_t interface, |
---|
| 6190 | + int speed, int duplex, |
---|
| 6191 | + bool tx_pause, bool rx_pause) |
---|
| 6192 | +{ |
---|
| 6193 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
4558 | 6194 | u32 val; |
---|
4559 | 6195 | |
---|
4560 | | - if (!phylink_autoneg_inband(mode) && |
---|
4561 | | - interface != PHY_INTERFACE_MODE_10GKR) { |
---|
4562 | | - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4563 | | - val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; |
---|
4564 | | - if (phy_interface_mode_is_rgmii(interface)) |
---|
4565 | | - val |= MVPP2_GMAC_FORCE_LINK_PASS; |
---|
4566 | | - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 6196 | + if (mvpp2_is_xlg(interface)) { |
---|
| 6197 | + if (!phylink_autoneg_inband(mode)) { |
---|
| 6198 | + val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
---|
| 6199 | + if (tx_pause) |
---|
| 6200 | + val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; |
---|
| 6201 | + if (rx_pause) |
---|
| 6202 | + val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; |
---|
| 6203 | + |
---|
| 6204 | + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, |
---|
| 6205 | + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | |
---|
| 6206 | + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
---|
| 6207 | + MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | |
---|
| 6208 | + MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); |
---|
| 6209 | + } |
---|
| 6210 | + } else { |
---|
| 6211 | + if (!phylink_autoneg_inband(mode)) { |
---|
| 6212 | + val = MVPP2_GMAC_FORCE_LINK_PASS; |
---|
| 6213 | + |
---|
| 6214 | + if (speed == SPEED_1000 || speed == SPEED_2500) |
---|
| 6215 | + val |= MVPP2_GMAC_CONFIG_GMII_SPEED; |
---|
| 6216 | + else if (speed == SPEED_100) |
---|
| 6217 | + val |= MVPP2_GMAC_CONFIG_MII_SPEED; |
---|
| 6218 | + |
---|
| 6219 | + if (duplex == DUPLEX_FULL) |
---|
| 6220 | + val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
---|
| 6221 | + |
---|
| 6222 | + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
---|
| 6223 | + MVPP2_GMAC_FORCE_LINK_DOWN | |
---|
| 6224 | + MVPP2_GMAC_FORCE_LINK_PASS | |
---|
| 6225 | + MVPP2_GMAC_CONFIG_MII_SPEED | |
---|
| 6226 | + MVPP2_GMAC_CONFIG_GMII_SPEED | |
---|
| 6227 | + MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); |
---|
| 6228 | + } |
---|
| 6229 | + |
---|
| 6230 | + /* We can always update the flow control enable bits; |
---|
| 6231 | + * these will only be effective if flow control AN |
---|
| 6232 | + * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. |
---|
| 6233 | + */ |
---|
| 6234 | + val = 0; |
---|
| 6235 | + if (tx_pause) |
---|
| 6236 | + val |= MVPP22_CTRL4_TX_FC_EN; |
---|
| 6237 | + if (rx_pause) |
---|
| 6238 | + val |= MVPP22_CTRL4_RX_FC_EN; |
---|
| 6239 | + |
---|
| 6240 | + mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, |
---|
| 6241 | + MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, |
---|
| 6242 | + val); |
---|
4567 | 6243 | } |
---|
4568 | 6244 | |
---|
4569 | 6245 | mvpp2_port_enable(port); |
---|
4570 | 6246 | |
---|
4571 | 6247 | mvpp2_egress_enable(port); |
---|
4572 | 6248 | mvpp2_ingress_enable(port); |
---|
4573 | | - netif_tx_wake_all_queues(dev); |
---|
| 6249 | + netif_tx_wake_all_queues(port->dev); |
---|
4574 | 6250 | } |
---|
4575 | 6251 | |
---|
4576 | | -static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, |
---|
4577 | | - phy_interface_t interface) |
---|
| 6252 | +static void mvpp2_mac_link_down(struct phylink_config *config, |
---|
| 6253 | + unsigned int mode, phy_interface_t interface) |
---|
4578 | 6254 | { |
---|
4579 | | - struct mvpp2_port *port = netdev_priv(dev); |
---|
| 6255 | + struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
---|
4580 | 6256 | u32 val; |
---|
4581 | 6257 | |
---|
4582 | | - if (!phylink_autoneg_inband(mode) && |
---|
4583 | | - interface != PHY_INTERFACE_MODE_10GKR) { |
---|
4584 | | - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
4585 | | - val &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
---|
4586 | | - val |= MVPP2_GMAC_FORCE_LINK_DOWN; |
---|
4587 | | - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 6258 | + if (!phylink_autoneg_inband(mode)) { |
---|
| 6259 | + if (mvpp2_is_xlg(interface)) { |
---|
| 6260 | + val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
---|
| 6261 | + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
---|
| 6262 | + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; |
---|
| 6263 | + writel(val, port->base + MVPP22_XLG_CTRL0_REG); |
---|
| 6264 | + } else { |
---|
| 6265 | + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 6266 | + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
---|
| 6267 | + val |= MVPP2_GMAC_FORCE_LINK_DOWN; |
---|
| 6268 | + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
---|
| 6269 | + } |
---|
4588 | 6270 | } |
---|
4589 | 6271 | |
---|
4590 | | - netif_tx_stop_all_queues(dev); |
---|
| 6272 | + netif_tx_stop_all_queues(port->dev); |
---|
4591 | 6273 | mvpp2_egress_disable(port); |
---|
4592 | 6274 | mvpp2_ingress_disable(port); |
---|
4593 | | - |
---|
4594 | | - /* When using link interrupts to notify phylink of a MAC state change, |
---|
4595 | | - * we do not want the port to be disabled (we want to receive further |
---|
4596 | | - * interrupts, to be notified when the port will have a link later). |
---|
4597 | | - */ |
---|
4598 | | - if (!port->has_phy) |
---|
4599 | | - return; |
---|
4600 | 6275 | |
---|
4601 | 6276 | mvpp2_port_disable(port); |
---|
4602 | 6277 | } |
---|
4603 | 6278 | |
---|
4604 | 6279 | static const struct phylink_mac_ops mvpp2_phylink_ops = { |
---|
4605 | 6280 | .validate = mvpp2_phylink_validate, |
---|
4606 | | - .mac_link_state = mvpp2_phylink_mac_link_state, |
---|
4607 | | - .mac_an_restart = mvpp2_mac_an_restart, |
---|
| 6281 | + .mac_prepare = mvpp2_mac_prepare, |
---|
4608 | 6282 | .mac_config = mvpp2_mac_config, |
---|
| 6283 | + .mac_finish = mvpp2_mac_finish, |
---|
4609 | 6284 | .mac_link_up = mvpp2_mac_link_up, |
---|
4610 | 6285 | .mac_link_down = mvpp2_mac_link_down, |
---|
4611 | 6286 | }; |
---|
| 6287 | + |
---|
| 6288 | +/* Work-around for ACPI */ |
---|
| 6289 | +static void mvpp2_acpi_start(struct mvpp2_port *port) |
---|
| 6290 | +{ |
---|
| 6291 | + /* Phylink isn't used as of now for ACPI, so the MAC has to be |
---|
| 6292 | + * configured manually when the interface is started. This will |
---|
| 6293 | + * be removed as soon as the phylink ACPI support lands in. |
---|
| 6294 | + */ |
---|
| 6295 | + struct phylink_link_state state = { |
---|
| 6296 | + .interface = port->phy_interface, |
---|
| 6297 | + }; |
---|
| 6298 | + mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND, |
---|
| 6299 | + port->phy_interface); |
---|
| 6300 | + mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); |
---|
| 6301 | + port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND, |
---|
| 6302 | + port->phy_interface, |
---|
| 6303 | + state.advertising, false); |
---|
| 6304 | + mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, |
---|
| 6305 | + port->phy_interface); |
---|
| 6306 | + mvpp2_mac_link_up(&port->phylink_config, NULL, |
---|
| 6307 | + MLO_AN_INBAND, port->phy_interface, |
---|
| 6308 | + SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); |
---|
| 6309 | +} |
---|
4612 | 6310 | |
---|
4613 | 6311 | /* Ports initialization */ |
---|
4614 | 6312 | static int mvpp2_port_probe(struct platform_device *pdev, |
---|
.. | .. |
---|
4619 | 6317 | struct mvpp2_port *port; |
---|
4620 | 6318 | struct mvpp2_port_pcpu *port_pcpu; |
---|
4621 | 6319 | struct device_node *port_node = to_of_node(port_fwnode); |
---|
| 6320 | + netdev_features_t features; |
---|
4622 | 6321 | struct net_device *dev; |
---|
4623 | | - struct resource *res; |
---|
4624 | 6322 | struct phylink *phylink; |
---|
4625 | 6323 | char *mac_from = ""; |
---|
4626 | | - unsigned int ntxqs, nrxqs; |
---|
| 6324 | + unsigned int ntxqs, nrxqs, thread; |
---|
| 6325 | + unsigned long flags = 0; |
---|
4627 | 6326 | bool has_tx_irqs; |
---|
4628 | 6327 | u32 id; |
---|
4629 | | - int features; |
---|
4630 | 6328 | int phy_mode; |
---|
4631 | | - int err, i, cpu; |
---|
| 6329 | + int err, i; |
---|
4632 | 6330 | |
---|
4633 | | - if (port_node) { |
---|
4634 | | - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); |
---|
4635 | | - } else { |
---|
4636 | | - has_tx_irqs = true; |
---|
4637 | | - queue_mode = MVPP2_QDIST_MULTI_MODE; |
---|
| 6331 | + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); |
---|
| 6332 | + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { |
---|
| 6333 | + dev_err(&pdev->dev, |
---|
| 6334 | + "not enough IRQs to support multi queue mode\n"); |
---|
| 6335 | + return -EINVAL; |
---|
4638 | 6336 | } |
---|
4639 | 6337 | |
---|
4640 | | - if (!has_tx_irqs) |
---|
4641 | | - queue_mode = MVPP2_QDIST_SINGLE_MODE; |
---|
4642 | | - |
---|
4643 | 6338 | ntxqs = MVPP2_MAX_TXQ; |
---|
4644 | | - if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) |
---|
4645 | | - nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); |
---|
4646 | | - else |
---|
4647 | | - nrxqs = MVPP2_DEFAULT_RXQ; |
---|
| 6339 | + nrxqs = mvpp2_get_nrxqs(priv); |
---|
4648 | 6340 | |
---|
4649 | 6341 | dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); |
---|
4650 | 6342 | if (!dev) |
---|
.. | .. |
---|
4656 | 6348 | err = phy_mode; |
---|
4657 | 6349 | goto err_free_netdev; |
---|
4658 | 6350 | } |
---|
| 6351 | + |
---|
| 6352 | + /* |
---|
| 6353 | + * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. |
---|
| 6354 | + * Existing usage of 10GBASE-KR is not correct; no backplane |
---|
| 6355 | + * negotiation is done, and this driver does not actually support |
---|
| 6356 | + * 10GBASE-KR. |
---|
| 6357 | + */ |
---|
| 6358 | + if (phy_mode == PHY_INTERFACE_MODE_10GKR) |
---|
| 6359 | + phy_mode = PHY_INTERFACE_MODE_10GBASER; |
---|
4659 | 6360 | |
---|
4660 | 6361 | if (port_node) { |
---|
4661 | 6362 | comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); |
---|
.. | .. |
---|
4687 | 6388 | port->nrxqs = nrxqs; |
---|
4688 | 6389 | port->priv = priv; |
---|
4689 | 6390 | port->has_tx_irqs = has_tx_irqs; |
---|
| 6391 | + port->flags = flags; |
---|
4690 | 6392 | |
---|
4691 | 6393 | err = mvpp2_queue_vectors_init(port, port_node); |
---|
4692 | 6394 | if (err) |
---|
4693 | 6395 | goto err_free_netdev; |
---|
4694 | 6396 | |
---|
4695 | 6397 | if (port_node) |
---|
4696 | | - port->link_irq = of_irq_get_byname(port_node, "link"); |
---|
| 6398 | + port->port_irq = of_irq_get_byname(port_node, "link"); |
---|
4697 | 6399 | else |
---|
4698 | | - port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); |
---|
4699 | | - if (port->link_irq == -EPROBE_DEFER) { |
---|
| 6400 | + port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); |
---|
| 6401 | + if (port->port_irq == -EPROBE_DEFER) { |
---|
4700 | 6402 | err = -EPROBE_DEFER; |
---|
4701 | 6403 | goto err_deinit_qvecs; |
---|
4702 | 6404 | } |
---|
4703 | | - if (port->link_irq <= 0) |
---|
| 6405 | + if (port->port_irq <= 0) |
---|
4704 | 6406 | /* the link irq is optional */ |
---|
4705 | | - port->link_irq = 0; |
---|
| 6407 | + port->port_irq = 0; |
---|
4706 | 6408 | |
---|
4707 | 6409 | if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) |
---|
4708 | 6410 | port->flags |= MVPP2_F_LOOPBACK; |
---|
.. | .. |
---|
4718 | 6420 | port->comphy = comphy; |
---|
4719 | 6421 | |
---|
4720 | 6422 | if (priv->hw_version == MVPP21) { |
---|
4721 | | - res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); |
---|
4722 | | - port->base = devm_ioremap_resource(&pdev->dev, res); |
---|
| 6423 | + port->base = devm_platform_ioremap_resource(pdev, 2 + id); |
---|
4723 | 6424 | if (IS_ERR(port->base)) { |
---|
4724 | 6425 | err = PTR_ERR(port->base); |
---|
4725 | 6426 | goto err_free_irq; |
---|
.. | .. |
---|
4740 | 6441 | port->stats_base = port->priv->iface_base + |
---|
4741 | 6442 | MVPP22_MIB_COUNTERS_OFFSET + |
---|
4742 | 6443 | port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; |
---|
| 6444 | + |
---|
| 6445 | + /* We may want a property to describe whether we should use |
---|
| 6446 | + * MAC hardware timestamping. |
---|
| 6447 | + */ |
---|
| 6448 | + if (priv->tai) |
---|
| 6449 | + port->hwtstamp = true; |
---|
4743 | 6450 | } |
---|
4744 | 6451 | |
---|
4745 | 6452 | /* Alloc per-cpu and ethtool stats */ |
---|
.. | .. |
---|
4750 | 6457 | } |
---|
4751 | 6458 | |
---|
4752 | 6459 | port->ethtool_stats = devm_kcalloc(&pdev->dev, |
---|
4753 | | - ARRAY_SIZE(mvpp2_ethtool_regs), |
---|
| 6460 | + MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), |
---|
4754 | 6461 | sizeof(u64), GFP_KERNEL); |
---|
4755 | 6462 | if (!port->ethtool_stats) { |
---|
4756 | 6463 | err = -ENOMEM; |
---|
.. | .. |
---|
4774 | 6481 | |
---|
4775 | 6482 | mvpp2_port_periodic_xon_disable(port); |
---|
4776 | 6483 | |
---|
4777 | | - mvpp2_port_reset(port); |
---|
| 6484 | + mvpp2_mac_reset_assert(port); |
---|
| 6485 | + mvpp22_pcs_reset_assert(port); |
---|
4778 | 6486 | |
---|
4779 | 6487 | port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); |
---|
4780 | 6488 | if (!port->pcpu) { |
---|
.. | .. |
---|
4783 | 6491 | } |
---|
4784 | 6492 | |
---|
4785 | 6493 | if (!port->has_tx_irqs) { |
---|
4786 | | - for_each_present_cpu(cpu) { |
---|
4787 | | - port_pcpu = per_cpu_ptr(port->pcpu, cpu); |
---|
| 6494 | + for (thread = 0; thread < priv->nthreads; thread++) { |
---|
| 6495 | + port_pcpu = per_cpu_ptr(port->pcpu, thread); |
---|
4788 | 6496 | |
---|
4789 | 6497 | hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, |
---|
4790 | | - HRTIMER_MODE_REL_PINNED); |
---|
| 6498 | + HRTIMER_MODE_REL_PINNED_SOFT); |
---|
4791 | 6499 | port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; |
---|
4792 | 6500 | port_pcpu->timer_scheduled = false; |
---|
4793 | | - |
---|
4794 | | - tasklet_init(&port_pcpu->tx_done_tasklet, |
---|
4795 | | - mvpp2_tx_proc_cb, |
---|
4796 | | - (unsigned long)dev); |
---|
| 6501 | + port_pcpu->dev = dev; |
---|
4797 | 6502 | } |
---|
4798 | 6503 | } |
---|
4799 | 6504 | |
---|
.. | .. |
---|
4803 | 6508 | dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | |
---|
4804 | 6509 | NETIF_F_HW_VLAN_CTAG_FILTER; |
---|
4805 | 6510 | |
---|
4806 | | - if (mvpp22_rss_is_supported()) |
---|
| 6511 | + if (mvpp22_rss_is_supported()) { |
---|
4807 | 6512 | dev->hw_features |= NETIF_F_RXHASH; |
---|
4808 | | - |
---|
4809 | | - if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { |
---|
4810 | | - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
---|
4811 | | - dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
---|
| 6513 | + dev->features |= NETIF_F_NTUPLE; |
---|
4812 | 6514 | } |
---|
| 6515 | + |
---|
| 6516 | + if (!port->priv->percpu_pools) |
---|
| 6517 | + mvpp2_set_hw_csum(port, port->pool_long->id); |
---|
4813 | 6518 | |
---|
4814 | 6519 | dev->vlan_features |= features; |
---|
4815 | 6520 | dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; |
---|
.. | .. |
---|
4823 | 6528 | |
---|
4824 | 6529 | /* Phylink isn't used w/ ACPI as of now */ |
---|
4825 | 6530 | if (port_node) { |
---|
4826 | | - phylink = phylink_create(dev, port_fwnode, phy_mode, |
---|
4827 | | - &mvpp2_phylink_ops); |
---|
| 6531 | + port->phylink_config.dev = &dev->dev; |
---|
| 6532 | + port->phylink_config.type = PHYLINK_NETDEV; |
---|
| 6533 | + |
---|
| 6534 | + phylink = phylink_create(&port->phylink_config, port_fwnode, |
---|
| 6535 | + phy_mode, &mvpp2_phylink_ops); |
---|
4828 | 6536 | if (IS_ERR(phylink)) { |
---|
4829 | 6537 | err = PTR_ERR(phylink); |
---|
4830 | 6538 | goto err_free_port_pcpu; |
---|
.. | .. |
---|
4832 | 6540 | port->phylink = phylink; |
---|
4833 | 6541 | } else { |
---|
4834 | 6542 | port->phylink = NULL; |
---|
| 6543 | + } |
---|
| 6544 | + |
---|
| 6545 | + /* Cycle the comphy to power it down, saving 270mW per port - |
---|
| 6546 | + * don't worry about an error powering it up. When the comphy |
---|
| 6547 | + * driver does this, we can remove this code. |
---|
| 6548 | + */ |
---|
| 6549 | + if (port->comphy) { |
---|
| 6550 | + err = mvpp22_comphy_init(port); |
---|
| 6551 | + if (err == 0) |
---|
| 6552 | + phy_power_off(port->comphy); |
---|
4835 | 6553 | } |
---|
4836 | 6554 | |
---|
4837 | 6555 | err = register_netdev(dev); |
---|
.. | .. |
---|
4856 | 6574 | err_free_stats: |
---|
4857 | 6575 | free_percpu(port->stats); |
---|
4858 | 6576 | err_free_irq: |
---|
4859 | | - if (port->link_irq) |
---|
4860 | | - irq_dispose_mapping(port->link_irq); |
---|
| 6577 | + if (port->port_irq) |
---|
| 6578 | + irq_dispose_mapping(port->port_irq); |
---|
4861 | 6579 | err_deinit_qvecs: |
---|
4862 | 6580 | mvpp2_queue_vectors_deinit(port); |
---|
4863 | 6581 | err_free_netdev: |
---|
.. | .. |
---|
4878 | 6596 | for (i = 0; i < port->ntxqs; i++) |
---|
4879 | 6597 | free_percpu(port->txqs[i]->pcpu); |
---|
4880 | 6598 | mvpp2_queue_vectors_deinit(port); |
---|
4881 | | - if (port->link_irq) |
---|
4882 | | - irq_dispose_mapping(port->link_irq); |
---|
| 6599 | + if (port->port_irq) |
---|
| 6600 | + irq_dispose_mapping(port->port_irq); |
---|
4883 | 6601 | free_netdev(port->dev); |
---|
4884 | 6602 | } |
---|
4885 | 6603 | |
---|
.. | .. |
---|
5068 | 6786 | } |
---|
5069 | 6787 | |
---|
5070 | 6788 | /* Allocate and initialize aggregated TXQs */ |
---|
5071 | | - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), |
---|
| 6789 | + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, |
---|
5072 | 6790 | sizeof(*priv->aggr_txqs), |
---|
5073 | 6791 | GFP_KERNEL); |
---|
5074 | 6792 | if (!priv->aggr_txqs) |
---|
5075 | 6793 | return -ENOMEM; |
---|
5076 | 6794 | |
---|
5077 | | - for_each_present_cpu(i) { |
---|
| 6795 | + for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
---|
5078 | 6796 | priv->aggr_txqs[i].id = i; |
---|
5079 | 6797 | priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; |
---|
5080 | 6798 | err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); |
---|
.. | .. |
---|
5098 | 6816 | mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); |
---|
5099 | 6817 | |
---|
5100 | 6818 | /* Buffer Manager initialization */ |
---|
5101 | | - err = mvpp2_bm_init(pdev, priv); |
---|
| 6819 | + err = mvpp2_bm_init(&pdev->dev, priv); |
---|
5102 | 6820 | if (err < 0) |
---|
5103 | 6821 | return err; |
---|
5104 | 6822 | |
---|
.. | .. |
---|
5121 | 6839 | struct mvpp2 *priv; |
---|
5122 | 6840 | struct resource *res; |
---|
5123 | 6841 | void __iomem *base; |
---|
5124 | | - int i; |
---|
| 6842 | + int i, shared; |
---|
5125 | 6843 | int err; |
---|
5126 | 6844 | |
---|
5127 | 6845 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
---|
.. | .. |
---|
5145 | 6863 | if (priv->hw_version == MVPP21) |
---|
5146 | 6864 | queue_mode = MVPP2_QDIST_SINGLE_MODE; |
---|
5147 | 6865 | |
---|
5148 | | - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
---|
5149 | | - base = devm_ioremap_resource(&pdev->dev, res); |
---|
| 6866 | + base = devm_platform_ioremap_resource(pdev, 0); |
---|
5150 | 6867 | if (IS_ERR(base)) |
---|
5151 | 6868 | return PTR_ERR(base); |
---|
5152 | 6869 | |
---|
5153 | 6870 | if (priv->hw_version == MVPP21) { |
---|
5154 | | - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
---|
5155 | | - priv->lms_base = devm_ioremap_resource(&pdev->dev, res); |
---|
| 6871 | + priv->lms_base = devm_platform_ioremap_resource(pdev, 1); |
---|
5156 | 6872 | if (IS_ERR(priv->lms_base)) |
---|
5157 | 6873 | return PTR_ERR(priv->lms_base); |
---|
5158 | 6874 | } else { |
---|
.. | .. |
---|
5190 | 6906 | priv->sysctrl_base = NULL; |
---|
5191 | 6907 | } |
---|
5192 | 6908 | |
---|
| 6909 | + if (priv->hw_version == MVPP22 && |
---|
| 6910 | + mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) |
---|
| 6911 | + priv->percpu_pools = 1; |
---|
| 6912 | + |
---|
5193 | 6913 | mvpp2_setup_bm_pool(); |
---|
| 6914 | + |
---|
| 6915 | + |
---|
| 6916 | + priv->nthreads = min_t(unsigned int, num_present_cpus(), |
---|
| 6917 | + MVPP2_MAX_THREADS); |
---|
| 6918 | + |
---|
| 6919 | + shared = num_present_cpus() - priv->nthreads; |
---|
| 6920 | + if (shared > 0) |
---|
| 6921 | + bitmap_set(&priv->lock_map, 0, |
---|
| 6922 | + min_t(int, shared, MVPP2_MAX_THREADS)); |
---|
5194 | 6923 | |
---|
5195 | 6924 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
---|
5196 | 6925 | u32 addr_space_sz; |
---|
.. | .. |
---|
5284 | 7013 | goto err_axi_clk; |
---|
5285 | 7014 | } |
---|
5286 | 7015 | |
---|
| 7016 | + err = mvpp22_tai_probe(&pdev->dev, priv); |
---|
| 7017 | + if (err < 0) |
---|
| 7018 | + goto err_axi_clk; |
---|
| 7019 | + |
---|
5287 | 7020 | /* Initialize ports */ |
---|
5288 | 7021 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { |
---|
5289 | 7022 | err = mvpp2_port_probe(pdev, port_fwnode, priv); |
---|
.. | .. |
---|
5346 | 7079 | { |
---|
5347 | 7080 | struct mvpp2 *priv = platform_get_drvdata(pdev); |
---|
5348 | 7081 | struct fwnode_handle *fwnode = pdev->dev.fwnode; |
---|
| 7082 | + int i = 0, poolnum = MVPP2_BM_POOLS_NUM; |
---|
5349 | 7083 | struct fwnode_handle *port_fwnode; |
---|
5350 | | - int i = 0; |
---|
5351 | 7084 | |
---|
5352 | 7085 | mvpp2_dbgfs_cleanup(priv); |
---|
5353 | 7086 | |
---|
.. | .. |
---|
5361 | 7094 | |
---|
5362 | 7095 | destroy_workqueue(priv->stats_queue); |
---|
5363 | 7096 | |
---|
5364 | | - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { |
---|
| 7097 | + if (priv->percpu_pools) |
---|
| 7098 | + poolnum = mvpp2_get_nrxqs(priv) * 2; |
---|
| 7099 | + |
---|
| 7100 | + for (i = 0; i < poolnum; i++) { |
---|
5365 | 7101 | struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; |
---|
5366 | 7102 | |
---|
5367 | | - mvpp2_bm_pool_destroy(pdev, priv, bm_pool); |
---|
| 7103 | + mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); |
---|
5368 | 7104 | } |
---|
5369 | 7105 | |
---|
5370 | | - for_each_present_cpu(i) { |
---|
| 7106 | + for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
---|
5371 | 7107 | struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; |
---|
5372 | 7108 | |
---|
5373 | 7109 | dma_free_coherent(&pdev->dev, |
---|
.. | .. |
---|
5401 | 7137 | }; |
---|
5402 | 7138 | MODULE_DEVICE_TABLE(of, mvpp2_match); |
---|
5403 | 7139 | |
---|
| 7140 | +#ifdef CONFIG_ACPI |
---|
5404 | 7141 | static const struct acpi_device_id mvpp2_acpi_match[] = { |
---|
5405 | 7142 | { "MRVL0110", MVPP22 }, |
---|
5406 | 7143 | { }, |
---|
5407 | 7144 | }; |
---|
5408 | 7145 | MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); |
---|
| 7146 | +#endif |
---|
5409 | 7147 | |
---|
5410 | 7148 | static struct platform_driver mvpp2_driver = { |
---|
5411 | 7149 | .probe = mvpp2_probe, |
---|
.. | .. |
---|
5417 | 7155 | }, |
---|
5418 | 7156 | }; |
---|
5419 | 7157 | |
---|
5420 | | -module_platform_driver(mvpp2_driver); |
---|
| 7158 | +static int __init mvpp2_driver_init(void) |
---|
| 7159 | +{ |
---|
| 7160 | + return platform_driver_register(&mvpp2_driver); |
---|
| 7161 | +} |
---|
| 7162 | +module_init(mvpp2_driver_init); |
---|
| 7163 | + |
---|
| 7164 | +static void __exit mvpp2_driver_exit(void) |
---|
| 7165 | +{ |
---|
| 7166 | + platform_driver_unregister(&mvpp2_driver); |
---|
| 7167 | + mvpp2_dbgfs_exit(); |
---|
| 7168 | +} |
---|
| 7169 | +module_exit(mvpp2_driver_exit); |
---|
5421 | 7170 | |
---|
5422 | 7171 | MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); |
---|
5423 | 7172 | MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); |
---|