@@ -62 +62 @@
 #include <net/netevent.h>
 #include <net/addrconf.h>
 #include <net/bonding.h>
-#include <net/addrconf.h>
 #include <linux/uaccess.h>
 #include <linux/crash_dump.h>
 #include <net/udp_tunnel.h>
+#include <net/xfrm.h>
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+#include <net/tls.h>
+#endif
 
 #include "cxgb4.h"
 #include "cxgb4_filter.h"
@@ -83 +86 @@
 #include "sched.h"
 #include "cxgb4_tc_u32.h"
 #include "cxgb4_tc_flower.h"
+#include "cxgb4_tc_mqprio.h"
+#include "cxgb4_tc_matchall.h"
 #include "cxgb4_ptp.h"
 #include "cxgb4_cudbg.h"
 
 char cxgb4_driver_name[] = KBUILD_MODNAME;
 
-#ifdef DRV_VERSION
-#undef DRV_VERSION
-#endif
-#define DRV_VERSION "2.0.0-ko"
-const char cxgb4_driver_version[] = DRV_VERSION;
 #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
 
 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -135 +135 @@
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Chelsio Communications");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 MODULE_FIRMWARE(FW4_FNAME);
 MODULE_FIRMWARE(FW5_FNAME);
@@ -184 +183 @@
 
 LIST_HEAD(adapter_list);
 DEFINE_MUTEX(uld_mutex);
+LIST_HEAD(uld_list);
+
+static int cfg_queues(struct adapter *adap);
 
 static void link_report(struct net_device *dev)
 {
@@ -367 +369 @@
 	int ret;
 	u64 mhash = 0;
 	u64 uhash = 0;
+	/* idx stores the index of allocated filters,
+	 * its size should be modified based on the number of
+	 * MAC addresses that we allocate filters for
+	 */
+
+	u16 idx[1] = {};
 	bool free = false;
 	bool ucast = is_unicast_ether_addr(mac_addr);
 	const u8 *maclist[1] = {mac_addr};
 	struct hash_mac_addr *new_entry;
 
-	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
-				NULL, ucast ? &uhash : &mhash, false);
+	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
+				   idx, ucast ? &uhash : &mhash, false);
 	if (ret < 0)
 		goto out;
 	/* if hash != 0, then add the addr to hash addr list
@@ -411 +419 @@
 		}
 	}
 
-	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
+	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
 	return ret < 0 ? -EINVAL : 0;
 }
 
@@ -427 +435 @@
 	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
 	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
 
-	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
-			     (dev->flags & IFF_PROMISC) ? 1 : 0,
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
+			     mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
 			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 			     sleep_ok);
 }
 
 /**
+ *	cxgb4_change_mac - Update match filter for a MAC address.
+ *	@pi: the port_info
+ *	@viid: the VI id
+ *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
+ *		   or -1
+ *	@addr: the new MAC address value
+ *	@persist: whether a new MAC allocation should be persistent
+ *	@smt_idx: the destination to store the new SMT index.
+ *
+ *	Modifies an MPS filter and sets it to the new MAC address if
+ *	@tcam_idx >= 0, or adds the MAC address to a new filter if
+ *	@tcam_idx < 0. In the latter case the address is added persistently
+ *	if @persist is %true.
+ *	Addresses are programmed to the hash region if the TCAM runs out
+ *	of entries.
+ *
+ */
+int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+		     int *tcam_idx, const u8 *addr, bool persist,
+		     u8 *smt_idx)
+{
+	struct adapter *adapter = pi->adapter;
+	struct hash_mac_addr *entry, *new_entry;
+	int ret;
+
+	ret = t4_change_mac(adapter, adapter->mbox, viid,
+			    *tcam_idx, addr, persist, smt_idx);
+	/* We ran out of TCAM entries. Try programming the hash region. */
+	if (ret == -ENOMEM) {
+		/* If the MAC address to be updated is in the hash addr
+		 * list, update it from the list
+		 */
+		list_for_each_entry(entry, &adapter->mac_hlist, list) {
+			if (entry->iface_mac) {
+				ether_addr_copy(entry->addr, addr);
+				goto set_hash;
+			}
+		}
+		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+		if (!new_entry)
+			return -ENOMEM;
+		ether_addr_copy(new_entry->addr, addr);
+		new_entry->iface_mac = true;
+		list_add_tail(&new_entry->list, &adapter->mac_hlist);
+set_hash:
+		ret = cxgb4_set_addr_hash(pi);
+	} else if (ret >= 0) {
+		*tcam_idx = ret;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
  *	link_start - enable a port
  *	@dev: the port to enable
  *
@@ -441 +503 @@
  */
 static int link_start(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
-	unsigned int mb = pi->adapter->pf;
+	unsigned int mb = pi->adapter->mbox;
+	int ret;
 
 	/*
 	 * We do not set address filters and promiscuity here, the stack does
 	 * that step explicitly.
 	 */
-	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
+			    dev->mtu, -1, -1, -1,
 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
-	if (ret == 0) {
-		ret = t4_change_mac(pi->adapter, mb, pi->viid,
-				    pi->xact_addr_filt, dev->dev_addr, true,
-				    true);
-		if (ret >= 0) {
-			pi->xact_addr_filt = ret;
-			ret = 0;
-		}
-	}
+	if (ret == 0)
+		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
+					    dev->dev_addr, true, &pi->smt_idx);
 	if (ret == 0)
 		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
 				    &pi->link_cfg);
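Note: link_start() and the driver's address-change paths now funnel MAC updates through the new helper instead of open-coding t4_change_mac(). A minimal caller sketch, assuming cxgb4_update_mac_filt() is the thin wrapper around cxgb4_change_mac() used above (example_set_mac and new_mac are illustrative names, not part of the patch):

	/* Sketch: pi->xact_addr_filt carries the TCAM index across calls
	 * (-1 on the first call allocates a filter; >= 0 rewrites it).
	 */
	static int example_set_mac(struct net_device *dev, const u8 *new_mac)
	{
		struct port_info *pi = netdev_priv(dev);
		int ret;

		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    new_mac, true, &pi->smt_idx);
		if (ret < 0)
			return ret;	/* even the hash-region fallback failed */

		memcpy(dev->dev_addr, new_mac, ETH_ALEN);
		return 0;
	}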
@@ -528 +585 @@
 		struct sge_eth_txq *eq;
 
 		eq = container_of(txq, struct sge_eth_txq, q);
-		netif_tx_wake_queue(eq->txq);
+		t4_sge_eth_txq_egress_update(q->adap, eq, -1);
 	} else {
 		struct sge_uld_txq *oq;
 
@@ -604 +661 @@
 
 static void disable_msi(struct adapter *adapter)
 {
-	if (adapter->flags & USING_MSIX) {
+	if (adapter->flags & CXGB4_USING_MSIX) {
 		pci_disable_msix(adapter->pdev);
-		adapter->flags &= ~USING_MSIX;
-	} else if (adapter->flags & USING_MSI) {
+		adapter->flags &= ~CXGB4_USING_MSIX;
+	} else if (adapter->flags & CXGB4_USING_MSI) {
 		pci_disable_msi(adapter->pdev);
-		adapter->flags &= ~USING_MSI;
+		adapter->flags &= ~CXGB4_USING_MSI;
 	}
 }
 
@@ -625 +682 @@
 		adap->swintr = 1;
 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
 	}
-	if (adap->flags & MASTER_PF)
+	if (adap->flags & CXGB4_MASTER_PF)
 		t4_slow_intr_handler(adap);
 	return IRQ_HANDLED;
 }
 
-/*
- * Name the MSI-X interrupts.
- */
-static void name_msix_vecs(struct adapter *adap)
+int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
+		       cpumask_var_t *aff_mask, int idx)
 {
-	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
+	int rv;
 
-	/* non-data interrupts */
-	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
-
-	/* FW events */
-	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
-		 adap->port[0]->name);
-
-	/* Ethernet queues */
-	for_each_port(adap, j) {
-		struct net_device *d = adap->port[j];
-		const struct port_info *pi = netdev_priv(d);
-
-		for (i = 0; i < pi->nqsets; i++, msi_idx++)
-			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
-				 d->name, i);
+	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
+		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
+		return -ENOMEM;
 	}
+
+	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
+			*aff_mask);
+
+	rv = irq_set_affinity_hint(vec, *aff_mask);
+	if (rv)
+		dev_warn(adap->pdev_dev,
+			 "irq_set_affinity_hint %u failed %d\n",
+			 vec, rv);
+
+	return 0;
+}
+
+void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
+{
+	irq_set_affinity_hint(vec, NULL);
+	free_cpumask_var(aff_mask);
 }
 
 static int request_msix_queue_irqs(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
+	struct msix_info *minfo;
 	int err, ethqidx;
-	int msi_index = 2;
 
-	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
-			  adap->msix_info[1].desc, &s->fw_evtq);
+	if (s->fwevtq_msix_idx < 0)
+		return -ENOMEM;
+
+	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
+			  t4_sge_intr_msix, 0,
+			  adap->msix_info[s->fwevtq_msix_idx].desc,
+			  &s->fw_evtq);
 	if (err)
 		return err;
 
 	for_each_ethrxq(s, ethqidx) {
-		err = request_irq(adap->msix_info[msi_index].vec,
+		minfo = s->ethrxq[ethqidx].msix;
+		err = request_irq(minfo->vec,
 				  t4_sge_intr_msix, 0,
-				  adap->msix_info[msi_index].desc,
+				  minfo->desc,
 				  &s->ethrxq[ethqidx].rspq);
 		if (err)
 			goto unwind;
-		msi_index++;
+
+		cxgb4_set_msix_aff(adap, minfo->vec,
+				   &minfo->aff_mask, ethqidx);
 	}
 	return 0;
 
 unwind:
-	while (--ethqidx >= 0)
-		free_irq(adap->msix_info[--msi_index].vec,
-			 &s->ethrxq[ethqidx].rspq);
-	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+	while (--ethqidx >= 0) {
+		minfo = s->ethrxq[ethqidx].msix;
+		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
+	}
+	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
 	return err;
 }
 
 static void free_msix_queue_irqs(struct adapter *adap)
 {
-	int i, msi_index = 2;
 	struct sge *s = &adap->sge;
+	struct msix_info *minfo;
+	int i;
 
-	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
-	for_each_ethrxq(s, i)
-		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
+	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
+	for_each_ethrxq(s, i) {
+		minfo = s->ethrxq[i].msix;
+		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+		free_irq(minfo->vec, &s->ethrxq[i].rspq);
+	}
+}
+
+static int setup_ppod_edram(struct adapter *adap)
+{
+	unsigned int param, val;
+	int ret;
+
+	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
+	 * if firmware supports ppod edram feature or not. If firmware
+	 * returns 1, then driver can enable this feature by sending
+	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
+	 * enable ppod edram feature.
+	 */
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
+
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+	if (ret < 0) {
+		dev_warn(adap->pdev_dev,
+			 "querying PPOD_EDRAM support failed: %d\n",
+			 ret);
+		return -1;
+	}
+
+	if (val != 1)
+		return -1;
+
+	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+	if (ret < 0) {
+		dev_err(adap->pdev_dev,
+			"setting PPOD_EDRAM failed: %d\n", ret);
+		return -1;
+	}
+	return 0;
+}
+
+static void adap_config_hpfilter(struct adapter *adapter)
+{
+	u32 param, val = 0;
+	int ret;
+
+	/* Enable HP filter region. Older fw will fail this request and
+	 * it is fine.
+	 */
+	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
+	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
+			    1, &param, &val);
+
+	/* An error means FW doesn't know about HP filter support,
+	 * it's not a problem, don't return an error.
+	 */
+	if (ret < 0)
+		dev_err(adapter->pdev_dev,
+			"HP filter region isn't supported by FW\n");
+}
+
+static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
+			    u16 rss_size, u16 viid)
+{
+	struct adapter *adap = pi->adapter;
+	int ret;
+
+	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
+				  rss_size);
+	if (ret)
+		return ret;
+
+	/* If Tunnel All Lookup isn't specified in the global RSS
+	 * Configuration, then we need to specify a default Ingress
+	 * Queue for any ingress packets which aren't hashed. We'll
+	 * use our first ingress queue ...
+	 */
+	return t4_config_vi_rss(adap, adap->mbox, viid,
+				FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+				FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+				FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+				FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+				FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+				rss[0]);
 }
 
 /**
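Note: setup_ppod_edram() above follows a common firmware-parameter handshake — read a parameter to probe support, then write it back to switch the feature on. A generalized sketch under that assumption (the helper name is invented; t4_query_params()/t4_set_params() are used exactly as in the hunk):

	/* Sketch: probe-then-enable for an arbitrary FW device parameter. */
	static int example_enable_fw_feature(struct adapter *adap, u32 param)
	{
		u32 val = 0;
		int ret;

		/* 1. Ask firmware whether the feature exists (read returns 1). */
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      &param, &val);
		if (ret < 0 || val != 1)
			return -EOPNOTSUPP;	/* old firmware: feature absent */

		/* 2. Write the same parameter back to enable the feature. */
		return t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				     &param, &val);
	}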
@@ -706 +859 @@
  */
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
 {
-	u16 *rss;
-	int i, err;
 	struct adapter *adapter = pi->adapter;
 	const struct sge_eth_rxq *rxq;
+	int i, err;
+	u16 *rss;
 
 	rxq = &adapter->sge.ethrxq[pi->first_qset];
 	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
@@ -720 +873 @@
 	for (i = 0; i < pi->rss_size; i++, queues++)
 		rss[i] = rxq[*queues].rspq.abs_id;
 
-	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
-				  pi->rss_size, rss, pi->rss_size);
-	/* If Tunnel All Lookup isn't specified in the global RSS
-	 * Configuration, then we need to specify a default Ingress
-	 * Queue for any ingress packets which aren't hashed. We'll
-	 * use our first ingress queue ...
-	 */
-	if (!err)
-		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
-				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
-				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
-				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
-				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
-				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
-				       rss[0]);
+	err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
 	kfree(rss);
 	return err;
 }
@@ -772 +911 @@
 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
 }
 
+void cxgb4_quiesce_rx(struct sge_rspq *q)
+{
+	if (q->handler)
+		napi_disable(&q->napi);
+}
+
 /*
  * Wait until all NAPI handlers are descheduled.
  */
@@ -782 +927 @@
 	for (i = 0; i < adap->sge.ingr_sz; i++) {
 		struct sge_rspq *q = adap->sge.ingr_map[i];
 
-		if (q && q->handler)
-			napi_disable(&q->napi);
+		if (!q)
+			continue;
+
+		cxgb4_quiesce_rx(q);
 	}
 }
 
 /* Disable interrupt and napi handler */
 static void disable_interrupts(struct adapter *adap)
 {
-	if (adap->flags & FULL_INIT_DONE) {
+	struct sge *s = &adap->sge;
+
+	if (adap->flags & CXGB4_FULL_INIT_DONE) {
 		t4_intr_disable(adap);
-		if (adap->flags & USING_MSIX) {
+		if (adap->flags & CXGB4_USING_MSIX) {
 			free_msix_queue_irqs(adap);
-			free_irq(adap->msix_info[0].vec, adap);
+			free_irq(adap->msix_info[s->nd_msix_idx].vec,
+				 adap);
 		} else {
 			free_irq(adap->pdev->irq, adap);
 		}
 		quiesce_rx(adap);
 	}
+}
+
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+	if (q->handler)
+		napi_enable(&q->napi);
+
+	/* 0-increment GTS to start the timer and enable interrupts */
+	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+		     SEINTARM_V(q->intr_params) |
+		     INGRESSQID_V(q->cntxt_id));
 }
 
 /*
@@ -814 +975 @@
 
 		if (!q)
 			continue;
-		if (q->handler)
-			napi_enable(&q->napi);
 
-		/* 0-increment GTS to start the timer and enable interrupts */
-		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
-			     SEINTARM_V(q->intr_params) |
-			     INGRESSQID_V(q->cntxt_id));
+		cxgb4_enable_rx(adap, q);
 	}
 }
 
+static int setup_non_data_intr(struct adapter *adap)
+{
+	int msix;
+
+	adap->sge.nd_msix_idx = -1;
+	if (!(adap->flags & CXGB4_USING_MSIX))
+		return 0;
+
+	/* Request MSI-X vector for non-data interrupt */
+	msix = cxgb4_get_msix_idx_from_bmap(adap);
+	if (msix < 0)
+		return -ENOMEM;
+
+	snprintf(adap->msix_info[msix].desc,
+		 sizeof(adap->msix_info[msix].desc),
+		 "%s", adap->port[0]->name);
+
+	adap->sge.nd_msix_idx = msix;
+	return 0;
+}
 
 static int setup_fw_sge_queues(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	int err = 0;
+	int msix, err = 0;
 
 	bitmap_zero(s->starving_fl, s->egr_sz);
 	bitmap_zero(s->txq_maperr, s->egr_sz);
 
-	if (adap->flags & USING_MSIX)
-		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
-	else {
+	if (adap->flags & CXGB4_USING_MSIX) {
+		s->fwevtq_msix_idx = -1;
+		msix = cxgb4_get_msix_idx_from_bmap(adap);
+		if (msix < 0)
+			return -ENOMEM;
+
+		snprintf(adap->msix_info[msix].desc,
+			 sizeof(adap->msix_info[msix].desc),
+			 "%s-FWeventq", adap->port[0]->name);
+	} else {
 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
 				       NULL, NULL, NULL, -1);
 		if (err)
 			return err;
-		adap->msi_idx = -((int)s->intrq.abs_id + 1);
+		msix = -((int)s->intrq.abs_id + 1);
 	}
 
 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
-			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+			       msix, NULL, fwevtq_handler, NULL, -1);
+	if (err && msix >= 0)
+		cxgb4_free_msix_idx_in_bmap(adap, msix);
+
+	s->fwevtq_msix_idx = msix;
 	return err;
 }
 
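Note: the fixed "vector 0 / vector 1" layout is gone; every consumer now draws vectors from a shared pool via cxgb4_get_msix_idx_from_bmap()/cxgb4_free_msix_idx_in_bmap(), which are defined elsewhere in this file. A sketch of what such a bitmap-backed allocator typically looks like, assuming a struct msix_bmap with a bitmap, a size, and a lock (not shown in this diff):

	/* Sketch: hand out the lowest free MSI-X slot, or fail. */
	static int example_get_msix_idx(struct msix_bmap *bmap)
	{
		unsigned int idx;

		spin_lock(&bmap->lock);
		idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
		if (idx >= bmap->mapsize) {
			spin_unlock(&bmap->lock);
			return -ENOSPC;	/* all vectors already handed out */
		}
		__set_bit(idx, bmap->msix_bmap);
		spin_unlock(&bmap->lock);

		return idx;
	}

Freeing is the mirror image: take the lock and __clear_bit() the index. This is why setup_fw_sge_queues() above must return the slot to the bitmap on the error path.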
@@ -858 +1045 @@
  */
 static int setup_sge_queues(struct adapter *adap)
 {
-	int err, i, j;
-	struct sge *s = &adap->sge;
 	struct sge_uld_rxq_info *rxq_info = NULL;
+	struct sge *s = &adap->sge;
 	unsigned int cmplqid = 0;
+	int err, i, j, msix = 0;
 
 	if (is_uld(adap))
 		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+
+	if (!(adap->flags & CXGB4_USING_MSIX))
+		msix = -((int)s->intrq.abs_id + 1);
 
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
@@ -873 +1063 @@
 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
 
 		for (j = 0; j < pi->nqsets; j++, q++) {
-			if (adap->msi_idx > 0)
-				adap->msi_idx++;
+			if (msix >= 0) {
+				msix = cxgb4_get_msix_idx_from_bmap(adap);
+				if (msix < 0) {
+					err = msix;
+					goto freeout;
+				}
+
+				snprintf(adap->msix_info[msix].desc,
+					 sizeof(adap->msix_info[msix].desc),
+					 "%s-Rx%d", dev->name, j);
+				q->msix = &adap->msix_info[msix];
+			}
+
 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
-					       adap->msi_idx, &q->fl,
+					       msix, &q->fl,
 					       t4_ethrx_handler,
 					       NULL,
 					       t4_get_tp_ch_map(adap,
@@ -886 +1087 @@
 			q->rspq.idx = j;
 			memset(&q->stats, 0, sizeof(q->stats));
 		}
-		for (j = 0; j < pi->nqsets; j++, t++) {
+
+		q = &s->ethrxq[pi->first_qset];
+		for (j = 0; j < pi->nqsets; j++, t++, q++) {
 			err = t4_sge_alloc_eth_txq(adap, t, dev,
 						   netdev_get_tx_queue(dev, j),
-						   s->fw_evtq.cntxt_id);
+						   q->rspq.cntxt_id,
+						   !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
 			if (err)
 				goto freeout;
 		}
@@ -911 +1115 @@
 	if (!is_t4(adap->params.chip)) {
 		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
 					   netdev_get_tx_queue(adap->port[0], 0)
 					   , s->fw_evtq.cntxt_id
-					   );
+					   , false);
 		if (err)
 			goto freeout;
 	}
@@ -929 +1133 @@
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	int txq;
 
@@ -961 +1164 @@
 	}
 #endif /* CONFIG_CHELSIO_T4_DCB */
 
+	if (dev->num_tc) {
+		struct port_info *pi = netdev2pinfo(dev);
+		u8 ver, proto;
+
+		ver = ip_hdr(skb)->version;
+		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
+				     ip_hdr(skb)->protocol;
+
+		/* Send unsupported traffic pattern to normal NIC queues. */
+		txq = netdev_pick_tx(dev, skb, sb_dev);
+		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
+		    skb->encapsulation ||
+		    cxgb4_is_ktls_skb(skb) ||
+		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
+			txq = txq % pi->nqsets;
+
+		return txq;
+	}
+
 	if (select_queue) {
 		txq = (skb_rx_queue_recorded(skb)
 			? skb_get_rx_queue(skb)
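Note: the new dev->num_tc branch keeps offload-incapable traffic off the dedicated TC queues by folding whatever queue the stack picked back into the port's regular queue-set range. Worked example (numbers invented for illustration):

	/* Assume pi->nqsets == 8 regular queue sets, and the stack picked
	 * queue 13 (an mqprio/TC queue) for an encapsulated packet:
	 */
	u16 txq = 13;		/* from netdev_pick_tx() */
	txq = txq % 8;		/* -> 5, a plain NIC TX queue */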
@@ -972 +1194 @@
 		return txq;
 	}
 
-	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)
@@ -1050 +1272 @@
 
 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
-	const struct port_info *pi = netdev_priv(dev);
 	netdev_features_t changed = dev->features ^ features;
+	const struct port_info *pi = netdev_priv(dev);
 	int err;
 
 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
 		return 0;
 
-	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-			    -1, -1, -1,
+	err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
+			    pi->viid_mirror, -1, -1, -1, -1,
 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
 	if (unlikely(err))
 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
@@ -1074 +1296 @@
 	t4_setup_debugfs(adap);
 #endif
 	return 0;
+}
+
+static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
+				       struct sge_eth_rxq *mirror_rxq)
+{
+	if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
+	    !(adap->flags & CXGB4_SHUTTING_DOWN))
+		cxgb4_quiesce_rx(&mirror_rxq->rspq);
+
+	if (adap->flags & CXGB4_USING_MSIX) {
+		cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
+				     mirror_rxq->msix->aff_mask);
+		free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
+		cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
+	}
+
+	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
+}
+
+static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+	struct sge_eth_rxq *mirror_rxq;
+	struct sge *s = &adap->sge;
+	int ret = 0, msix = 0;
+	u16 i, rxqid;
+	u16 *rss;
+
+	if (!pi->vi_mirror_count)
+		return 0;
+
+	if (s->mirror_rxq[pi->port_id])
+		return 0;
+
+	mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
+	if (!mirror_rxq)
+		return -ENOMEM;
+
+	s->mirror_rxq[pi->port_id] = mirror_rxq;
+
+	if (!(adap->flags & CXGB4_USING_MSIX))
+		msix = -((int)adap->sge.intrq.abs_id + 1);
+
+	for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
+		mirror_rxq = &s->mirror_rxq[pi->port_id][i];
+
+		/* Allocate Mirror Rxqs */
+		if (msix >= 0) {
+			msix = cxgb4_get_msix_idx_from_bmap(adap);
+			if (msix < 0) {
+				ret = msix;
+				goto out_free_queues;
+			}
+
+			mirror_rxq->msix = &adap->msix_info[msix];
+			snprintf(mirror_rxq->msix->desc,
+				 sizeof(mirror_rxq->msix->desc),
+				 "%s-mirrorrxq%d", dev->name, i);
+		}
+
+		init_rspq(adap, &mirror_rxq->rspq,
+			  CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
+			  CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
+			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
+			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
+
+		mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
+
+		ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
+				       dev, msix, &mirror_rxq->fl,
+				       t4_ethrx_handler, NULL, 0);
+		if (ret)
+			goto out_free_msix_idx;
+
+		/* Setup MSI-X vectors for Mirror Rxqs */
+		if (adap->flags & CXGB4_USING_MSIX) {
+			ret = request_irq(mirror_rxq->msix->vec,
+					  t4_sge_intr_msix, 0,
+					  mirror_rxq->msix->desc,
+					  &mirror_rxq->rspq);
+			if (ret)
+				goto out_free_rxq;
+
+			cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
+					   &mirror_rxq->msix->aff_mask, i);
+		}
+
+		/* Start NAPI for Mirror Rxqs */
+		cxgb4_enable_rx(adap, &mirror_rxq->rspq);
+	}
+
+	/* Setup RSS for Mirror Rxqs */
+	rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
+	if (!rss) {
+		ret = -ENOMEM;
+		goto out_free_queues;
+	}
+
+	mirror_rxq = &s->mirror_rxq[pi->port_id][0];
+	for (i = 0; i < pi->rss_size; i++)
+		rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
+
+	ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
+	kfree(rss);
+	if (ret)
+		goto out_free_queues;
+
+	return 0;
+
+out_free_rxq:
+	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
+
+out_free_msix_idx:
+	cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
+
+out_free_queues:
+	while (rxqid-- > 0)
+		cxgb4_port_mirror_free_rxq(adap,
+					   &s->mirror_rxq[pi->port_id][rxqid]);
+
+	kfree(s->mirror_rxq[pi->port_id]);
+	s->mirror_rxq[pi->port_id] = NULL;
+	return ret;
+}
+
+static void cxgb4_port_mirror_free_queues(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+	struct sge *s = &adap->sge;
+	u16 i;
+
+	if (!pi->vi_mirror_count)
+		return;
+
+	if (!s->mirror_rxq[pi->port_id])
+		return;
+
+	for (i = 0; i < pi->nmirrorqsets; i++)
+		cxgb4_port_mirror_free_rxq(adap,
+					   &s->mirror_rxq[pi->port_id][i]);
+
+	kfree(s->mirror_rxq[pi->port_id]);
+	s->mirror_rxq[pi->port_id] = NULL;
+}
+
+static int cxgb4_port_mirror_start(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+	int ret, idx = -1;
+
+	if (!pi->vi_mirror_count)
+		return 0;
+
+	/* Mirror VIs can be created dynamically after the stack has
+	 * already set up Rx modes like MTU, promisc, allmulti, etc.
+	 * on the main VI. So, parse what the stack set up on the
+	 * main VI and apply the same to the mirror VI.
+	 */
+	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
+			    dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
+			    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
+	if (ret) {
+		dev_err(adap->pdev_dev,
+			"Failed starting Rx mode for Mirror VI 0x%x, ret: %d\n",
+			pi->viid_mirror, ret);
+		return ret;
+	}
+
+	/* Enable replication bit for the device's MAC address
+	 * in MPS TCAM, so that the packets for the main VI are
+	 * replicated to mirror VI.
+	 */
+	ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
+				    dev->dev_addr, true, NULL);
+	if (ret) {
+		dev_err(adap->pdev_dev,
+			"Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
+			pi->viid_mirror, ret);
+		return ret;
+	}
+
+	/* Enabling a Virtual Interface can result in an interrupt
+	 * during the processing of the VI Enable command and, in some
+	 * paths, result in an attempt to issue another command in the
+	 * interrupt context. Thus, we disable interrupts during the
+	 * course of the VI Enable command ...
+	 */
+	local_bh_disable();
+	ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
+				  false);
+	local_bh_enable();
+	if (ret)
+		dev_err(adap->pdev_dev,
+			"Failed starting Mirror VI 0x%x, ret: %d\n",
+			pi->viid_mirror, ret);
+
+	return ret;
+}
+
+static void cxgb4_port_mirror_stop(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+
+	if (!pi->vi_mirror_count)
+		return;
+
+	t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
+			    false);
+}
+
+int cxgb4_port_mirror_alloc(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+	int ret = 0;
+
+	if (!pi->nmirrorqsets)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&pi->vi_mirror_mutex);
+	if (pi->viid_mirror) {
+		pi->vi_mirror_count++;
+		goto out_unlock;
+	}
+
+	ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
+				  &pi->viid_mirror);
+	if (ret)
+		goto out_unlock;
+
+	pi->vi_mirror_count = 1;
+
+	if (adap->flags & CXGB4_FULL_INIT_DONE) {
+		ret = cxgb4_port_mirror_alloc_queues(dev);
+		if (ret)
+			goto out_free_vi;
+
+		ret = cxgb4_port_mirror_start(dev);
+		if (ret)
+			goto out_free_queues;
+	}
+
+	mutex_unlock(&pi->vi_mirror_mutex);
+	return 0;
+
+out_free_queues:
+	cxgb4_port_mirror_free_queues(dev);
+
+out_free_vi:
+	pi->vi_mirror_count = 0;
+	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
+	pi->viid_mirror = 0;
+
+out_unlock:
+	mutex_unlock(&pi->vi_mirror_mutex);
+	return ret;
+}
+
+void cxgb4_port_mirror_free(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+
+	mutex_lock(&pi->vi_mirror_mutex);
+	if (!pi->viid_mirror)
+		goto out_unlock;
+
+	if (pi->vi_mirror_count > 1) {
+		pi->vi_mirror_count--;
+		goto out_unlock;
+	}
+
+	cxgb4_port_mirror_stop(dev);
+	cxgb4_port_mirror_free_queues(dev);
+
+	pi->vi_mirror_count = 0;
+	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
+	pi->viid_mirror = 0;
+
+out_unlock:
+	mutex_unlock(&pi->vi_mirror_mutex);
 }
 
 /*
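Note: cxgb4_port_mirror_alloc()/cxgb4_port_mirror_free() above implement a mutex-protected refcount: the first user creates the mirror VI (plus queues once the adapter is fully initialized), later users only bump vi_mirror_count, and only the last free tears everything down. Usage sketch (the wrapper function is invented for illustration; the two cxgb4_* calls are the ones added above):

	/* Illustration: two mirror rules on one port share the mirror VI. */
	static int example_two_mirror_users(struct net_device *dev)
	{
		int err;

		err = cxgb4_port_mirror_alloc(dev);	/* first user: VI + queues */
		if (err)
			return err;

		err = cxgb4_port_mirror_alloc(dev);	/* second user: count++ only */
		if (err) {
			cxgb4_port_mirror_free(dev);
			return err;
		}

		cxgb4_port_mirror_free(dev);		/* count 2 -> 1, VI stays */
		cxgb4_port_mirror_free(dev);		/* last user: VI torn down */
		return 0;
	}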
@@ -1236 +1744 @@
 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
 				    unsigned int tid)
 {
-	void **p = &t->tid_tab[tid];
 	struct adapter *adap = container_of(t, struct adapter, tids);
+	void **p = &t->tid_tab[tid - t->tid_base];
 
 	spin_lock_bh(&adap->tid_release_lock);
 	*p = adap->tid_release_head;
@@ -1289 +1797 @@
 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
 		      unsigned short family)
 {
-	struct sk_buff *skb;
 	struct adapter *adap = container_of(t, struct adapter, tids);
+	struct sk_buff *skb;
 
-	WARN_ON(tid >= t->ntids);
+	WARN_ON(tid_out_of_range(&adap->tids, tid));
 
-	if (t->tid_tab[tid]) {
-		t->tid_tab[tid] = NULL;
+	if (t->tid_tab[tid - adap->tids.tid_base]) {
+		t->tid_tab[tid - adap->tids.tid_base] = NULL;
 		atomic_dec(&t->conns_in_use);
 		if (t->hash_base && (tid >= t->hash_base)) {
 			if (family == AF_INET6)
@@ -1327 +1835 @@
 	struct adapter *adap = container_of(t, struct adapter, tids);
 	unsigned int max_ftids = t->nftids + t->nsftids;
 	unsigned int natids = t->natids;
+	unsigned int hpftid_bmap_size;
+	unsigned int eotid_bmap_size;
 	unsigned int stid_bmap_size;
 	unsigned int ftid_bmap_size;
 	size_t size;
 
 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
 	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+	hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
+	eotid_bmap_size = BITS_TO_LONGS(t->neotids);
 	size = t->ntids * sizeof(*t->tid_tab) +
 	       natids * sizeof(*t->atid_tab) +
 	       t->nstids * sizeof(*t->stid_tab) +
 	       t->nsftids * sizeof(*t->stid_tab) +
 	       stid_bmap_size * sizeof(long) +
+	       t->nhpftids * sizeof(*t->hpftid_tab) +
+	       hpftid_bmap_size * sizeof(long) +
 	       max_ftids * sizeof(*t->ftid_tab) +
-	       ftid_bmap_size * sizeof(long);
+	       ftid_bmap_size * sizeof(long) +
+	       t->neotids * sizeof(*t->eotid_tab) +
+	       eotid_bmap_size * sizeof(long);
 
 	t->tid_tab = kvzalloc(size, GFP_KERNEL);
 	if (!t->tid_tab)
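Note: the size computation above packs every table and bitmap into one kvzalloc() region, and the next hunk carves it up by walking pointers forward past each sub-array. A reduced sketch of the same idiom, with invented names (one table plus its bitmap instead of the driver's six):

	struct example_entry {
		void *data;
	};

	struct example_tables {
		struct example_entry *tab;	/* ntab entries */
		unsigned long *bmap;		/* one bit per entry */
	};

	static int example_alloc_tables(struct example_tables *t,
					unsigned int ntab)
	{
		size_t size = ntab * sizeof(*t->tab) +
			      BITS_TO_LONGS(ntab) * sizeof(long);

		/* one allocation backs both the table and its bitmap */
		t->tab = kvzalloc(size, GFP_KERNEL);
		if (!t->tab)
			return -ENOMEM;

		/* the bitmap starts right after the last table entry */
		t->bmap = (unsigned long *)&t->tab[ntab];
		return 0;
	}

One allocation means one failure point and one kvfree() on teardown, at the cost of keeping the size expression and the carving code in lockstep, which is exactly what this hunk and the next one do for the new hpftid and eotid tables.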
@@ -1348 +1864 @@
 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
-	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+	t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+	t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
+	t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
 	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
+	t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
+	t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
 	spin_lock_init(&t->stid_lock);
 	spin_lock_init(&t->atid_lock);
 	spin_lock_init(&t->ftid_lock);
@@ -1362 +1882 @@
 	atomic_set(&t->tids_in_use, 0);
 	atomic_set(&t->conns_in_use, 0);
 	atomic_set(&t->hash_tids_in_use, 0);
+	atomic_set(&t->eotids_in_use, 0);
 
 	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
@@ -1376 +1897 @@
 		if (!t->stid_base &&
 		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
 			__set_bit(0, t->stid_bmap);
+
+		if (t->neotids)
+			bitmap_zero(t->eotid_bmap, t->neotids);
 	}
 
+	if (t->nhpftids)
+		bitmap_zero(t->hpftid_bmap, t->nhpftids);
 	bitmap_zero(t->ftid_bmap, t->nftids);
 	return 0;
 }
@@ -1388 +1914 @@
  *	@stid: the server TID
  *	@sip: local IP address to bind server to
  *	@sport: the server's TCP port
+ *	@vlan: the VLAN header information
  *	@queue: queue to direct messages from this server to
  *
  *	Create an IP server for the given port and address.
@@ -1586 +2113 @@
 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
 
 /**
- * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
- * @chip: chip type
- * @viid: VI id of the given port
- *
- * Return the SMT index for this VI.
- */
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
-{
-	/* In T4/T5, SMT contains 256 SMAC entries organized in
-	 * 128 rows of 2 entries each.
-	 * In T6, SMT contains 256 SMAC entries in 256 rows.
-	 * TODO: The below code needs to be updated when we add support
-	 * for 256 VFs.
-	 */
-	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
-		return ((viid & 0x7f) << 1);
-	else
-		return (viid & 0x7f);
-}
-EXPORT_SYMBOL(cxgb4_tp_smt_idx);
-
-/**
  * cxgb4_port_chan - get the HW channel of a port
  * @dev: the net device for the port
  *
@@ -1618 +2123 @@
 	return netdev2pinfo(dev)->tx_chan;
 }
 EXPORT_SYMBOL(cxgb4_port_chan);
+
+/**
+ * cxgb4_port_e2cchan - get the HW c-channel of a port
+ * @dev: the net device for the port
+ *
+ * Return the HW RX c-channel of the given port.
+ */
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->rx_cchan;
+}
+EXPORT_SYMBOL(cxgb4_port_e2cchan);
 
 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
 {
@@ -2126 +2643 @@
 {
 	unsigned int i;
 
+	if (!is_uld(adap))
+		return;
+
 	mutex_lock(&uld_mutex);
 	list_del(&adap->list_node);
 
@@ -2242 +2762 @@
  */
 static int cxgb_up(struct adapter *adap)
 {
+	struct sge *s = &adap->sge;
 	int err;
 
 	mutex_lock(&uld_mutex);
@@ -2252 +2773 @@
 	if (err)
 		goto freeq;
 
-	if (adap->flags & USING_MSIX) {
-		name_msix_vecs(adap);
-		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
-				  adap->msix_info[0].desc, adap);
-		if (err)
-			goto irq_err;
-		err = request_msix_queue_irqs(adap);
-		if (err) {
-			free_irq(adap->msix_info[0].vec, adap);
+	if (adap->flags & CXGB4_USING_MSIX) {
+		if (s->nd_msix_idx < 0) {
+			err = -ENOMEM;
 			goto irq_err;
 		}
+
+		err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
+				  t4_nondata_intr, 0,
+				  adap->msix_info[s->nd_msix_idx].desc, adap);
+		if (err)
+			goto irq_err;
+
+		err = request_msix_queue_irqs(adap);
+		if (err)
+			goto irq_err_free_nd_msix;
 	} else {
 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
-				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+				  (adap->flags & CXGB4_USING_MSI) ? 0
+								  : IRQF_SHARED,
 				  adap->port[0]->name, adap);
 		if (err)
 			goto irq_err;
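Note: the reworked cxgb_up() grows a dedicated irq_err_free_nd_msix label (see the next hunks) so the non-data vector is released before falling through to the generic irq_err path. This keeps the classic kernel unwind ladder intact: each failure jumps to the label that undoes exactly the steps completed so far. Shape of the pattern, reduced to a sketch with placeholder steps:

	/* step_a/step_b/step_c and the undo_* helpers are placeholders. */
	int step_a(void); int step_b(void); int step_c(void);
	void undo_step_b(void); void undo_step_a(void);

	static int example_bringup(void)
	{
		int err;

		err = step_a();		/* e.g. allocate queues */
		if (err)
			goto out;
		err = step_b();		/* e.g. request non-data IRQ */
		if (err)
			goto undo_a;
		err = step_c();		/* e.g. request queue IRQs */
		if (err)
			goto undo_b;
		return 0;

	undo_b:
		undo_step_b();
	undo_a:
		undo_step_a();
	out:
		return err;
	}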
.. | .. |
---|
2274 | 2800 | enable_rx(adap); |
---|
2275 | 2801 | t4_sge_start(adap); |
---|
2276 | 2802 | t4_intr_enable(adap); |
---|
2277 | | - adap->flags |= FULL_INIT_DONE; |
---|
| 2803 | + adap->flags |= CXGB4_FULL_INIT_DONE; |
---|
2278 | 2804 | mutex_unlock(&uld_mutex); |
---|
2279 | 2805 | |
---|
2280 | 2806 | notify_ulds(adap, CXGB4_STATE_UP); |
---|
.. | .. |
---|
2283 | 2809 | #endif |
---|
2284 | 2810 | return err; |
---|
2285 | 2811 | |
---|
2286 | | - irq_err: |
---|
| 2812 | +irq_err_free_nd_msix: |
---|
| 2813 | + free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); |
---|
| 2814 | +irq_err: |
---|
2287 | 2815 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); |
---|
2288 | | - freeq: |
---|
| 2816 | +freeq: |
---|
2289 | 2817 | t4_free_sge_resources(adap); |
---|
2290 | | - rel_lock: |
---|
| 2818 | +rel_lock: |
---|
2291 | 2819 | mutex_unlock(&uld_mutex); |
---|
2292 | 2820 | return err; |
---|
2293 | 2821 | } |
---|
.. | .. |
---|
2303 | 2831 | t4_sge_stop(adapter); |
---|
2304 | 2832 | t4_free_sge_resources(adapter); |
---|
2305 | 2833 | |
---|
2306 | | - adapter->flags &= ~FULL_INIT_DONE; |
---|
| 2834 | + adapter->flags &= ~CXGB4_FULL_INIT_DONE; |
---|
2307 | 2835 | } |
---|
2308 | 2836 | |
---|
2309 | 2837 | /* |
---|
.. | .. |
---|
2311 | 2839 | */ |
---|
2312 | 2840 | static int cxgb_open(struct net_device *dev) |
---|
2313 | 2841 | { |
---|
2314 | | - int err; |
---|
2315 | 2842 | struct port_info *pi = netdev_priv(dev); |
---|
2316 | 2843 | struct adapter *adapter = pi->adapter; |
---|
| 2844 | + int err; |
---|
2317 | 2845 | |
---|
2318 | 2846 | netif_carrier_off(dev); |
---|
2319 | 2847 | |
---|
2320 | | - if (!(adapter->flags & FULL_INIT_DONE)) { |
---|
| 2848 | + if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) { |
---|
2321 | 2849 | err = cxgb_up(adapter); |
---|
2322 | 2850 | if (err < 0) |
---|
2323 | 2851 | return err; |
---|
.. | .. |
---|
2331 | 2859 | return err; |
---|
2332 | 2860 | |
---|
2333 | 2861 | err = link_start(dev); |
---|
2334 | | - if (!err) |
---|
2335 | | - netif_tx_start_all_queues(dev); |
---|
| 2862 | + if (err) |
---|
| 2863 | + return err; |
---|
| 2864 | + |
---|
| 2865 | + if (pi->nmirrorqsets) { |
---|
| 2866 | + mutex_lock(&pi->vi_mirror_mutex); |
---|
| 2867 | + err = cxgb4_port_mirror_alloc_queues(dev); |
---|
| 2868 | + if (err) |
---|
| 2869 | + goto out_unlock; |
---|
| 2870 | + |
---|
| 2871 | + err = cxgb4_port_mirror_start(dev); |
---|
| 2872 | + if (err) |
---|
| 2873 | + goto out_free_queues; |
---|
| 2874 | + mutex_unlock(&pi->vi_mirror_mutex); |
---|
| 2875 | + } |
---|
| 2876 | + |
---|
| 2877 | + netif_tx_start_all_queues(dev); |
---|
| 2878 | + return 0; |
---|
| 2879 | + |
---|
| 2880 | +out_free_queues: |
---|
| 2881 | + cxgb4_port_mirror_free_queues(dev); |
---|
| 2882 | + |
---|
| 2883 | +out_unlock: |
---|
| 2884 | + mutex_unlock(&pi->vi_mirror_mutex); |
---|
2336 | 2885 | return err; |
---|
2337 | 2886 | } |
---|
2338 | 2887 | |
---|
.. | .. |
---|
2350 | 2899 | cxgb4_dcb_reset(dev); |
---|
2351 | 2900 | dcb_tx_queue_prio_enable(dev, false); |
---|
2352 | 2901 | #endif |
---|
2353 | | - return ret; |
---|
| 2902 | + if (ret) |
---|
| 2903 | + return ret; |
---|
| 2904 | + |
---|
| 2905 | + if (pi->nmirrorqsets) { |
---|
| 2906 | + mutex_lock(&pi->vi_mirror_mutex); |
---|
| 2907 | + cxgb4_port_mirror_stop(dev); |
---|
| 2908 | + cxgb4_port_mirror_free_queues(dev); |
---|
| 2909 | + mutex_unlock(&pi->vi_mirror_mutex); |
---|
| 2910 | + } |
---|
| 2911 | + |
---|
| 2912 | + return 0; |
---|
2354 | 2913 | } |
---|
2355 | 2914 | |
---|
2356 | 2915 | int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, |
---|
.. | .. |
---|
2384 | 2943 | |
---|
2385 | 2944 | /* Clear out filter specifications */ |
---|
2386 | 2945 | memset(&f->fs, 0, sizeof(struct ch_filter_specification)); |
---|
2387 | | - f->fs.val.lport = cpu_to_be16(sport); |
---|
| 2946 | + f->fs.val.lport = be16_to_cpu(sport); |
---|
2388 | 2947 | f->fs.mask.lport = ~0; |
---|
2389 | 2948 | val = (u8 *)&sip; |
---|
2390 | 2949 | if ((val[0] | val[1] | val[2] | val[3]) != 0) { |
---|
.. | .. |
---|
2616 | 3175 | |
---|
2617 | 3176 | static int cxgb_change_mtu(struct net_device *dev, int new_mtu) |
---|
2618 | 3177 | { |
---|
2619 | | - int ret; |
---|
2620 | 3178 | struct port_info *pi = netdev_priv(dev); |
---|
| 3179 | + int ret; |
---|
2621 | 3180 | |
---|
2622 | | - ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, |
---|
2623 | | - -1, -1, -1, true); |
---|
| 3181 | + ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, |
---|
| 3182 | + pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); |
---|
2624 | 3183 | if (!ret) |
---|
2625 | 3184 | dev->mtu = new_mtu; |
---|
2626 | 3185 | return ret; |
---|
.. | .. |
---|
2669 | 3228 | |
---|
2670 | 3229 | for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); |
---|
2671 | 3230 | vf < nvfs; vf++) { |
---|
2672 | | - macaddr[5] = adap->pf * 16 + vf; |
---|
| 3231 | + macaddr[5] = adap->pf * nvfs + vf; |
---|
2673 | 3232 | ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); |
---|
2674 | 3233 | } |
---|
2675 | 3234 | } |
---|
.. | .. |
---|
2712 | 3271 | ivi->min_tx_rate = 0; |
---|
2713 | 3272 | ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); |
---|
2714 | 3273 | ivi->vlan = vfinfo->vlan; |
---|
| 3274 | + ivi->linkstate = vfinfo->link_state; |
---|
2715 | 3275 | return 0; |
---|
2716 | 3276 | } |
---|
2717 | 3277 | |
---|
.. | .. |
---|
2748 | 3308 | return -EINVAL; |
---|
2749 | 3309 | } |
---|
2750 | 3310 | |
---|
| 3311 | + if (max_tx_rate == 0) { |
---|
| 3312 | + /* unbind VF from any Traffic Class */ |
---|
| 3313 | + fw_pfvf = |
---|
| 3314 | + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | |
---|
| 3315 | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); |
---|
| 3316 | + fw_class = 0xffffffff; |
---|
| 3317 | + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, |
---|
| 3318 | + &fw_pfvf, &fw_class); |
---|
| 3319 | + if (ret) { |
---|
| 3320 | + dev_err(adap->pdev_dev, |
---|
| 3321 | + "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n", |
---|
| 3322 | + ret, adap->pf, vf); |
---|
| 3323 | + return -EINVAL; |
---|
| 3324 | + } |
---|
| 3325 | + dev_info(adap->pdev_dev, |
---|
| 3326 | + "PF %d VF %d is unbound from TX Rate Limiting\n", |
---|
| 3327 | + adap->pf, vf); |
---|
| 3328 | + adap->vfinfo[vf].tx_rate = 0; |
---|
| 3329 | + return 0; |
---|
| 3330 | + } |
---|
| 3331 | + |
---|
2751 | 3332 | ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); |
---|
2752 | 3333 | if (ret != FW_SUCCESS) { |
---|
2753 | 3334 | dev_err(adap->pdev_dev, |
---|
.. | .. |
---|
2779 | 3360 | SCHED_CLASS_RATEUNIT_BITS, |
---|
2780 | 3361 | SCHED_CLASS_RATEMODE_ABS, |
---|
2781 | 3362 | pi->tx_chan, class_id, 0, |
---|
2782 | | - max_tx_rate * 1000, 0, pktsize); |
---|
| 3363 | + max_tx_rate * 1000, 0, pktsize, 0); |
---|
2783 | 3364 | if (ret) { |
---|
2784 | 3365 | dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", |
---|
2785 | 3366 | ret); |
---|
.. | .. |
---|
2797 | 3378 | &fw_class); |
---|
2798 | 3379 | if (ret) { |
---|
2799 | 3380 | dev_err(adap->pdev_dev, |
---|
2800 | | - "Err %d in binding VF %d to Traffic Class %d\n", |
---|
2801 | | - ret, vf, class_id); |
---|
| 3381 | + "Err %d in binding PF %d VF %d to Traffic Class %d\n", |
---|
| 3382 | + ret, adap->pf, vf, class_id); |
---|
2802 | 3383 | return -EINVAL; |
---|
2803 | 3384 | } |
---|
2804 | 3385 | dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", |
---|
.. | .. |
---|
2830 | 3411 | ret, (vlan ? "setting" : "clearing"), adap->pf, vf); |
---|
2831 | 3412 | return ret; |
---|
2832 | 3413 | } |
---|
| 3414 | + |
---|
| 3415 | +static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf, |
---|
| 3416 | + int link) |
---|
| 3417 | +{ |
---|
| 3418 | + struct port_info *pi = netdev_priv(dev); |
---|
| 3419 | + struct adapter *adap = pi->adapter; |
---|
| 3420 | + u32 param, val; |
---|
| 3421 | + int ret = 0; |
---|
| 3422 | + |
---|
| 3423 | + if (vf >= adap->num_vfs) |
---|
| 3424 | + return -EINVAL; |
---|
| 3425 | + |
---|
| 3426 | + switch (link) { |
---|
| 3427 | + case IFLA_VF_LINK_STATE_AUTO: |
---|
| 3428 | + val = FW_VF_LINK_STATE_AUTO; |
---|
| 3429 | + break; |
---|
| 3430 | + |
---|
| 3431 | + case IFLA_VF_LINK_STATE_ENABLE: |
---|
| 3432 | + val = FW_VF_LINK_STATE_ENABLE; |
---|
| 3433 | + break; |
---|
| 3434 | + |
---|
| 3435 | + case IFLA_VF_LINK_STATE_DISABLE: |
---|
| 3436 | + val = FW_VF_LINK_STATE_DISABLE; |
---|
| 3437 | + break; |
---|
| 3438 | + |
---|
| 3439 | + default: |
---|
| 3440 | + return -EINVAL; |
---|
| 3441 | + } |
---|
| 3442 | + |
---|
| 3443 | + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | |
---|
| 3444 | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE)); |
---|
| 3445 | + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, |
---|
| 3446 | + ¶m, &val); |
---|
| 3447 | + if (ret) { |
---|
| 3448 | + dev_err(adap->pdev_dev, |
---|
| 3449 | + "Error %d in setting PF %d VF %d link state\n", |
---|
| 3450 | + ret, adap->pf, vf); |
---|
| 3451 | + return -EINVAL; |
---|
| 3452 | + } |
---|
| 3453 | + |
---|
| 3454 | + adap->vfinfo[vf].link_state = link; |
---|
| 3455 | + return ret; |
---|
| 3456 | +} |
---|
2833 | 3457 | #endif /* CONFIG_PCI_IOV */ |
---|
2834 | 3458 | |
---|
2835 | 3459 | static int cxgb_set_mac_addr(struct net_device *dev, void *p) |
---|
.. | .. |
---|
2841 | 3465 | if (!is_valid_ether_addr(addr->sa_data)) |
---|
2842 | 3466 | return -EADDRNOTAVAIL; |
---|
2843 | 3467 | |
---|
2844 | | - ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, |
---|
2845 | | - pi->xact_addr_filt, addr->sa_data, true, true); |
---|
| 3468 | + ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, |
---|
| 3469 | + addr->sa_data, true, &pi->smt_idx); |
---|
2846 | 3470 | if (ret < 0) |
---|
2847 | 3471 | return ret; |
---|
2848 | 3472 | |
---|
2849 | 3473 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
---|
2850 | | - pi->xact_addr_filt = ret; |
---|
2851 | 3474 | return 0; |
---|
2852 | 3475 | } |
---|
2853 | 3476 | |
---|
.. | .. |
---|
2857 | 3480 | struct port_info *pi = netdev_priv(dev); |
---|
2858 | 3481 | struct adapter *adap = pi->adapter; |
---|
2859 | 3482 | |
---|
2860 | | - if (adap->flags & USING_MSIX) { |
---|
| 3483 | + if (adap->flags & CXGB4_USING_MSIX) { |
---|
2861 | 3484 | int i; |
---|
2862 | 3485 | struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; |
---|
2863 | 3486 | |
---|
.. | .. |
---|
2872 | 3495 | { |
---|
2873 | 3496 | struct port_info *pi = netdev_priv(dev); |
---|
2874 | 3497 | struct adapter *adap = pi->adapter; |
---|
| 3498 | + struct ch_sched_queue qe = { 0 }; |
---|
| 3499 | + struct ch_sched_params p = { 0 }; |
---|
2875 | 3500 | struct sched_class *e; |
---|
2876 | | - struct ch_sched_params p; |
---|
2877 | | - struct ch_sched_queue qe; |
---|
2878 | 3501 | u32 req_rate; |
---|
2879 | 3502 | int err = 0; |
---|
2880 | 3503 | |
---|
.. | .. |
---|
2884 | 3507 | if (index < 0 || index > pi->nqsets - 1) |
---|
2885 | 3508 | return -EINVAL; |
---|
2886 | 3509 | |
---|
2887 | | - if (!(adap->flags & FULL_INIT_DONE)) { |
---|
| 3510 | + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { |
---|
2888 | 3511 | dev_err(adap->pdev_dev, |
---|
2889 | 3512 | "Failed to rate limit on queue %d. Link Down?\n", |
---|
2890 | 3513 | index); |
---|
2891 | 3514 | return -EINVAL; |
---|
| 3515 | + } |
---|
| 3516 | + |
---|
| 3517 | + qe.queue = index; |
---|
| 3518 | + e = cxgb4_sched_queue_lookup(dev, &qe); |
---|
| 3519 | + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { |
---|
| 3520 | + dev_err(adap->pdev_dev, |
---|
| 3521 | + "Queue %u already bound to class %u of type: %u\n", |
---|
| 3522 | + index, e->idx, e->info.u.params.level); |
---|
| 3523 | + return -EBUSY; |
---|
2892 | 3524 | } |
---|
2893 | 3525 | |
---|
2894 | 3526 | /* Convert from Mbps to Kbps */ |
---|
.. | .. |
---|
2920 | 3552 | return 0; |
---|
2921 | 3553 | |
---|
2922 | 3554 | /* Fetch any available unused or matching scheduling class */ |
---|
2923 | | - memset(&p, 0, sizeof(p)); |
---|
2924 | 3555 | p.type = SCHED_CLASS_TYPE_PACKET; |
---|
2925 | 3556 | p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; |
---|
2926 | 3557 | p.u.params.mode = SCHED_CLASS_MODE_CLASS; |
---|
.. | .. |
---|
2950 | 3581 | } |
---|
2951 | 3582 | |
---|
2952 | 3583 | static int cxgb_setup_tc_flower(struct net_device *dev, |
---|
2953 | | - struct tc_cls_flower_offload *cls_flower) |
---|
| 3584 | + struct flow_cls_offload *cls_flower) |
---|
2954 | 3585 | { |
---|
2955 | 3586 | switch (cls_flower->command) { |
---|
2956 | | - case TC_CLSFLOWER_REPLACE: |
---|
| 3587 | + case FLOW_CLS_REPLACE: |
---|
2957 | 3588 | return cxgb4_tc_flower_replace(dev, cls_flower); |
---|
2958 | | - case TC_CLSFLOWER_DESTROY: |
---|
| 3589 | + case FLOW_CLS_DESTROY: |
---|
2959 | 3590 | return cxgb4_tc_flower_destroy(dev, cls_flower); |
---|
2960 | | - case TC_CLSFLOWER_STATS: |
---|
| 3591 | + case FLOW_CLS_STATS: |
---|
2961 | 3592 | return cxgb4_tc_flower_stats(dev, cls_flower); |
---|
2962 | 3593 | default: |
---|
2963 | 3594 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
2978 | 3609 | } |
---|
2979 | 3610 | } |
---|
2980 | 3611 | |
---|
2981 | | -static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
---|
2982 | | - void *cb_priv) |
---|
| 3612 | +static int cxgb_setup_tc_matchall(struct net_device *dev, |
---|
| 3613 | + struct tc_cls_matchall_offload *cls_matchall, |
---|
| 3614 | + bool ingress) |
---|
| 3615 | +{ |
---|
| 3616 | + struct adapter *adap = netdev2adap(dev); |
---|
| 3617 | + |
---|
| 3618 | + if (!adap->tc_matchall) |
---|
| 3619 | + return -ENOMEM; |
---|
| 3620 | + |
---|
| 3621 | + switch (cls_matchall->command) { |
---|
| 3622 | + case TC_CLSMATCHALL_REPLACE: |
---|
| 3623 | + return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress); |
---|
| 3624 | + case TC_CLSMATCHALL_DESTROY: |
---|
| 3625 | + return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress); |
---|
| 3626 | + case TC_CLSMATCHALL_STATS: |
---|
| 3627 | + if (ingress) |
---|
| 3628 | + return cxgb4_tc_matchall_stats(dev, cls_matchall); |
---|
| 3629 | + break; |
---|
| 3630 | + default: |
---|
| 3631 | + break; |
---|
| 3632 | + } |
---|
| 3633 | + |
---|
| 3634 | + return -EOPNOTSUPP; |
---|
| 3635 | +} |
---|
| 3636 | + |
---|
| 3637 | +static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type, |
---|
| 3638 | + void *type_data, void *cb_priv) |
---|
2983 | 3639 | { |
---|
2984 | 3640 | struct net_device *dev = cb_priv; |
---|
2985 | 3641 | struct port_info *pi = netdev2pinfo(dev); |
---|
2986 | 3642 | struct adapter *adap = netdev2adap(dev); |
---|
2987 | 3643 | |
---|
2988 | | - if (!(adap->flags & FULL_INIT_DONE)) { |
---|
| 3644 | + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { |
---|
2989 | 3645 | dev_err(adap->pdev_dev, |
---|
2990 | 3646 | "Failed to setup tc on port %d. Link Down?\n", |
---|
2991 | 3647 | pi->port_id); |
---|
.. | .. |
---|
3000 | 3656 | return cxgb_setup_tc_cls_u32(dev, type_data); |
---|
3001 | 3657 | case TC_SETUP_CLSFLOWER: |
---|
3002 | 3658 | return cxgb_setup_tc_flower(dev, type_data); |
---|
| 3659 | + case TC_SETUP_CLSMATCHALL: |
---|
| 3660 | + return cxgb_setup_tc_matchall(dev, type_data, true); |
---|
3003 | 3661 | default: |
---|
3004 | 3662 | return -EOPNOTSUPP; |
---|
3005 | 3663 | } |
---|
3006 | 3664 | } |
---|
3007 | 3665 | |
---|
3008 | | -static int cxgb_setup_tc_block(struct net_device *dev, |
---|
3009 | | - struct tc_block_offload *f) |
---|
| 3666 | +static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type, |
---|
| 3667 | + void *type_data, void *cb_priv) |
---|
3010 | 3668 | { |
---|
| 3669 | + struct net_device *dev = cb_priv; |
---|
3011 | 3670 | struct port_info *pi = netdev2pinfo(dev); |
---|
| 3671 | + struct adapter *adap = netdev2adap(dev); |
---|
3012 | 3672 | |
---|
3013 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
3014 | | - return -EOPNOTSUPP; |
---|
3015 | | - |
---|
3016 | | - switch (f->command) { |
---|
3017 | | - case TC_BLOCK_BIND: |
---|
3018 | | - return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, |
---|
3019 | | - pi, dev, f->extack); |
---|
3020 | | - case TC_BLOCK_UNBIND: |
---|
3021 | | - tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); |
---|
3022 | | - return 0; |
---|
3023 | | - default: |
---|
3024 | | - return -EOPNOTSUPP; |
---|
| 3673 | + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { |
---|
| 3674 | + dev_err(adap->pdev_dev, |
---|
| 3675 | + "Failed to setup tc on port %d. Link Down?\n", |
---|
| 3676 | + pi->port_id); |
---|
| 3677 | + return -EINVAL; |
---|
3025 | 3678 | } |
---|
| 3679 | + |
---|
| 3680 | + if (!tc_cls_can_offload_and_chain0(dev, type_data)) |
---|
| 3681 | + return -EOPNOTSUPP; |
---|
| 3682 | + |
---|
| 3683 | + switch (type) { |
---|
| 3684 | + case TC_SETUP_CLSMATCHALL: |
---|
| 3685 | + return cxgb_setup_tc_matchall(dev, type_data, false); |
---|
| 3686 | + default: |
---|
| 3687 | + break; |
---|
| 3688 | + } |
---|
| 3689 | + |
---|
| 3690 | + return -EOPNOTSUPP; |
---|
| 3691 | +} |
---|
| 3692 | + |
---|
| 3693 | +static int cxgb_setup_tc_mqprio(struct net_device *dev, |
---|
| 3694 | + struct tc_mqprio_qopt_offload *mqprio) |
---|
| 3695 | +{ |
---|
| 3696 | + struct adapter *adap = netdev2adap(dev); |
---|
| 3697 | + |
---|
| 3698 | + if (!is_ethofld(adap) || !adap->tc_mqprio) |
---|
| 3699 | + return -ENOMEM; |
---|
| 3700 | + |
---|
| 3701 | + return cxgb4_setup_tc_mqprio(dev, mqprio); |
---|
| 3702 | +} |
---|
| 3703 | + |
---|
| 3704 | +static LIST_HEAD(cxgb_block_cb_list); |
---|
| 3705 | + |
---|
| 3706 | +static int cxgb_setup_tc_block(struct net_device *dev, |
---|
| 3707 | + struct flow_block_offload *f) |
---|
| 3708 | +{ |
---|
| 3709 | + struct port_info *pi = netdev_priv(dev); |
---|
| 3710 | + flow_setup_cb_t *cb; |
---|
| 3711 | + bool ingress_only; |
---|
| 3712 | + |
---|
| 3713 | + pi->tc_block_shared = f->block_shared; |
---|
| 3714 | + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { |
---|
| 3715 | + cb = cxgb_setup_tc_block_egress_cb; |
---|
| 3716 | + ingress_only = false; |
---|
| 3717 | + } else { |
---|
| 3718 | + cb = cxgb_setup_tc_block_ingress_cb; |
---|
| 3719 | + ingress_only = true; |
---|
| 3720 | + } |
---|
| 3721 | + |
---|
| 3722 | + return flow_block_cb_setup_simple(f, &cxgb_block_cb_list, |
---|
| 3723 | + cb, pi, dev, ingress_only); |
---|
3026 | 3724 | } |
---|
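The conversion above replaces the hand-rolled TC_BLOCK_BIND/TC_BLOCK_UNBIND switch with the flow block API. As a reading aid, here is a minimal sketch of what flow_block_cb_setup_simple() now does on the driver's behalf; it is a simplified paraphrase of the generic helper in net/core/flow_offload.c (the busy-check and some details are omitted), not cxgb4 code.

	/* Simplified sketch of flow_block_cb_setup_simple() from
	 * <net/flow_offload.h>; assumption: busy-check omitted.
	 */
	static int sketch_block_setup(struct flow_block_offload *f,
				      struct list_head *driver_list,
				      flow_setup_cb_t *cb, void *cb_priv,
				      bool ingress_only)
	{
		struct flow_block_cb *block_cb;

		if (ingress_only &&
		    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
			return -EOPNOTSUPP;

		f->driver_block_list = driver_list;

		switch (f->command) {
		case FLOW_BLOCK_BIND:
			block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
			if (IS_ERR(block_cb))
				return PTR_ERR(block_cb);
			flow_block_cb_add(block_cb, f);	/* core commits the bind */
			list_add_tail(&block_cb->driver_list, driver_list);
			return 0;
		case FLOW_BLOCK_UNBIND:
			block_cb = flow_block_cb_lookup(f->block, cb, cb_priv);
			if (!block_cb)
				return -ENOENT;
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

Passing ingress_only = true preserves the old behaviour of rejecting anything but clsact ingress binds, while the egress path opts out of that restriction so the egress matchall offload added above can bind as well.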
3027 | 3725 | |
---|
3028 | 3726 | static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, |
---|
3029 | 3727 | void *type_data) |
---|
3030 | 3728 | { |
---|
3031 | 3729 | switch (type) { |
---|
| 3730 | + case TC_SETUP_QDISC_MQPRIO: |
---|
| 3731 | + return cxgb_setup_tc_mqprio(dev, type_data); |
---|
3032 | 3732 | case TC_SETUP_BLOCK: |
---|
3033 | 3733 | return cxgb_setup_tc_block(dev, type_data); |
---|
3034 | 3734 | default: |
---|
.. | .. |
---|
3036 | 3736 | } |
---|
3037 | 3737 | } |
---|
3038 | 3738 | |
---|
3039 | | -static void cxgb_del_udp_tunnel(struct net_device *netdev, |
---|
3040 | | - struct udp_tunnel_info *ti) |
---|
| 3739 | +static int cxgb_udp_tunnel_unset_port(struct net_device *netdev, |
---|
| 3740 | + unsigned int table, unsigned int entry, |
---|
| 3741 | + struct udp_tunnel_info *ti) |
---|
3041 | 3742 | { |
---|
3042 | 3743 | struct port_info *pi = netdev_priv(netdev); |
---|
3043 | 3744 | struct adapter *adapter = pi->adapter; |
---|
3044 | | - unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); |
---|
3045 | 3745 | u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; |
---|
3046 | 3746 | int ret = 0, i; |
---|
3047 | 3747 | |
---|
3048 | | - if (chip_ver < CHELSIO_T6) |
---|
3049 | | - return; |
---|
3050 | | - |
---|
3051 | 3748 | switch (ti->type) { |
---|
3052 | 3749 | case UDP_TUNNEL_TYPE_VXLAN: |
---|
3053 | | - if (!adapter->vxlan_port_cnt || |
---|
3054 | | - adapter->vxlan_port != ti->port) |
---|
3055 | | - return; /* Invalid VxLAN destination port */ |
---|
3056 | | - |
---|
3057 | | - adapter->vxlan_port_cnt--; |
---|
3058 | | - if (adapter->vxlan_port_cnt) |
---|
3059 | | - return; |
---|
3060 | | - |
---|
3061 | 3750 | adapter->vxlan_port = 0; |
---|
3062 | 3751 | t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0); |
---|
3063 | 3752 | break; |
---|
3064 | 3753 | case UDP_TUNNEL_TYPE_GENEVE: |
---|
3065 | | - if (!adapter->geneve_port_cnt || |
---|
3066 | | - adapter->geneve_port != ti->port) |
---|
3067 | | - return; /* Invalid GENEVE destination port */ |
---|
3068 | | - |
---|
3069 | | - adapter->geneve_port_cnt--; |
---|
3070 | | - if (adapter->geneve_port_cnt) |
---|
3071 | | - return; |
---|
3072 | | - |
---|
3073 | 3754 | adapter->geneve_port = 0; |
---|
3074 | 3755 | t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); |
---|
3075 | 3756 | break; |
---|
3076 | 3757 | default: |
---|
3077 | | - return; |
---|
| 3758 | + return -EINVAL; |
---|
3078 | 3759 | } |
---|
3079 | 3760 | |
---|
3080 | 3761 | /* Matchall mac entries can be deleted only after all tunnel ports |
---|
3081 | 3762 | * are brought down or removed. |
---|
3082 | 3763 | */ |
---|
3083 | 3764 | if (!adapter->rawf_cnt) |
---|
3084 | | - return; |
---|
| 3765 | + return 0; |
---|
3085 | 3766 | for_each_port(adapter, i) { |
---|
3086 | 3767 | pi = adap2pinfo(adapter, i); |
---|
3087 | 3768 | ret = t4_free_raw_mac_filt(adapter, pi->viid, |
---|
3088 | 3769 | match_all_mac, match_all_mac, |
---|
3089 | | - adapter->rawf_start + |
---|
3090 | | - pi->port_id, |
---|
| 3770 | + adapter->rawf_start + pi->port_id, |
---|
3091 | 3771 | 1, pi->port_id, false); |
---|
3092 | 3772 | if (ret < 0) { |
---|
3093 | 3773 | netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", |
---|
3094 | 3774 | i); |
---|
3095 | | - return; |
---|
| 3775 | + return ret; |
---|
3096 | 3776 | } |
---|
3097 | | - atomic_dec(&adapter->mps_encap[adapter->rawf_start + |
---|
3098 | | - pi->port_id].refcnt); |
---|
3099 | 3777 | } |
---|
| 3778 | + |
---|
| 3779 | + return 0; |
---|
3100 | 3780 | } |
---|
3101 | 3781 | |
---|
3102 | | -static void cxgb_add_udp_tunnel(struct net_device *netdev, |
---|
3103 | | - struct udp_tunnel_info *ti) |
---|
| 3782 | +static int cxgb_udp_tunnel_set_port(struct net_device *netdev, |
---|
| 3783 | + unsigned int table, unsigned int entry, |
---|
| 3784 | + struct udp_tunnel_info *ti) |
---|
3104 | 3785 | { |
---|
3105 | 3786 | struct port_info *pi = netdev_priv(netdev); |
---|
3106 | 3787 | struct adapter *adapter = pi->adapter; |
---|
3107 | | - unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); |
---|
3108 | 3788 | u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; |
---|
3109 | 3789 | int i, ret; |
---|
3110 | 3790 | |
---|
3111 | | - if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt) |
---|
3112 | | - return; |
---|
3113 | | - |
---|
3114 | 3791 | switch (ti->type) { |
---|
3115 | 3792 | case UDP_TUNNEL_TYPE_VXLAN: |
---|
3116 | | - /* Callback for adding vxlan port can be called with the same |
---|
3117 | | - * port for both IPv4 and IPv6. We should not disable the |
---|
3118 | | - * offloading when the same port for both protocols is added |
---|
3119 | | - * and later one of them is removed. |
---|
3120 | | - */ |
---|
3121 | | - if (adapter->vxlan_port_cnt && |
---|
3122 | | - adapter->vxlan_port == ti->port) { |
---|
3123 | | - adapter->vxlan_port_cnt++; |
---|
3124 | | - return; |
---|
3125 | | - } |
---|
3126 | | - |
---|
3127 | | - /* We will support only one VxLAN port */ |
---|
3128 | | - if (adapter->vxlan_port_cnt) { |
---|
3129 | | - netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", |
---|
3130 | | - be16_to_cpu(adapter->vxlan_port), |
---|
3131 | | - be16_to_cpu(ti->port)); |
---|
3132 | | - return; |
---|
3133 | | - } |
---|
3134 | | - |
---|
3135 | 3793 | adapter->vxlan_port = ti->port; |
---|
3136 | | - adapter->vxlan_port_cnt = 1; |
---|
3137 | | - |
---|
3138 | 3794 | t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, |
---|
3139 | 3795 | VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); |
---|
3140 | 3796 | break; |
---|
3141 | 3797 | case UDP_TUNNEL_TYPE_GENEVE: |
---|
3142 | | - if (adapter->geneve_port_cnt && |
---|
3143 | | - adapter->geneve_port == ti->port) { |
---|
3144 | | - adapter->geneve_port_cnt++; |
---|
3145 | | - return; |
---|
3146 | | - } |
---|
3147 | | - |
---|
3148 | | - /* We will support only one GENEVE port */ |
---|
3149 | | - if (adapter->geneve_port_cnt) { |
---|
3150 | | - netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", |
---|
3151 | | - be16_to_cpu(adapter->geneve_port), |
---|
3152 | | - be16_to_cpu(ti->port)); |
---|
3153 | | - return; |
---|
3154 | | - } |
---|
3155 | | - |
---|
3156 | 3798 | adapter->geneve_port = ti->port; |
---|
3157 | | - adapter->geneve_port_cnt = 1; |
---|
3158 | | - |
---|
3159 | 3799 | t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, |
---|
3160 | 3800 | GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); |
---|
3161 | 3801 | break; |
---|
3162 | 3802 | default: |
---|
3163 | | - return; |
---|
| 3803 | + return -EINVAL; |
---|
3164 | 3804 | } |
---|
3165 | 3805 | |
---|
3166 | 3806 | /* Create a 'match all' mac filter entry for inner mac, |
---|
.. | .. |
---|
3175 | 3815 | ret = t4_alloc_raw_mac_filt(adapter, pi->viid, |
---|
3176 | 3816 | match_all_mac, |
---|
3177 | 3817 | match_all_mac, |
---|
3178 | | - adapter->rawf_start + |
---|
3179 | | - pi->port_id, |
---|
| 3818 | + adapter->rawf_start + pi->port_id, |
---|
3180 | 3819 | 1, pi->port_id, false); |
---|
3181 | 3820 | if (ret < 0) { |
---|
3182 | 3821 | netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", |
---|
3183 | 3822 | be16_to_cpu(ti->port)); |
---|
3184 | | - cxgb_del_udp_tunnel(netdev, ti); |
---|
3185 | | - return; |
---|
| 3823 | + return ret; |
---|
3186 | 3824 | } |
---|
3187 | | - atomic_inc(&adapter->mps_encap[ret].refcnt); |
---|
3188 | 3825 | } |
---|
| 3826 | + |
---|
| 3827 | + return 0; |
---|
3189 | 3828 | } |
---|
| 3829 | + |
---|
| 3830 | +static const struct udp_tunnel_nic_info cxgb_udp_tunnels = { |
---|
| 3831 | + .set_port = cxgb_udp_tunnel_set_port, |
---|
| 3832 | + .unset_port = cxgb_udp_tunnel_unset_port, |
---|
| 3833 | + .tables = { |
---|
| 3834 | + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
---|
| 3835 | + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, |
---|
| 3836 | + }, |
---|
| 3837 | +}; |
---|
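With the port reference counting deleted above, the udp_tunnel_nic core now owns that bookkeeping: it tracks per-table refcounts and calls ->set_port()/->unset_port() only on the first add and last removal of a given port, which is also why ndo_udp_tunnel_add/del below can point at the generic udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port() helpers. The table still has to be attached to each net_device during probe; that assignment is outside this hunk, so the following is only an illustrative sketch.

	/* Sketch only: the real assignment happens in the probe path,
	 * outside this hunk.
	 */
	static void sketch_attach_udp_tunnels(struct net_device *netdev)
	{
		netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
	}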
3190 | 3838 | |
---|
3191 | 3839 | static netdev_features_t cxgb_features_check(struct sk_buff *skb, |
---|
3192 | 3840 | struct net_device *dev, |
---|
.. | .. |
---|
3237 | 3885 | #endif /* CONFIG_CHELSIO_T4_FCOE */ |
---|
3238 | 3886 | .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, |
---|
3239 | 3887 | .ndo_setup_tc = cxgb_setup_tc, |
---|
3240 | | - .ndo_udp_tunnel_add = cxgb_add_udp_tunnel, |
---|
3241 | | - .ndo_udp_tunnel_del = cxgb_del_udp_tunnel, |
---|
| 3888 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 3889 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
3242 | 3890 | .ndo_features_check = cxgb_features_check, |
---|
3243 | 3891 | .ndo_fix_features = cxgb_fix_features, |
---|
3244 | 3892 | }; |
---|
3245 | 3893 | |
---|
3246 | 3894 | #ifdef CONFIG_PCI_IOV |
---|
3247 | 3895 | static const struct net_device_ops cxgb4_mgmt_netdev_ops = { |
---|
3248 | | - .ndo_open = cxgb4_mgmt_open, |
---|
3249 | | - .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac, |
---|
3250 | | - .ndo_get_vf_config = cxgb4_mgmt_get_vf_config, |
---|
3251 | | - .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate, |
---|
3252 | | - .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id, |
---|
3253 | | - .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, |
---|
| 3896 | + .ndo_open = cxgb4_mgmt_open, |
---|
| 3897 | + .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac, |
---|
| 3898 | + .ndo_get_vf_config = cxgb4_mgmt_get_vf_config, |
---|
| 3899 | + .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate, |
---|
| 3900 | + .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id, |
---|
| 3901 | + .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, |
---|
| 3902 | + .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state, |
---|
3254 | 3903 | }; |
---|
3255 | 3904 | #endif |
---|
3256 | 3905 | |
---|
.. | .. |
---|
3260 | 3909 | struct adapter *adapter = netdev2adap(dev); |
---|
3261 | 3910 | |
---|
3262 | 3911 | strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); |
---|
3263 | | - strlcpy(info->version, cxgb4_driver_version, |
---|
3264 | | - sizeof(info->version)); |
---|
3265 | 3912 | strlcpy(info->bus_info, pci_name(adapter->pdev), |
---|
3266 | 3913 | sizeof(info->bus_info)); |
---|
3267 | 3914 | } |
---|
.. | .. |
---|
3784 | 4431 | |
---|
3785 | 4432 | /* Load PHY Firmware onto adapter. |
---|
3786 | 4433 | */ |
---|
3787 | | - ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, |
---|
3788 | | - phy_info->phy_fw_version, |
---|
| 4434 | + ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, |
---|
3789 | 4435 | (u8 *)phyf->data, phyf->size); |
---|
3790 | 4436 | if (ret < 0) |
---|
3791 | 4437 | dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", |
---|
.. | .. |
---|
3811 | 4457 | */ |
---|
3812 | 4458 | static int adap_init0_config(struct adapter *adapter, int reset) |
---|
3813 | 4459 | { |
---|
3814 | | - struct fw_caps_config_cmd caps_cmd; |
---|
3815 | | - const struct firmware *cf; |
---|
3816 | | - unsigned long mtype = 0, maddr = 0; |
---|
3817 | | - u32 finiver, finicsum, cfcsum; |
---|
3818 | | - int ret; |
---|
3819 | | - int config_issued = 0; |
---|
3820 | 4460 | char *fw_config_file, fw_config_file_path[256]; |
---|
| 4461 | + u32 finiver, finicsum, cfcsum, param, val; |
---|
| 4462 | + struct fw_caps_config_cmd caps_cmd; |
---|
| 4463 | + unsigned long mtype = 0, maddr = 0; |
---|
| 4464 | + const struct firmware *cf; |
---|
3821 | 4465 | char *config_name = NULL; |
---|
| 4466 | + int config_issued = 0; |
---|
| 4467 | + int ret; |
---|
3822 | 4468 | |
---|
3823 | 4469 | /* |
---|
3824 | 4470 | * Reset device if necessary. |
---|
.. | .. |
---|
3926 | 4572 | goto bye; |
---|
3927 | 4573 | } |
---|
3928 | 4574 | |
---|
| 4575 | + val = 0; |
---|
| 4576 | + |
---|
| 4577 | + /* Ofld + Hash filter is supported. Older fw will fail this request, |
---|
| 4578 | + * which is fine. |
---|
| 4579 | + */ |
---|
| 4580 | + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
---|
| 4581 | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD)); |
---|
| 4582 | + ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, |
---|
| 4583 | + 1, ¶m, &val); |
---|
| 4584 | + |
---|
| 4585 | + /* If the FW doesn't know about Hash filter + ofld support, |
---|
| 4586 | + * it's not a problem; don't return an error. |
---|
| 4587 | + */ |
---|
| 4588 | + if (ret < 0) { |
---|
| 4589 | + dev_warn(adapter->pdev_dev, |
---|
| 4590 | + "Hash filter with ofld is not supported by FW\n"); |
---|
| 4591 | + } |
---|
| 4592 | + |
---|
3929 | 4593 | /* |
---|
3930 | 4594 | * Issue a Capability Configuration command to the firmware to get it |
---|
3931 | 4595 | * to parse the Configuration File. We don't use t4_fw_config_file() |
---|
.. | .. |
---|
4001 | 4665 | if (ret) |
---|
4002 | 4666 | dev_err(adapter->pdev_dev, |
---|
4003 | 4667 | "HMA configuration failed with error %d\n", ret); |
---|
| 4668 | + |
---|
| 4669 | + if (is_t6(adapter->params.chip)) { |
---|
| 4670 | + adap_config_hpfilter(adapter); |
---|
| 4671 | + ret = setup_ppod_edram(adapter); |
---|
| 4672 | + if (!ret) |
---|
| 4673 | + dev_info(adapter->pdev_dev, "Successfully enabled " |
---|
| 4674 | + "ppod edram feature\n"); |
---|
| 4675 | + } |
---|
4004 | 4676 | |
---|
4005 | 4677 | /* |
---|
4006 | 4678 | * And finally tell the firmware to initialize itself using the |
---|
.. | .. |
---|
4091 | 4763 | /* |
---|
4092 | 4764 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. |
---|
4093 | 4765 | */ |
---|
4094 | | -static int adap_init0(struct adapter *adap) |
---|
| 4766 | +static int adap_init0(struct adapter *adap, int vpd_skip) |
---|
4095 | 4767 | { |
---|
4096 | | - int ret; |
---|
4097 | | - u32 v, port_vec; |
---|
4098 | | - enum dev_state state; |
---|
4099 | | - u32 params[7], val[7]; |
---|
4100 | 4768 | struct fw_caps_config_cmd caps_cmd; |
---|
| 4769 | + u32 params[7], val[7]; |
---|
| 4770 | + enum dev_state state; |
---|
| 4771 | + u32 v, port_vec; |
---|
4101 | 4772 | int reset = 1; |
---|
| 4773 | + int ret; |
---|
4102 | 4774 | |
---|
4103 | 4775 | /* Grab Firmware Device Log parameters as early as possible so we have |
---|
4104 | 4776 | * access to it for debugging, etc. |
---|
.. | .. |
---|
4116 | 4788 | return ret; |
---|
4117 | 4789 | } |
---|
4118 | 4790 | if (ret == adap->mbox) |
---|
4119 | | - adap->flags |= MASTER_PF; |
---|
| 4791 | + adap->flags |= CXGB4_MASTER_PF; |
---|
4120 | 4792 | |
---|
4121 | 4793 | /* |
---|
4122 | 4794 | * If we're the Master PF Driver and the device is uninitialized, |
---|
.. | .. |
---|
4131 | 4803 | /* If firmware is too old (not supported by driver) force an update. */ |
---|
4132 | 4804 | if (ret) |
---|
4133 | 4805 | state = DEV_STATE_UNINIT; |
---|
4134 | | - if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { |
---|
| 4806 | + if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { |
---|
4135 | 4807 | struct fw_info *fw_info; |
---|
4136 | 4808 | struct fw_hdr *card_fw; |
---|
4137 | 4809 | const struct firmware *fw; |
---|
.. | .. |
---|
4193 | 4865 | ret); |
---|
4194 | 4866 | dev_info(adap->pdev_dev, "Coming up as %s: "\ |
---|
4195 | 4867 | "Adapter already initialized\n", |
---|
4196 | | - adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); |
---|
| 4868 | + adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); |
---|
4197 | 4869 | } else { |
---|
4198 | 4870 | dev_info(adap->pdev_dev, "Coming up as MASTER: "\ |
---|
4199 | 4871 | "Initializing adapter\n"); |
---|
.. | .. |
---|
4253 | 4925 | * could have FLASHed a new VPD which won't be read by the firmware |
---|
4254 | 4926 | * until we do the RESET ... |
---|
4255 | 4927 | */ |
---|
4256 | | - ret = t4_get_vpd_params(adap, &adap->params.vpd); |
---|
4257 | | - if (ret < 0) |
---|
4258 | | - goto bye; |
---|
| 4928 | + if (!vpd_skip) { |
---|
| 4929 | + ret = t4_get_vpd_params(adap, &adap->params.vpd); |
---|
| 4930 | + if (ret < 0) |
---|
| 4931 | + goto bye; |
---|
| 4932 | + } |
---|
4259 | 4933 | |
---|
4260 | 4934 | /* Find out what ports are available to us. Note that we need to do |
---|
4261 | 4935 | * this before calling adap_init0_no_config() since it needs nports |
---|
.. | .. |
---|
4279 | 4953 | if (ret < 0) |
---|
4280 | 4954 | goto bye; |
---|
4281 | 4955 | |
---|
| 4956 | + /* Grab the SGE Doorbell Queue Timer values. If successful, that |
---|
| 4957 | + * indicates that the Firmware and Hardware support this. |
---|
| 4958 | + */ |
---|
| 4959 | + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
---|
| 4960 | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK)); |
---|
| 4961 | + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, |
---|
| 4962 | + 1, params, val); |
---|
| 4963 | + |
---|
| 4964 | + if (!ret) { |
---|
| 4965 | + adap->sge.dbqtimer_tick = val[0]; |
---|
| 4966 | + ret = t4_read_sge_dbqtimers(adap, |
---|
| 4967 | + ARRAY_SIZE(adap->sge.dbqtimer_val), |
---|
| 4968 | + adap->sge.dbqtimer_val); |
---|
| 4969 | + } |
---|
| 4970 | + |
---|
| 4971 | + if (!ret) |
---|
| 4972 | + adap->flags |= CXGB4_SGE_DBQ_TIMER; |
---|
| 4973 | + |
---|
4282 | 4974 | if (is_bypass_device(adap->pdev->device)) |
---|
4283 | 4975 | adap->params.bypass = 1; |
---|
4284 | 4976 | |
---|
4285 | 4977 | /* |
---|
4286 | 4978 | * Grab some of our basic fundamental operating parameters. |
---|
4287 | 4979 | */ |
---|
4288 | | -#define FW_PARAM_DEV(param) \ |
---|
4289 | | - (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ |
---|
4290 | | - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) |
---|
4291 | | - |
---|
4292 | | -#define FW_PARAM_PFVF(param) \ |
---|
4293 | | - FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ |
---|
4294 | | - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ |
---|
4295 | | - FW_PARAMS_PARAM_Y_V(0) | \ |
---|
4296 | | - FW_PARAMS_PARAM_Z_V(0) |
---|
4297 | | - |
---|
4298 | 4980 | params[0] = FW_PARAM_PFVF(EQ_START); |
---|
4299 | 4981 | params[1] = FW_PARAM_PFVF(L2T_START); |
---|
4300 | 4982 | params[2] = FW_PARAM_PFVF(L2T_END); |
---|
.. | .. |
---|
4312 | 4994 | adap->sge.ingr_start = val[5]; |
---|
4313 | 4995 | |
---|
4314 | 4996 | if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { |
---|
| 4997 | + params[0] = FW_PARAM_PFVF(HPFILTER_START); |
---|
| 4998 | + params[1] = FW_PARAM_PFVF(HPFILTER_END); |
---|
| 4999 | + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, |
---|
| 5000 | + params, val); |
---|
| 5001 | + if (ret < 0) |
---|
| 5002 | + goto bye; |
---|
| 5003 | + |
---|
| 5004 | + adap->tids.hpftid_base = val[0]; |
---|
| 5005 | + adap->tids.nhpftids = val[1] - val[0] + 1; |
---|
| 5006 | + |
---|
4315 | 5007 | /* Read the raw mps entries. In T6, the last 2 tcam entries |
---|
4316 | 5008 | * are reserved for raw mac addresses (rawf = 2, one per port). |
---|
4317 | 5009 | */ |
---|
.. | .. |
---|
4323 | 5015 | adap->rawf_start = val[0]; |
---|
4324 | 5016 | adap->rawf_cnt = val[1] - val[0] + 1; |
---|
4325 | 5017 | } |
---|
| 5018 | + |
---|
| 5019 | + adap->tids.tid_base = |
---|
| 5020 | + t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A); |
---|
4326 | 5021 | } |
---|
4327 | 5022 | |
---|
4328 | 5023 | /* qids (ingress/egress) returned from firmware can be anywhere |
---|
.. | .. |
---|
4377 | 5072 | ret = -ENOMEM; |
---|
4378 | 5073 | goto bye; |
---|
4379 | 5074 | } |
---|
| 5075 | + bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz); |
---|
4380 | 5076 | #endif |
---|
4381 | 5077 | |
---|
4382 | 5078 | params[0] = FW_PARAM_PFVF(CLIP_START); |
---|
.. | .. |
---|
4387 | 5083 | adap->clipt_start = val[0]; |
---|
4388 | 5084 | adap->clipt_end = val[1]; |
---|
4389 | 5085 | |
---|
4390 | | - /* We don't yet have a PARAMs calls to retrieve the number of Traffic |
---|
4391 | | - * Classes supported by the hardware/firmware so we hard code it here |
---|
4392 | | - * for now. |
---|
4393 | | - */ |
---|
4394 | | - adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; |
---|
| 5086 | + /* Get the supported number of traffic classes */ |
---|
| 5087 | + params[0] = FW_PARAM_DEV(NUM_TM_CLASS); |
---|
| 5088 | + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); |
---|
| 5089 | + if (ret < 0) { |
---|
| 5090 | + /* We couldn't retrieve the number of Traffic Classes |
---|
| 5091 | + * supported by the hardware/firmware. So we hard |
---|
| 5092 | + * code it here. |
---|
| 5093 | + */ |
---|
| 5094 | + adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; |
---|
| 5095 | + } else { |
---|
| 5096 | + adap->params.nsched_cls = val[0]; |
---|
| 5097 | + } |
---|
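The FW_PARAM_DEV()/FW_PARAM_PFVF() macros deleted earlier in this hunk are still used here, so they presumably move to a shared header. For reference, the NUM_TM_CLASS query above expands roughly as follows (macro bodies taken verbatim from the removed definitions; the new header location is an assumption):

	/* Sketch only: expansion of FW_PARAM_DEV(NUM_TM_CLASS). */
	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_NUM_TM_CLASS);
	u32 value;
	int ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				  &param, &value);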
4395 | 5098 | |
---|
4396 | 5099 | /* query params related to active filter region */ |
---|
4397 | 5100 | params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); |
---|
.. | .. |
---|
4401 | 5104 | * offload connection through firmware work request |
---|
4402 | 5105 | */ |
---|
4403 | 5106 | if ((val[0] != val[1]) && (ret >= 0)) { |
---|
4404 | | - adap->flags |= FW_OFLD_CONN; |
---|
| 5107 | + adap->flags |= CXGB4_FW_OFLD_CONN; |
---|
4405 | 5108 | adap->tids.aftid_base = val[0]; |
---|
4406 | 5109 | adap->tids.aftid_end = val[1]; |
---|
4407 | 5110 | } |
---|
.. | .. |
---|
4446 | 5149 | adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); |
---|
4447 | 5150 | } |
---|
4448 | 5151 | |
---|
| 5152 | + /* Check if FW supports returning vin and smt index. |
---|
| 5153 | + * If this is not supported, driver will interpret |
---|
| 5154 | + * these values from viid. |
---|
| 5155 | + */ |
---|
| 5156 | + params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); |
---|
| 5157 | + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, |
---|
| 5158 | + 1, params, val); |
---|
| 5159 | + adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); |
---|
| 5160 | + |
---|
4449 | 5161 | /* |
---|
4450 | 5162 | * Get device capabilities so we can determine what resources we need |
---|
4451 | 5163 | * to manage. |
---|
.. | .. |
---|
4459 | 5171 | if (ret < 0) |
---|
4460 | 5172 | goto bye; |
---|
4461 | 5173 | |
---|
| 5174 | + /* Hash filter support requires some mandatory register settings whose |
---|
| 5175 | + * values depend on whether offload is enabled, so determine and set |
---|
| 5176 | + * the offload flag here first. |
---|
| 5177 | + */ |
---|
| 5178 | + if (caps_cmd.ofldcaps) |
---|
| 5179 | + adap->params.offload = 1; |
---|
| 5180 | + |
---|
4462 | 5181 | if (caps_cmd.ofldcaps || |
---|
4463 | | - (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) { |
---|
| 5182 | + (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) || |
---|
| 5183 | + (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) { |
---|
4464 | 5184 | /* query offload-related parameters */ |
---|
4465 | 5185 | params[0] = FW_PARAM_DEV(NTID); |
---|
4466 | 5186 | params[1] = FW_PARAM_PFVF(SERVER_START); |
---|
.. | .. |
---|
4485 | 5205 | * 2. Server filter: This are special filters which are used |
---|
4486 | 5206 | * to redirect SYN packets to offload queue. |
---|
4487 | 5207 | */ |
---|
4488 | | - if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { |
---|
| 5208 | + if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { |
---|
4489 | 5209 | adap->tids.sftid_base = adap->tids.ftid_base + |
---|
4490 | 5210 | DIV_ROUND_UP(adap->tids.nftids, 3); |
---|
4491 | 5211 | adap->tids.nsftids = adap->tids.nftids - |
---|
.. | .. |
---|
4498 | 5218 | adap->params.ofldq_wr_cred = val[5]; |
---|
4499 | 5219 | |
---|
4500 | 5220 | if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) { |
---|
4501 | | - ret = init_hash_filter(adap); |
---|
4502 | | - if (ret < 0) |
---|
4503 | | - goto bye; |
---|
| 5221 | + init_hash_filter(adap); |
---|
4504 | 5222 | } else { |
---|
4505 | | - adap->params.offload = 1; |
---|
4506 | 5223 | adap->num_ofld_uld += 1; |
---|
| 5224 | + } |
---|
| 5225 | + |
---|
| 5226 | + if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) { |
---|
| 5227 | + params[0] = FW_PARAM_PFVF(ETHOFLD_START); |
---|
| 5228 | + params[1] = FW_PARAM_PFVF(ETHOFLD_END); |
---|
| 5229 | + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, |
---|
| 5230 | + params, val); |
---|
| 5231 | + if (!ret) { |
---|
| 5232 | + adap->tids.eotid_base = val[0]; |
---|
| 5233 | + adap->tids.neotids = min_t(u32, MAX_ATIDS, |
---|
| 5234 | + val[1] - val[0] + 1); |
---|
| 5235 | + adap->params.ethofld = 1; |
---|
| 5236 | + } |
---|
4507 | 5237 | } |
---|
4508 | 5238 | } |
---|
4509 | 5239 | if (caps_cmd.rdmacaps) { |
---|
.. | .. |
---|
4594 | 5324 | goto bye; |
---|
4595 | 5325 | adap->vres.iscsi.start = val[0]; |
---|
4596 | 5326 | adap->vres.iscsi.size = val[1] - val[0] + 1; |
---|
| 5327 | + if (is_t6(adap->params.chip)) { |
---|
| 5328 | + params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START); |
---|
| 5329 | + params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END); |
---|
| 5330 | + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, |
---|
| 5331 | + params, val); |
---|
| 5332 | + if (!ret) { |
---|
| 5333 | + adap->vres.ppod_edram.start = val[0]; |
---|
| 5334 | + adap->vres.ppod_edram.size = |
---|
| 5335 | + val[1] - val[0] + 1; |
---|
| 5336 | + |
---|
| 5337 | + dev_info(adap->pdev_dev, |
---|
| 5338 | + "ppod edram start 0x%x end 0x%x size 0x%x\n", |
---|
| 5339 | + val[0], val[1], |
---|
| 5340 | + adap->vres.ppod_edram.size); |
---|
| 5341 | + } |
---|
| 5342 | + } |
---|
4597 | 5343 | /* LIO target and cxgb4i initiator */ |
---|
4598 | 5344 | adap->num_ofld_uld += 2; |
---|
4599 | 5345 | } |
---|
.. | .. |
---|
4625 | 5371 | } |
---|
4626 | 5372 | adap->params.crypto = ntohs(caps_cmd.cryptocaps); |
---|
4627 | 5373 | } |
---|
4628 | | -#undef FW_PARAM_PFVF |
---|
4629 | | -#undef FW_PARAM_DEV |
---|
4630 | 5374 | |
---|
4631 | 5375 | /* The MTU/MSS Table is initialized by now, so load their values. If |
---|
4632 | 5376 | * we're initializing the adapter, then we'll make any modifications |
---|
.. | .. |
---|
4664 | 5408 | adap->params.b_wnd); |
---|
4665 | 5409 | } |
---|
4666 | 5410 | t4_init_sge_params(adap); |
---|
4667 | | - adap->flags |= FW_OK; |
---|
| 5411 | + adap->flags |= CXGB4_FW_OK; |
---|
4668 | 5412 | t4_init_tp_params(adap, true); |
---|
4669 | 5413 | return 0; |
---|
4670 | 5414 | |
---|
.. | .. |
---|
4699 | 5443 | goto out; |
---|
4700 | 5444 | |
---|
4701 | 5445 | rtnl_lock(); |
---|
4702 | | - adap->flags &= ~FW_OK; |
---|
| 5446 | + adap->flags &= ~CXGB4_FW_OK; |
---|
4703 | 5447 | notify_ulds(adap, CXGB4_STATE_START_RECOVERY); |
---|
4704 | 5448 | spin_lock(&adap->stats_lock); |
---|
4705 | 5449 | for_each_port(adap, i) { |
---|
.. | .. |
---|
4711 | 5455 | } |
---|
4712 | 5456 | spin_unlock(&adap->stats_lock); |
---|
4713 | 5457 | disable_interrupts(adap); |
---|
4714 | | - if (adap->flags & FULL_INIT_DONE) |
---|
| 5458 | + if (adap->flags & CXGB4_FULL_INIT_DONE) |
---|
4715 | 5459 | cxgb_down(adap); |
---|
4716 | 5460 | rtnl_unlock(); |
---|
4717 | | - if ((adap->flags & DEV_ENABLED)) { |
---|
| 5461 | + if ((adap->flags & CXGB4_DEV_ENABLED)) { |
---|
4718 | 5462 | pci_disable_device(pdev); |
---|
4719 | | - adap->flags &= ~DEV_ENABLED; |
---|
| 5463 | + adap->flags &= ~CXGB4_DEV_ENABLED; |
---|
4720 | 5464 | } |
---|
4721 | 5465 | out: return state == pci_channel_io_perm_failure ? |
---|
4722 | 5466 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; |
---|
.. | .. |
---|
4734 | 5478 | return PCI_ERS_RESULT_RECOVERED; |
---|
4735 | 5479 | } |
---|
4736 | 5480 | |
---|
4737 | | - if (!(adap->flags & DEV_ENABLED)) { |
---|
| 5481 | + if (!(adap->flags & CXGB4_DEV_ENABLED)) { |
---|
4738 | 5482 | if (pci_enable_device(pdev)) { |
---|
4739 | 5483 | dev_err(&pdev->dev, "Cannot reenable PCI " |
---|
4740 | 5484 | "device after reset\n"); |
---|
4741 | 5485 | return PCI_ERS_RESULT_DISCONNECT; |
---|
4742 | 5486 | } |
---|
4743 | | - adap->flags |= DEV_ENABLED; |
---|
| 5487 | + adap->flags |= CXGB4_DEV_ENABLED; |
---|
4744 | 5488 | } |
---|
4745 | 5489 | |
---|
4746 | 5490 | pci_set_master(pdev); |
---|
4747 | 5491 | pci_restore_state(pdev); |
---|
4748 | 5492 | pci_save_state(pdev); |
---|
4749 | | - pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
4750 | 5493 | |
---|
4751 | 5494 | if (t4_wait_dev_ready(adap->regs) < 0) |
---|
4752 | 5495 | return PCI_ERS_RESULT_DISCONNECT; |
---|
4753 | 5496 | if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) |
---|
4754 | 5497 | return PCI_ERS_RESULT_DISCONNECT; |
---|
4755 | | - adap->flags |= FW_OK; |
---|
| 5498 | + adap->flags |= CXGB4_FW_OK; |
---|
4756 | 5499 | if (adap_init1(adap, &c)) |
---|
4757 | 5500 | return PCI_ERS_RESULT_DISCONNECT; |
---|
4758 | 5501 | |
---|
4759 | 5502 | for_each_port(adap, i) { |
---|
4760 | | - struct port_info *p = adap2pinfo(adap, i); |
---|
| 5503 | + struct port_info *pi = adap2pinfo(adap, i); |
---|
| 5504 | + u8 vivld = 0, vin = 0; |
---|
4761 | 5505 | |
---|
4762 | | - ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, |
---|
4763 | | - NULL, NULL); |
---|
| 5506 | + ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, |
---|
| 5507 | + NULL, NULL, &vivld, &vin); |
---|
4764 | 5508 | if (ret < 0) |
---|
4765 | 5509 | return PCI_ERS_RESULT_DISCONNECT; |
---|
4766 | | - p->viid = ret; |
---|
4767 | | - p->xact_addr_filt = -1; |
---|
| 5510 | + pi->viid = ret; |
---|
| 5511 | + pi->xact_addr_filt = -1; |
---|
| 5512 | + /* If fw supports returning the VIN as part of FW_VI_CMD, |
---|
| 5513 | + * save the returned values. |
---|
| 5514 | + */ |
---|
| 5515 | + if (adap->params.viid_smt_extn_support) { |
---|
| 5516 | + pi->vivld = vivld; |
---|
| 5517 | + pi->vin = vin; |
---|
| 5518 | + } else { |
---|
| 5519 | + /* Retrieve the values from VIID */ |
---|
| 5520 | + pi->vivld = FW_VIID_VIVLD_G(pi->viid); |
---|
| 5521 | + pi->vin = FW_VIID_VIN_G(pi->viid); |
---|
| 5522 | + } |
---|
4768 | 5523 | } |
---|
4769 | 5524 | |
---|
4770 | 5525 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, |
---|
.. | .. |
---|
4797 | 5552 | rtnl_unlock(); |
---|
4798 | 5553 | } |
---|
4799 | 5554 | |
---|
| 5555 | +static void eeh_reset_prepare(struct pci_dev *pdev) |
---|
| 5556 | +{ |
---|
| 5557 | + struct adapter *adapter = pci_get_drvdata(pdev); |
---|
| 5558 | + int i; |
---|
| 5559 | + |
---|
| 5560 | + if (adapter->pf != 4) |
---|
| 5561 | + return; |
---|
| 5562 | + |
---|
| 5563 | + adapter->flags &= ~CXGB4_FW_OK; |
---|
| 5564 | + |
---|
| 5565 | + notify_ulds(adapter, CXGB4_STATE_DOWN); |
---|
| 5566 | + |
---|
| 5567 | + for_each_port(adapter, i) |
---|
| 5568 | + if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
---|
| 5569 | + cxgb_close(adapter->port[i]); |
---|
| 5570 | + |
---|
| 5571 | + disable_interrupts(adapter); |
---|
| 5572 | + cxgb4_free_mps_ref_entries(adapter); |
---|
| 5573 | + |
---|
| 5574 | + adap_free_hma_mem(adapter); |
---|
| 5575 | + |
---|
| 5576 | + if (adapter->flags & CXGB4_FULL_INIT_DONE) |
---|
| 5577 | + cxgb_down(adapter); |
---|
| 5578 | +} |
---|
| 5579 | + |
---|
| 5580 | +static void eeh_reset_done(struct pci_dev *pdev) |
---|
| 5581 | +{ |
---|
| 5582 | + struct adapter *adapter = pci_get_drvdata(pdev); |
---|
| 5583 | + int err, i; |
---|
| 5584 | + |
---|
| 5585 | + if (adapter->pf != 4) |
---|
| 5586 | + return; |
---|
| 5587 | + |
---|
| 5588 | + err = t4_wait_dev_ready(adapter->regs); |
---|
| 5589 | + if (err < 0) { |
---|
| 5590 | + dev_err(adapter->pdev_dev, |
---|
| 5591 | + "Device not ready, err %d", err); |
---|
| 5592 | + return; |
---|
| 5593 | + } |
---|
| 5594 | + |
---|
| 5595 | + setup_memwin(adapter); |
---|
| 5596 | + |
---|
| 5597 | + err = adap_init0(adapter, 1); |
---|
| 5598 | + if (err) { |
---|
| 5599 | + dev_err(adapter->pdev_dev, |
---|
| 5600 | + "Adapter init failed, err %d", err); |
---|
| 5601 | + return; |
---|
| 5602 | + } |
---|
| 5603 | + |
---|
| 5604 | + setup_memwin_rdma(adapter); |
---|
| 5605 | + |
---|
| 5606 | + if (adapter->flags & CXGB4_FW_OK) { |
---|
| 5607 | + err = t4_port_init(adapter, adapter->pf, adapter->pf, 0); |
---|
| 5608 | + if (err) { |
---|
| 5609 | + dev_err(adapter->pdev_dev, |
---|
| 5610 | + "Port init failed, err %d", err); |
---|
| 5611 | + return; |
---|
| 5612 | + } |
---|
| 5613 | + } |
---|
| 5614 | + |
---|
| 5615 | + err = cfg_queues(adapter); |
---|
| 5616 | + if (err) { |
---|
| 5617 | + dev_err(adapter->pdev_dev, |
---|
| 5618 | + "Config queues failed, err %d", err); |
---|
| 5619 | + return; |
---|
| 5620 | + } |
---|
| 5621 | + |
---|
| 5622 | + cxgb4_init_mps_ref_entries(adapter); |
---|
| 5623 | + |
---|
| 5624 | + err = setup_fw_sge_queues(adapter); |
---|
| 5625 | + if (err) { |
---|
| 5626 | + dev_err(adapter->pdev_dev, |
---|
| 5627 | + "FW sge queue allocation failed, err %d", err); |
---|
| 5628 | + return; |
---|
| 5629 | + } |
---|
| 5630 | + |
---|
| 5631 | + for_each_port(adapter, i) |
---|
| 5632 | + if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
---|
| 5633 | + cxgb_open(adapter->port[i]); |
---|
| 5634 | +} |
---|
| 5635 | + |
---|
4800 | 5636 | static const struct pci_error_handlers cxgb4_eeh = { |
---|
4801 | 5637 | .error_detected = eeh_err_detected, |
---|
4802 | 5638 | .slot_reset = eeh_slot_reset, |
---|
4803 | 5639 | .resume = eeh_resume, |
---|
| 5640 | + .reset_prepare = eeh_reset_prepare, |
---|
| 5641 | + .reset_done = eeh_reset_done, |
---|
4804 | 5642 | }; |
---|
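The two new hooks are driven by the PCI core around a function-level reset: ->reset_prepare() runs before the reset method and ->reset_done() after config space is restored, which lets the driver quiesce queues, ULDs and HMA memory across the reset. A simplified sketch of the triggering path (see pci_reset_function() in drivers/pci/pci.c for the real sequence):

	/* Sketch only: how the core exercises the hooks registered above. */
	static int sketch_trigger_reset(struct pci_dev *pdev)
	{
		/* pci_dev_save_and_disable() -> err_handler->reset_prepare(),
		 * then the reset method runs (e.g. FLR),
		 * then pci_dev_restore() -> err_handler->reset_done().
		 */
		return pci_reset_function(pdev);
	}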
4805 | 5643 | |
---|
4806 | 5644 | /* Return true if the Link Configuration supports "High Speeds" (those greater |
---|
.. | .. |
---|
4817 | 5655 | return high_speeds != 0; |
---|
4818 | 5656 | } |
---|
4819 | 5657 | |
---|
4820 | | -/* |
---|
4821 | | - * Perform default configuration of DMA queues depending on the number and type |
---|
| 5658 | +/* Perform default configuration of DMA queues depending on the number and type |
---|
4822 | 5659 | * of ports we found and the number of available CPUs. Most settings can be |
---|
4823 | 5660 | * modified by the admin prior to actual use. |
---|
4824 | 5661 | */ |
---|
4825 | 5662 | static int cfg_queues(struct adapter *adap) |
---|
4826 | 5663 | { |
---|
| 5664 | + u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; |
---|
| 5665 | + u32 ncpus = num_online_cpus(); |
---|
| 5666 | + u32 niqflint, neq, num_ulds; |
---|
4827 | 5667 | struct sge *s = &adap->sge; |
---|
4828 | | - int i, n10g = 0, qidx = 0; |
---|
4829 | | - int niqflint, neq, avail_eth_qsets; |
---|
4830 | | - int max_eth_qsets = 32; |
---|
4831 | | -#ifndef CONFIG_CHELSIO_T4_DCB |
---|
4832 | | - int q10g = 0; |
---|
4833 | | -#endif |
---|
| 5668 | + u32 i, n10g = 0, qidx = 0; |
---|
| 5669 | + u32 q10g = 0, q1g; |
---|
4834 | 5670 | |
---|
4835 | | - /* Reduce memory usage in kdump environment, disable all offload. |
---|
4836 | | - */ |
---|
| 5671 | + /* Reduce memory usage in kdump environment, disable all offload. */ |
---|
4837 | 5672 | if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { |
---|
4838 | 5673 | adap->params.offload = 0; |
---|
4839 | 5674 | adap->params.crypto = 0; |
---|
| 5675 | + adap->params.ethofld = 0; |
---|
4840 | 5676 | } |
---|
4841 | 5677 | |
---|
4842 | 5678 | /* Calculate the number of Ethernet Queue Sets available based on |
---|
.. | .. |
---|
4852 | 5688 | * at all is problematic ... |
---|
4853 | 5689 | */ |
---|
4854 | 5690 | niqflint = adap->params.pfres.niqflint - 1; |
---|
4855 | | - if (!(adap->flags & USING_MSIX)) |
---|
| 5691 | + if (!(adap->flags & CXGB4_USING_MSIX)) |
---|
4856 | 5692 | niqflint--; |
---|
4857 | 5693 | neq = adap->params.pfres.neq / 2; |
---|
4858 | | - avail_eth_qsets = min(niqflint, neq); |
---|
| 5694 | + avail_qsets = min(niqflint, neq); |
---|
4859 | 5695 | |
---|
4860 | | - if (avail_eth_qsets > max_eth_qsets) |
---|
4861 | | - avail_eth_qsets = max_eth_qsets; |
---|
4862 | | - |
---|
4863 | | - if (avail_eth_qsets < adap->params.nports) { |
---|
| 5696 | + if (avail_qsets < adap->params.nports) { |
---|
4864 | 5697 | dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", |
---|
4865 | | - avail_eth_qsets, adap->params.nports); |
---|
| 5698 | + avail_qsets, adap->params.nports); |
---|
4866 | 5699 | return -ENOMEM; |
---|
4867 | 5700 | } |
---|
4868 | 5701 | |
---|
.. | .. |
---|
4870 | 5703 | for_each_port(adap, i) |
---|
4871 | 5704 | n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); |
---|
4872 | 5705 | |
---|
| 5706 | + avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); |
---|
| 5707 | + |
---|
| 5708 | + /* We default to 1 queue per non-10G port and up to # of cores queues |
---|
| 5709 | + * per 10G port. |
---|
| 5710 | + */ |
---|
| 5711 | + if (n10g) |
---|
| 5712 | + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; |
---|
| 5713 | + |
---|
4873 | 5714 | #ifdef CONFIG_CHELSIO_T4_DCB |
---|
4874 | 5715 | /* For Data Center Bridging support we need to be able to support up |
---|
4875 | 5716 | * to 8 Traffic Priorities; each of which will be assigned to its |
---|
4876 | 5717 | * own TX Queue in order to prevent Head-Of-Line Blocking. |
---|
4877 | 5718 | */ |
---|
| 5719 | + q1g = 8; |
---|
4878 | 5720 | if (adap->params.nports * 8 > avail_eth_qsets) { |
---|
4879 | 5721 | dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", |
---|
4880 | 5722 | avail_eth_qsets, adap->params.nports * 8); |
---|
4881 | 5723 | return -ENOMEM; |
---|
4882 | 5724 | } |
---|
4883 | 5725 | |
---|
4884 | | - for_each_port(adap, i) { |
---|
4885 | | - struct port_info *pi = adap2pinfo(adap, i); |
---|
| 5726 | + if (adap->params.nports * ncpus < avail_eth_qsets) |
---|
| 5727 | + q10g = max(8U, ncpus); |
---|
| 5728 | + else |
---|
| 5729 | + q10g = max(8U, q10g); |
---|
4886 | 5730 | |
---|
4887 | | - pi->first_qset = qidx; |
---|
4888 | | - pi->nqsets = is_kdump_kernel() ? 1 : 8; |
---|
4889 | | - qidx += pi->nqsets; |
---|
4890 | | - } |
---|
| 5731 | + while ((q10g * n10g) > |
---|
| 5732 | + (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) |
---|
| 5733 | + q10g--; |
---|
| 5734 | + |
---|
4891 | 5735 | #else /* !CONFIG_CHELSIO_T4_DCB */ |
---|
4892 | | - /* |
---|
4893 | | - * We default to 1 queue per non-10G port and up to # of cores queues |
---|
4894 | | - * per 10G port. |
---|
4895 | | - */ |
---|
4896 | | - if (n10g) |
---|
4897 | | - q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; |
---|
4898 | | - if (q10g > netif_get_num_default_rss_queues()) |
---|
4899 | | - q10g = netif_get_num_default_rss_queues(); |
---|
4900 | | - |
---|
4901 | | - if (is_kdump_kernel()) |
---|
| 5736 | + q1g = 1; |
---|
| 5737 | + q10g = min(q10g, ncpus); |
---|
| 5738 | +#endif /* !CONFIG_CHELSIO_T4_DCB */ |
---|
| 5739 | + if (is_kdump_kernel()) { |
---|
4902 | 5740 | q10g = 1; |
---|
| 5741 | + q1g = 1; |
---|
| 5742 | + } |
---|
4903 | 5743 | |
---|
4904 | 5744 | for_each_port(adap, i) { |
---|
4905 | 5745 | struct port_info *pi = adap2pinfo(adap, i); |
---|
4906 | 5746 | |
---|
4907 | 5747 | pi->first_qset = qidx; |
---|
4908 | | - pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; |
---|
| 5748 | + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; |
---|
4909 | 5749 | qidx += pi->nqsets; |
---|
4910 | 5750 | } |
---|
4911 | | -#endif /* !CONFIG_CHELSIO_T4_DCB */ |
---|
4912 | 5751 | |
---|
4913 | 5752 | s->ethqsets = qidx; |
---|
4914 | 5753 | s->max_ethqsets = qidx; /* MSI-X may lower it later */ |
---|
| 5754 | + avail_qsets -= qidx; |
---|
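A worked example of the split above (illustrative numbers, non-DCB build): with two 10G ports, no 1G ports, avail_eth_qsets = 32 and ncpus = 8, q10g starts at (32 - 0) / 2 = 16 and is then capped to min(16, 8) = 8, so each port gets pi->nqsets = 8 and s->ethqsets ends up at 16, leaving the remaining queue sets for the ULD, ETHOFLD and mirror pools computed next.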
4915 | 5755 | |
---|
4916 | 5756 | if (is_uld(adap)) { |
---|
4917 | | - /* |
---|
4918 | | - * For offload we use 1 queue/channel if all ports are up to 1G, |
---|
| 5757 | + /* For offload we use 1 queue/channel if all ports are up to 1G, |
---|
4919 | 5758 | * otherwise we divide all available queues amongst the channels |
---|
4920 | 5759 | * capped by the number of available cores. |
---|
4921 | 5760 | */ |
---|
4922 | | - if (n10g) { |
---|
4923 | | - i = min_t(int, MAX_OFLD_QSETS, num_online_cpus()); |
---|
4924 | | - s->ofldqsets = roundup(i, adap->params.nports); |
---|
4925 | | - } else { |
---|
| 5761 | + num_ulds = adap->num_uld + adap->num_ofld_uld; |
---|
| 5762 | + i = min_t(u32, MAX_OFLD_QSETS, ncpus); |
---|
| 5763 | + avail_uld_qsets = roundup(i, adap->params.nports); |
---|
| 5764 | + if (avail_qsets < num_ulds * adap->params.nports) { |
---|
| 5765 | + adap->params.offload = 0; |
---|
| 5766 | + adap->params.crypto = 0; |
---|
| 5767 | + s->ofldqsets = 0; |
---|
| 5768 | + } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) { |
---|
4926 | 5769 | s->ofldqsets = adap->params.nports; |
---|
| 5770 | + } else { |
---|
| 5771 | + s->ofldqsets = avail_uld_qsets; |
---|
4927 | 5772 | } |
---|
| 5773 | + |
---|
| 5774 | + avail_qsets -= num_ulds * s->ofldqsets; |
---|
4928 | 5775 | } |
---|
| 5776 | + |
---|
| 5777 | + /* ETHOFLD Queues used for QoS offload should follow same |
---|
| 5778 | + * allocation scheme as normal Ethernet Queues. |
---|
| 5779 | + */ |
---|
| 5780 | + if (is_ethofld(adap)) { |
---|
| 5781 | + if (avail_qsets < s->max_ethqsets) { |
---|
| 5782 | + adap->params.ethofld = 0; |
---|
| 5783 | + s->eoqsets = 0; |
---|
| 5784 | + } else { |
---|
| 5785 | + s->eoqsets = s->max_ethqsets; |
---|
| 5786 | + } |
---|
| 5787 | + avail_qsets -= s->eoqsets; |
---|
| 5788 | + } |
---|
| 5789 | + |
---|
| 5790 | + /* Mirror queues must follow same scheme as normal Ethernet |
---|
| 5791 | + * Queues, when there are enough queues available. Otherwise, |
---|
| 5792 | + * allocate at least 1 queue per port. If even 1 queue is not |
---|
| 5793 | + * available, then disable mirror queues support. |
---|
| 5794 | + */ |
---|
| 5795 | + if (avail_qsets >= s->max_ethqsets) |
---|
| 5796 | + s->mirrorqsets = s->max_ethqsets; |
---|
| 5797 | + else if (avail_qsets >= adap->params.nports) |
---|
| 5798 | + s->mirrorqsets = adap->params.nports; |
---|
| 5799 | + else |
---|
| 5800 | + s->mirrorqsets = 0; |
---|
| 5801 | + avail_qsets -= s->mirrorqsets; |
---|
4929 | 5802 | |
---|
4930 | 5803 | for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { |
---|
4931 | 5804 | struct sge_eth_rxq *r = &s->ethrxq[i]; |
---|
.. | .. |
---|
4977 | 5850 | } |
---|
4978 | 5851 | } |
---|
4979 | 5852 | |
---|
4980 | | -static int get_msix_info(struct adapter *adap) |
---|
| 5853 | +static int alloc_msix_info(struct adapter *adap, u32 num_vec) |
---|
4981 | 5854 | { |
---|
4982 | | - struct uld_msix_info *msix_info; |
---|
4983 | | - unsigned int max_ingq = 0; |
---|
| 5855 | + struct msix_info *msix_info; |
---|
4984 | 5856 | |
---|
4985 | | - if (is_offload(adap)) |
---|
4986 | | - max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld; |
---|
4987 | | - if (is_pci_uld(adap)) |
---|
4988 | | - max_ingq += MAX_OFLD_QSETS * adap->num_uld; |
---|
4989 | | - |
---|
4990 | | - if (!max_ingq) |
---|
4991 | | - goto out; |
---|
4992 | | - |
---|
4993 | | - msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); |
---|
| 5857 | + msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL); |
---|
4994 | 5858 | if (!msix_info) |
---|
4995 | 5859 | return -ENOMEM; |
---|
4996 | 5860 | |
---|
4997 | | - adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), |
---|
4998 | | - sizeof(long), GFP_KERNEL); |
---|
4999 | | - if (!adap->msix_bmap_ulds.msix_bmap) { |
---|
| 5861 | + adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec), |
---|
| 5862 | + sizeof(long), GFP_KERNEL); |
---|
| 5863 | + if (!adap->msix_bmap.msix_bmap) { |
---|
5000 | 5864 | kfree(msix_info); |
---|
5001 | 5865 | return -ENOMEM; |
---|
5002 | 5866 | } |
---|
5003 | | - spin_lock_init(&adap->msix_bmap_ulds.lock); |
---|
5004 | | - adap->msix_info_ulds = msix_info; |
---|
5005 | | -out: |
---|
| 5867 | + |
---|
| 5868 | + spin_lock_init(&adap->msix_bmap.lock); |
---|
| 5869 | + adap->msix_bmap.mapsize = num_vec; |
---|
| 5870 | + |
---|
| 5871 | + adap->msix_info = msix_info; |
---|
5006 | 5872 | return 0; |
---|
5007 | 5873 | } |
---|
5008 | 5874 | |
---|
5009 | 5875 | static void free_msix_info(struct adapter *adap) |
---|
5010 | 5876 | { |
---|
5011 | | - if (!(adap->num_uld && adap->num_ofld_uld)) |
---|
5012 | | - return; |
---|
| 5877 | + kfree(adap->msix_bmap.msix_bmap); |
---|
| 5878 | + kfree(adap->msix_info); |
---|
| 5879 | +} |
---|
5013 | 5880 | |
---|
5014 | | - kfree(adap->msix_info_ulds); |
---|
5015 | | - kfree(adap->msix_bmap_ulds.msix_bmap); |
---|
| 5881 | +int cxgb4_get_msix_idx_from_bmap(struct adapter *adap) |
---|
| 5882 | +{ |
---|
| 5883 | + struct msix_bmap *bmap = &adap->msix_bmap; |
---|
| 5884 | + unsigned int msix_idx; |
---|
| 5885 | + unsigned long flags; |
---|
| 5886 | + |
---|
| 5887 | + spin_lock_irqsave(&bmap->lock, flags); |
---|
| 5888 | + msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); |
---|
| 5889 | + if (msix_idx < bmap->mapsize) { |
---|
| 5890 | + __set_bit(msix_idx, bmap->msix_bmap); |
---|
| 5891 | + } else { |
---|
| 5892 | + spin_unlock_irqrestore(&bmap->lock, flags); |
---|
| 5893 | + return -ENOSPC; |
---|
| 5894 | + } |
---|
| 5895 | + |
---|
| 5896 | + spin_unlock_irqrestore(&bmap->lock, flags); |
---|
| 5897 | + return msix_idx; |
---|
| 5898 | +} |
---|
| 5899 | + |
---|
| 5900 | +void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, |
---|
| 5901 | + unsigned int msix_idx) |
---|
| 5902 | +{ |
---|
| 5903 | + struct msix_bmap *bmap = &adap->msix_bmap; |
---|
| 5904 | + unsigned long flags; |
---|
| 5905 | + |
---|
| 5906 | + spin_lock_irqsave(&bmap->lock, flags); |
---|
| 5907 | + __clear_bit(msix_idx, bmap->msix_bmap); |
---|
| 5908 | + spin_unlock_irqrestore(&bmap->lock, flags); |
---|
5016 | 5909 | } |
---|
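
cxgb4_get_msix_idx_from_bmap() and cxgb4_free_msix_idx_in_bmap() replace the old ULD-only bitmap with a single allocator covering every vector: a slot is claimed under the IRQ-safe spinlock via find_first_zero_bit()/__set_bit() and returned with __clear_bit(), with -ENOSPC when the map is exhausted. A rough userspace analogue of the same claim/release pattern, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    #define MAPSIZE 8                     /* vectors tracked; illustrative */

    static pthread_mutex_t bmap_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long bmap;            /* bit n set => index n in use */

    /* Claim the first free index; -1 plays the role of -ENOSPC. */
    static int get_idx(void)
    {
        int i, ret = -1;

        pthread_mutex_lock(&bmap_lock);
        for (i = 0; i < MAPSIZE; i++) {
            if (!(bmap & (1UL << i))) {
                bmap |= 1UL << i;         /* like __set_bit() */
                ret = i;
                break;
            }
        }
        pthread_mutex_unlock(&bmap_lock);
        return ret;
    }

    static void free_idx(int idx)
    {
        pthread_mutex_lock(&bmap_lock);
        bmap &= ~(1UL << idx);            /* like __clear_bit() */
        pthread_mutex_unlock(&bmap_lock);
    }

    int main(void)
    {
        int a = get_idx(), b = get_idx();

        printf("claimed %d and %d\n", a, b);    /* 0 and 1 */
        free_idx(a);
        printf("reclaimed %d\n", get_idx());    /* 0 again */
        return 0;
    }
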
5017 | 5910 | |
---|
5018 | 5911 | /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ |
---|
.. | .. |
---|
5020 | 5913 | |
---|
5021 | 5914 | static int enable_msix(struct adapter *adap) |
---|
5022 | 5915 | { |
---|
5023 | | - int ofld_need = 0, uld_need = 0; |
---|
5024 | | - int i, j, want, need, allocated; |
---|
| 5916 | + u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0; |
---|
| 5917 | + u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0; |
---|
| 5918 | + u8 num_uld = 0, nchan = adap->params.nports; |
---|
| 5919 | + u32 i, want, need, num_vec; |
---|
5025 | 5920 | struct sge *s = &adap->sge; |
---|
5026 | | - unsigned int nchan = adap->params.nports; |
---|
5027 | 5921 | struct msix_entry *entries; |
---|
5028 | | - int max_ingq = MAX_INGQ; |
---|
| 5922 | + struct port_info *pi; |
---|
| 5923 | + int allocated, ret; |
---|
5029 | 5924 | |
---|
5030 | | - if (is_pci_uld(adap)) |
---|
5031 | | - max_ingq += (MAX_OFLD_QSETS * adap->num_uld); |
---|
5032 | | - if (is_offload(adap)) |
---|
5033 | | - max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld); |
---|
5034 | | - entries = kmalloc_array(max_ingq + 1, sizeof(*entries), |
---|
5035 | | - GFP_KERNEL); |
---|
5036 | | - if (!entries) |
---|
5037 | | - return -ENOMEM; |
---|
5038 | | - |
---|
5039 | | - /* map for msix */ |
---|
5040 | | - if (get_msix_info(adap)) { |
---|
5041 | | - adap->params.offload = 0; |
---|
5042 | | - adap->params.crypto = 0; |
---|
5043 | | - } |
---|
5044 | | - |
---|
5045 | | - for (i = 0; i < max_ingq + 1; ++i) |
---|
5046 | | - entries[i].entry = i; |
---|
5047 | | - |
---|
5048 | | - want = s->max_ethqsets + EXTRA_VECS; |
---|
5049 | | - if (is_offload(adap)) { |
---|
5050 | | - want += adap->num_ofld_uld * s->ofldqsets; |
---|
5051 | | - ofld_need = adap->num_ofld_uld * nchan; |
---|
5052 | | - } |
---|
5053 | | - if (is_pci_uld(adap)) { |
---|
5054 | | - want += adap->num_uld * s->ofldqsets; |
---|
5055 | | - uld_need = adap->num_uld * nchan; |
---|
5056 | | - } |
---|
| 5925 | + want = s->max_ethqsets; |
---|
5057 | 5926 | #ifdef CONFIG_CHELSIO_T4_DCB |
---|
5058 | 5927 | /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for |
---|
5059 | 5928 | * each port. |
---|
5060 | 5929 | */ |
---|
5061 | | - need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; |
---|
| 5930 | + need = 8 * nchan; |
---|
5062 | 5931 | #else |
---|
5063 | | - need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; |
---|
| 5932 | + need = nchan; |
---|
5064 | 5933 | #endif |
---|
| 5934 | + eth_need = need; |
---|
| 5935 | + if (is_uld(adap)) { |
---|
| 5936 | + num_uld = adap->num_ofld_uld + adap->num_uld; |
---|
| 5937 | + want += num_uld * s->ofldqsets; |
---|
| 5938 | + uld_need = num_uld * nchan; |
---|
| 5939 | + need += uld_need; |
---|
| 5940 | + } |
---|
| 5941 | + |
---|
| 5942 | + if (is_ethofld(adap)) { |
---|
| 5943 | + want += s->eoqsets; |
---|
| 5944 | + ethofld_need = eth_need; |
---|
| 5945 | + need += ethofld_need; |
---|
| 5946 | + } |
---|
| 5947 | + |
---|
| 5948 | + if (s->mirrorqsets) { |
---|
| 5949 | + want += s->mirrorqsets; |
---|
| 5950 | + mirror_need = nchan; |
---|
| 5951 | + need += mirror_need; |
---|
| 5952 | + } |
---|
| 5953 | + |
---|
| 5954 | + want += EXTRA_VECS; |
---|
| 5955 | + need += EXTRA_VECS; |
---|
| 5956 | + |
---|
| 5957 | + entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL); |
---|
| 5958 | + if (!entries) |
---|
| 5959 | + return -ENOMEM; |
---|
| 5960 | + |
---|
| 5961 | + for (i = 0; i < want; i++) |
---|
| 5962 | + entries[i].entry = i; |
---|
| 5963 | + |
---|
5065 | 5964 | allocated = pci_enable_msix_range(adap->pdev, entries, need, want); |
---|
5066 | 5965 | if (allocated < 0) { |
---|
5067 | | - dev_info(adap->pdev_dev, "not enough MSI-X vectors left," |
---|
5068 | | - " not using MSI-X\n"); |
---|
5069 | | - kfree(entries); |
---|
5070 | | - return allocated; |
---|
5071 | | - } |
---|
5072 | | - |
---|
5073 | | - /* Distribute available vectors to the various queue groups. |
---|
5074 | | - * Every group gets its minimum requirement and NIC gets top |
---|
5075 | | - * priority for leftovers. |
---|
5076 | | - */ |
---|
5077 | | - i = allocated - EXTRA_VECS - ofld_need - uld_need; |
---|
5078 | | - if (i < s->max_ethqsets) { |
---|
5079 | | - s->max_ethqsets = i; |
---|
5080 | | - if (i < s->ethqsets) |
---|
5081 | | - reduce_ethqs(adap, i); |
---|
5082 | | - } |
---|
5083 | | - if (is_uld(adap)) { |
---|
5084 | | - if (allocated < want) |
---|
5085 | | - s->nqs_per_uld = nchan; |
---|
5086 | | - else |
---|
5087 | | - s->nqs_per_uld = s->ofldqsets; |
---|
5088 | | - } |
---|
5089 | | - |
---|
5090 | | - for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i) |
---|
5091 | | - adap->msix_info[i].vec = entries[i].vector; |
---|
5092 | | - if (is_uld(adap)) { |
---|
5093 | | - for (j = 0 ; i < allocated; ++i, j++) { |
---|
5094 | | - adap->msix_info_ulds[j].vec = entries[i].vector; |
---|
5095 | | - adap->msix_info_ulds[j].idx = i; |
---|
| 5966 | + /* Disable offload and attempt to get vectors for NIC |
---|
| 5967 | + * only mode. |
---|
| 5968 | + */ |
---|
| 5969 | + want = s->max_ethqsets + EXTRA_VECS; |
---|
| 5970 | + need = eth_need + EXTRA_VECS; |
---|
| 5971 | + allocated = pci_enable_msix_range(adap->pdev, entries, |
---|
| 5972 | + need, want); |
---|
| 5973 | + if (allocated < 0) { |
---|
| 5974 | + dev_info(adap->pdev_dev, |
---|
| 5975 | + "Disabling MSI-X due to insufficient MSI-X vectors\n"); |
---|
| 5976 | + ret = allocated; |
---|
| 5977 | + goto out_free; |
---|
5096 | 5978 | } |
---|
5097 | | - adap->msix_bmap_ulds.mapsize = j; |
---|
| 5979 | + |
---|
| 5980 | + dev_info(adap->pdev_dev, |
---|
| 5981 | + "Disabling offload due to insufficient MSI-X vectors\n"); |
---|
| 5982 | + adap->params.offload = 0; |
---|
| 5983 | + adap->params.crypto = 0; |
---|
| 5984 | + adap->params.ethofld = 0; |
---|
| 5985 | + s->ofldqsets = 0; |
---|
| 5986 | + s->eoqsets = 0; |
---|
| 5987 | + s->mirrorqsets = 0; |
---|
| 5988 | + uld_need = 0; |
---|
| 5989 | + ethofld_need = 0; |
---|
| 5990 | + mirror_need = 0; |
---|
5098 | 5991 | } |
---|
5099 | | - dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " |
---|
5100 | | - "nic %d per uld %d\n", |
---|
5101 | | - allocated, s->max_ethqsets, s->nqs_per_uld); |
---|
| 5992 | + |
---|
| 5993 | + num_vec = allocated; |
---|
| 5994 | + if (num_vec < want) { |
---|
| 5995 | + /* Distribute available vectors to the various queue groups. |
---|
| 5996 | + * Every group gets its minimum requirement and NIC gets top |
---|
| 5997 | + * priority for leftovers. |
---|
| 5998 | + */ |
---|
| 5999 | + ethqsets = eth_need; |
---|
| 6000 | + if (is_uld(adap)) |
---|
| 6001 | + ofldqsets = nchan; |
---|
| 6002 | + if (is_ethofld(adap)) |
---|
| 6003 | + eoqsets = ethofld_need; |
---|
| 6004 | + if (s->mirrorqsets) |
---|
| 6005 | + mirrorqsets = mirror_need; |
---|
| 6006 | + |
---|
| 6007 | + num_vec -= need; |
---|
| 6008 | + while (num_vec) { |
---|
| 6009 | + if (num_vec < eth_need + ethofld_need || |
---|
| 6010 | + ethqsets > s->max_ethqsets) |
---|
| 6011 | + break; |
---|
| 6012 | + |
---|
| 6013 | + for_each_port(adap, i) { |
---|
| 6014 | + pi = adap2pinfo(adap, i); |
---|
| 6015 | + if (pi->nqsets < 2) |
---|
| 6016 | + continue; |
---|
| 6017 | + |
---|
| 6018 | + ethqsets++; |
---|
| 6019 | + num_vec--; |
---|
| 6020 | + if (ethofld_need) { |
---|
| 6021 | + eoqsets++; |
---|
| 6022 | + num_vec--; |
---|
| 6023 | + } |
---|
| 6024 | + } |
---|
| 6025 | + } |
---|
| 6026 | + |
---|
| 6027 | + if (is_uld(adap)) { |
---|
| 6028 | + while (num_vec) { |
---|
| 6029 | + if (num_vec < uld_need || |
---|
| 6030 | + ofldqsets > s->ofldqsets) |
---|
| 6031 | + break; |
---|
| 6032 | + |
---|
| 6033 | + ofldqsets++; |
---|
| 6034 | + num_vec -= uld_need; |
---|
| 6035 | + } |
---|
| 6036 | + } |
---|
| 6037 | + |
---|
| 6038 | + if (s->mirrorqsets) { |
---|
| 6039 | + while (num_vec) { |
---|
| 6040 | + if (num_vec < mirror_need || |
---|
| 6041 | + mirrorqsets > s->mirrorqsets) |
---|
| 6042 | + break; |
---|
| 6043 | + |
---|
| 6044 | + mirrorqsets++; |
---|
| 6045 | + num_vec -= mirror_need; |
---|
| 6046 | + } |
---|
| 6047 | + } |
---|
| 6048 | + } else { |
---|
| 6049 | + ethqsets = s->max_ethqsets; |
---|
| 6050 | + if (is_uld(adap)) |
---|
| 6051 | + ofldqsets = s->ofldqsets; |
---|
| 6052 | + if (is_ethofld(adap)) |
---|
| 6053 | + eoqsets = s->eoqsets; |
---|
| 6054 | + if (s->mirrorqsets) |
---|
| 6055 | + mirrorqsets = s->mirrorqsets; |
---|
| 6056 | + } |
---|
| 6057 | + |
---|
| 6058 | + if (ethqsets < s->max_ethqsets) { |
---|
| 6059 | + s->max_ethqsets = ethqsets; |
---|
| 6060 | + reduce_ethqs(adap, ethqsets); |
---|
| 6061 | + } |
---|
| 6062 | + |
---|
| 6063 | + if (is_uld(adap)) { |
---|
| 6064 | + s->ofldqsets = ofldqsets; |
---|
| 6065 | + s->nqs_per_uld = s->ofldqsets; |
---|
| 6066 | + } |
---|
| 6067 | + |
---|
| 6068 | + if (is_ethofld(adap)) |
---|
| 6069 | + s->eoqsets = eoqsets; |
---|
| 6070 | + |
---|
| 6071 | + if (s->mirrorqsets) { |
---|
| 6072 | + s->mirrorqsets = mirrorqsets; |
---|
| 6073 | + for_each_port(adap, i) { |
---|
| 6074 | + pi = adap2pinfo(adap, i); |
---|
| 6075 | + pi->nmirrorqsets = s->mirrorqsets / nchan; |
---|
| 6076 | + mutex_init(&pi->vi_mirror_mutex); |
---|
| 6077 | + } |
---|
| 6078 | + } |
---|
| 6079 | + |
---|
| 6080 | + /* map for msix */ |
---|
| 6081 | + ret = alloc_msix_info(adap, allocated); |
---|
| 6082 | + if (ret) |
---|
| 6083 | + goto out_disable_msix; |
---|
| 6084 | + |
---|
| 6085 | + for (i = 0; i < allocated; i++) { |
---|
| 6086 | + adap->msix_info[i].vec = entries[i].vector; |
---|
| 6087 | + adap->msix_info[i].idx = i; |
---|
| 6088 | + } |
---|
| 6089 | + |
---|
| 6090 | + dev_info(adap->pdev_dev, |
---|
| 6091 | + "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", |
---|
| 6092 | + allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, |
---|
| 6093 | + s->mirrorqsets); |
---|
5102 | 6094 | |
---|
5103 | 6095 | kfree(entries); |
---|
5104 | 6096 | return 0; |
---|
| 6097 | + |
---|
| 6098 | +out_disable_msix: |
---|
| 6099 | + pci_disable_msix(adap->pdev); |
---|
| 6100 | + |
---|
| 6101 | +out_free: |
---|
| 6102 | + kfree(entries); |
---|
| 6103 | + return ret; |
---|
5105 | 6104 | } |
---|
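
The rewritten enable_msix() asks pci_enable_msix_range() for `want` vectors but will accept as few as `need`; if even the offload minimum can't be met it retries in NIC-only mode, and on any shortfall each group is first pinned to its floor before leftovers are dealt out, with NIC (and paired ETHOFLD) queues taking priority. A simplified model of that shortfall distribution, with a single offload class and invented counts:

    #include <stdio.h>

    int main(void)
    {
        /* Pretend pci_enable_msix_range() granted fewer than we wanted. */
        unsigned int allocated = 12;
        unsigned int extra = 2;                  /* EXTRA_VECS */
        unsigned int eth_need = 2, uld_need = 2; /* 2 ports, 1 qset/port */
        unsigned int max_ethqsets = 8, max_ofldqsets = 8;
        unsigned int ethqsets = eth_need;        /* every group gets its floor */
        unsigned int ofldqsets = uld_need;
        unsigned int num_vec = allocated - (extra + eth_need + uld_need);

        /* NIC gets top priority for leftovers, one qset per port per round. */
        while (num_vec >= eth_need && ethqsets + eth_need <= max_ethqsets) {
            ethqsets += eth_need;
            num_vec -= eth_need;
        }
        /* Whatever remains feeds the offload group the same way. */
        while (num_vec >= uld_need && ofldqsets + uld_need <= max_ofldqsets) {
            ofldqsets += uld_need;
            num_vec -= uld_need;
        }

        printf("ethqsets=%u ofldqsets=%u spare=%u\n",
               ethqsets, ofldqsets, num_vec);
        return 0;
    }

Here the NIC loop absorbs all six spare vectors and the offload group is left at its per-channel minimum, mirroring the "NIC gets top priority for leftovers" comment in the driver.
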
5106 | 6105 | |
---|
5107 | 6106 | #undef EXTRA_VECS |
---|
.. | .. |
---|
5134 | 6133 | /* Software/Hardware configuration */ |
---|
5135 | 6134 | dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", |
---|
5136 | 6135 | is_offload(adapter) ? "R" : "", |
---|
5137 | | - ((adapter->flags & USING_MSIX) ? "MSI-X" : |
---|
5138 | | - (adapter->flags & USING_MSI) ? "MSI" : ""), |
---|
| 6136 | + ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : |
---|
| 6137 | + (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""), |
---|
5139 | 6138 | is_offload(adapter) ? "Offload" : "non-Offload"); |
---|
5140 | 6139 | } |
---|
5141 | 6140 | |
---|
.. | .. |
---|
5183 | 6182 | { |
---|
5184 | 6183 | unsigned int i; |
---|
5185 | 6184 | |
---|
5186 | | - kvfree(adapter->mps_encap); |
---|
5187 | 6185 | kvfree(adapter->smt); |
---|
5188 | 6186 | kvfree(adapter->l2t); |
---|
5189 | 6187 | kvfree(adapter->srq); |
---|
5190 | 6188 | t4_cleanup_sched(adapter); |
---|
5191 | 6189 | kvfree(adapter->tids.tid_tab); |
---|
| 6190 | + cxgb4_cleanup_tc_matchall(adapter); |
---|
| 6191 | + cxgb4_cleanup_tc_mqprio(adapter); |
---|
5192 | 6192 | cxgb4_cleanup_tc_flower(adapter); |
---|
5193 | 6193 | cxgb4_cleanup_tc_u32(adapter); |
---|
| 6194 | + cxgb4_cleanup_ethtool_filters(adapter); |
---|
5194 | 6195 | kfree(adapter->sge.egr_map); |
---|
5195 | 6196 | kfree(adapter->sge.ingr_map); |
---|
5196 | 6197 | kfree(adapter->sge.starving_fl); |
---|
.. | .. |
---|
5210 | 6211 | kfree(adap2pinfo(adapter, i)->rss); |
---|
5211 | 6212 | free_netdev(adapter->port[i]); |
---|
5212 | 6213 | } |
---|
5213 | | - if (adapter->flags & FW_OK) |
---|
| 6214 | + if (adapter->flags & CXGB4_FW_OK) |
---|
5214 | 6215 | t4_fw_bye(adapter, adapter->pf); |
---|
5215 | 6216 | } |
---|
5216 | 6217 | |
---|
5217 | | -#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) |
---|
| 6218 | +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \ |
---|
| 6219 | + NETIF_F_GSO_UDP_L4) |
---|
5218 | 6220 | #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ |
---|
5219 | | - NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) |
---|
| 6221 | + NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) |
---|
5220 | 6222 | #define SEGMENT_SIZE 128 |
---|
5221 | 6223 | |
---|
5222 | 6224 | static int t4_get_chip_type(struct adapter *adap, int ver) |
---|
.. | .. |
---|
5309 | 6311 | char name[IFNAMSIZ]; |
---|
5310 | 6312 | u32 devcap2; |
---|
5311 | 6313 | u16 flags; |
---|
5312 | | - int pos; |
---|
5313 | 6314 | |
---|
5314 | 6315 | /* If we want to instantiate Virtual Functions, then our |
---|
5315 | 6316 | * parent bridge's PCI-E needs to support Alternative Routing |
---|
.. | .. |
---|
5317 | 6318 | * and above. |
---|
5318 | 6319 | */ |
---|
5319 | 6320 | pbridge = pdev->bus->self; |
---|
5320 | | - pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP); |
---|
5321 | | - pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags); |
---|
5322 | | - pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2); |
---|
| 6321 | + pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags); |
---|
| 6322 | + pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2); |
---|
5323 | 6323 | |
---|
5324 | 6324 | if ((flags & PCI_EXP_FLAGS_VERS) < 2 || |
---|
5325 | 6325 | !(devcap2 & PCI_EXP_DEVCAP2_ARI)) { |
---|
.. | .. |
---|
5399 | 6399 | } |
---|
5400 | 6400 | #endif /* CONFIG_PCI_IOV */ |
---|
5401 | 6401 | |
---|
| 6402 | +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
---|
| 6403 | + |
---|
| 6404 | +static int chcr_offload_state(struct adapter *adap, |
---|
| 6405 | + enum cxgb4_netdev_tls_ops op_val) |
---|
| 6406 | +{ |
---|
| 6407 | + switch (op_val) { |
---|
| 6408 | +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
---|
| 6409 | + case CXGB4_TLSDEV_OPS: |
---|
| 6410 | + if (!adap->uld[CXGB4_ULD_KTLS].handle) { |
---|
| 6411 | + dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); |
---|
| 6412 | + return -EOPNOTSUPP; |
---|
| 6413 | + } |
---|
| 6414 | + if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { |
---|
| 6415 | + dev_dbg(adap->pdev_dev, |
---|
| 6416 | + "ch_ktls driver has no registered tlsdev_ops\n"); |
---|
| 6417 | + return -EOPNOTSUPP; |
---|
| 6418 | + } |
---|
| 6419 | + break; |
---|
| 6420 | +#endif /* CONFIG_CHELSIO_TLS_DEVICE */ |
---|
| 6421 | +#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
---|
| 6422 | + case CXGB4_XFRMDEV_OPS: |
---|
| 6423 | + if (!adap->uld[CXGB4_ULD_IPSEC].handle) { |
---|
| 6424 | + dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); |
---|
| 6425 | + return -EOPNOTSUPP; |
---|
| 6426 | + } |
---|
| 6427 | + if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { |
---|
| 6428 | + dev_dbg(adap->pdev_dev, |
---|
| 6429 | + "chipsec driver has no registered xfrmdev_ops\n"); |
---|
| 6430 | + return -EOPNOTSUPP; |
---|
| 6431 | + } |
---|
| 6432 | + break; |
---|
| 6433 | +#endif /* CONFIG_CHELSIO_IPSEC_INLINE */ |
---|
| 6434 | + default: |
---|
| 6435 | + dev_dbg(adap->pdev_dev, |
---|
| 6436 | + "driver has no support for offload %d\n", op_val); |
---|
| 6437 | + return -EOPNOTSUPP; |
---|
| 6438 | + } |
---|
| 6439 | + |
---|
| 6440 | + return 0; |
---|
| 6441 | +} |
---|
| 6442 | + |
---|
| 6443 | +#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */ |
---|
| 6444 | + |
---|
| 6445 | +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
---|
| 6446 | + |
---|
| 6447 | +static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk, |
---|
| 6448 | + enum tls_offload_ctx_dir direction, |
---|
| 6449 | + struct tls_crypto_info *crypto_info, |
---|
| 6450 | + u32 tcp_sn) |
---|
| 6451 | +{ |
---|
| 6452 | + struct adapter *adap = netdev2adap(netdev); |
---|
| 6453 | + int ret; |
---|
| 6454 | + |
---|
| 6455 | + mutex_lock(&uld_mutex); |
---|
| 6456 | + ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS); |
---|
| 6457 | + if (ret) |
---|
| 6458 | + goto out_unlock; |
---|
| 6459 | + |
---|
| 6460 | + ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE); |
---|
| 6461 | + if (ret) |
---|
| 6462 | + goto out_unlock; |
---|
| 6463 | + |
---|
| 6464 | + ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, |
---|
| 6465 | + direction, |
---|
| 6466 | + crypto_info, |
---|
| 6467 | + tcp_sn); |
---|
| 6468 | + /* if there is a failure, clear the refcount */ |
---|
| 6469 | + if (ret) |
---|
| 6470 | + cxgb4_set_ktls_feature(adap, |
---|
| 6471 | + FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE); |
---|
| 6472 | +out_unlock: |
---|
| 6473 | + mutex_unlock(&uld_mutex); |
---|
| 6474 | + return ret; |
---|
| 6475 | +} |
---|
| 6476 | + |
---|
| 6477 | +static void cxgb4_ktls_dev_del(struct net_device *netdev, |
---|
| 6478 | + struct tls_context *tls_ctx, |
---|
| 6479 | + enum tls_offload_ctx_dir direction) |
---|
| 6480 | +{ |
---|
| 6481 | + struct adapter *adap = netdev2adap(netdev); |
---|
| 6482 | + |
---|
| 6483 | + mutex_lock(&uld_mutex); |
---|
| 6484 | + if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS)) |
---|
| 6485 | + goto out_unlock; |
---|
| 6486 | + |
---|
| 6487 | + adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, |
---|
| 6488 | + direction); |
---|
| 6489 | + |
---|
| 6490 | +out_unlock: |
---|
| 6491 | + cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE); |
---|
| 6492 | + mutex_unlock(&uld_mutex); |
---|
| 6493 | +} |
---|
| 6494 | + |
---|
| 6495 | +static const struct tlsdev_ops cxgb4_ktls_ops = { |
---|
| 6496 | + .tls_dev_add = cxgb4_ktls_dev_add, |
---|
| 6497 | + .tls_dev_del = cxgb4_ktls_dev_del, |
---|
| 6498 | +}; |
---|
| 6499 | +#endif /* CONFIG_CHELSIO_TLS_DEVICE */ |
---|
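
With these hooks, cxgb4 itself owns the netdev's tlsdev_ops and merely forwards each call to whichever module registered the CXGB4_ULD_KTLS slot, after chcr_offload_state() has confirmed both a handle and an ops table exist; the firmware kTLS feature refcount is raised before tls_dev_add() and dropped again on failure or delete. A stripped-down userspace model of that forwarding layer — the struct and function names below are invented for the example:

    #include <stdio.h>
    #include <errno.h>

    /* Stand-in for the ops table a ULD registers with cxgb4. */
    struct ktls_ops {
        int (*tls_dev_add)(unsigned int tcp_sn);
    };

    static const struct ktls_ops *registered;   /* NULL until the ULD loads */

    /* Mirror of chcr_offload_state(): fail fast when no provider is bound. */
    static int offload_state_ok(void)
    {
        if (!registered || !registered->tls_dev_add)
            return -EOPNOTSUPP;
        return 0;
    }

    /* Mirror of cxgb4_ktls_dev_add(): validate, then forward the call.
     * (The real driver also bumps the FW kTLS feature refcount here and
     * rolls it back if the ULD's handler fails.)
     */
    static int dev_add(unsigned int tcp_sn)
    {
        int ret = offload_state_ok();

        return ret ? ret : registered->tls_dev_add(tcp_sn);
    }

    static int fake_uld_add(unsigned int tcp_sn)
    {
        printf("ULD got tls_dev_add, tcp_sn=%u\n", tcp_sn);
        return 0;
    }

    int main(void)
    {
        static const struct ktls_ops uld = { .tls_dev_add = fake_uld_add };

        printf("before load: %d\n", dev_add(1));  /* -EOPNOTSUPP */
        registered = &uld;
        printf("after load:  %d\n", dev_add(1));  /* 0 */
        return 0;
    }
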
| 6500 | + |
---|
| 6501 | +#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
---|
| 6502 | + |
---|
| 6503 | +static int cxgb4_xfrm_add_state(struct xfrm_state *x) |
---|
| 6504 | +{ |
---|
| 6505 | + struct adapter *adap = netdev2adap(x->xso.dev); |
---|
| 6506 | + int ret; |
---|
| 6507 | + |
---|
| 6508 | + if (!mutex_trylock(&uld_mutex)) { |
---|
| 6509 | + dev_dbg(adap->pdev_dev, |
---|
| 6510 | + "crypto uld critical resource is under use\n"); |
---|
| 6511 | + return -EBUSY; |
---|
| 6512 | + } |
---|
| 6513 | + ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS); |
---|
| 6514 | + if (ret) |
---|
| 6515 | + goto out_unlock; |
---|
| 6516 | + |
---|
| 6517 | + ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x); |
---|
| 6518 | + |
---|
| 6519 | +out_unlock: |
---|
| 6520 | + mutex_unlock(&uld_mutex); |
---|
| 6521 | + |
---|
| 6522 | + return ret; |
---|
| 6523 | +} |
---|
| 6524 | + |
---|
| 6525 | +static void cxgb4_xfrm_del_state(struct xfrm_state *x) |
---|
| 6526 | +{ |
---|
| 6527 | + struct adapter *adap = netdev2adap(x->xso.dev); |
---|
| 6528 | + |
---|
| 6529 | + if (!mutex_trylock(&uld_mutex)) { |
---|
| 6530 | + dev_dbg(adap->pdev_dev, |
---|
| 6531 | + "crypto uld critical resource is under use\n"); |
---|
| 6532 | + return; |
---|
| 6533 | + } |
---|
| 6534 | + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) |
---|
| 6535 | + goto out_unlock; |
---|
| 6536 | + |
---|
| 6537 | + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); |
---|
| 6538 | + |
---|
| 6539 | +out_unlock: |
---|
| 6540 | + mutex_unlock(&uld_mutex); |
---|
| 6541 | +} |
---|
| 6542 | + |
---|
| 6543 | +static void cxgb4_xfrm_free_state(struct xfrm_state *x) |
---|
| 6544 | +{ |
---|
| 6545 | + struct adapter *adap = netdev2adap(x->xso.dev); |
---|
| 6546 | + |
---|
| 6547 | + if (!mutex_trylock(&uld_mutex)) { |
---|
| 6548 | + dev_dbg(adap->pdev_dev, |
---|
| 6549 | + "crypto uld critical resource is under use\n"); |
---|
| 6550 | + return; |
---|
| 6551 | + } |
---|
| 6552 | + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) |
---|
| 6553 | + goto out_unlock; |
---|
| 6554 | + |
---|
| 6555 | + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); |
---|
| 6556 | + |
---|
| 6557 | +out_unlock: |
---|
| 6558 | + mutex_unlock(&uld_mutex); |
---|
| 6559 | +} |
---|
| 6560 | + |
---|
| 6561 | +static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) |
---|
| 6562 | +{ |
---|
| 6563 | + struct adapter *adap = netdev2adap(x->xso.dev); |
---|
| 6564 | + bool ret = false; |
---|
| 6565 | + |
---|
| 6566 | + if (!mutex_trylock(&uld_mutex)) { |
---|
| 6567 | + dev_dbg(adap->pdev_dev, |
---|
| 6568 | + "crypto uld critical resource is under use\n"); |
---|
| 6569 | + return ret; |
---|
| 6570 | + } |
---|
| 6571 | + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) |
---|
| 6572 | + goto out_unlock; |
---|
| 6573 | + |
---|
| 6574 | + ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); |
---|
| 6575 | + |
---|
| 6576 | +out_unlock: |
---|
| 6577 | + mutex_unlock(&uld_mutex); |
---|
| 6578 | + return ret; |
---|
| 6579 | +} |
---|
| 6580 | + |
---|
| 6581 | +static void cxgb4_advance_esn_state(struct xfrm_state *x) |
---|
| 6582 | +{ |
---|
| 6583 | + struct adapter *adap = netdev2adap(x->xso.dev); |
---|
| 6584 | + |
---|
| 6585 | + if (!mutex_trylock(&uld_mutex)) { |
---|
| 6586 | + dev_dbg(adap->pdev_dev, |
---|
| 6587 | + "crypto uld critical resource is under use\n"); |
---|
| 6588 | + return; |
---|
| 6589 | + } |
---|
| 6590 | + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) |
---|
| 6591 | + goto out_unlock; |
---|
| 6592 | + |
---|
| 6593 | + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); |
---|
| 6594 | + |
---|
| 6595 | +out_unlock: |
---|
| 6596 | + mutex_unlock(&uld_mutex); |
---|
| 6597 | +} |
---|
| 6598 | + |
---|
| 6599 | +static const struct xfrmdev_ops cxgb4_xfrmdev_ops = { |
---|
| 6600 | + .xdo_dev_state_add = cxgb4_xfrm_add_state, |
---|
| 6601 | + .xdo_dev_state_delete = cxgb4_xfrm_del_state, |
---|
| 6602 | + .xdo_dev_state_free = cxgb4_xfrm_free_state, |
---|
| 6603 | + .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok, |
---|
| 6604 | + .xdo_dev_state_advance_esn = cxgb4_advance_esn_state, |
---|
| 6605 | +}; |
---|
| 6606 | + |
---|
| 6607 | +#endif /* CONFIG_CHELSIO_IPSEC_INLINE */ |
---|
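
Unlike the kTLS hooks, every xfrmdev callback takes uld_mutex with mutex_trylock() and gives up immediately (returning -EBUSY where a return value exists) rather than sleeping, since these paths may run while a ULD load or unload already holds the mutex. The non-blocking guard in isolation, as a userspace analogue (build with -lpthread):

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    static pthread_mutex_t uld_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Shaped like cxgb4_xfrm_add_state(): refuse rather than wait when
     * the ULD lock is already held by a loading/unloading module.
     */
    static int add_state(void)
    {
        if (pthread_mutex_trylock(&uld_lock) != 0) {
            fprintf(stderr, "crypto uld resource busy, not blocking\n");
            return -EBUSY;
        }
        /* ...validate the provider and call xdo_dev_state_add() here... */
        pthread_mutex_unlock(&uld_lock);
        return 0;
    }

    int main(void)
    {
        printf("uncontended: %d\n", add_state());   /* 0 */

        pthread_mutex_lock(&uld_lock);              /* simulate contention */
        printf("contended:   %d\n", add_state());   /* -EBUSY */
        pthread_mutex_unlock(&uld_lock);
        return 0;
    }
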
| 6608 | + |
---|
5402 | 6609 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
---|
5403 | 6610 | { |
---|
5404 | 6611 | struct net_device *netdev; |
---|
.. | .. |
---|
5413 | 6620 | u16 device_id; |
---|
5414 | 6621 | int i, err; |
---|
5415 | 6622 | u32 whoami; |
---|
5416 | | - |
---|
5417 | | - printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); |
---|
5418 | 6623 | |
---|
5419 | 6624 | err = pci_request_regions(pdev, KBUILD_MODNAME); |
---|
5420 | 6625 | if (err) { |
---|
.. | .. |
---|
5514 | 6719 | } |
---|
5515 | 6720 | |
---|
5516 | 6721 | /* PCI device has been enabled */ |
---|
5517 | | - adapter->flags |= DEV_ENABLED; |
---|
| 6722 | + adapter->flags |= CXGB4_DEV_ENABLED; |
---|
5518 | 6723 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
---|
5519 | 6724 | |
---|
5520 | 6725 | /* If possible, we use PCIe Relaxed Ordering Attribute to deliver |
---|
.. | .. |
---|
5532 | 6737 | * using Relaxed Ordering. |
---|
5533 | 6738 | */ |
---|
5534 | 6739 | if (!pcie_relaxed_ordering_enabled(pdev)) |
---|
5535 | | - adapter->flags |= ROOT_NO_RELAXED_ORDERING; |
---|
| 6740 | + adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING; |
---|
5536 | 6741 | |
---|
5537 | 6742 | spin_lock_init(&adapter->stats_lock); |
---|
5538 | 6743 | spin_lock_init(&adapter->tid_release_lock); |
---|
.. | .. |
---|
5587 | 6792 | } |
---|
5588 | 6793 | |
---|
5589 | 6794 | setup_memwin(adapter); |
---|
5590 | | - err = adap_init0(adapter); |
---|
5591 | | -#ifdef CONFIG_DEBUG_FS |
---|
5592 | | - bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); |
---|
5593 | | -#endif |
---|
5594 | | - setup_memwin_rdma(adapter); |
---|
| 6795 | + err = adap_init0(adapter, 0); |
---|
5595 | 6796 | if (err) |
---|
5596 | 6797 | goto out_unmap_bar; |
---|
| 6798 | + |
---|
| 6799 | + setup_memwin_rdma(adapter); |
---|
5597 | 6800 | |
---|
5598 | 6801 | /* configure SGE_STAT_CFG_A to read WC stats */ |
---|
5599 | 6802 | if (!is_t4(adapter->params.chip)) |
---|
.. | .. |
---|
5605 | 6808 | INIT_LIST_HEAD(&adapter->mac_hlist); |
---|
5606 | 6809 | |
---|
5607 | 6810 | for_each_port(adapter, i) { |
---|
| 6811 | + /* To support MQPRIO offload, we need some extra |
---|
| 6812 | + * queues for each ETHOFLD TID. Keep it equal to |
---|
| 6813 | + * MAX_ATIDS for now. Once we connect to the firmware |
---|
| 6814 | + * later and query the EOTID params, we'll learn |
---|
| 6815 | + * the actual number of EOTIDs supported. |
---|
| 6816 | + */ |
---|
5608 | 6817 | netdev = alloc_etherdev_mq(sizeof(struct port_info), |
---|
5609 | | - MAX_ETH_QSETS); |
---|
| 6818 | + MAX_ETH_QSETS + MAX_ATIDS); |
---|
5610 | 6819 | if (!netdev) { |
---|
5611 | 6820 | err = -ENOMEM; |
---|
5612 | 6821 | goto out_free_dev; |
---|
.. | .. |
---|
5623 | 6832 | |
---|
5624 | 6833 | netdev->hw_features = NETIF_F_SG | TSO_FLAGS | |
---|
5625 | 6834 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
---|
5626 | | - NETIF_F_RXCSUM | NETIF_F_RXHASH | |
---|
| 6835 | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO | |
---|
5627 | 6836 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
---|
5628 | | - NETIF_F_HW_TC; |
---|
| 6837 | + NETIF_F_HW_TC | NETIF_F_NTUPLE; |
---|
5629 | 6838 | |
---|
5630 | 6839 | if (chip_ver > CHELSIO_T5) { |
---|
5631 | 6840 | netdev->hw_enc_features |= NETIF_F_IP_CSUM | |
---|
5632 | 6841 | NETIF_F_IPV6_CSUM | |
---|
5633 | 6842 | NETIF_F_RXCSUM | |
---|
5634 | 6843 | NETIF_F_GSO_UDP_TUNNEL | |
---|
| 6844 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | |
---|
5635 | 6845 | NETIF_F_TSO | NETIF_F_TSO6; |
---|
5636 | 6846 | |
---|
5637 | | - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; |
---|
| 6847 | + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | |
---|
| 6848 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | |
---|
| 6849 | + NETIF_F_HW_TLS_RECORD; |
---|
| 6850 | + |
---|
| 6851 | + if (adapter->rawf_cnt) |
---|
| 6852 | + netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; |
---|
5638 | 6853 | } |
---|
5639 | 6854 | |
---|
5640 | 6855 | if (highdma) |
---|
5641 | 6856 | netdev->hw_features |= NETIF_F_HIGHDMA; |
---|
5642 | 6857 | netdev->features |= netdev->hw_features; |
---|
5643 | 6858 | netdev->vlan_features = netdev->features & VLAN_FEAT; |
---|
| 6859 | +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
---|
| 6860 | + if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) { |
---|
| 6861 | + netdev->hw_features |= NETIF_F_HW_TLS_TX; |
---|
| 6862 | + netdev->tlsdev_ops = &cxgb4_ktls_ops; |
---|
| 6863 | + /* initialize the refcount */ |
---|
| 6864 | + refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0); |
---|
| 6865 | + } |
---|
| 6866 | +#endif /* CONFIG_CHELSIO_TLS_DEVICE */ |
---|
| 6867 | +#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
---|
| 6868 | + if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) { |
---|
| 6869 | + netdev->hw_enc_features |= NETIF_F_HW_ESP; |
---|
| 6870 | + netdev->features |= NETIF_F_HW_ESP; |
---|
| 6871 | + netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops; |
---|
| 6872 | + } |
---|
| 6873 | +#endif /* CONFIG_CHELSIO_IPSEC_INLINE */ |
---|
5644 | 6874 | |
---|
5645 | 6875 | netdev->priv_flags |= IFF_UNICAST_FLT; |
---|
5646 | 6876 | |
---|
.. | .. |
---|
5661 | 6891 | |
---|
5662 | 6892 | pci_set_drvdata(pdev, adapter); |
---|
5663 | 6893 | |
---|
5664 | | - if (adapter->flags & FW_OK) { |
---|
| 6894 | + if (adapter->flags & CXGB4_FW_OK) { |
---|
5665 | 6895 | err = t4_port_init(adapter, func, func, 0); |
---|
5666 | 6896 | if (err) |
---|
5667 | 6897 | goto out_free_dev; |
---|
.. | .. |
---|
5683 | 6913 | } |
---|
5684 | 6914 | } |
---|
5685 | 6915 | |
---|
5686 | | - if (!(adapter->flags & FW_OK)) |
---|
| 6916 | + if (!(adapter->flags & CXGB4_FW_OK)) |
---|
5687 | 6917 | goto fw_attach_fail; |
---|
5688 | 6918 | |
---|
5689 | 6919 | /* Configure queues and allocate tables now, they can be needed as |
---|
.. | .. |
---|
5705 | 6935 | dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); |
---|
5706 | 6936 | adapter->params.offload = 0; |
---|
5707 | 6937 | } |
---|
5708 | | - |
---|
5709 | | - adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size, |
---|
5710 | | - sizeof(struct mps_encap_entry), |
---|
5711 | | - GFP_KERNEL); |
---|
5712 | | - if (!adapter->mps_encap) |
---|
5713 | | - dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n"); |
---|
5714 | 6938 | |
---|
5715 | 6939 | #if IS_ENABLED(CONFIG_IPV6) |
---|
5716 | 6940 | if (chip_ver <= CHELSIO_T5 && |
---|
.. | .. |
---|
5744 | 6968 | i); |
---|
5745 | 6969 | } |
---|
5746 | 6970 | |
---|
| 6971 | + if (is_offload(adapter) || is_hashfilter(adapter)) { |
---|
| 6972 | + if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { |
---|
| 6973 | + u32 v; |
---|
| 6974 | + |
---|
| 6975 | + v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A); |
---|
| 6976 | + if (chip_ver <= CHELSIO_T5) { |
---|
| 6977 | + adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); |
---|
| 6978 | + v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A); |
---|
| 6979 | + adapter->tids.hash_base = v / 4; |
---|
| 6980 | + } else { |
---|
| 6981 | + adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; |
---|
| 6982 | + v = t4_read_reg(adapter, |
---|
| 6983 | + T6_LE_DB_HASH_TID_BASE_A); |
---|
| 6984 | + adapter->tids.hash_base = v; |
---|
| 6985 | + } |
---|
| 6986 | + } |
---|
| 6987 | + } |
---|
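
Reading the hash-TID geometry was hoisted ahead of tid_init() so the TID table can size itself correctly: on T5 and earlier the table size is a power of two derived from the HASHTIDSIZE field and the base register value is scaled down by 4, while T6 reports the size in units of 8 entries and a base that is used as-is. A standalone version of that computation — the register values and the field-extraction macros below are made up so the example compiles; the driver's real HASHTIDSIZE_G()/HASHTBLSIZE_G() come from its register headers:

    #include <stdio.h>

    /* Illustrative field extractors; bit positions are assumed here. */
    #define HASHTIDSIZE_G(x)    (((x) >> 8) & 0x3f)
    #define HASHTBLSIZE_G(x)    (((x) >> 3) & 0x1ffff)

    int main(void)
    {
        unsigned int v = 0x1100;     /* fake LE_DB_HASH_CONFIG contents */
        unsigned int base = 0x4000;  /* fake hash base register contents */
        unsigned int nhash, hash_base;
        int is_t6 = 0;               /* flip to model a T6 part */

        if (!is_t6) {
            nhash = 1U << HASHTIDSIZE_G(v); /* power-of-two table size */
            hash_base = base / 4;           /* T5 scales the base down by 4 */
        } else {
            nhash = HASHTBLSIZE_G(v) << 3;  /* size in units of 8 entries */
            hash_base = base;               /* T6 base is used as-is */
        }
        printf("nhash=%u hash_base=%u\n", nhash, hash_base);
        return 0;
    }
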
| 6988 | + |
---|
5747 | 6989 | if (tid_init(&adapter->tids) < 0) { |
---|
5748 | 6990 | dev_warn(&pdev->dev, "could not allocate TID table, " |
---|
5749 | 6991 | "continuing\n"); |
---|
.. | .. |
---|
5757 | 6999 | if (cxgb4_init_tc_flower(adapter)) |
---|
5758 | 7000 | dev_warn(&pdev->dev, |
---|
5759 | 7001 | "could not offload tc flower, continuing\n"); |
---|
5760 | | - } |
---|
5761 | 7002 | |
---|
5762 | | - if (is_offload(adapter) || is_hashfilter(adapter)) { |
---|
5763 | | - if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { |
---|
5764 | | - u32 hash_base, hash_reg; |
---|
| 7003 | + if (cxgb4_init_tc_mqprio(adapter)) |
---|
| 7004 | + dev_warn(&pdev->dev, |
---|
| 7005 | + "could not offload tc mqprio, continuing\n"); |
---|
5765 | 7006 | |
---|
5766 | | - if (chip_ver <= CHELSIO_T5) { |
---|
5767 | | - hash_reg = LE_DB_TID_HASHBASE_A; |
---|
5768 | | - hash_base = t4_read_reg(adapter, hash_reg); |
---|
5769 | | - adapter->tids.hash_base = hash_base / 4; |
---|
5770 | | - } else { |
---|
5771 | | - hash_reg = T6_LE_DB_HASH_TID_BASE_A; |
---|
5772 | | - hash_base = t4_read_reg(adapter, hash_reg); |
---|
5773 | | - adapter->tids.hash_base = hash_base; |
---|
5774 | | - } |
---|
5775 | | - } |
---|
| 7007 | + if (cxgb4_init_tc_matchall(adapter)) |
---|
| 7008 | + dev_warn(&pdev->dev, |
---|
| 7009 | + "could not offload tc matchall, continuing\n"); |
---|
| 7010 | + if (cxgb4_init_ethtool_filters(adapter)) |
---|
| 7011 | + dev_warn(&pdev->dev, |
---|
| 7012 | + "could not initialize ethtool filters, continuing\n"); |
---|
5776 | 7013 | } |
---|
5777 | 7014 | |
---|
5778 | 7015 | /* See what interrupts we'll be using */ |
---|
5779 | 7016 | if (msi > 1 && enable_msix(adapter) == 0) |
---|
5780 | | - adapter->flags |= USING_MSIX; |
---|
| 7017 | + adapter->flags |= CXGB4_USING_MSIX; |
---|
5781 | 7018 | else if (msi > 0 && pci_enable_msi(pdev) == 0) { |
---|
5782 | | - adapter->flags |= USING_MSI; |
---|
| 7019 | + adapter->flags |= CXGB4_USING_MSI; |
---|
5783 | 7020 | if (msi > 1) |
---|
5784 | 7021 | free_msix_info(adapter); |
---|
5785 | 7022 | } |
---|
.. | .. |
---|
5787 | 7024 | /* check for PCI Express bandwidth capabilities */ |
---|
5788 | 7025 | pcie_print_link_status(pdev); |
---|
5789 | 7026 | |
---|
| 7027 | + cxgb4_init_mps_ref_entries(adapter); |
---|
| 7028 | + |
---|
5790 | 7029 | err = init_rss(adapter); |
---|
5791 | 7030 | if (err) |
---|
5792 | 7031 | goto out_free_dev; |
---|
| 7032 | + |
---|
| 7033 | + err = setup_non_data_intr(adapter); |
---|
| 7034 | + if (err) { |
---|
| 7035 | + dev_err(adapter->pdev_dev, |
---|
| 7036 | + "Non Data interrupt allocation failed, err: %d\n", err); |
---|
| 7037 | + goto out_free_dev; |
---|
| 7038 | + } |
---|
5793 | 7039 | |
---|
5794 | 7040 | err = setup_fw_sge_queues(adapter); |
---|
5795 | 7041 | if (err) { |
---|
.. | .. |
---|
5837 | 7083 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ |
---|
5838 | 7084 | pdev->needs_freset = 1; |
---|
5839 | 7085 | |
---|
5840 | | - if (is_uld(adapter)) { |
---|
5841 | | - mutex_lock(&uld_mutex); |
---|
5842 | | - list_add_tail(&adapter->list_node, &adapter_list); |
---|
5843 | | - mutex_unlock(&uld_mutex); |
---|
5844 | | - } |
---|
| 7086 | + if (is_uld(adapter)) |
---|
| 7087 | + cxgb4_uld_enable(adapter); |
---|
5845 | 7088 | |
---|
5846 | 7089 | if (!is_t4(adapter->params.chip)) |
---|
5847 | 7090 | cxgb4_ptp_init(adapter); |
---|
| 7091 | + |
---|
| 7092 | + if (IS_REACHABLE(CONFIG_THERMAL) && |
---|
| 7093 | + !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK)) |
---|
| 7094 | + cxgb4_thermal_init(adapter); |
---|
5848 | 7095 | |
---|
5849 | 7096 | print_adapter_info(adapter); |
---|
5850 | 7097 | return 0; |
---|
.. | .. |
---|
5852 | 7099 | out_free_dev: |
---|
5853 | 7100 | t4_free_sge_resources(adapter); |
---|
5854 | 7101 | free_some_resources(adapter); |
---|
5855 | | - if (adapter->flags & USING_MSIX) |
---|
| 7102 | + if (adapter->flags & CXGB4_USING_MSIX) |
---|
5856 | 7103 | free_msix_info(adapter); |
---|
5857 | 7104 | if (adapter->num_uld || adapter->num_ofld_uld) |
---|
5858 | 7105 | t4_uld_mem_free(adapter); |
---|
.. | .. |
---|
5885 | 7132 | return; |
---|
5886 | 7133 | } |
---|
5887 | 7134 | |
---|
5888 | | - adapter->flags |= SHUTTING_DOWN; |
---|
| 7135 | + /* If we allocated filters, free up state associated with any |
---|
| 7136 | + * valid filters ... |
---|
| 7137 | + */ |
---|
| 7138 | + clear_all_filters(adapter); |
---|
| 7139 | + |
---|
| 7140 | + adapter->flags |= CXGB4_SHUTTING_DOWN; |
---|
5889 | 7141 | |
---|
5890 | 7142 | if (adapter->pf == 4) { |
---|
5891 | 7143 | int i; |
---|
.. | .. |
---|
5895 | 7147 | */ |
---|
5896 | 7148 | destroy_workqueue(adapter->workq); |
---|
5897 | 7149 | |
---|
5898 | | - if (is_uld(adapter)) { |
---|
5899 | | - detach_ulds(adapter); |
---|
5900 | | - t4_uld_clean_up(adapter); |
---|
5901 | | - } |
---|
5902 | | - |
---|
5903 | | - adap_free_hma_mem(adapter); |
---|
5904 | | - |
---|
5905 | | - disable_interrupts(adapter); |
---|
| 7150 | + detach_ulds(adapter); |
---|
5906 | 7151 | |
---|
5907 | 7152 | for_each_port(adapter, i) |
---|
5908 | 7153 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
---|
5909 | 7154 | unregister_netdev(adapter->port[i]); |
---|
5910 | 7155 | |
---|
| 7156 | + t4_uld_clean_up(adapter); |
---|
| 7157 | + |
---|
| 7158 | + adap_free_hma_mem(adapter); |
---|
| 7159 | + |
---|
| 7160 | + disable_interrupts(adapter); |
---|
| 7161 | + |
---|
| 7162 | + cxgb4_free_mps_ref_entries(adapter); |
---|
| 7163 | + |
---|
5911 | 7164 | debugfs_remove_recursive(adapter->debugfs_root); |
---|
5912 | 7165 | |
---|
5913 | 7166 | if (!is_t4(adapter->params.chip)) |
---|
5914 | 7167 | cxgb4_ptp_stop(adapter); |
---|
| 7168 | + if (IS_REACHABLE(CONFIG_THERMAL)) |
---|
| 7169 | + cxgb4_thermal_remove(adapter); |
---|
5915 | 7170 | |
---|
5916 | | - /* If we allocated filters, free up state associated with any |
---|
5917 | | - * valid filters ... |
---|
5918 | | - */ |
---|
5919 | | - clear_all_filters(adapter); |
---|
5920 | | - |
---|
5921 | | - if (adapter->flags & FULL_INIT_DONE) |
---|
| 7171 | + if (adapter->flags & CXGB4_FULL_INIT_DONE) |
---|
5922 | 7172 | cxgb_down(adapter); |
---|
5923 | 7173 | |
---|
5924 | | - if (adapter->flags & USING_MSIX) |
---|
| 7174 | + if (adapter->flags & CXGB4_USING_MSIX) |
---|
5925 | 7175 | free_msix_info(adapter); |
---|
5926 | 7176 | if (adapter->num_uld || adapter->num_ofld_uld) |
---|
5927 | 7177 | t4_uld_mem_free(adapter); |
---|
.. | .. |
---|
5945 | 7195 | #endif |
---|
5946 | 7196 | iounmap(adapter->regs); |
---|
5947 | 7197 | pci_disable_pcie_error_reporting(pdev); |
---|
5948 | | - if ((adapter->flags & DEV_ENABLED)) { |
---|
| 7198 | + if ((adapter->flags & CXGB4_DEV_ENABLED)) { |
---|
5949 | 7199 | pci_disable_device(pdev); |
---|
5950 | | - adapter->flags &= ~DEV_ENABLED; |
---|
| 7200 | + adapter->flags &= ~CXGB4_DEV_ENABLED; |
---|
5951 | 7201 | } |
---|
5952 | 7202 | pci_release_regions(pdev); |
---|
5953 | 7203 | kfree(adapter->mbox_log); |
---|
.. | .. |
---|
5973 | 7223 | return; |
---|
5974 | 7224 | } |
---|
5975 | 7225 | |
---|
5976 | | - adapter->flags |= SHUTTING_DOWN; |
---|
| 7226 | + adapter->flags |= CXGB4_SHUTTING_DOWN; |
---|
5977 | 7227 | |
---|
5978 | 7228 | if (adapter->pf == 4) { |
---|
5979 | 7229 | int i; |
---|
.. | .. |
---|
5981 | 7231 | for_each_port(adapter, i) |
---|
5982 | 7232 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
---|
5983 | 7233 | cxgb_close(adapter->port[i]); |
---|
| 7234 | + |
---|
| 7235 | + rtnl_lock(); |
---|
| 7236 | + cxgb4_mqprio_stop_offload(adapter); |
---|
| 7237 | + rtnl_unlock(); |
---|
5984 | 7238 | |
---|
5985 | 7239 | if (is_uld(adapter)) { |
---|
5986 | 7240 | detach_ulds(adapter); |
---|
.. | .. |
---|
5991 | 7245 | disable_msi(adapter); |
---|
5992 | 7246 | |
---|
5993 | 7247 | t4_sge_stop(adapter); |
---|
5994 | | - if (adapter->flags & FW_OK) |
---|
| 7248 | + if (adapter->flags & CXGB4_FW_OK) |
---|
5995 | 7249 | t4_fw_bye(adapter, adapter->mbox); |
---|
5996 | 7250 | } |
---|
5997 | 7251 | } |
---|
.. | .. |
---|
6012 | 7266 | { |
---|
6013 | 7267 | int ret; |
---|
6014 | 7268 | |
---|
6015 | | - /* Debugfs support is optional, just warn if this fails */ |
---|
6016 | 7269 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
---|
6017 | | - if (!cxgb4_debugfs_root) |
---|
6018 | | - pr_warn("could not create debugfs entry, continuing\n"); |
---|
6019 | 7270 | |
---|
6020 | 7271 | ret = pci_register_driver(&cxgb4_driver); |
---|
6021 | 7272 | if (ret < 0) |
---|