@@ ... @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
 
 /*
  * nfp_net_common.c
@@ ... @@
 #include <linux/vmalloc.h>
 #include <linux/ktime.h>
 
-#include <net/switchdev.h>
+#include <net/tls.h>
 #include <net/vxlan.h>
 
 #include "nfpcore/nfp_nsp.h"
+#include "ccm.h"
 #include "nfp_app.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
 #include "nfp_net_sriov.h"
 #include "nfp_port.h"
+#include "crypto/crypto.h"
+#include "crypto/fw.h"
 
 /**
  * nfp_net_get_fw_version() - Read and parse the FW version
@@ ... @@
         /* ensure update is written before pinging HW */
         nn_pci_flush(nn);
         nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+        nn->reconfig_in_progress_update = update;
 }
 
 /* Pass 0 as update to run posted reconfigs. */
@@ ... @@
         if (reg == 0)
                 return true;
         if (reg & NFP_NET_CFG_UPDATE_ERR) {
-                nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+                nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+                       reg, nn->reconfig_in_progress_update,
+                       nn_readl(nn, NFP_NET_CFG_CTRL));
                 return true;
         } else if (last_check) {
-                nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+                nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+                       reg, nn->reconfig_in_progress_update,
+                       nn_readl(nn, NFP_NET_CFG_CTRL));
                 return true;
         }
 
         return false;
 }
 
-static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
 {
         bool timed_out = false;
+        int i;
 
-        /* Poll update field, waiting for NFP to ack the config */
+        /* Poll update field, waiting for NFP to ack the config.
+         * Do an opportunistic wait-busy loop, afterward sleep.
+         */
+        for (i = 0; i < 50; i++) {
+                if (nfp_net_reconfig_check_done(nn, false))
+                        return false;
+                udelay(4);
+        }
+
         while (!nfp_net_reconfig_check_done(nn, timed_out)) {
-                msleep(1);
+                usleep_range(250, 500);
                 timed_out = time_is_before_eq_jiffies(deadline);
         }
+
+        return timed_out;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+        if (__nfp_net_reconfig_wait(nn, deadline))
+                return -EIO;
 
         if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
                 return -EIO;
 
-        return timed_out ? -EIO : 0;
+        return 0;
 }
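For reference, the rework above replaces a pure msleep() loop with a two-phase wait: a short busy-poll (50 x 4 us) for reconfigs that complete quickly, then usleep_range() polling until the deadline. A minimal user-space sketch of the same pattern — poll_two_phase() and its done() callback are hypothetical stand-ins for the firmware handshake, not part of the driver:

    #include <stdbool.h>
    #include <time.h>

    static bool poll_two_phase(bool (*done)(void), const struct timespec *deadline)
    {
            struct timespec now, step = { .tv_nsec = 250 * 1000 };
            int i;

            for (i = 0; i < 50; i++)        /* opportunistic busy-wait phase */
                    if (done())
                            return true;

            for (;;) {                      /* afterwards, sleep between checks */
                    if (done())
                            return true;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (now.tv_sec > deadline->tv_sec ||
                        (now.tv_sec == deadline->tv_sec &&
                         now.tv_nsec >= deadline->tv_nsec))
                            return done();  /* one final check, as with last_check */
                    nanosleep(&step, NULL);
            }
    }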
 
 static void nfp_net_reconfig_timer(struct timer_list *t)
@@ ... @@
 
         spin_lock_bh(&nn->reconfig_lock);
 
+        WARN_ON(nn->reconfig_sync_present);
         nn->reconfig_sync_present = true;
 
         if (nn->reconfig_timer_active) {
@@ ... @@
 }
 
 /**
- * nfp_net_reconfig() - Reconfigure the firmware
+ * __nfp_net_reconfig() - Reconfigure the firmware
  * @nn:      NFP Net device to reconfigure
  * @update:  The value for the update field in the BAR config
  *
@@ ... @@
  *
  * Return: Negative errno on error, 0 on success
  */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
 {
         int ret;
 
@@ ... @@
         return ret;
 }
 
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+        int ret;
+
+        nn_ctrl_bar_lock(nn);
+        ret = __nfp_net_reconfig(nn, update);
+        nn_ctrl_bar_unlock(nn);
+
+        return ret;
+}
+
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
+{
+        if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
+                nn_err(nn, "mailbox too small for %u bytes of data (%u)\n",
+                       data_size, nn->tlv_caps.mbox_len);
+                return -EIO;
+        }
+
+        nn_ctrl_bar_lock(nn);
+        return 0;
+}
+
 /**
- * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
  * @nn:        NFP Net device to reconfigure
  * @mbox_cmd:  The value for the mailbox command
  *
@@ ... @@
  *
  * Return: Negative errno on error, 0 on success
  */
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
 {
         u32 mbox = nn->tlv_caps.mbox_off;
         int ret;
 
-        if (!nfp_net_has_mbox(&nn->tlv_caps)) {
-                nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
-                return -EIO;
-        }
-
         nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
 
-        ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+        ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
         if (ret) {
                 nn_err(nn, "Mailbox update error\n");
                 return ret;
         }
 
         return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+}
+
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
+{
+        u32 mbox = nn->tlv_caps.mbox_off;
+
+        nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
+
+        nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
+}
+
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
+{
+        u32 mbox = nn->tlv_caps.mbox_off;
+
+        nfp_net_reconfig_wait_posted(nn);
+
+        return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+}
+
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
+{
+        int ret;
+
+        ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
+        nn_ctrl_bar_unlock(nn);
+        return ret;
 }
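The new helpers are meant to be used as a lock/write/kick sequence: nfp_net_mbox_lock() validates that the mailbox TLV area can hold the command's arguments and takes the ctrl BAR lock, the caller writes its arguments, and nfp_net_mbox_reconfig_and_unlock() issues the command and drops the lock. Schematically (a sketch only — nfp_net_mbox_cmd_example() and ARG_SZ are hypothetical; the VLAN filter paths later in this patch are the real users):

    static int nfp_net_mbox_cmd_example(struct nfp_net *nn, u32 cmd)
    {
            int err;

            /* checks mailbox size, takes the BAR lock */
            err = nfp_net_mbox_lock(nn, ARG_SZ);
            if (err)
                    return err;

            /* ... write command arguments into the mailbox area ... */

            /* writes the command word, runs UPDATE_MBOX, releases the lock */
            return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
    }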
 
 /* Interrupt configuration and handling
@@ ... @@
  * @txbuf: Pointer to driver soft TX descriptor
  * @txd:   Pointer to HW TX descriptor
  * @skb:   Pointer to SKB
+ * @md_bytes: Prepend length
  *
  * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
  * Return error on packet header greater than maximum supported LSO header size.
  */
 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
                            struct nfp_net_tx_buf *txbuf,
-                           struct nfp_net_tx_desc *txd, struct sk_buff *skb)
+                           struct nfp_net_tx_desc *txd, struct sk_buff *skb,
+                           u32 md_bytes)
 {
-        u32 hdrlen;
+        u32 l3_offset, l4_offset, hdrlen;
         u16 mss;
 
         if (!skb_is_gso(skb))
                 return;
 
         if (!skb->encapsulation) {
-                txd->l3_offset = skb_network_offset(skb);
-                txd->l4_offset = skb_transport_offset(skb);
+                l3_offset = skb_network_offset(skb);
+                l4_offset = skb_transport_offset(skb);
                 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
         } else {
-                txd->l3_offset = skb_inner_network_offset(skb);
-                txd->l4_offset = skb_inner_transport_offset(skb);
+                l3_offset = skb_inner_network_offset(skb);
+                l4_offset = skb_inner_transport_offset(skb);
                 hdrlen = skb_inner_transport_header(skb) - skb->data +
                          inner_tcp_hdrlen(skb);
         }
@@ ... @@
         txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
 
         mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
-        txd->lso_hdrlen = hdrlen;
+        txd->l3_offset = l3_offset - md_bytes;
+        txd->l4_offset = l4_offset - md_bytes;
+        txd->lso_hdrlen = hdrlen - md_bytes;
         txd->mss = cpu_to_le16(mss);
         txd->flags |= PCIE_DESC_TX_LSO;
 
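Worked example for the rebased offsets (assuming an 8 B prepend, i.e. the port-id case, and an untagged IPv4/TCP frame): after the metadata is pushed, skb_network_offset() is 8 + 14 = 22 and skb_transport_offset() is 8 + 14 + 20 = 42, so the descriptor gets l3_offset = 22 - 8 = 14 and l4_offset = 42 - 8 = 34 — offsets relative to the frame as it goes on the wire, after the hardware strips the prepend.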
@@ ... @@
         u64_stats_update_end(&r_vec->tx_sync);
 }
 
+static struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+               struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
+{
+#ifdef CONFIG_TLS_DEVICE
+        struct nfp_net_tls_offload_ctx *ntls;
+        struct sk_buff *nskb;
+        bool resync_pending;
+        u32 datalen, seq;
+
+        if (likely(!dp->ktls_tx))
+                return skb;
+        if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+                return skb;
+
+        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+        seq = ntohl(tcp_hdr(skb)->seq);
+        ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+        resync_pending = tls_offload_tx_resync_pending(skb->sk);
+        if (unlikely(resync_pending || ntls->next_seq != seq)) {
+                /* Pure ACK out of order already */
+                if (!datalen)
+                        return skb;
+
+                u64_stats_update_begin(&r_vec->tx_sync);
+                r_vec->tls_tx_fallback++;
+                u64_stats_update_end(&r_vec->tx_sync);
+
+                nskb = tls_encrypt_skb(skb);
+                if (!nskb) {
+                        u64_stats_update_begin(&r_vec->tx_sync);
+                        r_vec->tls_tx_no_fallback++;
+                        u64_stats_update_end(&r_vec->tx_sync);
+                        return NULL;
+                }
+                /* encryption wasn't necessary */
+                if (nskb == skb)
+                        return skb;
+                /* we don't re-check ring space */
+                if (unlikely(skb_is_nonlinear(nskb))) {
+                        nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
+                        u64_stats_update_begin(&r_vec->tx_sync);
+                        r_vec->tx_errors++;
+                        u64_stats_update_end(&r_vec->tx_sync);
+                        dev_kfree_skb_any(nskb);
+                        return NULL;
+                }
+
+                /* jump forward, a TX may have gotten lost, need to sync TX */
+                if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
+                        tls_offload_tx_resync_request(nskb->sk, seq,
+                                                      ntls->next_seq);
+
+                *nr_frags = 0;
+                return nskb;
+        }
+
+        if (datalen) {
+                u64_stats_update_begin(&r_vec->tx_sync);
+                if (!skb_is_gso(skb))
+                        r_vec->hw_tls_tx++;
+                else
+                        r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
+                u64_stats_update_end(&r_vec->tx_sync);
+        }
+
+        memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+        ntls->next_seq += datalen;
+#endif
+        return skb;
+}
+
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+{
+#ifdef CONFIG_TLS_DEVICE
+        struct nfp_net_tls_offload_ctx *ntls;
+        u32 datalen, seq;
+
+        if (!tls_handle)
+                return;
+        if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+                return;
+
+        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+        seq = ntohl(tcp_hdr(skb)->seq);
+
+        ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+        if (ntls->next_seq == seq + datalen)
+                ntls->next_seq = seq;
+        else
+                WARN_ON_ONCE(1);
+#endif
+}
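Summarizing the TX-side TLS decision above (no new behavior, just the three outcomes of nfp_net_tls_tx()):

    /*
     * in sync (seq == ntls->next_seq, no resync pending):
     *     stash fw_handle for the descriptor metadata, advance next_seq,
     *     let the NIC encrypt (hw_tls_tx counters);
     * out of sync, pure ACK (datalen == 0):
     *     send as-is, there is nothing to encrypt;
     * out of sync with payload:
     *     encrypt in software via tls_encrypt_skb() (tls_tx_fallback) and,
     *     when jumping forward, ask the stack for a resync so later
     *     segments can return to the hardware path; a NULL return
     *     (allocation failure or a nonlinear result) drops the skb.
     */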
+
 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
 {
         wmb();
@@ ... @@
         tx_ring->wr_ptr_add = 0;
 }
 
-static int nfp_net_prep_port_id(struct sk_buff *skb)
+static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
 {
         struct metadata_dst *md_dst = skb_metadata_dst(skb);
         unsigned char *data;
+        u32 meta_id = 0;
+        int md_bytes;
 
-        if (likely(!md_dst))
+        if (likely(!md_dst && !tls_handle))
                 return 0;
-        if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
-                return 0;
+        if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+                if (!tls_handle)
+                        return 0;
+                md_dst = NULL;
+        }
 
-        if (unlikely(skb_cow_head(skb, 8)))
+        md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+
+        if (unlikely(skb_cow_head(skb, md_bytes)))
                 return -ENOMEM;
 
-        data = skb_push(skb, 8);
-        put_unaligned_be32(NFP_NET_META_PORTID, data);
-        put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+        meta_id = 0;
+        data = skb_push(skb, md_bytes) + md_bytes;
+        if (md_dst) {
+                data -= 4;
+                put_unaligned_be32(md_dst->u.port_info.port_id, data);
+                meta_id = NFP_NET_META_PORTID;
+        }
+        if (tls_handle) {
+                /* conn handle is opaque, we just use u64 to be able to quickly
+                 * compare it to zero
+                 */
+                data -= 8;
+                memcpy(data, &tls_handle, sizeof(tls_handle));
+                meta_id <<= NFP_NET_META_FIELD_SIZE;
+                meta_id |= NFP_NET_META_CONN_HANDLE;
+        }
 
-        return 8;
+        data -= 4;
+        put_unaligned_be32(meta_id, data);
+
+        return md_bytes;
 }
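The prepend is built back to front, finishing with a 32-bit word of 4-bit type fields whose low nibble describes the field immediately following it. With both a port id and a TLS connection handle present (md_bytes = 4 + 4 + 8 = 16), the area in front of the frame looks like this (layout derived from the code above):

    /*
     *  skb->data
     *      v
     *  [ meta_id ][ tls_handle ][ port_id ][ Ethernet frame ... ]
     *      4 B         8 B          4 B
     *
     *  meta_id == (NFP_NET_META_PORTID << NFP_NET_META_FIELD_SIZE) |
     *             NFP_NET_META_CONN_HANDLE
     */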
 
 /**
@@ ... @@
  *
  * Return: NETDEV_TX_OK on success.
  */
-static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
         struct nfp_net *nn = netdev_priv(netdev);
-        const struct skb_frag_struct *frag;
-        struct nfp_net_tx_desc *txd, txdg;
+        const skb_frag_t *frag;
         int f, nr_frags, wr_idx, md_bytes;
         struct nfp_net_tx_ring *tx_ring;
         struct nfp_net_r_vector *r_vec;
         struct nfp_net_tx_buf *txbuf;
+        struct nfp_net_tx_desc *txd;
         struct netdev_queue *nd_q;
         struct nfp_net_dp *dp;
         dma_addr_t dma_addr;
         unsigned int fsize;
+        u64 tls_handle = 0;
         u16 qidx;
 
         dp = &nn->dp;
         qidx = skb_get_queue_mapping(skb);
         tx_ring = &dp->tx_rings[qidx];
         r_vec = tx_ring->r_vec;
-        nd_q = netdev_get_tx_queue(dp->netdev, qidx);
 
         nr_frags = skb_shinfo(skb)->nr_frags;
 
         if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
                 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
                            qidx, tx_ring->wr_p, tx_ring->rd_p);
+                nd_q = netdev_get_tx_queue(dp->netdev, qidx);
                 netif_tx_stop_queue(nd_q);
                 nfp_net_tx_xmit_more_flush(tx_ring);
                 u64_stats_update_begin(&r_vec->tx_sync);
@@ ... @@
                 return NETDEV_TX_BUSY;
         }
 
-        md_bytes = nfp_net_prep_port_id(skb);
-        if (unlikely(md_bytes < 0)) {
+        skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+        if (unlikely(!skb)) {
                 nfp_net_tx_xmit_more_flush(tx_ring);
-                dev_kfree_skb_any(skb);
                 return NETDEV_TX_OK;
         }
+
+        md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
+        if (unlikely(md_bytes < 0))
+                goto err_flush;
 
         /* Start with the head skbuf */
         dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
                                   DMA_TO_DEVICE);
         if (dma_mapping_error(dp->dev, dma_addr))
-                goto err_free;
+                goto err_dma_err;
 
         wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
 
@@ ... @@
         txd->lso_hdrlen = 0;
 
         /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
-        nfp_net_tx_tso(r_vec, txbuf, txd, skb);
+        nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
         nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
         if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
                 txd->flags |= PCIE_DESC_TX_VLAN;
@@ ... @@
 
         /* Gather DMA */
         if (nr_frags > 0) {
+                __le64 second_half;
+
                 /* all descs must match except for in addr, length and eop */
-                txdg = *txd;
+                second_half = txd->vals8[1];
 
                 for (f = 0; f < nr_frags; f++) {
                         frag = &skb_shinfo(skb)->frags[f];
@@ ... @@
                         tx_ring->txbufs[wr_idx].fidx = f;
 
                         txd = &tx_ring->txds[wr_idx];
-                        *txd = txdg;
                         txd->dma_len = cpu_to_le16(fsize);
                         nfp_desc_set_dma_addr(txd, dma_addr);
-                        txd->offset_eop |=
-                                (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
+                        txd->offset_eop = md_bytes |
+                                ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
+                        txd->vals8[1] = second_half;
                 }
 
                 u64_stats_update_begin(&r_vec->tx_sync);
@@ ... @@
                 u64_stats_update_end(&r_vec->tx_sync);
         }
 
-        netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
         skb_tx_timestamp(skb);
+
+        nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
 
         tx_ring->wr_p += nr_frags + 1;
         if (nfp_net_tx_ring_should_stop(tx_ring))
                 nfp_net_tx_ring_stop(nd_q, tx_ring);
 
         tx_ring->wr_ptr_add += nr_frags + 1;
-        if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+        if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
                 nfp_net_tx_xmit_more_flush(tx_ring);
 
         return NETDEV_TX_OK;
@@ ... @@
         tx_ring->txbufs[wr_idx].skb = NULL;
         tx_ring->txbufs[wr_idx].dma_addr = 0;
         tx_ring->txbufs[wr_idx].fidx = -2;
-err_free:
+err_dma_err:
         nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
         nfp_net_tx_xmit_more_flush(tx_ring);
         u64_stats_update_begin(&r_vec->tx_sync);
         r_vec->tx_errors++;
         u64_stats_update_end(&r_vec->tx_sync);
+        nfp_net_tls_tx_undo(skb, tls_handle);
         dev_kfree_skb_any(skb);
         return NETDEV_TX_OK;
 }
@@ ... @@
 {
         struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
         struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-        const struct skb_frag_struct *frag;
         struct netdev_queue *nd_q;
         u32 done_pkts = 0, done_bytes = 0;
-        struct sk_buff *skb;
-        int todo, nr_frags;
         u32 qcp_rd_p;
-        int fidx;
-        int idx;
+        int todo;
 
         if (tx_ring->wr_p == tx_ring->rd_p)
                 return;
@@ ... @@
         todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
 
         while (todo--) {
-                idx = D_IDX(tx_ring, tx_ring->rd_p++);
+                const skb_frag_t *frag;
+                struct nfp_net_tx_buf *tx_buf;
+                struct sk_buff *skb;
+                int fidx, nr_frags;
+                int idx;
 
-                skb = tx_ring->txbufs[idx].skb;
+                idx = D_IDX(tx_ring, tx_ring->rd_p++);
+                tx_buf = &tx_ring->txbufs[idx];
+
+                skb = tx_buf->skb;
                 if (!skb)
                         continue;
 
                 nr_frags = skb_shinfo(skb)->nr_frags;
-                fidx = tx_ring->txbufs[idx].fidx;
+                fidx = tx_buf->fidx;
 
                 if (fidx == -1) {
                         /* unmap head */
-                        dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
+                        dma_unmap_single(dp->dev, tx_buf->dma_addr,
                                          skb_headlen(skb), DMA_TO_DEVICE);
 
-                        done_pkts += tx_ring->txbufs[idx].pkt_cnt;
-                        done_bytes += tx_ring->txbufs[idx].real_len;
+                        done_pkts += tx_buf->pkt_cnt;
+                        done_bytes += tx_buf->real_len;
                 } else {
                         /* unmap fragment */
                         frag = &skb_shinfo(skb)->frags[fidx];
-                        dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
+                        dma_unmap_page(dp->dev, tx_buf->dma_addr,
                                        skb_frag_size(frag), DMA_TO_DEVICE);
                 }
 
@@ ... @@
                 if (fidx == nr_frags - 1)
                         napi_consume_skb(skb, budget);
 
-                tx_ring->txbufs[idx].dma_addr = 0;
-                tx_ring->txbufs[idx].skb = NULL;
-                tx_ring->txbufs[idx].fidx = -2;
+                tx_buf->dma_addr = 0;
+                tx_buf->skb = NULL;
+                tx_buf->fidx = -2;
         }
 
         tx_ring->qcp_rd_p = qcp_rd_p;
@@ ... @@
 static void
 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 {
-        const struct skb_frag_struct *frag;
+        const skb_frag_t *frag;
         struct netdev_queue *nd_q;
 
         while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
@@ ... @@
         netdev_tx_reset_queue(nd_q);
 }
 
-static void nfp_net_tx_timeout(struct net_device *netdev)
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
         struct nfp_net *nn = netdev_priv(netdev);
-        int i;
 
-        for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
-                if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
-                        continue;
-                nn_warn(nn, "TX timeout on ring: %d\n", i);
-        }
-        nn_warn(nn, "TX watchdog timeout\n");
+        nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
 }
 
 /* Receive processing
@@ ... @@
                                  &rx_hash->hash);
 }
 
-static void *
+static bool
 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
-                   void *data, int meta_len)
+                   void *data, void *pkt, unsigned int pkt_len, int meta_len)
 {
         u32 meta_info;
 
@@ ... @@
                                 (__force __wsum)__get_unaligned_cpu32(data);
                         data += 4;
                         break;
+                case NFP_NET_META_RESYNC_INFO:
+                        if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+                                                      pkt_len))
+                                return false;
+                        data += sizeof(struct nfp_net_tls_resync_req);
+                        break;
                 default:
-                        return NULL;
+                        return true;
                 }
 
                 meta_info >>= NFP_NET_META_FIELD_SIZE;
         }
 
-        return data;
+        return data != pkt;
 }
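With the new signature the parser doubles as a validator. It consumes the same format the TX side builds: read the 32-bit type word, then repeatedly act on its low 4-bit field, shift right, and advance data past that field's payload. A skeleton of the control flow (0xf stands in for the driver's field-mask constant, which does not appear in this hunk):

    /*
     *  meta_info = <first 32-bit big-endian word>;  data += 4;
     *  while (meta_info) {
     *          switch (meta_info & 0xf) { ... one case per field type ... }
     *          meta_info >>= NFP_NET_META_FIELD_SIZE;
     *  }
     *  return data != pkt;   -- true == invalid, the caller drops the frame
     */

A walk that hits an unknown field type, or that does not land exactly on the packet start, is reported as invalid; the TLS resync case returns early without flagging an error once the request has been handed to nfp_net_tls_rx_resync_req().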
 
 static void
@@ ... @@
                      struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
                      unsigned int pkt_len, bool *completed)
 {
+        unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
         struct nfp_net_tx_buf *txbuf;
         struct nfp_net_tx_desc *txd;
         int wr_idx;
+
+        /* Reject if xdp_adjust_tail grows the packet beyond the DMA area */
+        if (pkt_len + dma_off > dma_map_sz)
+                return false;
 
         if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
                 if (!*completed) {
@@ ... @@
         rcu_read_lock();
         xdp_prog = READ_ONCE(dp->xdp_prog);
         true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+        xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
         xdp.rxq = &rx_ring->xdp_rxq;
         tx_ring = r_vec->xdp_ring;
 
@@ ... @@
                 struct nfp_net_rx_buf *rxbuf;
                 struct nfp_net_rx_desc *rxd;
                 struct nfp_meta_parsed meta;
+                bool redir_egress = false;
                 struct net_device *netdev;
                 dma_addr_t new_dma_addr;
                 u32 meta_len_xdp = 0;
@@ ... @@
                         nfp_net_set_hash_desc(dp->netdev, &meta,
                                               rxbuf->frag + meta_off, rxd);
                 } else if (meta_len) {
-                        void *end;
-
-                        end = nfp_net_parse_meta(dp->netdev, &meta,
-                                                 rxbuf->frag + meta_off,
-                                                 meta_len);
-                        if (unlikely(end != rxbuf->frag + pkt_off)) {
+                        if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
+                                                        rxbuf->frag + meta_off,
+                                                        rxbuf->frag + pkt_off,
+                                                        pkt_len, meta_len))) {
                                 nn_dp_warn(dp, "invalid RX packet metadata\n");
                                 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
                                                 NULL);
@@ ... @@
                         continue;
                 default:
                         bpf_warn_invalid_xdp_action(act);
-                        /* fall through */
+                        fallthrough;
                 case XDP_ABORTED:
                         trace_xdp_exception(dp->netdev, xdp_prog, act);
-                        /* fall through */
+                        fallthrough;
                 case XDP_DROP:
                         nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
                                             rxbuf->dma_addr);
@@ ... @@
                         struct nfp_net *nn;
 
                         nn = netdev_priv(dp->netdev);
-                        netdev = nfp_app_repr_get(nn->app, meta.portid);
+                        netdev = nfp_app_dev_get(nn->app, meta.portid,
+                                                 &redir_egress);
                         if (unlikely(!netdev)) {
                                 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
                                                 NULL);
                                 continue;
                         }
-                        nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+                        if (nfp_netdev_is_nfp_repr(netdev))
+                                nfp_repr_inc_rx_stats(netdev, pkt_len);
                 }
 
                 skb = build_skb(rxbuf->frag, true_bufsz);
@@ ... @@
 
                 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
 
+#ifdef CONFIG_TLS_DEVICE
+                if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+                        skb->decrypted = true;
+                        u64_stats_update_begin(&r_vec->rx_sync);
+                        r_vec->hw_tls_rx++;
+                        u64_stats_update_end(&r_vec->rx_sync);
+                }
+#endif
+
                 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                                le16_to_cpu(rxd->rxd.vlan));
                 if (meta_len_xdp)
                         skb_metadata_set(skb, meta_len_xdp);
 
-                napi_gro_receive(&rx_ring->r_vec->napi, skb);
+                if (likely(!redir_egress)) {
+                        napi_gro_receive(&rx_ring->r_vec->napi, skb);
+                } else {
+                        skb->dev = netdev;
+                        skb_reset_network_header(skb);
+                        __skb_push(skb, ETH_HLEN);
+                        dev_queue_xmit(skb);
+                }
         }
 
         if (xdp_prog) {
@@ ... @@
         return budget;
 }
 
-static void nfp_ctrl_poll(unsigned long arg)
+static void nfp_ctrl_poll(struct tasklet_struct *t)
 {
-        struct nfp_net_r_vector *r_vec = (void *)arg;
+        struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
 
-        spin_lock_bh(&r_vec->lock);
+        spin_lock(&r_vec->lock);
         nfp_net_tx_complete(r_vec->tx_ring, 0);
         __nfp_ctrl_tx_queued(r_vec);
-        spin_unlock_bh(&r_vec->lock);
+        spin_unlock(&r_vec->lock);
 
         if (nfp_ctrl_rx(r_vec)) {
                 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
@@ ... @@
 
                 __skb_queue_head_init(&r_vec->queue);
                 spin_lock_init(&r_vec->lock);
-                tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
-                             (unsigned long)r_vec);
+                tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
                 tasklet_disable(&r_vec->tasklet);
         }
2147 | 2343 | |
---|
.. | .. |
---|
2186 | 2382 | tx_ring->cnt = dp->txd_cnt; |
---|
2187 | 2383 | |
---|
2188 | 2384 | tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); |
---|
2189 | | - tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, |
---|
2190 | | - &tx_ring->dma, |
---|
2191 | | - GFP_KERNEL | __GFP_NOWARN); |
---|
| 2385 | + tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, |
---|
| 2386 | + &tx_ring->dma, |
---|
| 2387 | + GFP_KERNEL | __GFP_NOWARN); |
---|
2192 | 2388 | if (!tx_ring->txds) { |
---|
2193 | 2389 | netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", |
---|
2194 | 2390 | tx_ring->cnt); |
---|
.. | .. |
---|
2344 | 2540 | |
---|
2345 | 2541 | rx_ring->cnt = dp->rxd_cnt; |
---|
2346 | 2542 | rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); |
---|
2347 | | - rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, |
---|
2348 | | - &rx_ring->dma, |
---|
2349 | | - GFP_KERNEL | __GFP_NOWARN); |
---|
| 2543 | + rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, |
---|
| 2544 | + &rx_ring->dma, |
---|
| 2545 | + GFP_KERNEL | __GFP_NOWARN); |
---|
2350 | 2546 | if (!rx_ring->rxds) { |
---|
2351 | 2547 | netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", |
---|
2352 | 2548 | rx_ring->cnt); |
---|
@@ ... @@
 
         for (r = 0; r < nn->dp.num_rx_rings; r++)
                 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
-
-        /* Since reconfiguration requests while NFP is down are ignored we
-         * have to wipe the entire VXLAN configuration and reinitialize it.
-         */
-        if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
-                memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
-                memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
-                udp_tunnel_get_rx_info(nn->dp.netdev);
-        }
 
         return 0;
 }
@@ ... @@
 static int
 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
+        const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
         struct nfp_net *nn = netdev_priv(netdev);
+        int err;
 
         /* Priority tagged packets with vlan id 0 are processed by the
          * NFP as untagged packets
@@ ... @@
         if (!vid)
                 return 0;
 
+        err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+        if (err)
+                return err;
+
         nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
         nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
                   ETH_P_8021Q);
 
-        return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+        return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
 }
 
 static int
 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
+        const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
         struct nfp_net *nn = netdev_priv(netdev);
+        int err;
 
         /* Priority tagged packets with vlan id 0 are processed by the
          * NFP as untagged packets
@@ ... @@
         if (!vid)
                 return 0;
 
+        err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+        if (err)
+                return err;
+
         nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
         nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
                   ETH_P_8021Q);
 
-        return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+        return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
 }
 
 static void nfp_net_stat64(struct net_device *netdev,
@@ ... @@
         struct nfp_net *nn = netdev_priv(netdev);
         int r;
 
+        /* Collect software stats */
         for (r = 0; r < nn->max_r_vecs; r++) {
                 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
                 u64 data[3];
                 unsigned int start;
 
                 do {
-                        start = u64_stats_fetch_begin(&r_vec->rx_sync);
+                        start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
                         data[0] = r_vec->rx_pkts;
                         data[1] = r_vec->rx_bytes;
                         data[2] = r_vec->rx_drops;
-                } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+                } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
                 stats->rx_packets += data[0];
                 stats->rx_bytes += data[1];
                 stats->rx_dropped += data[2];
 
                 do {
-                        start = u64_stats_fetch_begin(&r_vec->tx_sync);
+                        start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
                         data[0] = r_vec->tx_pkts;
                         data[1] = r_vec->tx_bytes;
                         data[2] = r_vec->tx_errors;
-                } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+                } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
                 stats->tx_packets += data[0];
                 stats->tx_bytes += data[1];
                 stats->tx_errors += data[2];
         }
+
+        /* Add in device stats */
+        stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
+        stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
+        stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
+
+        stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
+        stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
 }
 
 static int nfp_net_set_features(struct net_device *netdev,
@@ ... @@
                 hdrlen = skb_inner_transport_header(skb) - skb->data +
                          inner_tcp_hdrlen(skb);
 
-                if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
+                /* Assume worst case scenario of having longest possible
+                 * metadata prepend - 8B
+                 */
+                if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
                         features &= ~NETIF_F_GSO_MASK;
         }
 
@@ ... @@
         struct nfp_net *nn = netdev_priv(netdev);
         int n;
 
+        /* If port is defined, devlink_port is registered and devlink core
+         * is taking care of name formatting.
+         */
         if (nn->port)
-                return nfp_port_get_phys_port_name(netdev, name, len);
+                return -EOPNOTSUPP;
 
         if (nn->dp.is_vf || nn->vnic_no_name)
                 return -EOPNOTSUPP;
@@ ... @@
         return 0;
 }
 
-/**
- * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
- * @nn:   NFP Net device to reconfigure
- * @idx:  Index into the port table where new port should be written
- * @port: UDP port to configure (pass zero to remove VXLAN port)
- */
-static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
-{
-        int i;
-
-        nn->vxlan_ports[idx] = port;
-
-        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
-                return;
-
-        BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
-        for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
-                nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
-                          be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
-                          be16_to_cpu(nn->vxlan_ports[i]));
-
-        nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
-}
-
-/**
- * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
- * @nn:   NFP Network structure
- * @port: UDP port to look for
- *
- * Return: if the port is already in the table -- it's position;
- *         if the port is not in the table -- free position to use;
- *         if the table is full -- -ENOSPC.
- */
-static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
-{
-        int i, free_idx = -ENOSPC;
-
-        for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
-                if (nn->vxlan_ports[i] == port)
-                        return i;
-                if (!nn->vxlan_usecnt[i])
-                        free_idx = i;
-        }
-
-        return free_idx;
-}
-
-static void nfp_net_add_vxlan_port(struct net_device *netdev,
-                                   struct udp_tunnel_info *ti)
-{
-        struct nfp_net *nn = netdev_priv(netdev);
-        int idx;
-
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
-
-        idx = nfp_net_find_vxlan_idx(nn, ti->port);
-        if (idx == -ENOSPC)
-                return;
-
-        if (!nn->vxlan_usecnt[idx]++)
-                nfp_net_set_vxlan_port(nn, idx, ti->port);
-}
-
-static void nfp_net_del_vxlan_port(struct net_device *netdev,
-                                   struct udp_tunnel_info *ti)
-{
-        struct nfp_net *nn = netdev_priv(netdev);
-        int idx;
-
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
-
-        idx = nfp_net_find_vxlan_idx(nn, ti->port);
-        if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
-                return;
-
-        if (!--nn->vxlan_usecnt[idx])
-                nfp_net_set_vxlan_port(nn, idx, 0);
-}
-
 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
 {
         struct bpf_prog *prog = bpf->prog;
         struct nfp_net_dp *dp;
         int err;
-
-        if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
-                return -EBUSY;
 
         if (!prog == !nn->dp.xdp_prog) {
                 WRITE_ONCE(nn->dp.xdp_prog, prog);
@@ ... @@
 {
         int err;
 
-        if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
-                return -EBUSY;
-
         err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
         if (err)
                 return err;
@@ ... @@
                 return nfp_net_xdp_setup_drv(nn, xdp);
         case XDP_SETUP_PROG_HW:
                 return nfp_net_xdp_setup_hw(nn, xdp);
-        case XDP_QUERY_PROG:
-                return xdp_attachment_query(&nn->xdp, xdp);
-        case XDP_QUERY_PROG_HW:
-                return xdp_attachment_query(&nn->xdp_hw, xdp);
         default:
                 return nfp_app_bpf(nn->app, nn, xdp);
         }
@@ ... @@
         .ndo_set_vf_mac         = nfp_app_set_vf_mac,
         .ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
         .ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
+        .ndo_set_vf_trust       = nfp_app_set_vf_trust,
         .ndo_get_vf_config      = nfp_app_get_vf_config,
         .ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
         .ndo_setup_tc           = nfp_port_setup_tc,
@@ ... @@
         .ndo_set_features       = nfp_net_set_features,
         .ndo_features_check     = nfp_net_features_check,
         .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
-        .ndo_udp_tunnel_add     = nfp_net_add_vxlan_port,
-        .ndo_udp_tunnel_del     = nfp_net_del_vxlan_port,
+        .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+        .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
         .ndo_bpf                = nfp_net_xdp,
+        .ndo_get_devlink_port   = nfp_devlink_get_devlink_port,
+};
+
+static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+{
+        struct nfp_net *nn = netdev_priv(netdev);
+        int i;
+
+        BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
+        for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
+                struct udp_tunnel_info ti0, ti1;
+
+                udp_tunnel_nic_get_port(netdev, table, i, &ti0);
+                udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
+
+                nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
+                          be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
+        }
+
+        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+}
+
+static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
+        .sync_table     = nfp_udp_tunnel_sync,
+        .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                          UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+        .tables         = {
+                {
+                        .n_entries      = NFP_NET_N_VXLAN_PORTS,
+                        .tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
+                },
+        },
 };
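With the conversion to udp_tunnel_nic, the core tracks VXLAN port usage and calls .sync_table once per batch of changes; the driver simply re-dumps the whole table, packing two big-endian ports per 32-bit register. Worked example (values chosen for illustration, not from the patch): ports 4789 (0x12b5) in slot 0 and 4790 (0x12b6) in slot 1 produce a single write of 0x12b6 << 16 | 0x12b5 == 0x12b612b5 to NFP_NET_CFG_VXLAN_PORT; an empty slot reads back as port 0, clearing its half of the register.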
 
 /**
@@ ... @@
                 nn->fw_ver.resv, nn->fw_ver.class,
                 nn->fw_ver.major, nn->fw_ver.minor,
                 nn->max_mtu);
-        nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+        nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
                 nn->cap,
                 nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
                 nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
@@ ... @@
                 nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS1 "     : "",
                 nn->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSS2 "     : "",
                 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
-                nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
                 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
                 nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
                 nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
@@ ... @@
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev:         PCI device
+ * @ctrl_bar:     PCI IOMEM with vNIC config memory
  * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
  * @max_rx_rings: Maximum number of RX rings supported by device
@@ ... @@
  *
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
-                              unsigned int max_tx_rings,
-                              unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+              unsigned int max_tx_rings, unsigned int max_rx_rings)
 {
         struct nfp_net *nn;
+        int err;
 
         if (needs_netdev) {
                 struct net_device *netdev;
@@ ... @@
         }
 
         nn->dp.dev = &pdev->dev;
+        nn->dp.ctrl_bar = ctrl_bar;
         nn->pdev = pdev;
 
         nn->max_tx_rings = max_tx_rings;
@@ ... @@
         nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
         nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
 
+        sema_init(&nn->bar_lock, 1);
+
         spin_lock_init(&nn->reconfig_lock);
         spin_lock_init(&nn->link_status_lock);
 
         timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
 
+        err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+                                     &nn->tlv_caps);
+        if (err)
+                goto err_free_nn;
+
+        err = nfp_ccm_mbox_alloc(nn);
+        if (err)
+                goto err_free_nn;
+
         return nn;
+
+err_free_nn:
+        if (nn->dp.netdev)
+                free_netdev(nn->dp.netdev);
+        else
+                vfree(nn);
+        return ERR_PTR(err);
 }
 
 /**
@@ ... @@
 void nfp_net_free(struct nfp_net *nn)
 {
         WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+        nfp_ccm_mbox_free(nn);
+
         if (nn->dp.netdev)
                 free_netdev(nn->dp.netdev);
         else
@@ ... @@
         }
         if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
                 netdev->hw_features |= NETIF_F_RXHASH;
-        if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
-            nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
+        if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
                 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
-                        netdev->hw_features |= NETIF_F_GSO_GRE |
-                                               NETIF_F_GSO_UDP_TUNNEL;
-                nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
-
-                netdev->hw_enc_features = netdev->hw_features;
+                        netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+                netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
+                nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
         }
+        if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
+                if (nn->cap & NFP_NET_CFG_CTRL_LSO)
+                        netdev->hw_features |= NETIF_F_GSO_GRE;
+                nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
+        }
+        if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
+                netdev->hw_enc_features = netdev->hw_features;
 
         netdev->vlan_features = netdev->hw_features;
 
@@ ... @@
         /* Finalise the netdev setup */
         netdev->netdev_ops = &nfp_net_netdev_ops;
         netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
-
-        SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
 
         /* MTU range: 68 - hw-specific max */
         netdev->min_mtu = ETH_MIN_MTU;
@@ ... @@
                 return err;
 
         /* Set default MTU and Freelist buffer size */
-        if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
+        if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
+                nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
+        } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
                 nn->dp.mtu = nn->max_mtu;
-        else
+        } else {
                 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
+        }
         nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
 
         if (nfp_app_ctrl_uses_data_vnics(nn->app))
@@ ... @@
                 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
         }
 
-        err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
-                                     &nn->tlv_caps);
-        if (err)
-                return err;
-
-        if (nn->dp.netdev)
-                nfp_net_netdev_init(nn);
-
         /* Stash the re-configuration queue away. First odd queue in TX Bar */
         nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 
@@ ... @@
         if (err)
                 return err;
 
+        if (nn->dp.netdev) {
+                nfp_net_netdev_init(nn);
+
+                err = nfp_ccm_mbox_init(nn);
+                if (err)
+                        return err;
+
+                err = nfp_net_tls_init(nn);
+                if (err)
+                        goto err_clean_mbox;
+        }
+
         nfp_net_vecs_init(nn);
 
         if (!nn->dp.netdev)
                 return 0;
         return register_netdev(nn->dp.netdev);
+
+err_clean_mbox:
+        nfp_ccm_mbox_clean(nn);
+        return err;
 }
 
 /**
@@ ... @@
                 return;
 
         unregister_netdev(nn->dp.netdev);
+        nfp_ccm_mbox_clean(nn);
         nfp_net_reconfig_wait_posted(nn);
 }