forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-01-05
Commit: 071106ecf68c401173c58808b1cf5f68cc50d390
File:   kernel/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
....@@ -1,35 +1,5 @@
1
-/*
2
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
3
- *
4
- * This software is dual licensed under the GNU General License Version 2,
5
- * June 1991 as shown in the file COPYING in the top-level directory of this
6
- * source tree or the BSD 2-Clause License provided below. You have the
7
- * option to license this software under the complete terms of either license.
8
- *
9
- * The BSD 2-Clause License:
10
- *
11
- * Redistribution and use in source and binary forms, with or
12
- * without modification, are permitted provided that the following
13
- * conditions are met:
14
- *
15
- * 1. Redistributions of source code must retain the above
16
- * copyright notice, this list of conditions and the following
17
- * disclaimer.
18
- *
19
- * 2. Redistributions in binary form must reproduce the above
20
- * copyright notice, this list of conditions and the following
21
- * disclaimer in the documentation and/or other materials
22
- * provided with the distribution.
23
- *
24
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31
- * SOFTWARE.
32
- */
1
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
333
344 /*
355 * nfp_net_common.c
....@@ -66,15 +36,18 @@
6636 #include <linux/vmalloc.h>
6737 #include <linux/ktime.h>
6838
69
-#include <net/switchdev.h>
39
+#include <net/tls.h>
7040 #include <net/vxlan.h>
7141
7242 #include "nfpcore/nfp_nsp.h"
43
+#include "ccm.h"
7344 #include "nfp_app.h"
7445 #include "nfp_net_ctrl.h"
7546 #include "nfp_net.h"
7647 #include "nfp_net_sriov.h"
7748 #include "nfp_port.h"
49
+#include "crypto/crypto.h"
50
+#include "crypto/fw.h"
7851
7952 /**
8053 * nfp_net_get_fw_version() - Read and parse the FW version
....@@ -131,6 +104,7 @@
131104 /* ensure update is written before pinging HW */
132105 nn_pci_flush(nn);
133106 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
107
+ nn->reconfig_in_progress_update = update;
134108 }
135109
136110 /* Pass 0 as update to run posted reconfigs. */
....@@ -153,30 +127,51 @@
153127 if (reg == 0)
154128 return true;
155129 if (reg & NFP_NET_CFG_UPDATE_ERR) {
156
- nn_err(nn, "Reconfig error: 0x%08x\n", reg);
130
+ nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
131
+ reg, nn->reconfig_in_progress_update,
132
+ nn_readl(nn, NFP_NET_CFG_CTRL));
157133 return true;
158134 } else if (last_check) {
159
- nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
135
+ nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
136
+ reg, nn->reconfig_in_progress_update,
137
+ nn_readl(nn, NFP_NET_CFG_CTRL));
160138 return true;
161139 }
162140
163141 return false;
164142 }
165143
166
-static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
144
+static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
167145 {
168146 bool timed_out = false;
147
+ int i;
169148
170
- /* Poll update field, waiting for NFP to ack the config */
149
+ /* Poll update field, waiting for NFP to ack the config.
150
+ * Do an opportunistic wait-busy loop, afterward sleep.
151
+ */
152
+ for (i = 0; i < 50; i++) {
153
+ if (nfp_net_reconfig_check_done(nn, false))
154
+ return false;
155
+ udelay(4);
156
+ }
157
+
171158 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
172
- msleep(1);
159
+ usleep_range(250, 500);
173160 timed_out = time_is_before_eq_jiffies(deadline);
174161 }
162
+
163
+ return timed_out;
164
+}
165
+
166
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
167
+{
168
+ if (__nfp_net_reconfig_wait(nn, deadline))
169
+ return -EIO;
175170
176171 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
177172 return -EIO;
178173
179
- return timed_out ? -EIO : 0;
174
+ return 0;
180175 }
181176
182177 static void nfp_net_reconfig_timer(struct timer_list *t)
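Note on the hunk above: reconfig polling is now two-phase. A short busy-wait (50 iterations, 4 us apart) catches the common case where the firmware acks almost immediately, and only then does the driver fall back to sleeping polls until the jiffies deadline; the update mask recorded in reconfig_in_progress_update lets the error/timeout messages say which update was in flight. A minimal kernel-style sketch of that loop shape (check_done() stands in for nfp_net_reconfig_check_done(); this is not driver code):

	static bool poll_two_phase(struct nfp_net *nn,
				   bool (*check_done)(struct nfp_net *nn, bool last),
				   unsigned long deadline)
	{
		bool timed_out = false;
		int i;

		for (i = 0; i < 50; i++) {		/* opportunistic busy-wait phase */
			if (check_done(nn, false))
				return false;		/* firmware acked quickly, no sleep */
			udelay(4);
		}

		while (!check_done(nn, timed_out)) {	/* slow path: sleep between polls */
			usleep_range(250, 500);
			timed_out = time_is_before_eq_jiffies(deadline);
		}

		return timed_out;			/* caller maps true to -EIO */
	}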
....@@ -236,6 +231,7 @@
236231
237232 spin_lock_bh(&nn->reconfig_lock);
238233
234
+ WARN_ON(nn->reconfig_sync_present);
239235 nn->reconfig_sync_present = true;
240236
241237 if (nn->reconfig_timer_active) {
....@@ -269,7 +265,7 @@
269265 }
270266
271267 /**
272
- * nfp_net_reconfig() - Reconfigure the firmware
268
+ * __nfp_net_reconfig() - Reconfigure the firmware
273269 * @nn: NFP Net device to reconfigure
274270 * @update: The value for the update field in the BAR config
275271 *
....@@ -279,7 +275,7 @@
279275 *
280276 * Return: Negative errno on error, 0 on success
281277 */
282
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
278
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
283279 {
284280 int ret;
285281
....@@ -300,8 +296,31 @@
300296 return ret;
301297 }
302298
299
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
300
+{
301
+ int ret;
302
+
303
+ nn_ctrl_bar_lock(nn);
304
+ ret = __nfp_net_reconfig(nn, update);
305
+ nn_ctrl_bar_unlock(nn);
306
+
307
+ return ret;
308
+}
309
+
310
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
311
+{
312
+ if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
313
+ nn_err(nn, "mailbox too small for %u of data (%u)\n",
314
+ data_size, nn->tlv_caps.mbox_len);
315
+ return -EIO;
316
+ }
317
+
318
+ nn_ctrl_bar_lock(nn);
319
+ return 0;
320
+}
321
+
303322 /**
304
- * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
323
+ * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
305324 * @nn: NFP Net device to reconfigure
306325 * @mbox_cmd: The value for the mailbox command
307326 *
....@@ -309,25 +328,47 @@
309328 *
310329 * Return: Negative errno on error, 0 on success
311330 */
312
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
331
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
313332 {
314333 u32 mbox = nn->tlv_caps.mbox_off;
315334 int ret;
316335
317
- if (!nfp_net_has_mbox(&nn->tlv_caps)) {
318
- nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
319
- return -EIO;
320
- }
321
-
322336 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
323337
324
- ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
338
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
325339 if (ret) {
326340 nn_err(nn, "Mailbox update error\n");
327341 return ret;
328342 }
329343
330344 return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
345
+}
346
+
347
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
348
+{
349
+ u32 mbox = nn->tlv_caps.mbox_off;
350
+
351
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
352
+
353
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
354
+}
355
+
356
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
357
+{
358
+ u32 mbox = nn->tlv_caps.mbox_off;
359
+
360
+ nfp_net_reconfig_wait_posted(nn);
361
+
362
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
363
+}
364
+
365
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
366
+{
367
+ int ret;
368
+
369
+ ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
370
+ nn_ctrl_bar_unlock(nn);
371
+ return ret;
331372 }
332373
333374 /* Interrupt configuration and handling
....@@ -677,27 +718,29 @@
677718 * @txbuf: Pointer to driver soft TX descriptor
678719 * @txd: Pointer to HW TX descriptor
679720 * @skb: Pointer to SKB
721
+ * @md_bytes: Prepend length
680722 *
681723 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
682724 * Return error on packet header greater than maximum supported LSO header size.
683725 */
684726 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
685727 struct nfp_net_tx_buf *txbuf,
686
- struct nfp_net_tx_desc *txd, struct sk_buff *skb)
728
+ struct nfp_net_tx_desc *txd, struct sk_buff *skb,
729
+ u32 md_bytes)
687730 {
688
- u32 hdrlen;
731
+ u32 l3_offset, l4_offset, hdrlen;
689732 u16 mss;
690733
691734 if (!skb_is_gso(skb))
692735 return;
693736
694737 if (!skb->encapsulation) {
695
- txd->l3_offset = skb_network_offset(skb);
696
- txd->l4_offset = skb_transport_offset(skb);
738
+ l3_offset = skb_network_offset(skb);
739
+ l4_offset = skb_transport_offset(skb);
697740 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
698741 } else {
699
- txd->l3_offset = skb_inner_network_offset(skb);
700
- txd->l4_offset = skb_inner_transport_offset(skb);
742
+ l3_offset = skb_inner_network_offset(skb);
743
+ l4_offset = skb_inner_transport_offset(skb);
701744 hdrlen = skb_inner_transport_header(skb) - skb->data +
702745 inner_tcp_hdrlen(skb);
703746 }
....@@ -706,7 +749,9 @@
706749 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
707750
708751 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
709
- txd->lso_hdrlen = hdrlen;
752
+ txd->l3_offset = l3_offset - md_bytes;
753
+ txd->l4_offset = l4_offset - md_bytes;
754
+ txd->lso_hdrlen = hdrlen - md_bytes;
710755 txd->mss = cpu_to_le16(mss);
711756 txd->flags |= PCIE_DESC_TX_LSO;
712757
....@@ -778,6 +823,100 @@
778823 u64_stats_update_end(&r_vec->tx_sync);
779824 }
780825
826
+static struct sk_buff *
827
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
828
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
829
+{
830
+#ifdef CONFIG_TLS_DEVICE
831
+ struct nfp_net_tls_offload_ctx *ntls;
832
+ struct sk_buff *nskb;
833
+ bool resync_pending;
834
+ u32 datalen, seq;
835
+
836
+ if (likely(!dp->ktls_tx))
837
+ return skb;
838
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
839
+ return skb;
840
+
841
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
842
+ seq = ntohl(tcp_hdr(skb)->seq);
843
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
844
+ resync_pending = tls_offload_tx_resync_pending(skb->sk);
845
+ if (unlikely(resync_pending || ntls->next_seq != seq)) {
846
+ /* Pure ACK out of order already */
847
+ if (!datalen)
848
+ return skb;
849
+
850
+ u64_stats_update_begin(&r_vec->tx_sync);
851
+ r_vec->tls_tx_fallback++;
852
+ u64_stats_update_end(&r_vec->tx_sync);
853
+
854
+ nskb = tls_encrypt_skb(skb);
855
+ if (!nskb) {
856
+ u64_stats_update_begin(&r_vec->tx_sync);
857
+ r_vec->tls_tx_no_fallback++;
858
+ u64_stats_update_end(&r_vec->tx_sync);
859
+ return NULL;
860
+ }
861
+ /* encryption wasn't necessary */
862
+ if (nskb == skb)
863
+ return skb;
864
+ /* we don't re-check ring space */
865
+ if (unlikely(skb_is_nonlinear(nskb))) {
866
+ nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
867
+ u64_stats_update_begin(&r_vec->tx_sync);
868
+ r_vec->tx_errors++;
869
+ u64_stats_update_end(&r_vec->tx_sync);
870
+ dev_kfree_skb_any(nskb);
871
+ return NULL;
872
+ }
873
+
874
+ /* jump forward, a TX may have gotten lost, need to sync TX */
875
+ if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
876
+ tls_offload_tx_resync_request(nskb->sk, seq,
877
+ ntls->next_seq);
878
+
879
+ *nr_frags = 0;
880
+ return nskb;
881
+ }
882
+
883
+ if (datalen) {
884
+ u64_stats_update_begin(&r_vec->tx_sync);
885
+ if (!skb_is_gso(skb))
886
+ r_vec->hw_tls_tx++;
887
+ else
888
+ r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
889
+ u64_stats_update_end(&r_vec->tx_sync);
890
+ }
891
+
892
+ memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
893
+ ntls->next_seq += datalen;
894
+#endif
895
+ return skb;
896
+}
897
+
898
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
899
+{
900
+#ifdef CONFIG_TLS_DEVICE
901
+ struct nfp_net_tls_offload_ctx *ntls;
902
+ u32 datalen, seq;
903
+
904
+ if (!tls_handle)
905
+ return;
906
+ if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
907
+ return;
908
+
909
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
910
+ seq = ntohl(tcp_hdr(skb)->seq);
911
+
912
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
913
+ if (ntls->next_seq == seq + datalen)
914
+ ntls->next_seq = seq;
915
+ else
916
+ WARN_ON_ONCE(1);
917
+#endif
918
+}
919
+
781920 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
782921 {
783922 wmb();
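Note on nfp_net_tls_tx() above: each TX skb on a kTLS-offloaded socket is either stamped with the firmware connection handle (in-sequence data), passed through untouched (non-offloaded sockets, or an out-of-order pure ACK), or encrypted in software via tls_encrypt_skb() when the TCP sequence does not match the driver's expectation or a resync is pending. A simplified decision helper, assuming the same inputs the function derives from the skb (not driver code; the real path also requests a TX resync and rejects non-linear fallback skbs):

	enum tls_tx_path { TLS_TX_PLAIN, TLS_TX_HW, TLS_TX_SW_FALLBACK };

	static enum tls_tx_path classify_tls_tx(bool offloaded, bool resync_pending,
						u32 next_seq, u32 seq, u32 datalen)
	{
		if (!offloaded)
			return TLS_TX_PLAIN;		/* socket not device-offloaded */

		if (resync_pending || seq != next_seq) {
			if (!datalen)
				return TLS_TX_PLAIN;	/* out-of-order pure ACK: send as-is */
			return TLS_TX_SW_FALLBACK;	/* tls_encrypt_skb() + resync request */
		}

		return TLS_TX_HW;	/* stamp fw handle in metadata, next_seq += datalen */
	}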
....@@ -785,24 +924,47 @@
785924 tx_ring->wr_ptr_add = 0;
786925 }
787926
788
-static int nfp_net_prep_port_id(struct sk_buff *skb)
927
+static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
789928 {
790929 struct metadata_dst *md_dst = skb_metadata_dst(skb);
791930 unsigned char *data;
931
+ u32 meta_id = 0;
932
+ int md_bytes;
792933
793
- if (likely(!md_dst))
934
+ if (likely(!md_dst && !tls_handle))
794935 return 0;
795
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
796
- return 0;
936
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
937
+ if (!tls_handle)
938
+ return 0;
939
+ md_dst = NULL;
940
+ }
797941
798
- if (unlikely(skb_cow_head(skb, 8)))
942
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
943
+
944
+ if (unlikely(skb_cow_head(skb, md_bytes)))
799945 return -ENOMEM;
800946
801
- data = skb_push(skb, 8);
802
- put_unaligned_be32(NFP_NET_META_PORTID, data);
803
- put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
947
+ meta_id = 0;
948
+ data = skb_push(skb, md_bytes) + md_bytes;
949
+ if (md_dst) {
950
+ data -= 4;
951
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
952
+ meta_id = NFP_NET_META_PORTID;
953
+ }
954
+ if (tls_handle) {
955
+ /* conn handle is opaque, we just use u64 to be able to quickly
956
+ * compare it to zero
957
+ */
958
+ data -= 8;
959
+ memcpy(data, &tls_handle, sizeof(tls_handle));
960
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
961
+ meta_id |= NFP_NET_META_CONN_HANDLE;
962
+ }
804963
805
- return 8;
964
+ data -= 4;
965
+ put_unaligned_be32(meta_id, data);
966
+
967
+ return md_bytes;
806968 }
807969
808970 /**
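Note on nfp_net_prep_tx_meta() above: the prepend is built back to front, so in memory it reads as a 32-bit type word, then the optional 8-byte TLS connection handle, then the optional 4-byte port id, then the packet; each field contributes a 4-bit id to the type word, low nibble first, which is why md_bytes is 4 + 4 per port id + 8 per TLS handle. A self-contained sketch of that layout (the META_TYPE_* values are placeholders, not the real NFP_NET_META_* defines):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define META_FIELD_SIZE		4	/* bits per field id */
	#define META_TYPE_PORTID	5	/* placeholder for NFP_NET_META_PORTID */
	#define META_TYPE_CONN_HANDLE	6	/* placeholder for NFP_NET_META_CONN_HANDLE */

	static void put_be32(uint8_t *p, uint32_t v)
	{
		p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
	}

	/* buf has md_bytes of headroom in front of the packet (the skb_push() above);
	 * returns the number of bytes written, which must equal md_bytes.
	 */
	static int build_tx_prepend(uint8_t *buf, int md_bytes,
				    bool has_port, uint32_t port_id, uint64_t tls_handle)
	{
		uint8_t *data = buf + md_bytes;	/* write fields from the end backwards */
		uint32_t meta_id = 0;

		if (has_port) {
			data -= 4;
			put_be32(data, port_id);
			meta_id = META_TYPE_PORTID;
		}
		if (tls_handle) {
			data -= 8;
			memcpy(data, &tls_handle, sizeof(tls_handle));	/* opaque fw handle */
			meta_id = meta_id << META_FIELD_SIZE | META_TYPE_CONN_HANDLE;
		}
		data -= 4;
		put_be32(data, meta_id);	/* type word ends up first in the prepend */

		return (int)(buf + md_bytes - data);
	}

The same md_bytes value is then subtracted from the L3/L4/LSO header offsets in nfp_net_tx_tso(), and nfp_net_features_check() further down reserves room for a metadata prepend in its LSO header-length limit.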
....@@ -812,32 +974,33 @@
812974 *
813975 * Return: NETDEV_TX_OK on success.
814976 */
815
-static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
977
+static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
816978 {
817979 struct nfp_net *nn = netdev_priv(netdev);
818
- const struct skb_frag_struct *frag;
819
- struct nfp_net_tx_desc *txd, txdg;
980
+ const skb_frag_t *frag;
820981 int f, nr_frags, wr_idx, md_bytes;
821982 struct nfp_net_tx_ring *tx_ring;
822983 struct nfp_net_r_vector *r_vec;
823984 struct nfp_net_tx_buf *txbuf;
985
+ struct nfp_net_tx_desc *txd;
824986 struct netdev_queue *nd_q;
825987 struct nfp_net_dp *dp;
826988 dma_addr_t dma_addr;
827989 unsigned int fsize;
990
+ u64 tls_handle = 0;
828991 u16 qidx;
829992
830993 dp = &nn->dp;
831994 qidx = skb_get_queue_mapping(skb);
832995 tx_ring = &dp->tx_rings[qidx];
833996 r_vec = tx_ring->r_vec;
834
- nd_q = netdev_get_tx_queue(dp->netdev, qidx);
835997
836998 nr_frags = skb_shinfo(skb)->nr_frags;
837999
8381000 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
8391001 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
8401002 qidx, tx_ring->wr_p, tx_ring->rd_p);
1003
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
8411004 netif_tx_stop_queue(nd_q);
8421005 nfp_net_tx_xmit_more_flush(tx_ring);
8431006 u64_stats_update_begin(&r_vec->tx_sync);
....@@ -846,18 +1009,21 @@
8461009 return NETDEV_TX_BUSY;
8471010 }
8481011
849
- md_bytes = nfp_net_prep_port_id(skb);
850
- if (unlikely(md_bytes < 0)) {
1012
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
1013
+ if (unlikely(!skb)) {
8511014 nfp_net_tx_xmit_more_flush(tx_ring);
852
- dev_kfree_skb_any(skb);
8531015 return NETDEV_TX_OK;
8541016 }
1017
+
1018
+ md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
1019
+ if (unlikely(md_bytes < 0))
1020
+ goto err_flush;
8551021
8561022 /* Start with the head skbuf */
8571023 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
8581024 DMA_TO_DEVICE);
8591025 if (dma_mapping_error(dp->dev, dma_addr))
860
- goto err_free;
1026
+ goto err_dma_err;
8611027
8621028 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
8631029
....@@ -881,7 +1047,7 @@
8811047 txd->lso_hdrlen = 0;
8821048
8831049 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
884
- nfp_net_tx_tso(r_vec, txbuf, txd, skb);
1050
+ nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
8851051 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
8861052 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
8871053 txd->flags |= PCIE_DESC_TX_VLAN;
....@@ -890,8 +1056,10 @@
8901056
8911057 /* Gather DMA */
8921058 if (nr_frags > 0) {
1059
+ __le64 second_half;
1060
+
8931061 /* all descs must match except for in addr, length and eop */
894
- txdg = *txd;
1062
+ second_half = txd->vals8[1];
8951063
8961064 for (f = 0; f < nr_frags; f++) {
8971065 frag = &skb_shinfo(skb)->frags[f];
....@@ -908,11 +1076,11 @@
9081076 tx_ring->txbufs[wr_idx].fidx = f;
9091077
9101078 txd = &tx_ring->txds[wr_idx];
911
- *txd = txdg;
9121079 txd->dma_len = cpu_to_le16(fsize);
9131080 nfp_desc_set_dma_addr(txd, dma_addr);
914
- txd->offset_eop |=
915
- (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
1081
+ txd->offset_eop = md_bytes |
1082
+ ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
1083
+ txd->vals8[1] = second_half;
9161084 }
9171085
9181086 u64_stats_update_begin(&r_vec->tx_sync);
....@@ -920,16 +1088,16 @@
9201088 u64_stats_update_end(&r_vec->tx_sync);
9211089 }
9221090
923
- netdev_tx_sent_queue(nd_q, txbuf->real_len);
924
-
9251091 skb_tx_timestamp(skb);
1092
+
1093
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
9261094
9271095 tx_ring->wr_p += nr_frags + 1;
9281096 if (nfp_net_tx_ring_should_stop(tx_ring))
9291097 nfp_net_tx_ring_stop(nd_q, tx_ring);
9301098
9311099 tx_ring->wr_ptr_add += nr_frags + 1;
932
- if (!skb->xmit_more || netif_xmit_stopped(nd_q))
1100
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
9331101 nfp_net_tx_xmit_more_flush(tx_ring);
9341102
9351103 return NETDEV_TX_OK;
....@@ -951,12 +1119,14 @@
9511119 tx_ring->txbufs[wr_idx].skb = NULL;
9521120 tx_ring->txbufs[wr_idx].dma_addr = 0;
9531121 tx_ring->txbufs[wr_idx].fidx = -2;
954
-err_free:
1122
+err_dma_err:
9551123 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
1124
+err_flush:
9561125 nfp_net_tx_xmit_more_flush(tx_ring);
9571126 u64_stats_update_begin(&r_vec->tx_sync);
9581127 r_vec->tx_errors++;
9591128 u64_stats_update_end(&r_vec->tx_sync);
1129
+ nfp_net_tls_tx_undo(skb, tls_handle);
9601130 dev_kfree_skb_any(skb);
9611131 return NETDEV_TX_OK;
9621132 }
....@@ -970,14 +1140,10 @@
9701140 {
9711141 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
9721142 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
973
- const struct skb_frag_struct *frag;
9741143 struct netdev_queue *nd_q;
9751144 u32 done_pkts = 0, done_bytes = 0;
976
- struct sk_buff *skb;
977
- int todo, nr_frags;
9781145 u32 qcp_rd_p;
979
- int fidx;
980
- int idx;
1146
+ int todo;
9811147
9821148 if (tx_ring->wr_p == tx_ring->rd_p)
9831149 return;
....@@ -991,26 +1157,33 @@
9911157 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
9921158
9931159 while (todo--) {
994
- idx = D_IDX(tx_ring, tx_ring->rd_p++);
1160
+ const skb_frag_t *frag;
1161
+ struct nfp_net_tx_buf *tx_buf;
1162
+ struct sk_buff *skb;
1163
+ int fidx, nr_frags;
1164
+ int idx;
9951165
996
- skb = tx_ring->txbufs[idx].skb;
1166
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
1167
+ tx_buf = &tx_ring->txbufs[idx];
1168
+
1169
+ skb = tx_buf->skb;
9971170 if (!skb)
9981171 continue;
9991172
10001173 nr_frags = skb_shinfo(skb)->nr_frags;
1001
- fidx = tx_ring->txbufs[idx].fidx;
1174
+ fidx = tx_buf->fidx;
10021175
10031176 if (fidx == -1) {
10041177 /* unmap head */
1005
- dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
1178
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
10061179 skb_headlen(skb), DMA_TO_DEVICE);
10071180
1008
- done_pkts += tx_ring->txbufs[idx].pkt_cnt;
1009
- done_bytes += tx_ring->txbufs[idx].real_len;
1181
+ done_pkts += tx_buf->pkt_cnt;
1182
+ done_bytes += tx_buf->real_len;
10101183 } else {
10111184 /* unmap fragment */
10121185 frag = &skb_shinfo(skb)->frags[fidx];
1013
- dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
1186
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
10141187 skb_frag_size(frag), DMA_TO_DEVICE);
10151188 }
10161189
....@@ -1018,9 +1191,9 @@
10181191 if (fidx == nr_frags - 1)
10191192 napi_consume_skb(skb, budget);
10201193
1021
- tx_ring->txbufs[idx].dma_addr = 0;
1022
- tx_ring->txbufs[idx].skb = NULL;
1023
- tx_ring->txbufs[idx].fidx = -2;
1194
+ tx_buf->dma_addr = 0;
1195
+ tx_buf->skb = NULL;
1196
+ tx_buf->fidx = -2;
10241197 }
10251198
10261199 tx_ring->qcp_rd_p = qcp_rd_p;
....@@ -1099,7 +1272,7 @@
10991272 static void
11001273 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
11011274 {
1102
- const struct skb_frag_struct *frag;
1275
+ const skb_frag_t *frag;
11031276 struct netdev_queue *nd_q;
11041277
11051278 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
....@@ -1149,17 +1322,11 @@
11491322 netdev_tx_reset_queue(nd_q);
11501323 }
11511324
1152
-static void nfp_net_tx_timeout(struct net_device *netdev)
1325
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
11531326 {
11541327 struct nfp_net *nn = netdev_priv(netdev);
1155
- int i;
11561328
1157
- for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1158
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1159
- continue;
1160
- nn_warn(nn, "TX timeout on ring: %d\n", i);
1161
- }
1162
- nn_warn(nn, "TX watchdog timeout\n");
1329
+ nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
11631330 }
11641331
11651332 /* Receive processing
....@@ -1495,9 +1662,9 @@
14951662 &rx_hash->hash);
14961663 }
14971664
1498
-static void *
1665
+static bool
14991666 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1500
- void *data, int meta_len)
1667
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
15011668 {
15021669 u32 meta_info;
15031670
....@@ -1527,14 +1694,20 @@
15271694 (__force __wsum)__get_unaligned_cpu32(data);
15281695 data += 4;
15291696 break;
1697
+ case NFP_NET_META_RESYNC_INFO:
1698
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
1699
+ pkt_len))
1700
+ return false;
1701
+ data += sizeof(struct nfp_net_tls_resync_req);
1702
+ break;
15301703 default:
1531
- return NULL;
1704
+ return true;
15321705 }
15331706
15341707 meta_info >>= NFP_NET_META_FIELD_SIZE;
15351708 }
15361709
1537
- return data;
1710
+ return data != pkt;
15381711 }
15391712
15401713 static void
....@@ -1568,9 +1741,14 @@
15681741 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
15691742 unsigned int pkt_len, bool *completed)
15701743 {
1744
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
15711745 struct nfp_net_tx_buf *txbuf;
15721746 struct nfp_net_tx_desc *txd;
15731747 int wr_idx;
1748
+
1749
+ /* Reject if xdp_adjust_tail grow packet beyond DMA area */
1750
+ if (pkt_len + dma_off > dma_map_sz)
1751
+ return false;
15741752
15751753 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
15761754 if (!*completed) {
....@@ -1644,6 +1822,7 @@
16441822 rcu_read_lock();
16451823 xdp_prog = READ_ONCE(dp->xdp_prog);
16461824 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1825
+ xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
16471826 xdp.rxq = &rx_ring->xdp_rxq;
16481827 tx_ring = r_vec->xdp_ring;
16491828
....@@ -1652,6 +1831,7 @@
16521831 struct nfp_net_rx_buf *rxbuf;
16531832 struct nfp_net_rx_desc *rxd;
16541833 struct nfp_meta_parsed meta;
1834
+ bool redir_egress = false;
16551835 struct net_device *netdev;
16561836 dma_addr_t new_dma_addr;
16571837 u32 meta_len_xdp = 0;
....@@ -1718,12 +1898,10 @@
17181898 nfp_net_set_hash_desc(dp->netdev, &meta,
17191899 rxbuf->frag + meta_off, rxd);
17201900 } else if (meta_len) {
1721
- void *end;
1722
-
1723
- end = nfp_net_parse_meta(dp->netdev, &meta,
1724
- rxbuf->frag + meta_off,
1725
- meta_len);
1726
- if (unlikely(end != rxbuf->frag + pkt_off)) {
1901
+ if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
1902
+ rxbuf->frag + meta_off,
1903
+ rxbuf->frag + pkt_off,
1904
+ pkt_len, meta_len))) {
17271905 nn_dp_warn(dp, "invalid RX packet metadata\n");
17281906 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
17291907 NULL);
....@@ -1762,10 +1940,10 @@
17621940 continue;
17631941 default:
17641942 bpf_warn_invalid_xdp_action(act);
1765
- /* fall through */
1943
+ fallthrough;
17661944 case XDP_ABORTED:
17671945 trace_xdp_exception(dp->netdev, xdp_prog, act);
1768
- /* fall through */
1946
+ fallthrough;
17691947 case XDP_DROP:
17701948 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
17711949 rxbuf->dma_addr);
....@@ -1787,13 +1965,16 @@
17871965 struct nfp_net *nn;
17881966
17891967 nn = netdev_priv(dp->netdev);
1790
- netdev = nfp_app_repr_get(nn->app, meta.portid);
1968
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
1969
+ &redir_egress);
17911970 if (unlikely(!netdev)) {
17921971 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
17931972 NULL);
17941973 continue;
17951974 }
1796
- nfp_repr_inc_rx_stats(netdev, pkt_len);
1975
+
1976
+ if (nfp_netdev_is_nfp_repr(netdev))
1977
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
17971978 }
17981979
17991980 skb = build_skb(rxbuf->frag, true_bufsz);
....@@ -1822,13 +2003,29 @@
18222003
18232004 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
18242005
2006
+#ifdef CONFIG_TLS_DEVICE
2007
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
2008
+ skb->decrypted = true;
2009
+ u64_stats_update_begin(&r_vec->rx_sync);
2010
+ r_vec->hw_tls_rx++;
2011
+ u64_stats_update_end(&r_vec->rx_sync);
2012
+ }
2013
+#endif
2014
+
18252015 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
18262016 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
18272017 le16_to_cpu(rxd->rxd.vlan));
18282018 if (meta_len_xdp)
18292019 skb_metadata_set(skb, meta_len_xdp);
18302020
1831
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
2021
+ if (likely(!redir_egress)) {
2022
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
2023
+ } else {
2024
+ skb->dev = netdev;
2025
+ skb_reset_network_header(skb);
2026
+ __skb_push(skb, ETH_HLEN);
2027
+ dev_queue_xmit(skb);
2028
+ }
18322029 }
18332030
18342031 if (xdp_prog) {
....@@ -2090,14 +2287,14 @@
20902287 return budget;
20912288 }
20922289
2093
-static void nfp_ctrl_poll(unsigned long arg)
2290
+static void nfp_ctrl_poll(struct tasklet_struct *t)
20942291 {
2095
- struct nfp_net_r_vector *r_vec = (void *)arg;
2292
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
20962293
2097
- spin_lock_bh(&r_vec->lock);
2294
+ spin_lock(&r_vec->lock);
20982295 nfp_net_tx_complete(r_vec->tx_ring, 0);
20992296 __nfp_ctrl_tx_queued(r_vec);
2100
- spin_unlock_bh(&r_vec->lock);
2297
+ spin_unlock(&r_vec->lock);
21012298
21022299 if (nfp_ctrl_rx(r_vec)) {
21032300 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
....@@ -2140,8 +2337,7 @@
21402337
21412338 __skb_queue_head_init(&r_vec->queue);
21422339 spin_lock_init(&r_vec->lock);
2143
- tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
2144
- (unsigned long)r_vec);
2340
+ tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
21452341 tasklet_disable(&r_vec->tasklet);
21462342 }
21472343
....@@ -2186,9 +2382,9 @@
21862382 tx_ring->cnt = dp->txd_cnt;
21872383
21882384 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
2189
- tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
2190
- &tx_ring->dma,
2191
- GFP_KERNEL | __GFP_NOWARN);
2385
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
2386
+ &tx_ring->dma,
2387
+ GFP_KERNEL | __GFP_NOWARN);
21922388 if (!tx_ring->txds) {
21932389 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
21942390 tx_ring->cnt);
....@@ -2344,9 +2540,9 @@
23442540
23452541 rx_ring->cnt = dp->rxd_cnt;
23462542 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
2347
- rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
2348
- &rx_ring->dma,
2349
- GFP_KERNEL | __GFP_NOWARN);
2543
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
2544
+ &rx_ring->dma,
2545
+ GFP_KERNEL | __GFP_NOWARN);
23502546 if (!rx_ring->rxds) {
23512547 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
23522548 rx_ring->cnt);
....@@ -2669,15 +2865,6 @@
26692865
26702866 for (r = 0; r < nn->dp.num_rx_rings; r++)
26712867 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2672
-
2673
- /* Since reconfiguration requests while NFP is down are ignored we
2674
- * have to wipe the entire VXLAN configuration and reinitialize it.
2675
- */
2676
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2677
- memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2678
- memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2679
- udp_tunnel_get_rx_info(nn->dp.netdev);
2680
- }
26812868
26822869 return 0;
26832870 }
....@@ -3128,7 +3315,9 @@
31283315 static int
31293316 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
31303317 {
3318
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
31313319 struct nfp_net *nn = netdev_priv(netdev);
3320
+ int err;
31323321
31333322 /* Priority tagged packets with vlan id 0 are processed by the
31343323 * NFP as untagged packets
....@@ -3136,17 +3325,23 @@
31363325 if (!vid)
31373326 return 0;
31383327
3328
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
3329
+ if (err)
3330
+ return err;
3331
+
31393332 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
31403333 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
31413334 ETH_P_8021Q);
31423335
3143
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
3336
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
31443337 }
31453338
31463339 static int
31473340 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
31483341 {
3342
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
31493343 struct nfp_net *nn = netdev_priv(netdev);
3344
+ int err;
31503345
31513346 /* Priority tagged packets with vlan id 0 are processed by the
31523347 * NFP as untagged packets
....@@ -3154,11 +3349,15 @@
31543349 if (!vid)
31553350 return 0;
31563351
3352
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
3353
+ if (err)
3354
+ return err;
3355
+
31573356 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
31583357 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
31593358 ETH_P_8021Q);
31603359
3161
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3360
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
31623361 }
31633362
31643363 static void nfp_net_stat64(struct net_device *netdev,
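Note on the two hunks above: the VLAN filter add and kill paths now share the same shape — reserve the mailbox for the filter payload (which also takes the control BAR lock), write the VID and protocol into the mailbox area, then issue the command and drop the lock in one call. They differ only in the command word, so the flow could be expressed as a single helper (a sketch using the driver's own accessors, not code from this patch):

	static int nfp_net_vlan_filter_cmd(struct nfp_net *nn, u32 cmd, u16 vid)
	{
		int err;

		err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
		if (err)
			return err;

		nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
		nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
			  ETH_P_8021Q);

		return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
	}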
....@@ -3167,31 +3366,40 @@
31673366 struct nfp_net *nn = netdev_priv(netdev);
31683367 int r;
31693368
3369
+ /* Collect software stats */
31703370 for (r = 0; r < nn->max_r_vecs; r++) {
31713371 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
31723372 u64 data[3];
31733373 unsigned int start;
31743374
31753375 do {
3176
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
3376
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
31773377 data[0] = r_vec->rx_pkts;
31783378 data[1] = r_vec->rx_bytes;
31793379 data[2] = r_vec->rx_drops;
3180
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
3380
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
31813381 stats->rx_packets += data[0];
31823382 stats->rx_bytes += data[1];
31833383 stats->rx_dropped += data[2];
31843384
31853385 do {
3186
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
3386
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
31873387 data[0] = r_vec->tx_pkts;
31883388 data[1] = r_vec->tx_bytes;
31893389 data[2] = r_vec->tx_errors;
3190
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
3390
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
31913391 stats->tx_packets += data[0];
31923392 stats->tx_bytes += data[1];
31933393 stats->tx_errors += data[2];
31943394 }
3395
+
3396
+ /* Add in device stats */
3397
+ stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
3398
+ stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
3399
+ stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
3400
+
3401
+ stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
3402
+ stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
31953403 }
31963404
31973405 static int nfp_net_set_features(struct net_device *netdev,
....@@ -3296,7 +3504,10 @@
32963504 hdrlen = skb_inner_transport_header(skb) - skb->data +
32973505 inner_tcp_hdrlen(skb);
32983506
3299
- if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
3507
+ /* Assume worst case scenario of having longest possible
3508
+ * metadata prepend - 8B
3509
+ */
3510
+ if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
33003511 features &= ~NETIF_F_GSO_MASK;
33013512 }
33023513
....@@ -3329,8 +3540,11 @@
33293540 struct nfp_net *nn = netdev_priv(netdev);
33303541 int n;
33313542
3543
+ /* If port is defined, devlink_port is registered and devlink core
3544
+ * is taking care of name formatting.
3545
+ */
33323546 if (nn->port)
3333
- return nfp_port_get_phys_port_name(netdev, name, len);
3547
+ return -EOPNOTSUPP;
33343548
33353549 if (nn->dp.is_vf || nn->vnic_no_name)
33363550 return -EOPNOTSUPP;
....@@ -3342,95 +3556,11 @@
33423556 return 0;
33433557 }
33443558
3345
-/**
3346
- * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
3347
- * @nn: NFP Net device to reconfigure
3348
- * @idx: Index into the port table where new port should be written
3349
- * @port: UDP port to configure (pass zero to remove VXLAN port)
3350
- */
3351
-static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
3352
-{
3353
- int i;
3354
-
3355
- nn->vxlan_ports[idx] = port;
3356
-
3357
- if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
3358
- return;
3359
-
3360
- BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3361
- for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
3362
- nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
3363
- be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
3364
- be16_to_cpu(nn->vxlan_ports[i]));
3365
-
3366
- nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
3367
-}
3368
-
3369
-/**
3370
- * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
3371
- * @nn: NFP Network structure
3372
- * @port: UDP port to look for
3373
- *
3374
- * Return: if the port is already in the table -- it's position;
3375
- * if the port is not in the table -- free position to use;
3376
- * if the table is full -- -ENOSPC.
3377
- */
3378
-static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
3379
-{
3380
- int i, free_idx = -ENOSPC;
3381
-
3382
- for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
3383
- if (nn->vxlan_ports[i] == port)
3384
- return i;
3385
- if (!nn->vxlan_usecnt[i])
3386
- free_idx = i;
3387
- }
3388
-
3389
- return free_idx;
3390
-}
3391
-
3392
-static void nfp_net_add_vxlan_port(struct net_device *netdev,
3393
- struct udp_tunnel_info *ti)
3394
-{
3395
- struct nfp_net *nn = netdev_priv(netdev);
3396
- int idx;
3397
-
3398
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3399
- return;
3400
-
3401
- idx = nfp_net_find_vxlan_idx(nn, ti->port);
3402
- if (idx == -ENOSPC)
3403
- return;
3404
-
3405
- if (!nn->vxlan_usecnt[idx]++)
3406
- nfp_net_set_vxlan_port(nn, idx, ti->port);
3407
-}
3408
-
3409
-static void nfp_net_del_vxlan_port(struct net_device *netdev,
3410
- struct udp_tunnel_info *ti)
3411
-{
3412
- struct nfp_net *nn = netdev_priv(netdev);
3413
- int idx;
3414
-
3415
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3416
- return;
3417
-
3418
- idx = nfp_net_find_vxlan_idx(nn, ti->port);
3419
- if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
3420
- return;
3421
-
3422
- if (!--nn->vxlan_usecnt[idx])
3423
- nfp_net_set_vxlan_port(nn, idx, 0);
3424
-}
3425
-
34263559 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
34273560 {
34283561 struct bpf_prog *prog = bpf->prog;
34293562 struct nfp_net_dp *dp;
34303563 int err;
3431
-
3432
- if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
3433
- return -EBUSY;
34343564
34353565 if (!prog == !nn->dp.xdp_prog) {
34363566 WRITE_ONCE(nn->dp.xdp_prog, prog);
....@@ -3460,9 +3590,6 @@
34603590 {
34613591 int err;
34623592
3463
- if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
3464
- return -EBUSY;
3465
-
34663593 err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
34673594 if (err)
34683595 return err;
....@@ -3480,10 +3607,6 @@
34803607 return nfp_net_xdp_setup_drv(nn, xdp);
34813608 case XDP_SETUP_PROG_HW:
34823609 return nfp_net_xdp_setup_hw(nn, xdp);
3483
- case XDP_QUERY_PROG:
3484
- return xdp_attachment_query(&nn->xdp, xdp);
3485
- case XDP_QUERY_PROG_HW:
3486
- return xdp_attachment_query(&nn->xdp_hw, xdp);
34873610 default:
34883611 return nfp_app_bpf(nn->app, nn, xdp);
34893612 }
....@@ -3522,6 +3645,7 @@
35223645 .ndo_set_vf_mac = nfp_app_set_vf_mac,
35233646 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
35243647 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
3648
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
35253649 .ndo_get_vf_config = nfp_app_get_vf_config,
35263650 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
35273651 .ndo_setup_tc = nfp_port_setup_tc,
....@@ -3532,9 +3656,41 @@
35323656 .ndo_set_features = nfp_net_set_features,
35333657 .ndo_features_check = nfp_net_features_check,
35343658 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
3535
- .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3536
- .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3659
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
3660
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
35373661 .ndo_bpf = nfp_net_xdp,
3662
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
3663
+};
3664
+
3665
+static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
3666
+{
3667
+ struct nfp_net *nn = netdev_priv(netdev);
3668
+ int i;
3669
+
3670
+ BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3671
+ for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
3672
+ struct udp_tunnel_info ti0, ti1;
3673
+
3674
+ udp_tunnel_nic_get_port(netdev, table, i, &ti0);
3675
+ udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
3676
+
3677
+ nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
3678
+ be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
3679
+ }
3680
+
3681
+ return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
3682
+}
3683
+
3684
+static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
3685
+ .sync_table = nfp_udp_tunnel_sync,
3686
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
3687
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
3688
+ .tables = {
3689
+ {
3690
+ .n_entries = NFP_NET_N_VXLAN_PORTS,
3691
+ .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,
3692
+ },
3693
+ },
35383694 };
35393695
35403696 /**
....@@ -3551,7 +3707,7 @@
35513707 nn->fw_ver.resv, nn->fw_ver.class,
35523708 nn->fw_ver.major, nn->fw_ver.minor,
35533709 nn->max_mtu);
3554
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3710
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
35553711 nn->cap,
35563712 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
35573713 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
....@@ -3567,7 +3723,6 @@
35673723 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
35683724 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
35693725 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
3570
- nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
35713726 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
35723727 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
35733728 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
....@@ -3581,6 +3736,7 @@
35813736 /**
35823737 * nfp_net_alloc() - Allocate netdev and related structure
35833738 * @pdev: PCI device
3739
+ * @ctrl_bar: PCI IOMEM with vNIC config memory
35843740 * @needs_netdev: Whether to allocate a netdev for this vNIC
35853741 * @max_tx_rings: Maximum number of TX rings supported by device
35863742 * @max_rx_rings: Maximum number of RX rings supported by device
....@@ -3591,11 +3747,12 @@
35913747 *
35923748 * Return: NFP Net device structure, or ERR_PTR on error.
35933749 */
3594
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3595
- unsigned int max_tx_rings,
3596
- unsigned int max_rx_rings)
3750
+struct nfp_net *
3751
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
3752
+ unsigned int max_tx_rings, unsigned int max_rx_rings)
35973753 {
35983754 struct nfp_net *nn;
3755
+ int err;
35993756
36003757 if (needs_netdev) {
36013758 struct net_device *netdev;
....@@ -3615,6 +3772,7 @@
36153772 }
36163773
36173774 nn->dp.dev = &pdev->dev;
3775
+ nn->dp.ctrl_bar = ctrl_bar;
36183776 nn->pdev = pdev;
36193777
36203778 nn->max_tx_rings = max_tx_rings;
....@@ -3632,12 +3790,30 @@
36323790 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
36333791 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
36343792
3793
+ sema_init(&nn->bar_lock, 1);
3794
+
36353795 spin_lock_init(&nn->reconfig_lock);
36363796 spin_lock_init(&nn->link_status_lock);
36373797
36383798 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
36393799
3800
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
3801
+ &nn->tlv_caps);
3802
+ if (err)
3803
+ goto err_free_nn;
3804
+
3805
+ err = nfp_ccm_mbox_alloc(nn);
3806
+ if (err)
3807
+ goto err_free_nn;
3808
+
36403809 return nn;
3810
+
3811
+err_free_nn:
3812
+ if (nn->dp.netdev)
3813
+ free_netdev(nn->dp.netdev);
3814
+ else
3815
+ vfree(nn);
3816
+ return ERR_PTR(err);
36413817 }
36423818
36433819 /**
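Note on the hunk above: nfp_net_alloc() now takes the mapped control BAR, parses the TLV capabilities and allocates the CCM mailbox up front, and unwinds through ERR_PTR() on failure, so callers are expected to test the result with IS_ERR()/PTR_ERR() rather than for NULL (illustrative caller shape, not code from this patch):

	nn = nfp_net_alloc(pdev, ctrl_bar, true /* needs_netdev */,
			   max_tx_rings, max_rx_rings);
	if (IS_ERR(nn))
		return PTR_ERR(nn);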
....@@ -3647,6 +3823,8 @@
36473823 void nfp_net_free(struct nfp_net *nn)
36483824 {
36493825 WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
3826
+ nfp_ccm_mbox_free(nn);
3827
+
36503828 if (nn->dp.netdev)
36513829 free_netdev(nn->dp.netdev);
36523830 else
....@@ -3759,15 +3937,19 @@
37593937 }
37603938 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
37613939 netdev->hw_features |= NETIF_F_RXHASH;
3762
- if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
3763
- nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3940
+ if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
37643941 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3765
- netdev->hw_features |= NETIF_F_GSO_GRE |
3766
- NETIF_F_GSO_UDP_TUNNEL;
3767
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
3768
-
3769
- netdev->hw_enc_features = netdev->hw_features;
3942
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3943
+ netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
3944
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
37703945 }
3946
+ if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3947
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3948
+ netdev->hw_features |= NETIF_F_GSO_GRE;
3949
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
3950
+ }
3951
+ if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
3952
+ netdev->hw_enc_features = netdev->hw_features;
37713953
37723954 netdev->vlan_features = netdev->hw_features;
37733955
....@@ -3800,8 +3982,6 @@
38003982 /* Finalise the netdev setup */
38013983 netdev->netdev_ops = &nfp_net_netdev_ops;
38023984 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
3803
-
3804
- SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
38053985
38063986 /* MTU range: 68 - hw-specific max */
38073987 netdev->min_mtu = ETH_MIN_MTU;
....@@ -3872,10 +4052,13 @@
38724052 return err;
38734053
38744054 /* Set default MTU and Freelist buffer size */
3875
- if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
4055
+ if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
4056
+ nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
4057
+ } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
38764058 nn->dp.mtu = nn->max_mtu;
3877
- else
4059
+ } else {
38784060 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
4061
+ }
38794062 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
38804063
38814064 if (nfp_app_ctrl_uses_data_vnics(nn->app))
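Note on the MTU hunk above: control vNICs now honour the app's requested control-channel MTU, clamped to the device maximum, while data vNICs keep the previous default/maximum behaviour. The selection reduces to a small helper (sketch only; the hypothetical nfp_pick_mtu() name is not part of the patch):

	static unsigned int nfp_pick_mtu(bool is_data_vnic, unsigned int ctrl_mtu,
					 unsigned int max_mtu, unsigned int default_mtu)
	{
		if (!is_data_vnic && ctrl_mtu)
			return min(ctrl_mtu, max_mtu);	/* control vNIC follows the app */
		return max_mtu < default_mtu ? max_mtu : default_mtu;
	}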
....@@ -3897,14 +4080,6 @@
38974080 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
38984081 }
38994082
3900
- err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
3901
- &nn->tlv_caps);
3902
- if (err)
3903
- return err;
3904
-
3905
- if (nn->dp.netdev)
3906
- nfp_net_netdev_init(nn);
3907
-
39084083 /* Stash the re-configuration queue away. First odd queue in TX Bar */
39094084 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
39104085
....@@ -3917,11 +4092,27 @@
39174092 if (err)
39184093 return err;
39194094
4095
+ if (nn->dp.netdev) {
4096
+ nfp_net_netdev_init(nn);
4097
+
4098
+ err = nfp_ccm_mbox_init(nn);
4099
+ if (err)
4100
+ return err;
4101
+
4102
+ err = nfp_net_tls_init(nn);
4103
+ if (err)
4104
+ goto err_clean_mbox;
4105
+ }
4106
+
39204107 nfp_net_vecs_init(nn);
39214108
39224109 if (!nn->dp.netdev)
39234110 return 0;
39244111 return register_netdev(nn->dp.netdev);
4112
+
4113
+err_clean_mbox:
4114
+ nfp_ccm_mbox_clean(nn);
4115
+ return err;
39254116 }
39264117
39274118 /**
....@@ -3934,5 +4125,6 @@
39344125 return;
39354126
39364127 unregister_netdev(nn->dp.netdev);
4128
+ nfp_ccm_mbox_clean(nn);
39374129 nfp_net_reconfig_wait_posted(nn);
39384130 }