forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -39,7 +39,6 @@
 #include <linux/slab.h>
 #include <linux/hash.h>
 #include <net/ip.h>
-#include <net/busy_poll.h>
 #include <net/vxlan.h>
 #include <net/devlink.h>

@@ -52,7 +51,8 @@
 #include "en_port.h"

 #define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
-                                   XDP_PACKET_HEADROOM))
+                                   XDP_PACKET_HEADROOM - \
+                                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))

 int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 {
@@ -1367,24 +1367,18 @@
         }
 }

-static void mlx4_en_tx_timeout(struct net_device *dev)
+static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
         struct mlx4_en_priv *priv = netdev_priv(dev);
         struct mlx4_en_dev *mdev = priv->mdev;
-        int i;
+        struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];

         if (netif_msg_timer(priv))
                 en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

-        for (i = 0; i < priv->tx_ring_num[TX]; i++) {
-                struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
-
-                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
-                        continue;
-                en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-                        i, tx_ring->qpn, tx_ring->sp_cqn,
-                        tx_ring->cons, tx_ring->prod);
-        }
+        en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
+                txqueue, tx_ring->qpn, tx_ring->sp_cqn,
+                tx_ring->cons, tx_ring->prod);

         priv->port_stats.tx_timeout++;
         if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
@@ -1828,7 +1822,7 @@
         queue_work(mdev->workqueue, &priv->rx_mode_task);

         if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
-                udp_tunnel_get_rx_info(dev);
+                udp_tunnel_nic_reset_ntf(dev);

         priv->port_up = true;

@@ -2308,11 +2302,7 @@
                 lockdep_is_held(&priv->mdev->state_lock));

         if (xdp_prog && carry_xdp_prog) {
-                xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
-                if (IS_ERR(xdp_prog)) {
-                        mlx4_en_free_resources(tmp);
-                        return PTR_ERR(xdp_prog);
-                }
+                bpf_prog_add(xdp_prog, tmp->rx_ring_num);
                 for (i = 0; i < tmp->rx_ring_num; i++)
                         rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
                                            xdp_prog);
@@ -2652,105 +2642,32 @@
         return 0;
 }

-static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
+        struct mlx4_en_priv *priv = netdev_priv(dev);
+        struct udp_tunnel_info ti;
         int ret;
-        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                 vxlan_add_task);
+
+        udp_tunnel_nic_get_port(dev, table, 0, &ti);
+        priv->vxlan_port = ti.port;

         ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
         if (ret)
-                goto out;
+                return ret;

-        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
-                                  VXLAN_STEER_BY_OUTER_MAC, 1);
-out:
-        if (ret) {
-                en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
-                return;
-        }
-
-        /* set offloads */
-        priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                                      NETIF_F_RXCSUM |
-                                      NETIF_F_TSO | NETIF_F_TSO6 |
-                                      NETIF_F_GSO_UDP_TUNNEL |
-                                      NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                      NETIF_F_GSO_PARTIAL;
+        return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+                                   VXLAN_STEER_BY_OUTER_MAC,
+                                   !!priv->vxlan_port);
 }

-static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
-{
-        int ret;
-        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                 vxlan_del_task);
-        /* unset offloads */
-        priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                                        NETIF_F_RXCSUM |
-                                        NETIF_F_TSO | NETIF_F_TSO6 |
-                                        NETIF_F_GSO_UDP_TUNNEL |
-                                        NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                        NETIF_F_GSO_PARTIAL);
-
-        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
-                                  VXLAN_STEER_BY_OUTER_MAC, 0);
-        if (ret)
-                en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
-
-        priv->vxlan_port = 0;
-}
-
-static void mlx4_en_add_vxlan_port(struct net_device *dev,
-                                   struct udp_tunnel_info *ti)
-{
-        struct mlx4_en_priv *priv = netdev_priv(dev);
-        __be16 port = ti->port;
-        __be16 current_port;
-
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
-
-        if (ti->sa_family != AF_INET)
-                return;
-
-        if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
-                return;
-
-        current_port = priv->vxlan_port;
-        if (current_port && current_port != port) {
-                en_warn(priv, "vxlan port %d configured, can't add port %d\n",
-                        ntohs(current_port), ntohs(port));
-                return;
-        }
-
-        priv->vxlan_port = port;
-        queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
-}
-
-static void mlx4_en_del_vxlan_port(struct net_device *dev,
-                                   struct udp_tunnel_info *ti)
-{
-        struct mlx4_en_priv *priv = netdev_priv(dev);
-        __be16 port = ti->port;
-        __be16 current_port;
-
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
-
-        if (ti->sa_family != AF_INET)
-                return;
-
-        if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
-                return;
-
-        current_port = priv->vxlan_port;
-        if (current_port != port) {
-                en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
-                return;
-        }
-
-        queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
-}
+static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
+        .sync_table     = mlx4_udp_tunnel_sync,
+        .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                          UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+        .tables         = {
+                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+        },
+};

 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
                                                 struct net_device *dev,
@@ -2822,11 +2739,9 @@
          * program for a new one.
          */
         if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
-                if (prog) {
-                        prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-                        if (IS_ERR(prog))
-                                return PTR_ERR(prog);
-                }
+                if (prog)
+                        bpf_prog_add(prog, priv->rx_ring_num - 1);
+
                 mutex_lock(&mdev->state_lock);
                 for (i = 0; i < priv->rx_ring_num; i++) {
                         old_prog = rcu_dereference_protected(
@@ -2847,13 +2762,8 @@
         if (!tmp)
                 return -ENOMEM;

-        if (prog) {
-                prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-                if (IS_ERR(prog)) {
-                        err = PTR_ERR(prog);
-                        goto out;
-                }
-        }
+        if (prog)
+                bpf_prog_add(prog, priv->rx_ring_num - 1);

         mutex_lock(&mdev->state_lock);
         memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
@@ -2903,30 +2813,8 @@

 unlock_out:
         mutex_unlock(&mdev->state_lock);
-out:
         kfree(tmp);
         return err;
-}
-
-static u32 mlx4_xdp_query(struct net_device *dev)
-{
-        struct mlx4_en_priv *priv = netdev_priv(dev);
-        struct mlx4_en_dev *mdev = priv->mdev;
-        const struct bpf_prog *xdp_prog;
-        u32 prog_id = 0;
-
-        if (!priv->tx_ring_num[TX_XDP])
-                return prog_id;
-
-        mutex_lock(&mdev->state_lock);
-        xdp_prog = rcu_dereference_protected(
-                priv->rx_ring[0]->xdp_prog,
-                lockdep_is_held(&mdev->state_lock));
-        if (xdp_prog)
-                prog_id = xdp_prog->aux->id;
-        mutex_unlock(&mdev->state_lock);
-
-        return prog_id;
 }

 static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
@@ -2934,9 +2822,6 @@
         switch (xdp->command) {
         case XDP_SETUP_PROG:
                 return mlx4_xdp_set(dev, xdp->prog);
-        case XDP_QUERY_PROG:
-                xdp->prog_id = mlx4_xdp_query(dev);
-                return 0;
         default:
                 return -EINVAL;
         }
@@ -2963,8 +2848,8 @@
         .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
         .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
-        .ndo_udp_tunnel_add     = mlx4_en_add_vxlan_port,
-        .ndo_udp_tunnel_del     = mlx4_en_del_vxlan_port,
+        .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+        .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
         .ndo_features_check     = mlx4_en_features_check,
         .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
         .ndo_bpf                = mlx4_xdp,
@@ -2997,8 +2882,8 @@
         .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
         .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
-        .ndo_udp_tunnel_add     = mlx4_en_add_vxlan_port,
-        .ndo_udp_tunnel_del     = mlx4_en_del_vxlan_port,
+        .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+        .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
         .ndo_features_check     = mlx4_en_features_check,
         .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
         .ndo_bpf                = mlx4_xdp,
@@ -3299,8 +3184,6 @@
         INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
         INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
         INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-        INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
-        INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
 #ifdef CONFIG_RFS_ACCEL
         INIT_LIST_HEAD(&priv->filters);
         spin_lock_init(&priv->filters_lock);
@@ -3385,7 +3268,7 @@
         dev->addr_len = ETH_ALEN;
         mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
         if (!is_valid_ether_addr(dev->dev_addr)) {
-                en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+                en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
                        priv->port, dev->dev_addr);
                 err = -EINVAL;
                 goto out;
@@ -3439,6 +3322,25 @@
         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
         if (mdev->LSO_support)
                 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+        if (mdev->dev->caps.tunnel_offload_mode ==
+            MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+                dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+                                    NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                    NETIF_F_GSO_PARTIAL;
+                dev->features    |= NETIF_F_GSO_UDP_TUNNEL |
+                                    NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                    NETIF_F_GSO_PARTIAL;
+                dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+                dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                       NETIF_F_RXCSUM |
+                                       NETIF_F_TSO | NETIF_F_TSO6 |
+                                       NETIF_F_GSO_UDP_TUNNEL |
+                                       NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                       NETIF_F_GSO_PARTIAL;
+
+                dev->udp_tunnel_nic_info = &mlx4_udp_tunnels;
+        }

         dev->vlan_features = dev->hw_features;

@@ -3506,16 +3408,6 @@
                 en_warn(priv,
                         "No RSS hash capabilities exposed, using Toeplitz\n");
                 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
-        }
-
-        if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-                dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
-                                    NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                    NETIF_F_GSO_PARTIAL;
-                dev->features    |= NETIF_F_GSO_UDP_TUNNEL |
-                                    NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                    NETIF_F_GSO_PARTIAL;
-                dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
         }

         /* MTU range: 68 - hw-specific max */