2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -35,10 +35,14 @@
 
 #include <net/ip_tunnels.h>
 #include <linux/rhashtable.h>
+#include <linux/mutex.h>
 #include "eswitch.h"
 #include "en.h"
+#include "lib/port_tun.h"
 
 #ifdef CONFIG_MLX5_ESWITCH
+extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep;
+
 struct mlx5e_neigh_update_table {
        struct rhashtable neigh_ht;
        /* Save the neigh hash entries in a list in addition to the hash table
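
Note: <linux/mutex.h> backs the encap_lock spinlock-to-mutex conversion in the next hunk, and "lib/port_tun.h" presumably supplies struct mlx5_tun_entropy, embedded in the new mlx5_rep_uplink_priv below. The rep RX handlers are now exposed as a const mlx5e_rx_handlers table instead of the standalone mlx5e_handle_rx_cqe_rep() prototype removed in the final hunk.
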
@@ -47,25 +51,62 @@
         */
        struct list_head neigh_list;
        /* protect lookup/remove operations */
-       spinlock_t encap_lock;
+       struct mutex encap_lock;
        struct notifier_block netevent_nb;
        struct delayed_work neigh_stats_work;
        unsigned long min_interval; /* jiffies */
+};
+
+struct mlx5_tc_ct_priv;
+struct mlx5e_rep_bond;
+struct mlx5_rep_uplink_priv {
+       /* Filters DB - instantiated by the uplink representor and shared by
+        * the uplink's VFs
+        */
+       struct rhashtable tc_ht;
+
+       /* indirect block callbacks are invoked on bind/unbind events
+        * on registered higher level devices (e.g. tunnel devices)
+        *
+        * tc_indr_block_cb_priv_list is used to lookup indirect callback
+        * private data
+        *
+        */
+       struct list_head tc_indr_block_priv_list;
+
+       struct mlx5_tun_entropy tun_entropy;
+
+       /* protects unready_flows */
+       struct mutex unready_flows_lock;
+       struct list_head unready_flows;
+       struct work_struct reoffload_flows_work;
+
+       /* maps tun_info to a unique id*/
+       struct mapping_ctx *tunnel_mapping;
+       /* maps tun_enc_opts to a unique id*/
+       struct mapping_ctx *tunnel_enc_opts_mapping;
+
+       struct mlx5_tc_ct_priv *ct_priv;
+
+       /* support eswitch vports bonding */
+       struct mlx5e_rep_bond *bond;
 };
 
 struct mlx5e_rep_priv {
        struct mlx5_eswitch_rep *rep;
        struct mlx5e_neigh_update_table neigh_update;
        struct net_device *netdev;
+       struct mlx5_flow_table *root_ft;
        struct mlx5_flow_handle *vport_rx_rule;
        struct list_head vport_sqs_list;
-       struct rhashtable tc_ht; /* valid for uplink rep */
+       struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
+       struct rtnl_link_stats64 prev_vf_vport_stats;
 };
 
 static inline
 struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
 {
-       return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+       return rep->rep_data[REP_ETH].priv;
 }
 
 struct mlx5e_neigh {
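
Note: converting encap_lock to a mutex lets lookup/remove paths sleep while holding it, which fits workqueue-based neigh handling (the per-entry update work is dropped in a later hunk). Uplink-only state also grows from a single tc_ht rhashtable into the dedicated mlx5_rep_uplink_priv container, and mlx5e_rep_to_rep_priv() drops its cast, implying rep_data[REP_ETH].priv is a plain void pointer.
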
@@ -80,6 +121,7 @@
 struct mlx5e_neigh_hash_entry {
        struct rhash_head rhash_node;
        struct mlx5e_neigh m_neigh;
+       struct mlx5e_priv *priv;
 
        /* Save the neigh hash entry in a list on the representor in
         * addition to the hash table. In order to iterate easily over the
@@ -87,14 +129,10 @@
         */
        struct list_head neigh_list;
 
+       /* protects encap list */
+       spinlock_t encap_list_lock;
        /* encap list sharing the same neigh */
        struct list_head encap_list;
-
-       /* valid only when the neigh reference is taken during
-        * neigh_update_work workqueue callback.
-        */
-       struct neighbour *n;
-       struct work_struct neigh_update_work;
 
        /* neigh hash entry can be deleted only when the refcount is zero.
         * refcount is needed to avoid neigh hash entry removal by TC, while
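
Note: the encap list hanging off each neigh hash entry now has its own spinlock instead of relying on the table-wide encap_lock, and the cached struct neighbour pointer plus per-entry neigh_update_work are removed, consistent with neigh updates being processed through a different path after this refactor.
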
@@ -107,14 +145,34 @@
         * 'used' value and avoid neigh deleting by the kernel.
         */
        unsigned long reported_lastuse;
+
+       struct rcu_head rcu;
 };
 
 enum {
        /* set when the encap entry is successfully offloaded into HW */
        MLX5_ENCAP_ENTRY_VALID = BIT(0),
+       MLX5_REFORMAT_DECAP = BIT(1),
+};
+
+struct mlx5e_decap_key {
+       struct ethhdr key;
+};
+
+struct mlx5e_decap_entry {
+       struct mlx5e_decap_key key;
+       struct list_head flows;
+       struct hlist_node hlist;
+       refcount_t refcnt;
+       struct completion res_ready;
+       int compl_result;
+       struct mlx5_pkt_reformat *pkt_reformat;
+       struct rcu_head rcu;
 };
 
 struct mlx5e_encap_entry {
+       /* attached neigh hash entry */
+       struct mlx5e_neigh_hash_entry *nhe;
        /* neigh hash entry list of encaps sharing the same neigh */
        struct list_head encap_list;
        struct mlx5e_neigh m_neigh;
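
Note: the new mlx5e_decap_entry, and the extended mlx5e_encap_entry that follows, both adopt the same lifetime triple: a refcount_t, a completion (res_ready) with a saved result (compl_result), and an rcu_head. This is a common kernel idiom for objects that are looked up concurrently while one thread finishes their hardware setup asynchronously. A minimal sketch of how such fields typically fit together, with hypothetical demo_* names, not code from this patch:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_entry {
        refcount_t refcnt;
        struct completion res_ready;
        int compl_result;
        struct rcu_head rcu;
};

/* Hypothetical RCU-safe hash lookup, provided elsewhere. */
static struct demo_entry *demo_hash_lookup(u32 key);

static void demo_entry_put(struct demo_entry *e)
{
        if (refcount_dec_and_test(&e->refcnt))
                kfree_rcu(e, rcu);      /* free only after RCU readers drain */
}

static struct demo_entry *demo_entry_get(u32 key)
{
        struct demo_entry *e;
        int err;

        rcu_read_lock();
        e = demo_hash_lookup(key);
        if (e && !refcount_inc_not_zero(&e->refcnt))
                e = NULL;               /* entry is mid-teardown; treat as miss */
        rcu_read_unlock();

        if (!e)
                return NULL;

        /* Safe to sleep here: we hold a reference, not the RCU lock. */
        wait_for_completion(&e->res_ready);
        err = READ_ONCE(e->compl_result);
        if (err < 0) {                  /* creator failed the HW offload */
                demo_entry_put(e);
                return ERR_PTR(err);
        }
        return e;
}

The creating side would insert the entry first, perform the offload, store the outcome in compl_result, and then complete_all(&e->res_ready) so waiters can proceed.
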
@@ -123,15 +181,21 @@
         */
        struct hlist_node encap_hlist;
        struct list_head flows;
-       u32 encap_id;
-       struct ip_tunnel_info tun_info;
+       struct mlx5_pkt_reformat *pkt_reformat;
+       const struct ip_tunnel_info *tun_info;
        unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
 
        struct net_device *out_dev;
-       int tunnel_type;
+       int route_dev_ifindex;
+       struct mlx5e_tc_tunnel *tunnel;
+       int reformat_type;
        u8 flags;
        char *encap_header;
        int encap_size;
+       refcount_t refcnt;
+       struct completion res_ready;
+       int compl_result;
+       struct rcu_head rcu;
 };
 
 struct mlx5e_rep_sq {
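
Note: the encap entry now stores a struct mlx5_pkt_reformat pointer rather than a raw encap_id, keeps tun_info by (const) pointer instead of by value, records the routing device by ifindex, and gains the same refcnt/res_ready/compl_result/rcu lifetime fields sketched above.
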
@@ -139,31 +203,39 @@
        struct list_head list;
 };
 
-void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
-void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
-void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
+void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
+void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
+int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv);
+int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
+                          struct net_device *lag_dev);
+void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
+                           const struct net_device *netdev,
+                           const struct net_device *lag_dev);
+int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup);
+
 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
 
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp);
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
-
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-
-int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
-                                struct mlx5e_encap_entry *e);
-void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
-                                 struct mlx5e_encap_entry *e);
-
 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+       return mlx5e_eswitch_vf_rep(netdev) ||
+              mlx5e_eswitch_uplink_rep(netdev);
+}
+
 #else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {}
-static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {}
 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
 static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
 static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
 #endif
 
+static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
+{
+       return (MLX5_ESWITCH_MANAGER(priv->mdev) && priv->ppriv);
+}
 #endif /* __MLX5E_REP_H__ */
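
Note: the new mlx5e_eswitch_vf_rep()/mlx5e_eswitch_uplink_rep() helpers, and the mlx5e_eswitch_rep() wrapper, let callers test whether an arbitrary net_device belongs to this driver's representors; mlx5e_is_vport_rep() sits outside the CONFIG_MLX5_ESWITCH block and is always defined. A hedged usage sketch (hypothetical notifier, not part of this patch):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_netdev_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        /* Ignore anything that is not an mlx5e representor netdev. */
        if (!mlx5e_eswitch_rep(netdev))
                return NOTIFY_DONE;

        /* ... representor-specific event handling would go here ... */
        return NOTIFY_OK;
}
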