forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -34,46 +34,273 @@
3434 #define __MLX5_EN_TC_H__
3535
3636 #include <net/pkt_cls.h>
37
+#include "en.h"
38
+#include "eswitch.h"
39
+#include "en/tc_ct.h"
3740
3841 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
3942
4043 #ifdef CONFIG_MLX5_ESWITCH
4144
42
-enum {
43
- MLX5E_TC_INGRESS = BIT(0),
44
- MLX5E_TC_EGRESS = BIT(1),
45
- MLX5E_TC_LAST_EXPORTED_BIT = 1,
45
+#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
46
+ sizeof(struct mlx5_nic_flow_attr))
47
+#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
48
+ sizeof(struct mlx5_esw_flow_attr))
49
+#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
50
+ ESW_FLOW_ATTR_SZ :\
51
+ NIC_FLOW_ATTR_SZ)
52
+
53
+
54
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
55
+
56
+struct mlx5e_tc_update_priv {
57
+ struct net_device *tun_dev;
4658 };
4759
48
-int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
49
-void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
60
+struct mlx5_nic_flow_attr {
61
+ u32 flow_tag;
62
+ u32 hairpin_tirn;
63
+ struct mlx5_flow_table *hairpin_ft;
64
+};
65
+
66
+struct mlx5_flow_attr {
67
+ u32 action;
68
+ struct mlx5_fc *counter;
69
+ struct mlx5_modify_hdr *modify_hdr;
70
+ struct mlx5_ct_attr ct_attr;
71
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
72
+ u32 chain;
73
+ u16 prio;
74
+ u32 dest_chain;
75
+ struct mlx5_flow_table *ft;
76
+ struct mlx5_flow_table *dest_ft;
77
+ u8 inner_match_level;
78
+ u8 outer_match_level;
79
+ u32 flags;
80
+ union {
81
+ struct mlx5_esw_flow_attr esw_attr[0];
82
+ struct mlx5_nic_flow_attr nic_attr[0];
83
+ };
84
+};
85
+
86
+#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
87
+#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
88
+
89
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
90
+
91
+struct tunnel_match_key {
92
+ struct flow_dissector_key_control enc_control;
93
+ struct flow_dissector_key_keyid enc_key_id;
94
+ struct flow_dissector_key_ports enc_tp;
95
+ struct flow_dissector_key_ip enc_ip;
96
+ union {
97
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
98
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
99
+ };
100
+
101
+ int filter_ifindex;
102
+};
103
+
104
+struct tunnel_match_enc_opts {
105
+ struct flow_dissector_key_enc_opts key;
106
+ struct flow_dissector_key_enc_opts mask;
107
+};
108
+
109
+/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
110
+ * Upper TUNNEL_INFO_BITS for general tunnel info.
111
+ * Lower ENC_OPTS_BITS bits for enc_opts.
112
+ */
113
+#define TUNNEL_INFO_BITS 12
114
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
115
+#define ENC_OPTS_BITS 12
116
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
117
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
118
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
119
+
120
+enum {
121
+ MLX5E_TC_FLAG_INGRESS_BIT,
122
+ MLX5E_TC_FLAG_EGRESS_BIT,
123
+ MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
124
+ MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
125
+ MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
126
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
127
+};
128
+
129
+#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
50130
51131 int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
52132 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
133
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
53134
54
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
55
- struct tc_cls_flower_offload *f, int flags);
56
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
57
- struct tc_cls_flower_offload *f, int flags);
135
+int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
136
+ struct flow_cls_offload *f, unsigned long flags);
137
+int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
138
+ struct flow_cls_offload *f, unsigned long flags);
58139
59
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
60
- struct tc_cls_flower_offload *f, int flags);
140
+int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
141
+ struct flow_cls_offload *f, unsigned long flags);
142
+
143
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
144
+ struct tc_cls_matchall_offload *f);
145
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
146
+ struct tc_cls_matchall_offload *f);
147
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
148
+ struct tc_cls_matchall_offload *ma);
61149
62150 struct mlx5e_encap_entry;
63151 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
64
- struct mlx5e_encap_entry *e);
152
+ struct mlx5e_encap_entry *e,
153
+ struct list_head *flow_list);
65154 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
66
- struct mlx5e_encap_entry *e);
155
+ struct mlx5e_encap_entry *e,
156
+ struct list_head *flow_list);
157
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
158
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
159
+
160
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
161
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
67162
68163 struct mlx5e_neigh_hash_entry;
69164 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
70165
71
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv);
166
+void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
167
+
168
+enum mlx5e_tc_attr_to_reg {
169
+ CHAIN_TO_REG,
170
+ TUNNEL_TO_REG,
171
+ CTSTATE_TO_REG,
172
+ ZONE_TO_REG,
173
+ ZONE_RESTORE_TO_REG,
174
+ MARK_TO_REG,
175
+ LABELS_TO_REG,
176
+ FTEID_TO_REG,
177
+ NIC_CHAIN_TO_REG,
178
+ NIC_ZONE_RESTORE_TO_REG,
179
+};
180
+
181
+struct mlx5e_tc_attr_to_reg_mapping {
182
+ int mfield; /* rewrite field */
183
+ int moffset; /* offset of mfield */
184
+ int mlen; /* bytes to rewrite/match */
185
+
186
+ int soffset; /* offset of spec for match */
187
+};
188
+
189
+extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
190
+
191
+bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
192
+ struct net_device *out_dev);
193
+
194
+int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
195
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
196
+ enum mlx5_flow_namespace_type ns,
197
+ enum mlx5e_tc_attr_to_reg type,
198
+ u32 data);
199
+
200
+void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
201
+ enum mlx5e_tc_attr_to_reg type,
202
+ u32 data,
203
+ u32 mask);
204
+
205
+void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
206
+ enum mlx5e_tc_attr_to_reg type,
207
+ u32 *data,
208
+ u32 *mask);
209
+
210
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
211
+ int namespace,
212
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
213
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
214
+
215
+struct mlx5e_tc_flow;
216
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
217
+
218
+void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
219
+ struct flow_match_basic *match, bool outer,
220
+ void *headers_c, void *headers_v);
221
+
222
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
223
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
224
+
225
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
226
+ void *cb_priv);
227
+
228
+struct mlx5_flow_handle *
229
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
230
+ struct mlx5_flow_spec *spec,
231
+ struct mlx5_flow_attr *attr);
232
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
233
+ struct mlx5_flow_handle *rule,
234
+ struct mlx5_flow_attr *attr);
235
+
236
+struct mlx5_flow_handle *
237
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
238
+ struct mlx5_flow_spec *spec,
239
+ struct mlx5_flow_attr *attr);
240
+void
241
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
242
+ struct mlx5_flow_handle *rule,
243
+ struct mlx5_flow_attr *attr);
244
+
245
+#else /* CONFIG_MLX5_CLS_ACT */
246
+static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
247
+static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
248
+static inline int
249
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
250
+{ return -EOPNOTSUPP; }
251
+
252
+#endif /* CONFIG_MLX5_CLS_ACT */
253
+
254
+struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
255
+
256
+struct mlx5_flow_handle *
257
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
258
+ struct mlx5_flow_spec *spec,
259
+ struct mlx5_flow_attr *attr);
260
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
261
+ struct mlx5_flow_handle *rule,
262
+ struct mlx5_flow_attr *attr);
72263
73264 #else /* CONFIG_MLX5_ESWITCH */
74265 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
75266 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
76
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; }
267
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
268
+ unsigned long flags)
269
+{
270
+ return 0;
271
+}
272
+
273
+static inline int
274
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
275
+{ return -EOPNOTSUPP; }
276
+#endif
277
+
278
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
279
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
280
+{
281
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
282
+ u32 chain, reg_b;
283
+
284
+ reg_b = be32_to_cpu(cqe->ft_metadata);
285
+
286
+ if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
287
+ return false;
288
+
289
+ chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
290
+ if (chain)
291
+ return true;
292
+#endif
293
+
294
+ return false;
295
+}
296
+
297
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
298
+#else /* CONFIG_MLX5_CLS_ACT */
299
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
300
+{ return false; }
301
+static inline bool
302
+mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
303
+{ return true; }
77304 #endif
78305
79306 #endif /* __MLX5_EN_TC_H__ */