.. | .. |
---|
31 | 31 | */ |
---|
32 | 32 | |
---|
33 | 33 | #include <linux/etherdevice.h> |
---|
| 34 | +#include <linux/idr.h> |
---|
34 | 35 | #include <linux/mlx5/driver.h> |
---|
35 | 36 | #include <linux/mlx5/mlx5_ifc.h> |
---|
36 | 37 | #include <linux/mlx5/vport.h> |
---|
37 | 38 | #include <linux/mlx5/fs.h> |
---|
38 | 39 | #include "mlx5_core.h" |
---|
39 | 40 | #include "eswitch.h" |
---|
| 41 | +#include "esw/acl/ofld.h" |
---|
| 42 | +#include "rdma.h" |
---|
| 43 | +#include "en.h" |
---|
| 44 | +#include "fs_core.h" |
---|
| 45 | +#include "lib/devcom.h" |
---|
| 46 | +#include "lib/eq.h" |
---|
| 47 | +#include "lib/fs_chains.h" |
---|
| 48 | +#include "en_tc.h" |
---|
40 | 49 | |
---|
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

/* Max FTEs in a single per-vport FDB table. */
#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

/* Caller-supplied identity of a per-vport table; converted into a
 * hashed mlx5_vport_key by flow_attr_to_vport_key().
 */
struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
};

/* Hash-table entry: one FDB table per (chain, prio, vport, vhca_id).
 * num_rules is a reference count managed by esw_vport_tbl_get()/_put().
 */
struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
---|
| 84 | + |
---|
/* Allocate an auto-grouped FDB flow table at the FDB_PER_VPORT priority.
 * Returns the table or an ERR_PTR; a warning is logged on failure and the
 * error pointer is handed back to the caller as-is.
 */
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}
---|
| 102 | + |
---|
| 103 | +static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw, |
---|
| 104 | + struct mlx5_vport_tbl_attr *attr, |
---|
| 105 | + struct mlx5_vport_key *key) |
---|
| 106 | +{ |
---|
| 107 | + key->vport = attr->vport; |
---|
| 108 | + key->chain = attr->chain; |
---|
| 109 | + key->prio = attr->prio; |
---|
| 110 | + key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); |
---|
| 111 | + return jhash(key, sizeof(*key), 0); |
---|
| 112 | +} |
---|
| 113 | + |
---|
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	/* Walk the hash bucket for @key; full memcmp on the packed key
	 * resolves hash collisions.
	 */
	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}
---|
| 126 | + |
---|
/* Drop one reference on the per-vport table identified by @attr.
 * When the last reference goes away, the entry is unhashed, the HW flow
 * table destroyed and the entry freed. A missing entry is a no-op.
 */
static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
---|
| 146 | + |
---|
/* Get (or create) the per-vport FDB table identified by @attr, taking a
 * reference on it. Returns the flow table or an ERR_PTR. The vports.lock
 * mutex serializes lookup/create against esw_vport_tbl_put().
 */
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		/* Existing table: just bump the refcount. */
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}
---|
| 196 | + |
---|
/* Take a reference on the chain-0/prio-1 per-vport table of every vport.
 * On failure, mlx5_esw_vport_tbl_put() unwinds all vports; vports that were
 * never "got" are harmless since esw_vport_tbl_put() skips missing entries.
 * Returns 0 on success or a negative errno.
 */
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}
---|
| 218 | + |
---|
/* Release the chain-0/prio-1 per-vport table reference of every vport;
 * counterpart of mlx5_esw_vport_tbl_get().
 */
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}
---|
| 232 | + |
---|
| 233 | +/* End: Per vport tables */ |
---|
| 234 | + |
---|
| 235 | +static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, |
---|
| 236 | + u16 vport_num) |
---|
| 237 | +{ |
---|
| 238 | + int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); |
---|
| 239 | + |
---|
| 240 | + WARN_ON(idx > esw->total_vports - 1); |
---|
| 241 | + return &esw->offloads.vport_reps[idx]; |
---|
| 242 | +} |
---|
| 243 | + |
---|
| 244 | +static void |
---|
| 245 | +mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, |
---|
| 246 | + struct mlx5_flow_spec *spec, |
---|
| 247 | + struct mlx5_esw_flow_attr *attr) |
---|
| 248 | +{ |
---|
| 249 | + if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && |
---|
| 250 | + attr && attr->in_rep) |
---|
| 251 | + spec->flow_context.flow_source = |
---|
| 252 | + attr->in_rep->vport == MLX5_VPORT_UPLINK ? |
---|
| 253 | + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK : |
---|
| 254 | + MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; |
---|
| 255 | +} |
---|
| 256 | + |
---|
/* Add a source-port match to @spec: either via the metadata register
 * (reg_c_0) or via the classic source_port/vhca_id misc fields, depending
 * on whether vport match metadata is enabled on this eswitch.
 */
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		/* With merged eswitches the owner VHCA id disambiguates
		 * vports of the two sides.
		 */
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
---|
45 | 297 | |
---|
46 | 298 | struct mlx5_flow_handle * |
---|
47 | 299 | mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, |
---|
48 | 300 | struct mlx5_flow_spec *spec, |
---|
49 | | - struct mlx5_esw_flow_attr *attr) |
---|
| 301 | + struct mlx5_flow_attr *attr) |
---|
50 | 302 | { |
---|
51 | 303 | struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; |
---|
52 | | - struct mlx5_flow_act flow_act = {0}; |
---|
53 | | - struct mlx5_flow_table *ft = NULL; |
---|
54 | | - struct mlx5_fc *counter = NULL; |
---|
| 304 | + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; |
---|
| 305 | + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; |
---|
| 306 | + struct mlx5_fs_chains *chains = esw_chains(esw); |
---|
| 307 | + bool split = !!(esw_attr->split_count); |
---|
| 308 | + struct mlx5_vport_tbl_attr fwd_attr; |
---|
55 | 309 | struct mlx5_flow_handle *rule; |
---|
| 310 | + struct mlx5_flow_table *fdb; |
---|
56 | 311 | int j, i = 0; |
---|
57 | | - void *misc; |
---|
58 | 312 | |
---|
59 | | - if (esw->mode != SRIOV_OFFLOADS) |
---|
| 313 | + if (esw->mode != MLX5_ESWITCH_OFFLOADS) |
---|
60 | 314 | return ERR_PTR(-EOPNOTSUPP); |
---|
61 | | - |
---|
62 | | - if (attr->mirror_count) |
---|
63 | | - ft = esw->fdb_table.offloads.fwd_fdb; |
---|
64 | | - else |
---|
65 | | - ft = esw->fdb_table.offloads.fast_fdb; |
---|
66 | 315 | |
---|
67 | 316 | flow_act.action = attr->action; |
---|
68 | 317 | /* if per flow vlan pop/push is emulated, don't set that into the firmware */ |
---|
.. | .. |
---|
70 | 319 | flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | |
---|
71 | 320 | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); |
---|
72 | 321 | else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { |
---|
73 | | - flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]); |
---|
74 | | - flow_act.vlan[0].vid = attr->vlan_vid[0]; |
---|
75 | | - flow_act.vlan[0].prio = attr->vlan_prio[0]; |
---|
| 322 | + flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]); |
---|
| 323 | + flow_act.vlan[0].vid = esw_attr->vlan_vid[0]; |
---|
| 324 | + flow_act.vlan[0].prio = esw_attr->vlan_prio[0]; |
---|
76 | 325 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { |
---|
77 | | - flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]); |
---|
78 | | - flow_act.vlan[1].vid = attr->vlan_vid[1]; |
---|
79 | | - flow_act.vlan[1].prio = attr->vlan_prio[1]; |
---|
| 326 | + flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]); |
---|
| 327 | + flow_act.vlan[1].vid = esw_attr->vlan_vid[1]; |
---|
| 328 | + flow_act.vlan[1].prio = esw_attr->vlan_prio[1]; |
---|
80 | 329 | } |
---|
81 | 330 | } |
---|
82 | 331 | |
---|
83 | 332 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { |
---|
84 | | - for (j = attr->mirror_count; j < attr->out_count; j++) { |
---|
85 | | - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
---|
86 | | - dest[i].vport.num = attr->out_rep[j]->vport; |
---|
87 | | - dest[i].vport.vhca_id = |
---|
88 | | - MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); |
---|
89 | | - dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); |
---|
| 333 | + struct mlx5_flow_table *ft; |
---|
| 334 | + |
---|
| 335 | + if (attr->dest_ft) { |
---|
| 336 | + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; |
---|
| 337 | + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
---|
| 338 | + dest[i].ft = attr->dest_ft; |
---|
90 | 339 | i++; |
---|
| 340 | + } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { |
---|
| 341 | + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; |
---|
| 342 | + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
---|
| 343 | + dest[i].ft = mlx5_chains_get_tc_end_ft(chains); |
---|
| 344 | + i++; |
---|
| 345 | + } else if (attr->dest_chain) { |
---|
| 346 | + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; |
---|
| 347 | + ft = mlx5_chains_get_table(chains, attr->dest_chain, |
---|
| 348 | + 1, 0); |
---|
| 349 | + if (IS_ERR(ft)) { |
---|
| 350 | + rule = ERR_CAST(ft); |
---|
| 351 | + goto err_create_goto_table; |
---|
| 352 | + } |
---|
| 353 | + |
---|
| 354 | + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
---|
| 355 | + dest[i].ft = ft; |
---|
| 356 | + i++; |
---|
| 357 | + } else { |
---|
| 358 | + for (j = esw_attr->split_count; j < esw_attr->out_count; j++) { |
---|
| 359 | + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
---|
| 360 | + dest[i].vport.num = esw_attr->dests[j].rep->vport; |
---|
| 361 | + dest[i].vport.vhca_id = |
---|
| 362 | + MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id); |
---|
| 363 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
| 364 | + dest[i].vport.flags |= |
---|
| 365 | + MLX5_FLOW_DEST_VPORT_VHCA_ID; |
---|
| 366 | + if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) { |
---|
| 367 | + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; |
---|
| 368 | + flow_act.pkt_reformat = |
---|
| 369 | + esw_attr->dests[j].pkt_reformat; |
---|
| 370 | + dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; |
---|
| 371 | + dest[i].vport.pkt_reformat = |
---|
| 372 | + esw_attr->dests[j].pkt_reformat; |
---|
| 373 | + } |
---|
| 374 | + i++; |
---|
| 375 | + } |
---|
91 | 376 | } |
---|
92 | 377 | } |
---|
| 378 | + |
---|
| 379 | + if (esw_attr->decap_pkt_reformat) |
---|
| 380 | + flow_act.pkt_reformat = esw_attr->decap_pkt_reformat; |
---|
| 381 | + |
---|
93 | 382 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { |
---|
94 | | - counter = mlx5_fc_create(esw->dev, true); |
---|
95 | | - if (IS_ERR(counter)) { |
---|
96 | | - rule = ERR_CAST(counter); |
---|
97 | | - goto err_counter_alloc; |
---|
98 | | - } |
---|
99 | 383 | dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; |
---|
100 | | - dest[i].counter = counter; |
---|
| 384 | + dest[i].counter_id = mlx5_fc_id(attr->counter); |
---|
101 | 385 | i++; |
---|
102 | 386 | } |
---|
103 | 387 | |
---|
104 | | - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); |
---|
105 | | - MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); |
---|
106 | | - |
---|
107 | | - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
108 | | - MLX5_SET(fte_match_set_misc, misc, |
---|
109 | | - source_eswitch_owner_vhca_id, |
---|
110 | | - MLX5_CAP_GEN(attr->in_mdev, vhca_id)); |
---|
111 | | - |
---|
112 | | - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); |
---|
113 | | - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); |
---|
114 | | - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
115 | | - MLX5_SET_TO_ONES(fte_match_set_misc, misc, |
---|
116 | | - source_eswitch_owner_vhca_id); |
---|
117 | | - |
---|
118 | | - if (attr->match_level == MLX5_MATCH_NONE) |
---|
119 | | - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
---|
120 | | - else |
---|
121 | | - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | |
---|
122 | | - MLX5_MATCH_MISC_PARAMETERS; |
---|
123 | | - |
---|
124 | | - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
---|
| 388 | + if (attr->outer_match_level != MLX5_MATCH_NONE) |
---|
| 389 | + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
---|
| 390 | + if (attr->inner_match_level != MLX5_MATCH_NONE) |
---|
125 | 391 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; |
---|
126 | 392 | |
---|
127 | 393 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
---|
128 | | - flow_act.modify_id = attr->mod_hdr_id; |
---|
| 394 | + flow_act.modify_hdr = attr->modify_hdr; |
---|
129 | 395 | |
---|
130 | | - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) |
---|
131 | | - flow_act.encap_id = attr->encap_id; |
---|
| 396 | + if (split) { |
---|
| 397 | + fwd_attr.chain = attr->chain; |
---|
| 398 | + fwd_attr.prio = attr->prio; |
---|
| 399 | + fwd_attr.vport = esw_attr->in_rep->vport; |
---|
132 | 400 | |
---|
133 | | - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i); |
---|
| 401 | + fdb = esw_vport_tbl_get(esw, &fwd_attr); |
---|
| 402 | + } else { |
---|
| 403 | + if (attr->chain || attr->prio) |
---|
| 404 | + fdb = mlx5_chains_get_table(chains, attr->chain, |
---|
| 405 | + attr->prio, 0); |
---|
| 406 | + else |
---|
| 407 | + fdb = attr->ft; |
---|
| 408 | + |
---|
| 409 | + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT)) |
---|
| 410 | + mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); |
---|
| 411 | + } |
---|
| 412 | + if (IS_ERR(fdb)) { |
---|
| 413 | + rule = ERR_CAST(fdb); |
---|
| 414 | + goto err_esw_get; |
---|
| 415 | + } |
---|
| 416 | + |
---|
| 417 | + mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr); |
---|
| 418 | + |
---|
| 419 | + if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) |
---|
| 420 | + rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr, |
---|
| 421 | + &flow_act, dest, i); |
---|
| 422 | + else |
---|
| 423 | + rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i); |
---|
134 | 424 | if (IS_ERR(rule)) |
---|
135 | 425 | goto err_add_rule; |
---|
136 | 426 | else |
---|
137 | | - esw->offloads.num_flows++; |
---|
| 427 | + atomic64_inc(&esw->offloads.num_flows); |
---|
138 | 428 | |
---|
139 | 429 | return rule; |
---|
140 | 430 | |
---|
141 | 431 | err_add_rule: |
---|
142 | | - mlx5_fc_destroy(esw->dev, counter); |
---|
143 | | -err_counter_alloc: |
---|
| 432 | + if (split) |
---|
| 433 | + esw_vport_tbl_put(esw, &fwd_attr); |
---|
| 434 | + else if (attr->chain || attr->prio) |
---|
| 435 | + mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); |
---|
| 436 | +err_esw_get: |
---|
| 437 | + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain) |
---|
| 438 | + mlx5_chains_put_table(chains, attr->dest_chain, 1, 0); |
---|
| 439 | +err_create_goto_table: |
---|
144 | 440 | return rule; |
---|
145 | 441 | } |
---|
146 | 442 | |
---|
147 | 443 | struct mlx5_flow_handle * |
---|
148 | 444 | mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, |
---|
149 | 445 | struct mlx5_flow_spec *spec, |
---|
150 | | - struct mlx5_esw_flow_attr *attr) |
---|
| 446 | + struct mlx5_flow_attr *attr) |
---|
151 | 447 | { |
---|
152 | 448 | struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; |
---|
153 | | - struct mlx5_flow_act flow_act = {0}; |
---|
| 449 | + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; |
---|
| 450 | + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; |
---|
| 451 | + struct mlx5_fs_chains *chains = esw_chains(esw); |
---|
| 452 | + struct mlx5_vport_tbl_attr fwd_attr; |
---|
| 453 | + struct mlx5_flow_table *fast_fdb; |
---|
| 454 | + struct mlx5_flow_table *fwd_fdb; |
---|
154 | 455 | struct mlx5_flow_handle *rule; |
---|
155 | | - void *misc; |
---|
156 | 456 | int i; |
---|
157 | 457 | |
---|
| 458 | + fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0); |
---|
| 459 | + if (IS_ERR(fast_fdb)) { |
---|
| 460 | + rule = ERR_CAST(fast_fdb); |
---|
| 461 | + goto err_get_fast; |
---|
| 462 | + } |
---|
| 463 | + |
---|
| 464 | + fwd_attr.chain = attr->chain; |
---|
| 465 | + fwd_attr.prio = attr->prio; |
---|
| 466 | + fwd_attr.vport = esw_attr->in_rep->vport; |
---|
| 467 | + fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr); |
---|
| 468 | + if (IS_ERR(fwd_fdb)) { |
---|
| 469 | + rule = ERR_CAST(fwd_fdb); |
---|
| 470 | + goto err_get_fwd; |
---|
| 471 | + } |
---|
| 472 | + |
---|
158 | 473 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
---|
159 | | - for (i = 0; i < attr->mirror_count; i++) { |
---|
| 474 | + for (i = 0; i < esw_attr->split_count; i++) { |
---|
160 | 475 | dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
---|
161 | | - dest[i].vport.num = attr->out_rep[i]->vport; |
---|
| 476 | + dest[i].vport.num = esw_attr->dests[i].rep->vport; |
---|
162 | 477 | dest[i].vport.vhca_id = |
---|
163 | | - MLX5_CAP_GEN(attr->out_mdev[i], vhca_id); |
---|
164 | | - dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); |
---|
| 478 | + MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id); |
---|
| 479 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
| 480 | + dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; |
---|
| 481 | + if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) { |
---|
| 482 | + dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; |
---|
| 483 | + dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat; |
---|
| 484 | + } |
---|
165 | 485 | } |
---|
166 | 486 | dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
---|
167 | | - dest[i].ft = esw->fdb_table.offloads.fwd_fdb, |
---|
| 487 | + dest[i].ft = fwd_fdb, |
---|
168 | 488 | i++; |
---|
169 | 489 | |
---|
170 | | - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); |
---|
171 | | - MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); |
---|
| 490 | + mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); |
---|
172 | 491 | |
---|
173 | | - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
174 | | - MLX5_SET(fte_match_set_misc, misc, |
---|
175 | | - source_eswitch_owner_vhca_id, |
---|
176 | | - MLX5_CAP_GEN(attr->in_mdev, vhca_id)); |
---|
| 492 | + if (attr->outer_match_level != MLX5_MATCH_NONE) |
---|
| 493 | + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
---|
177 | 494 | |
---|
178 | | - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); |
---|
179 | | - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); |
---|
180 | | - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
181 | | - MLX5_SET_TO_ONES(fte_match_set_misc, misc, |
---|
182 | | - source_eswitch_owner_vhca_id); |
---|
| 495 | + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; |
---|
| 496 | + rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); |
---|
183 | 497 | |
---|
184 | | - if (attr->match_level == MLX5_MATCH_NONE) |
---|
185 | | - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
---|
186 | | - else |
---|
187 | | - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | |
---|
188 | | - MLX5_MATCH_MISC_PARAMETERS; |
---|
| 498 | + if (IS_ERR(rule)) |
---|
| 499 | + goto add_err; |
---|
189 | 500 | |
---|
190 | | - rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i); |
---|
191 | | - |
---|
192 | | - if (!IS_ERR(rule)) |
---|
193 | | - esw->offloads.num_flows++; |
---|
| 501 | + atomic64_inc(&esw->offloads.num_flows); |
---|
194 | 502 | |
---|
195 | 503 | return rule; |
---|
| 504 | +add_err: |
---|
| 505 | + esw_vport_tbl_put(esw, &fwd_attr); |
---|
| 506 | +err_get_fwd: |
---|
| 507 | + mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); |
---|
| 508 | +err_get_fast: |
---|
| 509 | + return rule; |
---|
| 510 | +} |
---|
| 511 | + |
---|
/* Common teardown for offloaded and fwd rules: delete the HW rule, release
 * termination tables, and drop the chain/per-vport table references that
 * the corresponding add path took. @fwd_rule distinguishes rules created by
 * mlx5_eswitch_add_fwd_rule() from plain offloaded rules.
 */
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
	}

	if (fwd_rule) {
		/* Fwd rules always hold both a per-vport and a chain table ref. */
		esw_vport_tbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		if (attr->dest_chain)
			mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
	}
}
---|
198 | 555 | void |
---|
199 | 556 | mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, |
---|
200 | 557 | struct mlx5_flow_handle *rule, |
---|
201 | | - struct mlx5_esw_flow_attr *attr) |
---|
| 558 | + struct mlx5_flow_attr *attr) |
---|
202 | 559 | { |
---|
203 | | - struct mlx5_fc *counter = NULL; |
---|
| 560 | + __mlx5_eswitch_del_rule(esw, rule, attr, false); |
---|
| 561 | +} |
---|
204 | 562 | |
---|
205 | | - counter = mlx5_flow_rule_counter(rule); |
---|
206 | | - mlx5_del_flow_rules(rule); |
---|
207 | | - mlx5_fc_destroy(esw->dev, counter); |
---|
208 | | - esw->offloads.num_flows--; |
---|
| 563 | +void |
---|
| 564 | +mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, |
---|
| 565 | + struct mlx5_flow_handle *rule, |
---|
| 566 | + struct mlx5_flow_attr *attr) |
---|
| 567 | +{ |
---|
| 568 | + __mlx5_eswitch_del_rule(esw, rule, attr, true); |
---|
209 | 569 | } |
---|
210 | 570 | |
---|
211 | 571 | static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) |
---|
212 | 572 | { |
---|
213 | 573 | struct mlx5_eswitch_rep *rep; |
---|
214 | | - int vf_vport, err = 0; |
---|
| 574 | + int i, err = 0; |
---|
215 | 575 | |
---|
216 | 576 | esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); |
---|
217 | | - for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { |
---|
218 | | - rep = &esw->offloads.vport_reps[vf_vport]; |
---|
219 | | - if (!rep->rep_if[REP_ETH].valid) |
---|
| 577 | + mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) { |
---|
| 578 | + if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) |
---|
220 | 579 | continue; |
---|
221 | 580 | |
---|
222 | 581 | err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); |
---|
.. | .. |
---|
234 | 593 | struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; |
---|
235 | 594 | |
---|
236 | 595 | in_rep = attr->in_rep; |
---|
237 | | - out_rep = attr->out_rep[0]; |
---|
| 596 | + out_rep = attr->dests[0].rep; |
---|
238 | 597 | |
---|
239 | 598 | if (push) |
---|
240 | 599 | vport = in_rep; |
---|
.. | .. |
---|
255 | 614 | goto out_notsupp; |
---|
256 | 615 | |
---|
257 | 616 | in_rep = attr->in_rep; |
---|
258 | | - out_rep = attr->out_rep[0]; |
---|
| 617 | + out_rep = attr->dests[0].rep; |
---|
259 | 618 | |
---|
260 | | - if (push && in_rep->vport == FDB_UPLINK_VPORT) |
---|
| 619 | + if (push && in_rep->vport == MLX5_VPORT_UPLINK) |
---|
261 | 620 | goto out_notsupp; |
---|
262 | 621 | |
---|
263 | | - if (pop && out_rep->vport == FDB_UPLINK_VPORT) |
---|
| 622 | + if (pop && out_rep->vport == MLX5_VPORT_UPLINK) |
---|
264 | 623 | goto out_notsupp; |
---|
265 | 624 | |
---|
266 | 625 | /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ |
---|
267 | 626 | if (!push && !pop && fwd) |
---|
268 | | - if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT) |
---|
| 627 | + if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK) |
---|
269 | 628 | goto out_notsupp; |
---|
270 | 629 | |
---|
271 | 630 | /* protects against (1) setting rules with different vlans to push and |
---|
.. | .. |
---|
281 | 640 | } |
---|
282 | 641 | |
---|
283 | 642 | int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, |
---|
284 | | - struct mlx5_esw_flow_attr *attr) |
---|
| 643 | + struct mlx5_flow_attr *attr) |
---|
285 | 644 | { |
---|
286 | 645 | struct offloads_fdb *offloads = &esw->fdb_table.offloads; |
---|
| 646 | + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; |
---|
287 | 647 | struct mlx5_eswitch_rep *vport = NULL; |
---|
288 | 648 | bool push, pop, fwd; |
---|
289 | 649 | int err = 0; |
---|
.. | .. |
---|
294 | 654 | |
---|
295 | 655 | push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); |
---|
296 | 656 | pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); |
---|
297 | | - fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); |
---|
| 657 | + fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && |
---|
| 658 | + !attr->dest_chain); |
---|
298 | 659 | |
---|
299 | | - err = esw_add_vlan_action_check(attr, push, pop, fwd); |
---|
| 660 | + mutex_lock(&esw->state_lock); |
---|
| 661 | + |
---|
| 662 | + err = esw_add_vlan_action_check(esw_attr, push, pop, fwd); |
---|
300 | 663 | if (err) |
---|
301 | | - return err; |
---|
| 664 | + goto unlock; |
---|
302 | 665 | |
---|
303 | | - attr->vlan_handled = false; |
---|
| 666 | + attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; |
---|
304 | 667 | |
---|
305 | | - vport = esw_vlan_action_get_vport(attr, push, pop); |
---|
| 668 | + vport = esw_vlan_action_get_vport(esw_attr, push, pop); |
---|
306 | 669 | |
---|
307 | 670 | if (!push && !pop && fwd) { |
---|
308 | 671 | /* tracks VF --> wire rules without vlan push action */ |
---|
309 | | - if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) { |
---|
| 672 | + if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) { |
---|
310 | 673 | vport->vlan_refcount++; |
---|
311 | | - attr->vlan_handled = true; |
---|
| 674 | + attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; |
---|
312 | 675 | } |
---|
313 | 676 | |
---|
314 | | - return 0; |
---|
| 677 | + goto unlock; |
---|
315 | 678 | } |
---|
316 | 679 | |
---|
317 | 680 | if (!push && !pop) |
---|
318 | | - return 0; |
---|
| 681 | + goto unlock; |
---|
319 | 682 | |
---|
320 | 683 | if (!(offloads->vlan_push_pop_refcount)) { |
---|
321 | 684 | /* it's the 1st vlan rule, apply global vlan pop policy */ |
---|
.. | .. |
---|
329 | 692 | if (vport->vlan_refcount) |
---|
330 | 693 | goto skip_set_push; |
---|
331 | 694 | |
---|
332 | | - err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0, |
---|
333 | | - SET_VLAN_INSERT | SET_VLAN_STRIP); |
---|
| 695 | + err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0], |
---|
| 696 | + 0, SET_VLAN_INSERT | SET_VLAN_STRIP); |
---|
334 | 697 | if (err) |
---|
335 | 698 | goto out; |
---|
336 | | - vport->vlan = attr->vlan_vid[0]; |
---|
| 699 | + vport->vlan = esw_attr->vlan_vid[0]; |
---|
337 | 700 | skip_set_push: |
---|
338 | 701 | vport->vlan_refcount++; |
---|
339 | 702 | } |
---|
340 | 703 | out: |
---|
341 | 704 | if (!err) |
---|
342 | | - attr->vlan_handled = true; |
---|
| 705 | + attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; |
---|
| 706 | +unlock: |
---|
| 707 | + mutex_unlock(&esw->state_lock); |
---|
343 | 708 | return err; |
---|
344 | 709 | } |
---|
345 | 710 | |
---|
346 | 711 | int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, |
---|
347 | | - struct mlx5_esw_flow_attr *attr) |
---|
| 712 | + struct mlx5_flow_attr *attr) |
---|
348 | 713 | { |
---|
349 | 714 | struct offloads_fdb *offloads = &esw->fdb_table.offloads; |
---|
| 715 | + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; |
---|
350 | 716 | struct mlx5_eswitch_rep *vport = NULL; |
---|
351 | 717 | bool push, pop, fwd; |
---|
352 | 718 | int err = 0; |
---|
.. | .. |
---|
355 | 721 | if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) |
---|
356 | 722 | return 0; |
---|
357 | 723 | |
---|
358 | | - if (!attr->vlan_handled) |
---|
| 724 | + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED)) |
---|
359 | 725 | return 0; |
---|
360 | 726 | |
---|
361 | 727 | push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); |
---|
362 | 728 | pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); |
---|
363 | 729 | fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); |
---|
364 | 730 | |
---|
365 | | - vport = esw_vlan_action_get_vport(attr, push, pop); |
---|
| 731 | + mutex_lock(&esw->state_lock); |
---|
| 732 | + |
---|
| 733 | + vport = esw_vlan_action_get_vport(esw_attr, push, pop); |
---|
366 | 734 | |
---|
367 | 735 | if (!push && !pop && fwd) { |
---|
368 | 736 | /* tracks VF --> wire rules without vlan push action */ |
---|
369 | | - if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) |
---|
| 737 | + if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) |
---|
370 | 738 | vport->vlan_refcount--; |
---|
371 | 739 | |
---|
372 | | - return 0; |
---|
| 740 | + goto out; |
---|
373 | 741 | } |
---|
374 | 742 | |
---|
375 | 743 | if (push) { |
---|
.. | .. |
---|
387 | 755 | skip_unset_push: |
---|
388 | 756 | offloads->vlan_push_pop_refcount--; |
---|
389 | 757 | if (offloads->vlan_push_pop_refcount) |
---|
390 | | - return 0; |
---|
| 758 | + goto out; |
---|
391 | 759 | |
---|
392 | 760 | /* no more vlan rules, stop global vlan pop policy */ |
---|
393 | 761 | err = esw_set_global_vlan_pop(esw, 0); |
---|
394 | 762 | |
---|
395 | 763 | out: |
---|
| 764 | + mutex_unlock(&esw->state_lock); |
---|
396 | 765 | return err; |
---|
397 | 766 | } |
---|
398 | 767 | |
---|
399 | 768 | struct mlx5_flow_handle * |
---|
400 | | -mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) |
---|
| 769 | +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport, |
---|
| 770 | + u32 sqn) |
---|
401 | 771 | { |
---|
402 | 772 | struct mlx5_flow_act flow_act = {0}; |
---|
403 | 773 | struct mlx5_flow_destination dest = {}; |
---|
.. | .. |
---|
413 | 783 | |
---|
414 | 784 | misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); |
---|
415 | 785 | MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); |
---|
416 | | - MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */ |
---|
| 786 | + /* source vport is the esw manager */ |
---|
| 787 | + MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport); |
---|
417 | 788 | |
---|
418 | 789 | misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); |
---|
419 | 790 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); |
---|
.. | .. |
---|
424 | 795 | dest.vport.num = vport; |
---|
425 | 796 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
---|
426 | 797 | |
---|
427 | | - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, |
---|
428 | | - &flow_act, &dest, 1); |
---|
| 798 | + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, |
---|
| 799 | + spec, &flow_act, &dest, 1); |
---|
429 | 800 | if (IS_ERR(flow_rule)) |
---|
430 | 801 | esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); |
---|
431 | 802 | out: |
---|
.. | .. |
---|
437 | 808 | void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) |
---|
438 | 809 | { |
---|
439 | 810 | mlx5_del_flow_rules(rule); |
---|
| 811 | +} |
---|
| 812 | + |
---|
| 813 | +static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw) |
---|
| 814 | +{ |
---|
| 815 | + return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & |
---|
| 816 | + MLX5_FDB_TO_VPORT_REG_C_1; |
---|
| 817 | +} |
---|
| 818 | + |
---|
| 819 | +static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) |
---|
| 820 | +{ |
---|
| 821 | + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; |
---|
| 822 | + u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; |
---|
| 823 | + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; |
---|
| 824 | + u8 curr, wanted; |
---|
| 825 | + int err; |
---|
| 826 | + |
---|
| 827 | + if (!mlx5_eswitch_reg_c1_loopback_supported(esw) && |
---|
| 828 | + !mlx5_eswitch_vport_match_metadata_enabled(esw)) |
---|
| 829 | + return 0; |
---|
| 830 | + |
---|
| 831 | + MLX5_SET(query_esw_vport_context_in, in, opcode, |
---|
| 832 | + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); |
---|
| 833 | + err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out); |
---|
| 834 | + if (err) |
---|
| 835 | + return err; |
---|
| 836 | + |
---|
| 837 | + curr = MLX5_GET(query_esw_vport_context_out, out, |
---|
| 838 | + esw_vport_context.fdb_to_vport_reg_c_id); |
---|
| 839 | + wanted = MLX5_FDB_TO_VPORT_REG_C_0; |
---|
| 840 | + if (mlx5_eswitch_reg_c1_loopback_supported(esw)) |
---|
| 841 | + wanted |= MLX5_FDB_TO_VPORT_REG_C_1; |
---|
| 842 | + |
---|
| 843 | + if (enable) |
---|
| 844 | + curr |= wanted; |
---|
| 845 | + else |
---|
| 846 | + curr &= ~wanted; |
---|
| 847 | + |
---|
| 848 | + MLX5_SET(modify_esw_vport_context_in, min, |
---|
| 849 | + esw_vport_context.fdb_to_vport_reg_c_id, curr); |
---|
| 850 | + MLX5_SET(modify_esw_vport_context_in, min, |
---|
| 851 | + field_select.fdb_to_vport_reg_c_id, 1); |
---|
| 852 | + |
---|
| 853 | + err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min); |
---|
| 854 | + if (!err) { |
---|
| 855 | + if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1)) |
---|
| 856 | + esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; |
---|
| 857 | + else |
---|
| 858 | + esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; |
---|
| 859 | + } |
---|
| 860 | + |
---|
| 861 | + return err; |
---|
| 862 | +} |
---|
| 863 | + |
---|
| 864 | +static void peer_miss_rules_setup(struct mlx5_eswitch *esw, |
---|
| 865 | + struct mlx5_core_dev *peer_dev, |
---|
| 866 | + struct mlx5_flow_spec *spec, |
---|
| 867 | + struct mlx5_flow_destination *dest) |
---|
| 868 | +{ |
---|
| 869 | + void *misc; |
---|
| 870 | + |
---|
| 871 | + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
---|
| 872 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
---|
| 873 | + misc_parameters_2); |
---|
| 874 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, |
---|
| 875 | + mlx5_eswitch_get_vport_metadata_mask()); |
---|
| 876 | + |
---|
| 877 | + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; |
---|
| 878 | + } else { |
---|
| 879 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, |
---|
| 880 | + misc_parameters); |
---|
| 881 | + |
---|
| 882 | + MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, |
---|
| 883 | + MLX5_CAP_GEN(peer_dev, vhca_id)); |
---|
| 884 | + |
---|
| 885 | + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
---|
| 886 | + |
---|
| 887 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
---|
| 888 | + misc_parameters); |
---|
| 889 | + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); |
---|
| 890 | + MLX5_SET_TO_ONES(fte_match_set_misc, misc, |
---|
| 891 | + source_eswitch_owner_vhca_id); |
---|
| 892 | + } |
---|
| 893 | + |
---|
| 894 | + dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
---|
| 895 | + dest->vport.num = peer_dev->priv.eswitch->manager_vport; |
---|
| 896 | + dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); |
---|
| 897 | + dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; |
---|
| 898 | +} |
---|
| 899 | + |
---|
| 900 | +static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw, |
---|
| 901 | + struct mlx5_eswitch *peer_esw, |
---|
| 902 | + struct mlx5_flow_spec *spec, |
---|
| 903 | + u16 vport) |
---|
| 904 | +{ |
---|
| 905 | + void *misc; |
---|
| 906 | + |
---|
| 907 | + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
---|
| 908 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, |
---|
| 909 | + misc_parameters_2); |
---|
| 910 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, |
---|
| 911 | + mlx5_eswitch_get_vport_metadata_for_match(peer_esw, |
---|
| 912 | + vport)); |
---|
| 913 | + } else { |
---|
| 914 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, |
---|
| 915 | + misc_parameters); |
---|
| 916 | + MLX5_SET(fte_match_set_misc, misc, source_port, vport); |
---|
| 917 | + } |
---|
| 918 | +} |
---|
| 919 | + |
---|
| 920 | +static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, |
---|
| 921 | + struct mlx5_core_dev *peer_dev) |
---|
| 922 | +{ |
---|
| 923 | + struct mlx5_flow_destination dest = {}; |
---|
| 924 | + struct mlx5_flow_act flow_act = {0}; |
---|
| 925 | + struct mlx5_flow_handle **flows; |
---|
| 926 | + struct mlx5_flow_handle *flow; |
---|
| 927 | + struct mlx5_flow_spec *spec; |
---|
| 928 | + /* total vports is the same for both e-switches */ |
---|
| 929 | + int nvports = esw->total_vports; |
---|
| 930 | + void *misc; |
---|
| 931 | + int err, i; |
---|
| 932 | + |
---|
| 933 | + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); |
---|
| 934 | + if (!spec) |
---|
| 935 | + return -ENOMEM; |
---|
| 936 | + |
---|
| 937 | + peer_miss_rules_setup(esw, peer_dev, spec, &dest); |
---|
| 938 | + |
---|
| 939 | + flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL); |
---|
| 940 | + if (!flows) { |
---|
| 941 | + err = -ENOMEM; |
---|
| 942 | + goto alloc_flows_err; |
---|
| 943 | + } |
---|
| 944 | + |
---|
| 945 | + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
---|
| 946 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, |
---|
| 947 | + misc_parameters); |
---|
| 948 | + |
---|
| 949 | + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { |
---|
| 950 | + esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, |
---|
| 951 | + spec, MLX5_VPORT_PF); |
---|
| 952 | + |
---|
| 953 | + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, |
---|
| 954 | + spec, &flow_act, &dest, 1); |
---|
| 955 | + if (IS_ERR(flow)) { |
---|
| 956 | + err = PTR_ERR(flow); |
---|
| 957 | + goto add_pf_flow_err; |
---|
| 958 | + } |
---|
| 959 | + flows[MLX5_VPORT_PF] = flow; |
---|
| 960 | + } |
---|
| 961 | + |
---|
| 962 | + if (mlx5_ecpf_vport_exists(esw->dev)) { |
---|
| 963 | + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); |
---|
| 964 | + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, |
---|
| 965 | + spec, &flow_act, &dest, 1); |
---|
| 966 | + if (IS_ERR(flow)) { |
---|
| 967 | + err = PTR_ERR(flow); |
---|
| 968 | + goto add_ecpf_flow_err; |
---|
| 969 | + } |
---|
| 970 | + flows[mlx5_eswitch_ecpf_idx(esw)] = flow; |
---|
| 971 | + } |
---|
| 972 | + |
---|
| 973 | + mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) { |
---|
| 974 | + esw_set_peer_miss_rule_source_port(esw, |
---|
| 975 | + peer_dev->priv.eswitch, |
---|
| 976 | + spec, i); |
---|
| 977 | + |
---|
| 978 | + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, |
---|
| 979 | + spec, &flow_act, &dest, 1); |
---|
| 980 | + if (IS_ERR(flow)) { |
---|
| 981 | + err = PTR_ERR(flow); |
---|
| 982 | + goto add_vf_flow_err; |
---|
| 983 | + } |
---|
| 984 | + flows[i] = flow; |
---|
| 985 | + } |
---|
| 986 | + |
---|
| 987 | + esw->fdb_table.offloads.peer_miss_rules = flows; |
---|
| 988 | + |
---|
| 989 | + kvfree(spec); |
---|
| 990 | + return 0; |
---|
| 991 | + |
---|
| 992 | +add_vf_flow_err: |
---|
| 993 | + nvports = --i; |
---|
| 994 | + mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports) |
---|
| 995 | + mlx5_del_flow_rules(flows[i]); |
---|
| 996 | + |
---|
| 997 | + if (mlx5_ecpf_vport_exists(esw->dev)) |
---|
| 998 | + mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); |
---|
| 999 | +add_ecpf_flow_err: |
---|
| 1000 | + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) |
---|
| 1001 | + mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); |
---|
| 1002 | +add_pf_flow_err: |
---|
| 1003 | + esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); |
---|
| 1004 | + kvfree(flows); |
---|
| 1005 | +alloc_flows_err: |
---|
| 1006 | + kvfree(spec); |
---|
| 1007 | + return err; |
---|
| 1008 | +} |
---|
| 1009 | + |
---|
| 1010 | +static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) |
---|
| 1011 | +{ |
---|
| 1012 | + struct mlx5_flow_handle **flows; |
---|
| 1013 | + int i; |
---|
| 1014 | + |
---|
| 1015 | + flows = esw->fdb_table.offloads.peer_miss_rules; |
---|
| 1016 | + |
---|
| 1017 | + mlx5_esw_for_each_vf_vport_num_reverse(esw, i, |
---|
| 1018 | + mlx5_core_max_vfs(esw->dev)) |
---|
| 1019 | + mlx5_del_flow_rules(flows[i]); |
---|
| 1020 | + |
---|
| 1021 | + if (mlx5_ecpf_vport_exists(esw->dev)) |
---|
| 1022 | + mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); |
---|
| 1023 | + |
---|
| 1024 | + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) |
---|
| 1025 | + mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); |
---|
| 1026 | + |
---|
| 1027 | + kvfree(flows); |
---|
440 | 1028 | } |
---|
441 | 1029 | |
---|
442 | 1030 | static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) |
---|
.. | .. |
---|
465 | 1053 | dmac_c[0] = 0x01; |
---|
466 | 1054 | |
---|
467 | 1055 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
---|
468 | | - dest.vport.num = 0; |
---|
| 1056 | + dest.vport.num = esw->manager_vport; |
---|
469 | 1057 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
---|
470 | 1058 | |
---|
471 | | - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, |
---|
472 | | - &flow_act, &dest, 1); |
---|
| 1059 | + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, |
---|
| 1060 | + spec, &flow_act, &dest, 1); |
---|
473 | 1061 | if (IS_ERR(flow_rule)) { |
---|
474 | 1062 | err = PTR_ERR(flow_rule); |
---|
475 | 1063 | esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err); |
---|
.. | .. |
---|
483 | 1071 | dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, |
---|
484 | 1072 | outer_headers.dmac_47_16); |
---|
485 | 1073 | dmac_v[0] = 0x01; |
---|
486 | | - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, |
---|
487 | | - &flow_act, &dest, 1); |
---|
| 1074 | + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, |
---|
| 1075 | + spec, &flow_act, &dest, 1); |
---|
488 | 1076 | if (IS_ERR(flow_rule)) { |
---|
489 | 1077 | err = PTR_ERR(flow_rule); |
---|
490 | 1078 | esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err); |
---|
.. | .. |
---|
499 | 1087 | return err; |
---|
500 | 1088 | } |
---|
501 | 1089 | |
---|
502 | | -#define ESW_OFFLOADS_NUM_GROUPS 4 |
---|
503 | | - |
---|
504 | | -static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) |
---|
| 1090 | +struct mlx5_flow_handle * |
---|
| 1091 | +esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) |
---|
505 | 1092 | { |
---|
506 | | - struct mlx5_core_dev *dev = esw->dev; |
---|
507 | | - struct mlx5_flow_namespace *root_ns; |
---|
508 | | - struct mlx5_flow_table *fdb = NULL; |
---|
509 | | - int esw_size, err = 0; |
---|
510 | | - u32 flags = 0; |
---|
511 | | - u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | |
---|
512 | | - MLX5_CAP_GEN(dev, max_flow_counter_15_0); |
---|
| 1093 | + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; |
---|
| 1094 | + struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore; |
---|
| 1095 | + struct mlx5_flow_context *flow_context; |
---|
| 1096 | + struct mlx5_flow_handle *flow_rule; |
---|
| 1097 | + struct mlx5_flow_destination dest; |
---|
| 1098 | + struct mlx5_flow_spec *spec; |
---|
| 1099 | + void *misc; |
---|
513 | 1100 | |
---|
514 | | - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); |
---|
515 | | - if (!root_ns) { |
---|
516 | | - esw_warn(dev, "Failed to get FDB flow namespace\n"); |
---|
517 | | - err = -EOPNOTSUPP; |
---|
518 | | - goto out_namespace; |
---|
519 | | - } |
---|
| 1101 | + if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) |
---|
| 1102 | + return ERR_PTR(-EOPNOTSUPP); |
---|
520 | 1103 | |
---|
521 | | - esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", |
---|
522 | | - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), |
---|
523 | | - max_flow_counter, ESW_OFFLOADS_NUM_GROUPS); |
---|
| 1104 | + spec = kzalloc(sizeof(*spec), GFP_KERNEL); |
---|
| 1105 | + if (!spec) |
---|
| 1106 | + return ERR_PTR(-ENOMEM); |
---|
524 | 1107 | |
---|
525 | | - esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, |
---|
526 | | - 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); |
---|
| 1108 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
---|
| 1109 | + misc_parameters_2); |
---|
| 1110 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, |
---|
| 1111 | + ESW_CHAIN_TAG_METADATA_MASK); |
---|
| 1112 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, |
---|
| 1113 | + misc_parameters_2); |
---|
| 1114 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag); |
---|
| 1115 | + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; |
---|
| 1116 | + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | |
---|
| 1117 | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; |
---|
| 1118 | + flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id; |
---|
527 | 1119 | |
---|
528 | | - if (mlx5_esw_has_fwd_fdb(dev)) |
---|
529 | | - esw_size >>= 1; |
---|
| 1120 | + flow_context = &spec->flow_context; |
---|
| 1121 | + flow_context->flags |= FLOW_CONTEXT_HAS_TAG; |
---|
| 1122 | + flow_context->flow_tag = tag; |
---|
| 1123 | + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
---|
| 1124 | + dest.ft = esw->offloads.ft_offloads; |
---|
530 | 1125 | |
---|
531 | | - if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) |
---|
532 | | - flags |= MLX5_FLOW_TABLE_TUNNEL_EN; |
---|
| 1126 | + flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); |
---|
| 1127 | + kfree(spec); |
---|
533 | 1128 | |
---|
534 | | - fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, |
---|
535 | | - esw_size, |
---|
536 | | - ESW_OFFLOADS_NUM_GROUPS, 0, |
---|
537 | | - flags); |
---|
538 | | - if (IS_ERR(fdb)) { |
---|
539 | | - err = PTR_ERR(fdb); |
---|
540 | | - esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); |
---|
541 | | - goto out_namespace; |
---|
542 | | - } |
---|
543 | | - esw->fdb_table.offloads.fast_fdb = fdb; |
---|
| 1129 | + if (IS_ERR(flow_rule)) |
---|
| 1130 | + esw_warn(esw->dev, |
---|
| 1131 | + "Failed to create restore rule for tag: %d, err(%d)\n", |
---|
| 1132 | + tag, (int)PTR_ERR(flow_rule)); |
---|
544 | 1133 | |
---|
545 | | - if (!mlx5_esw_has_fwd_fdb(dev)) |
---|
546 | | - goto out_namespace; |
---|
547 | | - |
---|
548 | | - fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, |
---|
549 | | - esw_size, |
---|
550 | | - ESW_OFFLOADS_NUM_GROUPS, 1, |
---|
551 | | - flags); |
---|
552 | | - if (IS_ERR(fdb)) { |
---|
553 | | - err = PTR_ERR(fdb); |
---|
554 | | - esw_warn(dev, "Failed to create fwd table err %d\n", err); |
---|
555 | | - goto out_ft; |
---|
556 | | - } |
---|
557 | | - esw->fdb_table.offloads.fwd_fdb = fdb; |
---|
558 | | - |
---|
559 | | - return err; |
---|
560 | | - |
---|
561 | | -out_ft: |
---|
562 | | - mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); |
---|
563 | | -out_namespace: |
---|
564 | | - return err; |
---|
| 1134 | + return flow_rule; |
---|
565 | 1135 | } |
---|
566 | 1136 | |
---|
567 | | -static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) |
---|
| 1137 | +u32 |
---|
| 1138 | +esw_get_max_restore_tag(struct mlx5_eswitch *esw) |
---|
568 | 1139 | { |
---|
569 | | - if (mlx5_esw_has_fwd_fdb(esw->dev)) |
---|
570 | | - mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb); |
---|
571 | | - mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); |
---|
| 1140 | + return ESW_CHAIN_TAG_METADATA_MASK; |
---|
572 | 1141 | } |
---|
573 | 1142 | |
---|
574 | 1143 | #define MAX_PF_SQ 256 |
---|
575 | 1144 | #define MAX_SQ_NVPORTS 32 |
---|
576 | 1145 | |
---|
577 | | -static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) |
---|
| 1146 | +static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, |
---|
| 1147 | + u32 *flow_group_in) |
---|
| 1148 | +{ |
---|
| 1149 | + void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, |
---|
| 1150 | + flow_group_in, |
---|
| 1151 | + match_criteria); |
---|
| 1152 | + |
---|
| 1153 | + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
---|
| 1154 | + MLX5_SET(create_flow_group_in, flow_group_in, |
---|
| 1155 | + match_criteria_enable, |
---|
| 1156 | + MLX5_MATCH_MISC_PARAMETERS_2); |
---|
| 1157 | + |
---|
| 1158 | + MLX5_SET(fte_match_param, match_criteria, |
---|
| 1159 | + misc_parameters_2.metadata_reg_c_0, |
---|
| 1160 | + mlx5_eswitch_get_vport_metadata_mask()); |
---|
| 1161 | + } else { |
---|
| 1162 | + MLX5_SET(create_flow_group_in, flow_group_in, |
---|
| 1163 | + match_criteria_enable, |
---|
| 1164 | + MLX5_MATCH_MISC_PARAMETERS); |
---|
| 1165 | + |
---|
| 1166 | + MLX5_SET_TO_ONES(fte_match_param, match_criteria, |
---|
| 1167 | + misc_parameters.source_port); |
---|
| 1168 | + } |
---|
| 1169 | +} |
---|
| 1170 | + |
---|
| 1171 | +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
---|
| 1172 | +#define fdb_modify_header_fwd_to_table_supported(esw) \ |
---|
| 1173 | + (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table)) |
---|
| 1174 | +static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags) |
---|
| 1175 | +{ |
---|
| 1176 | + struct mlx5_core_dev *dev = esw->dev; |
---|
| 1177 | + |
---|
| 1178 | + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level)) |
---|
| 1179 | + *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; |
---|
| 1180 | + |
---|
| 1181 | + if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) && |
---|
| 1182 | + esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { |
---|
| 1183 | + *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED; |
---|
| 1184 | + esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n"); |
---|
| 1185 | + } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) { |
---|
| 1186 | + *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED; |
---|
| 1187 | + esw_warn(dev, "Tc chains and priorities offload aren't supported\n"); |
---|
| 1188 | + } else if (!fdb_modify_header_fwd_to_table_supported(esw)) { |
---|
| 1189 | + /* Disabled when ttl workaround is needed, e.g |
---|
| 1190 | + * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig |
---|
| 1191 | + */ |
---|
| 1192 | + esw_warn(dev, |
---|
| 1193 | + "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n"); |
---|
| 1194 | + *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED; |
---|
| 1195 | + } else { |
---|
| 1196 | + *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED; |
---|
| 1197 | + esw_info(dev, "Supported tc chains and prios offload\n"); |
---|
| 1198 | + } |
---|
| 1199 | + |
---|
| 1200 | + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) |
---|
| 1201 | + *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED; |
---|
| 1202 | +} |
---|
| 1203 | + |
---|
| 1204 | +static int |
---|
| 1205 | +esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) |
---|
| 1206 | +{ |
---|
| 1207 | + struct mlx5_core_dev *dev = esw->dev; |
---|
| 1208 | + struct mlx5_flow_table *nf_ft, *ft; |
---|
| 1209 | + struct mlx5_chains_attr attr = {}; |
---|
| 1210 | + struct mlx5_fs_chains *chains; |
---|
| 1211 | + u32 fdb_max; |
---|
| 1212 | + int err; |
---|
| 1213 | + |
---|
| 1214 | + fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); |
---|
| 1215 | + |
---|
| 1216 | + esw_init_chains_offload_flags(esw, &attr.flags); |
---|
| 1217 | + attr.ns = MLX5_FLOW_NAMESPACE_FDB; |
---|
| 1218 | + attr.max_ft_sz = fdb_max; |
---|
| 1219 | + attr.max_grp_num = esw->params.large_group_num; |
---|
| 1220 | + attr.default_ft = miss_fdb; |
---|
| 1221 | + attr.max_restore_tag = esw_get_max_restore_tag(esw); |
---|
| 1222 | + |
---|
| 1223 | + chains = mlx5_chains_create(dev, &attr); |
---|
| 1224 | + if (IS_ERR(chains)) { |
---|
| 1225 | + err = PTR_ERR(chains); |
---|
| 1226 | + esw_warn(dev, "Failed to create fdb chains err(%d)\n", err); |
---|
| 1227 | + return err; |
---|
| 1228 | + } |
---|
| 1229 | + |
---|
| 1230 | + esw->fdb_table.offloads.esw_chains_priv = chains; |
---|
| 1231 | + |
---|
| 1232 | + /* Create tc_end_ft which is the always created ft chain */ |
---|
| 1233 | + nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains), |
---|
| 1234 | + 1, 0); |
---|
| 1235 | + if (IS_ERR(nf_ft)) { |
---|
| 1236 | + err = PTR_ERR(nf_ft); |
---|
| 1237 | + goto nf_ft_err; |
---|
| 1238 | + } |
---|
| 1239 | + |
---|
| 1240 | + /* Always open the root for fast path */ |
---|
| 1241 | + ft = mlx5_chains_get_table(chains, 0, 1, 0); |
---|
| 1242 | + if (IS_ERR(ft)) { |
---|
| 1243 | + err = PTR_ERR(ft); |
---|
| 1244 | + goto level_0_err; |
---|
| 1245 | + } |
---|
| 1246 | + |
---|
| 1247 | + /* Open level 1 for split fdb rules now if prios isn't supported */ |
---|
| 1248 | + if (!mlx5_chains_prios_supported(chains)) { |
---|
| 1249 | + err = mlx5_esw_vport_tbl_get(esw); |
---|
| 1250 | + if (err) |
---|
| 1251 | + goto level_1_err; |
---|
| 1252 | + } |
---|
| 1253 | + |
---|
| 1254 | + mlx5_chains_set_end_ft(chains, nf_ft); |
---|
| 1255 | + |
---|
| 1256 | + return 0; |
---|
| 1257 | + |
---|
| 1258 | +level_1_err: |
---|
| 1259 | + mlx5_chains_put_table(chains, 0, 1, 0); |
---|
| 1260 | +level_0_err: |
---|
| 1261 | + mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0); |
---|
| 1262 | +nf_ft_err: |
---|
| 1263 | + mlx5_chains_destroy(chains); |
---|
| 1264 | + esw->fdb_table.offloads.esw_chains_priv = NULL; |
---|
| 1265 | + |
---|
| 1266 | + return err; |
---|
| 1267 | +} |
---|
| 1268 | + |
---|
| 1269 | +static void |
---|
| 1270 | +esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains) |
---|
| 1271 | +{ |
---|
| 1272 | + if (!mlx5_chains_prios_supported(chains)) |
---|
| 1273 | + mlx5_esw_vport_tbl_put(esw); |
---|
| 1274 | + mlx5_chains_put_table(chains, 0, 1, 0); |
---|
| 1275 | + mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0); |
---|
| 1276 | + mlx5_chains_destroy(chains); |
---|
| 1277 | +} |
---|
| 1278 | + |
---|
| 1279 | +#else /* CONFIG_MLX5_CLS_ACT */ |
---|
| 1280 | + |
---|
| 1281 | +static int |
---|
| 1282 | +esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) |
---|
| 1283 | +{ return 0; } |
---|
| 1284 | + |
---|
| 1285 | +static void |
---|
| 1286 | +esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains) |
---|
| 1287 | +{} |
---|
| 1288 | + |
---|
| 1289 | +#endif |
---|
| 1290 | + |
---|
| 1291 | +static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) |
---|
578 | 1292 | { |
---|
579 | 1293 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
---|
580 | 1294 | struct mlx5_flow_table_attr ft_attr = {}; |
---|
581 | 1295 | struct mlx5_core_dev *dev = esw->dev; |
---|
582 | 1296 | struct mlx5_flow_namespace *root_ns; |
---|
583 | 1297 | struct mlx5_flow_table *fdb = NULL; |
---|
| 1298 | + u32 flags = 0, *flow_group_in; |
---|
584 | 1299 | int table_size, ix, err = 0; |
---|
585 | 1300 | struct mlx5_flow_group *g; |
---|
586 | 1301 | void *match_criteria; |
---|
587 | | - u32 *flow_group_in; |
---|
588 | 1302 | u8 *dmac; |
---|
589 | 1303 | |
---|
590 | 1304 | esw_debug(esw->dev, "Create offloads FDB Tables\n"); |
---|
| 1305 | + |
---|
591 | 1306 | flow_group_in = kvzalloc(inlen, GFP_KERNEL); |
---|
592 | 1307 | if (!flow_group_in) |
---|
593 | 1308 | return -ENOMEM; |
---|
.. | .. |
---|
598 | 1313 | err = -EOPNOTSUPP; |
---|
599 | 1314 | goto ns_err; |
---|
600 | 1315 | } |
---|
| 1316 | + esw->fdb_table.offloads.ns = root_ns; |
---|
| 1317 | + err = mlx5_flow_namespace_set_mode(root_ns, |
---|
| 1318 | + esw->dev->priv.steering->mode); |
---|
| 1319 | + if (err) { |
---|
| 1320 | + esw_warn(dev, "Failed to set FDB namespace steering mode\n"); |
---|
| 1321 | + goto ns_err; |
---|
| 1322 | + } |
---|
601 | 1323 | |
---|
602 | | - err = esw_create_offloads_fast_fdb_table(esw); |
---|
603 | | - if (err) |
---|
604 | | - goto fast_fdb_err; |
---|
| 1324 | + table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + |
---|
| 1325 | + MLX5_ESW_MISS_FLOWS + esw->total_vports; |
---|
605 | 1326 | |
---|
606 | | - table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2; |
---|
| 1327 | + /* create the slow path fdb with encap set, so further table instances |
---|
| 1328 | + * can be created at run time while VFs are probed if the FW allows that. |
---|
| 1329 | + */ |
---|
| 1330 | + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) |
---|
| 1331 | + flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | |
---|
| 1332 | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); |
---|
607 | 1333 | |
---|
| 1334 | + ft_attr.flags = flags; |
---|
608 | 1335 | ft_attr.max_fte = table_size; |
---|
609 | 1336 | ft_attr.prio = FDB_SLOW_PATH; |
---|
610 | 1337 | |
---|
.. | .. |
---|
616 | 1343 | } |
---|
617 | 1344 | esw->fdb_table.offloads.slow_fdb = fdb; |
---|
618 | 1345 | |
---|
| 1346 | + err = esw_chains_create(esw, fdb); |
---|
| 1347 | + if (err) { |
---|
| 1348 | + esw_warn(dev, "Failed to open fdb chains err(%d)\n", err); |
---|
| 1349 | + goto fdb_chains_err; |
---|
| 1350 | + } |
---|
| 1351 | + |
---|
619 | 1352 | /* create send-to-vport group */ |
---|
620 | | - memset(flow_group_in, 0, inlen); |
---|
621 | 1353 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, |
---|
622 | 1354 | MLX5_MATCH_MISC_PARAMETERS); |
---|
623 | 1355 | |
---|
.. | .. |
---|
626 | 1358 | MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); |
---|
627 | 1359 | MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); |
---|
628 | 1360 | |
---|
629 | | - ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; |
---|
| 1361 | + ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ; |
---|
630 | 1362 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); |
---|
631 | 1363 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); |
---|
632 | 1364 | |
---|
.. | .. |
---|
637 | 1369 | goto send_vport_err; |
---|
638 | 1370 | } |
---|
639 | 1371 | esw->fdb_table.offloads.send_to_vport_grp = g; |
---|
| 1372 | + |
---|
| 1373 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { |
---|
| 1374 | + /* create peer esw miss group */ |
---|
| 1375 | + memset(flow_group_in, 0, inlen); |
---|
| 1376 | + |
---|
| 1377 | + esw_set_flow_group_source_port(esw, flow_group_in); |
---|
| 1378 | + |
---|
| 1379 | + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
---|
| 1380 | + match_criteria = MLX5_ADDR_OF(create_flow_group_in, |
---|
| 1381 | + flow_group_in, |
---|
| 1382 | + match_criteria); |
---|
| 1383 | + |
---|
| 1384 | + MLX5_SET_TO_ONES(fte_match_param, match_criteria, |
---|
| 1385 | + misc_parameters.source_eswitch_owner_vhca_id); |
---|
| 1386 | + |
---|
| 1387 | + MLX5_SET(create_flow_group_in, flow_group_in, |
---|
| 1388 | + source_eswitch_owner_vhca_id_valid, 1); |
---|
| 1389 | + } |
---|
| 1390 | + |
---|
| 1391 | + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); |
---|
| 1392 | + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, |
---|
| 1393 | + ix + esw->total_vports - 1); |
---|
| 1394 | + ix += esw->total_vports; |
---|
| 1395 | + |
---|
| 1396 | + g = mlx5_create_flow_group(fdb, flow_group_in); |
---|
| 1397 | + if (IS_ERR(g)) { |
---|
| 1398 | + err = PTR_ERR(g); |
---|
| 1399 | + esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); |
---|
| 1400 | + goto peer_miss_err; |
---|
| 1401 | + } |
---|
| 1402 | + esw->fdb_table.offloads.peer_miss_grp = g; |
---|
| 1403 | + } |
---|
640 | 1404 | |
---|
641 | 1405 | /* create miss group */ |
---|
642 | 1406 | memset(flow_group_in, 0, inlen); |
---|
.. | .. |
---|
649 | 1413 | dmac[0] = 0x01; |
---|
650 | 1414 | |
---|
651 | 1415 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); |
---|
652 | | - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2); |
---|
| 1416 | + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, |
---|
| 1417 | + ix + MLX5_ESW_MISS_FLOWS); |
---|
653 | 1418 | |
---|
654 | 1419 | g = mlx5_create_flow_group(fdb, flow_group_in); |
---|
655 | 1420 | if (IS_ERR(g)) { |
---|
.. | .. |
---|
669 | 1434 | miss_rule_err: |
---|
670 | 1435 | mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); |
---|
671 | 1436 | miss_err: |
---|
| 1437 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
| 1438 | + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); |
---|
| 1439 | +peer_miss_err: |
---|
672 | 1440 | mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); |
---|
673 | 1441 | send_vport_err: |
---|
| 1442 | + esw_chains_destroy(esw, esw_chains(esw)); |
---|
| 1443 | +fdb_chains_err: |
---|
674 | 1444 | mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); |
---|
675 | 1445 | slow_fdb_err: |
---|
676 | | - esw_destroy_offloads_fast_fdb_table(esw); |
---|
677 | | -fast_fdb_err: |
---|
| 1446 | + /* Holds true only as long as DMFS is the default */ |
---|
| 1447 | + mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS); |
---|
678 | 1448 | ns_err: |
---|
679 | 1449 | kvfree(flow_group_in); |
---|
680 | 1450 | return err; |
---|
.. | .. |
---|
682 | 1452 | |
---|
683 | 1453 | static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) |
---|
684 | 1454 | { |
---|
685 | | - if (!esw->fdb_table.offloads.fast_fdb) |
---|
| 1455 | + if (!esw->fdb_table.offloads.slow_fdb) |
---|
686 | 1456 | return; |
---|
687 | 1457 | |
---|
688 | 1458 | esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); |
---|
689 | 1459 | mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); |
---|
690 | 1460 | mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); |
---|
691 | 1461 | mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); |
---|
| 1462 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
| 1463 | + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); |
---|
692 | 1464 | mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); |
---|
693 | 1465 | |
---|
| 1466 | + esw_chains_destroy(esw, esw_chains(esw)); |
---|
| 1467 | + |
---|
694 | 1468 | mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); |
---|
695 | | - esw_destroy_offloads_fast_fdb_table(esw); |
---|
| 1469 | + /* Holds true only as long as DMFS is the default */ |
---|
| 1470 | + mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, |
---|
| 1471 | + MLX5_FLOW_STEERING_MODE_DMFS); |
---|
696 | 1472 | } |
---|
697 | 1473 | |
---|
698 | 1474 | static int esw_create_offloads_table(struct mlx5_eswitch *esw) |
---|
.. | .. |
---|
709 | 1485 | return -EOPNOTSUPP; |
---|
710 | 1486 | } |
---|
711 | 1487 | |
---|
712 | | - ft_attr.max_fte = dev->priv.sriov.num_vfs + 2; |
---|
| 1488 | + ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS; |
---|
| 1489 | + ft_attr.prio = 1; |
---|
713 | 1490 | |
---|
714 | 1491 | ft_offloads = mlx5_create_flow_table(ns, &ft_attr); |
---|
715 | 1492 | if (IS_ERR(ft_offloads)) { |
---|
.. | .. |
---|
733 | 1510 | { |
---|
734 | 1511 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
---|
735 | 1512 | struct mlx5_flow_group *g; |
---|
736 | | - struct mlx5_priv *priv = &esw->dev->priv; |
---|
737 | 1513 | u32 *flow_group_in; |
---|
738 | | - void *match_criteria, *misc; |
---|
| 1514 | + int nvports; |
---|
739 | 1515 | int err = 0; |
---|
740 | | - int nvports = priv->sriov.num_vfs + 2; |
---|
741 | 1516 | |
---|
| 1517 | + nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; |
---|
742 | 1518 | flow_group_in = kvzalloc(inlen, GFP_KERNEL); |
---|
743 | 1519 | if (!flow_group_in) |
---|
744 | 1520 | return -ENOMEM; |
---|
745 | 1521 | |
---|
746 | 1522 | /* create vport rx group */ |
---|
747 | | - memset(flow_group_in, 0, inlen); |
---|
748 | | - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, |
---|
749 | | - MLX5_MATCH_MISC_PARAMETERS); |
---|
750 | | - |
---|
751 | | - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); |
---|
752 | | - misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters); |
---|
753 | | - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); |
---|
| 1523 | + esw_set_flow_group_source_port(esw, flow_group_in); |
---|
754 | 1524 | |
---|
755 | 1525 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); |
---|
756 | 1526 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); |
---|
.. | .. |
---|
775 | 1545 | } |
---|
776 | 1546 | |
---|
777 | 1547 | struct mlx5_flow_handle * |
---|
778 | | -mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) |
---|
| 1548 | +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, |
---|
| 1549 | + struct mlx5_flow_destination *dest) |
---|
779 | 1550 | { |
---|
780 | 1551 | struct mlx5_flow_act flow_act = {0}; |
---|
781 | | - struct mlx5_flow_destination dest = {}; |
---|
782 | 1552 | struct mlx5_flow_handle *flow_rule; |
---|
783 | 1553 | struct mlx5_flow_spec *spec; |
---|
784 | 1554 | void *misc; |
---|
.. | .. |
---|
789 | 1559 | goto out; |
---|
790 | 1560 | } |
---|
791 | 1561 | |
---|
792 | | - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); |
---|
793 | | - MLX5_SET(fte_match_set_misc, misc, source_port, vport); |
---|
| 1562 | + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
---|
| 1563 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); |
---|
| 1564 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, |
---|
| 1565 | + mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); |
---|
794 | 1566 | |
---|
795 | | - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); |
---|
796 | | - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); |
---|
| 1567 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); |
---|
| 1568 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, |
---|
| 1569 | + mlx5_eswitch_get_vport_metadata_mask()); |
---|
797 | 1570 | |
---|
798 | | - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
---|
799 | | - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; |
---|
800 | | - dest.tir_num = tirn; |
---|
| 1571 | + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; |
---|
| 1572 | + } else { |
---|
| 1573 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); |
---|
| 1574 | + MLX5_SET(fte_match_set_misc, misc, source_port, vport); |
---|
| 1575 | + |
---|
| 1576 | + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); |
---|
| 1577 | + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); |
---|
| 1578 | + |
---|
| 1579 | + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
---|
| 1580 | + } |
---|
801 | 1581 | |
---|
802 | 1582 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
---|
803 | 1583 | flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, |
---|
804 | | - &flow_act, &dest, 1); |
---|
| 1584 | + &flow_act, dest, 1); |
---|
805 | 1585 | if (IS_ERR(flow_rule)) { |
---|
806 | 1586 | esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); |
---|
807 | 1587 | goto out; |
---|
.. | .. |
---|
812 | 1592 | return flow_rule; |
---|
813 | 1593 | } |
---|
814 | 1594 | |
---|
815 | | -static int esw_offloads_start(struct mlx5_eswitch *esw) |
---|
816 | | -{ |
---|
817 | | - int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
---|
818 | 1595 | |
---|
819 | | - if (esw->mode != SRIOV_LEGACY) { |
---|
820 | | - esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); |
---|
821 | | - return -EINVAL; |
---|
| 1596 | +static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode) |
---|
| 1597 | +{ |
---|
| 1598 | + u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; |
---|
| 1599 | + struct mlx5_core_dev *dev = esw->dev; |
---|
| 1600 | + int vport; |
---|
| 1601 | + |
---|
| 1602 | + if (!MLX5_CAP_GEN(dev, vport_group_manager)) |
---|
| 1603 | + return -EOPNOTSUPP; |
---|
| 1604 | + |
---|
| 1605 | + if (esw->mode == MLX5_ESWITCH_NONE) |
---|
| 1606 | + return -EOPNOTSUPP; |
---|
| 1607 | + |
---|
| 1608 | + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
---|
| 1609 | + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
---|
| 1610 | + mlx5_mode = MLX5_INLINE_MODE_NONE; |
---|
| 1611 | + goto out; |
---|
| 1612 | + case MLX5_CAP_INLINE_MODE_L2: |
---|
| 1613 | + mlx5_mode = MLX5_INLINE_MODE_L2; |
---|
| 1614 | + goto out; |
---|
| 1615 | + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: |
---|
| 1616 | + goto query_vports; |
---|
822 | 1617 | } |
---|
823 | 1618 | |
---|
824 | | - mlx5_eswitch_disable_sriov(esw); |
---|
825 | | - err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); |
---|
| 1619 | +query_vports: |
---|
| 1620 | + mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); |
---|
| 1621 | + mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { |
---|
| 1622 | + mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); |
---|
| 1623 | + if (prev_mlx5_mode != mlx5_mode) |
---|
| 1624 | + return -EINVAL; |
---|
| 1625 | + prev_mlx5_mode = mlx5_mode; |
---|
| 1626 | + } |
---|
| 1627 | + |
---|
| 1628 | +out: |
---|
| 1629 | + *mode = mlx5_mode; |
---|
| 1630 | + return 0; |
---|
| 1631 | +} |
---|
| 1632 | + |
---|
| 1633 | +static void esw_destroy_restore_table(struct mlx5_eswitch *esw) |
---|
| 1634 | +{ |
---|
| 1635 | + struct mlx5_esw_offload *offloads = &esw->offloads; |
---|
| 1636 | + |
---|
| 1637 | + if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) |
---|
| 1638 | + return; |
---|
| 1639 | + |
---|
| 1640 | + mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); |
---|
| 1641 | + mlx5_destroy_flow_group(offloads->restore_group); |
---|
| 1642 | + mlx5_destroy_flow_table(offloads->ft_offloads_restore); |
---|
| 1643 | +} |
---|
| 1644 | + |
---|
| 1645 | +static int esw_create_restore_table(struct mlx5_eswitch *esw) |
---|
| 1646 | +{ |
---|
| 1647 | + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; |
---|
| 1648 | + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
---|
| 1649 | + struct mlx5_flow_table_attr ft_attr = {}; |
---|
| 1650 | + struct mlx5_core_dev *dev = esw->dev; |
---|
| 1651 | + struct mlx5_flow_namespace *ns; |
---|
| 1652 | + struct mlx5_modify_hdr *mod_hdr; |
---|
| 1653 | + void *match_criteria, *misc; |
---|
| 1654 | + struct mlx5_flow_table *ft; |
---|
| 1655 | + struct mlx5_flow_group *g; |
---|
| 1656 | + u32 *flow_group_in; |
---|
| 1657 | + int err = 0; |
---|
| 1658 | + |
---|
| 1659 | + if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) |
---|
| 1660 | + return 0; |
---|
| 1661 | + |
---|
| 1662 | + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); |
---|
| 1663 | + if (!ns) { |
---|
| 1664 | + esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); |
---|
| 1665 | + return -EOPNOTSUPP; |
---|
| 1666 | + } |
---|
| 1667 | + |
---|
| 1668 | + flow_group_in = kvzalloc(inlen, GFP_KERNEL); |
---|
| 1669 | + if (!flow_group_in) { |
---|
| 1670 | + err = -ENOMEM; |
---|
| 1671 | + goto out_free; |
---|
| 1672 | + } |
---|
| 1673 | + |
---|
| 1674 | + ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS; |
---|
| 1675 | + ft = mlx5_create_flow_table(ns, &ft_attr); |
---|
| 1676 | + if (IS_ERR(ft)) { |
---|
| 1677 | + err = PTR_ERR(ft); |
---|
| 1678 | + esw_warn(esw->dev, "Failed to create restore table, err %d\n", |
---|
| 1679 | + err); |
---|
| 1680 | + goto out_free; |
---|
| 1681 | + } |
---|
| 1682 | + |
---|
| 1683 | + memset(flow_group_in, 0, inlen); |
---|
| 1684 | + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, |
---|
| 1685 | + match_criteria); |
---|
| 1686 | + misc = MLX5_ADDR_OF(fte_match_param, match_criteria, |
---|
| 1687 | + misc_parameters_2); |
---|
| 1688 | + |
---|
| 1689 | + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, |
---|
| 1690 | + ESW_CHAIN_TAG_METADATA_MASK); |
---|
| 1691 | + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); |
---|
| 1692 | + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, |
---|
| 1693 | + ft_attr.max_fte - 1); |
---|
| 1694 | + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, |
---|
| 1695 | + MLX5_MATCH_MISC_PARAMETERS_2); |
---|
| 1696 | + g = mlx5_create_flow_group(ft, flow_group_in); |
---|
| 1697 | + if (IS_ERR(g)) { |
---|
| 1698 | + err = PTR_ERR(g); |
---|
| 1699 | + esw_warn(dev, "Failed to create restore flow group, err: %d\n", |
---|
| 1700 | + err); |
---|
| 1701 | + goto err_group; |
---|
| 1702 | + } |
---|
| 1703 | + |
---|
| 1704 | + MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); |
---|
| 1705 | + MLX5_SET(copy_action_in, modact, src_field, |
---|
| 1706 | + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); |
---|
| 1707 | + MLX5_SET(copy_action_in, modact, dst_field, |
---|
| 1708 | + MLX5_ACTION_IN_FIELD_METADATA_REG_B); |
---|
| 1709 | + mod_hdr = mlx5_modify_header_alloc(esw->dev, |
---|
| 1710 | + MLX5_FLOW_NAMESPACE_KERNEL, 1, |
---|
| 1711 | + modact); |
---|
| 1712 | + if (IS_ERR(mod_hdr)) { |
---|
| 1713 | + err = PTR_ERR(mod_hdr); |
---|
| 1714 | + esw_warn(dev, "Failed to create restore mod header, err: %d\n", |
---|
| 1715 | + err); |
---|
| 1716 | + goto err_mod_hdr; |
---|
| 1717 | + } |
---|
| 1718 | + |
---|
| 1719 | + esw->offloads.ft_offloads_restore = ft; |
---|
| 1720 | + esw->offloads.restore_group = g; |
---|
| 1721 | + esw->offloads.restore_copy_hdr_id = mod_hdr; |
---|
| 1722 | + |
---|
| 1723 | + kvfree(flow_group_in); |
---|
| 1724 | + |
---|
| 1725 | + return 0; |
---|
| 1726 | + |
---|
| 1727 | +err_mod_hdr: |
---|
| 1728 | + mlx5_destroy_flow_group(g); |
---|
| 1729 | +err_group: |
---|
| 1730 | + mlx5_destroy_flow_table(ft); |
---|
| 1731 | +out_free: |
---|
| 1732 | + kvfree(flow_group_in); |
---|
| 1733 | + |
---|
| 1734 | + return err; |
---|
| 1735 | +} |
---|
| 1736 | + |
---|
| 1737 | +static int esw_offloads_start(struct mlx5_eswitch *esw, |
---|
| 1738 | + struct netlink_ext_ack *extack) |
---|
| 1739 | +{ |
---|
| 1740 | + int err, err1; |
---|
| 1741 | + |
---|
| 1742 | + mlx5_eswitch_disable_locked(esw, false); |
---|
| 1743 | + err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, |
---|
| 1744 | + esw->dev->priv.sriov.num_vfs); |
---|
826 | 1745 | if (err) { |
---|
827 | | - esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); |
---|
828 | | - err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); |
---|
829 | | - if (err1) |
---|
830 | | - esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); |
---|
| 1746 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 1747 | + "Failed setting eswitch to offloads"); |
---|
| 1748 | + err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, |
---|
| 1749 | + MLX5_ESWITCH_IGNORE_NUM_VFS); |
---|
| 1750 | + if (err1) { |
---|
| 1751 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 1752 | + "Failed setting eswitch back to legacy"); |
---|
| 1753 | + } |
---|
831 | 1754 | } |
---|
832 | 1755 | if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { |
---|
833 | 1756 | if (mlx5_eswitch_inline_mode_get(esw, |
---|
834 | | - num_vfs, |
---|
835 | 1757 | &esw->offloads.inline_mode)) { |
---|
836 | 1758 | esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; |
---|
837 | | - esw_warn(esw->dev, "Inline mode is different between vports\n"); |
---|
| 1759 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 1760 | + "Inline mode is different between vports"); |
---|
838 | 1761 | } |
---|
839 | 1762 | } |
---|
840 | 1763 | return err; |
---|
.. | .. |
---|
847 | 1770 | |
---|
848 | 1771 | int esw_offloads_init_reps(struct mlx5_eswitch *esw) |
---|
849 | 1772 | { |
---|
850 | | - int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); |
---|
851 | | - struct mlx5_core_dev *dev = esw->dev; |
---|
852 | | - struct mlx5_esw_offload *offloads; |
---|
| 1773 | + int total_vports = esw->total_vports; |
---|
853 | 1774 | struct mlx5_eswitch_rep *rep; |
---|
854 | | - u8 hw_id[ETH_ALEN]; |
---|
855 | | - int vport; |
---|
| 1775 | + int vport_index; |
---|
| 1776 | + u8 rep_type; |
---|
856 | 1777 | |
---|
857 | | - esw->offloads.vport_reps = kcalloc(total_vfs, |
---|
| 1778 | + esw->offloads.vport_reps = kcalloc(total_vports, |
---|
858 | 1779 | sizeof(struct mlx5_eswitch_rep), |
---|
859 | 1780 | GFP_KERNEL); |
---|
860 | 1781 | if (!esw->offloads.vport_reps) |
---|
861 | 1782 | return -ENOMEM; |
---|
862 | 1783 | |
---|
863 | | - offloads = &esw->offloads; |
---|
864 | | - mlx5_query_nic_vport_mac_address(dev, 0, hw_id); |
---|
| 1784 | + mlx5_esw_for_all_reps(esw, vport_index, rep) { |
---|
| 1785 | + rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index); |
---|
| 1786 | + rep->vport_index = vport_index; |
---|
865 | 1787 | |
---|
866 | | - for (vport = 0; vport < total_vfs; vport++) { |
---|
867 | | - rep = &offloads->vport_reps[vport]; |
---|
868 | | - |
---|
869 | | - rep->vport = vport; |
---|
870 | | - ether_addr_copy(rep->hw_id, hw_id); |
---|
| 1788 | + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) |
---|
| 1789 | + atomic_set(&rep->rep_data[rep_type].state, |
---|
| 1790 | + REP_UNREGISTERED); |
---|
871 | 1791 | } |
---|
872 | | - |
---|
873 | | - offloads->vport_reps[0].vport = FDB_UPLINK_VPORT; |
---|
874 | 1792 | |
---|
875 | 1793 | return 0; |
---|
876 | 1794 | } |
---|
877 | 1795 | |
---|
878 | | -static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports, |
---|
879 | | - u8 rep_type) |
---|
| 1796 | +static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, |
---|
| 1797 | + struct mlx5_eswitch_rep *rep, u8 rep_type) |
---|
| 1798 | +{ |
---|
| 1799 | + if (atomic_cmpxchg(&rep->rep_data[rep_type].state, |
---|
| 1800 | + REP_LOADED, REP_REGISTERED) == REP_LOADED) |
---|
| 1801 | + esw->offloads.rep_ops[rep_type]->unload(rep); |
---|
| 1802 | +} |
---|
| 1803 | + |
---|
| 1804 | +static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) |
---|
880 | 1805 | { |
---|
881 | 1806 | struct mlx5_eswitch_rep *rep; |
---|
882 | | - int vport; |
---|
| 1807 | + int i; |
---|
883 | 1808 | |
---|
884 | | - for (vport = nvports - 1; vport >= 0; vport--) { |
---|
885 | | - rep = &esw->offloads.vport_reps[vport]; |
---|
886 | | - if (!rep->rep_if[rep_type].valid) |
---|
887 | | - continue; |
---|
| 1809 | + mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs) |
---|
| 1810 | + __esw_offloads_unload_rep(esw, rep, rep_type); |
---|
888 | 1811 | |
---|
889 | | - rep->rep_if[rep_type].unload(rep); |
---|
| 1812 | + if (mlx5_ecpf_vport_exists(esw->dev)) { |
---|
| 1813 | + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); |
---|
| 1814 | + __esw_offloads_unload_rep(esw, rep, rep_type); |
---|
890 | 1815 | } |
---|
| 1816 | + |
---|
| 1817 | + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { |
---|
| 1818 | + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); |
---|
| 1819 | + __esw_offloads_unload_rep(esw, rep, rep_type); |
---|
| 1820 | + } |
---|
| 1821 | + |
---|
| 1822 | + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); |
---|
| 1823 | + __esw_offloads_unload_rep(esw, rep, rep_type); |
---|
891 | 1824 | } |
---|
892 | 1825 | |
---|
893 | | -static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports) |
---|
894 | | -{ |
---|
895 | | - u8 rep_type = NUM_REP_TYPES; |
---|
896 | | - |
---|
897 | | - while (rep_type-- > 0) |
---|
898 | | - esw_offloads_unload_reps_type(esw, nvports, rep_type); |
---|
899 | | -} |
---|
900 | | - |
---|
901 | | -static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports, |
---|
902 | | - u8 rep_type) |
---|
| 1826 | +static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) |
---|
903 | 1827 | { |
---|
904 | 1828 | struct mlx5_eswitch_rep *rep; |
---|
905 | | - int vport; |
---|
| 1829 | + int rep_type; |
---|
906 | 1830 | int err; |
---|
907 | 1831 | |
---|
908 | | - for (vport = 0; vport < nvports; vport++) { |
---|
909 | | - rep = &esw->offloads.vport_reps[vport]; |
---|
910 | | - if (!rep->rep_if[rep_type].valid) |
---|
911 | | - continue; |
---|
912 | | - |
---|
913 | | - err = rep->rep_if[rep_type].load(esw->dev, rep); |
---|
914 | | - if (err) |
---|
915 | | - goto err_reps; |
---|
916 | | - } |
---|
| 1832 | + rep = mlx5_eswitch_get_rep(esw, vport_num); |
---|
| 1833 | + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) |
---|
| 1834 | + if (atomic_cmpxchg(&rep->rep_data[rep_type].state, |
---|
| 1835 | + REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { |
---|
| 1836 | + err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); |
---|
| 1837 | + if (err) |
---|
| 1838 | + goto err_reps; |
---|
| 1839 | + } |
---|
917 | 1840 | |
---|
918 | 1841 | return 0; |
---|
919 | 1842 | |
---|
920 | 1843 | err_reps: |
---|
921 | | - esw_offloads_unload_reps_type(esw, vport, rep_type); |
---|
| 1844 | + atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED); |
---|
| 1845 | + for (--rep_type; rep_type >= 0; rep_type--) |
---|
| 1846 | + __esw_offloads_unload_rep(esw, rep, rep_type); |
---|
922 | 1847 | return err; |
---|
923 | 1848 | } |
---|
924 | 1849 | |
---|
925 | | -static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) |
---|
| 1850 | +static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) |
---|
926 | 1851 | { |
---|
927 | | - u8 rep_type = 0; |
---|
928 | | - int err; |
---|
| 1852 | + struct mlx5_eswitch_rep *rep; |
---|
| 1853 | + int rep_type; |
---|
929 | 1854 | |
---|
930 | | - for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { |
---|
931 | | - err = esw_offloads_load_reps_type(esw, nvports, rep_type); |
---|
932 | | - if (err) |
---|
933 | | - goto err_reps; |
---|
934 | | - } |
---|
935 | | - |
---|
936 | | - return err; |
---|
937 | | - |
---|
938 | | -err_reps: |
---|
939 | | - while (rep_type-- > 0) |
---|
940 | | - esw_offloads_unload_reps_type(esw, nvports, rep_type); |
---|
941 | | - return err; |
---|
| 1855 | + rep = mlx5_eswitch_get_rep(esw, vport_num); |
---|
| 1856 | + for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--) |
---|
| 1857 | + __esw_offloads_unload_rep(esw, rep, rep_type); |
---|
942 | 1858 | } |
---|
943 | 1859 | |
---|
944 | | -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) |
---|
| 1860 | +int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) |
---|
945 | 1861 | { |
---|
946 | 1862 | int err; |
---|
947 | 1863 | |
---|
948 | | - err = esw_create_offloads_fdb_tables(esw, nvports); |
---|
| 1864 | + if (esw->mode != MLX5_ESWITCH_OFFLOADS) |
---|
| 1865 | + return 0; |
---|
| 1866 | + |
---|
| 1867 | + err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); |
---|
949 | 1868 | if (err) |
---|
950 | 1869 | return err; |
---|
951 | 1870 | |
---|
| 1871 | + err = mlx5_esw_offloads_rep_load(esw, vport_num); |
---|
| 1872 | + if (err) |
---|
| 1873 | + goto load_err; |
---|
| 1874 | + return err; |
---|
| 1875 | + |
---|
| 1876 | +load_err: |
---|
| 1877 | + mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); |
---|
| 1878 | + return err; |
---|
| 1879 | +} |
---|
| 1880 | + |
---|
| 1881 | +void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) |
---|
| 1882 | +{ |
---|
| 1883 | + if (esw->mode != MLX5_ESWITCH_OFFLOADS) |
---|
| 1884 | + return; |
---|
| 1885 | + |
---|
| 1886 | + mlx5_esw_offloads_rep_unload(esw, vport_num); |
---|
| 1887 | + mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); |
---|
| 1888 | +} |
---|
| 1889 | + |
---|
| 1890 | +#define ESW_OFFLOADS_DEVCOM_PAIR (0) |
---|
| 1891 | +#define ESW_OFFLOADS_DEVCOM_UNPAIR (1) |
---|
| 1892 | + |
---|
| 1893 | +static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, |
---|
| 1894 | + struct mlx5_eswitch *peer_esw) |
---|
| 1895 | +{ |
---|
| 1896 | + int err; |
---|
| 1897 | + |
---|
| 1898 | + err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); |
---|
| 1899 | + if (err) |
---|
| 1900 | + return err; |
---|
| 1901 | + |
---|
| 1902 | + return 0; |
---|
| 1903 | +} |
---|
| 1904 | + |
---|
| 1905 | +static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) |
---|
| 1906 | +{ |
---|
| 1907 | +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) |
---|
| 1908 | + mlx5e_tc_clean_fdb_peer_flows(esw); |
---|
| 1909 | +#endif |
---|
| 1910 | + esw_del_fdb_peer_miss_rules(esw); |
---|
| 1911 | +} |
---|
| 1912 | + |
---|
| 1913 | +static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, |
---|
| 1914 | + struct mlx5_eswitch *peer_esw, |
---|
| 1915 | + bool pair) |
---|
| 1916 | +{ |
---|
| 1917 | + struct mlx5_flow_root_namespace *peer_ns; |
---|
| 1918 | + struct mlx5_flow_root_namespace *ns; |
---|
| 1919 | + int err; |
---|
| 1920 | + |
---|
| 1921 | + peer_ns = peer_esw->dev->priv.steering->fdb_root_ns; |
---|
| 1922 | + ns = esw->dev->priv.steering->fdb_root_ns; |
---|
| 1923 | + |
---|
| 1924 | + if (pair) { |
---|
| 1925 | + err = mlx5_flow_namespace_set_peer(ns, peer_ns); |
---|
| 1926 | + if (err) |
---|
| 1927 | + return err; |
---|
| 1928 | + |
---|
| 1929 | + err = mlx5_flow_namespace_set_peer(peer_ns, ns); |
---|
| 1930 | + if (err) { |
---|
| 1931 | + mlx5_flow_namespace_set_peer(ns, NULL); |
---|
| 1932 | + return err; |
---|
| 1933 | + } |
---|
| 1934 | + } else { |
---|
| 1935 | + mlx5_flow_namespace_set_peer(ns, NULL); |
---|
| 1936 | + mlx5_flow_namespace_set_peer(peer_ns, NULL); |
---|
| 1937 | + } |
---|
| 1938 | + |
---|
| 1939 | + return 0; |
---|
| 1940 | +} |
---|
| 1941 | + |
---|
| 1942 | +static int mlx5_esw_offloads_devcom_event(int event, |
---|
| 1943 | + void *my_data, |
---|
| 1944 | + void *event_data) |
---|
| 1945 | +{ |
---|
| 1946 | + struct mlx5_eswitch *esw = my_data; |
---|
| 1947 | + struct mlx5_devcom *devcom = esw->dev->priv.devcom; |
---|
| 1948 | + struct mlx5_eswitch *peer_esw = event_data; |
---|
| 1949 | + int err; |
---|
| 1950 | + |
---|
| 1951 | + switch (event) { |
---|
| 1952 | + case ESW_OFFLOADS_DEVCOM_PAIR: |
---|
| 1953 | + if (mlx5_eswitch_vport_match_metadata_enabled(esw) != |
---|
| 1954 | + mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) |
---|
| 1955 | + break; |
---|
| 1956 | + |
---|
| 1957 | + err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); |
---|
| 1958 | + if (err) |
---|
| 1959 | + goto err_out; |
---|
| 1960 | + err = mlx5_esw_offloads_pair(esw, peer_esw); |
---|
| 1961 | + if (err) |
---|
| 1962 | + goto err_peer; |
---|
| 1963 | + |
---|
| 1964 | + err = mlx5_esw_offloads_pair(peer_esw, esw); |
---|
| 1965 | + if (err) |
---|
| 1966 | + goto err_pair; |
---|
| 1967 | + |
---|
| 1968 | + mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); |
---|
| 1969 | + break; |
---|
| 1970 | + |
---|
| 1971 | + case ESW_OFFLOADS_DEVCOM_UNPAIR: |
---|
| 1972 | + if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) |
---|
| 1973 | + break; |
---|
| 1974 | + |
---|
| 1975 | + mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); |
---|
| 1976 | + mlx5_esw_offloads_unpair(peer_esw); |
---|
| 1977 | + mlx5_esw_offloads_unpair(esw); |
---|
| 1978 | + mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); |
---|
| 1979 | + break; |
---|
| 1980 | + } |
---|
| 1981 | + |
---|
| 1982 | + return 0; |
---|
| 1983 | + |
---|
| 1984 | +err_pair: |
---|
| 1985 | + mlx5_esw_offloads_unpair(esw); |
---|
| 1986 | +err_peer: |
---|
| 1987 | + mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); |
---|
| 1988 | +err_out: |
---|
| 1989 | + mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", |
---|
| 1990 | + event, err); |
---|
| 1991 | + return err; |
---|
| 1992 | +} |
---|
| 1993 | + |
---|
| 1994 | +static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) |
---|
| 1995 | +{ |
---|
| 1996 | + struct mlx5_devcom *devcom = esw->dev->priv.devcom; |
---|
| 1997 | + |
---|
| 1998 | + INIT_LIST_HEAD(&esw->offloads.peer_flows); |
---|
| 1999 | + mutex_init(&esw->offloads.peer_mutex); |
---|
| 2000 | + |
---|
| 2001 | + if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
| 2002 | + return; |
---|
| 2003 | + |
---|
| 2004 | + mlx5_devcom_register_component(devcom, |
---|
| 2005 | + MLX5_DEVCOM_ESW_OFFLOADS, |
---|
| 2006 | + mlx5_esw_offloads_devcom_event, |
---|
| 2007 | + esw); |
---|
| 2008 | + |
---|
| 2009 | + mlx5_devcom_send_event(devcom, |
---|
| 2010 | + MLX5_DEVCOM_ESW_OFFLOADS, |
---|
| 2011 | + ESW_OFFLOADS_DEVCOM_PAIR, esw); |
---|
| 2012 | +} |
---|
| 2013 | + |
---|
| 2014 | +static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) |
---|
| 2015 | +{ |
---|
| 2016 | + struct mlx5_devcom *devcom = esw->dev->priv.devcom; |
---|
| 2017 | + |
---|
| 2018 | + if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
---|
| 2019 | + return; |
---|
| 2020 | + |
---|
| 2021 | + mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, |
---|
| 2022 | + ESW_OFFLOADS_DEVCOM_UNPAIR, esw); |
---|
| 2023 | + |
---|
| 2024 | + mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); |
---|
| 2025 | +} |
---|
| 2026 | + |
---|
| 2027 | +static bool |
---|
| 2028 | +esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) |
---|
| 2029 | +{ |
---|
| 2030 | + if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) |
---|
| 2031 | + return false; |
---|
| 2032 | + |
---|
| 2033 | + if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & |
---|
| 2034 | + MLX5_FDB_TO_VPORT_REG_C_0)) |
---|
| 2035 | + return false; |
---|
| 2036 | + |
---|
| 2037 | + if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) |
---|
| 2038 | + return false; |
---|
| 2039 | + |
---|
| 2040 | + return true; |
---|
| 2041 | +} |
---|
| 2042 | + |
---|
| 2043 | +u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) |
---|
| 2044 | +{ |
---|
| 2045 | + u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; |
---|
| 2046 | + u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1; |
---|
| 2047 | + u32 pf_num; |
---|
| 2048 | + int id; |
---|
| 2049 | + |
---|
| 2050 | + /* Only 4 bits of pf_num */ |
---|
| 2051 | + pf_num = PCI_FUNC(esw->dev->pdev->devfn); |
---|
| 2052 | + if (pf_num > max_pf_num) |
---|
| 2053 | + return 0; |
---|
| 2054 | + |
---|
| 2055 | + /* Metadata is 4 bits of PFNUM and 12 bits of unique id */ |
---|
| 2056 | + /* Use only non-zero vport_id (1-4095) for all PF's */ |
---|
| 2057 | + id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL); |
---|
| 2058 | + if (id < 0) |
---|
| 2059 | + return 0; |
---|
| 2060 | + id = (pf_num << ESW_VPORT_BITS) | id; |
---|
| 2061 | + return id; |
---|
| 2062 | +} |
---|
| 2063 | + |
---|
| 2064 | +void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) |
---|
| 2065 | +{ |
---|
| 2066 | + u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1; |
---|
| 2067 | + |
---|
| 2068 | + /* Metadata contains only 12 bits of actual ida id */ |
---|
| 2069 | + ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask); |
---|
| 2070 | +} |
---|
| 2071 | + |
---|
| 2072 | +static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, |
---|
| 2073 | + struct mlx5_vport *vport) |
---|
| 2074 | +{ |
---|
| 2075 | + vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); |
---|
| 2076 | + vport->metadata = vport->default_metadata; |
---|
| 2077 | + return vport->metadata ? 0 : -ENOSPC; |
---|
| 2078 | +} |
---|
| 2079 | + |
---|
| 2080 | +static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, |
---|
| 2081 | + struct mlx5_vport *vport) |
---|
| 2082 | +{ |
---|
| 2083 | + if (!vport->default_metadata) |
---|
| 2084 | + return; |
---|
| 2085 | + |
---|
| 2086 | + WARN_ON(vport->metadata != vport->default_metadata); |
---|
| 2087 | + mlx5_esw_match_metadata_free(esw, vport->default_metadata); |
---|
| 2088 | +} |
---|
| 2089 | + |
---|
| 2090 | +static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) |
---|
| 2091 | +{ |
---|
| 2092 | + struct mlx5_vport *vport; |
---|
| 2093 | + int i; |
---|
| 2094 | + |
---|
| 2095 | + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) |
---|
| 2096 | + return; |
---|
| 2097 | + |
---|
| 2098 | + mlx5_esw_for_all_vports_reverse(esw, i, vport) |
---|
| 2099 | + esw_offloads_vport_metadata_cleanup(esw, vport); |
---|
| 2100 | +} |
---|
| 2101 | + |
---|
| 2102 | +static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) |
---|
| 2103 | +{ |
---|
| 2104 | + struct mlx5_vport *vport; |
---|
| 2105 | + int err; |
---|
| 2106 | + int i; |
---|
| 2107 | + |
---|
| 2108 | + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) |
---|
| 2109 | + return 0; |
---|
| 2110 | + |
---|
| 2111 | + mlx5_esw_for_all_vports(esw, i, vport) { |
---|
| 2112 | + err = esw_offloads_vport_metadata_setup(esw, vport); |
---|
| 2113 | + if (err) |
---|
| 2114 | + goto metadata_err; |
---|
| 2115 | + } |
---|
| 2116 | + |
---|
| 2117 | + return 0; |
---|
| 2118 | + |
---|
| 2119 | +metadata_err: |
---|
| 2120 | + esw_offloads_metadata_uninit(esw); |
---|
| 2121 | + return err; |
---|
| 2122 | +} |
---|
| 2123 | + |
---|
| 2124 | +int |
---|
| 2125 | +esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, |
---|
| 2126 | + struct mlx5_vport *vport) |
---|
| 2127 | +{ |
---|
| 2128 | + int err; |
---|
| 2129 | + |
---|
| 2130 | + err = esw_acl_ingress_ofld_setup(esw, vport); |
---|
| 2131 | + if (err) |
---|
| 2132 | + return err; |
---|
| 2133 | + |
---|
| 2134 | + err = esw_acl_egress_ofld_setup(esw, vport); |
---|
| 2135 | + if (err) |
---|
| 2136 | + goto egress_err; |
---|
| 2137 | + |
---|
| 2138 | + return 0; |
---|
| 2139 | + |
---|
| 2140 | +egress_err: |
---|
| 2141 | + esw_acl_ingress_ofld_cleanup(esw, vport); |
---|
| 2142 | + return err; |
---|
| 2143 | +} |
---|
| 2144 | + |
---|
| 2145 | +void |
---|
| 2146 | +esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, |
---|
| 2147 | + struct mlx5_vport *vport) |
---|
| 2148 | +{ |
---|
| 2149 | + esw_acl_egress_ofld_cleanup(vport); |
---|
| 2150 | + esw_acl_ingress_ofld_cleanup(esw, vport); |
---|
| 2151 | +} |
---|
| 2152 | + |
---|
| 2153 | +static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) |
---|
| 2154 | +{ |
---|
| 2155 | + struct mlx5_vport *vport; |
---|
| 2156 | + |
---|
| 2157 | + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); |
---|
| 2158 | + return esw_vport_create_offloads_acl_tables(esw, vport); |
---|
| 2159 | +} |
---|
| 2160 | + |
---|
| 2161 | +static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) |
---|
| 2162 | +{ |
---|
| 2163 | + struct mlx5_vport *vport; |
---|
| 2164 | + |
---|
| 2165 | + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); |
---|
| 2166 | + esw_vport_destroy_offloads_acl_tables(esw, vport); |
---|
| 2167 | +} |
---|
| 2168 | + |
---|
| 2169 | +static int esw_offloads_steering_init(struct mlx5_eswitch *esw) |
---|
| 2170 | +{ |
---|
| 2171 | + int err; |
---|
| 2172 | + |
---|
| 2173 | + memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); |
---|
| 2174 | + mutex_init(&esw->fdb_table.offloads.vports.lock); |
---|
| 2175 | + hash_init(esw->fdb_table.offloads.vports.table); |
---|
| 2176 | + |
---|
| 2177 | + err = esw_create_uplink_offloads_acl_tables(esw); |
---|
| 2178 | + if (err) |
---|
| 2179 | + goto create_acl_err; |
---|
| 2180 | + |
---|
952 | 2181 | err = esw_create_offloads_table(esw); |
---|
953 | 2182 | if (err) |
---|
954 | | - goto create_ft_err; |
---|
| 2183 | + goto create_offloads_err; |
---|
| 2184 | + |
---|
| 2185 | + err = esw_create_restore_table(esw); |
---|
| 2186 | + if (err) |
---|
| 2187 | + goto create_restore_err; |
---|
| 2188 | + |
---|
| 2189 | + err = esw_create_offloads_fdb_tables(esw); |
---|
| 2190 | + if (err) |
---|
| 2191 | + goto create_fdb_err; |
---|
955 | 2192 | |
---|
956 | 2193 | err = esw_create_vport_rx_group(esw); |
---|
957 | 2194 | if (err) |
---|
958 | 2195 | goto create_fg_err; |
---|
959 | 2196 | |
---|
960 | | - err = esw_offloads_load_reps(esw, nvports); |
---|
| 2197 | + return 0; |
---|
| 2198 | + |
---|
| 2199 | +create_fg_err: |
---|
| 2200 | + esw_destroy_offloads_fdb_tables(esw); |
---|
| 2201 | +create_fdb_err: |
---|
| 2202 | + esw_destroy_restore_table(esw); |
---|
| 2203 | +create_restore_err: |
---|
| 2204 | + esw_destroy_offloads_table(esw); |
---|
| 2205 | +create_offloads_err: |
---|
| 2206 | + esw_destroy_uplink_offloads_acl_tables(esw); |
---|
| 2207 | +create_acl_err: |
---|
| 2208 | + mutex_destroy(&esw->fdb_table.offloads.vports.lock); |
---|
| 2209 | + return err; |
---|
| 2210 | +} |
---|
| 2211 | + |
---|
| 2212 | +static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) |
---|
| 2213 | +{ |
---|
| 2214 | + esw_destroy_vport_rx_group(esw); |
---|
| 2215 | + esw_destroy_offloads_fdb_tables(esw); |
---|
| 2216 | + esw_destroy_restore_table(esw); |
---|
| 2217 | + esw_destroy_offloads_table(esw); |
---|
| 2218 | + esw_destroy_uplink_offloads_acl_tables(esw); |
---|
| 2219 | + mutex_destroy(&esw->fdb_table.offloads.vports.lock); |
---|
| 2220 | +} |
---|
| 2221 | + |
---|
| 2222 | +static void |
---|
| 2223 | +esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) |
---|
| 2224 | +{ |
---|
| 2225 | + bool host_pf_disabled; |
---|
| 2226 | + u16 new_num_vfs; |
---|
| 2227 | + |
---|
| 2228 | + new_num_vfs = MLX5_GET(query_esw_functions_out, out, |
---|
| 2229 | + host_params_context.host_num_of_vfs); |
---|
| 2230 | + host_pf_disabled = MLX5_GET(query_esw_functions_out, out, |
---|
| 2231 | + host_params_context.host_pf_disabled); |
---|
| 2232 | + |
---|
| 2233 | + if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) |
---|
| 2234 | + return; |
---|
| 2235 | + |
---|
| 2236 | + /* Number of VFs can only change from "0 to x" or "x to 0". */ |
---|
| 2237 | + if (esw->esw_funcs.num_vfs > 0) { |
---|
| 2238 | + mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); |
---|
| 2239 | + } else { |
---|
| 2240 | + int err; |
---|
| 2241 | + |
---|
| 2242 | + err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs, |
---|
| 2243 | + MLX5_VPORT_UC_ADDR_CHANGE); |
---|
| 2244 | + if (err) |
---|
| 2245 | + return; |
---|
| 2246 | + } |
---|
| 2247 | + esw->esw_funcs.num_vfs = new_num_vfs; |
---|
| 2248 | +} |
---|
| 2249 | + |
---|
| 2250 | +static void esw_functions_changed_event_handler(struct work_struct *work) |
---|
| 2251 | +{ |
---|
| 2252 | + struct mlx5_host_work *host_work; |
---|
| 2253 | + struct mlx5_eswitch *esw; |
---|
| 2254 | + const u32 *out; |
---|
| 2255 | + |
---|
| 2256 | + host_work = container_of(work, struct mlx5_host_work, work); |
---|
| 2257 | + esw = host_work->esw; |
---|
| 2258 | + |
---|
| 2259 | + out = mlx5_esw_query_functions(esw->dev); |
---|
| 2260 | + if (IS_ERR(out)) |
---|
| 2261 | + goto out; |
---|
| 2262 | + |
---|
| 2263 | + esw_vfs_changed_event_handler(esw, out); |
---|
| 2264 | + kvfree(out); |
---|
| 2265 | +out: |
---|
| 2266 | + kfree(host_work); |
---|
| 2267 | +} |
---|
| 2268 | + |
---|
| 2269 | +int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data) |
---|
| 2270 | +{ |
---|
| 2271 | + struct mlx5_esw_functions *esw_funcs; |
---|
| 2272 | + struct mlx5_host_work *host_work; |
---|
| 2273 | + struct mlx5_eswitch *esw; |
---|
| 2274 | + |
---|
| 2275 | + host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); |
---|
| 2276 | + if (!host_work) |
---|
| 2277 | + return NOTIFY_DONE; |
---|
| 2278 | + |
---|
| 2279 | + esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb); |
---|
| 2280 | + esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); |
---|
| 2281 | + |
---|
| 2282 | + host_work->esw = esw; |
---|
| 2283 | + |
---|
| 2284 | + INIT_WORK(&host_work->work, esw_functions_changed_event_handler); |
---|
| 2285 | + queue_work(esw->work_queue, &host_work->work); |
---|
| 2286 | + |
---|
| 2287 | + return NOTIFY_OK; |
---|
| 2288 | +} |
---|
| 2289 | + |
---|
| 2290 | +static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) |
---|
| 2291 | +{ |
---|
| 2292 | + const u32 *query_host_out; |
---|
| 2293 | + |
---|
| 2294 | + if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) |
---|
| 2295 | + return 0; |
---|
| 2296 | + |
---|
| 2297 | + query_host_out = mlx5_esw_query_functions(esw->dev); |
---|
| 2298 | + if (IS_ERR(query_host_out)) |
---|
| 2299 | + return PTR_ERR(query_host_out); |
---|
| 2300 | + |
---|
| 2301 | + /* Mark non local controller with non zero controller number. */ |
---|
| 2302 | + esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out, |
---|
| 2303 | + host_params_context.host_number); |
---|
| 2304 | + kvfree(query_host_out); |
---|
| 2305 | + return 0; |
---|
| 2306 | +} |
---|
| 2307 | + |
---|
| 2308 | +int esw_offloads_enable(struct mlx5_eswitch *esw) |
---|
| 2309 | +{ |
---|
| 2310 | + struct mlx5_vport *vport; |
---|
| 2311 | + int err, i; |
---|
| 2312 | + |
---|
| 2313 | + if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && |
---|
| 2314 | + MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) |
---|
| 2315 | + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; |
---|
| 2316 | + else |
---|
| 2317 | + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; |
---|
| 2318 | + |
---|
| 2319 | + mutex_init(&esw->offloads.termtbl_mutex); |
---|
| 2320 | + mlx5_rdma_enable_roce(esw->dev); |
---|
| 2321 | + |
---|
| 2322 | + err = mlx5_esw_host_number_init(esw); |
---|
961 | 2323 | if (err) |
---|
962 | | - goto err_reps; |
---|
| 2324 | + goto err_metadata; |
---|
| 2325 | + |
---|
| 2326 | + if (esw_check_vport_match_metadata_supported(esw)) |
---|
| 2327 | + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; |
---|
| 2328 | + |
---|
| 2329 | + err = esw_offloads_metadata_init(esw); |
---|
| 2330 | + if (err) |
---|
| 2331 | + goto err_metadata; |
---|
| 2332 | + |
---|
| 2333 | + err = esw_set_passing_vport_metadata(esw, true); |
---|
| 2334 | + if (err) |
---|
| 2335 | + goto err_vport_metadata; |
---|
| 2336 | + |
---|
| 2337 | + err = esw_offloads_steering_init(esw); |
---|
| 2338 | + if (err) |
---|
| 2339 | + goto err_steering_init; |
---|
| 2340 | + |
---|
| 2341 | + /* Representor will control the vport link state */ |
---|
| 2342 | + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) |
---|
| 2343 | + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; |
---|
| 2344 | + |
---|
| 2345 | + /* Uplink vport rep must load first. */ |
---|
| 2346 | + err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK); |
---|
| 2347 | + if (err) |
---|
| 2348 | + goto err_uplink; |
---|
| 2349 | + |
---|
| 2350 | + err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); |
---|
| 2351 | + if (err) |
---|
| 2352 | + goto err_vports; |
---|
| 2353 | + |
---|
| 2354 | + esw_offloads_devcom_init(esw); |
---|
963 | 2355 | |
---|
964 | 2356 | return 0; |
---|
965 | 2357 | |
---|
966 | | -err_reps: |
---|
967 | | - esw_destroy_vport_rx_group(esw); |
---|
968 | | - |
---|
969 | | -create_fg_err: |
---|
970 | | - esw_destroy_offloads_table(esw); |
---|
971 | | - |
---|
972 | | -create_ft_err: |
---|
973 | | - esw_destroy_offloads_fdb_tables(esw); |
---|
974 | | - |
---|
| 2358 | +err_vports: |
---|
| 2359 | + esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); |
---|
| 2360 | +err_uplink: |
---|
| 2361 | + esw_offloads_steering_cleanup(esw); |
---|
| 2362 | +err_steering_init: |
---|
| 2363 | + esw_set_passing_vport_metadata(esw, false); |
---|
| 2364 | +err_vport_metadata: |
---|
| 2365 | + esw_offloads_metadata_uninit(esw); |
---|
| 2366 | +err_metadata: |
---|
| 2367 | + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; |
---|
| 2368 | + mlx5_rdma_disable_roce(esw->dev); |
---|
| 2369 | + mutex_destroy(&esw->offloads.termtbl_mutex); |
---|
975 | 2370 | return err; |
---|
976 | 2371 | } |
---|
977 | 2372 | |
---|
978 | | -static int esw_offloads_stop(struct mlx5_eswitch *esw) |
---|
| 2373 | +static int esw_offloads_stop(struct mlx5_eswitch *esw, |
---|
| 2374 | + struct netlink_ext_ack *extack) |
---|
979 | 2375 | { |
---|
980 | | - int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
---|
| 2376 | + int err, err1; |
---|
981 | 2377 | |
---|
982 | | - mlx5_eswitch_disable_sriov(esw); |
---|
983 | | - err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); |
---|
| 2378 | + mlx5_eswitch_disable_locked(esw, false); |
---|
| 2379 | + err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, |
---|
| 2380 | + MLX5_ESWITCH_IGNORE_NUM_VFS); |
---|
984 | 2381 | if (err) { |
---|
985 | | - esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); |
---|
986 | | - err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); |
---|
987 | | - if (err1) |
---|
988 | | - esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); |
---|
| 2382 | + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); |
---|
| 2383 | + err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, |
---|
| 2384 | + MLX5_ESWITCH_IGNORE_NUM_VFS); |
---|
| 2385 | + if (err1) { |
---|
| 2386 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 2387 | + "Failed setting eswitch back to offloads"); |
---|
| 2388 | + } |
---|
989 | 2389 | } |
---|
990 | 2390 | |
---|
991 | | - /* enable back PF RoCE */ |
---|
992 | | - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); |
---|
993 | | - |
---|
994 | 2391 | return err; |
---|
995 | 2392 | } |
---|
996 | 2393 | |
---|
997 | | -void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) |
---|
| 2394 | +void esw_offloads_disable(struct mlx5_eswitch *esw) |
---|
998 | 2395 | { |
---|
999 | | - esw_offloads_unload_reps(esw, nvports); |
---|
1000 | | - esw_destroy_vport_rx_group(esw); |
---|
1001 | | - esw_destroy_offloads_table(esw); |
---|
1002 | | - esw_destroy_offloads_fdb_tables(esw); |
---|
| 2396 | + esw_offloads_devcom_cleanup(esw); |
---|
| 2397 | + mlx5_eswitch_disable_pf_vf_vports(esw); |
---|
| 2398 | + esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); |
---|
| 2399 | + esw_set_passing_vport_metadata(esw, false); |
---|
| 2400 | + esw_offloads_steering_cleanup(esw); |
---|
| 2401 | + esw_offloads_metadata_uninit(esw); |
---|
| 2402 | + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; |
---|
| 2403 | + mlx5_rdma_disable_roce(esw->dev); |
---|
| 2404 | + mutex_destroy(&esw->offloads.termtbl_mutex); |
---|
| 2405 | + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; |
---|
1003 | 2406 | } |
---|
1004 | 2407 | |
---|
1005 | 2408 | static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) |
---|
1006 | 2409 | { |
---|
1007 | 2410 | switch (mode) { |
---|
1008 | 2411 | case DEVLINK_ESWITCH_MODE_LEGACY: |
---|
1009 | | - *mlx5_mode = SRIOV_LEGACY; |
---|
| 2412 | + *mlx5_mode = MLX5_ESWITCH_LEGACY; |
---|
1010 | 2413 | break; |
---|
1011 | 2414 | case DEVLINK_ESWITCH_MODE_SWITCHDEV: |
---|
1012 | | - *mlx5_mode = SRIOV_OFFLOADS; |
---|
| 2415 | + *mlx5_mode = MLX5_ESWITCH_OFFLOADS; |
---|
1013 | 2416 | break; |
---|
1014 | 2417 | default: |
---|
1015 | 2418 | return -EINVAL; |
---|
.. | .. |
---|
1021 | 2424 | static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) |
---|
1022 | 2425 | { |
---|
1023 | 2426 | switch (mlx5_mode) { |
---|
1024 | | - case SRIOV_LEGACY: |
---|
| 2427 | + case MLX5_ESWITCH_LEGACY: |
---|
1025 | 2428 | *mode = DEVLINK_ESWITCH_MODE_LEGACY; |
---|
1026 | 2429 | break; |
---|
1027 | | - case SRIOV_OFFLOADS: |
---|
| 2430 | + case MLX5_ESWITCH_OFFLOADS: |
---|
1028 | 2431 | *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; |
---|
1029 | 2432 | break; |
---|
1030 | 2433 | default: |
---|
.. | .. |
---|
1078 | 2481 | return 0; |
---|
1079 | 2482 | } |
---|
1080 | 2483 | |
---|
1081 | | -static int mlx5_devlink_eswitch_check(struct devlink *devlink) |
---|
| 2484 | +static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) |
---|
1082 | 2485 | { |
---|
1083 | | - struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
1084 | | - |
---|
1085 | | - if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) |
---|
1086 | | - return -EOPNOTSUPP; |
---|
1087 | | - |
---|
1088 | | - if(!MLX5_ESWITCH_MANAGER(dev)) |
---|
1089 | | - return -EPERM; |
---|
1090 | | - |
---|
1091 | | - if (dev->priv.eswitch->mode == SRIOV_NONE) |
---|
1092 | | - return -EOPNOTSUPP; |
---|
1093 | | - |
---|
1094 | | - return 0; |
---|
| 2486 | + /* devlink commands in NONE eswitch mode are currently supported only |
---|
| 2487 | + * on ECPF. |
---|
| 2488 | + */ |
---|
| 2489 | + return (esw->mode == MLX5_ESWITCH_NONE && |
---|
| 2490 | + !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0; |
---|
1095 | 2491 | } |
---|
1096 | 2492 | |
---|
1097 | | -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) |
---|
| 2493 | +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, |
---|
| 2494 | + struct netlink_ext_ack *extack) |
---|
1098 | 2495 | { |
---|
1099 | | - struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
1100 | 2496 | u16 cur_mlx5_mode, mlx5_mode = 0; |
---|
1101 | | - int err; |
---|
| 2497 | + struct mlx5_eswitch *esw; |
---|
| 2498 | + int err = 0; |
---|
1102 | 2499 | |
---|
1103 | | - err = mlx5_devlink_eswitch_check(devlink); |
---|
1104 | | - if (err) |
---|
1105 | | - return err; |
---|
1106 | | - |
---|
1107 | | - cur_mlx5_mode = dev->priv.eswitch->mode; |
---|
| 2500 | + esw = mlx5_devlink_eswitch_get(devlink); |
---|
| 2501 | + if (IS_ERR(esw)) |
---|
| 2502 | + return PTR_ERR(esw); |
---|
1108 | 2503 | |
---|
1109 | 2504 | if (esw_mode_from_devlink(mode, &mlx5_mode)) |
---|
1110 | 2505 | return -EINVAL; |
---|
1111 | 2506 | |
---|
| 2507 | + down_write(&esw->mode_lock); |
---|
| 2508 | + cur_mlx5_mode = esw->mode; |
---|
1112 | 2509 | if (cur_mlx5_mode == mlx5_mode) |
---|
1113 | | - return 0; |
---|
| 2510 | + goto unlock; |
---|
1114 | 2511 | |
---|
1115 | 2512 | if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) |
---|
1116 | | - return esw_offloads_start(dev->priv.eswitch); |
---|
| 2513 | + err = esw_offloads_start(esw, extack); |
---|
1117 | 2514 | else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) |
---|
1118 | | - return esw_offloads_stop(dev->priv.eswitch); |
---|
| 2515 | + err = esw_offloads_stop(esw, extack); |
---|
1119 | 2516 | else |
---|
1120 | | - return -EINVAL; |
---|
| 2517 | + err = -EINVAL; |
---|
| 2518 | + |
---|
| 2519 | +unlock: |
---|
| 2520 | + up_write(&esw->mode_lock); |
---|
| 2521 | + return err; |
---|
1121 | 2522 | } |
---|
1122 | 2523 | |
---|
1123 | 2524 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) |
---|
1124 | 2525 | { |
---|
1125 | | - struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
| 2526 | + struct mlx5_eswitch *esw; |
---|
1126 | 2527 | int err; |
---|
1127 | 2528 | |
---|
1128 | | - err = mlx5_devlink_eswitch_check(devlink); |
---|
1129 | | - if (err) |
---|
1130 | | - return err; |
---|
| 2529 | + esw = mlx5_devlink_eswitch_get(devlink); |
---|
| 2530 | + if (IS_ERR(esw)) |
---|
| 2531 | + return PTR_ERR(esw); |
---|
1131 | 2532 | |
---|
1132 | | - return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); |
---|
| 2533 | + down_write(&esw->mode_lock); |
---|
| 2534 | + err = eswitch_devlink_esw_mode_check(esw); |
---|
| 2535 | + if (err) |
---|
| 2536 | + goto unlock; |
---|
| 2537 | + |
---|
| 2538 | + err = esw_mode_to_devlink(esw->mode, mode); |
---|
| 2539 | +unlock: |
---|
| 2540 | + up_write(&esw->mode_lock); |
---|
| 2541 | + return err; |
---|
1133 | 2542 | } |
---|
1134 | 2543 | |
---|
1135 | | -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) |
---|
| 2544 | +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, |
---|
| 2545 | + struct netlink_ext_ack *extack) |
---|
1136 | 2546 | { |
---|
1137 | 2547 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
1138 | | - struct mlx5_eswitch *esw = dev->priv.eswitch; |
---|
1139 | | - int err, vport; |
---|
| 2548 | + int err, vport, num_vport; |
---|
| 2549 | + struct mlx5_eswitch *esw; |
---|
1140 | 2550 | u8 mlx5_mode; |
---|
1141 | 2551 | |
---|
1142 | | - err = mlx5_devlink_eswitch_check(devlink); |
---|
| 2552 | + esw = mlx5_devlink_eswitch_get(devlink); |
---|
| 2553 | + if (IS_ERR(esw)) |
---|
| 2554 | + return PTR_ERR(esw); |
---|
| 2555 | + |
---|
| 2556 | + down_write(&esw->mode_lock); |
---|
| 2557 | + err = eswitch_devlink_esw_mode_check(esw); |
---|
1143 | 2558 | if (err) |
---|
1144 | | - return err; |
---|
| 2559 | + goto out; |
---|
1145 | 2560 | |
---|
1146 | 2561 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
---|
1147 | 2562 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
---|
1148 | | - if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) |
---|
1149 | | - return 0; |
---|
1150 | | - /* fall through */ |
---|
| 2563 | + if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) { |
---|
| 2564 | + err = 0; |
---|
| 2565 | + goto out; |
---|
| 2566 | + } |
---|
| 2567 | + |
---|
| 2568 | + fallthrough; |
---|
1151 | 2569 | case MLX5_CAP_INLINE_MODE_L2: |
---|
1152 | | - esw_warn(dev, "Inline mode can't be set\n"); |
---|
1153 | | - return -EOPNOTSUPP; |
---|
| 2570 | + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); |
---|
| 2571 | + err = -EOPNOTSUPP; |
---|
| 2572 | + goto out; |
---|
1154 | 2573 | case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: |
---|
1155 | 2574 | break; |
---|
1156 | 2575 | } |
---|
1157 | 2576 | |
---|
1158 | | - if (esw->offloads.num_flows > 0) { |
---|
1159 | | - esw_warn(dev, "Can't set inline mode when flows are configured\n"); |
---|
1160 | | - return -EOPNOTSUPP; |
---|
| 2577 | + if (atomic64_read(&esw->offloads.num_flows) > 0) { |
---|
| 2578 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 2579 | + "Can't set inline mode when flows are configured"); |
---|
| 2580 | + err = -EOPNOTSUPP; |
---|
| 2581 | + goto out; |
---|
1161 | 2582 | } |
---|
1162 | 2583 | |
---|
1163 | 2584 | err = esw_inline_mode_from_devlink(mode, &mlx5_mode); |
---|
1164 | 2585 | if (err) |
---|
1165 | 2586 | goto out; |
---|
1166 | 2587 | |
---|
1167 | | - for (vport = 1; vport < esw->enabled_vports; vport++) { |
---|
| 2588 | + mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { |
---|
1168 | 2589 | err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); |
---|
1169 | 2590 | if (err) { |
---|
1170 | | - esw_warn(dev, "Failed to set min inline on vport %d\n", |
---|
1171 | | - vport); |
---|
| 2591 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 2592 | + "Failed to set min inline on vport"); |
---|
1172 | 2593 | goto revert_inline_mode; |
---|
1173 | 2594 | } |
---|
1174 | 2595 | } |
---|
1175 | 2596 | |
---|
1176 | 2597 | esw->offloads.inline_mode = mlx5_mode; |
---|
| 2598 | + up_write(&esw->mode_lock); |
---|
1177 | 2599 | return 0; |
---|
1178 | 2600 | |
---|
1179 | 2601 | revert_inline_mode: |
---|
1180 | | - while (--vport > 0) |
---|
| 2602 | + num_vport = --vport; |
---|
| 2603 | + mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport) |
---|
1181 | 2604 | mlx5_modify_nic_vport_min_inline(dev, |
---|
1182 | 2605 | vport, |
---|
1183 | 2606 | esw->offloads.inline_mode); |
---|
1184 | 2607 | out: |
---|
| 2608 | + up_write(&esw->mode_lock); |
---|
1185 | 2609 | return err; |
---|
1186 | 2610 | } |
---|
1187 | 2611 | |
---|
1188 | 2612 | int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) |
---|
1189 | 2613 | { |
---|
1190 | | - struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
1191 | | - struct mlx5_eswitch *esw = dev->priv.eswitch; |
---|
| 2614 | + struct mlx5_eswitch *esw; |
---|
1192 | 2615 | int err; |
---|
1193 | 2616 | |
---|
1194 | | - err = mlx5_devlink_eswitch_check(devlink); |
---|
| 2617 | + esw = mlx5_devlink_eswitch_get(devlink); |
---|
| 2618 | + if (IS_ERR(esw)) |
---|
| 2619 | + return PTR_ERR(esw); |
---|
| 2620 | + |
---|
| 2621 | + down_write(&esw->mode_lock); |
---|
| 2622 | + err = eswitch_devlink_esw_mode_check(esw); |
---|
1195 | 2623 | if (err) |
---|
1196 | | - return err; |
---|
| 2624 | + goto unlock; |
---|
1197 | 2625 | |
---|
1198 | | - return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); |
---|
1199 | | -} |
---|
1200 | | - |
---|
1201 | | -int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) |
---|
1202 | | -{ |
---|
1203 | | - u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; |
---|
1204 | | - struct mlx5_core_dev *dev = esw->dev; |
---|
1205 | | - int vport; |
---|
1206 | | - |
---|
1207 | | - if (!MLX5_CAP_GEN(dev, vport_group_manager)) |
---|
1208 | | - return -EOPNOTSUPP; |
---|
1209 | | - |
---|
1210 | | - if (esw->mode == SRIOV_NONE) |
---|
1211 | | - return -EOPNOTSUPP; |
---|
1212 | | - |
---|
1213 | | - switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
---|
1214 | | - case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
---|
1215 | | - mlx5_mode = MLX5_INLINE_MODE_NONE; |
---|
1216 | | - goto out; |
---|
1217 | | - case MLX5_CAP_INLINE_MODE_L2: |
---|
1218 | | - mlx5_mode = MLX5_INLINE_MODE_L2; |
---|
1219 | | - goto out; |
---|
1220 | | - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: |
---|
1221 | | - goto query_vports; |
---|
1222 | | - } |
---|
1223 | | - |
---|
1224 | | -query_vports: |
---|
1225 | | - for (vport = 1; vport <= nvfs; vport++) { |
---|
1226 | | - mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); |
---|
1227 | | - if (vport > 1 && prev_mlx5_mode != mlx5_mode) |
---|
1228 | | - return -EINVAL; |
---|
1229 | | - prev_mlx5_mode = mlx5_mode; |
---|
1230 | | - } |
---|
1231 | | - |
---|
1232 | | -out: |
---|
1233 | | - *mode = mlx5_mode; |
---|
1234 | | - return 0; |
---|
1235 | | -} |
---|
1236 | | - |
---|
1237 | | -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) |
---|
1238 | | -{ |
---|
1239 | | - struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
1240 | | - struct mlx5_eswitch *esw = dev->priv.eswitch; |
---|
1241 | | - int err; |
---|
1242 | | - |
---|
1243 | | - err = mlx5_devlink_eswitch_check(devlink); |
---|
1244 | | - if (err) |
---|
1245 | | - return err; |
---|
1246 | | - |
---|
1247 | | - if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && |
---|
1248 | | - (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || |
---|
1249 | | - !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) |
---|
1250 | | - return -EOPNOTSUPP; |
---|
1251 | | - |
---|
1252 | | - if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) |
---|
1253 | | - return -EOPNOTSUPP; |
---|
1254 | | - |
---|
1255 | | - if (esw->mode == SRIOV_LEGACY) { |
---|
1256 | | - esw->offloads.encap = encap; |
---|
1257 | | - return 0; |
---|
1258 | | - } |
---|
1259 | | - |
---|
1260 | | - if (esw->offloads.encap == encap) |
---|
1261 | | - return 0; |
---|
1262 | | - |
---|
1263 | | - if (esw->offloads.num_flows > 0) { |
---|
1264 | | - esw_warn(dev, "Can't set encapsulation when flows are configured\n"); |
---|
1265 | | - return -EOPNOTSUPP; |
---|
1266 | | - } |
---|
1267 | | - |
---|
1268 | | - esw_destroy_offloads_fast_fdb_table(esw); |
---|
1269 | | - |
---|
1270 | | - esw->offloads.encap = encap; |
---|
1271 | | - err = esw_create_offloads_fast_fdb_table(esw); |
---|
1272 | | - if (err) { |
---|
1273 | | - esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err); |
---|
1274 | | - esw->offloads.encap = !encap; |
---|
1275 | | - (void)esw_create_offloads_fast_fdb_table(esw); |
---|
1276 | | - } |
---|
| 2626 | + err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); |
---|
| 2627 | +unlock: |
---|
| 2628 | + up_write(&esw->mode_lock); |
---|
1277 | 2629 | return err; |
---|
1278 | 2630 | } |
---|
1279 | 2631 | |
---|
1280 | | -int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) |
---|
| 2632 | +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, |
---|
| 2633 | + enum devlink_eswitch_encap_mode encap, |
---|
| 2634 | + struct netlink_ext_ack *extack) |
---|
1281 | 2635 | { |
---|
1282 | 2636 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
---|
1283 | | - struct mlx5_eswitch *esw = dev->priv.eswitch; |
---|
| 2637 | + struct mlx5_eswitch *esw; |
---|
1284 | 2638 | int err; |
---|
1285 | 2639 | |
---|
1286 | | - err = mlx5_devlink_eswitch_check(devlink); |
---|
| 2640 | + esw = mlx5_devlink_eswitch_get(devlink); |
---|
| 2641 | + if (IS_ERR(esw)) |
---|
| 2642 | + return PTR_ERR(esw); |
---|
| 2643 | + |
---|
| 2644 | + down_write(&esw->mode_lock); |
---|
| 2645 | + err = eswitch_devlink_esw_mode_check(esw); |
---|
1287 | 2646 | if (err) |
---|
1288 | | - return err; |
---|
| 2647 | + goto unlock; |
---|
| 2648 | + |
---|
| 2649 | + if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && |
---|
| 2650 | + (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || |
---|
| 2651 | + !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { |
---|
| 2652 | + err = -EOPNOTSUPP; |
---|
| 2653 | + goto unlock; |
---|
| 2654 | + } |
---|
| 2655 | + |
---|
| 2656 | + if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { |
---|
| 2657 | + err = -EOPNOTSUPP; |
---|
| 2658 | + goto unlock; |
---|
| 2659 | + } |
---|
| 2660 | + |
---|
| 2661 | + if (esw->mode == MLX5_ESWITCH_LEGACY) { |
---|
| 2662 | + esw->offloads.encap = encap; |
---|
| 2663 | + goto unlock; |
---|
| 2664 | + } |
---|
| 2665 | + |
---|
| 2666 | + if (esw->offloads.encap == encap) |
---|
| 2667 | + goto unlock; |
---|
| 2668 | + |
---|
| 2669 | + if (atomic64_read(&esw->offloads.num_flows) > 0) { |
---|
| 2670 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 2671 | + "Can't set encapsulation when flows are configured"); |
---|
| 2672 | + err = -EOPNOTSUPP; |
---|
| 2673 | + goto unlock; |
---|
| 2674 | + } |
---|
| 2675 | + |
---|
| 2676 | + esw_destroy_offloads_fdb_tables(esw); |
---|
| 2677 | + |
---|
| 2678 | + esw->offloads.encap = encap; |
---|
| 2679 | + |
---|
| 2680 | + err = esw_create_offloads_fdb_tables(esw); |
---|
| 2681 | + |
---|
| 2682 | + if (err) { |
---|
| 2683 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 2684 | + "Failed re-creating fast FDB table"); |
---|
| 2685 | + esw->offloads.encap = !encap; |
---|
| 2686 | + (void)esw_create_offloads_fdb_tables(esw); |
---|
| 2687 | + } |
---|
| 2688 | + |
---|
| 2689 | +unlock: |
---|
| 2690 | + up_write(&esw->mode_lock); |
---|
| 2691 | + return err; |
---|
| 2692 | +} |
---|
| 2693 | + |
---|
| 2694 | +int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, |
---|
| 2695 | + enum devlink_eswitch_encap_mode *encap) |
---|
| 2696 | +{ |
---|
| 2697 | + struct mlx5_eswitch *esw; |
---|
| 2698 | + int err; |
---|
| 2699 | + |
---|
| 2700 | + esw = mlx5_devlink_eswitch_get(devlink); |
---|
| 2701 | + if (IS_ERR(esw)) |
---|
| 2702 | + return PTR_ERR(esw); |
---|
| 2703 | + |
---|
| 2704 | + |
---|
| 2705 | + down_write(&esw->mode_lock); |
---|
| 2706 | + err = eswitch_devlink_esw_mode_check(esw); |
---|
| 2707 | + if (err) |
---|
| 2708 | + goto unlock; |
---|
1289 | 2709 | |
---|
1290 | 2710 | *encap = esw->offloads.encap; |
---|
1291 | | - return 0; |
---|
| 2711 | +unlock: |
---|
| 2712 | + up_write(&esw->mode_lock); |
---|
| 2713 | + return err; |
---|
1292 | 2714 | } |
---|
1293 | 2715 | |
---|
1294 | | -void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, |
---|
1295 | | - int vport_index, |
---|
1296 | | - struct mlx5_eswitch_rep_if *__rep_if, |
---|
1297 | | - u8 rep_type) |
---|
| 2716 | +static bool |
---|
| 2717 | +mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num) |
---|
1298 | 2718 | { |
---|
1299 | | - struct mlx5_esw_offload *offloads = &esw->offloads; |
---|
1300 | | - struct mlx5_eswitch_rep_if *rep_if; |
---|
| 2719 | + /* Currently, only ECPF based device has representor for host PF. */ |
---|
| 2720 | + if (vport_num == MLX5_VPORT_PF && |
---|
| 2721 | + !mlx5_core_is_ecpf_esw_manager(esw->dev)) |
---|
| 2722 | + return false; |
---|
1301 | 2723 | |
---|
1302 | | - rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type]; |
---|
| 2724 | + if (vport_num == MLX5_VPORT_ECPF && |
---|
| 2725 | + !mlx5_ecpf_vport_exists(esw->dev)) |
---|
| 2726 | + return false; |
---|
1303 | 2727 | |
---|
1304 | | - rep_if->load = __rep_if->load; |
---|
1305 | | - rep_if->unload = __rep_if->unload; |
---|
1306 | | - rep_if->get_proto_dev = __rep_if->get_proto_dev; |
---|
1307 | | - rep_if->priv = __rep_if->priv; |
---|
1308 | | - |
---|
1309 | | - rep_if->valid = true; |
---|
| 2728 | + return true; |
---|
1310 | 2729 | } |
---|
1311 | | -EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep); |
---|
1312 | 2730 | |
---|
1313 | | -void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, |
---|
1314 | | - int vport_index, u8 rep_type) |
---|
| 2731 | +void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, |
---|
| 2732 | + const struct mlx5_eswitch_rep_ops *ops, |
---|
| 2733 | + u8 rep_type) |
---|
1315 | 2734 | { |
---|
1316 | | - struct mlx5_esw_offload *offloads = &esw->offloads; |
---|
| 2735 | + struct mlx5_eswitch_rep_data *rep_data; |
---|
1317 | 2736 | struct mlx5_eswitch_rep *rep; |
---|
| 2737 | + int i; |
---|
1318 | 2738 | |
---|
1319 | | - rep = &offloads->vport_reps[vport_index]; |
---|
1320 | | - |
---|
1321 | | - if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) |
---|
1322 | | - rep->rep_if[rep_type].unload(rep); |
---|
1323 | | - |
---|
1324 | | - rep->rep_if[rep_type].valid = false; |
---|
| 2739 | + esw->offloads.rep_ops[rep_type] = ops; |
---|
| 2740 | + mlx5_esw_for_all_reps(esw, i, rep) { |
---|
| 2741 | + if (likely(mlx5_eswitch_vport_has_rep(esw, i))) { |
---|
| 2742 | + rep_data = &rep->rep_data[rep_type]; |
---|
| 2743 | + atomic_set(&rep_data->state, REP_REGISTERED); |
---|
| 2744 | + } |
---|
| 2745 | + } |
---|
1325 | 2746 | } |
---|
1326 | | -EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep); |
---|
| 2747 | +EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); |
---|
| 2748 | + |
---|
| 2749 | +void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) |
---|
| 2750 | +{ |
---|
| 2751 | + struct mlx5_eswitch_rep *rep; |
---|
| 2752 | + int i; |
---|
| 2753 | + |
---|
| 2754 | + if (esw->mode == MLX5_ESWITCH_OFFLOADS) |
---|
| 2755 | + __unload_reps_all_vport(esw, rep_type); |
---|
| 2756 | + |
---|
| 2757 | + mlx5_esw_for_all_reps(esw, i, rep) |
---|
| 2758 | + atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); |
---|
| 2759 | +} |
---|
| 2760 | +EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); |
---|
1327 | 2761 | |
---|
1328 | 2762 | void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) |
---|
1329 | 2763 | { |
---|
1330 | | -#define UPLINK_REP_INDEX 0 |
---|
1331 | | - struct mlx5_esw_offload *offloads = &esw->offloads; |
---|
1332 | 2764 | struct mlx5_eswitch_rep *rep; |
---|
1333 | 2765 | |
---|
1334 | | - rep = &offloads->vport_reps[UPLINK_REP_INDEX]; |
---|
1335 | | - return rep->rep_if[rep_type].priv; |
---|
| 2766 | + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); |
---|
| 2767 | + return rep->rep_data[rep_type].priv; |
---|
1336 | 2768 | } |
---|
1337 | 2769 | |
---|
1338 | 2770 | void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, |
---|
1339 | | - int vport, |
---|
| 2771 | + u16 vport, |
---|
1340 | 2772 | u8 rep_type) |
---|
1341 | 2773 | { |
---|
1342 | | - struct mlx5_esw_offload *offloads = &esw->offloads; |
---|
1343 | 2774 | struct mlx5_eswitch_rep *rep; |
---|
1344 | 2775 | |
---|
1345 | | - if (vport == FDB_UPLINK_VPORT) |
---|
1346 | | - vport = UPLINK_REP_INDEX; |
---|
| 2776 | + rep = mlx5_eswitch_get_rep(esw, vport); |
---|
1347 | 2777 | |
---|
1348 | | - rep = &offloads->vport_reps[vport]; |
---|
1349 | | - |
---|
1350 | | - if (rep->rep_if[rep_type].valid && |
---|
1351 | | - rep->rep_if[rep_type].get_proto_dev) |
---|
1352 | | - return rep->rep_if[rep_type].get_proto_dev(rep); |
---|
| 2778 | + if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && |
---|
| 2779 | + esw->offloads.rep_ops[rep_type]->get_proto_dev) |
---|
| 2780 | + return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep); |
---|
1353 | 2781 | return NULL; |
---|
1354 | 2782 | } |
---|
1355 | 2783 | EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); |
---|
1356 | 2784 | |
---|
1357 | 2785 | void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) |
---|
1358 | 2786 | { |
---|
1359 | | - return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type); |
---|
| 2787 | + return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); |
---|
1360 | 2788 | } |
---|
1361 | 2789 | EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); |
---|
1362 | 2790 | |
---|
1363 | 2791 | struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, |
---|
1364 | | - int vport) |
---|
| 2792 | + u16 vport) |
---|
1365 | 2793 | { |
---|
1366 | | - return &esw->offloads.vport_reps[vport]; |
---|
| 2794 | + return mlx5_eswitch_get_rep(esw, vport); |
---|
1367 | 2795 | } |
---|
1368 | 2796 | EXPORT_SYMBOL(mlx5_eswitch_vport_rep); |
---|
| 2797 | + |
---|
| 2798 | +bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num) |
---|
| 2799 | +{ |
---|
| 2800 | + return vport_num >= MLX5_VPORT_FIRST_VF && |
---|
| 2801 | + vport_num <= esw->dev->priv.sriov.max_vfs; |
---|
| 2802 | +} |
---|
| 2803 | + |
---|
| 2804 | +bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) |
---|
| 2805 | +{ |
---|
| 2806 | + return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); |
---|
| 2807 | +} |
---|
| 2808 | +EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); |
---|
| 2809 | + |
---|
| 2810 | +bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) |
---|
| 2811 | +{ |
---|
| 2812 | + return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); |
---|
| 2813 | +} |
---|
| 2814 | +EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); |
---|
| 2815 | + |
---|
| 2816 | +u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, |
---|
| 2817 | + u16 vport_num) |
---|
| 2818 | +{ |
---|
| 2819 | + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); |
---|
| 2820 | + |
---|
| 2821 | + if (WARN_ON_ONCE(IS_ERR(vport))) |
---|
| 2822 | + return 0; |
---|
| 2823 | + |
---|
| 2824 | + return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); |
---|
| 2825 | +} |
---|
| 2826 | +EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); |
---|