.. | .. |
---|
16 | 16 | #include <net/tc_act/tc_skbedit.h> |
---|
17 | 17 | #include <net/tc_act/tc_mirred.h> |
---|
18 | 18 | #include <net/tc_act/tc_vlan.h> |
---|
| 19 | +#include <net/tc_act/tc_pedit.h> |
---|
19 | 20 | #include <net/tc_act/tc_tunnel_key.h> |
---|
| 21 | +#include <net/vxlan.h> |
---|
20 | 22 | |
---|
21 | 23 | #include "bnxt_hsi.h" |
---|
22 | 24 | #include "bnxt.h" |
---|
.. | .. |
---|
36 | 38 | #define is_vid_exactmatch(vlan_tci_mask) \ |
---|
37 | 39 | ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) |
---|
38 | 40 | |
---|
| 41 | +static bool is_wildcard(void *mask, int len); |
---|
| 42 | +static bool is_exactmatch(void *mask, int len); |
---|
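
The two helpers above are only forward-declared in this hunk; their definitions live elsewhere in the file. A minimal sketch of the semantics the pedit call sites below rely on — an all-zero mask is a wildcard, an all-ones mask an exact match (the names match the declarations; the bodies are an assumption):

```c
static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return false;
	return true;
}

static bool is_exactmatch(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;
	return true;
}
```
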
39 | 43 | /* Return the dst fid of the func for flow forwarding |
---|
40 | 44 | * For PFs: src_fid is the fid of the PF |
---|
41 | 45 | * For VF-reps: src_fid is the fid of the VF |
---|
.. | .. |
---|
45 | 49 | struct bnxt *bp; |
---|
46 | 50 | |
---|
47 | 51 | /* check if dev belongs to the same switch */ |
---|
48 | | - if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) { |
---|
49 | | - netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", |
---|
| 52 | + if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { |
---|
| 53 | + netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n", |
---|
50 | 54 | dev->ifindex); |
---|
51 | 55 | return BNXT_FID_INVALID; |
---|
52 | 56 | } |
---|
.. | .. |
---|
61 | 65 | |
---|
62 | 66 | static int bnxt_tc_parse_redir(struct bnxt *bp, |
---|
63 | 67 | struct bnxt_tc_actions *actions, |
---|
64 | | - const struct tc_action *tc_act) |
---|
| 68 | + const struct flow_action_entry *act) |
---|
65 | 69 | { |
---|
66 | | - struct net_device *dev = tcf_mirred_dev(tc_act); |
---|
| 70 | + struct net_device *dev = act->dev; |
---|
67 | 71 | |
---|
68 | 72 | if (!dev) { |
---|
69 | | - netdev_info(bp->dev, "no dev in mirred action"); |
---|
| 73 | + netdev_info(bp->dev, "no dev in mirred action\n"); |
---|
70 | 74 | return -EINVAL; |
---|
71 | 75 | } |
---|
72 | 76 | |
---|
.. | .. |
---|
77 | 81 | |
---|
78 | 82 | static int bnxt_tc_parse_vlan(struct bnxt *bp, |
---|
79 | 83 | struct bnxt_tc_actions *actions, |
---|
80 | | - const struct tc_action *tc_act) |
---|
| 84 | + const struct flow_action_entry *act) |
---|
81 | 85 | { |
---|
82 | | - switch (tcf_vlan_action(tc_act)) { |
---|
83 | | - case TCA_VLAN_ACT_POP: |
---|
| 86 | + switch (act->id) { |
---|
| 87 | + case FLOW_ACTION_VLAN_POP: |
---|
84 | 88 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; |
---|
85 | 89 | break; |
---|
86 | | - case TCA_VLAN_ACT_PUSH: |
---|
| 90 | + case FLOW_ACTION_VLAN_PUSH: |
---|
87 | 91 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; |
---|
88 | | - actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); |
---|
89 | | - actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); |
---|
| 92 | + actions->push_vlan_tci = htons(act->vlan.vid); |
---|
| 93 | + actions->push_vlan_tpid = act->vlan.proto; |
---|
90 | 94 | break; |
---|
91 | 95 | default: |
---|
92 | 96 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
96 | 100 | |
---|
97 | 101 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, |
---|
98 | 102 | struct bnxt_tc_actions *actions, |
---|
99 | | - const struct tc_action *tc_act) |
---|
| 103 | + const struct flow_action_entry *act) |
---|
100 | 104 | { |
---|
101 | | - struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); |
---|
102 | | - struct ip_tunnel_key *tun_key = &tun_info->key; |
---|
| 105 | + const struct ip_tunnel_info *tun_info = act->tunnel; |
---|
| 106 | + const struct ip_tunnel_key *tun_key = &tun_info->key; |
---|
103 | 107 | |
---|
104 | 108 | if (ip_tunnel_info_af(tun_info) != AF_INET) { |
---|
105 | | - netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); |
---|
| 109 | + netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n"); |
---|
106 | 110 | return -EOPNOTSUPP; |
---|
107 | 111 | } |
---|
108 | 112 | |
---|
.. | .. |
---|
111 | 115 | return 0; |
---|
112 | 116 | } |
---|
113 | 117 | |
---|
| 118 | +/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes |
---|
| 119 | + * each(u32). |
---|
| 120 | + * This routine consolidates such multiple unaligned values into one |
---|
| 121 | + * field each for Key & Mask (for src and dst macs separately) |
---|
| 122 | + * For example, |
---|
| 123 | + * Mask/Key Offset Iteration |
---|
| 124 | + * ========== ====== ========= |
---|
| 125 | + * dst mac 0xffffffff 0 1 |
---|
| 126 | + * dst mac 0x0000ffff 4 2 |
---|
| 127 | + * |
---|
| 128 | + * src mac 0xffff0000 4 1 |
---|
| 129 | + * src mac 0xffffffff 8 2 |
---|
| 130 | + * |
---|
| 131 | + * The above combination coming from the stack will be consolidated as |
---|
| 132 | + * Mask/Key |
---|
| 133 | + * ============== |
---|
| 134 | + * src mac: 0xffffffffffff |
---|
| 135 | + * dst mac: 0xffffffffffff |
---|
| 136 | + */ |
---|
| 137 | +static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask, |
---|
| 138 | + u8 *actual_key, u8 *actual_mask) |
---|
| 139 | +{ |
---|
| 140 | + u32 key = get_unaligned((u32 *)actual_key); |
---|
| 141 | + u32 mask = get_unaligned((u32 *)actual_mask); |
---|
| 142 | + |
---|
| 143 | + part_key &= part_mask; |
---|
| 144 | + part_key |= key & ~part_mask; |
---|
| 145 | + |
---|
| 146 | + put_unaligned(mask | part_mask, (u32 *)actual_mask); |
---|
| 147 | + put_unaligned(part_key, (u32 *)actual_key); |
---|
| 148 | +} |
---|
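
A small standalone sketch (hypothetical userspace code, not part of the patch) that replays the two dst-mac iterations from the comment above, with memcpy() standing in for get_unaligned()/put_unaligned(); on a little-endian host it prints the consolidated six-byte key and an all-ones mask:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same merge logic as bnxt_set_l2_key_mask() above. */
static void set_l2_key_mask(uint32_t part_key, uint32_t part_mask,
			    uint8_t *actual_key, uint8_t *actual_mask)
{
	uint32_t key, mask;

	memcpy(&key, actual_key, sizeof(key));
	memcpy(&mask, actual_mask, sizeof(mask));

	part_key &= part_mask;		/* keep the bytes this iteration sets */
	part_key |= key & ~part_mask;	/* merge with what is already there  */

	mask |= part_mask;
	memcpy(actual_mask, &mask, sizeof(mask));
	memcpy(actual_key, &part_key, sizeof(part_key));
}

int main(void)
{
	/* 8 bytes so the second 4-byte write at offset 4 fits */
	uint8_t key[8] = { 0 }, mask[8] = { 0 };
	int i;

	set_l2_key_mask(0x44332211, 0xffffffff, &key[0], &mask[0]);  /* iter 1 */
	set_l2_key_mask(0x00006655, 0x0000ffff, &key[4], &mask[4]);  /* iter 2 */

	printf("dmac key:  ");
	for (i = 0; i < 6; i++)
		printf("%02x ", key[i]);
	printf("\ndmac mask: ");
	for (i = 0; i < 6; i++)
		printf("%02x ", mask[i]);
	printf("\n");
	return 0;
}
```
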
| 149 | + |
---|
| 150 | +static int |
---|
| 151 | +bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions, |
---|
| 152 | + u16 *eth_addr, u16 *eth_addr_mask) |
---|
| 153 | +{ |
---|
| 154 | + u16 *p; |
---|
| 155 | + int j; |
---|
| 156 | + |
---|
| 157 | + if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask))) |
---|
| 158 | + return -EINVAL; |
---|
| 159 | + |
---|
| 160 | + if (!is_wildcard(ð_addr_mask[0], ETH_ALEN)) { |
---|
| 161 | + if (!is_exactmatch(ð_addr_mask[0], ETH_ALEN)) |
---|
| 162 | + return -EINVAL; |
---|
| 163 | + /* FW expects dmac to be in u16 array format */ |
---|
| 164 | + p = eth_addr; |
---|
| 165 | + for (j = 0; j < 3; j++) |
---|
| 166 | + actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j)); |
---|
| 167 | + } |
---|
| 168 | + |
---|
| 169 | + if (!is_wildcard(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) { |
---|
| 170 | + if (!is_exactmatch(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) |
---|
| 171 | + return -EINVAL; |
---|
| 172 | + /* FW expects smac to be in u16 array format */ |
---|
| 173 | + p = ð_addr[ETH_ALEN / 2]; |
---|
| 174 | + for (j = 0; j < 3; j++) |
---|
| 175 | + actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j)); |
---|
| 176 | + } |
---|
| 177 | + |
---|
| 178 | + return 0; |
---|
| 179 | +} |
---|
| 180 | + |
---|
| 181 | +static int |
---|
| 182 | +bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions, |
---|
| 183 | + struct flow_action_entry *act, int act_idx, u8 *eth_addr, |
---|
| 184 | + u8 *eth_addr_mask) |
---|
| 185 | +{ |
---|
| 186 | + size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr); |
---|
| 187 | + size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr); |
---|
| 188 | + u32 mask, val, offset, idx; |
---|
| 189 | + u8 htype; |
---|
| 190 | + |
---|
| 191 | + offset = act->mangle.offset; |
---|
| 192 | + htype = act->mangle.htype; |
---|
| 193 | + mask = ~act->mangle.mask; |
---|
| 194 | + val = act->mangle.val; |
---|
| 195 | + |
---|
| 196 | + switch (htype) { |
---|
| 197 | + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: |
---|
| 198 | + if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) { |
---|
| 199 | + netdev_err(bp->dev, |
---|
| 200 | + "%s: eth_hdr: Invalid pedit field\n", |
---|
| 201 | + __func__); |
---|
| 202 | + return -EINVAL; |
---|
| 203 | + } |
---|
| 204 | + actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE; |
---|
| 205 | + |
---|
| 206 | + bnxt_set_l2_key_mask(val, mask, ð_addr[offset], |
---|
| 207 | + ð_addr_mask[offset]); |
---|
| 208 | + break; |
---|
| 209 | + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: |
---|
| 210 | + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; |
---|
| 211 | + actions->nat.l3_is_ipv4 = true; |
---|
| 212 | + if (offset == offsetof(struct iphdr, saddr)) { |
---|
| 213 | + actions->nat.src_xlate = true; |
---|
| 214 | + actions->nat.l3.ipv4.saddr.s_addr = htonl(val); |
---|
| 215 | + } else if (offset == offsetof(struct iphdr, daddr)) { |
---|
| 216 | + actions->nat.src_xlate = false; |
---|
| 217 | + actions->nat.l3.ipv4.daddr.s_addr = htonl(val); |
---|
| 218 | + } else { |
---|
| 219 | + netdev_err(bp->dev, |
---|
| 220 | + "%s: IPv4_hdr: Invalid pedit field\n", |
---|
| 221 | + __func__); |
---|
| 222 | + return -EINVAL; |
---|
| 223 | + } |
---|
| 224 | + |
---|
| 225 | + netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n", |
---|
| 226 | + actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr, |
---|
| 227 | + &actions->nat.l3.ipv4.daddr); |
---|
| 228 | + break; |
---|
| 229 | + |
---|
| 230 | + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: |
---|
| 231 | + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; |
---|
| 232 | + actions->nat.l3_is_ipv4 = false; |
---|
| 233 | + if (offset >= offsetof(struct ipv6hdr, saddr) && |
---|
| 234 | + offset < offset_of_ip6_daddr) { |
---|
| 235 | + /* 16-byte IPv6 address comes in 4 iterations of |
---|
| 236 | + * 4-byte chunks each |
---|
| 237 | + */ |
---|
| 238 | + actions->nat.src_xlate = true; |
---|
| 239 | + idx = (offset - offset_of_ip6_saddr) / 4; |
---|
| 240 | + /* First 4 bytes will be copied to idx 0 and so on */ |
---|
| 241 | + actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); |
---|
| 242 | + } else if (offset >= offset_of_ip6_daddr && |
---|
| 243 | + offset < offset_of_ip6_daddr + 16) { |
---|
| 244 | + actions->nat.src_xlate = false; |
---|
| 245 | + idx = (offset - offset_of_ip6_daddr) / 4; |
---|
| 246 | + actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val); |
---|
| 247 | + } else { |
---|
| 248 | + netdev_err(bp->dev, |
---|
| 249 | + "%s: IPv6_hdr: Invalid pedit field\n", |
---|
| 250 | + __func__); |
---|
| 251 | + return -EINVAL; |
---|
| 252 | + } |
---|
| 253 | + break; |
---|
| 254 | + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: |
---|
| 255 | + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: |
---|
| 256 | + /* HW does not support L4 rewrite alone without L3 |
---|
| 257 | + * rewrite |
---|
| 258 | + */ |
---|
| 259 | + if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) { |
---|
| 260 | + netdev_err(bp->dev, |
---|
| 261 | + "Need to specify L3 rewrite as well\n"); |
---|
| 262 | + return -EINVAL; |
---|
| 263 | + } |
---|
| 264 | + if (actions->nat.src_xlate) |
---|
| 265 | + actions->nat.l4.ports.sport = htons(val); |
---|
| 266 | + else |
---|
| 267 | + actions->nat.l4.ports.dport = htons(val); |
---|
| 268 | + netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n", |
---|
| 269 | + actions->nat.l4.ports.sport, |
---|
| 270 | + actions->nat.l4.ports.dport); |
---|
| 271 | + break; |
---|
| 272 | + default: |
---|
| 273 | + netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n", |
---|
| 274 | + __func__); |
---|
| 275 | + return -EINVAL; |
---|
| 276 | + } |
---|
| 277 | + return 0; |
---|
| 278 | +} |
---|
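
The IPv6 branch above turns each 4-byte mangle chunk into an index into s6_addr32[]. A standalone sketch of that arithmetic (hypothetical demo code; the offsets 8 and 24 are offsetof(struct ipv6hdr, saddr/daddr) in the standard header layout):

```c
#include <stdio.h>

int main(void)
{
	const unsigned int saddr_off = 8;	/* offsetof(struct ipv6hdr, saddr) */
	const unsigned int daddr_off = 24;	/* offsetof(struct ipv6hdr, daddr) */
	unsigned int off;

	for (off = saddr_off; off < daddr_off + 16; off += 4) {
		if (off < daddr_off)
			printf("pedit offset %2u -> saddr.s6_addr32[%u]\n",
			       off, (off - saddr_off) / 4);
		else
			printf("pedit offset %2u -> daddr.s6_addr32[%u]\n",
			       off, (off - daddr_off) / 4);
	}
	return 0;
}
```
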
| 279 | + |
---|
114 | 280 | static int bnxt_tc_parse_actions(struct bnxt *bp, |
---|
115 | 281 | struct bnxt_tc_actions *actions, |
---|
116 | | - struct tcf_exts *tc_exts) |
---|
| 282 | + struct flow_action *flow_action, |
---|
| 283 | + struct netlink_ext_ack *extack) |
---|
117 | 284 | { |
---|
118 | | - const struct tc_action *tc_act; |
---|
| 285 | + /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by |
---|
| 286 | + * smac (6 bytes) if rewrite of both is specified, otherwise either |
---|
| 287 | + * dmac or smac |
---|
| 288 | + */ |
---|
| 289 | + u16 eth_addr_mask[ETH_ALEN] = { 0 }; |
---|
| 290 | + /* Used to store the L2 rewrite key for dmac (6 bytes) followed by |
---|
| 291 | + * smac (6 bytes) if rewrite of both is specified, otherwise either |
---|
| 292 | + * dmac or smac |
---|
| 293 | + */ |
---|
| 294 | + u16 eth_addr[ETH_ALEN] = { 0 }; |
---|
| 295 | + struct flow_action_entry *act; |
---|
119 | 296 | int i, rc; |
---|
120 | 297 | |
---|
121 | | - if (!tcf_exts_has_actions(tc_exts)) { |
---|
122 | | - netdev_info(bp->dev, "no actions"); |
---|
| 298 | + if (!flow_action_has_entries(flow_action)) { |
---|
| 299 | + netdev_info(bp->dev, "no actions\n"); |
---|
123 | 300 | return -EINVAL; |
---|
124 | 301 | } |
---|
125 | 302 | |
---|
126 | | - tcf_exts_for_each_action(i, tc_act, tc_exts) { |
---|
127 | | - /* Drop action */ |
---|
128 | | - if (is_tcf_gact_shot(tc_act)) { |
---|
| 303 | + if (!flow_action_basic_hw_stats_check(flow_action, extack)) |
---|
| 304 | + return -EOPNOTSUPP; |
---|
| 305 | + |
---|
| 306 | + flow_action_for_each(i, act, flow_action) { |
---|
| 307 | + switch (act->id) { |
---|
| 308 | + case FLOW_ACTION_DROP: |
---|
129 | 309 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; |
---|
130 | 310 | return 0; /* don't bother with other actions */ |
---|
131 | | - } |
---|
132 | | - |
---|
133 | | - /* Redirect action */ |
---|
134 | | - if (is_tcf_mirred_egress_redirect(tc_act)) { |
---|
135 | | - rc = bnxt_tc_parse_redir(bp, actions, tc_act); |
---|
| 311 | + case FLOW_ACTION_REDIRECT: |
---|
| 312 | + rc = bnxt_tc_parse_redir(bp, actions, act); |
---|
136 | 313 | if (rc) |
---|
137 | 314 | return rc; |
---|
138 | | - continue; |
---|
139 | | - } |
---|
140 | | - |
---|
141 | | - /* Push/pop VLAN */ |
---|
142 | | - if (is_tcf_vlan(tc_act)) { |
---|
143 | | - rc = bnxt_tc_parse_vlan(bp, actions, tc_act); |
---|
| 315 | + break; |
---|
| 316 | + case FLOW_ACTION_VLAN_POP: |
---|
| 317 | + case FLOW_ACTION_VLAN_PUSH: |
---|
| 318 | + case FLOW_ACTION_VLAN_MANGLE: |
---|
| 319 | + rc = bnxt_tc_parse_vlan(bp, actions, act); |
---|
144 | 320 | if (rc) |
---|
145 | 321 | return rc; |
---|
146 | | - continue; |
---|
147 | | - } |
---|
148 | | - |
---|
149 | | - /* Tunnel encap */ |
---|
150 | | - if (is_tcf_tunnel_set(tc_act)) { |
---|
151 | | - rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); |
---|
| 322 | + break; |
---|
| 323 | + case FLOW_ACTION_TUNNEL_ENCAP: |
---|
| 324 | + rc = bnxt_tc_parse_tunnel_set(bp, actions, act); |
---|
152 | 325 | if (rc) |
---|
153 | 326 | return rc; |
---|
154 | | - continue; |
---|
155 | | - } |
---|
156 | | - |
---|
157 | | - /* Tunnel decap */ |
---|
158 | | - if (is_tcf_tunnel_release(tc_act)) { |
---|
| 327 | + break; |
---|
| 328 | + case FLOW_ACTION_TUNNEL_DECAP: |
---|
159 | 329 | actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; |
---|
160 | | - continue; |
---|
| 330 | + break; |
---|
| 331 | + /* Packet edit: L2 rewrite, NAT, NAPT */ |
---|
| 332 | + case FLOW_ACTION_MANGLE: |
---|
| 333 | + rc = bnxt_tc_parse_pedit(bp, actions, act, i, |
---|
| 334 | + (u8 *)eth_addr, |
---|
| 335 | + (u8 *)eth_addr_mask); |
---|
| 336 | + if (rc) |
---|
| 337 | + return rc; |
---|
| 338 | + break; |
---|
| 339 | + default: |
---|
| 340 | + break; |
---|
161 | 341 | } |
---|
| 342 | + } |
---|
| 343 | + |
---|
| 344 | + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { |
---|
| 345 | + rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr, |
---|
| 346 | + eth_addr_mask); |
---|
| 347 | + if (rc) |
---|
| 348 | + return rc; |
---|
162 | 349 | } |
---|
163 | 350 | |
---|
164 | 351 | if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { |
---|
.. | .. |
---|
177 | 364 | return 0; |
---|
178 | 365 | } |
---|
179 | 366 | |
---|
180 | | -#define GET_KEY(flow_cmd, key_type) \ |
---|
181 | | - skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ |
---|
182 | | - (flow_cmd)->key) |
---|
183 | | -#define GET_MASK(flow_cmd, key_type) \ |
---|
184 | | - skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ |
---|
185 | | - (flow_cmd)->mask) |
---|
186 | | - |
---|
187 | 367 | static int bnxt_tc_parse_flow(struct bnxt *bp, |
---|
188 | | - struct tc_cls_flower_offload *tc_flow_cmd, |
---|
| 368 | + struct flow_cls_offload *tc_flow_cmd, |
---|
189 | 369 | struct bnxt_tc_flow *flow) |
---|
190 | 370 | { |
---|
191 | | - struct flow_dissector *dissector = tc_flow_cmd->dissector; |
---|
192 | | - u16 addr_type = 0; |
---|
| 371 | + struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd); |
---|
| 372 | + struct flow_dissector *dissector = rule->match.dissector; |
---|
193 | 373 | |
---|
194 | 374 | /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ |
---|
195 | 375 | if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || |
---|
196 | 376 | (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { |
---|
197 | | - netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x", |
---|
| 377 | + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n", |
---|
198 | 378 | dissector->used_keys); |
---|
199 | 379 | return -EOPNOTSUPP; |
---|
200 | 380 | } |
---|
201 | 381 | |
---|
202 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { |
---|
203 | | - struct flow_dissector_key_control *key = |
---|
204 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); |
---|
| 382 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { |
---|
| 383 | + struct flow_match_basic match; |
---|
205 | 384 | |
---|
206 | | - addr_type = key->addr_type; |
---|
207 | | - } |
---|
| 385 | + flow_rule_match_basic(rule, &match); |
---|
| 386 | + flow->l2_key.ether_type = match.key->n_proto; |
---|
| 387 | + flow->l2_mask.ether_type = match.mask->n_proto; |
---|
208 | 388 | |
---|
209 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
---|
210 | | - struct flow_dissector_key_basic *key = |
---|
211 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); |
---|
212 | | - struct flow_dissector_key_basic *mask = |
---|
213 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); |
---|
214 | | - |
---|
215 | | - flow->l2_key.ether_type = key->n_proto; |
---|
216 | | - flow->l2_mask.ether_type = mask->n_proto; |
---|
217 | | - |
---|
218 | | - if (key->n_proto == htons(ETH_P_IP) || |
---|
219 | | - key->n_proto == htons(ETH_P_IPV6)) { |
---|
220 | | - flow->l4_key.ip_proto = key->ip_proto; |
---|
221 | | - flow->l4_mask.ip_proto = mask->ip_proto; |
---|
| 389 | + if (match.key->n_proto == htons(ETH_P_IP) || |
---|
| 390 | + match.key->n_proto == htons(ETH_P_IPV6)) { |
---|
| 391 | + flow->l4_key.ip_proto = match.key->ip_proto; |
---|
| 392 | + flow->l4_mask.ip_proto = match.mask->ip_proto; |
---|
222 | 393 | } |
---|
223 | 394 | } |
---|
224 | 395 | |
---|
225 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
---|
226 | | - struct flow_dissector_key_eth_addrs *key = |
---|
227 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); |
---|
228 | | - struct flow_dissector_key_eth_addrs *mask = |
---|
229 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); |
---|
| 396 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
---|
| 397 | + struct flow_match_eth_addrs match; |
---|
230 | 398 | |
---|
| 399 | + flow_rule_match_eth_addrs(rule, &match); |
---|
231 | 400 | flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; |
---|
232 | | - ether_addr_copy(flow->l2_key.dmac, key->dst); |
---|
233 | | - ether_addr_copy(flow->l2_mask.dmac, mask->dst); |
---|
234 | | - ether_addr_copy(flow->l2_key.smac, key->src); |
---|
235 | | - ether_addr_copy(flow->l2_mask.smac, mask->src); |
---|
| 401 | + ether_addr_copy(flow->l2_key.dmac, match.key->dst); |
---|
| 402 | + ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); |
---|
| 403 | + ether_addr_copy(flow->l2_key.smac, match.key->src); |
---|
| 404 | + ether_addr_copy(flow->l2_mask.smac, match.mask->src); |
---|
236 | 405 | } |
---|
237 | 406 | |
---|
238 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) { |
---|
239 | | - struct flow_dissector_key_vlan *key = |
---|
240 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); |
---|
241 | | - struct flow_dissector_key_vlan *mask = |
---|
242 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); |
---|
| 407 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { |
---|
| 408 | + struct flow_match_vlan match; |
---|
243 | 409 | |
---|
| 410 | + flow_rule_match_vlan(rule, &match); |
---|
244 | 411 | flow->l2_key.inner_vlan_tci = |
---|
245 | | - cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority)); |
---|
| 412 | + cpu_to_be16(VLAN_TCI(match.key->vlan_id, |
---|
| 413 | + match.key->vlan_priority)); |
---|
246 | 414 | flow->l2_mask.inner_vlan_tci = |
---|
247 | | - cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority))); |
---|
| 415 | + cpu_to_be16((VLAN_TCI(match.mask->vlan_id, |
---|
| 416 | + match.mask->vlan_priority))); |
---|
248 | 417 | flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); |
---|
249 | 418 | flow->l2_mask.inner_vlan_tpid = htons(0xffff); |
---|
250 | 419 | flow->l2_key.num_vlans = 1; |
---|
251 | 420 | } |
---|
252 | 421 | |
---|
253 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { |
---|
254 | | - struct flow_dissector_key_ipv4_addrs *key = |
---|
255 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); |
---|
256 | | - struct flow_dissector_key_ipv4_addrs *mask = |
---|
257 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); |
---|
| 422 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { |
---|
| 423 | + struct flow_match_ipv4_addrs match; |
---|
258 | 424 | |
---|
| 425 | + flow_rule_match_ipv4_addrs(rule, &match); |
---|
259 | 426 | flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; |
---|
260 | | - flow->l3_key.ipv4.daddr.s_addr = key->dst; |
---|
261 | | - flow->l3_mask.ipv4.daddr.s_addr = mask->dst; |
---|
262 | | - flow->l3_key.ipv4.saddr.s_addr = key->src; |
---|
263 | | - flow->l3_mask.ipv4.saddr.s_addr = mask->src; |
---|
264 | | - } else if (dissector_uses_key(dissector, |
---|
265 | | - FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { |
---|
266 | | - struct flow_dissector_key_ipv6_addrs *key = |
---|
267 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); |
---|
268 | | - struct flow_dissector_key_ipv6_addrs *mask = |
---|
269 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); |
---|
| 427 | + flow->l3_key.ipv4.daddr.s_addr = match.key->dst; |
---|
| 428 | + flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst; |
---|
| 429 | + flow->l3_key.ipv4.saddr.s_addr = match.key->src; |
---|
| 430 | + flow->l3_mask.ipv4.saddr.s_addr = match.mask->src; |
---|
| 431 | + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { |
---|
| 432 | + struct flow_match_ipv6_addrs match; |
---|
270 | 433 | |
---|
| 434 | + flow_rule_match_ipv6_addrs(rule, &match); |
---|
271 | 435 | flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; |
---|
272 | | - flow->l3_key.ipv6.daddr = key->dst; |
---|
273 | | - flow->l3_mask.ipv6.daddr = mask->dst; |
---|
274 | | - flow->l3_key.ipv6.saddr = key->src; |
---|
275 | | - flow->l3_mask.ipv6.saddr = mask->src; |
---|
| 436 | + flow->l3_key.ipv6.daddr = match.key->dst; |
---|
| 437 | + flow->l3_mask.ipv6.daddr = match.mask->dst; |
---|
| 438 | + flow->l3_key.ipv6.saddr = match.key->src; |
---|
| 439 | + flow->l3_mask.ipv6.saddr = match.mask->src; |
---|
276 | 440 | } |
---|
277 | 441 | |
---|
278 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) { |
---|
279 | | - struct flow_dissector_key_ports *key = |
---|
280 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); |
---|
281 | | - struct flow_dissector_key_ports *mask = |
---|
282 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); |
---|
| 442 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { |
---|
| 443 | + struct flow_match_ports match; |
---|
283 | 444 | |
---|
| 445 | + flow_rule_match_ports(rule, &match); |
---|
284 | 446 | flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; |
---|
285 | | - flow->l4_key.ports.dport = key->dst; |
---|
286 | | - flow->l4_mask.ports.dport = mask->dst; |
---|
287 | | - flow->l4_key.ports.sport = key->src; |
---|
288 | | - flow->l4_mask.ports.sport = mask->src; |
---|
| 447 | + flow->l4_key.ports.dport = match.key->dst; |
---|
| 448 | + flow->l4_mask.ports.dport = match.mask->dst; |
---|
| 449 | + flow->l4_key.ports.sport = match.key->src; |
---|
| 450 | + flow->l4_mask.ports.sport = match.mask->src; |
---|
289 | 451 | } |
---|
290 | 452 | |
---|
291 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) { |
---|
292 | | - struct flow_dissector_key_icmp *key = |
---|
293 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); |
---|
294 | | - struct flow_dissector_key_icmp *mask = |
---|
295 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); |
---|
| 453 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { |
---|
| 454 | + struct flow_match_icmp match; |
---|
296 | 455 | |
---|
| 456 | + flow_rule_match_icmp(rule, &match); |
---|
297 | 457 | flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; |
---|
298 | | - flow->l4_key.icmp.type = key->type; |
---|
299 | | - flow->l4_key.icmp.code = key->code; |
---|
300 | | - flow->l4_mask.icmp.type = mask->type; |
---|
301 | | - flow->l4_mask.icmp.code = mask->code; |
---|
| 458 | + flow->l4_key.icmp.type = match.key->type; |
---|
| 459 | + flow->l4_key.icmp.code = match.key->code; |
---|
| 460 | + flow->l4_mask.icmp.type = match.mask->type; |
---|
| 461 | + flow->l4_mask.icmp.code = match.mask->code; |
---|
302 | 462 | } |
---|
303 | 463 | |
---|
304 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { |
---|
305 | | - struct flow_dissector_key_control *key = |
---|
306 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); |
---|
| 464 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { |
---|
| 465 | + struct flow_match_ipv4_addrs match; |
---|
307 | 466 | |
---|
308 | | - addr_type = key->addr_type; |
---|
309 | | - } |
---|
310 | | - |
---|
311 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { |
---|
312 | | - struct flow_dissector_key_ipv4_addrs *key = |
---|
313 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); |
---|
314 | | - struct flow_dissector_key_ipv4_addrs *mask = |
---|
315 | | - GET_MASK(tc_flow_cmd, |
---|
316 | | - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); |
---|
317 | | - |
---|
| 467 | + flow_rule_match_enc_ipv4_addrs(rule, &match); |
---|
318 | 468 | flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; |
---|
319 | | - flow->tun_key.u.ipv4.dst = key->dst; |
---|
320 | | - flow->tun_mask.u.ipv4.dst = mask->dst; |
---|
321 | | - flow->tun_key.u.ipv4.src = key->src; |
---|
322 | | - flow->tun_mask.u.ipv4.src = mask->src; |
---|
323 | | - } else if (dissector_uses_key(dissector, |
---|
| 469 | + flow->tun_key.u.ipv4.dst = match.key->dst; |
---|
| 470 | + flow->tun_mask.u.ipv4.dst = match.mask->dst; |
---|
| 471 | + flow->tun_key.u.ipv4.src = match.key->src; |
---|
| 472 | + flow->tun_mask.u.ipv4.src = match.mask->src; |
---|
| 473 | + } else if (flow_rule_match_key(rule, |
---|
324 | 474 | FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { |
---|
325 | 475 | return -EOPNOTSUPP; |
---|
326 | 476 | } |
---|
327 | 477 | |
---|
328 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { |
---|
329 | | - struct flow_dissector_key_keyid *key = |
---|
330 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); |
---|
331 | | - struct flow_dissector_key_keyid *mask = |
---|
332 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); |
---|
| 478 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { |
---|
| 479 | + struct flow_match_enc_keyid match; |
---|
333 | 480 | |
---|
| 481 | + flow_rule_match_enc_keyid(rule, &match); |
---|
334 | 482 | flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; |
---|
335 | | - flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid); |
---|
336 | | - flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid); |
---|
| 483 | + flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid); |
---|
| 484 | + flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid); |
---|
337 | 485 | } |
---|
338 | 486 | |
---|
339 | | - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { |
---|
340 | | - struct flow_dissector_key_ports *key = |
---|
341 | | - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); |
---|
342 | | - struct flow_dissector_key_ports *mask = |
---|
343 | | - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); |
---|
| 487 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { |
---|
| 488 | + struct flow_match_ports match; |
---|
344 | 489 | |
---|
| 490 | + flow_rule_match_enc_ports(rule, &match); |
---|
345 | 491 | flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; |
---|
346 | | - flow->tun_key.tp_dst = key->dst; |
---|
347 | | - flow->tun_mask.tp_dst = mask->dst; |
---|
348 | | - flow->tun_key.tp_src = key->src; |
---|
349 | | - flow->tun_mask.tp_src = mask->src; |
---|
| 492 | + flow->tun_key.tp_dst = match.key->dst; |
---|
| 493 | + flow->tun_mask.tp_dst = match.mask->dst; |
---|
| 494 | + flow->tun_key.tp_src = match.key->src; |
---|
| 495 | + flow->tun_mask.tp_src = match.mask->src; |
---|
350 | 496 | } |
---|
351 | 497 | |
---|
352 | | - return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); |
---|
| 498 | + return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action, |
---|
| 499 | + tc_flow_cmd->common.extack); |
---|
353 | 500 | } |
---|
354 | 501 | |
---|
355 | | -static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) |
---|
| 502 | +static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, |
---|
| 503 | + struct bnxt_tc_flow_node *flow_node) |
---|
356 | 504 | { |
---|
357 | 505 | struct hwrm_cfa_flow_free_input req = { 0 }; |
---|
358 | 506 | int rc; |
---|
359 | 507 | |
---|
360 | 508 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); |
---|
361 | | - req.flow_handle = flow_handle; |
---|
| 509 | + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) |
---|
| 510 | + req.ext_flow_handle = flow_node->ext_flow_handle; |
---|
| 511 | + else |
---|
| 512 | + req.flow_handle = flow_node->flow_handle; |
---|
362 | 513 | |
---|
363 | 514 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
364 | 515 | if (rc) |
---|
365 | | - netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", |
---|
366 | | - __func__, flow_handle, rc); |
---|
| 516 | + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); |
---|
367 | 517 | |
---|
368 | | - if (rc) |
---|
369 | | - rc = -EIO; |
---|
370 | 518 | return rc; |
---|
371 | 519 | } |
---|
372 | 520 | |
---|
.. | .. |
---|
433 | 581 | |
---|
434 | 582 | static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, |
---|
435 | 583 | __le16 ref_flow_handle, |
---|
436 | | - __le32 tunnel_handle, __le16 *flow_handle) |
---|
| 584 | + __le32 tunnel_handle, |
---|
| 585 | + struct bnxt_tc_flow_node *flow_node) |
---|
437 | 586 | { |
---|
438 | | - struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; |
---|
439 | 587 | struct bnxt_tc_actions *actions = &flow->actions; |
---|
440 | 588 | struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; |
---|
441 | 589 | struct bnxt_tc_l3_key *l3_key = &flow->l3_key; |
---|
442 | 590 | struct hwrm_cfa_flow_alloc_input req = { 0 }; |
---|
| 591 | + struct hwrm_cfa_flow_alloc_output *resp; |
---|
443 | 592 | u16 flow_flags = 0, action_flags = 0; |
---|
444 | 593 | int rc; |
---|
445 | 594 | |
---|
.. | .. |
---|
447 | 596 | |
---|
448 | 597 | req.src_fid = cpu_to_le16(flow->src_fid); |
---|
449 | 598 | req.ref_flow_handle = ref_flow_handle; |
---|
| 599 | + |
---|
| 600 | + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { |
---|
| 601 | + memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac, |
---|
| 602 | + ETH_ALEN); |
---|
| 603 | + memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac, |
---|
| 604 | + ETH_ALEN); |
---|
| 605 | + action_flags |= |
---|
| 606 | + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; |
---|
| 607 | + } |
---|
| 608 | + |
---|
| 609 | + if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) { |
---|
| 610 | + if (actions->nat.l3_is_ipv4) { |
---|
| 611 | + action_flags |= |
---|
| 612 | + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS; |
---|
| 613 | + |
---|
| 614 | + if (actions->nat.src_xlate) { |
---|
| 615 | + action_flags |= |
---|
| 616 | + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; |
---|
| 617 | + /* L3 source rewrite */ |
---|
| 618 | + req.nat_ip_address[0] = |
---|
| 619 | + actions->nat.l3.ipv4.saddr.s_addr; |
---|
| 620 | + /* L4 source port */ |
---|
| 621 | + if (actions->nat.l4.ports.sport) |
---|
| 622 | + req.nat_port = |
---|
| 623 | + actions->nat.l4.ports.sport; |
---|
| 624 | + } else { |
---|
| 625 | + action_flags |= |
---|
| 626 | + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; |
---|
| 627 | + /* L3 destination rewrite */ |
---|
| 628 | + req.nat_ip_address[0] = |
---|
| 629 | + actions->nat.l3.ipv4.daddr.s_addr; |
---|
| 630 | + /* L4 destination port */ |
---|
| 631 | + if (actions->nat.l4.ports.dport) |
---|
| 632 | + req.nat_port = |
---|
| 633 | + actions->nat.l4.ports.dport; |
---|
| 634 | + } |
---|
| 635 | + netdev_dbg(bp->dev, |
---|
| 636 | + "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n", |
---|
| 637 | + req.nat_ip_address, actions->nat.src_xlate, |
---|
| 638 | + req.nat_port); |
---|
| 639 | + } else { |
---|
| 640 | + if (actions->nat.src_xlate) { |
---|
| 641 | + action_flags |= |
---|
| 642 | + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; |
---|
| 643 | + /* L3 source rewrite */ |
---|
| 644 | + memcpy(req.nat_ip_address, |
---|
| 645 | + actions->nat.l3.ipv6.saddr.s6_addr32, |
---|
| 646 | + sizeof(req.nat_ip_address)); |
---|
| 647 | + /* L4 source port */ |
---|
| 648 | + if (actions->nat.l4.ports.sport) |
---|
| 649 | + req.nat_port = |
---|
| 650 | + actions->nat.l4.ports.sport; |
---|
| 651 | + } else { |
---|
| 652 | + action_flags |= |
---|
| 653 | + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; |
---|
| 654 | + /* L3 destination rewrite */ |
---|
| 655 | + memcpy(req.nat_ip_address, |
---|
| 656 | + actions->nat.l3.ipv6.daddr.s6_addr32, |
---|
| 657 | + sizeof(req.nat_ip_address)); |
---|
| 658 | + /* L4 destination port */ |
---|
| 659 | + if (actions->nat.l4.ports.dport) |
---|
| 660 | + req.nat_port = |
---|
| 661 | + actions->nat.l4.ports.dport; |
---|
| 662 | + } |
---|
| 663 | + netdev_dbg(bp->dev, |
---|
| 664 | + "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n", |
---|
| 665 | + req.nat_ip_address, actions->nat.src_xlate, |
---|
| 666 | + req.nat_port); |
---|
| 667 | + } |
---|
| 668 | + } |
---|
450 | 669 | |
---|
451 | 670 | if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || |
---|
452 | 671 | actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { |
---|
.. | .. |
---|
542 | 761 | |
---|
543 | 762 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
544 | 763 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
545 | | - if (!rc) |
---|
546 | | - *flow_handle = resp->flow_handle; |
---|
| 764 | + if (!rc) { |
---|
| 765 | + resp = bnxt_get_hwrm_resp_addr(bp, &req); |
---|
| 766 | + /* CFA_FLOW_ALLOC response interpretation: |
---|
| 767 | + *                    fw with         fw with |
---|
| 768 | + *                    16-bit          64-bit |
---|
| 769 | + *                    flow handle     flow handle |
---|
| 770 | + *                    ===========     =========== |
---|
| 771 | + * flow_handle        flow handle     flow context id |
---|
| 772 | + * ext_flow_handle    INVALID         flow handle |
---|
| 773 | + * flow_id            INVALID         flow counter id |
---|
| 774 | + */ |
---|
| 775 | + flow_node->flow_handle = resp->flow_handle; |
---|
| 776 | + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { |
---|
| 777 | + flow_node->ext_flow_handle = resp->ext_flow_handle; |
---|
| 778 | + flow_node->flow_id = resp->flow_id; |
---|
| 779 | + } |
---|
| 780 | + } |
---|
547 | 781 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
548 | | - |
---|
549 | | - if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) |
---|
550 | | - rc = -ENOSPC; |
---|
551 | | - else if (rc) |
---|
552 | | - rc = -EIO; |
---|
553 | 782 | return rc; |
---|
554 | 783 | } |
---|
555 | 784 | |
---|
.. | .. |
---|
559 | 788 | __le32 ref_decap_handle, |
---|
560 | 789 | __le32 *decap_filter_handle) |
---|
561 | 790 | { |
---|
562 | | - struct hwrm_cfa_decap_filter_alloc_output *resp = |
---|
563 | | - bp->hwrm_cmd_resp_addr; |
---|
564 | 791 | struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; |
---|
| 792 | + struct hwrm_cfa_decap_filter_alloc_output *resp; |
---|
565 | 793 | struct ip_tunnel_key *tun_key = &flow->tun_key; |
---|
566 | 794 | u32 enables = 0; |
---|
567 | 795 | int rc; |
---|
.. | .. |
---|
614 | 842 | |
---|
615 | 843 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
616 | 844 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
617 | | - if (!rc) |
---|
| 845 | + if (!rc) { |
---|
| 846 | + resp = bnxt_get_hwrm_resp_addr(bp, &req); |
---|
618 | 847 | *decap_filter_handle = resp->decap_filter_id; |
---|
619 | | - else |
---|
620 | | - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
---|
| 848 | + } else { |
---|
| 849 | + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); |
---|
| 850 | + } |
---|
621 | 851 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
622 | 852 | |
---|
623 | | - if (rc) |
---|
624 | | - rc = -EIO; |
---|
625 | 853 | return rc; |
---|
626 | 854 | } |
---|
627 | 855 | |
---|
.. | .. |
---|
636 | 864 | |
---|
637 | 865 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
638 | 866 | if (rc) |
---|
639 | | - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
---|
| 867 | + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); |
---|
640 | 868 | |
---|
641 | | - if (rc) |
---|
642 | | - rc = -EIO; |
---|
643 | 869 | return rc; |
---|
644 | 870 | } |
---|
645 | 871 | |
---|
.. | .. |
---|
648 | 874 | struct bnxt_tc_l2_key *l2_info, |
---|
649 | 875 | __le32 *encap_record_handle) |
---|
650 | 876 | { |
---|
651 | | - struct hwrm_cfa_encap_record_alloc_output *resp = |
---|
652 | | - bp->hwrm_cmd_resp_addr; |
---|
653 | 877 | struct hwrm_cfa_encap_record_alloc_input req = { 0 }; |
---|
| 878 | + struct hwrm_cfa_encap_record_alloc_output *resp; |
---|
654 | 879 | struct hwrm_cfa_encap_data_vxlan *encap = |
---|
655 | 880 | (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; |
---|
656 | 881 | struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = |
---|
.. | .. |
---|
682 | 907 | |
---|
683 | 908 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
684 | 909 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
685 | | - if (!rc) |
---|
| 910 | + if (!rc) { |
---|
| 911 | + resp = bnxt_get_hwrm_resp_addr(bp, &req); |
---|
686 | 912 | *encap_record_handle = resp->encap_record_id; |
---|
687 | | - else |
---|
688 | | - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
---|
| 913 | + } else { |
---|
| 914 | + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); |
---|
| 915 | + } |
---|
689 | 916 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
690 | 917 | |
---|
691 | | - if (rc) |
---|
692 | | - rc = -EIO; |
---|
693 | 918 | return rc; |
---|
694 | 919 | } |
---|
695 | 920 | |
---|
.. | .. |
---|
704 | 929 | |
---|
705 | 930 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
706 | 931 | if (rc) |
---|
707 | | - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
---|
| 932 | + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); |
---|
708 | 933 | |
---|
709 | | - if (rc) |
---|
710 | | - rc = -EIO; |
---|
711 | 934 | return rc; |
---|
712 | 935 | } |
---|
713 | 936 | |
---|
.. | .. |
---|
725 | 948 | tc_info->l2_ht_params); |
---|
726 | 949 | if (rc) |
---|
727 | 950 | netdev_err(bp->dev, |
---|
728 | | - "Error: %s: rhashtable_remove_fast: %d", |
---|
| 951 | + "Error: %s: rhashtable_remove_fast: %d\n", |
---|
729 | 952 | __func__, rc); |
---|
730 | 953 | kfree_rcu(l2_node, rcu); |
---|
731 | 954 | } |
---|
.. | .. |
---|
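
Because flow_action entries are walked in rule order, the parser can enforce ordering constraints such as "an L4 port rewrite is only valid after an L3 rewrite has set BNXT_TC_ACTION_FLAG_NAT_XLATE". A toy model of that check (hypothetical demo, not kernel code):

```c
#include <stdio.h>

enum act { ACT_MANGLE_IP4, ACT_MANGLE_TCP, ACT_REDIRECT };

int main(void)
{
	/* e.g. flower rule: pedit ip saddr, pedit tcp sport, mirred */
	enum act rule[] = { ACT_MANGLE_IP4, ACT_MANGLE_TCP, ACT_REDIRECT };
	unsigned int i, nat_seen = 0;

	for (i = 0; i < sizeof(rule) / sizeof(rule[0]); i++) {
		switch (rule[i]) {
		case ACT_MANGLE_IP4:
			nat_seen = 1;		/* L3 rewrite seen first */
			break;
		case ACT_MANGLE_TCP:
			if (!nat_seen) {	/* mirrors the -EINVAL above */
				puts("rejected: L4 rewrite without L3");
				return 1;
			}
			break;
		case ACT_REDIRECT:
			break;
		}
	}
	puts("accepted");
	return 0;
}
```
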
754 | 977 | if (rc) { |
---|
755 | 978 | kfree_rcu(l2_node, rcu); |
---|
756 | 979 | netdev_err(bp->dev, |
---|
757 | | - "Error: %s: rhashtable_insert_fast: %d", |
---|
| 980 | + "Error: %s: rhashtable_insert_fast: %d\n", |
---|
758 | 981 | __func__, rc); |
---|
759 | 982 | return NULL; |
---|
760 | 983 | } |
---|
.. | .. |
---|
813 | 1036 | if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && |
---|
814 | 1037 | (flow->l4_key.ip_proto != IPPROTO_TCP && |
---|
815 | 1038 | flow->l4_key.ip_proto != IPPROTO_UDP)) { |
---|
816 | | - netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports", |
---|
| 1039 | + netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n", |
---|
817 | 1040 | flow->l4_key.ip_proto); |
---|
818 | 1041 | return false; |
---|
819 | 1042 | } |
---|
.. | .. |
---|
870 | 1093 | rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node, |
---|
871 | 1094 | *ht_params); |
---|
872 | 1095 | if (rc) { |
---|
873 | | - netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); |
---|
| 1096 | + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); |
---|
874 | 1097 | rc = -1; |
---|
875 | 1098 | } |
---|
876 | 1099 | kfree_rcu(tunnel_node, rcu); |
---|
.. | .. |
---|
911 | 1134 | tunnel_node->refcount++; |
---|
912 | 1135 | return tunnel_node; |
---|
913 | 1136 | err: |
---|
914 | | - netdev_info(bp->dev, "error rc=%d", rc); |
---|
| 1137 | + netdev_info(bp->dev, "error rc=%d\n", rc); |
---|
915 | 1138 | return NULL; |
---|
916 | 1139 | } |
---|
917 | 1140 | |
---|
.. | .. |
---|
969 | 1192 | &decap_l2_node->node, |
---|
970 | 1193 | tc_info->decap_l2_ht_params); |
---|
971 | 1194 | if (rc) |
---|
972 | | - netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); |
---|
| 1195 | + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); |
---|
973 | 1196 | kfree_rcu(decap_l2_node, rcu); |
---|
974 | 1197 | } |
---|
975 | 1198 | } |
---|
.. | .. |
---|
1009 | 1232 | |
---|
1010 | 1233 | rt = ip_route_output_key(dev_net(real_dst_dev), &flow); |
---|
1011 | 1234 | if (IS_ERR(rt)) { |
---|
1012 | | - netdev_info(bp->dev, "no route to %pI4b", &flow.daddr); |
---|
| 1235 | + netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr); |
---|
1013 | 1236 | return -EOPNOTSUPP; |
---|
1014 | 1237 | } |
---|
1015 | 1238 | |
---|
.. | .. |
---|
1023 | 1246 | |
---|
1024 | 1247 | if (vlan->real_dev != real_dst_dev) { |
---|
1025 | 1248 | netdev_info(bp->dev, |
---|
1026 | | - "dst_dev(%s) doesn't use PF-if(%s)", |
---|
| 1249 | + "dst_dev(%s) doesn't use PF-if(%s)\n", |
---|
1027 | 1250 | netdev_name(dst_dev), |
---|
1028 | 1251 | netdev_name(real_dst_dev)); |
---|
1029 | 1252 | rc = -EOPNOTSUPP; |
---|
.. | .. |
---|
1035 | 1258 | #endif |
---|
1036 | 1259 | } else if (dst_dev != real_dst_dev) { |
---|
1037 | 1260 | netdev_info(bp->dev, |
---|
1038 | | - "dst_dev(%s) for %pI4b is not PF-if(%s)", |
---|
| 1261 | + "dst_dev(%s) for %pI4b is not PF-if(%s)\n", |
---|
1039 | 1262 | netdev_name(dst_dev), &flow.daddr, |
---|
1040 | 1263 | netdev_name(real_dst_dev)); |
---|
1041 | 1264 | rc = -EOPNOTSUPP; |
---|
.. | .. |
---|
1044 | 1267 | |
---|
1045 | 1268 | nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); |
---|
1046 | 1269 | if (!nbr) { |
---|
1047 | | - netdev_info(bp->dev, "can't lookup neighbor for %pI4b", |
---|
| 1270 | + netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n", |
---|
1048 | 1271 | &flow.daddr); |
---|
1049 | 1272 | rc = -EOPNOTSUPP; |
---|
1050 | 1273 | goto put_rt; |
---|
.. | .. |
---|
1239 | 1462 | int rc; |
---|
1240 | 1463 | |
---|
1241 | 1464 | /* send HWRM cmd to free the flow-id */ |
---|
1242 | | - bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle); |
---|
| 1465 | + bnxt_hwrm_cfa_flow_free(bp, flow_node); |
---|
1243 | 1466 | |
---|
1244 | 1467 | mutex_lock(&tc_info->lock); |
---|
1245 | 1468 | |
---|
.. | .. |
---|
1254 | 1477 | rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, |
---|
1255 | 1478 | tc_info->flow_ht_params); |
---|
1256 | 1479 | if (rc) |
---|
1257 | | - netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d", |
---|
| 1480 | + netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n", |
---|
1258 | 1481 | __func__, rc); |
---|
1259 | 1482 | |
---|
1260 | 1483 | kfree_rcu(flow_node, rcu); |
---|
1261 | 1484 | return 0; |
---|
| 1485 | +} |
---|
| 1486 | + |
---|
| 1487 | +static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, |
---|
| 1488 | + u16 src_fid) |
---|
| 1489 | +{ |
---|
| 1490 | + flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; |
---|
1262 | 1491 | } |
---|
1263 | 1492 | |
---|
1264 | 1493 | static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, |
---|
.. | .. |
---|
1284 | 1513 | * The hash-tables are already protected by the rhashtable API. |
---|
1285 | 1514 | */ |
---|
1286 | 1515 | static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, |
---|
1287 | | - struct tc_cls_flower_offload *tc_flow_cmd) |
---|
| 1516 | + struct flow_cls_offload *tc_flow_cmd) |
---|
1288 | 1517 | { |
---|
1289 | 1518 | struct bnxt_tc_flow_node *new_node, *old_node; |
---|
1290 | 1519 | struct bnxt_tc_info *tc_info = bp->tc_info; |
---|
.. | .. |
---|
1307 | 1536 | goto free_node; |
---|
1308 | 1537 | |
---|
1309 | 1538 | bnxt_tc_set_src_fid(bp, flow, src_fid); |
---|
| 1539 | + bnxt_tc_set_flow_dir(bp, flow, flow->src_fid); |
---|
1310 | 1540 | |
---|
1311 | 1541 | if (!bnxt_tc_can_offload(bp, flow)) { |
---|
1312 | | - rc = -ENOSPC; |
---|
1313 | | - goto free_node; |
---|
| 1542 | + rc = -EOPNOTSUPP; |
---|
| 1543 | + kfree_rcu(new_node, rcu); |
---|
| 1544 | + return rc; |
---|
1314 | 1545 | } |
---|
1315 | 1546 | |
---|
1316 | 1547 | /* If a flow exists with the same cookie, delete it */ |
---|
.. | .. |
---|
1335 | 1566 | |
---|
1336 | 1567 | /* send HWRM cmd to alloc the flow */ |
---|
1337 | 1568 | rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, |
---|
1338 | | - tunnel_handle, &new_node->flow_handle); |
---|
| 1569 | + tunnel_handle, new_node); |
---|
1339 | 1570 | if (rc) |
---|
1340 | 1571 | goto put_tunnel; |
---|
1341 | 1572 | |
---|
.. | .. |
---|
1351 | 1582 | return 0; |
---|
1352 | 1583 | |
---|
1353 | 1584 | hwrm_flow_free: |
---|
1354 | | - bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); |
---|
| 1585 | + bnxt_hwrm_cfa_flow_free(bp, new_node); |
---|
1355 | 1586 | put_tunnel: |
---|
1356 | 1587 | bnxt_tc_put_tunnel_handle(bp, flow, new_node); |
---|
1357 | 1588 | put_l2: |
---|
.. | .. |
---|
1361 | 1592 | free_node: |
---|
1362 | 1593 | kfree_rcu(new_node, rcu); |
---|
1363 | 1594 | done: |
---|
1364 | | - netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", |
---|
| 1595 | + netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n", |
---|
1365 | 1596 | __func__, tc_flow_cmd->cookie, rc); |
---|
1366 | 1597 | return rc; |
---|
1367 | 1598 | } |
---|
1368 | 1599 | |
---|
1369 | 1600 | static int bnxt_tc_del_flow(struct bnxt *bp, |
---|
1370 | | - struct tc_cls_flower_offload *tc_flow_cmd) |
---|
| 1601 | + struct flow_cls_offload *tc_flow_cmd) |
---|
1371 | 1602 | { |
---|
1372 | 1603 | struct bnxt_tc_info *tc_info = bp->tc_info; |
---|
1373 | 1604 | struct bnxt_tc_flow_node *flow_node; |
---|
.. | .. |
---|
1382 | 1613 | } |
---|
1383 | 1614 | |
---|
1384 | 1615 | static int bnxt_tc_get_flow_stats(struct bnxt *bp, |
---|
1385 | | - struct tc_cls_flower_offload *tc_flow_cmd) |
---|
| 1616 | + struct flow_cls_offload *tc_flow_cmd) |
---|
1386 | 1617 | { |
---|
1387 | 1618 | struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats; |
---|
1388 | 1619 | struct bnxt_tc_info *tc_info = bp->tc_info; |
---|
.. | .. |
---|
1407 | 1638 | lastused = flow->lastused; |
---|
1408 | 1639 | spin_unlock(&flow->stats_lock); |
---|
1409 | 1640 | |
---|
1410 | | - tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, |
---|
1411 | | - lastused); |
---|
| 1641 | + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0, |
---|
| 1642 | + lastused, FLOW_ACTION_HW_STATS_DELAYED); |
---|
1412 | 1643 | return 0; |
---|
| 1644 | +} |
---|
| 1645 | + |
---|
| 1646 | +static void bnxt_fill_cfa_stats_req(struct bnxt *bp, |
---|
| 1647 | + struct bnxt_tc_flow_node *flow_node, |
---|
| 1648 | + __le16 *flow_handle, __le32 *flow_id) |
---|
| 1649 | +{ |
---|
| 1650 | + u16 handle; |
---|
| 1651 | + |
---|
| 1652 | + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { |
---|
| 1653 | + *flow_id = flow_node->flow_id; |
---|
| 1654 | + |
---|
| 1655 | + /* If flow_id is used to fetch flow stats then: |
---|
| 1656 | + * 1. lower 12 bits of flow_handle must be set to all 1s. |
---|
| 1657 | + * 2. 15th bit of flow_handle must specify the flow |
---|
| 1658 | + * direction (TX/RX). |
---|
| 1659 | + */ |
---|
| 1660 | + if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) |
---|
| 1661 | + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | |
---|
| 1662 | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; |
---|
| 1663 | + else |
---|
| 1664 | + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; |
---|
| 1665 | + |
---|
| 1666 | + *flow_handle = cpu_to_le16(handle); |
---|
| 1667 | + } else { |
---|
| 1668 | + *flow_handle = flow_node->flow_handle; |
---|
| 1669 | + } |
---|
1413 | 1670 | } |
---|
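
A worked example of the encoding described in the comment above, assuming (hypothetically) that CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX is bit 15 and CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK is the low 12 bits; the real values live in bnxt_hsi.h:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values, for illustration only. */
#define FLOW_HANDLE_DIR_RX	0x8000u
#define FLOW_HANDLE_MAX_MASK	0x0fffu

int main(void)
{
	uint16_t rx_handle = FLOW_HANDLE_DIR_RX | FLOW_HANDLE_MAX_MASK;
	uint16_t tx_handle = FLOW_HANDLE_MAX_MASK;

	printf("rx: 0x%04x  tx: 0x%04x\n", rx_handle, tx_handle);  /* 0x8fff / 0x0fff */
	return 0;
}
```
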
1414 | 1671 | |
---|
1415 | 1672 | static int |
---|
1416 | 1673 | bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, |
---|
1417 | 1674 | struct bnxt_tc_stats_batch stats_batch[]) |
---|
1418 | 1675 | { |
---|
1419 | | - struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; |
---|
1420 | 1676 | struct hwrm_cfa_flow_stats_input req = { 0 }; |
---|
| 1677 | + struct hwrm_cfa_flow_stats_output *resp; |
---|
1421 | 1678 | __le16 *req_flow_handles = &req.flow_handle_0; |
---|
| 1679 | + __le32 *req_flow_ids = &req.flow_id_0; |
---|
1422 | 1680 | int rc, i; |
---|
1423 | 1681 | |
---|
1424 | 1682 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); |
---|
.. | .. |
---|
1426 | 1684 | for (i = 0; i < num_flows; i++) { |
---|
1427 | 1685 | struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; |
---|
1428 | 1686 | |
---|
1429 | | - req_flow_handles[i] = flow_node->flow_handle; |
---|
| 1687 | + bnxt_fill_cfa_stats_req(bp, flow_node, |
---|
| 1688 | + &req_flow_handles[i], &req_flow_ids[i]); |
---|
1430 | 1689 | } |
---|
1431 | 1690 | |
---|
1432 | 1691 | mutex_lock(&bp->hwrm_cmd_lock); |
---|
1433 | 1692 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
---|
1434 | 1693 | if (!rc) { |
---|
1435 | | - __le64 *resp_packets = &resp->packet_0; |
---|
1436 | | - __le64 *resp_bytes = &resp->byte_0; |
---|
| 1694 | + __le64 *resp_packets; |
---|
| 1695 | + __le64 *resp_bytes; |
---|
| 1696 | + |
---|
| 1697 | + resp = bnxt_get_hwrm_resp_addr(bp, &req); |
---|
| 1698 | + resp_packets = &resp->packet_0; |
---|
| 1699 | + resp_bytes = &resp->byte_0; |
---|
1437 | 1700 | |
---|
1438 | 1701 | for (i = 0; i < num_flows; i++) { |
---|
1439 | 1702 | stats_batch[i].hw_stats.packets = |
---|
.. | .. |
---|
1442 | 1705 | le64_to_cpu(resp_bytes[i]); |
---|
1443 | 1706 | } |
---|
1444 | 1707 | } else { |
---|
1445 | | - netdev_info(bp->dev, "error rc=%d", rc); |
---|
| 1708 | + netdev_info(bp->dev, "error rc=%d\n", rc); |
---|
1446 | 1709 | } |
---|
1447 | 1710 | mutex_unlock(&bp->hwrm_cmd_lock); |
---|
1448 | 1711 | |
---|
1449 | | - if (rc) |
---|
1450 | | - rc = -EIO; |
---|
1451 | 1712 | return rc; |
---|
1452 | 1713 | } |
---|
1453 | 1714 | |
---|
.. | .. |
---|
1572 | 1833 | } |
---|
1573 | 1834 | |
---|
1574 | 1835 | int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, |
---|
1575 | | - struct tc_cls_flower_offload *cls_flower) |
---|
| 1836 | + struct flow_cls_offload *cls_flower) |
---|
1576 | 1837 | { |
---|
1577 | 1838 | switch (cls_flower->command) { |
---|
1578 | | - case TC_CLSFLOWER_REPLACE: |
---|
| 1839 | + case FLOW_CLS_REPLACE: |
---|
1579 | 1840 | return bnxt_tc_add_flow(bp, src_fid, cls_flower); |
---|
1580 | | - case TC_CLSFLOWER_DESTROY: |
---|
| 1841 | + case FLOW_CLS_DESTROY: |
---|
1581 | 1842 | return bnxt_tc_del_flow(bp, cls_flower); |
---|
1582 | | - case TC_CLSFLOWER_STATS: |
---|
| 1843 | + case FLOW_CLS_STATS: |
---|
1583 | 1844 | return bnxt_tc_get_flow_stats(bp, cls_flower); |
---|
1584 | 1845 | default: |
---|
1585 | 1846 | return -EOPNOTSUPP; |
---|
1586 | 1847 | } |
---|
| 1848 | +} |
---|
| 1849 | + |
---|
| 1850 | +static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type, |
---|
| 1851 | + void *type_data, void *cb_priv) |
---|
| 1852 | +{ |
---|
| 1853 | + struct bnxt_flower_indr_block_cb_priv *priv = cb_priv; |
---|
| 1854 | + struct flow_cls_offload *flower = type_data; |
---|
| 1855 | + struct bnxt *bp = priv->bp; |
---|
| 1856 | + |
---|
| 1857 | + if (!tc_cls_can_offload_and_chain0(bp->dev, type_data)) |
---|
| 1858 | + return -EOPNOTSUPP; |
---|
| 1859 | + |
---|
| 1860 | + switch (type) { |
---|
| 1861 | + case TC_SETUP_CLSFLOWER: |
---|
| 1862 | + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower); |
---|
| 1863 | + default: |
---|
| 1864 | + return -EOPNOTSUPP; |
---|
| 1865 | + } |
---|
| 1866 | +} |
---|
| 1867 | + |
---|
| 1868 | +static struct bnxt_flower_indr_block_cb_priv * |
---|
| 1869 | +bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev) |
---|
| 1870 | +{ |
---|
| 1871 | + struct bnxt_flower_indr_block_cb_priv *cb_priv; |
---|
| 1872 | + |
---|
| 1873 | + list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) |
---|
| 1874 | + if (cb_priv->tunnel_netdev == netdev) |
---|
| 1875 | + return cb_priv; |
---|
| 1876 | + |
---|
| 1877 | + return NULL; |
---|
| 1878 | +} |
---|
| 1879 | + |
---|
| 1880 | +static void bnxt_tc_setup_indr_rel(void *cb_priv) |
---|
| 1881 | +{ |
---|
| 1882 | + struct bnxt_flower_indr_block_cb_priv *priv = cb_priv; |
---|
| 1883 | + |
---|
| 1884 | + list_del(&priv->list); |
---|
| 1885 | + kfree(priv); |
---|
| 1886 | +} |
---|
| 1887 | + |
---|
| 1888 | +static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp, |
---|
| 1889 | + struct flow_block_offload *f, void *data, |
---|
| 1890 | + void (*cleanup)(struct flow_block_cb *block_cb)) |
---|
| 1891 | +{ |
---|
| 1892 | + struct bnxt_flower_indr_block_cb_priv *cb_priv; |
---|
| 1893 | + struct flow_block_cb *block_cb; |
---|
| 1894 | + |
---|
| 1895 | + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
| 1896 | + return -EOPNOTSUPP; |
---|
| 1897 | + |
---|
| 1898 | + switch (f->command) { |
---|
| 1899 | + case FLOW_BLOCK_BIND: |
---|
| 1900 | + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); |
---|
| 1901 | + if (!cb_priv) |
---|
| 1902 | + return -ENOMEM; |
---|
| 1903 | + |
---|
| 1904 | + cb_priv->tunnel_netdev = netdev; |
---|
| 1905 | + cb_priv->bp = bp; |
---|
| 1906 | + list_add(&cb_priv->list, &bp->tc_indr_block_list); |
---|
| 1907 | + |
---|
| 1908 | + block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, |
---|
| 1909 | + cb_priv, cb_priv, |
---|
| 1910 | + bnxt_tc_setup_indr_rel, f, |
---|
| 1911 | + netdev, sch, data, bp, cleanup); |
---|
| 1912 | + if (IS_ERR(block_cb)) { |
---|
| 1913 | + list_del(&cb_priv->list); |
---|
| 1914 | + kfree(cb_priv); |
---|
| 1915 | + return PTR_ERR(block_cb); |
---|
| 1916 | + } |
---|
| 1917 | + |
---|
| 1918 | + flow_block_cb_add(block_cb, f); |
---|
| 1919 | + list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list); |
---|
| 1920 | + break; |
---|
| 1921 | + case FLOW_BLOCK_UNBIND: |
---|
| 1922 | + cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev); |
---|
| 1923 | + if (!cb_priv) |
---|
| 1924 | + return -ENOENT; |
---|
| 1925 | + |
---|
| 1926 | + block_cb = flow_block_cb_lookup(f->block, |
---|
| 1927 | + bnxt_tc_setup_indr_block_cb, |
---|
| 1928 | + cb_priv); |
---|
| 1929 | + if (!block_cb) |
---|
| 1930 | + return -ENOENT; |
---|
| 1931 | + |
---|
| 1932 | + flow_indr_block_cb_remove(block_cb, f); |
---|
| 1933 | + list_del(&block_cb->driver_list); |
---|
| 1934 | + break; |
---|
| 1935 | + default: |
---|
| 1936 | + return -EOPNOTSUPP; |
---|
| 1937 | + } |
---|
| 1938 | + return 0; |
---|
| 1939 | +} |
---|
| 1940 | + |
---|
| 1941 | +static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) |
---|
| 1942 | +{ |
---|
| 1943 | + return netif_is_vxlan(netdev); |
---|
| 1944 | +} |
---|
| 1945 | + |
---|
| 1946 | +static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, |
---|
| 1947 | + enum tc_setup_type type, void *type_data, |
---|
| 1948 | + void *data, |
---|
| 1949 | + void (*cleanup)(struct flow_block_cb *block_cb)) |
---|
| 1950 | +{ |
---|
| 1951 | + if (!bnxt_is_netdev_indr_offload(netdev)) |
---|
| 1952 | + return -EOPNOTSUPP; |
---|
| 1953 | + |
---|
| 1954 | + switch (type) { |
---|
| 1955 | + case TC_SETUP_BLOCK: |
---|
| 1956 | + return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup); |
---|
| 1957 | + default: |
---|
| 1958 | + break; |
---|
| 1959 | + } |
---|
| 1960 | + |
---|
| 1961 | + return -EOPNOTSUPP; |
---|
1587 | 1962 | } |
---|
1588 | 1963 | |
---|
1589 | 1964 | static const struct rhashtable_params bnxt_tc_flow_ht_params = { |
---|
.. | .. |
---|
1622 | 1997 | struct bnxt_tc_info *tc_info; |
---|
1623 | 1998 | int rc; |
---|
1624 | 1999 | |
---|
1625 | | - if (bp->hwrm_spec_code < 0x10803) { |
---|
1626 | | - netdev_warn(bp->dev, |
---|
1627 | | - "Firmware does not support TC flower offload.\n"); |
---|
1628 | | - return -ENOTSUPP; |
---|
1629 | | - } |
---|
| 2000 | + if (bp->hwrm_spec_code < 0x10803) |
---|
| 2001 | + return 0; |
---|
1630 | 2002 | |
---|
1631 | 2003 | tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL); |
---|
1632 | 2004 | if (!tc_info) |
---|
.. | .. |
---|
1669 | 2041 | bp->dev->hw_features |= NETIF_F_HW_TC; |
---|
1670 | 2042 | bp->dev->features |= NETIF_F_HW_TC; |
---|
1671 | 2043 | bp->tc_info = tc_info; |
---|
1672 | | - return 0; |
---|
| 2044 | + |
---|
| 2045 | + /* init indirect block notifications */ |
---|
| 2046 | + INIT_LIST_HEAD(&bp->tc_indr_block_list); |
---|
| 2047 | + |
---|
| 2048 | + rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp); |
---|
| 2049 | + if (!rc) |
---|
| 2050 | + return 0; |
---|
| 2051 | + |
---|
| 2052 | + rhashtable_destroy(&tc_info->encap_table); |
---|
1673 | 2053 | |
---|
1674 | 2054 | destroy_decap_table: |
---|
1675 | 2055 | rhashtable_destroy(&tc_info->decap_table); |
---|
.. | .. |
---|
1691 | 2071 | if (!bnxt_tc_flower_enabled(bp)) |
---|
1692 | 2072 | return; |
---|
1693 | 2073 | |
---|
| 2074 | + flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp, |
---|
| 2075 | + bnxt_tc_setup_indr_rel); |
---|
1694 | 2076 | rhashtable_destroy(&tc_info->flow_table); |
---|
1695 | 2077 | rhashtable_destroy(&tc_info->l2_table); |
---|
1696 | 2078 | rhashtable_destroy(&tc_info->decap_l2_table); |
---|