-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below.  You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

 #include <linux/etherdevice.h>
 #include <linux/inetdevice.h>
...
 #include "../nfp_net.h"

 #define NFP_FL_MAX_ROUTES 32
+
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+#define NFP_TUN_PRE_TUN_RULE_DEL BIT(0)
+#define NFP_TUN_PRE_TUN_IDX_BIT BIT(3)
+#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
+
+/**
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
+ * @flags: options for the rule offload
+ * @port_idx: index of destination MAC address for the rule
+ * @vlan_tci: VLAN info associated with MAC
+ * @host_ctx_id: stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+	__be32 flags;
+	__be16 port_idx;
+	__be16 vlan_tci;
+	__be32 host_ctx_id;
+};

 /**
  * struct nfp_tun_active_tuns - periodic message of active tunnels
...
 };

 /**
+ * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels reported in the message
+ * @flags: options part of the request
+ * @tun_info.ipv6: dest IPv6 address of active route
+ * @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info.extra: reserved for future use
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns_v6 {
+	__be32 seq;
+	__be32 count;
+	__be32 flags;
+	struct route_ip_info_v6 {
+		struct in6_addr ipv6;
+		__be32 egress_port;
+		__be32 extra[2];
+	} tun_info[];
+};
+
+/**
  * struct nfp_tun_neigh - neighbour/route entry on the NFP
  * @dst_ipv4: destination IPv4 address
  * @src_ipv4: source IPv4 address
...
 struct nfp_tun_neigh {
 	__be32 dst_ipv4;
 	__be32 src_ipv4;
+	u8 dst_addr[ETH_ALEN];
+	u8 src_addr[ETH_ALEN];
+	__be32 port_id;
+};
+
+/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
+ * @dst_ipv6: destination IPv6 address
+ * @src_ipv6: source IPv6 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv6
+ */
+struct nfp_tun_neigh_v6 {
+	struct in6_addr dst_ipv6;
+	struct in6_addr src_ipv6;
 	u8 dst_addr[ETH_ALEN];
 	u8 src_addr[ETH_ALEN];
 	__be32 port_id;
...
 };

 /**
- * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
- * @ipv4_addr: destination of route
- * @list: list pointer
+ * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv6_addr: destination ipv6 address for route
  */
-struct nfp_ipv4_route_entry {
-	__be32 ipv4_addr;
+struct nfp_tun_req_route_ipv6 {
+	__be32 ingress_port;
+	struct in6_addr ipv6_addr;
+};
+
+/**
+ * struct nfp_offloaded_route - routes that are offloaded to the NFP
+ * @list: list pointer
+ * @ip_add: destination of route - can be IPv4 or IPv6
+ */
+struct nfp_offloaded_route {
 	struct list_head list;
+	u8 ip_add[];
 };

 #define NFP_FL_IPV4_ADDRS_MAX 32
...
 	struct list_head list;
 };

-/**
- * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
- * @reserved: reserved for future use
- * @count: number of MAC addresses in the message
- * @addresses.index: index of MAC address in the lookup table
- * @addresses.addr: interface MAC address
- * @addresses: series of MACs to offload
- */
-struct nfp_tun_mac_addr {
-	__be16 reserved;
-	__be16 count;
-	struct index_mac_addr {
-		__be16 index;
-		u8 addr[ETH_ALEN];
-	} addresses[];
-};
+#define NFP_FL_IPV6_ADDRS_MAX 4

 /**
- * struct nfp_tun_mac_offload_entry - list of MACs to offload
- * @index: index of MAC address for offloading
- * @addr: interface MAC address
- * @list: list pointer
+ * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv6_addr: array of IPV6_ADDRS_MAX 128-bit IPv6 addresses
  */
-struct nfp_tun_mac_offload_entry {
+struct nfp_tun_ipv6_addr {
+	__be32 count;
+	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
+};
+
+#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
+
+/**
+ * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
+ * @flags: MAC address offload options
+ * @count: number of MAC addresses in the message (should be 1)
+ * @index: index of MAC address in the lookup table
+ * @addr: interface MAC address
+ */
+struct nfp_tun_mac_addr_offload {
+	__be16 flags;
+	__be16 count;
 	__be16 index;
 	u8 addr[ETH_ALEN];
-	struct list_head list;
+};
+
+enum nfp_flower_mac_offload_cmd {
+	NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
+	NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
+	NFP_TUNNEL_MAC_OFFLOAD_MOD = 2,
 };

 #define NFP_MAX_MAC_INDEX 0xff

 /**
- * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id
- * @ifindex: netdev ifindex of the device
- * @index: index of netdevs mac on NFP
- * @list: list pointer
+ * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
+ * @bridge_count: Number of bridge/internal devs with MAC
  */
-struct nfp_tun_mac_non_nfp_idx {
-	int ifindex;
-	u8 index;
-	struct list_head list;
+struct nfp_tun_offloaded_mac {
+	struct rhash_head ht_node;
+	u8 addr[ETH_ALEN];
+	u16 index;
+	int ref_count;
+	struct list_head repr_list;
+	int bridge_count;
+};
+
+static const struct rhashtable_params offloaded_macs_params = {
+	.key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
+	.head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node),
+	.key_len = ETH_ALEN,
+	.automatic_shrinking = true,
 };

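The structures above are fixed-layout control messages handed to the firmware, with multi-byte fields stored big-endian (hence the __be16/__be32 types and the cpu_to_be16() conversions later in the patch). A compilable userspace sketch of how the single-MAC offload message is populated; htons() stands in for the kernel's cpu_to_be16(), and the struct mirrors struct nfp_tun_mac_addr_offload but is not the driver's own definition:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define MAC_OFFLOAD_DEL_FLAG 0x2	/* mirrors NFP_TUN_MAC_OFFLOAD_DEL_FLAG */

struct mac_addr_offload {
	uint16_t flags;			/* __be16 on the wire */
	uint16_t count;			/* always 1: one MAC per message */
	uint16_t index;			/* slot in the NFP lookup table */
	uint8_t addr[ETH_ALEN];
};

static void fill_msg(struct mac_addr_offload *msg, const uint8_t *mac,
		     uint16_t idx, int del)
{
	memset(msg, 0, sizeof(*msg));
	if (del)
		msg->flags = htons(MAC_OFFLOAD_DEL_FLAG);
	msg->count = htons(1);
	msg->index = htons(idx);
	memcpy(msg->addr, mac, ETH_ALEN);
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct mac_addr_offload msg;

	fill_msg(&msg, mac, 0x0301, 0);	/* add at an example index */
	printf("count=%u index=0x%04x\n", ntohs(msg.count), ntohs(msg.index));
	return 0;
}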
 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
...
 	}

 	pay_len = nfp_flower_cmsg_get_data_len(skb);
-	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
-	    sizeof(struct route_ip_info) * count) {
+	if (pay_len != struct_size(payload, tun_info, count)) {
 		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
 		return;
 	}
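The hunk above replaces an open-coded "header plus n elements" size calculation with struct_size(), which computes the size of a structure ending in a flexible array member and, in the kernel, saturates on arithmetic overflow instead of wrapping. A minimal userspace model of the arithmetic it performs for this keep-alive payload (the overflow handling is omitted here):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tun_info {
	uint32_t ipv4;
	uint32_t egress_port;
	uint32_t extra[2];
};

struct active_tuns {
	uint32_t seq;
	uint32_t count;
	uint32_t flags;
	struct tun_info tun_info[];	/* flexible array member */
};

/* Same arithmetic as struct_size(p, member, n), minus overflow handling. */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct active_tuns *payload = NULL;	/* only used inside sizeof */

	/* 12-byte header + 3 * 16-byte entries = 60 bytes */
	printf("expected payload length: %zu\n",
	       STRUCT_SIZE(payload, tun_info, (size_t)3));
	return 0;
}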
...
 	for (i = 0; i < count; i++) {
 		ipv4_addr = payload->tun_info[i].ipv4;
 		port = be32_to_cpu(payload->tun_info[i].egress_port);
-		netdev = nfp_app_repr_get(app, port);
+		netdev = nfp_app_dev_get(app, port, NULL);
 		if (!netdev)
 			continue;

...
 	rcu_read_unlock();
 }

-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
 {
-	if (!netdev->rtnl_link_ops)
-		return false;
-	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
-		return true;
-	if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
-		return true;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct nfp_tun_active_tuns_v6 *payload;
+	struct net_device *netdev;
+	int count, i, pay_len;
+	struct neighbour *n;
+	void *ipv6_add;
+	u32 port;

-	return false;
+	payload = nfp_flower_cmsg_get_data(skb);
+	count = be32_to_cpu(payload->count);
+	if (count > NFP_FL_IPV6_ADDRS_MAX) {
+		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
+		return;
+	}
+
+	pay_len = nfp_flower_cmsg_get_data_len(skb);
+	if (pay_len != struct_size(payload, tun_info, count)) {
+		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+		return;
+	}
+
+	rcu_read_lock();
+	for (i = 0; i < count; i++) {
+		ipv6_add = &payload->tun_info[i].ipv6;
+		port = be32_to_cpu(payload->tun_info[i].egress_port);
+		netdev = nfp_app_dev_get(app, port, NULL);
+		if (!netdev)
+			continue;
+
+		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
+		if (!n)
+			continue;
+
+		/* Update the used timestamp of neighbour */
+		neigh_event_send(n, NULL);
+		neigh_release(n);
+	}
+	rcu_read_unlock();
+#endif
 }

 static int
...
 	return 0;
 }

-static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+static bool
+__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
+		    void *add, int add_len)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *entry;
-	struct list_head *ptr, *storage;
+	struct nfp_offloaded_route *entry;

-	spin_lock_bh(&priv->nfp_neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
-		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
-		if (entry->ipv4_addr == ipv4_addr) {
-			spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	spin_lock_bh(list_lock);
+	list_for_each_entry(entry, route_list, list)
+		if (!memcmp(entry->ip_add, add, add_len)) {
+			spin_unlock_bh(list_lock);
 			return true;
 		}
-	}
-	spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(list_lock);
 	return false;
 }

-static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+static int
+__nfp_tun_add_route_to_cache(struct list_head *route_list,
+			     spinlock_t *list_lock, void *add, int add_len)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *entry;
-	struct list_head *ptr, *storage;
+	struct nfp_offloaded_route *entry;

-	spin_lock_bh(&priv->nfp_neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
-		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
-		if (entry->ipv4_addr == ipv4_addr) {
-			spin_unlock_bh(&priv->nfp_neigh_off_lock);
-			return;
+	spin_lock_bh(list_lock);
+	list_for_each_entry(entry, route_list, list)
+		if (!memcmp(entry->ip_add, add, add_len)) {
+			spin_unlock_bh(list_lock);
+			return 0;
 		}
-	}
-	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+
+	entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
 	if (!entry) {
-		spin_unlock_bh(&priv->nfp_neigh_off_lock);
-		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
-		return;
+		spin_unlock_bh(list_lock);
+		return -ENOMEM;
 	}

-	entry->ipv4_addr = ipv4_addr;
-	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
-	spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	memcpy(entry->ip_add, add, add_len);
+	list_add_tail(&entry->list, route_list);
+	spin_unlock_bh(list_lock);
+
+	return 0;
 }

-static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+static void
+__nfp_tun_del_route_from_cache(struct list_head *route_list,
+			       spinlock_t *list_lock, void *add, int add_len)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_ipv4_route_entry *entry;
-	struct list_head *ptr, *storage;
+	struct nfp_offloaded_route *entry;

-	spin_lock_bh(&priv->nfp_neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
-		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
-		if (entry->ipv4_addr == ipv4_addr) {
+	spin_lock_bh(list_lock);
+	list_for_each_entry(entry, route_list, list)
+		if (!memcmp(entry->ip_add, add, add_len)) {
 			list_del(&entry->list);
 			kfree(entry);
 			break;
 		}
-	}
-	spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(list_lock);
+}
+
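The three helpers above collapse the old IPv4-only cache into code keyed by an opaque (pointer, length) pair: each entry carries its address in the trailing ip_add[] flexible array and lookups use memcmp(), so 4-byte IPv4 and 16-byte IPv6 destinations share one implementation. A compilable userspace model of the idea; note the driver keeps separate v4/v6 lists under spinlocks so the key length there is implicit per list, while this sketch stores it per entry:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cached_route {
	struct cached_route *next;
	size_t key_len;
	uint8_t key[];		/* mirrors ip_add[] */
};

static struct cached_route *head;

static int cache_has(const void *key, size_t len)
{
	for (struct cached_route *r = head; r; r = r->next)
		if (r->key_len == len && !memcmp(r->key, key, len))
			return 1;
	return 0;
}

static int cache_add(const void *key, size_t len)
{
	struct cached_route *r;

	if (cache_has(key, len))
		return 0;
	r = malloc(sizeof(*r) + len);	/* entry sized for its own key */
	if (!r)
		return -1;
	r->key_len = len;
	memcpy(r->key, key, len);
	r->next = head;
	head = r;
	return 0;
}

int main(void)
{
	uint8_t v4[4] = { 10, 0, 0, 1 };
	uint8_t v6[16] = { 0x20, 0x01, 0x0d, 0xb8 };

	cache_add(v4, sizeof(v4));
	cache_add(v6, sizeof(v6));
	printf("v4 cached: %d, v6 cached: %d\n",
	       cache_has(v4, sizeof(v4)), cache_has(v6, sizeof(v6)));
	return 0;
}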
+static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
+				   &priv->tun.neigh_off_lock_v4, ipv4_addr,
+				   sizeof(*ipv4_addr));
+}
+
+static bool
+nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
+				   &priv->tun.neigh_off_lock_v6, ipv6_addr,
+				   sizeof(*ipv6_addr));
 }

 static void
-nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
-		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
+				     &priv->tun.neigh_off_lock_v4, ipv4_addr,
+				     sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
+				     &priv->tun.neigh_off_lock_v6, ipv6_addr,
+				     sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
+				       &priv->tun.neigh_off_lock_v4, ipv4_addr,
+				       sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
+				       &priv->tun.neigh_off_lock_v6, ipv6_addr,
+				       sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
+		       struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
 {
 	struct nfp_tun_neigh payload;
+	u32 port_id;

-	/* Only offload representor IPv4s for now. */
-	if (!nfp_netdev_is_nfp_repr(netdev))
+	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+	if (!port_id)
 		return;

 	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
...

 	/* If entry has expired send dst IP with all other fields 0. */
 	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
-		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+		nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
 		/* Trigger ARP to verify invalid neighbour state. */
 		neigh_event_send(neigh, NULL);
 		goto send_msg;
...
 	payload.src_ipv4 = flow->saddr;
 	ether_addr_copy(payload.src_addr, netdev->dev_addr);
 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
-	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+	payload.port_id = cpu_to_be32(port_id);
 	/* Add destination of new route to NFP cache. */
-	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+	nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);

 send_msg:
 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
 				 sizeof(struct nfp_tun_neigh),
+				 (unsigned char *)&payload, flag);
+}
+
+static void
+nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
+		       struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
+{
+	struct nfp_tun_neigh_v6 payload;
+	u32 port_id;
+
+	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+	if (!port_id)
+		return;
+
+	memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
+	payload.dst_ipv6 = flow->daddr;
+
+	/* If entry has expired send dst IP with all other fields 0. */
+	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
+		nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
+		/* Trigger probe to verify invalid neighbour state. */
+		neigh_event_send(neigh, NULL);
+		goto send_msg;
+	}
+
+	/* Have a valid neighbour so populate rest of entry. */
+	payload.src_ipv6 = flow->saddr;
+	ether_addr_copy(payload.src_addr, netdev->dev_addr);
+	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+	payload.port_id = cpu_to_be32(port_id);
+	/* Add destination of new route to NFP cache. */
+	nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
+
+send_msg:
+	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+				 sizeof(struct nfp_tun_neigh_v6),
 				 (unsigned char *)&payload, flag);
 }

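Both nfp_tun_write_neigh_v4() and nfp_tun_write_neigh_v6() encode "route expired" in-band: the message carries only the destination IP while every other field stays zero, which tells the firmware to drop its cached entry. A small userspace model of that convention (field names follow struct nfp_tun_neigh; byte order and the cmsg framing are omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct tun_neigh {
	uint32_t dst_ipv4;
	uint32_t src_ipv4;
	uint8_t dst_addr[ETH_ALEN];
	uint8_t src_addr[ETH_ALEN];
	uint32_t port_id;
};

static void fill_neigh(struct tun_neigh *p, uint32_t dst, int valid,
		       uint32_t src, const uint8_t *dmac,
		       const uint8_t *smac, uint32_t port)
{
	memset(p, 0, sizeof(*p));
	p->dst_ipv4 = dst;
	if (!valid)
		return;		/* expired: everything but dst stays 0 */
	p->src_ipv4 = src;
	memcpy(p->dst_addr, dmac, ETH_ALEN);
	memcpy(p->src_addr, smac, ETH_ALEN);
	p->port_id = port;
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
	struct tun_neigh msg;

	fill_neigh(&msg, 0x0a000001, 0, 0, mac, mac, 0);
	printf("expired entry: src=%u port=%u\n", msg.src_ipv4, msg.port_id);
	return 0;
}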
...
 {
 	struct nfp_flower_priv *app_priv;
 	struct netevent_redirect *redir;
-	struct flowi4 flow = {};
+	struct flowi4 flow4 = {};
+	struct flowi6 flow6 = {};
 	struct neighbour *n;
 	struct nfp_app *app;
 	struct rtable *rt;
+	bool ipv6 = false;
 	int err;

 	switch (event) {
...
 		return NOTIFY_DONE;
 	}

-	flow.daddr = *(__be32 *)n->primary_key;
+	if (n->tbl->family == AF_INET6)
+		ipv6 = true;

-	/* Only concerned with route changes for representors. */
-	if (!nfp_netdev_is_nfp_repr(n->dev))
-		return NOTIFY_DONE;
+	if (ipv6)
+		flow6.daddr = *(struct in6_addr *)n->primary_key;
+	else
+		flow4.daddr = *(__be32 *)n->primary_key;

-	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
 	app = app_priv->app;

+	if (!nfp_netdev_is_nfp_repr(n->dev) &&
+	    !nfp_flower_internal_port_can_offload(app, n->dev))
+		return NOTIFY_DONE;
+
 	/* Only concerned with changes to routes already added to NFP. */
-	if (!nfp_tun_has_route(app, flow.daddr))
+	if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
+	    (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
 		return NOTIFY_DONE;

 #if IS_ENABLED(CONFIG_INET)
-	/* Do a route lookup to populate flow data. */
-	rt = ip_route_output_key(dev_net(n->dev), &flow);
-	err = PTR_ERR_OR_ZERO(rt);
-	if (err)
-		return NOTIFY_DONE;
+	if (ipv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+		struct dst_entry *dst;

-	ip_rt_put(rt);
+		dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
+						      &flow6, NULL);
+		if (IS_ERR(dst))
+			return NOTIFY_DONE;
+
+		dst_release(dst);
+		flow6.flowi6_proto = IPPROTO_UDP;
+		nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+#else
+		return NOTIFY_DONE;
+#endif /* CONFIG_IPV6 */
+	} else {
+		/* Do a route lookup to populate flow data. */
+		rt = ip_route_output_key(dev_net(n->dev), &flow4);
+		err = PTR_ERR_OR_ZERO(rt);
+		if (err)
+			return NOTIFY_DONE;
+
+		ip_rt_put(rt);
+
+		flow4.flowi4_proto = IPPROTO_UDP;
+		nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+	}
 #else
 	return NOTIFY_DONE;
-#endif
-
-	flow.flowi4_proto = IPPROTO_UDP;
-	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+#endif /* CONFIG_INET */

 	return NOTIFY_OK;
 }

-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_tun_req_route_ipv4 *payload;
 	struct net_device *netdev;
...
 	payload = nfp_flower_cmsg_get_data(skb);

 	rcu_read_lock();
-	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
 	if (!netdev)
 		goto fail_rcu_unlock;

...
 	ip_rt_put(rt);
 	if (!n)
 		goto fail_rcu_unlock;
-	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+	nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
 	neigh_release(n);
 	rcu_read_unlock();
 	return;
...
 fail_rcu_unlock:
 	rcu_read_unlock();
 	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+}
+
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_tun_req_route_ipv6 *payload;
+	struct net_device *netdev;
+	struct flowi6 flow = {};
+	struct dst_entry *dst;
+	struct neighbour *n;
+
+	payload = nfp_flower_cmsg_get_data(skb);
+
+	rcu_read_lock();
+	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+	if (!netdev)
+		goto fail_rcu_unlock;
+
+	flow.daddr = payload->ipv6_addr;
+	flow.flowi6_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
+					      NULL);
+	if (IS_ERR(dst))
+		goto fail_rcu_unlock;
+#else
+	goto fail_rcu_unlock;
+#endif
+
+	n = dst_neigh_lookup(dst, &flow.daddr);
+	dst_release(dst);
+	if (!n)
+		goto fail_rcu_unlock;
+
+	nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+	neigh_release(n);
+	rcu_read_unlock();
+	return;
+
+fail_rcu_unlock:
+	rcu_read_unlock();
+	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
 }

 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
...
 	int count;

 	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
-	mutex_lock(&priv->nfp_ipv4_off_lock);
+	mutex_lock(&priv->tun.ipv4_off_lock);
 	count = 0;
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
-			mutex_unlock(&priv->nfp_ipv4_off_lock);
+			mutex_unlock(&priv->tun.ipv4_off_lock);
 			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
 			return;
 		}
...
 		payload.ipv4_addr[count++] = entry->ipv4_addr;
 	}
 	payload.count = cpu_to_be32(count);
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
+	mutex_unlock(&priv->tun.ipv4_off_lock);

 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
 				 sizeof(struct nfp_tun_ipv4_addr),
...
 	struct nfp_ipv4_addr_entry *entry;
 	struct list_head *ptr, *storage;

-	mutex_lock(&priv->nfp_ipv4_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	mutex_lock(&priv->tun.ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
 		if (entry->ipv4_addr == ipv4) {
 			entry->ref_count++;
-			mutex_unlock(&priv->nfp_ipv4_off_lock);
+			mutex_unlock(&priv->tun.ipv4_off_lock);
 			return;
 		}
 	}

 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
-		mutex_unlock(&priv->nfp_ipv4_off_lock);
+		mutex_unlock(&priv->tun.ipv4_off_lock);
 		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
 		return;
 	}
 	entry->ipv4_addr = ipv4;
 	entry->ref_count = 1;
-	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
+	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
+	mutex_unlock(&priv->tun.ipv4_off_lock);

 	nfp_tun_write_ipv4_list(app);
 }
...
 	struct nfp_ipv4_addr_entry *entry;
 	struct list_head *ptr, *storage;

-	mutex_lock(&priv->nfp_ipv4_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	mutex_lock(&priv->tun.ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
 		if (entry->ipv4_addr == ipv4) {
 			entry->ref_count--;
...
 			break;
 		}
 	}
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
+	mutex_unlock(&priv->tun.ipv4_off_lock);

 	nfp_tun_write_ipv4_list(app);
 }

-void nfp_tunnel_write_macs(struct nfp_app *app)
+static void nfp_tun_write_ipv6_list(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_offload_entry *entry;
-	struct nfp_tun_mac_addr *payload;
-	struct list_head *ptr, *storage;
-	int mac_count, err, pay_size;
+	struct nfp_ipv6_addr_entry *entry;
+	struct nfp_tun_ipv6_addr payload;
+	int count = 0;

-	mutex_lock(&priv->nfp_mac_off_lock);
-	if (!priv->nfp_mac_off_count) {
-		mutex_unlock(&priv->nfp_mac_off_lock);
-		return;
-	}
-
-	pay_size = sizeof(struct nfp_tun_mac_addr) +
-		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
-
-	payload = kzalloc(pay_size, GFP_KERNEL);
-	if (!payload) {
-		mutex_unlock(&priv->nfp_mac_off_lock);
-		return;
-	}
-
-	payload->count = cpu_to_be16(priv->nfp_mac_off_count);
-
-	mac_count = 0;
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
-				   list);
-		payload->addresses[mac_count].index = entry->index;
-		ether_addr_copy(payload->addresses[mac_count].addr,
-				entry->addr);
-		mac_count++;
-	}
-
-	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
-				       pay_size, payload, GFP_KERNEL);
-
-	kfree(payload);
-
-	if (err) {
-		mutex_unlock(&priv->nfp_mac_off_lock);
-		/* Write failed so retain list for future retry. */
-		return;
-	}
-
-	/* If list was successfully offloaded, flush it. */
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
-				   list);
-		list_del(&entry->list);
-		kfree(entry);
-	}
-
-	priv->nfp_mac_off_count = 0;
-	mutex_unlock(&priv->nfp_mac_off_lock);
-}
-
-static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
-{
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_non_nfp_idx *entry;
-	struct list_head *ptr, *storage;
-	int idx;
-
-	mutex_lock(&priv->nfp_mac_index_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
-		if (entry->ifindex == ifindex) {
-			idx = entry->index;
-			mutex_unlock(&priv->nfp_mac_index_lock);
-			return idx;
-		}
-	}
-
-	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
-			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
-	if (idx < 0) {
-		mutex_unlock(&priv->nfp_mac_index_lock);
-		return idx;
-	}
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		mutex_unlock(&priv->nfp_mac_index_lock);
-		return -ENOMEM;
-	}
-	entry->ifindex = ifindex;
-	entry->index = idx;
-	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
-	mutex_unlock(&priv->nfp_mac_index_lock);
-
-	return idx;
-}
-
-static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
-{
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_non_nfp_idx *entry;
-	struct list_head *ptr, *storage;
-
-	mutex_lock(&priv->nfp_mac_index_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
-		if (entry->ifindex == ifindex) {
-			ida_simple_remove(&priv->nfp_mac_off_ids,
-					  entry->index);
-			list_del(&entry->list);
-			kfree(entry);
+	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
+	mutex_lock(&priv->tun.ipv6_off_lock);
+	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
+		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
 			break;
 		}
+		payload.ipv6_addr[count++] = entry->ipv6_addr;
 	}
-	mutex_unlock(&priv->nfp_mac_index_lock);
+	mutex_unlock(&priv->tun.ipv6_off_lock);
+	payload.count = cpu_to_be32(count);
+
+	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
+				 sizeof(struct nfp_tun_ipv6_addr),
+				 &payload, GFP_KERNEL);
 }

-static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
-					    struct nfp_app *app)
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
 {
 	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_offload_entry *entry;
-	u16 nfp_mac_idx;
-	int port = 0;
+	struct nfp_ipv6_addr_entry *entry;

-	/* Check if MAC should be offloaded. */
-	if (!is_valid_ether_addr(netdev->dev_addr))
-		return;
-
-	if (nfp_netdev_is_nfp_repr(netdev))
-		port = nfp_repr_get_port_id(netdev);
-	else if (!nfp_tun_is_netdev_to_offload(netdev))
-		return;
+	mutex_lock(&priv->tun.ipv6_off_lock);
+	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
+		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
+			entry->ref_count++;
+			mutex_unlock(&priv->tun.ipv6_off_lock);
+			return entry;
+		}

 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
-		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
-		return;
+		mutex_unlock(&priv->tun.ipv6_off_lock);
+		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+		return NULL;
 	}
+	entry->ipv6_addr = *ipv6;
+	entry->ref_count = 1;
+	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
+	mutex_unlock(&priv->tun.ipv6_off_lock);

-	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
-	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
-		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
-	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
-		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
-		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
-		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
-	} else {
-		/* Must assign our own unique 8-bit index. */
-		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+	nfp_tun_write_ipv6_list(app);

-		if (idx < 0) {
-			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
-			kfree(entry);
-			return;
-		}
-		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
-	}
-
-	entry->index = cpu_to_be16(nfp_mac_idx);
-	ether_addr_copy(entry->addr, netdev->dev_addr);
-
-	mutex_lock(&priv->nfp_mac_off_lock);
-	priv->nfp_mac_off_count++;
-	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
-	mutex_unlock(&priv->nfp_mac_off_lock);
+	return entry;
 }

-static int nfp_tun_mac_event_handler(struct notifier_block *nb,
-				     unsigned long event, void *ptr)
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
 {
-	struct nfp_flower_priv *app_priv;
-	struct net_device *netdev;
-	struct nfp_app *app;
+	struct nfp_flower_priv *priv = app->priv;
+	bool freed = false;

-	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
-		app_priv = container_of(nb, struct nfp_flower_priv,
-					nfp_tun_mac_nb);
-		app = app_priv->app;
-		netdev = netdev_notifier_info_to_dev(ptr);
+	mutex_lock(&priv->tun.ipv6_off_lock);
+	if (!--entry->ref_count) {
+		list_del(&entry->list);
+		kfree(entry);
+		freed = true;
+	}
+	mutex_unlock(&priv->tun.ipv6_off_lock);

-		/* If non-nfp netdev then free its offload index. */
-		if (nfp_tun_is_netdev_to_offload(netdev))
-			nfp_tun_del_mac_idx(app, netdev->ifindex);
-	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
-		   event == NETDEV_REGISTER) {
-		app_priv = container_of(nb, struct nfp_flower_priv,
-					nfp_tun_mac_nb);
-		app = app_priv->app;
-		netdev = netdev_notifier_info_to_dev(ptr);
+	if (freed)
+		nfp_tun_write_ipv6_list(app);
+}

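nfp_tunnel_add_ipv6_off() and nfp_tunnel_put_ipv6_off() above follow a classic get/put pattern: an add either bumps the reference count of an existing entry or allocates a new one at a count of 1, and a put frees the entry when the count reaches zero; the device-facing list is rewritten only when an entry is actually created or destroyed. A minimal userspace model of the pattern (the driver's mutex is omitted):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addr_entry {
	struct addr_entry *next;
	unsigned char addr[16];
	int ref_count;
};

static struct addr_entry *list;

static void write_list(void) { puts("list pushed to device"); }

static struct addr_entry *addr_get(const unsigned char *a)
{
	struct addr_entry *e;

	for (e = list; e; e = e->next)
		if (!memcmp(e->addr, a, 16)) {
			e->ref_count++;	/* shared: no device update */
			return e;
		}
	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	memcpy(e->addr, a, 16);
	e->ref_count = 1;
	e->next = list;
	list = e;
	write_list();			/* new entry: update device */
	return e;
}

static void addr_put(struct addr_entry *e)
{
	if (--e->ref_count)
		return;			/* still in use elsewhere */
	for (struct addr_entry **p = &list; *p; p = &(*p)->next)
		if (*p == e) {
			*p = e->next;
			break;
		}
	free(e);
	write_list();			/* entry gone: update device */
}

int main(void)
{
	unsigned char a[16] = { 0x20, 0x01 };
	struct addr_entry *e1 = addr_get(a);
	struct addr_entry *e2 = addr_get(a);	/* same entry, ref_count 2 */

	addr_put(e2);
	addr_put(e1);
	return 0;
}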
-		nfp_tun_add_to_mac_offload_list(netdev, app);
+static int
+__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
+{
+	struct nfp_tun_mac_addr_offload payload;

-		/* Force a list write to keep NFP up to date. */
-		nfp_tunnel_write_macs(app);
+	memset(&payload, 0, sizeof(payload));
+
+	if (del)
+		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
+
+	/* FW supports multiple MACs per cmsg but restrict to single. */
+	payload.count = cpu_to_be16(1);
+	payload.index = cpu_to_be16(idx);
+	ether_addr_copy(payload.addr, mac);
+
+	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+					sizeof(struct nfp_tun_mac_addr_offload),
+					&payload, GFP_KERNEL);
+}
+
+static bool nfp_tunnel_port_is_phy_repr(int port)
+{
+	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
+		return true;
+
+	return false;
+}
+
+static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
+{
+	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+}
+
+static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
+{
+	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
+static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
+{
+	return nfp_mac_idx >> 8;
+}
+
+static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
+{
+	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
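The helpers just above pack a MAC-table index as "identifier in the high bits, port-type tag in the low byte", so one u16 can carry either a physical-port index or an ida-allocated global id and still be classified later. A userspace check that the encoding round-trips; the tag values below are illustrative stand-ins for the NFP_FLOWER_CMSG_PORT_TYPE_* constants, whose real values live in the cmsg header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_TYPE_PHYS_PORT  0x1	/* assumed tag values */
#define PORT_TYPE_OTHER_PORT 0x4

static uint16_t mac_idx_from_phy_port(int port)
{
	return port << 8 | PORT_TYPE_PHYS_PORT;
}

static uint16_t global_mac_idx_from_ida(int id)
{
	return id << 8 | PORT_TYPE_OTHER_PORT;
}

static int ida_from_global_mac_idx(uint16_t idx)
{
	return idx >> 8;
}

static int mac_idx_is_global(uint16_t idx)
{
	return (idx & 0xff) == PORT_TYPE_OTHER_PORT;
}

int main(void)
{
	uint16_t idx = global_mac_idx_from_ida(0x42);

	assert(mac_idx_is_global(idx));
	assert(ida_from_global_mac_idx(idx) == 0x42);
	assert(!mac_idx_is_global(mac_idx_from_phy_port(3)));
	printf("global idx 0x%04x round-trips to ida 0x42\n", idx);
	return 0;
}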
+static struct nfp_tun_offloaded_mac *
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
+				      offloaded_macs_params);
+}
+
+static void
+nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
+					   struct net_device *netdev, bool mod)
+{
+	if (nfp_netdev_is_nfp_repr(netdev)) {
+		struct nfp_flower_repr_priv *repr_priv;
+		struct nfp_repr *repr;
+
+		repr = netdev_priv(netdev);
+		repr_priv = repr->app_priv;
+
+		/* If modifying MAC, remove repr from old list first. */
+		if (mod)
+			list_del(&repr_priv->mac_list);
+
+		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
+	} else if (nfp_flower_is_supported_bridge(netdev)) {
+		entry->bridge_count++;
+	}
+
+	entry->ref_count++;
+}
+
+static int
+nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
+			  int port, bool mod)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_tun_offloaded_mac *entry;
+	int ida_idx = -1, err;
+	u16 nfp_mac_idx = 0;
+
+	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+		if (entry->bridge_count ||
+		    !nfp_flower_is_supported_bridge(netdev)) {
+			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+								   netdev, mod);
+			return 0;
+		}
+
+		/* MAC is global but matches need to go to pre_tun table. */
+		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
+	}
+
+	if (!nfp_mac_idx) {
+		/* Assign a global index if non-repr or MAC is now shared. */
+		if (entry || !port) {
+			ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+						 NFP_MAX_MAC_INDEX, GFP_KERNEL);
+			if (ida_idx < 0)
+				return ida_idx;
+
+			nfp_mac_idx =
+				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+
+			if (nfp_flower_is_supported_bridge(netdev))
+				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
+
+		} else {
+			nfp_mac_idx =
+				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+		}
+	}
+
+	if (!entry) {
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			err = -ENOMEM;
+			goto err_free_ida;
+		}
+
+		ether_addr_copy(entry->addr, netdev->dev_addr);
+		INIT_LIST_HEAD(&entry->repr_list);
+
+		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
+					   &entry->ht_node,
+					   offloaded_macs_params)) {
+			err = -ENOMEM;
+			goto err_free_entry;
+		}
+	}
+
+	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
+				       nfp_mac_idx, false);
+	if (err) {
+		/* If not shared then free. */
+		if (!entry->ref_count)
+			goto err_remove_hash;
+		goto err_free_ida;
+	}
+
+	entry->index = nfp_mac_idx;
+	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+
+	return 0;
+
+err_remove_hash:
+	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
+			       offloaded_macs_params);
+err_free_entry:
+	kfree(entry);
+err_free_ida:
+	if (ida_idx != -1)
+		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+
+	return err;
+}
+
+static int
+nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
+			  u8 *mac, bool mod)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_repr_priv *repr_priv;
+	struct nfp_tun_offloaded_mac *entry;
+	struct nfp_repr *repr;
+	u16 nfp_mac_idx;
+	int ida_idx;
+
+	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
+	if (!entry)
+		return 0;
+
+	entry->ref_count--;
+	/* If del is part of a mod then mac_list is still in use elsewhere. */
+	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
+		repr = netdev_priv(netdev);
+		repr_priv = repr->app_priv;
+		list_del(&repr_priv->mac_list);
+	}
+
+	if (nfp_flower_is_supported_bridge(netdev)) {
+		entry->bridge_count--;
+
+		if (!entry->bridge_count && entry->ref_count) {
+			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
+						     false)) {
+				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+						     netdev_name(netdev));
+				return 0;
+			}
+
+			entry->index = nfp_mac_idx;
+			return 0;
+		}
+	}
+
+	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
+	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
+		int port, err;
+
+		repr_priv = list_first_entry(&entry->repr_list,
+					     struct nfp_flower_repr_priv,
+					     mac_list);
+		repr = repr_priv->nfp_repr;
+		port = nfp_repr_get_port_id(repr->netdev);
+		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
+		if (err) {
+			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+					     netdev_name(netdev));
+			return 0;
+		}
+
+		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+		entry->index = nfp_mac_idx;
+		return 0;
+	}
+
+	if (entry->ref_count)
+		return 0;
+
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
+					    &entry->ht_node,
+					    offloaded_macs_params));
+
+	if (nfp_flower_is_supported_bridge(netdev))
+		nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+	else
+		nfp_mac_idx = entry->index;
+
+	/* If MAC has global ID then extract and free the ida entry. */
+	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
+		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+	}
+
+	kfree(entry);
+
+	return __nfp_tunnel_offload_mac(app, mac, 0, true);
+}
+
+static int
+nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
+		       enum nfp_flower_mac_offload_cmd cmd)
+{
+	struct nfp_flower_non_repr_priv *nr_priv = NULL;
+	bool non_repr = false, *mac_offloaded;
+	u8 *off_mac = NULL;
+	int err, port = 0;
+
+	if (nfp_netdev_is_nfp_repr(netdev)) {
+		struct nfp_flower_repr_priv *repr_priv;
+		struct nfp_repr *repr;
+
+		repr = netdev_priv(netdev);
+		if (repr->app != app)
+			return 0;
+
+		repr_priv = repr->app_priv;
+		if (repr_priv->on_bridge)
+			return 0;
+
+		mac_offloaded = &repr_priv->mac_offloaded;
+		off_mac = &repr_priv->offloaded_mac_addr[0];
+		port = nfp_repr_get_port_id(netdev);
+		if (!nfp_tunnel_port_is_phy_repr(port))
+			return 0;
+	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
+		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
+		if (!nr_priv)
+			return -ENOMEM;
+
+		mac_offloaded = &nr_priv->mac_offloaded;
+		off_mac = &nr_priv->offloaded_mac_addr[0];
+		non_repr = true;
+	} else {
+		return 0;
+	}
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		err = -EINVAL;
+		goto err_put_non_repr_priv;
+	}
+
+	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
+		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
+
+	switch (cmd) {
+	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
+		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
+		if (err)
+			goto err_put_non_repr_priv;
+
+		if (non_repr)
+			__nfp_flower_non_repr_priv_get(nr_priv);
+
+		*mac_offloaded = true;
+		ether_addr_copy(off_mac, netdev->dev_addr);
+		break;
+	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
+		/* Only attempt delete if add was successful. */
+		if (!*mac_offloaded)
+			break;
+
+		if (non_repr)
+			__nfp_flower_non_repr_priv_put(nr_priv);
+
+		*mac_offloaded = false;
+
+		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
+						false);
+		if (err)
+			goto err_put_non_repr_priv;
+
+		break;
+	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
+		/* Ignore if changing to the same address. */
+		if (ether_addr_equal(netdev->dev_addr, off_mac))
+			break;
+
+		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
+		if (err)
+			goto err_put_non_repr_priv;
+
+		/* Delete the previous MAC address. */
+		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
+		if (err)
+			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
+					     netdev_name(netdev));
+
+		ether_addr_copy(off_mac, netdev->dev_addr);
+		break;
+	default:
+		err = -EINVAL;
+		goto err_put_non_repr_priv;
+	}
+
+	if (non_repr)
+		__nfp_flower_non_repr_priv_put(nr_priv);
---|
| 1190 | + |
---|
| 1191 | + return 0; |
---|
| 1192 | + |
---|
| 1193 | +err_put_non_repr_priv: |
---|
| 1194 | + if (non_repr) |
---|
| 1195 | + __nfp_flower_non_repr_priv_put(nr_priv); |
---|
| 1196 | + |
---|
| 1197 | + return err; |
---|
| 1198 | +} |
---|
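nfp_tunnel_offload_mac() quietly normalizes its input before the switch: a MOD for a MAC that was never offloaded becomes an ADD, a DEL for a MAC that was never offloaded is a no-op, and a MOD to the identical address does nothing. The standalone sketch below replays just that normalization so the behaviour seen by the event handler is easier to trace; the enum values and simplified state are illustrative, not the driver's.

```c
/* Minimal model of the command normalization in nfp_tunnel_offload_mac().
 * Real repr/non-repr bookkeeping and firmware messaging are omitted.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum sketch_cmd { SKETCH_ADD, SKETCH_MOD, SKETCH_DEL };

struct sketch_state {
	bool offloaded;        /* has this netdev's MAC been offloaded? */
	unsigned char mac[6];  /* last address pushed to the firmware */
};

static const char *apply(struct sketch_state *s, enum sketch_cmd cmd,
			 const unsigned char *mac)
{
	if (cmd == SKETCH_MOD && !s->offloaded)
		cmd = SKETCH_ADD;                 /* promote MOD to ADD */

	switch (cmd) {
	case SKETCH_ADD:
		s->offloaded = true;
		memcpy(s->mac, mac, 6);
		return "add";
	case SKETCH_DEL:
		if (!s->offloaded)
			return "skip";            /* never offloaded */
		s->offloaded = false;
		return "del";
	case SKETCH_MOD:
		if (!memcmp(s->mac, mac, 6))
			return "noop";            /* same address */
		memcpy(s->mac, mac, 6);
		return "mod";
	}
	return "?";
}

int main(void)
{
	struct sketch_state s = { 0 };
	const unsigned char a[6] = { 2, 0, 0, 0, 0, 1 };

	printf("%s\n", apply(&s, SKETCH_MOD, a)); /* "add": promoted */
	printf("%s\n", apply(&s, SKETCH_MOD, a)); /* "noop": unchanged */
	printf("%s\n", apply(&s, SKETCH_DEL, a)); /* "del" */
	return 0;
}
```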
| 1199 | + |
---|
| 1200 | +int nfp_tunnel_mac_event_handler(struct nfp_app *app, |
---|
| 1201 | + struct net_device *netdev, |
---|
| 1202 | + unsigned long event, void *ptr) |
---|
| 1203 | +{ |
---|
| 1204 | + int err; |
---|
| 1205 | + |
---|
| 1206 | + if (event == NETDEV_DOWN) { |
---|
| 1207 | + err = nfp_tunnel_offload_mac(app, netdev, |
---|
| 1208 | + NFP_TUNNEL_MAC_OFFLOAD_DEL); |
---|
| 1209 | + if (err) |
---|
| 1210 | + nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n", |
---|
| 1211 | + netdev_name(netdev)); |
---|
| 1212 | + } else if (event == NETDEV_UP) { |
---|
| 1213 | + err = nfp_tunnel_offload_mac(app, netdev, |
---|
| 1214 | + NFP_TUNNEL_MAC_OFFLOAD_ADD); |
---|
| 1215 | + if (err) |
---|
| 1216 | + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", |
---|
| 1217 | + netdev_name(netdev)); |
---|
| 1218 | + } else if (event == NETDEV_CHANGEADDR) { |
---|
| 1219 | + /* Only offload addr change if netdev is already up. */ |
---|
| 1220 | + if (!(netdev->flags & IFF_UP)) |
---|
| 1221 | + return NOTIFY_OK; |
---|
| 1222 | + |
---|
| 1223 | + err = nfp_tunnel_offload_mac(app, netdev, |
---|
| 1224 | + NFP_TUNNEL_MAC_OFFLOAD_MOD); |
---|
| 1225 | + if (err) |
---|
| 1226 | + nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", |
---|
| 1227 | + netdev_name(netdev)); |
---|
| 1228 | + } else if (event == NETDEV_CHANGEUPPER) { |
---|
| 1229 | + /* If a repr is attached to a bridge then tunnel packets |
---|
| 1230 | + * entering the physical port are directed through the bridge |
---|
| 1231 | + * datapath and cannot be directly detunneled. Therefore, |
---|
| 1232 | + * associated offloaded MACs and indexes should not be used |
---|
| 1233 | + * by fw for detunneling. |
---|
| 1234 | + */ |
---|
| 1235 | + struct netdev_notifier_changeupper_info *info = ptr; |
---|
| 1236 | + struct net_device *upper = info->upper_dev; |
---|
| 1237 | + struct nfp_flower_repr_priv *repr_priv; |
---|
| 1238 | + struct nfp_repr *repr; |
---|
| 1239 | + |
---|
| 1240 | + if (!nfp_netdev_is_nfp_repr(netdev) || |
---|
| 1241 | + !nfp_flower_is_supported_bridge(upper)) |
---|
| 1242 | + return NOTIFY_OK; |
---|
| 1243 | + |
---|
| 1244 | + repr = netdev_priv(netdev); |
---|
| 1245 | + if (repr->app != app) |
---|
| 1246 | + return NOTIFY_OK; |
---|
| 1247 | + |
---|
| 1248 | + repr_priv = repr->app_priv; |
---|
| 1249 | + |
---|
| 1250 | + if (info->linking) { |
---|
| 1251 | + if (nfp_tunnel_offload_mac(app, netdev, |
---|
| 1252 | + NFP_TUNNEL_MAC_OFFLOAD_DEL)) |
---|
| 1253 | + nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", |
---|
| 1254 | + netdev_name(netdev)); |
---|
| 1255 | + repr_priv->on_bridge = true; |
---|
| 1256 | + } else { |
---|
| 1257 | + repr_priv->on_bridge = false; |
---|
| 1258 | + |
---|
| 1259 | + if (!(netdev->flags & IFF_UP)) |
---|
| 1260 | + return NOTIFY_OK; |
---|
| 1261 | + |
---|
| 1262 | + if (nfp_tunnel_offload_mac(app, netdev, |
---|
| 1263 | + NFP_TUNNEL_MAC_OFFLOAD_ADD)) |
---|
| 1264 | + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", |
---|
| 1265 | + netdev_name(netdev)); |
---|
| 1266 | + } |
---|
716 | 1267 | } |
---|
717 | 1268 | return NOTIFY_OK; |
---|
| 1269 | +} |
---|
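nfp_tunnel_mac_event_handler() has the shape of a netdevice notifier body but takes the app pointer explicitly, so it is dispatched from a driver-wide notifier rather than registered directly. A minimal kernel-style sketch of such a dispatcher follows; sketch_app_from_nb() is a hypothetical lookup standing in for however the driver recovers its nfp_app, since that wiring is outside this hunk.

```c
#include <linux/netdevice.h>

/* Hypothetical: recover the nfp_app from the notifier_block, e.g. via
 * container_of() on the structure that embeds it.
 */
static struct nfp_app *sketch_app_from_nb(struct notifier_block *nb);

static int sketch_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	return nfp_tunnel_mac_event_handler(sketch_app_from_nb(nb),
					    netdev, event, ptr);
}

static struct notifier_block sketch_nb = {
	.notifier_call = sketch_netdev_event,
};

/* Registered once at probe time:
 *	register_netdevice_notifier(&sketch_nb);
 */
```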
| 1270 | + |
---|
| 1271 | +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, |
---|
| 1272 | + struct nfp_fl_payload *flow) |
---|
| 1273 | +{ |
---|
| 1274 | + struct nfp_flower_priv *app_priv = app->priv; |
---|
| 1275 | + struct nfp_tun_offloaded_mac *mac_entry; |
---|
| 1276 | + struct nfp_flower_meta_tci *key_meta; |
---|
| 1277 | + struct nfp_tun_pre_tun_rule payload; |
---|
| 1278 | + struct net_device *internal_dev; |
---|
| 1279 | + int err; |
---|
| 1280 | + |
---|
| 1281 | + if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) |
---|
| 1282 | + return -ENOSPC; |
---|
| 1283 | + |
---|
| 1284 | + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); |
---|
| 1285 | + |
---|
| 1286 | + internal_dev = flow->pre_tun_rule.dev; |
---|
| 1287 | + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; |
---|
| 1288 | + payload.host_ctx_id = flow->meta.host_ctx_id; |
---|
| 1289 | + |
---|
| 1290 | + /* Look up the MAC index for the pre-tunnel rule egress device.
---|
| 1291 | + * Note that because the device is always an internal port, it will
---|
| 1292 | + * have a constant global index, so it does not need to be tracked.
---|
| 1293 | + */ |
---|
| 1294 | + mac_entry = nfp_tunnel_lookup_offloaded_macs(app, |
---|
| 1295 | + internal_dev->dev_addr); |
---|
| 1296 | + if (!mac_entry) |
---|
| 1297 | + return -ENOENT; |
---|
| 1298 | + |
---|
| 1299 | + /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being |
---|
| 1300 | + * set/clear for port_idx. |
---|
| 1301 | + */ |
---|
| 1302 | + key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data; |
---|
| 1303 | + if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6) |
---|
| 1304 | + mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT; |
---|
| 1305 | + else |
---|
| 1306 | + mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT; |
---|
| 1307 | + |
---|
| 1308 | + payload.port_idx = cpu_to_be16(mac_entry->index); |
---|
| 1309 | + |
---|
| 1310 | + /* Copy MAC id and VLAN to flow - dev may not exist at delete time. */
---|
| 1311 | + flow->pre_tun_rule.vlan_tci = payload.vlan_tci; |
---|
| 1312 | + flow->pre_tun_rule.port_idx = payload.port_idx; |
---|
| 1313 | + |
---|
| 1314 | + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, |
---|
| 1315 | + sizeof(struct nfp_tun_pre_tun_rule), |
---|
| 1316 | + (unsigned char *)&payload, GFP_KERNEL); |
---|
| 1317 | + if (err) |
---|
| 1318 | + return err; |
---|
| 1319 | + |
---|
| 1320 | + app_priv->pre_tun_rule_cnt++; |
---|
| 1321 | + |
---|
| 1322 | + return 0; |
---|
| 1323 | +} |
---|
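The "MSB being set/clear" comment in the function above is easy to verify in userspace: once the host-order index is byte-swapped into the big-endian port_idx field, BIT(7) of the index occupies the top bit of the 16-bit field as it sits in memory. The check below uses htons() as a stand-in for the kernel's cpu_to_be16() and assumes a little-endian host.

```c
/* Byte-order demonstration for the IPv6 flag in port_idx. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_IPV6_BIT (1u << 7)	/* mirrors NFP_TUN_PRE_TUN_IPV6_BIT */

int main(void)
{
	uint16_t idx = 0x0005 | SKETCH_IPV6_BIT;	/* index 5 + IPv6 flag */
	uint16_t wire;

	wire = htons(idx);	/* stand-in for cpu_to_be16() */
	printf("host index 0x%04x -> stored 0x%04x (MSB %s)\n",
	       idx, wire, (wire & 0x8000) ? "set" : "clear");
	return 0;
}
```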
| 1324 | + |
---|
| 1325 | +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, |
---|
| 1326 | + struct nfp_fl_payload *flow) |
---|
| 1327 | +{ |
---|
| 1328 | + struct nfp_flower_priv *app_priv = app->priv; |
---|
| 1329 | + struct nfp_tun_pre_tun_rule payload; |
---|
| 1330 | + u32 tmp_flags = 0; |
---|
| 1331 | + int err; |
---|
| 1332 | + |
---|
| 1333 | + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); |
---|
| 1334 | + |
---|
| 1335 | + tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL; |
---|
| 1336 | + payload.flags = cpu_to_be32(tmp_flags); |
---|
| 1337 | + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; |
---|
| 1338 | + payload.port_idx = flow->pre_tun_rule.port_idx; |
---|
| 1339 | + |
---|
| 1340 | + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, |
---|
| 1341 | + sizeof(struct nfp_tun_pre_tun_rule), |
---|
| 1342 | + (unsigned char *)&payload, GFP_KERNEL); |
---|
| 1343 | + if (err) |
---|
| 1344 | + return err; |
---|
| 1345 | + |
---|
| 1346 | + app_priv->pre_tun_rule_cnt--; |
---|
| 1347 | + |
---|
| 1348 | + return 0; |
---|
718 | 1349 | } |
---|
719 | 1350 | |
---|
720 | 1351 | int nfp_tunnel_config_start(struct nfp_app *app) |
---|
721 | 1352 | { |
---|
722 | 1353 | struct nfp_flower_priv *priv = app->priv; |
---|
723 | | - struct net_device *netdev; |
---|
724 | 1354 | int err; |
---|
725 | 1355 | |
---|
726 | | - /* Initialise priv data for MAC offloading. */ |
---|
727 | | - priv->nfp_mac_off_count = 0; |
---|
728 | | - mutex_init(&priv->nfp_mac_off_lock); |
---|
729 | | - INIT_LIST_HEAD(&priv->nfp_mac_off_list); |
---|
730 | | - priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler; |
---|
731 | | - mutex_init(&priv->nfp_mac_index_lock); |
---|
732 | | - INIT_LIST_HEAD(&priv->nfp_mac_index_list); |
---|
733 | | - ida_init(&priv->nfp_mac_off_ids); |
---|
| 1356 | + /* Initialise rhash for MAC offload tracking. */ |
---|
| 1357 | + err = rhashtable_init(&priv->tun.offloaded_macs, |
---|
| 1358 | + &offloaded_macs_params); |
---|
| 1359 | + if (err) |
---|
| 1360 | + return err; |
---|
734 | 1361 | |
---|
735 | | - /* Initialise priv data for IPv4 offloading. */ |
---|
736 | | - mutex_init(&priv->nfp_ipv4_off_lock); |
---|
737 | | - INIT_LIST_HEAD(&priv->nfp_ipv4_off_list); |
---|
| 1362 | + ida_init(&priv->tun.mac_off_ids); |
---|
| 1363 | + |
---|
| 1364 | + /* Initialise priv data for IPv4/v6 offloading. */ |
---|
| 1365 | + mutex_init(&priv->tun.ipv4_off_lock); |
---|
| 1366 | + INIT_LIST_HEAD(&priv->tun.ipv4_off_list); |
---|
| 1367 | + mutex_init(&priv->tun.ipv6_off_lock); |
---|
| 1368 | + INIT_LIST_HEAD(&priv->tun.ipv6_off_list); |
---|
738 | 1369 | |
---|
739 | 1370 | /* Initialise priv data for neighbour offloading. */ |
---|
740 | | - spin_lock_init(&priv->nfp_neigh_off_lock); |
---|
741 | | - INIT_LIST_HEAD(&priv->nfp_neigh_off_list); |
---|
742 | | - priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler; |
---|
| 1371 | + spin_lock_init(&priv->tun.neigh_off_lock_v4); |
---|
| 1372 | + INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4); |
---|
| 1373 | + spin_lock_init(&priv->tun.neigh_off_lock_v6); |
---|
| 1374 | + INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6); |
---|
| 1375 | + priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; |
---|
743 | 1376 | |
---|
744 | | - err = register_netdevice_notifier(&priv->nfp_tun_mac_nb); |
---|
745 | | - if (err) |
---|
746 | | - goto err_free_mac_ida; |
---|
747 | | - |
---|
748 | | - err = register_netevent_notifier(&priv->nfp_tun_neigh_nb); |
---|
749 | | - if (err) |
---|
750 | | - goto err_unreg_mac_nb; |
---|
751 | | - |
---|
752 | | - /* Parse netdevs already registered for MACs that need offloaded. */ |
---|
753 | | - rtnl_lock(); |
---|
754 | | - for_each_netdev(&init_net, netdev) |
---|
755 | | - nfp_tun_add_to_mac_offload_list(netdev, app); |
---|
756 | | - rtnl_unlock(); |
---|
| 1377 | + err = register_netevent_notifier(&priv->tun.neigh_nb); |
---|
| 1378 | + if (err) { |
---|
| 1379 | + rhashtable_free_and_destroy(&priv->tun.offloaded_macs, |
---|
| 1380 | + nfp_check_rhashtable_empty, NULL); |
---|
| 1381 | + return err; |
---|
| 1382 | + } |
---|
757 | 1383 | |
---|
758 | 1384 | return 0; |
---|
759 | | - |
---|
760 | | -err_unreg_mac_nb: |
---|
761 | | - unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); |
---|
762 | | -err_free_mac_ida: |
---|
763 | | - ida_destroy(&priv->nfp_mac_off_ids); |
---|
764 | | - return err; |
---|
765 | 1385 | } |
---|
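offloaded_macs_params is defined earlier in the file and does not appear in this hunk. For readers tracing the rhashtable_init() call, a plausible parameter block for a table keyed on the 6-byte MAC address looks like the sketch below; the structure and field values are assumptions for illustration, not the driver's actual definition.

```c
#include <linux/rhashtable.h>
#include <linux/etherdevice.h>

/* Assumed shape of a MAC-keyed rhashtable entry; the real
 * struct nfp_tun_offloaded_mac is defined elsewhere in the file.
 */
struct sketch_offloaded_mac {
	struct rhash_head ht_node;	/* hash table linkage */
	u8 addr[ETH_ALEN];		/* lookup key */
};

static const struct rhashtable_params sketch_macs_params = {
	.key_offset		= offsetof(struct sketch_offloaded_mac, addr),
	.head_offset		= offsetof(struct sketch_offloaded_mac, ht_node),
	.key_len		= ETH_ALEN,
	.automatic_shrinking	= true,	/* shrink table as entries are freed */
};
```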
766 | 1386 | |
---|
767 | 1387 | void nfp_tunnel_config_stop(struct nfp_app *app) |
---|
768 | 1388 | { |
---|
769 | | - struct nfp_tun_mac_offload_entry *mac_entry; |
---|
| 1389 | + struct nfp_offloaded_route *route_entry, *temp; |
---|
770 | 1390 | struct nfp_flower_priv *priv = app->priv; |
---|
771 | | - struct nfp_ipv4_route_entry *route_entry; |
---|
772 | | - struct nfp_tun_mac_non_nfp_idx *mac_idx; |
---|
773 | 1391 | struct nfp_ipv4_addr_entry *ip_entry; |
---|
| 1392 | + struct nfp_tun_neigh_v6 ipv6_route; |
---|
| 1393 | + struct nfp_tun_neigh ipv4_route; |
---|
774 | 1394 | struct list_head *ptr, *storage; |
---|
775 | 1395 | |
---|
776 | | - unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); |
---|
777 | | - unregister_netevent_notifier(&priv->nfp_tun_neigh_nb); |
---|
| 1396 | + unregister_netevent_notifier(&priv->tun.neigh_nb); |
---|
778 | 1397 | |
---|
779 | | - /* Free any memory that may be occupied by MAC list. */ |
---|
780 | | - list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { |
---|
781 | | - mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, |
---|
782 | | - list); |
---|
783 | | - list_del(&mac_entry->list); |
---|
784 | | - kfree(mac_entry); |
---|
785 | | - } |
---|
786 | | - |
---|
787 | | - /* Free any memory that may be occupied by MAC index list. */ |
---|
788 | | - list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { |
---|
789 | | - mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, |
---|
790 | | - list); |
---|
791 | | - list_del(&mac_idx->list); |
---|
792 | | - kfree(mac_idx); |
---|
793 | | - } |
---|
794 | | - |
---|
795 | | - ida_destroy(&priv->nfp_mac_off_ids); |
---|
| 1398 | + ida_destroy(&priv->tun.mac_off_ids); |
---|
796 | 1399 | |
---|
797 | 1400 | /* Free any memory that may be occupied by ipv4 list. */ |
---|
798 | | - list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { |
---|
| 1401 | + list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { |
---|
799 | 1402 | ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); |
---|
800 | 1403 | list_del(&ip_entry->list); |
---|
801 | 1404 | kfree(ip_entry); |
---|
802 | 1405 | } |
---|
803 | 1406 | |
---|
804 | | - /* Free any memory that may be occupied by the route list. */ |
---|
805 | | - list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { |
---|
806 | | - route_entry = list_entry(ptr, struct nfp_ipv4_route_entry, |
---|
807 | | - list); |
---|
| 1407 | + mutex_destroy(&priv->tun.ipv6_off_lock); |
---|
| 1408 | + |
---|
| 1409 | + /* Free memory in the route list and remove entries from fw cache. */ |
---|
| 1410 | + list_for_each_entry_safe(route_entry, temp, |
---|
| 1411 | + &priv->tun.neigh_off_list_v4, list) { |
---|
| 1412 | + memset(&ipv4_route, 0, sizeof(ipv4_route)); |
---|
| 1413 | + memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add, |
---|
| 1414 | + sizeof(ipv4_route.dst_ipv4)); |
---|
808 | 1415 | list_del(&route_entry->list); |
---|
809 | 1416 | kfree(route_entry); |
---|
| 1417 | + |
---|
| 1418 | + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, |
---|
| 1419 | + sizeof(struct nfp_tun_neigh), |
---|
| 1420 | + (unsigned char *)&ipv4_route, |
---|
| 1421 | + GFP_KERNEL); |
---|
810 | 1422 | } |
---|
| 1423 | + |
---|
| 1424 | + list_for_each_entry_safe(route_entry, temp, |
---|
| 1425 | + &priv->tun.neigh_off_list_v6, list) { |
---|
| 1426 | + memset(&ipv6_route, 0, sizeof(ipv6_route)); |
---|
| 1427 | + memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add, |
---|
| 1428 | + sizeof(ipv6_route.dst_ipv6)); |
---|
| 1429 | + list_del(&route_entry->list); |
---|
| 1430 | + kfree(route_entry); |
---|
| 1431 | + |
---|
| 1432 | + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, |
---|
| 1433 | + sizeof(struct nfp_tun_neigh_v6),
---|
| 1434 | + (unsigned char *)&ipv6_route, |
---|
| 1435 | + GFP_KERNEL); |
---|
| 1436 | + } |
---|
| 1437 | + |
---|
| 1438 | + /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */ |
---|
| 1439 | + rhashtable_free_and_destroy(&priv->tun.offloaded_macs, |
---|
| 1440 | + nfp_check_rhashtable_empty, NULL); |
---|
811 | 1441 | } |
---|