2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/include/net/ip_tunnels.h
@@ -33,8 +33,8 @@
 /* Used to memset ipv4 address padding. */
 #define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
 #define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
-	(FIELD_SIZEOF(struct ip_tunnel_key, u) -		\
-	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+	(sizeof_field(struct ip_tunnel_key, u) -		\
+	 sizeof_field(struct ip_tunnel_key, u.ipv4))
 
 struct ip_tunnel_key {
 	__be64 tun_id;
@@ -63,7 +63,7 @@
 
 /* Maximum tunnel options length. */
 #define IP_TUNNEL_OPTS_MAX					\
-	GENMASK((FIELD_SIZEOF(struct ip_tunnel_info,		\
+	GENMASK((sizeof_field(struct ip_tunnel_info,		\
 			      options_len) * BITS_PER_BYTE) - 1, 0)
 
 struct ip_tunnel_info {
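Note: FIELD_SIZEOF() and sizeof_field() compute the same value; the two hunks above are the mechanical rename to the newer spelling. As a sketch of the semantics (this mirrors the usual definition in <linux/stddef.h>; check your tree):

	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

so IP_TUNNEL_KEY_IPV4_PAD_LEN is simply the part of the address union not covered by the IPv4 pair, i.e. the padding bytes that get memset() to zero when only IPv4 addresses are stored.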
@@ -113,7 +113,7 @@
 
 	/* These four fields used only by GRE */
 	u32		i_seqno;	/* The last seen seqno	*/
-	u32		o_seqno;	/* The last output seqno */
+	atomic_t	o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
 
 	/* These four fields used only by ERSPAN */
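Note: making o_seqno an atomic_t implies the GRE transmit path should advance it with atomic ops rather than a plain post-increment, which can lose increments when several CPUs transmit on the same tunnel. A hedged sketch of what a caller is expected to look like (the gre_build_header() call is illustrative of the pattern, not quoted from this series):

	/* old: htonl(tunnel->o_seqno++) could race under concurrent xmit */
	gre_build_header(skb, tunnel->tun_hlen, flags, proto,
			 tunnel->parms.o_key,
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));

atomic_fetch_inc() returns the previous value, so the on-wire sequence numbers match the old post-increment behaviour, just without the race.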
@@ -143,25 +143,6 @@
 	bool			collect_md;
 	bool			ignore_df;
 };
-
-#define TUNNEL_CSUM		__cpu_to_be16(0x01)
-#define TUNNEL_ROUTING		__cpu_to_be16(0x02)
-#define TUNNEL_KEY		__cpu_to_be16(0x04)
-#define TUNNEL_SEQ		__cpu_to_be16(0x08)
-#define TUNNEL_STRICT		__cpu_to_be16(0x10)
-#define TUNNEL_REC		__cpu_to_be16(0x20)
-#define TUNNEL_VERSION		__cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY		__cpu_to_be16(0x80)
-#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
-#define TUNNEL_OAM		__cpu_to_be16(0x0200)
-#define TUNNEL_CRIT_OPT		__cpu_to_be16(0x0400)
-#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
-#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)
-#define TUNNEL_NOCACHE		__cpu_to_be16(0x2000)
-#define TUNNEL_ERSPAN_OPT	__cpu_to_be16(0x4000)
-
-#define TUNNEL_OPTIONS_PRESENT \
-		(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
 
 struct tnl_ptk_info {
 	__be16 flags;
@@ -260,7 +241,7 @@
 			     int proto,
 			     __be32 daddr, __be32 saddr,
 			     __be32 key, __u8 tos, int oif,
-			     __u32 mark)
+			     __u32 mark, __u32 tun_inner_hash)
 {
 	memset(fl4, 0, sizeof(*fl4));
 	fl4->flowi4_oif = oif;
@@ -270,6 +251,7 @@
 	fl4->flowi4_proto = proto;
 	fl4->fl4_gre_key = key;
 	fl4->flowi4_mark = mark;
+	fl4->flowi4_multipath_hash = tun_inner_hash;
 }
 
 int ip_tunnel_init(struct net_device *dev);
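Note: the new tun_inner_hash argument lands in fl4->flowi4_multipath_hash, so the route lookup for the outer packet can take the inner flow into account when doing ECMP (depending on the fib_multipath_hash_policy sysctl), rather than hashing every tunnelled flow onto the same outer tuple. A hedged sketch of a caller; the surrounding variables are illustrative:

	struct flowi4 fl4;

	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, daddr, saddr,
			    tunnel->parms.o_key, RT_TOS(tos),
			    tunnel->parms.link, tunnel->fwmark,
			    skb_get_hash(skb));

skb_get_hash() on the not-yet-encapsulated skb is the natural value to pass: it is the inner flow's hash.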
@@ -286,8 +268,9 @@
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    const struct iphdr *tnl_params, const u8 protocol);
 void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
-		       const u8 proto);
-int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+		       const u8 proto, int tunnel_hlen);
+int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
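Note: the ioctl surface is split here. ip_tunnel_ctl() operates on an already-decoded struct ip_tunnel_parm and is meant to sit behind a per-driver tunnel-ctl callback, while ip_tunnel_ioctl() now owns the struct ifreq copy-in/copy-out itself. A hedged sketch of how an ipip/gre-style driver might wire the two up (names are illustrative, and newer trees route the ifreq path through ndo_siocdevprivate instead of ndo_do_ioctl):

	static int my_tunnel_ctl(struct net_device *dev,
				 struct ip_tunnel_parm *p, int cmd)
	{
		/* driver-specific validation of p->iph goes here */
		return ip_tunnel_ctl(dev, p, cmd);
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_do_ioctl	= ip_tunnel_ioctl,	/* copies parms in/out */
		.ndo_tunnel_ctl	= my_tunnel_ctl,	/* acts on the parms */
		/* ... */
	};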
@@ -307,12 +290,14 @@
 			     struct ip_tunnel_parm *p, __u32 fwmark);
 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
 
+extern const struct header_ops ip_tunnel_header_ops;
 __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
 
 struct ip_tunnel_encap_ops {
 	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
 			    u8 *protocol, struct flowi4 *fl4);
+	int (*err_handler)(struct sk_buff *skb, u32 info);
 };
 
 #define MAX_IPTUN_ENCAP_OPS 8
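Note: err_handler gives an encapsulation module (FOU/GUE are the obvious candidates) a hook to inspect ICMP errors for the outer headers and translate them for the inner tunnel. A hedged sketch of an ops table using it; every name below is hypothetical:

	static int my_encap_err(struct sk_buff *skb, u32 info)
	{
		/* peel the encap header, hand the ICMP payload to the inner
		 * protocol's error handler; 0 = handled, -ENOENT = not ours */
		return -ENOENT;
	}

	static const struct ip_tunnel_encap_ops my_encap_ops = {
		.encap_hlen	= my_encap_hlen,
		.build_header	= my_build_header,
		.err_handler	= my_encap_err,
	};

The table is still registered the usual way, via ip_tunnel_encap_add_ops().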
@@ -393,9 +378,11 @@
 static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
 				       const struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IP))
+	__be16 payload_protocol = skb_protocol(skb, true);
+
+	if (payload_protocol == htons(ETH_P_IP))
 		return iph->tos;
-	else if (skb->protocol == htons(ETH_P_IPV6))
+	else if (payload_protocol == htons(ETH_P_IPV6))
 		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
 	else
 		return 0;
@@ -404,9 +391,11 @@
 static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
 				   const struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IP))
+	__be16 payload_protocol = skb_protocol(skb, true);
+
+	if (payload_protocol == htons(ETH_P_IP))
 		return iph->ttl;
-	else if (skb->protocol == htons(ETH_P_IPV6))
+	else if (payload_protocol == htons(ETH_P_IPV6))
 		return ((const struct ipv6hdr *)iph)->hop_limit;
 	else
 		return 0;
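Note: skb_protocol(skb, true) looks past any VLAN tags before reporting the payload protocol, so the two helpers above now return the real inner ToS/TTL for VLAN-tagged IPv4/IPv6 payloads instead of falling through to "return 0". A hedged sketch of the visible effect on a caller such as ip_tunnel_ecn_encap(); the variables are illustrative:

	/* inner packet: 802.1Q tag + IPv4. Previously skb->protocol was
	 * ETH_P_8021Q, the dsfield helper returned 0 and the inner DSCP/ECN
	 * bits were lost; now the inner iph->tos is propagated as expected. */
	u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);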
@@ -435,6 +424,8 @@
 			  u8 tos, u8 ttl, __be16 df, bool xnet);
 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 					     gfp_t flags);
+int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+			  int headroom, bool reply);
 
 int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
 
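Note: skb_tunnel_check_pmtu() is intended to be called from a tunnel's xmit path before encapsulation: it checks the packet against the route MTU minus the headroom the tunnel is about to add and, when reply is true, can generate the ICMP/ICMPv6 "packet too big" error locally. A hedged sketch of a caller in an ndo_start_xmit-style context (variable names and the return-value check are illustrative):

	err = skb_tunnel_check_pmtu(skb, &rt->dst,
				    t->tun_hlen + sizeof(struct iphdr),
				    true);
	if (err < 0) {
		/* a PTB error has already been sent back towards the sender */
		ip_rt_put(rt);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}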