| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Definitions for the 'struct sk_buff' memory handlers. |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Authors: |
|---|
| 5 | 6 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
|---|
| 6 | 7 | * Florian La Roche, <rzsfl@rz.uni-sb.de> |
|---|
| 7 | | - * |
|---|
| 8 | | - * This program is free software; you can redistribute it and/or |
|---|
| 9 | | - * modify it under the terms of the GNU General Public License |
|---|
| 10 | | - * as published by the Free Software Foundation; either version |
|---|
| 11 | | - * 2 of the License, or (at your option) any later version. |
|---|
| 12 | 8 | */ |
|---|
| 13 | 9 | |
|---|
| 14 | 10 | #ifndef _LINUX_SKBUFF_H |
|---|
| .. | .. |
|---|
| 18 | 14 | #include <linux/compiler.h> |
|---|
| 19 | 15 | #include <linux/time.h> |
|---|
| 20 | 16 | #include <linux/bug.h> |
|---|
| 17 | +#include <linux/bvec.h> |
|---|
| 21 | 18 | #include <linux/cache.h> |
|---|
| 22 | 19 | #include <linux/rbtree.h> |
|---|
| 23 | 20 | #include <linux/socket.h> |
|---|
| .. | .. |
|---|
| 40 | 37 | #include <linux/in6.h> |
|---|
| 41 | 38 | #include <linux/if_packet.h> |
|---|
| 42 | 39 | #include <net/flow.h> |
|---|
| 40 | +#if IS_ENABLED(CONFIG_NF_CONNTRACK) |
|---|
| 41 | +#include <linux/netfilter/nf_conntrack_common.h> |
|---|
| 42 | +#endif |
|---|
| 43 | +#include <linux/android_kabi.h> |
|---|
| 44 | +#include <linux/android_vendor.h> |
|---|
| 43 | 45 | |
|---|
| 44 | 46 | /* The interface for checksum offload between the stack and networking drivers |
|---|
| 45 | 47 | * is as follows... |
|---|
| .. | .. |
|---|
| 47 | 49 | * A. IP checksum related features |
|---|
| 48 | 50 | * |
|---|
| 49 | 51 | * Drivers advertise checksum offload capabilities in the features of a device. |
|---|
| 50 | | - * From the stack's point of view these are capabilities offered by the driver, |
|---|
| 51 | | - * a driver typically only advertises features that it is capable of offloading |
|---|
| 52 | + * From the stack's point of view these are capabilities offered by the driver. |
|---|
| 53 | + * A driver typically only advertises features that it is capable of offloading |
|---|
| 52 | 54 | * to its device. |
|---|
| 53 | 55 | * |
|---|
| 54 | 56 | * The checksum related features are: |
|---|
| .. | .. |
|---|
| 63 | 65 | * TCP or UDP packets over IPv4. These are specifically |
|---|
| 64 | 66 | * unencapsulated packets of the form IPv4|TCP or |
|---|
| 65 | 67 | * IPv4|UDP where the Protocol field in the IPv4 header |
|---|
| 66 | | - * is TCP or UDP. The IPv4 header may contain IP options |
|---|
| 68 | + * is TCP or UDP. The IPv4 header may contain IP options. |
|---|
| 67 | 69 | * This feature cannot be set in features for a device |
|---|
| 68 | 70 | * with NETIF_F_HW_CSUM also set. This feature is being |
|---|
| 69 | 71 | * DEPRECATED (see below). |
|---|
| .. | .. |
|---|
| 71 | 73 | * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain |
|---|
| 72 | 74 | * TCP or UDP packets over IPv6. These are specifically |
|---|
| 73 | 75 | * unencapsulated packets of the form IPv6|TCP or |
|---|
| 74 | | - * IPv4|UDP where the Next Header field in the IPv6 |
|---|
| 76 | + * IPv6|UDP where the Next Header field in the IPv6 |
|---|
| 75 | 77 | * header is either TCP or UDP. IPv6 extension headers |
|---|
| 76 | 78 | * are not supported with this feature. This feature |
|---|
| 77 | 79 | * cannot be set in features for a device with |
|---|
| .. | .. |
|---|
| 79 | 81 | * DEPRECATED (see below). |
|---|
| 80 | 82 | * |
|---|
| 81 | 83 | * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. |
|---|
| 82 | | - * This flag is used only used to disable the RX checksum |
|---|
| 84 | + * This flag is only used to disable the RX checksum |
|---|
| 83 | 85 | * feature for a device. The stack will accept receive |
|---|
| 84 | 86 | * checksum indication in packets received on a device |
|---|
| 85 | 87 | * regardless of whether NETIF_F_RXCSUM is set. |
|---|
| 86 | 88 | * |
|---|
| 87 | 89 | * B. Checksumming of received packets by device. Indication of checksum |
|---|
| 88 | | - * verification is in set skb->ip_summed. Possible values are: |
|---|
| 90 | + * verification is set in skb->ip_summed. Possible values are: |
|---|
| 89 | 91 | * |
|---|
| 90 | 92 | * CHECKSUM_NONE: |
|---|
| 91 | 93 | * |
|---|
| .. | .. |
|---|
| 115 | 117 | * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. |
|---|
| 116 | 118 | * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet |
|---|
| 117 | 119 | * and a device is able to verify the checksums for UDP (possibly zero), |
|---|
| 118 | | - * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to |
|---|
| 120 | + * GRE (checksum flag is set) and TCP, skb->csum_level would be set to |
|---|
| 119 | 121 | * two. If the device were only able to verify the UDP checksum and not |
|---|
| 120 | | - * GRE, either because it doesn't support GRE checksum of because GRE |
|---|
| 122 | + * GRE, either because it doesn't support GRE checksum or because GRE |
|---|
| 121 | 123 | * checksum is bad, skb->csum_level would be set to zero (TCP checksum is |
|---|
| 122 | 124 | * not considered in this case). |
|---|
| 123 | 125 | * |
|---|
| 124 | 126 | * CHECKSUM_COMPLETE: |
|---|
| 125 | 127 | * |
|---|
| 126 | 128 | * This is the most generic way. The device supplies the checksum of the _whole_ |
|---|
| 127 | | - * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the |
|---|
| 129 | + * packet as seen by netif_rx() and fills in skb->csum. This means the |
|---|
| 128 | 130 | * hardware doesn't need to parse L3/L4 headers to implement this. |
|---|
| 129 | 131 | * |
|---|
| 130 | 132 | * Notes: |
|---|
| .. | .. |
|---|
| 153 | 155 | * from skb->csum_start up to the end, and to record/write the checksum at |
|---|
| 154 | 156 | * offset skb->csum_start + skb->csum_offset. A driver may verify that the |
|---|
| 155 | 157 | * csum_start and csum_offset values are valid values given the length and |
|---|
| 156 | | - * offset of the packet, however they should not attempt to validate that the |
|---|
| 157 | | - * checksum refers to a legitimate transport layer checksum-- it is the |
|---|
| 158 | + * offset of the packet, but it should not attempt to validate that the |
|---|
| 159 | + * checksum refers to a legitimate transport layer checksum -- it is the |
|---|
| 158 | 160 | * purview of the stack to validate that csum_start and csum_offset are set |
|---|
| 159 | 161 | * correctly. |
|---|
| 160 | 162 | * |
|---|
| .. | .. |
|---|
| 178 | 180 | * |
|---|
| 179 | 181 | * CHECKSUM_UNNECESSARY: |
|---|
| 180 | 182 | * |
|---|
| 181 | | - * This has the same meaning on as CHECKSUM_NONE for checksum offload on |
|---|
| 183 | + * This has the same meaning as CHECKSUM_NONE for checksum offload on |
|---|
| 182 | 184 | * output. |
|---|
| 183 | 185 | * |
|---|
| 184 | 186 | * CHECKSUM_COMPLETE: |
|---|
| 185 | 187 | * Not used in checksum output. If a driver observes a packet with this value |
|---|
| 186 | | - * set in skbuff, if should treat as CHECKSUM_NONE being set. |
|---|
| 188 | + * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set. |
|---|
| 187 | 189 | * |
|---|
| 188 | 190 | * D. Non-IP checksum (CRC) offloads |
|---|
| 189 | 191 | * |
|---|
| 190 | 192 | * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of |
|---|
| 191 | 193 | * offloading the SCTP CRC in a packet. To perform this offload the stack |
|---|
| 192 | | - * will set set csum_start and csum_offset accordingly, set ip_summed to |
|---|
| 194 | + * will set csum_start and csum_offset accordingly, set ip_summed to |
|---|
| 193 | 195 | * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in |
|---|
| 194 | 196 | * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. |
|---|
| 195 | 197 | * A driver that supports both IP checksum offload and SCTP CRC32c offload |
|---|
| .. | .. |
|---|
| 200 | 202 | * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of |
|---|
| 201 | 203 | * offloading the FCOE CRC in a packet. To perform this offload the stack |
|---|
| 202 | 204 | * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset |
|---|
| 203 | | - * accordingly. Note the there is no indication in the skbuff that the |
|---|
| 204 | | - * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports |
|---|
| 205 | + * accordingly. Note that there is no indication in the skbuff that the |
|---|
| 206 | + * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports |
|---|
| 205 | 207 | * both IP checksum offload and FCOE CRC offload must verify which offload |
|---|
| 206 | | - * is configured for a packet presumably by inspecting packet headers. |
|---|
| 208 | + * is configured for a packet, presumably by inspecting packet headers. |
|---|
| 207 | 209 | * |
|---|
| 208 | 210 | * E. Checksumming on output with GSO. |
|---|
| 209 | 211 | * |
|---|
| .. | .. |
|---|
| 211 | 213 | * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the |
|---|
| 212 | 214 | * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as |
|---|
| 213 | 215 | * part of the GSO operation is implied. If a checksum is being offloaded |
|---|
| 214 | | - * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset |
|---|
| 215 | | - * are set to refer to the outermost checksum being offload (two offloaded |
|---|
| 216 | | - * checksums are possible with UDP encapsulation). |
|---|
| 216 | + * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and |
|---|
| 217 | + * csum_offset are set to refer to the outermost checksum being offloaded |
|---|
| 218 | + * (two offloaded checksums are possible with UDP encapsulation). |
|---|
| 217 | 219 | */ |
|---|
| 218 | 220 | |
|---|
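Sections B, C and E above describe a contract between driver and stack. The sketch below illustrates both ends of it; `my_rx()`, `my_xmit_csum_ok()` and `hw_csum` are invented names, while the skb fields and `CHECKSUM_*` values are exactly those documented above.

```c
/* Hedged sketch of the ip_summed contract; my_rx()/my_xmit_csum_ok()
 * and hw_csum are hypothetical driver pieces, the fields are as above.
 */
static void my_rx(struct sk_buff *skb, __wsum hw_csum)
{
	skb->csum = hw_csum;			/* csum of the whole packet */
	skb->ip_summed = CHECKSUM_COMPLETE;	/* as seen by netif_rx() */
}

static bool my_xmit_csum_ok(const struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return true;	/* nothing for the device to do */

	/* The device must checksum from csum_start to the end and store
	 * the result at csum_start + csum_offset, both relative to
	 * skb->head.  Validating that this is a legitimate transport
	 * checksum is the stack's job, not the driver's (see above).
	 */
	return skb->csum_start + skb->csum_offset + sizeof(__sum16) <=
	       skb_headroom(skb) + skb_headlen(skb);
}
```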
| 219 | 221 | /* Don't change this without changing skb_csum_unnecessary! */ |
|---|
| .. | .. |
|---|
| 238 | 240 | SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ |
|---|
| 239 | 241 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
|---|
| 240 | 242 | |
|---|
| 243 | +struct ahash_request; |
|---|
| 241 | 244 | struct net_device; |
|---|
| 242 | 245 | struct scatterlist; |
|---|
| 243 | 246 | struct pipe_inode_info; |
|---|
| 244 | 247 | struct iov_iter; |
|---|
| 245 | 248 | struct napi_struct; |
|---|
| 249 | +struct bpf_prog; |
|---|
| 250 | +union bpf_attr; |
|---|
| 251 | +struct skb_ext; |
|---|
| 246 | 252 | |
|---|
| 247 | | -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
|---|
| 248 | | -struct nf_conntrack { |
|---|
| 249 | | - atomic_t use; |
|---|
| 250 | | -}; |
|---|
| 251 | | -#endif |
|---|
| 252 | | -#include <linux/android_kabi.h> |
|---|
| 253 | | - |
|---|
| 253 | +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
|---|
| 254 | 254 | struct nf_bridge_info { |
|---|
| 255 | | - refcount_t use; |
|---|
| 256 | 255 | enum { |
|---|
| 257 | 256 | BRNF_PROTO_UNCHANGED, |
|---|
| 258 | 257 | BRNF_PROTO_8021Q, |
|---|
| .. | .. |
|---|
| 261 | 260 | u8 pkt_otherhost:1; |
|---|
| 262 | 261 | u8 in_prerouting:1; |
|---|
| 263 | 262 | u8 bridged_dnat:1; |
|---|
| 263 | + u8 sabotage_in_done:1; |
|---|
| 264 | 264 | __u16 frag_max_size; |
|---|
| 265 | 265 | struct net_device *physindev; |
|---|
| 266 | 266 | |
|---|
| .. | .. |
|---|
| 278 | 278 | char neigh_header[8]; |
|---|
| 279 | 279 | }; |
|---|
| 280 | 280 | }; |
|---|
| 281 | +#endif |
|---|
| 282 | + |
|---|
| 283 | +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
|---|
| 284 | +/* Chain in tc_skb_ext will be used to share the tc chain with |
|---|
| 285 | + * ovs recirc_id. It will be set to the current chain by tc |
|---|
| 286 | + * and read by ovs to set the recirc_id. |
|---|
| 287 | + */ |
|---|
| 288 | +struct tc_skb_ext { |
|---|
| 289 | + __u32 chain; |
|---|
| 290 | + __u16 mru; |
|---|
| 291 | +}; |
|---|
| 292 | +#endif |
|---|
| 281 | 293 | |
|---|
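For context, a consumer such as OVS would fetch this extension roughly as below. `skb_ext_find()` and the `TC_SKB_EXT` id are assumed from the skb extension infrastructure (struct skb_ext is only forward-declared earlier in this diff), so treat this as a sketch rather than the exact upstream call site.

```c
/* Sketch: how ovs might read the chain shared via tc_skb_ext.
 * skb_ext_find()/TC_SKB_EXT are assumptions, not part of this diff.
 */
static u32 my_get_recirc_chain(const struct sk_buff *skb)
{
	struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

	return ext ? ext->chain : 0;	/* 0: no chain was shared by tc */
}
```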
| 282 | 294 | struct sk_buff_head { |
|---|
| 283 | 295 | /* These two members must be first. */ |
|---|
| .. | .. |
|---|
| 309 | 321 | */ |
|---|
| 310 | 322 | #define GSO_BY_FRAGS 0xFFFF |
|---|
| 311 | 323 | |
|---|
| 312 | | -typedef struct skb_frag_struct skb_frag_t; |
|---|
| 324 | +typedef struct bio_vec skb_frag_t; |
|---|
| 313 | 325 | |
|---|
| 314 | | -struct skb_frag_struct { |
|---|
| 315 | | - struct { |
|---|
| 316 | | - struct page *p; |
|---|
| 317 | | - } page; |
|---|
| 318 | | -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) |
|---|
| 319 | | - __u32 page_offset; |
|---|
| 320 | | - __u32 size; |
|---|
| 321 | | -#else |
|---|
| 322 | | - __u16 page_offset; |
|---|
| 323 | | - __u16 size; |
|---|
| 324 | | -#endif |
|---|
| 325 | | -}; |
|---|
| 326 | | - |
|---|
| 326 | +/** |
|---|
| 327 | + * skb_frag_size() - Returns the size of a skb fragment |
|---|
| 328 | + * @frag: skb fragment |
|---|
| 329 | + */ |
|---|
| 327 | 330 | static inline unsigned int skb_frag_size(const skb_frag_t *frag) |
|---|
| 328 | 331 | { |
|---|
| 329 | | - return frag->size; |
|---|
| 332 | + return frag->bv_len; |
|---|
| 330 | 333 | } |
|---|
| 331 | 334 | |
|---|
| 335 | +/** |
|---|
| 336 | + * skb_frag_size_set() - Sets the size of a skb fragment |
|---|
| 337 | + * @frag: skb fragment |
|---|
| 338 | + * @size: size of fragment |
|---|
| 339 | + */ |
|---|
| 332 | 340 | static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) |
|---|
| 333 | 341 | { |
|---|
| 334 | | - frag->size = size; |
|---|
| 342 | + frag->bv_len = size; |
|---|
| 335 | 343 | } |
|---|
| 336 | 344 | |
|---|
| 345 | +/** |
|---|
| 346 | + * skb_frag_size_add() - Increments the size of a skb fragment by @delta |
|---|
| 347 | + * @frag: skb fragment |
|---|
| 348 | + * @delta: value to add |
|---|
| 349 | + */ |
|---|
| 337 | 350 | static inline void skb_frag_size_add(skb_frag_t *frag, int delta) |
|---|
| 338 | 351 | { |
|---|
| 339 | | - frag->size += delta; |
|---|
| 352 | + frag->bv_len += delta; |
|---|
| 340 | 353 | } |
|---|
| 341 | 354 | |
|---|
| 355 | +/** |
|---|
| 356 | + * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta |
|---|
| 357 | + * @frag: skb fragment |
|---|
| 358 | + * @delta: value to subtract |
|---|
| 359 | + */ |
|---|
| 342 | 360 | static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) |
|---|
| 343 | 361 | { |
|---|
| 344 | | - frag->size -= delta; |
|---|
| 362 | + frag->bv_len -= delta; |
|---|
| 345 | 363 | } |
|---|
| 346 | 364 | |
|---|
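With skb_frag_t now a struct bio_vec, the accessors above are the supported way to touch `bv_len`. A minimal sketch of their use, assuming a fully built skb:

```c
/* Sketch: total bytes held in the paged fragments, via the accessors
 * above.  For a well-formed skb this equals skb->data_len.
 */
static unsigned int skb_frags_total(const struct sk_buff *skb)
{
	unsigned int i, total = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		total += skb_frag_size(&skb_shinfo(skb)->frags[i]);

	return total;
}
```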
| 365 | +/** |
|---|
| 366 | + * skb_frag_must_loop - Test if %p is a high memory page |
|---|
| 367 | + * @p: fragment's page |
|---|
| 368 | + */ |
|---|
| 347 | 369 | static inline bool skb_frag_must_loop(struct page *p) |
|---|
| 348 | 370 | { |
|---|
| 349 | 371 | #if defined(CONFIG_HIGHMEM) |
|---|
| .. | .. |
|---|
| 357 | 379 | * skb_frag_foreach_page - loop over pages in a fragment |
|---|
| 358 | 380 | * |
|---|
| 359 | 381 | * @f: skb frag to operate on |
|---|
| 360 | | - * @f_off: offset from start of f->page.p |
|---|
| 382 | + * @f_off: offset from start of f->bv_page |
|---|
| 361 | 383 | * @f_len: length from f_off to loop over |
|---|
| 362 | 384 | * @p: (temp var) current page |
|---|
| 363 | 385 | * @p_off: (temp var) offset from start of current page, |
|---|
| .. | .. |
|---|
| 478 | 500 | } |
|---|
| 479 | 501 | |
|---|
| 480 | 502 | void sock_zerocopy_put(struct ubuf_info *uarg); |
|---|
| 481 | | -void sock_zerocopy_put_abort(struct ubuf_info *uarg); |
|---|
| 503 | +void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); |
|---|
| 482 | 504 | |
|---|
| 483 | 505 | void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); |
|---|
| 484 | 506 | |
|---|
| 507 | +int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); |
|---|
| 485 | 508 | int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, |
|---|
| 486 | 509 | struct msghdr *msg, int len, |
|---|
| 487 | 510 | struct ubuf_info *uarg); |
|---|
| .. | .. |
|---|
| 510 | 533 | /* Intermediate layers must ensure that destructor_arg |
|---|
| 511 | 534 | * remains valid until skb destructor */ |
|---|
| 512 | 535 | void * destructor_arg; |
|---|
| 536 | + |
|---|
| 537 | + ANDROID_OEM_DATA_ARRAY(1, 3); |
|---|
| 513 | 538 | |
|---|
| 514 | 539 | /* must be last field, see pskb_expand_head() */ |
|---|
| 515 | 540 | skb_frag_t frags[MAX_SKB_FRAGS]; |
|---|
| .. | .. |
|---|
| 574 | 599 | SKB_GSO_UDP = 1 << 16, |
|---|
| 575 | 600 | |
|---|
| 576 | 601 | SKB_GSO_UDP_L4 = 1 << 17, |
|---|
| 602 | + |
|---|
| 603 | + SKB_GSO_FRAGLIST = 1 << 18, |
|---|
| 577 | 604 | }; |
|---|
| 578 | 605 | |
|---|
| 579 | 606 | #if BITS_PER_LONG > 32 |
|---|
| .. | .. |
|---|
| 586 | 613 | typedef unsigned char *sk_buff_data_t; |
|---|
| 587 | 614 | #endif |
|---|
| 588 | 615 | |
|---|
| 589 | | -/** |
|---|
| 616 | +/** |
|---|
| 590 | 617 | * struct sk_buff - socket buffer |
|---|
| 591 | 618 | * @next: Next buffer in list |
|---|
| 592 | 619 | * @prev: Previous buffer in list |
|---|
| 593 | 620 | * @tstamp: Time we arrived/left |
|---|
| 621 | + * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point |
|---|
| 622 | + * for retransmit timer |
|---|
| 594 | 623 | * @rbnode: RB tree node, alternative to next/prev for netem/tcp |
|---|
| 624 | + * @list: queue head |
|---|
| 595 | 625 | * @sk: Socket we are owned by |
|---|
| 626 | + * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in |
|---|
| 627 | + * fragmentation management |
|---|
| 596 | 628 | * @dev: Device we arrived on/are leaving by |
|---|
| 629 | + * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL |
|---|
| 597 | 630 | * @cb: Control buffer. Free for use by every layer. Put private vars here |
|---|
| 598 | 631 | * @_skb_refdst: destination entry (with norefcount bit) |
|---|
| 599 | 632 | * @sp: the security path, used for xfrm |
|---|
| .. | .. |
|---|
| 612 | 645 | * @pkt_type: Packet class |
|---|
| 613 | 646 | * @fclone: skbuff clone status |
|---|
| 614 | 647 | * @ipvs_property: skbuff is owned by ipvs |
|---|
| 648 | + * @inner_protocol_type: whether the inner protocol is |
|---|
| 649 | + * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO |
|---|
| 650 | + * @remcsum_offload: remote checksum offload is enabled |
|---|
| 651 | + * @offload_fwd_mark: Packet was L2-forwarded in hardware |
|---|
| 652 | + * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware |
|---|
| 615 | 653 | * @tc_skip_classify: do not classify packet. set by IFB device |
|---|
| 616 | 654 | * @tc_at_ingress: used within tc_classify to distinguish in/egress |
|---|
| 617 | | - * @tc_redirected: packet was redirected by a tc action |
|---|
| 618 | | - * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect |
|---|
| 655 | + * @redirected: packet was redirected by packet classifier |
|---|
| 656 | + * @from_ingress: packet was redirected from the ingress path |
|---|
| 619 | 657 | * @peeked: this packet has been seen already, so stats have been |
|---|
| 620 | 658 | * done for it, don't do them again |
|---|
| 621 | 659 | * @nf_trace: netfilter packet trace flag |
|---|
| .. | .. |
|---|
| 628 | 666 | * @tc_index: Traffic control index |
|---|
| 629 | 667 | * @hash: the packet hash |
|---|
| 630 | 668 | * @queue_mapping: Queue mapping for multiqueue devices |
|---|
| 631 | | - * @xmit_more: More SKBs are pending for this queue |
|---|
| 669 | + * @head_frag: skb was allocated from page fragments, |
|---|
| 670 | + * not allocated by kmalloc() or vmalloc(). |
|---|
| 632 | 671 | * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves |
|---|
| 672 | + * @active_extensions: active extensions (skb_ext_id types) |
|---|
| 633 | 673 | * @ndisc_nodetype: router type (from link layer) |
|---|
| 634 | 674 | * @ooo_okay: allow the mapping of a socket to a queue to be changed |
|---|
| 635 | 675 | * @l4_hash: indicate hash is a canonical 4-tuple hash over transport |
|---|
| .. | .. |
|---|
| 638 | 678 | * @wifi_acked_valid: wifi_acked was set |
|---|
| 639 | 679 | * @wifi_acked: whether frame was acked on wifi or not |
|---|
| 640 | 680 | * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS |
|---|
| 681 | + * @encapsulation: indicates the inner headers in the skbuff are valid |
|---|
| 682 | + * @encap_hdr_csum: software checksum is needed |
|---|
| 683 | + * @csum_valid: checksum is already valid |
|---|
| 641 | 684 | * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL |
|---|
| 685 | + * @csum_complete_sw: checksum was completed by software |
|---|
| 686 | + * @csum_level: indicates the number of consecutive checksums found in |
|---|
| 687 | + * the packet minus one that have been verified as |
|---|
| 688 | + * CHECKSUM_UNNECESSARY (max 3) |
|---|
| 689 | + * @scm_io_uring: SKB holds io_uring registered files |
|---|
| 642 | 690 | * @dst_pending_confirm: need to confirm neighbour |
|---|
| 643 | 691 | * @decrypted: Decrypted SKB |
|---|
| 644 | | - * @napi_id: id of the NAPI struct this skb came from |
|---|
| 692 | + * @napi_id: id of the NAPI struct this skb came from |
|---|
| 693 | + * @sender_cpu: (aka @napi_id) source CPU in XPS |
|---|
| 645 | 694 | * @secmark: security marking |
|---|
| 646 | 695 | * @mark: Generic packet mark |
|---|
| 696 | + * @reserved_tailroom: (aka @mark) number of bytes of free space available |
|---|
| 697 | + * at the tail of an sk_buff |
|---|
| 698 | + * @vlan_present: VLAN tag is present |
|---|
| 647 | 699 | * @vlan_proto: vlan encapsulation protocol |
|---|
| 648 | 700 | * @vlan_tci: vlan tag control information |
|---|
| 649 | 701 | * @inner_protocol: Protocol (encapsulation) |
|---|
| 702 | + * @inner_ipproto: (aka @inner_protocol) stores ipproto when |
|---|
| 703 | + * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO; |
|---|
| 650 | 704 | * @inner_transport_header: Inner transport layer header (encapsulation) |
|---|
| 651 | 705 | * @inner_network_header: Network layer header (encapsulation) |
|---|
| 652 | 706 | * @inner_mac_header: Link layer header (encapsulation) |
|---|
| 653 | 707 | * @transport_header: Transport layer header |
|---|
| 654 | 708 | * @network_header: Network layer header |
|---|
| 655 | 709 | * @mac_header: Link layer header |
|---|
| 710 | + * @kcov_handle: KCOV remote handle for remote coverage collection |
|---|
| 656 | 711 | * @tail: Tail pointer |
|---|
| 657 | 712 | * @end: End pointer |
|---|
| 658 | 713 | * @head: Head of buffer |
|---|
| 659 | 714 | * @data: Data head pointer |
|---|
| 660 | 715 | * @truesize: Buffer size |
|---|
| 661 | 716 | * @users: User count - see {datagram,tcp}.c |
|---|
| 717 | + * @extensions: allocated extensions, valid if active_extensions is nonzero |
|---|
| 662 | 718 | */ |
|---|
| 663 | 719 | |
|---|
| 664 | 720 | struct sk_buff { |
|---|
| .. | .. |
|---|
| 688 | 744 | |
|---|
| 689 | 745 | union { |
|---|
| 690 | 746 | ktime_t tstamp; |
|---|
| 691 | | - u64 skb_mstamp; |
|---|
| 747 | + u64 skb_mstamp_ns; /* earliest departure time */ |
|---|
| 692 | 748 | }; |
|---|
| 693 | 749 | /* |
|---|
| 694 | 750 | * This is the control buffer. It is free to use for every |
|---|
| .. | .. |
|---|
| 706 | 762 | struct list_head tcp_tsorted_anchor; |
|---|
| 707 | 763 | }; |
|---|
| 708 | 764 | |
|---|
| 709 | | -#ifdef CONFIG_XFRM |
|---|
| 710 | | - struct sec_path *sp; |
|---|
| 711 | | -#endif |
|---|
| 712 | 765 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
|---|
| 713 | 766 | unsigned long _nfct; |
|---|
| 714 | 767 | #endif |
|---|
| 715 | | - struct nf_bridge_info *nf_bridge; |
|---|
| 716 | 768 | unsigned int len, |
|---|
| 717 | 769 | data_len; |
|---|
| 718 | 770 | __u16 mac_len, |
|---|
| .. | .. |
|---|
| 731 | 783 | #endif |
|---|
| 732 | 784 | #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset) |
|---|
| 733 | 785 | |
|---|
| 786 | + /* private: */ |
|---|
| 734 | 787 | __u8 __cloned_offset[0]; |
|---|
| 788 | + /* public: */ |
|---|
| 735 | 789 | __u8 cloned:1, |
|---|
| 736 | 790 | nohdr:1, |
|---|
| 737 | 791 | fclone:2, |
|---|
| 738 | 792 | peeked:1, |
|---|
| 739 | 793 | head_frag:1, |
|---|
| 740 | | - xmit_more:1, |
|---|
| 741 | 794 | pfmemalloc:1; |
|---|
| 742 | | - |
|---|
| 795 | +#ifdef CONFIG_SKB_EXTENSIONS |
|---|
| 796 | + __u8 active_extensions; |
|---|
| 797 | +#endif |
|---|
| 743 | 798 | /* fields enclosed in headers_start/headers_end are copied |
|---|
| 744 | 799 | * using a single memcpy() in __copy_skb_header() |
|---|
| 745 | 800 | */ |
|---|
| .. | .. |
|---|
| 755 | 810 | #endif |
|---|
| 756 | 811 | #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) |
|---|
| 757 | 812 | |
|---|
| 813 | + /* private: */ |
|---|
| 758 | 814 | __u8 __pkt_type_offset[0]; |
|---|
| 815 | + /* public: */ |
|---|
| 759 | 816 | __u8 pkt_type:3; |
|---|
| 760 | 817 | __u8 ignore_df:1; |
|---|
| 761 | 818 | __u8 nf_trace:1; |
|---|
| .. | .. |
|---|
| 772 | 829 | __u8 encap_hdr_csum:1; |
|---|
| 773 | 830 | __u8 csum_valid:1; |
|---|
| 774 | 831 | |
|---|
| 832 | +#ifdef __BIG_ENDIAN_BITFIELD |
|---|
| 833 | +#define PKT_VLAN_PRESENT_BIT 7 |
|---|
| 834 | +#else |
|---|
| 835 | +#define PKT_VLAN_PRESENT_BIT 0 |
|---|
| 836 | +#endif |
|---|
| 837 | +#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset) |
|---|
| 838 | + /* private: */ |
|---|
| 839 | + __u8 __pkt_vlan_present_offset[0]; |
|---|
| 840 | + /* public: */ |
|---|
| 841 | + __u8 vlan_present:1; |
|---|
| 775 | 842 | __u8 csum_complete_sw:1; |
|---|
| 776 | 843 | __u8 csum_level:2; |
|---|
| 777 | 844 | __u8 csum_not_inet:1; |
|---|
| .. | .. |
|---|
| 779 | 846 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
|---|
| 780 | 847 | __u8 ndisc_nodetype:2; |
|---|
| 781 | 848 | #endif |
|---|
| 782 | | - __u8 ipvs_property:1; |
|---|
| 783 | 849 | |
|---|
| 850 | + __u8 ipvs_property:1; |
|---|
| 784 | 851 | __u8 inner_protocol_type:1; |
|---|
| 785 | 852 | __u8 remcsum_offload:1; |
|---|
| 786 | 853 | #ifdef CONFIG_NET_SWITCHDEV |
|---|
| 787 | 854 | __u8 offload_fwd_mark:1; |
|---|
| 788 | | - __u8 offload_mr_fwd_mark:1; |
|---|
| 855 | + __u8 offload_l3_fwd_mark:1; |
|---|
| 789 | 856 | #endif |
|---|
| 790 | 857 | #ifdef CONFIG_NET_CLS_ACT |
|---|
| 791 | 858 | __u8 tc_skip_classify:1; |
|---|
| 792 | 859 | __u8 tc_at_ingress:1; |
|---|
| 793 | | - __u8 tc_redirected:1; |
|---|
| 794 | | - __u8 tc_from_ingress:1; |
|---|
| 860 | +#endif |
|---|
| 861 | +#ifdef CONFIG_NET_REDIRECT |
|---|
| 862 | + __u8 redirected:1; |
|---|
| 863 | + __u8 from_ingress:1; |
|---|
| 795 | 864 | #endif |
|---|
| 796 | 865 | #ifdef CONFIG_TLS_DEVICE |
|---|
| 797 | 866 | __u8 decrypted:1; |
|---|
| .. | .. |
|---|
| 842 | 911 | __u16 network_header; |
|---|
| 843 | 912 | __u16 mac_header; |
|---|
| 844 | 913 | |
|---|
| 914 | +#ifdef CONFIG_KCOV |
|---|
| 915 | + u64 kcov_handle; |
|---|
| 916 | +#endif |
|---|
| 917 | + |
|---|
| 845 | 918 | /* private: */ |
|---|
| 846 | 919 | __u32 headers_end[0]; |
|---|
| 847 | 920 | /* public: */ |
|---|
| 848 | 921 | |
|---|
| 849 | | - ANDROID_KABI_RESERVE(1); |
|---|
| 922 | + /* Android KABI preservation. |
|---|
| 923 | + * |
|---|
| 924 | + * "open coded" version of ANDROID_KABI_USE() to pack more |
|---|
| 925 | + * fields/variables into the space that we have. |
|---|
| 926 | + * |
|---|
| 927 | + * scm_io_uring is from 04df9719df18 ("io_uring/af_unix: defer |
|---|
| 928 | + * registered files gc to io_uring release") |
|---|
| 929 | + */ |
|---|
| 930 | + _ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(1), |
|---|
| 931 | + struct { |
|---|
| 932 | + __u8 scm_io_uring:1; |
|---|
| 933 | + __u8 android_kabi_reserved1_padding1; |
|---|
| 934 | + __u16 android_kabi_reserved1_padding2; |
|---|
| 935 | + __u32 android_kabi_reserved1_padding3; |
|---|
| 936 | + }); |
|---|
| 850 | 937 | ANDROID_KABI_RESERVE(2); |
|---|
| 851 | 938 | |
|---|
| 852 | 939 | /* These elements must be at the end, see alloc_skb() for details. */ |
|---|
| .. | .. |
|---|
| 856 | 943 | *data; |
|---|
| 857 | 944 | unsigned int truesize; |
|---|
| 858 | 945 | refcount_t users; |
|---|
| 946 | + |
|---|
| 947 | +#ifdef CONFIG_SKB_EXTENSIONS |
|---|
| 948 | + /* only useable after checking ->active_extensions != 0 */ |
|---|
| 949 | + struct skb_ext *extensions; |
|---|
| 950 | +#endif |
|---|
| 859 | 951 | }; |
|---|
| 860 | 952 | |
|---|
| 861 | 953 | #ifdef __KERNEL__ |
|---|
| .. | .. |
|---|
| 867 | 959 | #define SKB_ALLOC_RX 0x02 |
|---|
| 868 | 960 | #define SKB_ALLOC_NAPI 0x04 |
|---|
| 869 | 961 | |
|---|
| 870 | | -/* Returns true if the skb was allocated from PFMEMALLOC reserves */ |
|---|
| 962 | +/** |
|---|
| 963 | + * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves |
|---|
| 964 | + * @skb: buffer |
|---|
| 965 | + */ |
|---|
| 871 | 966 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) |
|---|
| 872 | 967 | { |
|---|
| 873 | 968 | return unlikely(skb->pfmemalloc); |
|---|
| .. | .. |
|---|
| 880 | 975 | #define SKB_DST_NOREF 1UL |
|---|
| 881 | 976 | #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) |
|---|
| 882 | 977 | |
|---|
| 883 | | -#define SKB_NFCT_PTRMASK ~(7UL) |
|---|
| 884 | 978 | /** |
|---|
| 885 | 979 | * skb_dst - returns skb dst_entry |
|---|
| 886 | 980 | * @skb: buffer |
|---|
| .. | .. |
|---|
| 889 | 983 | */ |
|---|
| 890 | 984 | static inline struct dst_entry *skb_dst(const struct sk_buff *skb) |
|---|
| 891 | 985 | { |
|---|
| 892 | | - /* If refdst was not refcounted, check we still are in a |
|---|
| 986 | + /* If refdst was not refcounted, check we still are in a |
|---|
| 893 | 987 | * rcu_read_lock section |
|---|
| 894 | 988 | */ |
|---|
| 895 | 989 | WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && |
|---|
| .. | .. |
|---|
| 936 | 1030 | return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); |
|---|
| 937 | 1031 | } |
|---|
| 938 | 1032 | |
|---|
| 1033 | +/** |
|---|
| 1034 | + * skb_rtable - Returns the skb &rtable |
|---|
| 1035 | + * @skb: buffer |
|---|
| 1036 | + */ |
|---|
| 939 | 1037 | static inline struct rtable *skb_rtable(const struct sk_buff *skb) |
|---|
| 940 | 1038 | { |
|---|
| 941 | 1039 | return (struct rtable *)skb_dst(skb); |
|---|
| .. | .. |
|---|
| 950 | 1048 | return ptype <= PACKET_OTHERHOST; |
|---|
| 951 | 1049 | } |
|---|
| 952 | 1050 | |
|---|
| 1051 | +/** |
|---|
| 1052 | + * skb_napi_id - Returns the skb's NAPI id |
|---|
| 1053 | + * @skb: buffer |
|---|
| 1054 | + */ |
|---|
| 953 | 1055 | static inline unsigned int skb_napi_id(const struct sk_buff *skb) |
|---|
| 954 | 1056 | { |
|---|
| 955 | 1057 | #ifdef CONFIG_NET_RX_BUSY_POLL |
|---|
| .. | .. |
|---|
| 959 | 1061 | #endif |
|---|
| 960 | 1062 | } |
|---|
| 961 | 1063 | |
|---|
| 962 | | -/* decrement the reference count and return true if we can free the skb */ |
|---|
| 1064 | +/** |
|---|
| 1065 | + * skb_unref - decrement the skb's reference count |
|---|
| 1066 | + * @skb: buffer |
|---|
| 1067 | + * |
|---|
| 1068 | + * Returns true if we can free the skb. |
|---|
| 1069 | + */ |
|---|
| 963 | 1070 | static inline bool skb_unref(struct sk_buff *skb) |
|---|
| 964 | 1071 | { |
|---|
| 965 | 1072 | if (unlikely(!skb)) |
|---|
| .. | .. |
|---|
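skb_unref() is the building block for the release helpers declared just below; the canonical pattern, sketched from the declarations in this header (it mirrors what kfree_skb()/consume_skb() do internally), is:

```c
/* Sketch of the release pattern built on skb_unref(). */
static void my_release_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;		/* other references remain */
	__kfree_skb(skb);	/* last reference: free head state and data */
}
```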
| 975 | 1082 | void skb_release_head_state(struct sk_buff *skb); |
|---|
| 976 | 1083 | void kfree_skb(struct sk_buff *skb); |
|---|
| 977 | 1084 | void kfree_skb_list(struct sk_buff *segs); |
|---|
| 1085 | +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); |
|---|
| 978 | 1086 | void skb_tx_error(struct sk_buff *skb); |
|---|
| 1087 | + |
|---|
| 1088 | +#ifdef CONFIG_TRACEPOINTS |
|---|
| 979 | 1089 | void consume_skb(struct sk_buff *skb); |
|---|
| 1090 | +#else |
|---|
| 1091 | +static inline void consume_skb(struct sk_buff *skb) |
|---|
| 1092 | +{ |
|---|
| 1093 | + return kfree_skb(skb); |
|---|
| 1094 | +} |
|---|
| 1095 | +#endif |
|---|
| 1096 | + |
|---|
| 980 | 1097 | void __consume_stateless_skb(struct sk_buff *skb); |
|---|
| 981 | 1098 | void __kfree_skb(struct sk_buff *skb); |
|---|
| 982 | 1099 | extern struct kmem_cache *skbuff_head_cache; |
|---|
| .. | .. |
|---|
| 989 | 1106 | int node); |
|---|
| 990 | 1107 | struct sk_buff *__build_skb(void *data, unsigned int frag_size); |
|---|
| 991 | 1108 | struct sk_buff *build_skb(void *data, unsigned int frag_size); |
|---|
| 1109 | +struct sk_buff *build_skb_around(struct sk_buff *skb, |
|---|
| 1110 | + void *data, unsigned int frag_size); |
|---|
| 1111 | + |
|---|
| 1112 | +/** |
|---|
| 1113 | + * alloc_skb - allocate a network buffer |
|---|
| 1114 | + * @size: size to allocate |
|---|
| 1115 | + * @priority: allocation mask |
|---|
| 1116 | + * |
|---|
| 1117 | + * This function is a convenient wrapper around __alloc_skb(). |
|---|
| 1118 | + */ |
|---|
| 992 | 1119 | static inline struct sk_buff *alloc_skb(unsigned int size, |
|---|
| 993 | 1120 | gfp_t priority) |
|---|
| 994 | 1121 | { |
|---|
| .. | .. |
|---|
| 1000 | 1127 | int max_page_order, |
|---|
| 1001 | 1128 | int *errcode, |
|---|
| 1002 | 1129 | gfp_t gfp_mask); |
|---|
| 1130 | +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); |
|---|
| 1003 | 1131 | |
|---|
| 1004 | 1132 | /* Layout of fast clones : [skb1][skb2][fclone_ref] */ |
|---|
| 1005 | 1133 | struct sk_buff_fclones { |
|---|
| .. | .. |
|---|
| 1031 | 1159 | fclones->skb2.sk == sk; |
|---|
| 1032 | 1160 | } |
|---|
| 1033 | 1161 | |
|---|
| 1162 | +/** |
|---|
| 1163 | + * alloc_skb_fclone - allocate a network buffer from fclone cache |
|---|
| 1164 | + * @size: size to allocate |
|---|
| 1165 | + * @priority: allocation mask |
|---|
| 1166 | + * |
|---|
| 1167 | + * This function is a convenient wrapper around __alloc_skb(). |
|---|
| 1168 | + */ |
|---|
| 1034 | 1169 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, |
|---|
| 1035 | 1170 | gfp_t priority) |
|---|
| 1036 | 1171 | { |
|---|
| .. | .. |
|---|
| 1079 | 1214 | return __skb_pad(skb, pad, true); |
|---|
| 1080 | 1215 | } |
|---|
| 1081 | 1216 | #define dev_kfree_skb(a) consume_skb(a) |
|---|
| 1082 | | - |
|---|
| 1083 | | -int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, |
|---|
| 1084 | | - int getfrag(void *from, char *to, int offset, |
|---|
| 1085 | | - int len, int odd, struct sk_buff *skb), |
|---|
| 1086 | | - void *from, int length); |
|---|
| 1087 | 1217 | |
|---|
| 1088 | 1218 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, |
|---|
| 1089 | 1219 | int offset, size_t size); |
|---|
| .. | .. |
|---|
| 1192 | 1322 | const struct flow_dissector_key *key, |
|---|
| 1193 | 1323 | unsigned int key_count); |
|---|
| 1194 | 1324 | |
|---|
| 1195 | | -bool __skb_flow_dissect(const struct sk_buff *skb, |
|---|
| 1325 | +struct bpf_flow_dissector; |
|---|
| 1326 | +bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, |
|---|
| 1327 | + __be16 proto, int nhoff, int hlen, unsigned int flags); |
|---|
| 1328 | + |
|---|
| 1329 | +bool __skb_flow_dissect(const struct net *net, |
|---|
| 1330 | + const struct sk_buff *skb, |
|---|
| 1196 | 1331 | struct flow_dissector *flow_dissector, |
|---|
| 1197 | 1332 | void *target_container, |
|---|
| 1198 | 1333 | void *data, __be16 proto, int nhoff, int hlen, |
|---|
| .. | .. |
|---|
| 1202 | 1337 | struct flow_dissector *flow_dissector, |
|---|
| 1203 | 1338 | void *target_container, unsigned int flags) |
|---|
| 1204 | 1339 | { |
|---|
| 1205 | | - return __skb_flow_dissect(skb, flow_dissector, target_container, |
|---|
| 1206 | | - NULL, 0, 0, 0, flags); |
|---|
| 1340 | + return __skb_flow_dissect(NULL, skb, flow_dissector, |
|---|
| 1341 | + target_container, NULL, 0, 0, 0, flags); |
|---|
| 1207 | 1342 | } |
|---|
| 1208 | 1343 | |
|---|
| 1209 | 1344 | static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, |
|---|
| .. | .. |
|---|
| 1211 | 1346 | unsigned int flags) |
|---|
| 1212 | 1347 | { |
|---|
| 1213 | 1348 | memset(flow, 0, sizeof(*flow)); |
|---|
| 1214 | | - return __skb_flow_dissect(skb, &flow_keys_dissector, flow, |
|---|
| 1215 | | - NULL, 0, 0, 0, flags); |
|---|
| 1349 | + return __skb_flow_dissect(NULL, skb, &flow_keys_dissector, |
|---|
| 1350 | + flow, NULL, 0, 0, 0, flags); |
|---|
| 1216 | 1351 | } |
|---|
| 1217 | 1352 | |
|---|
| 1218 | 1353 | static inline bool |
|---|
| 1219 | | -skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb, |
|---|
| 1354 | +skb_flow_dissect_flow_keys_basic(const struct net *net, |
|---|
| 1355 | + const struct sk_buff *skb, |
|---|
| 1220 | 1356 | struct flow_keys_basic *flow, void *data, |
|---|
| 1221 | 1357 | __be16 proto, int nhoff, int hlen, |
|---|
| 1222 | 1358 | unsigned int flags) |
|---|
| 1223 | 1359 | { |
|---|
| 1224 | 1360 | memset(flow, 0, sizeof(*flow)); |
|---|
| 1225 | | - return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow, |
|---|
| 1361 | + return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, |
|---|
| 1226 | 1362 | data, proto, nhoff, hlen, flags); |
|---|
| 1227 | 1363 | } |
|---|
| 1228 | 1364 | |
|---|
| 1365 | +void skb_flow_dissect_meta(const struct sk_buff *skb, |
|---|
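The entry points above now thread a struct net through to the BPF flow dissector (NULL means no BPF program lookup). A hedged usage sketch; the flow_keys field layout (`basic.ip_proto`, `ports.src/dst`) is assumed from flow_dissector.h rather than shown in this diff:

```c
/* Sketch: pull the basic flow tuple out of an skb. */
static void my_log_tuple(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		pr_debug("proto %u sport %u dport %u\n",
			 keys.basic.ip_proto,
			 ntohs(keys.ports.src), ntohs(keys.ports.dst));
}
```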
| 1366 | + struct flow_dissector *flow_dissector, |
|---|
| 1367 | + void *target_container); |
|---|
| 1368 | + |
|---|
| 1369 | +/* Gets the skb's connection tracking info; ctinfo_map should be a |
|---|
| 1370 | + * map of mapsize entries that translates enum ip_conntrack_info states |
|---|
| 1371 | + * to user states. |
|---|
| 1372 | + */ |
|---|
| 1373 | +void |
|---|
| 1374 | +skb_flow_dissect_ct(const struct sk_buff *skb, |
|---|
| 1375 | + struct flow_dissector *flow_dissector, |
|---|
| 1376 | + void *target_container, |
|---|
| 1377 | + u16 *ctinfo_map, |
|---|
| 1378 | + size_t mapsize); |
|---|
| 1229 | 1379 | void |
|---|
| 1230 | 1380 | skb_flow_dissect_tunnel_info(const struct sk_buff *skb, |
|---|
| 1231 | 1381 | struct flow_dissector *flow_dissector, |
|---|
| 1232 | 1382 | void *target_container); |
|---|
| 1383 | + |
|---|
| 1384 | +void skb_flow_dissect_hash(const struct sk_buff *skb, |
|---|
| 1385 | + struct flow_dissector *flow_dissector, |
|---|
| 1386 | + void *target_container); |
|---|
| 1233 | 1387 | |
|---|
| 1234 | 1388 | static inline __u32 skb_get_hash(struct sk_buff *skb) |
|---|
| 1235 | 1389 | { |
|---|
| .. | .. |
|---|
| 1266 | 1420 | to->l4_hash = from->l4_hash; |
|---|
| 1267 | 1421 | }; |
|---|
| 1268 | 1422 | |
|---|
| 1423 | +static inline void skb_copy_decrypted(struct sk_buff *to, |
|---|
| 1424 | + const struct sk_buff *from) |
|---|
| 1425 | +{ |
|---|
| 1426 | +#ifdef CONFIG_TLS_DEVICE |
|---|
| 1427 | + to->decrypted = from->decrypted; |
|---|
| 1428 | +#endif |
|---|
| 1429 | +} |
|---|
| 1430 | + |
|---|
| 1269 | 1431 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
|---|
| 1270 | 1432 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
|---|
| 1271 | 1433 | { |
|---|
| .. | .. |
|---|
| 1276 | 1438 | { |
|---|
| 1277 | 1439 | return skb->end; |
|---|
| 1278 | 1440 | } |
|---|
| 1441 | + |
|---|
| 1442 | +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) |
|---|
| 1443 | +{ |
|---|
| 1444 | + skb->end = offset; |
|---|
| 1445 | +} |
|---|
| 1279 | 1446 | #else |
|---|
| 1280 | 1447 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
|---|
| 1281 | 1448 | { |
|---|
| .. | .. |
|---|
| 1285 | 1452 | static inline unsigned int skb_end_offset(const struct sk_buff *skb) |
|---|
| 1286 | 1453 | { |
|---|
| 1287 | 1454 | return skb->end - skb->head; |
|---|
| 1455 | +} |
|---|
| 1456 | + |
|---|
| 1457 | +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) |
|---|
| 1458 | +{ |
|---|
| 1459 | + skb->end = skb->head + offset; |
|---|
| 1288 | 1460 | } |
|---|
| 1289 | 1461 | #endif |
|---|
| 1290 | 1462 | |
|---|
| .. | .. |
|---|
| 1303 | 1475 | return is_zcopy ? skb_uarg(skb) : NULL; |
|---|
| 1304 | 1476 | } |
|---|
| 1305 | 1477 | |
|---|
| 1306 | | -static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) |
|---|
| 1478 | +static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, |
|---|
| 1479 | + bool *have_ref) |
|---|
| 1307 | 1480 | { |
|---|
| 1308 | 1481 | if (skb && uarg && !skb_zcopy(skb)) { |
|---|
| 1309 | | - sock_zerocopy_get(uarg); |
|---|
| 1482 | + if (unlikely(have_ref && *have_ref)) |
|---|
| 1483 | + *have_ref = false; |
|---|
| 1484 | + else |
|---|
| 1485 | + sock_zerocopy_get(uarg); |
|---|
| 1310 | 1486 | skb_shinfo(skb)->destructor_arg = uarg; |
|---|
| 1311 | 1487 | skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; |
|---|
| 1312 | 1488 | } |
|---|
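The new `have_ref` argument lets a caller donate a reference it already holds instead of having skb_zcopy_set() take another. A hedged sketch of the calling convention implied by the code above:

```c
/* Sketch: attach @uarg to @skb, donating a reference we already hold. */
static void my_attach_zcopy(struct sk_buff *skb, struct ubuf_info *uarg)
{
	bool have_ref = true;			/* caller-held reference */

	skb_zcopy_set(skb, uarg, &have_ref);	/* clears it if consumed */
	if (have_ref)
		sock_zerocopy_put(uarg);	/* not consumed: drop it */
}
```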
| .. | .. |
|---|
| 1353 | 1529 | struct ubuf_info *uarg = skb_zcopy(skb); |
|---|
| 1354 | 1530 | |
|---|
| 1355 | 1531 | if (uarg) { |
|---|
| 1356 | | - sock_zerocopy_put_abort(uarg); |
|---|
| 1532 | + sock_zerocopy_put_abort(uarg, false); |
|---|
| 1357 | 1533 | skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; |
|---|
| 1358 | 1534 | } |
|---|
| 1359 | 1535 | } |
|---|
| .. | .. |
|---|
| 1501 | 1677 | return 0; |
|---|
| 1502 | 1678 | } |
|---|
| 1503 | 1679 | |
|---|
| 1680 | +/* This variant of skb_unclone() makes sure skb->truesize |
|---|
| 1681 | + * and skb_end_offset() are not changed, whenever a new skb->head is needed. |
|---|
| 1682 | + * |
|---|
| 1683 | + * Indeed there is no guarantee that two kmalloc(X) allocations have the same ksize() |
|---|
| 1684 | + * when various debugging features are in place. |
|---|
| 1685 | + */ |
|---|
| 1686 | +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri); |
|---|
| 1687 | +static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) |
|---|
| 1688 | +{ |
|---|
| 1689 | + might_sleep_if(gfpflags_allow_blocking(pri)); |
|---|
| 1690 | + |
|---|
| 1691 | + if (skb_cloned(skb)) |
|---|
| 1692 | + return __skb_unclone_keeptruesize(skb, pri); |
|---|
| 1693 | + return 0; |
|---|
| 1694 | +} |
|---|
| 1695 | + |
|---|
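A hedged sketch of where this variant matters: unsharing a possibly-cloned skb whose truesize is already charged to a socket, for example before rewriting headers on a queued buffer.

```c
/* Sketch: unshare before a header rewrite without touching truesize. */
static int my_prep_rewrite(struct sk_buff *skb)
{
	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
		return -ENOMEM;
	/* skb->head is now private; headers may be modified. */
	return 0;
}
```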
| 1504 | 1696 | /** |
|---|
| 1505 | 1697 | * skb_header_cloned - is the header a clone |
|---|
| 1506 | 1698 | * @skb: buffer to check |
|---|
| .. | .. |
|---|
| 1641 | 1833 | } |
|---|
| 1642 | 1834 | |
|---|
| 1643 | 1835 | /** |
|---|
| 1836 | + * __skb_peek - peek at the head of a non-empty &sk_buff_head |
|---|
| 1837 | + * @list_: list to peek at |
|---|
| 1838 | + * |
|---|
| 1839 | + * Like skb_peek(), but the caller knows that the list is not empty. |
|---|
| 1840 | + */ |
|---|
| 1841 | +static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) |
|---|
| 1842 | +{ |
|---|
| 1843 | + return list_->next; |
|---|
| 1844 | +} |
|---|
| 1845 | + |
|---|
| 1846 | +/** |
|---|
| 1644 | 1847 | * skb_peek_next - peek skb following the given one from a queue |
|---|
| 1645 | 1848 | * @skb: skb to start from |
|---|
| 1646 | 1849 | * @list_: list to peek at |
|---|
| .. | .. |
|---|
| 1748 | 1951 | * The "__skb_xxxx()" functions are the non-atomic ones that |
|---|
| 1749 | 1952 | * can only be called with interrupts disabled. |
|---|
| 1750 | 1953 | */ |
|---|
| 1751 | | -void skb_insert(struct sk_buff *old, struct sk_buff *newsk, |
|---|
| 1752 | | - struct sk_buff_head *list); |
|---|
| 1753 | 1954 | static inline void __skb_insert(struct sk_buff *newsk, |
|---|
| 1754 | 1955 | struct sk_buff *prev, struct sk_buff *next, |
|---|
| 1755 | 1956 | struct sk_buff_head *list) |
|---|
| .. | .. |
|---|
| 1879 | 2080 | * |
|---|
| 1880 | 2081 | * A buffer cannot be placed on two lists at the same time. |
|---|
| 1881 | 2082 | */ |
|---|
| 1882 | | -void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); |
|---|
| 1883 | 2083 | static inline void __skb_queue_head(struct sk_buff_head *list, |
|---|
| 1884 | 2084 | struct sk_buff *newsk) |
|---|
| 1885 | 2085 | { |
|---|
| 1886 | 2086 | __skb_queue_after(list, (struct sk_buff *)list, newsk); |
|---|
| 1887 | 2087 | } |
|---|
| 2088 | +void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); |
|---|
| 1888 | 2089 | |
|---|
| 1889 | 2090 | /** |
|---|
| 1890 | 2091 | * __skb_queue_tail - queue a buffer at the list tail |
|---|
| .. | .. |
|---|
| 1896 | 2097 | * |
|---|
| 1897 | 2098 | * A buffer cannot be placed on two lists at the same time. |
|---|
| 1898 | 2099 | */ |
|---|
| 1899 | | -void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); |
|---|
| 1900 | 2100 | static inline void __skb_queue_tail(struct sk_buff_head *list, |
|---|
| 1901 | 2101 | struct sk_buff *newsk) |
|---|
| 1902 | 2102 | { |
|---|
| 1903 | 2103 | __skb_queue_before(list, (struct sk_buff *)list, newsk); |
|---|
| 1904 | 2104 | } |
|---|
| 2105 | +void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); |
|---|
| 1905 | 2106 | |
|---|
| 1906 | 2107 | /* |
|---|
| 1907 | 2108 | * remove sk_buff from list. _Must_ be called atomically, and with |
|---|
| .. | .. |
|---|
| 1928 | 2129 | * so must be used with appropriate locks held only. The head item is |
|---|
| 1929 | 2130 | * returned or %NULL if the list is empty. |
|---|
| 1930 | 2131 | */ |
|---|
| 1931 | | -struct sk_buff *skb_dequeue(struct sk_buff_head *list); |
|---|
| 1932 | 2132 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) |
|---|
| 1933 | 2133 | { |
|---|
| 1934 | 2134 | struct sk_buff *skb = skb_peek(list); |
|---|
| .. | .. |
|---|
| 1936 | 2136 | __skb_unlink(skb, list); |
|---|
| 1937 | 2137 | return skb; |
|---|
| 1938 | 2138 | } |
|---|
| 2139 | +struct sk_buff *skb_dequeue(struct sk_buff_head *list); |
|---|
| 1939 | 2140 | |
|---|
| 1940 | 2141 | /** |
|---|
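Note the declaration-order change above: the lockless `__skb_*` variants now precede their locked counterparts. For reference, a minimal FIFO built on the locked variants; `skb_queue_head_init()` is assumed from elsewhere in this header.

```c
/* Sketch: minimal producer/consumer on an sk_buff_head.  The locked
 * helpers take list->lock internally, so no extra locking is needed.
 */
static struct sk_buff_head my_queue;	/* skb_queue_head_init() at init */

static void my_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&my_queue, skb);
}

static struct sk_buff *my_dequeue(void)
{
	return skb_dequeue(&my_queue);	/* NULL if empty */
}
```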
| 1941 | 2142 | * __skb_dequeue_tail - remove from the tail of the queue |
|---|
| .. | .. |
|---|
| 1945 | 2146 | * so must be used with appropriate locks held only. The tail item is |
|---|
| 1946 | 2147 | * returned or %NULL if the list is empty. |
|---|
| 1947 | 2148 | */ |
|---|
| 1948 | | -struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); |
|---|
| 1949 | 2149 | static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) |
|---|
| 1950 | 2150 | { |
|---|
| 1951 | 2151 | struct sk_buff *skb = skb_peek_tail(list); |
|---|
| .. | .. |
|---|
| 1953 | 2153 | __skb_unlink(skb, list); |
|---|
| 1954 | 2154 | return skb; |
|---|
| 1955 | 2155 | } |
|---|
| 2156 | +struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); |
|---|
| 1956 | 2157 | |
|---|
| 1957 | 2158 | |
|---|
| 1958 | 2159 | static inline bool skb_is_nonlinear(const struct sk_buff *skb) |
|---|
| .. | .. |
|---|
| 2002 | 2203 | * that not all callers have unique ownership of the page but rely |
|---|
| 2003 | 2204 | * on page_is_pfmemalloc doing the right thing(tm). |
|---|
| 2004 | 2205 | */ |
|---|
| 2005 | | - frag->page.p = page; |
|---|
| 2006 | | - frag->page_offset = off; |
|---|
| 2206 | + frag->bv_page = page; |
|---|
| 2207 | + frag->bv_offset = off; |
|---|
| 2007 | 2208 | skb_frag_size_set(frag, size); |
|---|
| 2008 | 2209 | |
|---|
| 2009 | 2210 | page = compound_head(page); |
|---|
| .. | .. |
|---|
| 2038 | 2239 | void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, |
|---|
| 2039 | 2240 | unsigned int truesize); |
|---|
| 2040 | 2241 | |
|---|
| 2041 | | -#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) |
|---|
| 2042 | | -#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) |
|---|
| 2043 | 2242 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) |
|---|
| 2044 | 2243 | |
|---|
| 2045 | 2244 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
|---|
| .. | .. |
|---|
| 2076 | 2275 | } |
|---|
| 2077 | 2276 | |
|---|
| 2078 | 2277 | #endif /* NET_SKBUFF_DATA_USES_OFFSET */ |
|---|
| 2278 | + |
|---|
| 2279 | +static inline void skb_assert_len(struct sk_buff *skb) |
|---|
| 2280 | +{ |
|---|
| 2281 | +#ifdef CONFIG_DEBUG_NET |
|---|
| 2282 | + if (WARN_ONCE(!skb->len, "%s\n", __func__)) |
|---|
| 2283 | + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); |
|---|
| 2284 | +#endif /* CONFIG_DEBUG_NET */ |
|---|
| 2285 | +} |
|---|
| 2079 | 2286 | |
|---|
| 2080 | 2287 | /* |
|---|
| 2081 | 2288 | * Add data to an sk_buff |
|---|
| .. | .. |
|---|
| 2174 | 2381 | return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); |
|---|
| 2175 | 2382 | } |
|---|
| 2176 | 2383 | |
|---|
| 2177 | | -static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) |
|---|
| 2384 | +static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) |
|---|
| 2178 | 2385 | { |
|---|
| 2179 | 2386 | if (likely(len <= skb_headlen(skb))) |
|---|
| 2180 | | - return 1; |
|---|
| 2387 | + return true; |
|---|
| 2181 | 2388 | if (unlikely(len > skb->len)) |
|---|
| 2182 | | - return 0; |
|---|
| 2389 | + return false; |
|---|
| 2183 | 2390 | return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; |
|---|
| 2184 | 2391 | } |
|---|
| 2185 | 2392 | |
|---|
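The int-to-bool conversion above doesn't change the classic header-parsing idiom. A sketch, assuming skb->data currently points at an IPv4 header (struct iphdr from <linux/ip.h>):

```c
/* Sketch: linearize enough bytes before reading a header. */
static int my_peek_ipproto(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;			/* runt packet */

	iph = (const struct iphdr *)skb->data;	/* now safely linear */
	return iph->protocol;
}
```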
| .. | .. |
|---|
| 2403 | 2610 | return skb->mac_header != (typeof(skb->mac_header))~0U; |
|---|
| 2404 | 2611 | } |
|---|
| 2405 | 2612 | |
|---|
| 2613 | +static inline void skb_unset_mac_header(struct sk_buff *skb) |
|---|
| 2614 | +{ |
|---|
| 2615 | + skb->mac_header = (typeof(skb->mac_header))~0U; |
|---|
| 2616 | +} |
|---|
| 2617 | + |
|---|
| 2406 | 2618 | static inline void skb_reset_mac_header(struct sk_buff *skb) |
|---|
| 2407 | 2619 | { |
|---|
| 2408 | 2620 | skb->mac_header = skb->data - skb->head; |
|---|
| .. | .. |
|---|
| 2419 | 2631 | skb->mac_header = skb->network_header; |
|---|
| 2420 | 2632 | } |
|---|
| 2421 | 2633 | |
|---|
| 2422 | | -static inline void skb_probe_transport_header(struct sk_buff *skb, |
|---|
| 2423 | | - const int offset_hint) |
|---|
| 2634 | +static inline void skb_probe_transport_header(struct sk_buff *skb) |
|---|
| 2424 | 2635 | { |
|---|
| 2425 | 2636 | struct flow_keys_basic keys; |
|---|
| 2426 | 2637 | |
|---|
| 2427 | 2638 | if (skb_transport_header_was_set(skb)) |
|---|
| 2428 | 2639 | return; |
|---|
| 2429 | 2640 | |
|---|
| 2430 | | - if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) |
|---|
| 2641 | + if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, |
|---|
| 2642 | + NULL, 0, 0, 0, 0)) |
|---|
| 2431 | 2643 | skb_set_transport_header(skb, keys.control.thoff); |
|---|
| 2432 | | - else if (offset_hint >= 0) |
|---|
| 2433 | | - skb_set_transport_header(skb, offset_hint); |
|---|
| 2434 | 2644 | } |
|---|
| 2435 | 2645 | |
|---|
| 2436 | 2646 | static inline void skb_mac_header_rebuild(struct sk_buff *skb) |
|---|
| .. | .. |
|---|
| 2524 | 2734 | * |
|---|
| 2525 | 2735 | * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) |
|---|
| 2526 | 2736 | * to reduce average number of cache lines per packet. |
|---|
| 2527 | | - * get_rps_cpus() for example only access one 64 bytes aligned block : |
|---|
| 2737 | + * get_rps_cpu() for example only accesses one 64-byte aligned block: |
|---|
| 2528 | 2738 | * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) |
|---|
| 2529 | 2739 | */ |
|---|
| 2530 | 2740 | #ifndef NET_SKB_PAD |
|---|
| .. | .. |
|---|
| 2535 | 2745 | |
|---|
| 2536 | 2746 | static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) |
|---|
| 2537 | 2747 | { |
|---|
| 2538 | | - if (unlikely(skb_is_nonlinear(skb))) { |
|---|
| 2539 | | - WARN_ON(1); |
|---|
| 2748 | + if (WARN_ON(skb_is_nonlinear(skb))) |
|---|
| 2540 | 2749 | return; |
|---|
| 2541 | | - } |
|---|
| 2542 | 2750 | skb->len = len; |
|---|
| 2543 | 2751 | skb_set_tail_pointer(skb, len); |
|---|
| 2544 | 2752 | } |
|---|
| .. | .. |
|---|
| 2646 | 2854 | * the list and one reference dropped. This function does not take the |
|---|
| 2647 | 2855 | * list lock and the caller must hold the relevant locks to use it. |
|---|
| 2648 | 2856 | */ |
|---|
| 2649 | | -void skb_queue_purge(struct sk_buff_head *list); |
|---|
| 2650 | 2857 | static inline void __skb_queue_purge(struct sk_buff_head *list) |
|---|
| 2651 | 2858 | { |
|---|
| 2652 | 2859 | struct sk_buff *skb; |
|---|
| 2653 | 2860 | while ((skb = __skb_dequeue(list)) != NULL) |
|---|
| 2654 | 2861 | kfree_skb(skb); |
|---|
| 2655 | 2862 | } |
|---|
| 2863 | +void skb_queue_purge(struct sk_buff_head *list); |
|---|
| 2656 | 2864 | |
|---|
| 2657 | 2865 | unsigned int skb_rbtree_purge(struct rb_root *root); |
|---|
| 2658 | 2866 | |
|---|
| .. | .. |
|---|
| 2794 | 3002 | */ |
|---|
| 2795 | 3003 | static inline unsigned int skb_frag_off(const skb_frag_t *frag) |
|---|
| 2796 | 3004 | { |
|---|
| 2797 | | - return frag->page_offset; |
|---|
| 3005 | + return frag->bv_offset; |
|---|
| 3006 | +} |
|---|
| 3007 | + |
|---|
| 3008 | +/** |
|---|
| 3009 | + * skb_frag_off_add() - Increments the offset of a skb fragment by @delta |
|---|
| 3010 | + * @frag: skb fragment |
|---|
| 3011 | + * @delta: value to add |
|---|
| 3012 | + */ |
|---|
| 3013 | +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) |
|---|
| 3014 | +{ |
|---|
| 3015 | + frag->bv_offset += delta; |
|---|
| 3016 | +} |
|---|
| 3017 | + |
|---|
| 3018 | +/** |
|---|
| 3019 | + * skb_frag_off_set() - Sets the offset of a skb fragment |
|---|
| 3020 | + * @frag: skb fragment |
|---|
| 3021 | + * @offset: offset of fragment |
|---|
| 3022 | + */ |
|---|
| 3023 | +static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) |
|---|
| 3024 | +{ |
|---|
| 3025 | + frag->bv_offset = offset; |
|---|
| 3026 | +} |
|---|
| 3027 | + |
|---|
| 3028 | +/** |
|---|
| 3029 | + * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment |
|---|
| 3030 | + * @fragto: skb fragment where offset is set |
|---|
| 3031 | + * @fragfrom: skb fragment offset is copied from |
|---|
| 3032 | + */ |
|---|
| 3033 | +static inline void skb_frag_off_copy(skb_frag_t *fragto, |
|---|
| 3034 | + const skb_frag_t *fragfrom) |
|---|
| 3035 | +{ |
|---|
| 3036 | + fragto->bv_offset = fragfrom->bv_offset; |
|---|
| 2798 | 3037 | } |
|---|
| 2799 | 3038 | |
|---|
| 2800 | 3039 | /** |
|---|
| .. | .. |
|---|
| 2805 | 3044 | */ |
|---|
| 2806 | 3045 | static inline struct page *skb_frag_page(const skb_frag_t *frag) |
|---|
| 2807 | 3046 | { |
|---|
| 2808 | | - return frag->page.p; |
|---|
| 3047 | + return frag->bv_page; |
|---|
| 2809 | 3048 | } |
|---|
| 2810 | 3049 | |
|---|
| 2811 | 3050 | /** |
|---|
| .. | .. |
|---|
| 2863 | 3102 | */ |
|---|
| 2864 | 3103 | static inline void *skb_frag_address(const skb_frag_t *frag) |
|---|
| 2865 | 3104 | { |
|---|
| 2866 | | - return page_address(skb_frag_page(frag)) + frag->page_offset; |
|---|
| 3105 | + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); |
|---|
| 2867 | 3106 | } |
|---|
| 2868 | 3107 | |
|---|
| 2869 | 3108 | /** |
|---|
| .. | .. |
|---|
| 2879 | 3118 | if (unlikely(!ptr)) |
|---|
| 2880 | 3119 | return NULL; |
|---|
| 2881 | 3120 | |
|---|
| 2882 | | - return ptr + frag->page_offset; |
|---|
| 3121 | + return ptr + skb_frag_off(frag); |
|---|
| 3122 | +} |
|---|
| 3123 | + |
|---|
| 3124 | +/** |
|---|
| 3125 | + * skb_frag_page_copy() - sets the page in a fragment from another fragment |
|---|
| 3126 | + * @fragto: skb fragment where page is set |
|---|
| 3127 | + * @fragfrom: skb fragment page is copied from |
|---|
| 3128 | + */ |
|---|
| 3129 | +static inline void skb_frag_page_copy(skb_frag_t *fragto, |
|---|
| 3130 | + const skb_frag_t *fragfrom) |
|---|
| 3131 | +{ |
|---|
| 3132 | + fragto->bv_page = fragfrom->bv_page; |
|---|
| 2883 | 3133 | } |
|---|
| 2884 | 3134 | |
|---|
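The `*_copy()` and `*_set()` helpers introduced above compose naturally when duplicating a fragment. A hedged sketch; skb len/data_len/truesize accounting is deliberately elided.

```c
/* Sketch: mirror fragment @i of @from into slot @j of @to. */
static void my_copy_frag(struct sk_buff *to, int j,
			 const struct sk_buff *from, int i)
{
	skb_frag_t *dst = &skb_shinfo(to)->frags[j];
	const skb_frag_t *src = &skb_shinfo(from)->frags[i];

	skb_frag_page_copy(dst, src);		/* share the same page */
	skb_frag_off_copy(dst, src);
	skb_frag_size_set(dst, skb_frag_size(src));
	get_page(skb_frag_page(dst));		/* hold a reference for @to */
}
```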
| 2885 | 3135 | /** |
|---|
| .. | .. |
|---|
| 2891 | 3141 | */ |
|---|
| 2892 | 3142 | static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) |
|---|
| 2893 | 3143 | { |
|---|
| 2894 | | - frag->page.p = page; |
|---|
| 3144 | + frag->bv_page = page; |
|---|
| 2895 | 3145 | } |
|---|
| 2896 | 3146 | |
|---|
| 2897 | 3147 | /** |
|---|
| .. | .. |
|---|
| 2927 | 3177 | enum dma_data_direction dir) |
|---|
| 2928 | 3178 | { |
|---|
| 2929 | 3179 | return dma_map_page(dev, skb_frag_page(frag), |
|---|
| 2930 | | - frag->page_offset + offset, size, dir); |
|---|
| 3180 | + skb_frag_off(frag) + offset, size, dir); |
|---|
| 2931 | 3181 | } |
|---|
| 2932 | 3182 | |
|---|
| 2933 | 3183 | static inline struct sk_buff *pskb_copy(struct sk_buff *skb, |
|---|
| .. | .. |
|---|
| 3030 | 3280 | } |
|---|
| 3031 | 3281 | |
|---|
| 3032 | 3282 | /** |
|---|
| 3033 | | - * skb_put_padto - increase size and pad an skbuff up to a minimal size |
|---|
| 3283 | + * __skb_put_padto - increase size and pad an skbuff up to a minimal size |
|---|
| 3034 | 3284 | * @skb: buffer to pad |
|---|
| 3035 | 3285 | * @len: minimal length |
|---|
| 3036 | 3286 | * @free_on_error: free buffer on error |
|---|
| .. | .. |
|---|
| 3095 | 3345 | if (skb_zcopy(skb)) |
|---|
| 3096 | 3346 | return false; |
|---|
| 3097 | 3347 | if (i) { |
|---|
| 3098 | | - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; |
|---|
| 3348 | + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; |
|---|
| 3099 | 3349 | |
|---|
| 3100 | 3350 | return page == skb_frag_page(frag) && |
|---|
| 3101 | | - off == frag->page_offset + skb_frag_size(frag); |
|---|
| 3351 | + off == skb_frag_off(frag) + skb_frag_size(frag); |
|---|
| 3102 | 3352 | } |
|---|
| 3103 | 3353 | return false; |
|---|
| 3104 | 3354 | } |
|---|
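Editor's note: skb_can_coalesce() exists for sendpage-style appenders. A hedged sketch of the classic pattern; real callers also bound nr_frags by MAX_SKB_FRAGS and charge socket memory:

```c
static void append_page_to_skb(struct sk_buff *skb, struct page *page,
			       int off, int size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, off)) {
		/* contiguous with the last fragment: just grow it */
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else {
		get_page(page);
		skb_fill_page_desc(skb, i, page, off, size);
	}
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
```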
| .. | .. |
|---|
| 3317 | 3567 | for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) |
|---|
| 3318 | 3568 | |
|---|
| 3319 | 3569 | |
|---|
| 3320 | | -int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, |
|---|
| 3570 | +int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, |
|---|
| 3571 | + int *err, long *timeo_p, |
|---|
| 3321 | 3572 | const struct sk_buff *skb); |
|---|
| 3322 | 3573 | struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, |
|---|
| 3323 | 3574 | struct sk_buff_head *queue, |
|---|
| 3324 | 3575 | unsigned int flags, |
|---|
| 3325 | | - void (*destructor)(struct sock *sk, |
|---|
| 3326 | | - struct sk_buff *skb), |
|---|
| 3327 | | - int *peeked, int *off, int *err, |
|---|
| 3576 | + int *off, int *err, |
|---|
| 3328 | 3577 | struct sk_buff **last); |
|---|
| 3329 | | -struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, |
|---|
| 3330 | | - void (*destructor)(struct sock *sk, |
|---|
| 3331 | | - struct sk_buff *skb), |
|---|
| 3332 | | - int *peeked, int *off, int *err, |
|---|
| 3578 | +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, |
|---|
| 3579 | + struct sk_buff_head *queue, |
|---|
| 3580 | + unsigned int flags, int *off, int *err, |
|---|
| 3333 | 3581 | struct sk_buff **last); |
|---|
| 3334 | | -struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, |
|---|
| 3335 | | - void (*destructor)(struct sock *sk, |
|---|
| 3336 | | - struct sk_buff *skb), |
|---|
| 3337 | | - int *peeked, int *off, int *err); |
|---|
| 3582 | +struct sk_buff *__skb_recv_datagram(struct sock *sk, |
|---|
| 3583 | + struct sk_buff_head *sk_queue, |
|---|
| 3584 | + unsigned int flags, int *off, int *err); |
|---|
| 3338 | 3585 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, |
|---|
| 3339 | 3586 | int *err); |
|---|
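Editor's note: the destructor/peeked parameters are gone from the receive helpers above, and the queue to scan is now passed explicitly. A sketch of the unchanged protocol-facing entry point, skb_recv_datagram(), used as a recvmsg() body (the 'toy_' name is hypothetical):

```c
static int toy_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		       int noblock)
{
	struct sk_buff *skb;
	int err, copied;

	skb = skb_recv_datagram(sk, 0, noblock, &err);
	if (!skb)
		return err;

	copied = min_t(size_t, len, skb->len);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	skb_free_datagram(sk, skb);
	return err ? err : copied;
}
```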
| 3340 | 3587 | __poll_t datagram_poll(struct file *file, struct socket *sock, |
|---|
| .. | .. |
|---|
| 3348 | 3595 | } |
|---|
| 3349 | 3596 | int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, |
|---|
| 3350 | 3597 | struct msghdr *msg); |
|---|
| 3598 | +int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, |
|---|
| 3599 | + struct iov_iter *to, int len, |
|---|
| 3600 | + struct ahash_request *hash); |
|---|
| 3351 | 3601 | int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, |
|---|
| 3352 | 3602 | struct iov_iter *from, int len); |
|---|
| 3353 | 3603 | int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); |
|---|
| .. | .. |
|---|
| 3362 | 3612 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); |
|---|
| 3363 | 3613 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); |
|---|
| 3364 | 3614 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, |
|---|
| 3365 | | - int len, __wsum csum); |
|---|
| 3615 | + int len); |
|---|
| 3366 | 3616 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, |
|---|
| 3367 | 3617 | struct pipe_inode_info *pipe, unsigned int len, |
|---|
| 3368 | 3618 | unsigned int flags); |
|---|
| 3369 | 3619 | int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, |
|---|
| 3370 | 3620 | int len); |
|---|
| 3371 | | -int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); |
|---|
| 3372 | 3621 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
|---|
| 3373 | 3622 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); |
|---|
| 3374 | 3623 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, |
|---|
| .. | .. |
|---|
| 3379 | 3628 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); |
|---|
| 3380 | 3629 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); |
|---|
| 3381 | 3630 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
|---|
| 3631 | +struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, |
|---|
| 3632 | + unsigned int offset); |
|---|
| 3382 | 3633 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
|---|
| 3383 | 3634 | int skb_ensure_writable(struct sk_buff *skb, int write_len); |
|---|
| 3384 | 3635 | int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); |
|---|
| 3385 | 3636 | int skb_vlan_pop(struct sk_buff *skb); |
|---|
| 3386 | 3637 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); |
|---|
| 3638 | +int skb_eth_pop(struct sk_buff *skb); |
|---|
| 3639 | +int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, |
|---|
| 3640 | + const unsigned char *src); |
|---|
| 3641 | +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, |
|---|
| 3642 | + int mac_len, bool ethernet); |
|---|
| 3643 | +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, |
|---|
| 3644 | + bool ethernet); |
|---|
| 3645 | +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); |
|---|
| 3646 | +int skb_mpls_dec_ttl(struct sk_buff *skb); |
|---|
| 3387 | 3647 | struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, |
|---|
| 3388 | 3648 | gfp_t gfp); |
|---|
| 3389 | 3649 | |
|---|
| .. | .. |
|---|
| 3487 | 3747 | /** |
|---|
| 3488 | 3748 | * skb_get_timestamp - get timestamp from a skb |
|---|
| 3489 | 3749 | * @skb: skb to get stamp from |
|---|
| 3490 | | - * @stamp: pointer to struct timeval to store stamp in |
|---|
| 3750 | + * @stamp: pointer to struct __kernel_old_timeval to store stamp in |
|---|
| 3491 | 3751 | * |
|---|
| 3492 | 3752 | * Timestamps are stored in the skb as offsets to a base timestamp. |
|---|
| 3493 | 3753 | * This function converts the offset back to a struct timeval and stores |
|---|
| 3494 | 3754 | * it in stamp. |
|---|
| 3495 | 3755 | */ |
|---|
| 3496 | 3756 | static inline void skb_get_timestamp(const struct sk_buff *skb, |
|---|
| 3497 | | - struct timeval *stamp) |
|---|
| 3757 | + struct __kernel_old_timeval *stamp) |
|---|
| 3498 | 3758 | { |
|---|
| 3499 | | - *stamp = ktime_to_timeval(skb->tstamp); |
|---|
| 3759 | + *stamp = ns_to_kernel_old_timeval(skb->tstamp); |
|---|
| 3760 | +} |
|---|
| 3761 | + |
|---|
| 3762 | +static inline void skb_get_new_timestamp(const struct sk_buff *skb, |
|---|
| 3763 | + struct __kernel_sock_timeval *stamp) |
|---|
| 3764 | +{ |
|---|
| 3765 | + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); |
|---|
| 3766 | + |
|---|
| 3767 | + stamp->tv_sec = ts.tv_sec; |
|---|
| 3768 | + stamp->tv_usec = ts.tv_nsec / 1000; |
|---|
| 3500 | 3769 | } |
|---|
| 3501 | 3770 | |
|---|
| 3502 | 3771 | static inline void skb_get_timestampns(const struct sk_buff *skb, |
|---|
| 3503 | | - struct timespec *stamp) |
|---|
| 3772 | + struct __kernel_old_timespec *stamp) |
|---|
| 3504 | 3773 | { |
|---|
| 3505 | | - *stamp = ktime_to_timespec(skb->tstamp); |
|---|
| 3774 | + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); |
|---|
| 3775 | + |
|---|
| 3776 | + stamp->tv_sec = ts.tv_sec; |
|---|
| 3777 | + stamp->tv_nsec = ts.tv_nsec; |
|---|
| 3778 | +} |
|---|
| 3779 | + |
|---|
| 3780 | +static inline void skb_get_new_timestampns(const struct sk_buff *skb, |
|---|
| 3781 | + struct __kernel_timespec *stamp) |
|---|
| 3782 | +{ |
|---|
| 3783 | + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); |
|---|
| 3784 | + |
|---|
| 3785 | + stamp->tv_sec = ts.tv_sec; |
|---|
| 3786 | + stamp->tv_nsec = ts.tv_nsec; |
|---|
| 3506 | 3787 | } |
|---|
| 3507 | 3788 | |
|---|
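Editor's note: the timestamp getters now come in legacy (__kernel_old_*) and y2038-safe (__kernel_sock_timeval / __kernel_timespec) flavours. A sketch reading both from one skb:

```c
static void report_rx_stamps(const struct sk_buff *skb)
{
	struct __kernel_old_timeval tv;
	struct __kernel_sock_timeval stv;

	skb_get_timestamp(skb, &tv);	  /* legacy; 32-bit tv_sec on 32-bit kernels */
	skb_get_new_timestamp(skb, &stv); /* 64-bit seconds everywhere */

	pr_debug("rx at %lld.%06lld\n",
		 (long long)stv.tv_sec, (long long)stv.tv_usec);
}
```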
| 3508 | 3789 | static inline void __net_timestamp(struct sk_buff *skb) |
|---|
| .. | .. |
|---|
| 3544 | 3825 | #define __it(x, op) (x -= sizeof(u##op)) |
|---|
| 3545 | 3826 | #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) |
|---|
| 3546 | 3827 | case 32: diffs |= __it_diff(a, b, 64); |
|---|
| 3828 | + fallthrough; |
|---|
| 3547 | 3829 | case 24: diffs |= __it_diff(a, b, 64); |
|---|
| 3830 | + fallthrough; |
|---|
| 3548 | 3831 | case 16: diffs |= __it_diff(a, b, 64); |
|---|
| 3832 | + fallthrough; |
|---|
| 3549 | 3833 | case 8: diffs |= __it_diff(a, b, 64); |
|---|
| 3550 | 3834 | break; |
|---|
| 3551 | 3835 | case 28: diffs |= __it_diff(a, b, 64); |
|---|
| 3836 | + fallthrough; |
|---|
| 3552 | 3837 | case 20: diffs |= __it_diff(a, b, 64); |
|---|
| 3838 | + fallthrough; |
|---|
| 3553 | 3839 | case 12: diffs |= __it_diff(a, b, 64); |
|---|
| 3840 | + fallthrough; |
|---|
| 3554 | 3841 | case 4: diffs |= __it_diff(a, b, 32); |
|---|
| 3555 | 3842 | break; |
|---|
| 3556 | 3843 | } |
|---|
| .. | .. |
|---|
| 3611 | 3898 | * must call this function to return the skb back to the stack with a |
|---|
| 3612 | 3899 | * timestamp. |
|---|
| 3613 | 3900 | * |
|---|
| 3614 | | - * @skb: clone of the the original outgoing packet |
|---|
| 3901 | + * @skb: clone of the original outgoing packet |
|---|
| 3615 | 3902 | * @hwtstamps: hardware time stamps |
|---|
| 3616 | 3903 | * |
|---|
| 3617 | 3904 | */ |
|---|
| .. | .. |
|---|
| 3714 | 4001 | skb->csum_level++; |
|---|
| 3715 | 4002 | } else if (skb->ip_summed == CHECKSUM_NONE) { |
|---|
| 3716 | 4003 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
|---|
| 4004 | + skb->csum_level = 0; |
|---|
| 4005 | + } |
|---|
| 4006 | +} |
|---|
| 4007 | + |
|---|
| 4008 | +static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) |
|---|
| 4009 | +{ |
|---|
| 4010 | + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
|---|
| 4011 | + skb->ip_summed = CHECKSUM_NONE; |
|---|
| 3717 | 4012 | skb->csum_level = 0; |
|---|
| 3718 | 4013 | } |
|---|
| 3719 | 4014 | } |
|---|
| .. | .. |
|---|
| 3833 | 4128 | return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); |
|---|
| 3834 | 4129 | } |
|---|
| 3835 | 4130 | |
|---|
| 3836 | | -static inline void __skb_checksum_convert(struct sk_buff *skb, |
|---|
| 3837 | | - __sum16 check, __wsum pseudo) |
|---|
| 4131 | +static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) |
|---|
| 3838 | 4132 | { |
|---|
| 3839 | 4133 | skb->csum = ~pseudo; |
|---|
| 3840 | 4134 | skb->ip_summed = CHECKSUM_COMPLETE; |
|---|
| 3841 | 4135 | } |
|---|
| 3842 | 4136 | |
|---|
| 3843 | | -#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ |
|---|
| 4137 | +#define skb_checksum_try_convert(skb, proto, compute_pseudo) \ |
|---|
| 3844 | 4138 | do { \ |
|---|
| 3845 | 4139 | if (__skb_checksum_convert_check(skb)) \ |
|---|
| 3846 | | - __skb_checksum_convert(skb, check, \ |
|---|
| 3847 | | - compute_pseudo(skb, proto)); \ |
|---|
| 4140 | + __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \ |
|---|
| 3848 | 4141 | } while (0) |
|---|
| 3849 | 4142 | |
|---|
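Editor's note: the 'check' argument is dropped above because no converter used it. A sketch of a post-change call site, assuming inet_compute_pseudo() from <net/udp.h> as the pseudo-header helper:

```c
#include <net/udp.h>

static void udp4_maybe_convert_csum(struct sk_buff *skb)
{
	/* turn CHECKSUM_UNNECESSARY into CHECKSUM_COMPLETE if possible */
	skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
}
```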
| 3850 | 4143 | static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, |
|---|
| .. | .. |
|---|
| 3884 | 4177 | static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) |
|---|
| 3885 | 4178 | { |
|---|
| 3886 | 4179 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
|---|
| 3887 | | - return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); |
|---|
| 4180 | + return (void *)(skb->_nfct & NFCT_PTRMASK); |
|---|
| 3888 | 4181 | #else |
|---|
| 3889 | 4182 | return NULL; |
|---|
| 3890 | 4183 | #endif |
|---|
| 3891 | 4184 | } |
|---|
| 3892 | 4185 | |
|---|
| 3893 | | -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
|---|
| 3894 | | -void nf_conntrack_destroy(struct nf_conntrack *nfct); |
|---|
| 3895 | | -static inline void nf_conntrack_put(struct nf_conntrack *nfct) |
|---|
| 4186 | +static inline unsigned long skb_get_nfct(const struct sk_buff *skb) |
|---|
| 3896 | 4187 | { |
|---|
| 3897 | | - if (nfct && atomic_dec_and_test(&nfct->use)) |
|---|
| 3898 | | - nf_conntrack_destroy(nfct); |
|---|
| 3899 | | -} |
|---|
| 3900 | | -static inline void nf_conntrack_get(struct nf_conntrack *nfct) |
|---|
| 3901 | | -{ |
|---|
| 3902 | | - if (nfct) |
|---|
| 3903 | | - atomic_inc(&nfct->use); |
|---|
| 3904 | | -} |
|---|
| 4188 | +#if IS_ENABLED(CONFIG_NF_CONNTRACK) |
|---|
| 4189 | + return skb->_nfct; |
|---|
| 4190 | +#else |
|---|
| 4191 | + return 0UL; |
|---|
| 3905 | 4192 | #endif |
|---|
| 4193 | +} |
|---|
| 4194 | + |
|---|
| 4195 | +static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) |
|---|
| 4196 | +{ |
|---|
| 4197 | +#if IS_ENABLED(CONFIG_NF_CONNTRACK) |
|---|
| 4198 | + skb->_nfct = nfct; |
|---|
| 4199 | +#endif |
|---|
| 4200 | +} |
|---|
| 4201 | + |
|---|
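Editor's note: skb_get_nfct()/skb_set_nfct() expose the raw _nfct word (conntrack pointer plus ctinfo bits). A hedged sketch of transferring conntrack state between skbs; the word carries its reference with it, and 'to' is assumed to hold none beforehand:

```c
static void steal_nfct(struct sk_buff *to, struct sk_buff *from)
{
	skb_set_nfct(to, skb_get_nfct(from)); /* reference moves along */
	skb_set_nfct(from, 0UL);
}
```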
| 4202 | +#ifdef CONFIG_SKB_EXTENSIONS |
|---|
| 4203 | +enum skb_ext_id { |
|---|
| 3906 | 4204 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
|---|
| 3907 | | -static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) |
|---|
| 4205 | + SKB_EXT_BRIDGE_NF, |
|---|
| 4206 | +#endif |
|---|
| 4207 | +#ifdef CONFIG_XFRM |
|---|
| 4208 | + SKB_EXT_SEC_PATH, |
|---|
| 4209 | +#endif |
|---|
| 4210 | +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
|---|
| 4211 | + TC_SKB_EXT, |
|---|
| 4212 | +#endif |
|---|
| 4213 | +#if IS_ENABLED(CONFIG_MPTCP) |
|---|
| 4214 | + SKB_EXT_MPTCP, |
|---|
| 4215 | +#endif |
|---|
| 4216 | + SKB_EXT_NUM, /* must be last */ |
|---|
| 4217 | +}; |
|---|
| 4218 | + |
|---|
| 4219 | +/** |
|---|
| 4220 | + * struct skb_ext - sk_buff extensions |
|---|
| 4221 | + * @refcnt: 1 on allocation, deallocated on 0 |
|---|
| 4222 | + * @offset: offset to add to @data to obtain extension address |
|---|
| 4223 | + * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units |
|---|
| 4224 | + * @data: start of extension data, variable sized |
|---|
| 4225 | + * |
|---|
| 4226 | + * Note: offsets/lengths are stored in chunks of 8 bytes; this allows |
|---|
| 4227 | + * the use of 'u8' types while supporting up to 2KB of extension data. |
|---|
| 4228 | + */ |
|---|
| 4229 | +struct skb_ext { |
|---|
| 4230 | + refcount_t refcnt; |
|---|
| 4231 | + u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ |
|---|
| 4232 | + u8 chunks; /* same */ |
|---|
| 4233 | + char data[] __aligned(8); |
|---|
| 4234 | +}; |
|---|
| 4235 | + |
|---|
| 4236 | +struct skb_ext *__skb_ext_alloc(gfp_t flags); |
|---|
| 4237 | +void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, |
|---|
| 4238 | + struct skb_ext *ext); |
|---|
| 4239 | +void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); |
|---|
| 4240 | +void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); |
|---|
| 4241 | +void __skb_ext_put(struct skb_ext *ext); |
|---|
| 4242 | + |
|---|
| 4243 | +static inline void skb_ext_put(struct sk_buff *skb) |
|---|
| 3908 | 4244 | { |
|---|
| 3909 | | - if (nf_bridge && refcount_dec_and_test(&nf_bridge->use)) |
|---|
| 3910 | | - kfree(nf_bridge); |
|---|
| 4245 | + if (skb->active_extensions) |
|---|
| 4246 | + __skb_ext_put(skb->extensions); |
|---|
| 3911 | 4247 | } |
|---|
| 3912 | | -static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) |
|---|
| 4248 | + |
|---|
| 4249 | +static inline void __skb_ext_copy(struct sk_buff *dst, |
|---|
| 4250 | + const struct sk_buff *src) |
|---|
| 3913 | 4251 | { |
|---|
| 3914 | | - if (nf_bridge) |
|---|
| 3915 | | - refcount_inc(&nf_bridge->use); |
|---|
| 4252 | + dst->active_extensions = src->active_extensions; |
|---|
| 4253 | + |
|---|
| 4254 | + if (src->active_extensions) { |
|---|
| 4255 | + struct skb_ext *ext = src->extensions; |
|---|
| 4256 | + |
|---|
| 4257 | + refcount_inc(&ext->refcnt); |
|---|
| 4258 | + dst->extensions = ext; |
|---|
| 4259 | + } |
|---|
| 3916 | 4260 | } |
|---|
| 3917 | | -#endif /* CONFIG_BRIDGE_NETFILTER */ |
|---|
| 3918 | | -static inline void nf_reset(struct sk_buff *skb) |
|---|
| 4261 | + |
|---|
| 4262 | +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) |
|---|
| 4263 | +{ |
|---|
| 4264 | + skb_ext_put(dst); |
|---|
| 4265 | + __skb_ext_copy(dst, src); |
|---|
| 4266 | +} |
|---|
| 4267 | + |
|---|
| 4268 | +static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) |
|---|
| 4269 | +{ |
|---|
| 4270 | + return !!ext->offset[i]; |
|---|
| 4271 | +} |
|---|
| 4272 | + |
|---|
| 4273 | +static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) |
|---|
| 4274 | +{ |
|---|
| 4275 | + return skb->active_extensions & (1 << id); |
|---|
| 4276 | +} |
|---|
| 4277 | + |
|---|
| 4278 | +static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) |
|---|
| 4279 | +{ |
|---|
| 4280 | + if (skb_ext_exist(skb, id)) |
|---|
| 4281 | + __skb_ext_del(skb, id); |
|---|
| 4282 | +} |
|---|
| 4283 | + |
|---|
| 4284 | +static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) |
|---|
| 4285 | +{ |
|---|
| 4286 | + if (skb_ext_exist(skb, id)) { |
|---|
| 4287 | + struct skb_ext *ext = skb->extensions; |
|---|
| 4288 | + |
|---|
| 4289 | + return (void *)ext + (ext->offset[id] << 3); |
|---|
| 4290 | + } |
|---|
| 4291 | + |
|---|
| 4292 | + return NULL; |
|---|
| 4293 | +} |
|---|
| 4294 | + |
|---|
| 4295 | +static inline void skb_ext_reset(struct sk_buff *skb) |
|---|
| 4296 | +{ |
|---|
| 4297 | + if (unlikely(skb->active_extensions)) { |
|---|
| 4298 | + __skb_ext_put(skb->extensions); |
|---|
| 4299 | + skb->active_extensions = 0; |
|---|
| 4300 | + } |
|---|
| 4301 | +} |
|---|
| 4302 | + |
|---|
| 4303 | +static inline bool skb_has_extensions(struct sk_buff *skb) |
|---|
| 4304 | +{ |
|---|
| 4305 | + return unlikely(skb->active_extensions); |
|---|
| 4306 | +} |
|---|
| 4307 | +#else |
|---|
| 4308 | +static inline void skb_ext_put(struct sk_buff *skb) {} |
|---|
| 4309 | +static inline void skb_ext_reset(struct sk_buff *skb) {} |
|---|
| 4310 | +static inline void skb_ext_del(struct sk_buff *skb, int unused) {} |
|---|
| 4311 | +static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} |
|---|
| 4312 | +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} |
|---|
| 4313 | +static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } |
|---|
| 4314 | +#endif /* CONFIG_SKB_EXTENSIONS */ |
|---|
| 4315 | + |
|---|
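Editor's note: a sketch of typical extension usage with the API above, taking TC_SKB_EXT as the id (struct tc_skb_ext and its 'chain' field live in <net/pkt_cls.h>; CONFIG_NET_TC_SKB_EXT is assumed enabled):

```c
#include <net/pkt_cls.h>
#include <linux/string.h>

static void record_tc_chain(struct sk_buff *skb, u32 chain)
{
	struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

	if (!ext) {
		ext = skb_ext_add(skb, TC_SKB_EXT); /* may fail under OOM */
		if (!ext)
			return;
		memset(ext, 0, sizeof(*ext)); /* skb_ext_add() doesn't zero */
	}
	ext->chain = chain;
}
```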
| 4316 | +static inline void nf_reset_ct(struct sk_buff *skb) |
|---|
| 3919 | 4317 | { |
|---|
| 3920 | 4318 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
|---|
| 3921 | 4319 | nf_conntrack_put(skb_nfct(skb)); |
|---|
| 3922 | 4320 | skb->_nfct = 0; |
|---|
| 3923 | 4321 | #endif |
|---|
| 3924 | | -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
|---|
| 3925 | | - nf_bridge_put(skb->nf_bridge); |
|---|
| 3926 | | -#endif |
|---|
| 3927 | | - skb->nf_bridge = NULL; |
|---|
| 3928 | 4322 | } |
|---|
| 3929 | 4323 | |
|---|
| 3930 | 4324 | static inline void nf_reset_trace(struct sk_buff *skb) |
|---|
| 3931 | 4325 | { |
|---|
| 3932 | | -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) |
|---|
| 4326 | +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) |
|---|
| 3933 | 4327 | skb->nf_trace = 0; |
|---|
| 3934 | 4328 | #endif |
|---|
| 3935 | 4329 | } |
|---|
| .. | .. |
|---|
| 3941 | 4335 | #endif |
|---|
| 3942 | 4336 | } |
|---|
| 3943 | 4337 | |
|---|
| 3944 | | -/* Note: This doesn't put any conntrack and bridge info in dst. */ |
|---|
| 4338 | +/* Note: This doesn't put any conntrack info in dst. */ |
|---|
| 3945 | 4339 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, |
|---|
| 3946 | 4340 | bool copy) |
|---|
| 3947 | 4341 | { |
|---|
| .. | .. |
|---|
| 3949 | 4343 | dst->_nfct = src->_nfct; |
|---|
| 3950 | 4344 | nf_conntrack_get(skb_nfct(src)); |
|---|
| 3951 | 4345 | #endif |
|---|
| 3952 | | -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
|---|
| 3953 | | - dst->nf_bridge = src->nf_bridge; |
|---|
| 3954 | | - nf_bridge_get(src->nf_bridge); |
|---|
| 3955 | | -#endif |
|---|
| 3956 | | -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) |
|---|
| 4346 | +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) |
|---|
| 3957 | 4347 | if (copy) |
|---|
| 3958 | 4348 | dst->nf_trace = src->nf_trace; |
|---|
| 3959 | 4349 | #endif |
|---|
| .. | .. |
|---|
| 3963 | 4353 | { |
|---|
| 3964 | 4354 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
|---|
| 3965 | 4355 | nf_conntrack_put(skb_nfct(dst)); |
|---|
| 3966 | | -#endif |
|---|
| 3967 | | -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
|---|
| 3968 | | - nf_bridge_put(dst->nf_bridge); |
|---|
| 3969 | 4356 | #endif |
|---|
| 3970 | 4357 | __nf_copy(dst, src, true); |
|---|
| 3971 | 4358 | } |
|---|
| .. | .. |
|---|
| 3988 | 4375 | { } |
|---|
| 3989 | 4376 | #endif |
|---|
| 3990 | 4377 | |
|---|
| 4378 | +static inline int secpath_exists(const struct sk_buff *skb) |
|---|
| 4379 | +{ |
|---|
| 4380 | +#ifdef CONFIG_XFRM |
|---|
| 4381 | + return skb_ext_exist(skb, SKB_EXT_SEC_PATH); |
|---|
| 4382 | +#else |
|---|
| 4383 | + return 0; |
|---|
| 4384 | +#endif |
|---|
| 4385 | +} |
|---|
| 4386 | + |
|---|
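Editor's note: with the secpath converted to SKB_EXT_SEC_PATH, callers keep their old shape. A sketch using skb_sec_path() (updated further down in this header), assuming struct sec_path's 'len' counts the applied transforms:

```c
static bool skb_was_xfrm_protected(const struct sk_buff *skb)
{
	const struct sec_path *sp = skb_sec_path(skb);

	return sp && sp->len > 0;
}
```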
| 3991 | 4387 | static inline bool skb_irq_freeable(const struct sk_buff *skb) |
|---|
| 3992 | 4388 | { |
|---|
| 3993 | 4389 | return !skb->destructor && |
|---|
| 3994 | | -#if IS_ENABLED(CONFIG_XFRM) |
|---|
| 3995 | | - !skb->sp && |
|---|
| 3996 | | -#endif |
|---|
| 4390 | + !secpath_exists(skb) && |
|---|
| 3997 | 4391 | !skb_nfct(skb) && |
|---|
| 3998 | 4392 | !skb->_skb_refdst && |
|---|
| 3999 | 4393 | !skb_has_frag_list(skb); |
|---|
| .. | .. |
|---|
| 4039 | 4433 | return skb->dst_pending_confirm != 0; |
|---|
| 4040 | 4434 | } |
|---|
| 4041 | 4435 | |
|---|
| 4042 | | -static inline struct sec_path *skb_sec_path(struct sk_buff *skb) |
|---|
| 4436 | +static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) |
|---|
| 4043 | 4437 | { |
|---|
| 4044 | 4438 | #ifdef CONFIG_XFRM |
|---|
| 4045 | | - return skb->sp; |
|---|
| 4439 | + return skb_ext_find(skb, SKB_EXT_SEC_PATH); |
|---|
| 4046 | 4440 | #else |
|---|
| 4047 | 4441 | return NULL; |
|---|
| 4048 | 4442 | #endif |
|---|
| .. | .. |
|---|
| 4063 | 4457 | __wsum csum; |
|---|
| 4064 | 4458 | __u16 csum_start; |
|---|
| 4065 | 4459 | }; |
|---|
| 4066 | | -#define SKB_SGO_CB_OFFSET 32 |
|---|
| 4067 | | -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) |
|---|
| 4460 | +#define SKB_GSO_CB_OFFSET 32 |
|---|
| 4461 | +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) |
|---|
| 4068 | 4462 | |
|---|
| 4069 | 4463 | static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) |
|---|
| 4070 | 4464 | { |
|---|
| .. | .. |
|---|
| 4225 | 4619 | /* Local Checksum Offload. |
|---|
| 4226 | 4620 | * Compute outer checksum based on the assumption that the |
|---|
| 4227 | 4621 | * inner checksum will be offloaded later. |
|---|
| 4228 | | - * See Documentation/networking/checksum-offloads.txt for |
|---|
| 4622 | + * See Documentation/networking/checksum-offloads.rst for |
|---|
| 4229 | 4623 | * explanation of how this works. |
|---|
| 4230 | 4624 | * Fill in outer checksum adjustment (e.g. with sum of outer |
|---|
| 4231 | 4625 | * pseudo-header) before calling. |
|---|
| .. | .. |
|---|
| 4247 | 4641 | return csum_partial(l4_hdr, csum_start - l4_hdr, partial); |
|---|
| 4248 | 4642 | } |
|---|
| 4249 | 4643 | |
|---|
| 4644 | +static inline bool skb_is_redirected(const struct sk_buff *skb) |
|---|
| 4645 | +{ |
|---|
| 4646 | +#ifdef CONFIG_NET_REDIRECT |
|---|
| 4647 | + return skb->redirected; |
|---|
| 4648 | +#else |
|---|
| 4649 | + return false; |
|---|
| 4650 | +#endif |
|---|
| 4651 | +} |
|---|
| 4652 | + |
|---|
| 4653 | +static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) |
|---|
| 4654 | +{ |
|---|
| 4655 | +#ifdef CONFIG_NET_REDIRECT |
|---|
| 4656 | + skb->redirected = 1; |
|---|
| 4657 | + skb->from_ingress = from_ingress; |
|---|
| 4658 | + if (skb->from_ingress) |
|---|
| 4659 | + skb->tstamp = 0; |
|---|
| 4660 | +#endif |
|---|
| 4661 | +} |
|---|
| 4662 | + |
|---|
| 4663 | +static inline void skb_reset_redirect(struct sk_buff *skb) |
|---|
| 4664 | +{ |
|---|
| 4665 | +#ifdef CONFIG_NET_REDIRECT |
|---|
| 4666 | + skb->redirected = 0; |
|---|
| 4667 | +#endif |
|---|
| 4668 | +} |
|---|
| 4669 | + |
|---|
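Editor's note: a sketch of a mirred-style consumer of the redirect flags above; skb_set_redirected() already clears the timestamp for ingress redirects, so the caller only picks the new device:

```c
#include <linux/netdevice.h>

static void redirect_skb(struct sk_buff *skb, struct net_device *dev,
			 bool from_ingress)
{
	skb_set_redirected(skb, from_ingress);
	skb->dev = dev;
	dev_queue_xmit(skb);
}
```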
| 4670 | +static inline bool skb_csum_is_sctp(struct sk_buff *skb) |
|---|
| 4671 | +{ |
|---|
| 4672 | + return skb->csum_not_inet; |
|---|
| 4673 | +} |
|---|
| 4674 | + |
|---|
| 4675 | +static inline void skb_set_kcov_handle(struct sk_buff *skb, |
|---|
| 4676 | + const u64 kcov_handle) |
|---|
| 4677 | +{ |
|---|
| 4678 | +#ifdef CONFIG_KCOV |
|---|
| 4679 | + skb->kcov_handle = kcov_handle; |
|---|
| 4680 | +#endif |
|---|
| 4681 | +} |
|---|
| 4682 | + |
|---|
| 4683 | +static inline u64 skb_get_kcov_handle(struct sk_buff *skb) |
|---|
| 4684 | +{ |
|---|
| 4685 | +#ifdef CONFIG_KCOV |
|---|
| 4686 | + return skb->kcov_handle; |
|---|
| 4687 | +#else |
|---|
| 4688 | + return 0; |
|---|
| 4689 | +#endif |
|---|
| 4690 | +} |
|---|
| 4691 | + |
|---|
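Editor's note: the kcov handle lets deferred work attribute coverage back to the originating task. A sketch of the intended remote-KCOV pairing, using kcov_remote_start_common()/kcov_remote_stop() from <linux/kcov.h> (no-ops without CONFIG_KCOV):

```c
#include <linux/kcov.h>

static void process_skb_deferred(struct sk_buff *skb)
{
	kcov_remote_start_common(skb_get_kcov_handle(skb));
	/* ... protocol processing on behalf of the skb's creator ... */
	kcov_remote_stop();
}
```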
| 4250 | 4692 | #endif /* __KERNEL__ */ |
|---|
| 4251 | 4693 | #endif /* _LINUX_SKBUFF_H */ |
|---|