.. | ..
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
---|
1 | 2 | /* |
---|
2 | 3 | * Definitions for the 'struct sk_buff' memory handlers. |
---|
3 | 4 | * |
---|
4 | 5 | * Authors: |
---|
5 | 6 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
---|
6 | 7 | * Florian La Roche, <rzsfl@rz.uni-sb.de> |
---|
7 | | - * |
---|
8 | | - * This program is free software; you can redistribute it and/or |
---|
9 | | - * modify it under the terms of the GNU General Public License |
---|
10 | | - * as published by the Free Software Foundation; either version |
---|
11 | | - * 2 of the License, or (at your option) any later version. |
---|
12 | 8 | */ |
---|
13 | 9 | |
---|
14 | 10 | #ifndef _LINUX_SKBUFF_H |
---|
.. | .. |
---|
18 | 14 | #include <linux/compiler.h> |
---|
19 | 15 | #include <linux/time.h> |
---|
20 | 16 | #include <linux/bug.h> |
---|
| 17 | +#include <linux/bvec.h> |
---|
21 | 18 | #include <linux/cache.h> |
---|
22 | 19 | #include <linux/rbtree.h> |
---|
23 | 20 | #include <linux/socket.h> |
---|
.. | .. |
---|
40 | 37 | #include <linux/in6.h> |
---|
41 | 38 | #include <linux/if_packet.h> |
---|
42 | 39 | #include <net/flow.h> |
---|
| 40 | +#if IS_ENABLED(CONFIG_NF_CONNTRACK) |
---|
| 41 | +#include <linux/netfilter/nf_conntrack_common.h> |
---|
| 42 | +#endif |
---|
| 43 | +#include <linux/android_kabi.h> |
---|
| 44 | +#include <linux/android_vendor.h> |
---|
43 | 45 | |
---|
44 | 46 | /* The interface for checksum offload between the stack and networking drivers |
---|
45 | 47 | * is as follows... |
---|
.. | .. |
---|
47 | 49 | * A. IP checksum related features |
---|
48 | 50 | * |
---|
49 | 51 | * Drivers advertise checksum offload capabilities in the features of a device. |
---|
50 | | - * From the stack's point of view these are capabilities offered by the driver, |
---|
51 | | - * a driver typically only advertises features that it is capable of offloading |
---|
| 52 | + * From the stack's point of view these are capabilities offered by the driver. |
---|
| 53 | + * A driver typically only advertises features that it is capable of offloading |
---|
52 | 54 | * to its device. |
---|
53 | 55 | * |
---|
54 | 56 | * The checksum related features are: |
---|
.. | .. |
---|
63 | 65 | * TCP or UDP packets over IPv4. These are specifically |
---|
64 | 66 | * unencapsulated packets of the form IPv4|TCP or |
---|
65 | 67 | * IPv4|UDP where the Protocol field in the IPv4 header |
---|
66 | | - * is TCP or UDP. The IPv4 header may contain IP options |
---|
| 68 | + * is TCP or UDP. The IPv4 header may contain IP options. |
---|
67 | 69 | * This feature cannot be set in features for a device |
---|
68 | 70 | * with NETIF_F_HW_CSUM also set. This feature is being |
---|
69 | 71 | * DEPRECATED (see below). |
---|
.. | .. |
---|
71 | 73 | * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain |
---|
72 | 74 | * TCP or UDP packets over IPv6. These are specifically |
---|
73 | 75 | * unencapsulated packets of the form IPv6|TCP or |
---|
74 | | - * IPv4|UDP where the Next Header field in the IPv6 |
---|
| 76 | + * IPv6|UDP where the Next Header field in the IPv6 |
---|
75 | 77 | * header is either TCP or UDP. IPv6 extension headers |
---|
76 | 78 | * are not supported with this feature. This feature |
---|
77 | 79 | * cannot be set in features for a device with |
---|
.. | .. |
---|
79 | 81 | * DEPRECATED (see below). |
---|
80 | 82 | * |
---|
81 | 83 | * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. |
---|
82 | | - * This flag is used only used to disable the RX checksum |
---|
| 84 | + * This flag is only used to disable the RX checksum |
---|
83 | 85 | * feature for a device. The stack will accept receive |
---|
84 | 86 | * checksum indication in packets received on a device |
---|
85 | 87 | * regardless of whether NETIF_F_RXCSUM is set. |
---|
86 | 88 | * |
---|
87 | 89 | * B. Checksumming of received packets by device. Indication of checksum |
---|
88 | | - * verification is in set skb->ip_summed. Possible values are: |
---|
| 90 | + * verification is set in skb->ip_summed. Possible values are: |
---|
89 | 91 | * |
---|
90 | 92 | * CHECKSUM_NONE: |
---|
91 | 93 | * |
---|
.. | .. |
---|
115 | 117 | * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. |
---|
116 | 118 | * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet |
---|
117 | 119 | * and a device is able to verify the checksums for UDP (possibly zero), |
---|
118 | | - * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to |
---|
| 120 | + * GRE (checksum flag is set) and TCP, skb->csum_level would be set to |
---|
119 | 121 | * two. If the device were only able to verify the UDP checksum and not |
---|
120 | | - * GRE, either because it doesn't support GRE checksum of because GRE |
---|
| 122 | + * GRE, either because it doesn't support GRE checksum or because GRE |
---|
121 | 123 | * checksum is bad, skb->csum_level would be set to zero (TCP checksum is |
---|
122 | 124 | * not considered in this case). |
---|
123 | 125 | * |
---|
124 | 126 | * CHECKSUM_COMPLETE: |
---|
125 | 127 | * |
---|
126 | 128 | * This is the most generic way. The device supplied checksum of the _whole_ |
---|
127 | | - * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the |
---|
| 129 | + * packet as seen by netif_rx() and fills in skb->csum. This means the |
---|
128 | 130 | * hardware doesn't need to parse L3/L4 headers to implement this. |
---|
129 | 131 | * |
---|
130 | 132 | * Notes: |
---|
.. | .. |
---|
153 | 155 | * from skb->csum_start up to the end, and to record/write the checksum at |
---|
154 | 156 | * offset skb->csum_start + skb->csum_offset. A driver may verify that the |
---|
155 | 157 | * csum_start and csum_offset values are valid values given the length and |
---|
156 | | - * offset of the packet, however they should not attempt to validate that the |
---|
157 | | - * checksum refers to a legitimate transport layer checksum-- it is the |
---|
| 158 | + * offset of the packet, but it should not attempt to validate that the |
---|
| 159 | + * checksum refers to a legitimate transport layer checksum -- it is the |
---|
158 | 160 | * purview of the stack to validate that csum_start and csum_offset are set |
---|
159 | 161 | * correctly. |
---|
160 | 162 | * |
---|
.. | .. |
---|
178 | 180 | * |
---|
179 | 181 | * CHECKSUM_UNNECESSARY: |
---|
180 | 182 | * |
---|
181 | | - * This has the same meaning on as CHECKSUM_NONE for checksum offload on |
---|
| 183 | + * This has the same meaning as CHECKSUM_NONE for checksum offload on |
---|
182 | 184 | * output. |
---|
183 | 185 | * |
---|
184 | 186 | * CHECKSUM_COMPLETE: |
---|
185 | 187 | * Not used in checksum output. If a driver observes a packet with this value |
---|
186 | | - * set in skbuff, if should treat as CHECKSUM_NONE being set. |
---|
| 188 | + * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set. |
---|
187 | 189 | * |
---|
188 | 190 | * D. Non-IP checksum (CRC) offloads |
---|
189 | 191 | * |
---|
190 | 192 | * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of |
---|
191 | 193 | * offloading the SCTP CRC in a packet. To perform this offload the stack |
---|
192 | | - * will set set csum_start and csum_offset accordingly, set ip_summed to |
---|
| 194 | + * will set csum_start and csum_offset accordingly, set ip_summed to |
---|
193 | 195 | * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in |
---|
194 | 196 | * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. |
---|
195 | 197 | * A driver that supports both IP checksum offload and SCTP CRC32c offload |
---|
.. | .. |
---|
200 | 202 | * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of |
---|
201 | 203 | * offloading the FCOE CRC in a packet. To perform this offload the stack |
---|
202 | 204 | * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset |
---|
203 | | - * accordingly. Note the there is no indication in the skbuff that the |
---|
204 | | - * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports |
---|
| 205 | + * accordingly. Note that there is no indication in the skbuff that the |
---|
| 206 | + * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports |
---|
205 | 207 | * both IP checksum offload and FCOE CRC offload must verify which offload |
---|
206 | | - * is configured for a packet presumably by inspecting packet headers. |
---|
| 208 | + * is configured for a packet, presumably by inspecting packet headers. |
---|
207 | 209 | * |
---|
208 | 210 | * E. Checksumming on output with GSO. |
---|
209 | 211 | * |
---|
.. | .. |
---|
211 | 213 | * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the |
---|
212 | 214 | * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as |
---|
213 | 215 | * part of the GSO operation is implied. If a checksum is being offloaded |
---|
214 | | - * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset |
---|
215 | | - * are set to refer to the outermost checksum being offload (two offloaded |
---|
216 | | - * checksums are possible with UDP encapsulation). |
---|
| 216 | + * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and |
---|
| 217 | + * csum_offset are set to refer to the outermost checksum being offloaded |
---|
| 218 | + * (two offloaded checksums are possible with UDP encapsulation). |
---|
217 | 219 | */ |
---|
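
The block above pins down the driver-facing contract for CHECKSUM_PARTIAL on transmit. Below is a minimal sketch of how a driver might honour it; `my_hw_set_csum()` is a hypothetical device hook, not a kernel API, while `skb_checksum_start_offset()` is the existing helper that converts csum_start into an offset from skb->data.

```c
#include <linux/skbuff.h>

/* Hypothetical device-specific hook, for illustration only. */
extern void my_hw_set_csum(int start_off, int csum_off);

static void my_dev_tx_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;		/* no checksum offload was requested */

	/* Per the contract above: checksum from csum_start to the end of
	 * the packet and write the result at csum_start + csum_offset.
	 */
	my_hw_set_csum(skb_checksum_start_offset(skb), skb->csum_offset);
}
```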
218 | 220 | |
---|
219 | 221 | /* Don't change this without changing skb_csum_unnecessary! */ |
---|
.. | .. |
---|
238 | 240 | SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ |
---|
239 | 241 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
---|
240 | 242 | |
---|
| 243 | +struct ahash_request; |
---|
241 | 244 | struct net_device; |
---|
242 | 245 | struct scatterlist; |
---|
243 | 246 | struct pipe_inode_info; |
---|
244 | 247 | struct iov_iter; |
---|
245 | 248 | struct napi_struct; |
---|
| 249 | +struct bpf_prog; |
---|
| 250 | +union bpf_attr; |
---|
| 251 | +struct skb_ext; |
---|
246 | 252 | |
---|
247 | | -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
---|
248 | | -struct nf_conntrack { |
---|
249 | | - atomic_t use; |
---|
250 | | -}; |
---|
251 | | -#endif |
---|
252 | | -#include <linux/android_kabi.h> |
---|
253 | | - |
---|
| 253 | +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
---|
254 | 254 | struct nf_bridge_info { |
---|
255 | | - refcount_t use; |
---|
256 | 255 | enum { |
---|
257 | 256 | BRNF_PROTO_UNCHANGED, |
---|
258 | 257 | BRNF_PROTO_8021Q, |
---|
.. | .. |
---|
278 | 277 | char neigh_header[8]; |
---|
279 | 278 | }; |
---|
280 | 279 | }; |
---|
| 280 | +#endif |
---|
| 281 | + |
---|
| 282 | +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
---|
| 283 | +/* Chain in tc_skb_ext will be used to share the tc chain with |
---|
| 284 | + * ovs recirc_id. It will be set to the current chain by tc |
---|
| 285 | + * and read by ovs to recirc_id. |
---|
| 286 | + */ |
---|
| 287 | +struct tc_skb_ext { |
---|
| 288 | + __u32 chain; |
---|
| 289 | + __u16 mru; |
---|
| 290 | +}; |
---|
| 291 | +#endif |
---|
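
tc_skb_ext travels as an skb extension rather than a dedicated struct member. A rough sketch of the producer side, assuming CONFIG_NET_TC_SKB_EXT and the skb_ext_add()/skb_ext_find() API declared further down this header:

```c
#include <linux/errno.h>
#include <linux/skbuff.h>

/* tc side: publish the current chain so ovs can map it to a recirc_id. */
static int my_record_tc_chain(struct sk_buff *skb, u32 chain)
{
	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

	if (!ext)
		return -ENOMEM;

	ext->chain = chain;	/* ovs later reads it via skb_ext_find() */
	return 0;
}
```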
281 | 292 | |
---|
282 | 293 | struct sk_buff_head { |
---|
283 | 294 | /* These two members must be first. */ |
---|
.. | .. |
---|
310 | 321 | */ |
---|
311 | 322 | #define GSO_BY_FRAGS 0xFFFF |
---|
312 | 323 | |
---|
313 | | -typedef struct skb_frag_struct skb_frag_t; |
---|
| 324 | +typedef struct bio_vec skb_frag_t; |
---|
314 | 325 | |
---|
315 | | -struct skb_frag_struct { |
---|
316 | | - struct { |
---|
317 | | - struct page *p; |
---|
318 | | - } page; |
---|
319 | | -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) |
---|
320 | | - __u32 page_offset; |
---|
321 | | - __u32 size; |
---|
322 | | -#else |
---|
323 | | - __u16 page_offset; |
---|
324 | | - __u16 size; |
---|
325 | | -#endif |
---|
326 | | -}; |
---|
327 | | - |
---|
| 326 | +/** |
---|
| 327 | + * skb_frag_size() - Returns the size of a skb fragment |
---|
| 328 | + * @frag: skb fragment |
---|
| 329 | + */ |
---|
328 | 330 | static inline unsigned int skb_frag_size(const skb_frag_t *frag) |
---|
329 | 331 | { |
---|
330 | | - return frag->size; |
---|
| 332 | + return frag->bv_len; |
---|
331 | 333 | } |
---|
332 | 334 | |
---|
| 335 | +/** |
---|
| 336 | + * skb_frag_size_set() - Sets the size of a skb fragment |
---|
| 337 | + * @frag: skb fragment |
---|
| 338 | + * @size: size of fragment |
---|
| 339 | + */ |
---|
333 | 340 | static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) |
---|
334 | 341 | { |
---|
335 | | - frag->size = size; |
---|
| 342 | + frag->bv_len = size; |
---|
336 | 343 | } |
---|
337 | 344 | |
---|
| 345 | +/** |
---|
| 346 | + * skb_frag_size_add() - Increments the size of a skb fragment by @delta |
---|
| 347 | + * @frag: skb fragment |
---|
| 348 | + * @delta: value to add |
---|
| 349 | + */ |
---|
338 | 350 | static inline void skb_frag_size_add(skb_frag_t *frag, int delta) |
---|
339 | 351 | { |
---|
340 | | - frag->size += delta; |
---|
| 352 | + frag->bv_len += delta; |
---|
341 | 353 | } |
---|
342 | 354 | |
---|
| 355 | +/** |
---|
| 356 | + * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta |
---|
| 357 | + * @frag: skb fragment |
---|
| 358 | + * @delta: value to subtract |
---|
| 359 | + */ |
---|
343 | 360 | static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) |
---|
344 | 361 | { |
---|
345 | | - frag->size -= delta; |
---|
| 362 | + frag->bv_len -= delta; |
---|
346 | 363 | } |
---|
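
With skb_frag_t now aliasing struct bio_vec, callers should keep going through these accessors rather than touching bv_len directly. A small sketch: summing the fragment sizes of a non-linear skb, which matches skb->data_len when there is no frag_list.

```c
#include <linux/skbuff.h>

static unsigned int my_frag_bytes(const struct sk_buff *skb)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		total += skb_frag_size(&skb_shinfo(skb)->frags[i]);

	return total;	/* == skb->data_len if there is no frag_list */
}
```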
347 | 364 | |
---|
| 365 | +/** |
---|
| 366 | + * skb_frag_must_loop - Test if %p is a high memory page |
---|
| 367 | + * @p: fragment's page |
---|
| 368 | + */ |
---|
348 | 369 | static inline bool skb_frag_must_loop(struct page *p) |
---|
349 | 370 | { |
---|
350 | 371 | #if defined(CONFIG_HIGHMEM) |
---|
.. | .. |
---|
358 | 379 | * skb_frag_foreach_page - loop over pages in a fragment |
---|
359 | 380 | * |
---|
360 | 381 | * @f: skb frag to operate on |
---|
361 | | - * @f_off: offset from start of f->page.p |
---|
| 382 | + * @f_off: offset from start of f->bv_page |
---|
362 | 383 | * @f_len: length from f_off to loop over |
---|
363 | 384 | * @p: (temp var) current page |
---|
364 | 385 | * @p_off: (temp var) offset from start of current page, |
---|
.. | .. |
---|
479 | 500 | } |
---|
480 | 501 | |
---|
481 | 502 | void sock_zerocopy_put(struct ubuf_info *uarg); |
---|
482 | | -void sock_zerocopy_put_abort(struct ubuf_info *uarg); |
---|
| 503 | +void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); |
---|
483 | 504 | |
---|
484 | 505 | void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); |
---|
485 | 506 | |
---|
| 507 | +int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); |
---|
486 | 508 | int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, |
---|
487 | 509 | struct msghdr *msg, int len, |
---|
488 | 510 | struct ubuf_info *uarg); |
---|
.. | .. |
---|
511 | 533 | /* Intermediate layers must ensure that destructor_arg |
---|
512 | 534 | * remains valid until skb destructor */ |
---|
513 | 535 | void * destructor_arg; |
---|
| 536 | + |
---|
| 537 | + ANDROID_OEM_DATA_ARRAY(1, 3); |
---|
514 | 538 | |
---|
515 | 539 | /* must be last field, see pskb_expand_head() */ |
---|
516 | 540 | skb_frag_t frags[MAX_SKB_FRAGS]; |
---|
.. | .. |
---|
575 | 599 | SKB_GSO_UDP = 1 << 16, |
---|
576 | 600 | |
---|
577 | 601 | SKB_GSO_UDP_L4 = 1 << 17, |
---|
| 602 | + |
---|
| 603 | + SKB_GSO_FRAGLIST = 1 << 18, |
---|
578 | 604 | }; |
---|
579 | 605 | |
---|
580 | 606 | #if BITS_PER_LONG > 32 |
---|
.. | .. |
---|
587 | 613 | typedef unsigned char *sk_buff_data_t; |
---|
588 | 614 | #endif |
---|
589 | 615 | |
---|
590 | | -/** |
---|
| 616 | +/** |
---|
591 | 617 | * struct sk_buff - socket buffer |
---|
592 | 618 | * @next: Next buffer in list |
---|
593 | 619 | * @prev: Previous buffer in list |
---|
594 | 620 | * @tstamp: Time we arrived/left |
---|
| 621 | + * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point |
---|
| 622 | + * for retransmit timer |
---|
595 | 623 | * @rbnode: RB tree node, alternative to next/prev for netem/tcp |
---|
| 624 | + * @list: queue head |
---|
596 | 625 | * @sk: Socket we are owned by |
---|
| 626 | + * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in |
---|
| 627 | + * fragmentation management |
---|
597 | 628 | * @dev: Device we arrived on/are leaving by |
---|
| 629 | + * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL |
---|
598 | 630 | * @cb: Control buffer. Free for use by every layer. Put private vars here |
---|
599 | 631 | * @_skb_refdst: destination entry (with norefcount bit) |
---|
600 | 632 | * @sp: the security path, used for xfrm |
---|
.. | .. |
---|
613 | 645 | * @pkt_type: Packet class |
---|
614 | 646 | * @fclone: skbuff clone status |
---|
615 | 647 | * @ipvs_property: skbuff is owned by ipvs |
---|
| 648 | + * @inner_protocol_type: whether the inner protocol is |
---|
| 649 | + * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO |
---|
| 650 | + * @remcsum_offload: remote checksum offload is enabled |
---|
| 651 | + * @offload_fwd_mark: Packet was L2-forwarded in hardware |
---|
| 652 | + * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware |
---|
616 | 653 | * @tc_skip_classify: do not classify packet. set by IFB device |
---|
617 | 654 | * @tc_at_ingress: used within tc_classify to distinguish in/egress |
---|
618 | | - * @tc_redirected: packet was redirected by a tc action |
---|
619 | | - * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect |
---|
| 655 | + * @redirected: packet was redirected by packet classifier |
---|
| 656 | + * @from_ingress: packet was redirected from the ingress path |
---|
620 | 657 | * @peeked: this packet has been seen already, so stats have been |
---|
621 | 658 | * done for it, don't do them again |
---|
622 | 659 | * @nf_trace: netfilter packet trace flag |
---|
.. | .. |
---|
629 | 666 | * @tc_index: Traffic control index |
---|
630 | 667 | * @hash: the packet hash |
---|
631 | 668 | * @queue_mapping: Queue mapping for multiqueue devices |
---|
632 | | - * @xmit_more: More SKBs are pending for this queue |
---|
| 669 | + * @head_frag: skb was allocated from page fragments, |
---|
| 670 | + * not allocated by kmalloc() or vmalloc(). |
---|
633 | 671 | * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves |
---|
| 672 | + * @active_extensions: active extensions (skb_ext_id types) |
---|
634 | 673 | * @ndisc_nodetype: router type (from link layer) |
---|
635 | 674 | * @ooo_okay: allow the mapping of a socket to a queue to be changed |
---|
636 | 675 | * @l4_hash: indicate hash is a canonical 4-tuple hash over transport |
---|
.. | .. |
---|
639 | 678 | * @wifi_acked_valid: wifi_acked was set |
---|
640 | 679 | * @wifi_acked: whether frame was acked on wifi or not |
---|
641 | 680 | * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS |
---|
| 681 | + * @encapsulation: indicates the inner headers in the skbuff are valid |
---|
| 682 | + * @encap_hdr_csum: software checksum is needed |
---|
| 683 | + * @csum_valid: checksum is already valid |
---|
642 | 684 | * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL |
---|
| 685 | + * @csum_complete_sw: checksum was completed by software |
---|
| 686 | + * @csum_level: indicates the number of consecutive checksums found in |
---|
| 687 | + * the packet minus one that have been verified as |
---|
| 688 | + * CHECKSUM_UNNECESSARY (max 3) |
---|
| 689 | + * @scm_io_uring: SKB holds io_uring registered files |
---|
643 | 690 | * @dst_pending_confirm: need to confirm neighbour |
---|
644 | 691 | * @decrypted: Decrypted SKB |
---|
645 | | - * @napi_id: id of the NAPI struct this skb came from |
---|
| 692 | + * @napi_id: id of the NAPI struct this skb came from |
---|
| 693 | + * @sender_cpu: (aka @napi_id) source CPU in XPS |
---|
646 | 694 | * @secmark: security marking |
---|
647 | 695 | * @mark: Generic packet mark |
---|
| 696 | + * @reserved_tailroom: (aka @mark) number of bytes of free space available |
---|
| 697 | + * at the tail of an sk_buff |
---|
| 698 | + * @vlan_present: VLAN tag is present |
---|
648 | 699 | * @vlan_proto: vlan encapsulation protocol |
---|
649 | 700 | * @vlan_tci: vlan tag control information |
---|
650 | 701 | * @inner_protocol: Protocol (encapsulation) |
---|
| 702 | + * @inner_ipproto: (aka @inner_protocol) stores ipproto when |
---|
| 703 | + * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO; |
---|
651 | 704 | * @inner_transport_header: Inner transport layer header (encapsulation) |
---|
652 | 705 | * @inner_network_header: Network layer header (encapsulation) |
---|
653 | 706 | * @inner_mac_header: Link layer header (encapsulation) |
---|
.. | .. |
---|
660 | 713 | * @data: Data head pointer |
---|
661 | 714 | * @truesize: Buffer size |
---|
662 | 715 | * @users: User count - see {datagram,tcp}.c |
---|
| 716 | + * @extensions: allocated extensions, valid if active_extensions is nonzero |
---|
663 | 717 | */ |
---|
664 | 718 | |
---|
665 | 719 | struct sk_buff { |
---|
.. | .. |
---|
689 | 743 | |
---|
690 | 744 | union { |
---|
691 | 745 | ktime_t tstamp; |
---|
692 | | - u64 skb_mstamp; |
---|
| 746 | + u64 skb_mstamp_ns; /* earliest departure time */ |
---|
693 | 747 | }; |
---|
694 | 748 | /* |
---|
695 | 749 | * This is the control buffer. It is free to use for every |
---|
.. | .. |
---|
707 | 761 | struct list_head tcp_tsorted_anchor; |
---|
708 | 762 | }; |
---|
709 | 763 | |
---|
710 | | -#ifdef CONFIG_XFRM |
---|
711 | | - struct sec_path *sp; |
---|
712 | | -#endif |
---|
713 | 764 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
---|
714 | 765 | unsigned long _nfct; |
---|
715 | 766 | #endif |
---|
716 | | - struct nf_bridge_info *nf_bridge; |
---|
717 | 767 | unsigned int len, |
---|
718 | 768 | data_len; |
---|
719 | 769 | __u16 mac_len, |
---|
.. | .. |
---|
732 | 782 | #endif |
---|
733 | 783 | #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset) |
---|
734 | 784 | |
---|
| 785 | + /* private: */ |
---|
735 | 786 | __u8 __cloned_offset[0]; |
---|
| 787 | + /* public: */ |
---|
736 | 788 | __u8 cloned:1, |
---|
737 | 789 | nohdr:1, |
---|
738 | 790 | fclone:2, |
---|
739 | 791 | peeked:1, |
---|
740 | 792 | head_frag:1, |
---|
741 | | - xmit_more:1, |
---|
742 | 793 | pfmemalloc:1; |
---|
743 | | - |
---|
| 794 | +#ifdef CONFIG_SKB_EXTENSIONS |
---|
| 795 | + __u8 active_extensions; |
---|
| 796 | +#endif |
---|
744 | 797 | /* fields enclosed in headers_start/headers_end are copied |
---|
745 | 798 | * using a single memcpy() in __copy_skb_header() |
---|
746 | 799 | */ |
---|
.. | .. |
---|
756 | 809 | #endif |
---|
757 | 810 | #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) |
---|
758 | 811 | |
---|
| 812 | + /* private: */ |
---|
759 | 813 | __u8 __pkt_type_offset[0]; |
---|
| 814 | + /* public: */ |
---|
760 | 815 | __u8 pkt_type:3; |
---|
761 | 816 | __u8 ignore_df:1; |
---|
762 | 817 | __u8 nf_trace:1; |
---|
.. | .. |
---|
773 | 828 | __u8 encap_hdr_csum:1; |
---|
774 | 829 | __u8 csum_valid:1; |
---|
775 | 830 | |
---|
| 831 | +#ifdef __BIG_ENDIAN_BITFIELD |
---|
| 832 | +#define PKT_VLAN_PRESENT_BIT 7 |
---|
| 833 | +#else |
---|
| 834 | +#define PKT_VLAN_PRESENT_BIT 0 |
---|
| 835 | +#endif |
---|
| 836 | +#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset) |
---|
| 837 | + /* private: */ |
---|
| 838 | + __u8 __pkt_vlan_present_offset[0]; |
---|
| 839 | + /* public: */ |
---|
| 840 | + __u8 vlan_present:1; |
---|
776 | 841 | __u8 csum_complete_sw:1; |
---|
777 | 842 | __u8 csum_level:2; |
---|
778 | 843 | __u8 csum_not_inet:1; |
---|
.. | .. |
---|
780 | 845 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
---|
781 | 846 | __u8 ndisc_nodetype:2; |
---|
782 | 847 | #endif |
---|
783 | | - __u8 ipvs_property:1; |
---|
784 | 848 | |
---|
| 849 | + __u8 ipvs_property:1; |
---|
785 | 850 | __u8 inner_protocol_type:1; |
---|
786 | 851 | __u8 remcsum_offload:1; |
---|
787 | 852 | #ifdef CONFIG_NET_SWITCHDEV |
---|
788 | 853 | __u8 offload_fwd_mark:1; |
---|
789 | | - __u8 offload_mr_fwd_mark:1; |
---|
| 854 | + __u8 offload_l3_fwd_mark:1; |
---|
790 | 855 | #endif |
---|
791 | 856 | #ifdef CONFIG_NET_CLS_ACT |
---|
792 | 857 | __u8 tc_skip_classify:1; |
---|
793 | 858 | __u8 tc_at_ingress:1; |
---|
794 | | - __u8 tc_redirected:1; |
---|
795 | | - __u8 tc_from_ingress:1; |
---|
| 859 | +#endif |
---|
| 860 | +#ifdef CONFIG_NET_REDIRECT |
---|
| 861 | + __u8 redirected:1; |
---|
| 862 | + __u8 from_ingress:1; |
---|
796 | 863 | #endif |
---|
797 | 864 | #ifdef CONFIG_TLS_DEVICE |
---|
798 | 865 | __u8 decrypted:1; |
---|
.. | .. |
---|
847 | 914 | __u32 headers_end[0]; |
---|
848 | 915 | /* public: */ |
---|
849 | 916 | |
---|
850 | | - ANDROID_KABI_RESERVE(1); |
---|
| 917 | + /* Android KABI preservation. |
---|
| 918 | + * |
---|
| 919 | + * "open coded" version of ANDROID_KABI_USE() to pack more |
---|
| 920 | + * fields/variables into the space that we have. |
---|
| 921 | + * |
---|
| 922 | + * scm_io_uring is from 04df9719df18 ("io_uring/af_unix: defer |
---|
| 923 | + * registered files gc to io_uring release") |
---|
| 924 | + */ |
---|
| 925 | + _ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(1), |
---|
| 926 | + struct { |
---|
| 927 | + __u8 scm_io_uring:1; |
---|
| 928 | + __u8 android_kabi_reserved1_padding1; |
---|
| 929 | + __u16 android_kabi_reserved1_padding2; |
---|
| 930 | + __u32 android_kabi_reserved1_padding3; |
---|
| 931 | + }); |
---|
851 | 932 | ANDROID_KABI_RESERVE(2); |
---|
852 | 933 | |
---|
853 | 934 | /* These elements must be at the end, see alloc_skb() for details. */ |
---|
.. | .. |
---|
857 | 938 | *data; |
---|
858 | 939 | unsigned int truesize; |
---|
859 | 940 | refcount_t users; |
---|
| 941 | + |
---|
| 942 | +#ifdef CONFIG_SKB_EXTENSIONS |
---|
| 943 | + /* only useable after checking ->active_extensions != 0 */ |
---|
| 944 | + struct skb_ext *extensions; |
---|
| 945 | +#endif |
---|
860 | 946 | }; |
---|
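
The _ANDROID_KABI_REPLACE() block above packs scm_io_uring into the space of the first KABI placeholder. A rough, hypothetical illustration of the packing idea (not android_kabi.h's actual definition): the placeholder and the new fields share storage in an anonymous union, so member offsets and the overall struct size stay frozen for out-of-tree consumers.

```c
#include <linux/build_bug.h>
#include <linux/types.h>

struct demo {
	union {
		u64 reserved;			/* original ABI placeholder */
		struct {
			u8  scm_io_uring:1;	/* new flag */
			u8  pad1;
			u16 pad2;
			u32 pad3;		/* remainder of the u64 */
		};
	};
};
static_assert(sizeof(struct demo) == sizeof(u64));
```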
861 | 947 | |
---|
862 | 948 | #ifdef __KERNEL__ |
---|
.. | .. |
---|
868 | 954 | #define SKB_ALLOC_RX 0x02 |
---|
869 | 955 | #define SKB_ALLOC_NAPI 0x04 |
---|
870 | 956 | |
---|
871 | | -/* Returns true if the skb was allocated from PFMEMALLOC reserves */ |
---|
| 957 | +/** |
---|
| 958 | + * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves |
---|
| 959 | + * @skb: buffer |
---|
| 960 | + */ |
---|
872 | 961 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) |
---|
873 | 962 | { |
---|
874 | 963 | return unlikely(skb->pfmemalloc); |
---|
.. | .. |
---|
881 | 970 | #define SKB_DST_NOREF 1UL |
---|
882 | 971 | #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) |
---|
883 | 972 | |
---|
884 | | -#define SKB_NFCT_PTRMASK ~(7UL) |
---|
885 | 973 | /** |
---|
886 | 974 | * skb_dst - returns skb dst_entry |
---|
887 | 975 | * @skb: buffer |
---|
.. | .. |
---|
890 | 978 | */ |
---|
891 | 979 | static inline struct dst_entry *skb_dst(const struct sk_buff *skb) |
---|
892 | 980 | { |
---|
893 | | - /* If refdst was not refcounted, check we still are in a |
---|
| 981 | + /* If refdst was not refcounted, check we still are in a |
---|
894 | 982 | * rcu_read_lock section |
---|
895 | 983 | */ |
---|
896 | 984 | WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && |
---|
.. | .. |
---|
937 | 1025 | return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); |
---|
938 | 1026 | } |
---|
939 | 1027 | |
---|
| 1028 | +/** |
---|
| 1029 | + * skb_rtable - Returns the skb &rtable |
---|
| 1030 | + * @skb: buffer |
---|
| 1031 | + */ |
---|
940 | 1032 | static inline struct rtable *skb_rtable(const struct sk_buff *skb) |
---|
941 | 1033 | { |
---|
942 | 1034 | return (struct rtable *)skb_dst(skb); |
---|
.. | .. |
---|
951 | 1043 | return ptype <= PACKET_OTHERHOST; |
---|
952 | 1044 | } |
---|
953 | 1045 | |
---|
| 1046 | +/** |
---|
| 1047 | + * skb_napi_id - Returns the skb's NAPI id |
---|
| 1048 | + * @skb: buffer |
---|
| 1049 | + */ |
---|
954 | 1050 | static inline unsigned int skb_napi_id(const struct sk_buff *skb) |
---|
955 | 1051 | { |
---|
956 | 1052 | #ifdef CONFIG_NET_RX_BUSY_POLL |
---|
.. | .. |
---|
960 | 1056 | #endif |
---|
961 | 1057 | } |
---|
962 | 1058 | |
---|
963 | | -/* decrement the reference count and return true if we can free the skb */ |
---|
| 1059 | +/** |
---|
| 1060 | + * skb_unref - decrement the skb's reference count |
---|
| 1061 | + * @skb: buffer |
---|
| 1062 | + * |
---|
| 1063 | + * Returns true if we can free the skb. |
---|
| 1064 | + */ |
---|
964 | 1065 | static inline bool skb_unref(struct sk_buff *skb) |
---|
965 | 1066 | { |
---|
966 | 1067 | if (unlikely(!skb)) |
---|
.. | .. |
---|
976 | 1077 | void skb_release_head_state(struct sk_buff *skb); |
---|
977 | 1078 | void kfree_skb(struct sk_buff *skb); |
---|
978 | 1079 | void kfree_skb_list(struct sk_buff *segs); |
---|
| 1080 | +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); |
---|
979 | 1081 | void skb_tx_error(struct sk_buff *skb); |
---|
| 1082 | + |
---|
| 1083 | +#ifdef CONFIG_TRACEPOINTS |
---|
980 | 1084 | void consume_skb(struct sk_buff *skb); |
---|
| 1085 | +#else |
---|
| 1086 | +static inline void consume_skb(struct sk_buff *skb) |
---|
| 1087 | +{ |
---|
| 1088 | + return kfree_skb(skb); |
---|
| 1089 | +} |
---|
| 1090 | +#endif |
---|
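
The fallback above makes consume_skb() an alias for kfree_skb() when tracepoints are compiled out; the two differ only in what they report to tracing. Callers should still pick the semantically right one, as in this sketch:

```c
#include <linux/skbuff.h>

static void my_tx_complete(struct sk_buff *skb, bool ok)
{
	if (ok)
		consume_skb(skb);	/* normal end of life */
	else
		kfree_skb(skb);		/* shows up as a drop in tracing */
}
```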
| 1091 | + |
---|
981 | 1092 | void __consume_stateless_skb(struct sk_buff *skb); |
---|
982 | 1093 | void __kfree_skb(struct sk_buff *skb); |
---|
983 | 1094 | extern struct kmem_cache *skbuff_head_cache; |
---|
.. | .. |
---|
990 | 1101 | int node); |
---|
991 | 1102 | struct sk_buff *__build_skb(void *data, unsigned int frag_size); |
---|
992 | 1103 | struct sk_buff *build_skb(void *data, unsigned int frag_size); |
---|
| 1104 | +struct sk_buff *build_skb_around(struct sk_buff *skb, |
---|
| 1105 | + void *data, unsigned int frag_size); |
---|
| 1106 | + |
---|
| 1107 | +/** |
---|
| 1108 | + * alloc_skb - allocate a network buffer |
---|
| 1109 | + * @size: size to allocate |
---|
| 1110 | + * @priority: allocation mask |
---|
| 1111 | + * |
---|
| 1112 | + * This function is a convenient wrapper around __alloc_skb(). |
---|
| 1113 | + */ |
---|
993 | 1114 | static inline struct sk_buff *alloc_skb(unsigned int size, |
---|
994 | 1115 | gfp_t priority) |
---|
995 | 1116 | { |
---|
.. | .. |
---|
1001 | 1122 | int max_page_order, |
---|
1002 | 1123 | int *errcode, |
---|
1003 | 1124 | gfp_t gfp_mask); |
---|
| 1125 | +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); |
---|
1004 | 1126 | |
---|
1005 | 1127 | /* Layout of fast clones : [skb1][skb2][fclone_ref] */ |
---|
1006 | 1128 | struct sk_buff_fclones { |
---|
.. | .. |
---|
1032 | 1154 | fclones->skb2.sk == sk; |
---|
1033 | 1155 | } |
---|
1034 | 1156 | |
---|
| 1157 | +/** |
---|
| 1158 | + * alloc_skb_fclone - allocate a network buffer from fclone cache |
---|
| 1159 | + * @size: size to allocate |
---|
| 1160 | + * @priority: allocation mask |
---|
| 1161 | + * |
---|
| 1162 | + * This function is a convenient wrapper around __alloc_skb(). |
---|
| 1163 | + */ |
---|
1035 | 1164 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, |
---|
1036 | 1165 | gfp_t priority) |
---|
1037 | 1166 | { |
---|
.. | .. |
---|
1080 | 1209 | return __skb_pad(skb, pad, true); |
---|
1081 | 1210 | } |
---|
1082 | 1211 | #define dev_kfree_skb(a) consume_skb(a) |
---|
1083 | | - |
---|
1084 | | -int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, |
---|
1085 | | - int getfrag(void *from, char *to, int offset, |
---|
1086 | | - int len, int odd, struct sk_buff *skb), |
---|
1087 | | - void *from, int length); |
---|
1088 | 1212 | |
---|
1089 | 1213 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, |
---|
1090 | 1214 | int offset, size_t size); |
---|
.. | .. |
---|
1193 | 1317 | const struct flow_dissector_key *key, |
---|
1194 | 1318 | unsigned int key_count); |
---|
1195 | 1319 | |
---|
1196 | | -bool __skb_flow_dissect(const struct sk_buff *skb, |
---|
| 1320 | +struct bpf_flow_dissector; |
---|
| 1321 | +bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, |
---|
| 1322 | + __be16 proto, int nhoff, int hlen, unsigned int flags); |
---|
| 1323 | + |
---|
| 1324 | +bool __skb_flow_dissect(const struct net *net, |
---|
| 1325 | + const struct sk_buff *skb, |
---|
1197 | 1326 | struct flow_dissector *flow_dissector, |
---|
1198 | 1327 | void *target_container, |
---|
1199 | 1328 | void *data, __be16 proto, int nhoff, int hlen, |
---|
.. | .. |
---|
1203 | 1332 | struct flow_dissector *flow_dissector, |
---|
1204 | 1333 | void *target_container, unsigned int flags) |
---|
1205 | 1334 | { |
---|
1206 | | - return __skb_flow_dissect(skb, flow_dissector, target_container, |
---|
1207 | | - NULL, 0, 0, 0, flags); |
---|
| 1335 | + return __skb_flow_dissect(NULL, skb, flow_dissector, |
---|
| 1336 | + target_container, NULL, 0, 0, 0, flags); |
---|
1208 | 1337 | } |
---|
1209 | 1338 | |
---|
1210 | 1339 | static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, |
---|
.. | .. |
---|
1212 | 1341 | unsigned int flags) |
---|
1213 | 1342 | { |
---|
1214 | 1343 | memset(flow, 0, sizeof(*flow)); |
---|
1215 | | - return __skb_flow_dissect(skb, &flow_keys_dissector, flow, |
---|
1216 | | - NULL, 0, 0, 0, flags); |
---|
| 1344 | + return __skb_flow_dissect(NULL, skb, &flow_keys_dissector, |
---|
| 1345 | + flow, NULL, 0, 0, 0, flags); |
---|
1217 | 1346 | } |
---|
1218 | 1347 | |
---|
1219 | 1348 | static inline bool |
---|
1220 | | -skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb, |
---|
| 1349 | +skb_flow_dissect_flow_keys_basic(const struct net *net, |
---|
| 1350 | + const struct sk_buff *skb, |
---|
1221 | 1351 | struct flow_keys_basic *flow, void *data, |
---|
1222 | 1352 | __be16 proto, int nhoff, int hlen, |
---|
1223 | 1353 | unsigned int flags) |
---|
1224 | 1354 | { |
---|
1225 | 1355 | memset(flow, 0, sizeof(*flow)); |
---|
1226 | | - return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow, |
---|
| 1356 | + return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, |
---|
1227 | 1357 | data, proto, nhoff, hlen, flags); |
---|
1228 | 1358 | } |
---|
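
__skb_flow_dissect() now takes the netns explicitly so a BPF flow dissector can be resolved even for skbs with no device or socket attached. A minimal sketch of an updated caller of the basic helper:

```c
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static bool my_get_thoff(const struct net *net, const struct sk_buff *skb,
			 u16 *thoff)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(net, skb, &keys,
					      NULL, 0, 0, 0, 0))
		return false;

	*thoff = keys.control.thoff;	/* transport header offset */
	return true;
}
```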
1229 | 1359 | |
---|
| 1360 | +void skb_flow_dissect_meta(const struct sk_buff *skb, |
---|
| 1361 | + struct flow_dissector *flow_dissector, |
---|
| 1362 | + void *target_container); |
---|
| 1363 | + |
---|
| 1364 | +/* Gets a skb connection tracking info, ctinfo map should be a |
---|
| 1365 | + * map of mapsize to translate enum ip_conntrack_info states |
---|
| 1366 | + * to user states. |
---|
| 1367 | + */ |
---|
| 1368 | +void |
---|
| 1369 | +skb_flow_dissect_ct(const struct sk_buff *skb, |
---|
| 1370 | + struct flow_dissector *flow_dissector, |
---|
| 1371 | + void *target_container, |
---|
| 1372 | + u16 *ctinfo_map, |
---|
| 1373 | + size_t mapsize); |
---|
1230 | 1374 | void |
---|
1231 | 1375 | skb_flow_dissect_tunnel_info(const struct sk_buff *skb, |
---|
1232 | 1376 | struct flow_dissector *flow_dissector, |
---|
1233 | 1377 | void *target_container); |
---|
| 1378 | + |
---|
| 1379 | +void skb_flow_dissect_hash(const struct sk_buff *skb, |
---|
| 1380 | + struct flow_dissector *flow_dissector, |
---|
| 1381 | + void *target_container); |
---|
1234 | 1382 | |
---|
1235 | 1383 | static inline __u32 skb_get_hash(struct sk_buff *skb) |
---|
1236 | 1384 | { |
---|
.. | .. |
---|
1267 | 1415 | to->l4_hash = from->l4_hash; |
---|
1268 | 1416 | }; |
---|
1269 | 1417 | |
---|
| 1418 | +static inline void skb_copy_decrypted(struct sk_buff *to, |
---|
| 1419 | + const struct sk_buff *from) |
---|
| 1420 | +{ |
---|
| 1421 | +#ifdef CONFIG_TLS_DEVICE |
---|
| 1422 | + to->decrypted = from->decrypted; |
---|
| 1423 | +#endif |
---|
| 1424 | +} |
---|
| 1425 | + |
---|
1270 | 1426 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
---|
1271 | 1427 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
---|
1272 | 1428 | { |
---|
.. | .. |
---|
1277 | 1433 | { |
---|
1278 | 1434 | return skb->end; |
---|
1279 | 1435 | } |
---|
| 1436 | + |
---|
| 1437 | +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) |
---|
| 1438 | +{ |
---|
| 1439 | + skb->end = offset; |
---|
| 1440 | +} |
---|
1280 | 1441 | #else |
---|
1281 | 1442 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
---|
1282 | 1443 | { |
---|
.. | .. |
---|
1286 | 1447 | static inline unsigned int skb_end_offset(const struct sk_buff *skb) |
---|
1287 | 1448 | { |
---|
1288 | 1449 | return skb->end - skb->head; |
---|
| 1450 | +} |
---|
| 1451 | + |
---|
| 1452 | +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) |
---|
| 1453 | +{ |
---|
| 1454 | + skb->end = skb->head + offset; |
---|
1289 | 1455 | } |
---|
1290 | 1456 | #endif |
---|
1291 | 1457 | |
---|
.. | .. |
---|
1304 | 1470 | return is_zcopy ? skb_uarg(skb) : NULL; |
---|
1305 | 1471 | } |
---|
1306 | 1472 | |
---|
1307 | | -static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) |
---|
| 1473 | +static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, |
---|
| 1474 | + bool *have_ref) |
---|
1308 | 1475 | { |
---|
1309 | 1476 | if (skb && uarg && !skb_zcopy(skb)) { |
---|
1310 | | - sock_zerocopy_get(uarg); |
---|
| 1477 | + if (unlikely(have_ref && *have_ref)) |
---|
| 1478 | + *have_ref = false; |
---|
| 1479 | + else |
---|
| 1480 | + sock_zerocopy_get(uarg); |
---|
1311 | 1481 | skb_shinfo(skb)->destructor_arg = uarg; |
---|
1312 | 1482 | skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; |
---|
1313 | 1483 | } |
---|
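
The new @have_ref parameter lets a caller donate a reference it already holds on the ubuf_info instead of having skb_zcopy_set() take another. A sketch of the pattern, assuming the caller arrived with one reference:

```c
#include <linux/skbuff.h>

static void my_attach_uarg(struct sk_buff *skb, struct ubuf_info *uarg)
{
	bool have_ref = true;	/* we hold one reference on entry */

	skb_zcopy_set(skb, uarg, &have_ref);
	if (have_ref)		/* not donated (e.g. skb already zerocopy) */
		sock_zerocopy_put(uarg);
}
```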
.. | .. |
---|
1354 | 1524 | struct ubuf_info *uarg = skb_zcopy(skb); |
---|
1355 | 1525 | |
---|
1356 | 1526 | if (uarg) { |
---|
1357 | | - sock_zerocopy_put_abort(uarg); |
---|
| 1527 | + sock_zerocopy_put_abort(uarg, false); |
---|
1358 | 1528 | skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; |
---|
1359 | 1529 | } |
---|
1360 | 1530 | } |
---|
.. | .. |
---|
1502 | 1672 | return 0; |
---|
1503 | 1673 | } |
---|
1504 | 1674 | |
---|
| 1675 | +/* This variant of skb_unclone() makes sure skb->truesize |
---|
| 1676 | + * and skb_end_offset() are not changed, whenever a new skb->head is needed. |
---|
| 1677 | + * |
---|
| 1678 | + * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X)) |
---|
| 1679 | + * when various debugging features are in place. |
---|
| 1680 | + */ |
---|
| 1681 | +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri); |
---|
| 1682 | +static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) |
---|
| 1683 | +{ |
---|
| 1684 | + might_sleep_if(gfpflags_allow_blocking(pri)); |
---|
| 1685 | + |
---|
| 1686 | + if (skb_cloned(skb)) |
---|
| 1687 | + return __skb_unclone_keeptruesize(skb, pri); |
---|
| 1688 | + return 0; |
---|
| 1689 | +} |
---|
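
A sketch of the intended use: unclone an skb that is already charged to a socket queue before rewriting it in place, so the truesize recorded at queue time stays accurate.

```c
#include <linux/skbuff.h>

static int my_prepare_inplace_rewrite(struct sk_buff *skb)
{
	/* Safe from atomic context; keeps truesize/end offset stable. */
	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
}
```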
| 1690 | + |
---|
1505 | 1691 | /** |
---|
1506 | 1692 | * skb_header_cloned - is the header a clone |
---|
1507 | 1693 | * @skb: buffer to check |
---|
.. | .. |
---|
1642 | 1828 | } |
---|
1643 | 1829 | |
---|
1644 | 1830 | /** |
---|
| 1831 | + * __skb_peek - peek at the head of a non-empty &sk_buff_head |
---|
| 1832 | + * @list_: list to peek at |
---|
| 1833 | + * |
---|
| 1834 | + * Like skb_peek(), but the caller knows that the list is not empty. |
---|
| 1835 | + */ |
---|
| 1836 | +static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) |
---|
| 1837 | +{ |
---|
| 1838 | + return list_->next; |
---|
| 1839 | +} |
---|
| 1840 | + |
---|
| 1841 | +/** |
---|
1645 | 1842 | * skb_peek_next - peek skb following the given one from a queue |
---|
1646 | 1843 | * @skb: skb to start from |
---|
1647 | 1844 | * @list_: list to peek at |
---|
.. | .. |
---|
1755 | 1952 | * The "__skb_xxxx()" functions are the non-atomic ones that |
---|
1756 | 1953 | * can only be called with interrupts disabled. |
---|
1757 | 1954 | */ |
---|
1758 | | -void skb_insert(struct sk_buff *old, struct sk_buff *newsk, |
---|
1759 | | - struct sk_buff_head *list); |
---|
1760 | 1955 | static inline void __skb_insert(struct sk_buff *newsk, |
---|
1761 | 1956 | struct sk_buff *prev, struct sk_buff *next, |
---|
1762 | 1957 | struct sk_buff_head *list) |
---|
.. | .. |
---|
1886 | 2081 | * |
---|
1887 | 2082 | * A buffer cannot be placed on two lists at the same time. |
---|
1888 | 2083 | */ |
---|
1889 | | -void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); |
---|
1890 | 2084 | static inline void __skb_queue_head(struct sk_buff_head *list, |
---|
1891 | 2085 | struct sk_buff *newsk) |
---|
1892 | 2086 | { |
---|
1893 | 2087 | __skb_queue_after(list, (struct sk_buff *)list, newsk); |
---|
1894 | 2088 | } |
---|
| 2089 | +void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); |
---|
1895 | 2090 | |
---|
1896 | 2091 | /** |
---|
1897 | 2092 | * __skb_queue_tail - queue a buffer at the list tail |
---|
.. | .. |
---|
1903 | 2098 | * |
---|
1904 | 2099 | * A buffer cannot be placed on two lists at the same time. |
---|
1905 | 2100 | */ |
---|
1906 | | -void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); |
---|
1907 | 2101 | static inline void __skb_queue_tail(struct sk_buff_head *list, |
---|
1908 | 2102 | struct sk_buff *newsk) |
---|
1909 | 2103 | { |
---|
1910 | 2104 | __skb_queue_before(list, (struct sk_buff *)list, newsk); |
---|
1911 | 2105 | } |
---|
| 2106 | +void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); |
---|
1912 | 2107 | |
---|
1913 | 2108 | /* |
---|
1914 | 2109 | * remove sk_buff from list. _Must_ be called atomically, and with |
---|
.. | .. |
---|
1935 | 2130 | * so must be used with appropriate locks held only. The head item is |
---|
1936 | 2131 | * returned or %NULL if the list is empty. |
---|
1937 | 2132 | */ |
---|
1938 | | -struct sk_buff *skb_dequeue(struct sk_buff_head *list); |
---|
1939 | 2133 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) |
---|
1940 | 2134 | { |
---|
1941 | 2135 | struct sk_buff *skb = skb_peek(list); |
---|
.. | .. |
---|
1943 | 2137 | __skb_unlink(skb, list); |
---|
1944 | 2138 | return skb; |
---|
1945 | 2139 | } |
---|
| 2140 | +struct sk_buff *skb_dequeue(struct sk_buff_head *list); |
---|
1946 | 2141 | |
---|
1947 | 2142 | /** |
---|
1948 | 2143 | * __skb_dequeue_tail - remove from the tail of the queue |
---|
.. | .. |
---|
1952 | 2147 | * so must be used with appropriate locks held only. The tail item is |
---|
1953 | 2148 | * returned or %NULL if the list is empty. |
---|
1954 | 2149 | */ |
---|
1955 | | -struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); |
---|
1956 | 2150 | static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) |
---|
1957 | 2151 | { |
---|
1958 | 2152 | struct sk_buff *skb = skb_peek_tail(list); |
---|
.. | .. |
---|
1960 | 2154 | __skb_unlink(skb, list); |
---|
1961 | 2155 | return skb; |
---|
1962 | 2156 | } |
---|
| 2157 | +struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); |
---|
1963 | 2158 | |
---|
1964 | 2159 | |
---|
1965 | 2160 | static inline bool skb_is_nonlinear(const struct sk_buff *skb) |
---|
.. | .. |
---|
2009 | 2204 | * that not all callers have unique ownership of the page but rely |
---|
2010 | 2205 | * on page_is_pfmemalloc doing the right thing(tm). |
---|
2011 | 2206 | */ |
---|
2012 | | - frag->page.p = page; |
---|
2013 | | - frag->page_offset = off; |
---|
| 2207 | + frag->bv_page = page; |
---|
| 2208 | + frag->bv_offset = off; |
---|
2014 | 2209 | skb_frag_size_set(frag, size); |
---|
2015 | 2210 | |
---|
2016 | 2211 | page = compound_head(page); |
---|
.. | .. |
---|
2045 | 2240 | void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, |
---|
2046 | 2241 | unsigned int truesize); |
---|
2047 | 2242 | |
---|
2048 | | -#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) |
---|
2049 | | -#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) |
---|
2050 | 2243 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) |
---|
2051 | 2244 | |
---|
2052 | 2245 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
---|
.. | .. |
---|
2083 | 2276 | } |
---|
2084 | 2277 | |
---|
2085 | 2278 | #endif /* NET_SKBUFF_DATA_USES_OFFSET */ |
---|
| 2279 | + |
---|
| 2280 | +static inline void skb_assert_len(struct sk_buff *skb) |
---|
| 2281 | +{ |
---|
| 2282 | +#ifdef CONFIG_DEBUG_NET |
---|
| 2283 | + if (WARN_ONCE(!skb->len, "%s\n", __func__)) |
---|
| 2284 | + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); |
---|
| 2285 | +#endif /* CONFIG_DEBUG_NET */ |
---|
| 2286 | +} |
---|
2086 | 2287 | |
---|
2087 | 2288 | /* |
---|
2088 | 2289 | * Add data to an sk_buff |
---|
.. | .. |
---|
2181 | 2382 | return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); |
---|
2182 | 2383 | } |
---|
2183 | 2384 | |
---|
2184 | | -static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) |
---|
| 2385 | +static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) |
---|
2185 | 2386 | { |
---|
2186 | 2387 | if (likely(len <= skb_headlen(skb))) |
---|
2187 | | - return 1; |
---|
| 2388 | + return true; |
---|
2188 | 2389 | if (unlikely(len > skb->len)) |
---|
2189 | | - return 0; |
---|
| 2390 | + return false; |
---|
2190 | 2391 | return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; |
---|
2191 | 2392 | } |
---|
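
pskb_may_pull() returning bool matches how it is actually used: guard the linear area before reading headers. A minimal sketch, assuming skb->data currently points at the IP header:

```c
#include <linux/ip.h>
#include <linux/skbuff.h>

static bool my_check_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return false;	/* too short to hold an IPv4 header */

	iph = (const struct iphdr *)skb->data;
	return iph->version == 4;
}
```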
2192 | 2393 | |
---|
.. | .. |
---|
2410 | 2611 | return skb->mac_header != (typeof(skb->mac_header))~0U; |
---|
2411 | 2612 | } |
---|
2412 | 2613 | |
---|
| 2614 | +static inline void skb_unset_mac_header(struct sk_buff *skb) |
---|
| 2615 | +{ |
---|
| 2616 | + skb->mac_header = (typeof(skb->mac_header))~0U; |
---|
| 2617 | +} |
---|
| 2618 | + |
---|
2413 | 2619 | static inline void skb_reset_mac_header(struct sk_buff *skb) |
---|
2414 | 2620 | { |
---|
2415 | 2621 | skb->mac_header = skb->data - skb->head; |
---|
.. | .. |
---|
2426 | 2632 | skb->mac_header = skb->network_header; |
---|
2427 | 2633 | } |
---|
2428 | 2634 | |
---|
2429 | | -static inline void skb_probe_transport_header(struct sk_buff *skb, |
---|
2430 | | - const int offset_hint) |
---|
| 2635 | +static inline void skb_probe_transport_header(struct sk_buff *skb) |
---|
2431 | 2636 | { |
---|
2432 | 2637 | struct flow_keys_basic keys; |
---|
2433 | 2638 | |
---|
2434 | 2639 | if (skb_transport_header_was_set(skb)) |
---|
2435 | 2640 | return; |
---|
2436 | 2641 | |
---|
2437 | | - if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) |
---|
| 2642 | + if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, |
---|
| 2643 | + NULL, 0, 0, 0, 0)) |
---|
2438 | 2644 | skb_set_transport_header(skb, keys.control.thoff); |
---|
2439 | | - else if (offset_hint >= 0) |
---|
2440 | | - skb_set_transport_header(skb, offset_hint); |
---|
2441 | 2645 | } |
---|
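
The offset hint is gone: probing now either succeeds via flow dissection or leaves the transport header unset. A caller update looks like this sketch (my_finish_build() is hypothetical):

```c
#include <linux/skbuff.h>

static void my_finish_build(struct sk_buff *skb)
{
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);	/* was: (skb, 0) */
}
```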
2442 | 2646 | |
---|
2443 | 2647 | static inline void skb_mac_header_rebuild(struct sk_buff *skb) |
---|
.. | .. |
---|
2531 | 2735 | * |
---|
2532 | 2736 | * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) |
---|
2533 | 2737 | * to reduce average number of cache lines per packet. |
---|
2534 | | - * get_rps_cpus() for example only access one 64 bytes aligned block : |
---|
| 2738 | + * get_rps_cpu() for example only access one 64 bytes aligned block : |
---|
2535 | 2739 | * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) |
---|
2536 | 2740 | */ |
---|
2537 | 2741 | #ifndef NET_SKB_PAD |
---|
.. | .. |
---|
2542 | 2746 | |
---|
2543 | 2747 | static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) |
---|
2544 | 2748 | { |
---|
2545 | | - if (unlikely(skb_is_nonlinear(skb))) { |
---|
2546 | | - WARN_ON(1); |
---|
| 2749 | + if (WARN_ON(skb_is_nonlinear(skb))) |
---|
2547 | 2750 | return; |
---|
2548 | | - } |
---|
2549 | 2751 | skb->len = len; |
---|
2550 | 2752 | skb_set_tail_pointer(skb, len); |
---|
2551 | 2753 | } |
---|
.. | .. |
---|
2653 | 2855 | * the list and one reference dropped. This function does not take the |
---|
2654 | 2856 | * list lock and the caller must hold the relevant locks to use it. |
---|
2655 | 2857 | */ |
---|
2656 | | -void skb_queue_purge(struct sk_buff_head *list); |
---|
2657 | 2858 | static inline void __skb_queue_purge(struct sk_buff_head *list) |
---|
2658 | 2859 | { |
---|
2659 | 2860 | struct sk_buff *skb; |
---|
2660 | 2861 | while ((skb = __skb_dequeue(list)) != NULL) |
---|
2661 | 2862 | kfree_skb(skb); |
---|
2662 | 2863 | } |
---|
| 2864 | +void skb_queue_purge(struct sk_buff_head *list); |
---|
2663 | 2865 | |
---|
2664 | 2866 | unsigned int skb_rbtree_purge(struct rb_root *root); |
---|
2665 | 2867 | |
---|
.. | .. |
---|
2801 | 3003 | */ |
---|
2802 | 3004 | static inline unsigned int skb_frag_off(const skb_frag_t *frag) |
---|
2803 | 3005 | { |
---|
2804 | | - return frag->page_offset; |
---|
| 3006 | + return frag->bv_offset; |
---|
| 3007 | +} |
---|
| 3008 | + |
---|
| 3009 | +/** |
---|
| 3010 | + * skb_frag_off_add() - Increments the offset of a skb fragment by @delta |
---|
| 3011 | + * @frag: skb fragment |
---|
| 3012 | + * @delta: value to add |
---|
| 3013 | + */ |
---|
| 3014 | +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) |
---|
| 3015 | +{ |
---|
| 3016 | + frag->bv_offset += delta; |
---|
| 3017 | +} |
---|
| 3018 | + |
---|
| 3019 | +/** |
---|
| 3020 | + * skb_frag_off_set() - Sets the offset of a skb fragment |
---|
| 3021 | + * @frag: skb fragment |
---|
| 3022 | + * @offset: offset of fragment |
---|
| 3023 | + */ |
---|
| 3024 | +static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) |
---|
| 3025 | +{ |
---|
| 3026 | + frag->bv_offset = offset; |
---|
| 3027 | +} |
---|
| 3028 | + |
---|
| 3029 | +/** |
---|
| 3030 | + * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment |
---|
| 3031 | + * @fragto: skb fragment where offset is set |
---|
| 3032 | + * @fragfrom: skb fragment offset is copied from |
---|
| 3033 | + */ |
---|
| 3034 | +static inline void skb_frag_off_copy(skb_frag_t *fragto, |
---|
| 3035 | + const skb_frag_t *fragfrom) |
---|
| 3036 | +{ |
---|
| 3037 | + fragto->bv_offset = fragfrom->bv_offset; |
---|
2805 | 3038 | } |
---|
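
Together with skb_frag_page() just below, these accessors replace direct frag->page_offset reads. A sketch of copying one fragment's bytes, assuming a lowmem page (see skb_frag_must_loop() for the highmem case):

```c
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void my_copy_frag(void *dst, const skb_frag_t *frag)
{
	const void *src = page_address(skb_frag_page(frag)) +
			  skb_frag_off(frag);

	memcpy(dst, src, skb_frag_size(frag));
}
```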
2806 | 3039 | |
---|
2807 | 3040 | /** |
---|
.. | .. |
---|
2812 | 3045 | */ |
---|
2813 | 3046 | static inline struct page *skb_frag_page(const skb_frag_t *frag) |
---|
2814 | 3047 | { |
---|
2815 | | - return frag->page.p; |
---|
| 3048 | + return frag->bv_page; |
---|
2816 | 3049 | } |
---|
2817 | 3050 | |
---|
2818 | 3051 | /** |
---|
.. | .. |
---|
2870 | 3103 | */ |
---|
2871 | 3104 | static inline void *skb_frag_address(const skb_frag_t *frag) |
---|
2872 | 3105 | { |
---|
2873 | | - return page_address(skb_frag_page(frag)) + frag->page_offset; |
---|
| 3106 | + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); |
---|
2874 | 3107 | } |
---|
2875 | 3108 | |
---|
2876 | 3109 | /** |
---|
.. | .. |
---|
2886 | 3119 | if (unlikely(!ptr)) |
---|
2887 | 3120 | return NULL; |
---|
2888 | 3121 | |
---|
2889 | | - return ptr + frag->page_offset; |
---|
| 3122 | + return ptr + skb_frag_off(frag); |
---|
| 3123 | +} |
---|
| 3124 | + |
---|
| 3125 | +/** |
---|
| 3126 | + * skb_frag_page_copy() - sets the page in a fragment from another fragment |
---|
| 3127 | + * @fragto: skb fragment where page is set |
---|
| 3128 | + * @fragfrom: skb fragment page is copied from |
---|
| 3129 | + */ |
---|
| 3130 | +static inline void skb_frag_page_copy(skb_frag_t *fragto, |
---|
| 3131 | + const skb_frag_t *fragfrom) |
---|
| 3132 | +{ |
---|
| 3133 | + fragto->bv_page = fragfrom->bv_page; |
---|
2890 | 3134 | } |
---|
2891 | 3135 | |
---|
2892 | 3136 | /** |
---|
.. | .. |
---|
2898 | 3142 | */ |
---|
2899 | 3143 | static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) |
---|
2900 | 3144 | { |
---|
2901 | | - frag->page.p = page; |
---|
| 3145 | + frag->bv_page = page; |
---|
2902 | 3146 | } |
---|
2903 | 3147 | |
---|
2904 | 3148 | /** |
---|
.. | .. |
---|
2934 | 3178 | enum dma_data_direction dir) |
---|
2935 | 3179 | { |
---|
2936 | 3180 | return dma_map_page(dev, skb_frag_page(frag), |
---|
2937 | | - frag->page_offset + offset, size, dir); |
---|
| 3181 | + skb_frag_off(frag) + offset, size, dir); |
---|
2938 | 3182 | } |
---|
2939 | 3183 | |
---|
2940 | 3184 | static inline struct sk_buff *pskb_copy(struct sk_buff *skb, |
---|
.. | .. |
---|
3037 | 3281 | } |
---|
3038 | 3282 | |
---|
3039 | 3283 | /** |
---|
3040 | | - * skb_put_padto - increase size and pad an skbuff up to a minimal size |
---|
| 3284 | + * __skb_put_padto - increase size and pad an skbuff up to a minimal size |
---|
3041 | 3285 | * @skb: buffer to pad |
---|
3042 | 3286 | * @len: minimal length |
---|
3043 | 3287 | * @free_on_error: free buffer on error |
---|
.. | .. |
---|
3102 | 3346 | if (skb_zcopy(skb)) |
---|
3103 | 3347 | return false; |
---|
3104 | 3348 | if (i) { |
---|
3105 | | - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; |
---|
| 3349 | + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; |
---|
3106 | 3350 | |
---|
3107 | 3351 | return page == skb_frag_page(frag) && |
---|
3108 | | - off == frag->page_offset + skb_frag_size(frag); |
---|
| 3352 | + off == skb_frag_off(frag) + skb_frag_size(frag); |
---|
3109 | 3353 | } |
---|
3110 | 3354 | return false; |
---|
3111 | 3355 | } |
---|
.. | .. |
---|
3324 | 3568 | for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) |
---|
3325 | 3569 | |
---|
3326 | 3570 | |
---|
3327 | | -int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, |
---|
| 3571 | +int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, |
---|
| 3572 | + int *err, long *timeo_p, |
---|
3328 | 3573 | const struct sk_buff *skb); |
---|
3329 | 3574 | struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, |
---|
3330 | 3575 | struct sk_buff_head *queue, |
---|
3331 | 3576 | unsigned int flags, |
---|
3332 | | - void (*destructor)(struct sock *sk, |
---|
3333 | | - struct sk_buff *skb), |
---|
3334 | | - int *peeked, int *off, int *err, |
---|
| 3577 | + int *off, int *err, |
---|
3335 | 3578 | struct sk_buff **last); |
---|
3336 | | -struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, |
---|
3337 | | - void (*destructor)(struct sock *sk, |
---|
3338 | | - struct sk_buff *skb), |
---|
3339 | | - int *peeked, int *off, int *err, |
---|
| 3579 | +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, |
---|
| 3580 | + struct sk_buff_head *queue, |
---|
| 3581 | + unsigned int flags, int *off, int *err, |
---|
3340 | 3582 | struct sk_buff **last); |
---|
3341 | | -struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, |
---|
3342 | | - void (*destructor)(struct sock *sk, |
---|
3343 | | - struct sk_buff *skb), |
---|
3344 | | - int *peeked, int *off, int *err); |
---|
| 3583 | +struct sk_buff *__skb_recv_datagram(struct sock *sk, |
---|
| 3584 | + struct sk_buff_head *sk_queue, |
---|
| 3585 | + unsigned int flags, int *off, int *err); |
---|
3345 | 3586 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, |
---|
3346 | 3587 | int *err); |
---|
3347 | 3588 | __poll_t datagram_poll(struct file *file, struct socket *sock, |
---|
.. | .. |
---|
3355 | 3596 | } |
---|
3356 | 3597 | int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, |
---|
3357 | 3598 | struct msghdr *msg); |
---|
| 3599 | +int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, |
---|
| 3600 | + struct iov_iter *to, int len, |
---|
| 3601 | + struct ahash_request *hash); |
---|
3358 | 3602 | int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, |
---|
3359 | 3603 | struct iov_iter *from, int len); |
---|
3360 | 3604 | int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); |
---|
.. | .. |
---|
3369 | 3613 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); |
---|
3370 | 3614 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); |
---|
3371 | 3615 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, |
---|
3372 | | - int len, __wsum csum); |
---|
| 3616 | + int len); |
---|
3373 | 3617 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, |
---|
3374 | 3618 | struct pipe_inode_info *pipe, unsigned int len, |
---|
3375 | 3619 | unsigned int flags); |
---|
3376 | 3620 | int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, |
---|
3377 | 3621 | int len); |
---|
3378 | | -int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); |
---|
3379 | 3622 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
---|
3380 | 3623 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); |
---|
3381 | 3624 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, |
---|
.. | .. |
---|
3386 | 3629 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); |
---|
3387 | 3630 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); |
---|
3388 | 3631 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
---|
| 3632 | +struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, |
---|
| 3633 | + unsigned int offset); |
---|
3389 | 3634 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
---|
3390 | 3635 | int skb_ensure_writable(struct sk_buff *skb, int write_len); |
---|
3391 | 3636 | int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); |
---|
3392 | 3637 | int skb_vlan_pop(struct sk_buff *skb); |
---|
3393 | 3638 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); |
---|
| 3639 | +int skb_eth_pop(struct sk_buff *skb); |
---|
| 3640 | +int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, |
---|
| 3641 | + const unsigned char *src); |
---|
| 3642 | +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, |
---|
| 3643 | + int mac_len, bool ethernet); |
---|
| 3644 | +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, |
---|
| 3645 | + bool ethernet); |
---|
| 3646 | +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); |
---|
| 3647 | +int skb_mpls_dec_ttl(struct sk_buff *skb); |
---|
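These helpers give TC actions and Open vSwitch a common way to edit the MPLS label stack. A hypothetical encapsulation sketch; the label-stack-entry layout follows RFC 3032 (label[31:12], TC[11:9], S[8], TTL[7:0]), and the label/TTL values are illustrative only:

```c
/* Hypothetical: push one bottom-of-stack MPLS LSE onto an Ethernet frame.
 * LSE layout per RFC 3032: label[31:12], TC[11:9], S[8], TTL[7:0].
 */
static int example_mpls_encap(struct sk_buff *skb)
{
	__be32 lse = htonl((100 << 12) | (1 << 8) | 64); /* label 100, BoS, TTL 64 */

	return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC), skb->mac_len, true);
}
```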
3394 | 3648 | struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, |
---|
3395 | 3649 | gfp_t gfp); |
---|
3396 | 3650 | |
---|
.. | .. |
---|
3494 | 3748 | /** |
---|
3495 | 3749 | * skb_get_timestamp - get timestamp from a skb |
---|
3496 | 3750 | * @skb: skb to get stamp from |
---|
3497 | | - * @stamp: pointer to struct timeval to store stamp in |
---|
| 3751 | + * @stamp: pointer to struct __kernel_old_timeval to store stamp in |
---|
3498 | 3752 | * |
---|
3499 | 3753 | * Timestamps are stored in the skb as offsets to a base timestamp. |
---|
3500 | | - * This function converts the offset back to a struct timeval and stores
---|
| 3754 | + * This function converts the offset back to a struct __kernel_old_timeval and stores
---|
3501 | 3755 | * it in stamp. |
---|
3502 | 3756 | */ |
---|
3503 | 3757 | static inline void skb_get_timestamp(const struct sk_buff *skb, |
---|
3504 | | - struct timeval *stamp) |
---|
| 3758 | + struct __kernel_old_timeval *stamp) |
---|
3505 | 3759 | { |
---|
3506 | | - *stamp = ktime_to_timeval(skb->tstamp); |
---|
| 3760 | + *stamp = ns_to_kernel_old_timeval(skb->tstamp); |
---|
| 3761 | +} |
---|
| 3762 | + |
---|
| 3763 | +static inline void skb_get_new_timestamp(const struct sk_buff *skb, |
---|
| 3764 | + struct __kernel_sock_timeval *stamp) |
---|
| 3765 | +{ |
---|
| 3766 | + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); |
---|
| 3767 | + |
---|
| 3768 | + stamp->tv_sec = ts.tv_sec; |
---|
| 3769 | + stamp->tv_usec = ts.tv_nsec / 1000; |
---|
3507 | 3770 | } |
---|
3508 | 3771 | |
---|
3509 | 3772 | static inline void skb_get_timestampns(const struct sk_buff *skb, |
---|
3510 | | - struct timespec *stamp) |
---|
| 3773 | + struct __kernel_old_timespec *stamp) |
---|
3511 | 3774 | { |
---|
3512 | | - *stamp = ktime_to_timespec(skb->tstamp); |
---|
| 3775 | + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); |
---|
| 3776 | + |
---|
| 3777 | + stamp->tv_sec = ts.tv_sec; |
---|
| 3778 | + stamp->tv_nsec = ts.tv_nsec; |
---|
| 3779 | +} |
---|
| 3780 | + |
---|
| 3781 | +static inline void skb_get_new_timestampns(const struct sk_buff *skb, |
---|
| 3782 | + struct __kernel_timespec *stamp) |
---|
| 3783 | +{ |
---|
| 3784 | + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); |
---|
| 3785 | + |
---|
| 3786 | + stamp->tv_sec = ts.tv_sec; |
---|
| 3787 | + stamp->tv_nsec = ts.tv_nsec; |
---|
3513 | 3788 | } |
---|
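The old/new split is part of the y2038 work: `struct __kernel_sock_timeval` and `struct __kernel_timespec` carry 64-bit seconds even on 32-bit ABIs. A minimal sketch comparing the two forms (the cmsg plumbing a real SO_TIMESTAMP_NEW path needs is omitted):

```c
/* Minimal sketch: fill both the legacy and the y2038-safe forms; only
 * the __kernel_sock_timeval variant keeps 64-bit seconds on 32-bit ABIs.
 */
static void example_stamps(const struct sk_buff *skb)
{
	struct __kernel_old_timeval old_tv;
	struct __kernel_sock_timeval new_tv;

	skb_get_timestamp(skb, &old_tv);	/* truncates past 2038 on 32-bit */
	skb_get_new_timestamp(skb, &new_tv);	/* 64-bit tv_sec everywhere */
}
```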
3514 | 3789 | |
---|
3515 | 3790 | static inline void __net_timestamp(struct sk_buff *skb) |
---|
.. | .. |
---|
3551 | 3826 | #define __it(x, op) (x -= sizeof(u##op)) |
---|
3552 | 3827 | #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) |
---|
3553 | 3828 | case 32: diffs |= __it_diff(a, b, 64); |
---|
| 3829 | + fallthrough; |
---|
3554 | 3830 | case 24: diffs |= __it_diff(a, b, 64); |
---|
| 3831 | + fallthrough; |
---|
3555 | 3832 | case 16: diffs |= __it_diff(a, b, 64); |
---|
| 3833 | + fallthrough; |
---|
3556 | 3834 | case 8: diffs |= __it_diff(a, b, 64); |
---|
3557 | 3835 | break; |
---|
3558 | 3836 | case 28: diffs |= __it_diff(a, b, 64); |
---|
| 3837 | + fallthrough; |
---|
3559 | 3838 | case 20: diffs |= __it_diff(a, b, 64); |
---|
| 3839 | + fallthrough; |
---|
3560 | 3840 | case 12: diffs |= __it_diff(a, b, 64); |
---|
| 3841 | + fallthrough; |
---|
3561 | 3842 | case 4: diffs |= __it_diff(a, b, 32); |
---|
3562 | 3843 | break; |
---|
3563 | 3844 | } |
---|
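`fallthrough;` is the pseudo-keyword that replaces bare `/* fall through */` comments so `-Wimplicit-fallthrough` can be enforced tree-wide. Roughly how it is defined, paraphrasing `linux/compiler_attributes.h`:

```c
/* Rough definition, paraphrased from linux/compiler_attributes.h: use the
 * compiler attribute when available, otherwise an empty statement.
 */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif
```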
.. | .. |
---|
3618 | 3899 | * must call this function to return the skb to the stack with a
---|
3619 | 3900 | * timestamp. |
---|
3620 | 3901 | * |
---|
3621 | | - * @skb: clone of the the original outgoing packet |
---|
| 3902 | + * @skb: clone of the original outgoing packet |
---|
3622 | 3903 | * @hwtstamps: hardware time stamps |
---|
3623 | 3904 | * |
---|
3624 | 3905 | */ |
---|
.. | .. |
---|
3721 | 4002 | skb->csum_level++; |
---|
3722 | 4003 | } else if (skb->ip_summed == CHECKSUM_NONE) { |
---|
3723 | 4004 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
---|
| 4005 | + skb->csum_level = 0; |
---|
| 4006 | + } |
---|
| 4007 | +} |
---|
| 4008 | + |
---|
| 4009 | +static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) |
---|
| 4010 | +{ |
---|
| 4011 | + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
---|
| 4012 | + skb->ip_summed = CHECKSUM_NONE; |
---|
3724 | 4013 | skb->csum_level = 0; |
---|
3725 | 4014 | } |
---|
3726 | 4015 | } |
---|
.. | .. |
---|
3840 | 4129 | return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); |
---|
3841 | 4130 | } |
---|
3842 | 4131 | |
---|
3843 | | -static inline void __skb_checksum_convert(struct sk_buff *skb, |
---|
3844 | | - __sum16 check, __wsum pseudo) |
---|
| 4132 | +static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) |
---|
3845 | 4133 | { |
---|
3846 | 4134 | skb->csum = ~pseudo; |
---|
3847 | 4135 | skb->ip_summed = CHECKSUM_COMPLETE; |
---|
3848 | 4136 | } |
---|
3849 | 4137 | |
---|
3850 | | -#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ |
---|
| 4138 | +#define skb_checksum_try_convert(skb, proto, compute_pseudo) \ |
---|
3851 | 4139 | do { \ |
---|
3852 | 4140 | if (__skb_checksum_convert_check(skb)) \ |
---|
3853 | | - __skb_checksum_convert(skb, check, \ |
---|
3854 | | - compute_pseudo(skb, proto)); \ |
---|
| 4141 | + __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \ |
---|
3855 | 4142 | } while (0) |
---|
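With the unused `check` argument dropped, callers pass just the protocol and a pseudo-header routine. A sketch of how a UDP-over-IPv4 receive path would use the trimmed macro, assuming `inet_compute_pseudo()` as the pseudo-header helper:

```c
/* Sketch of a UDP/IPv4 receive path using the trimmed macro; assumes
 * inet_compute_pseudo() computes the IPv4 pseudo-header sum.
 */
static void example_udp4_try_convert(struct sk_buff *skb)
{
	skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
}
```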
3856 | 4143 | |
---|
3857 | 4144 | static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, |
---|
.. | .. |
---|
3891 | 4178 | static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) |
---|
3892 | 4179 | { |
---|
3893 | 4180 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
---|
3894 | | - return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); |
---|
| 4181 | + return (void *)(skb->_nfct & NFCT_PTRMASK); |
---|
3895 | 4182 | #else |
---|
3896 | 4183 | return NULL; |
---|
3897 | 4184 | #endif |
---|
3898 | 4185 | } |
---|
3899 | 4186 | |
---|
3900 | | -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
---|
3901 | | -void nf_conntrack_destroy(struct nf_conntrack *nfct); |
---|
3902 | | -static inline void nf_conntrack_put(struct nf_conntrack *nfct) |
---|
| 4187 | +static inline unsigned long skb_get_nfct(const struct sk_buff *skb) |
---|
3903 | 4188 | { |
---|
3904 | | - if (nfct && atomic_dec_and_test(&nfct->use)) |
---|
3905 | | - nf_conntrack_destroy(nfct); |
---|
3906 | | -} |
---|
3907 | | -static inline void nf_conntrack_get(struct nf_conntrack *nfct) |
---|
3908 | | -{ |
---|
3909 | | - if (nfct) |
---|
3910 | | - atomic_inc(&nfct->use); |
---|
3911 | | -} |
---|
| 4189 | +#if IS_ENABLED(CONFIG_NF_CONNTRACK) |
---|
| 4190 | + return skb->_nfct; |
---|
| 4191 | +#else |
---|
| 4192 | + return 0UL; |
---|
3912 | 4193 | #endif |
---|
| 4194 | +} |
---|
| 4195 | + |
---|
| 4196 | +static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) |
---|
| 4197 | +{ |
---|
| 4198 | +#if IS_ENABLED(CONFIG_NF_CONNTRACK) |
---|
| 4199 | + skb->_nfct = nfct; |
---|
| 4200 | +#endif |
---|
| 4201 | +} |
---|
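These accessors wrap the combined pointer-plus-ctinfo word so conntrack-aware code can save and restore it without `#ifdef`s. A hypothetical save/restore sketch (the reference count is unchanged because the same reference is put back):

```c
/* Hypothetical: stash the conntrack word across a transformation that
 * would otherwise clobber it, then restore the same reference.
 */
static void example_preserve_ct(struct sk_buff *skb)
{
	unsigned long nfct = skb_get_nfct(skb);

	/* ... operate on skb ... */

	skb_set_nfct(skb, nfct);
}
```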
| 4202 | + |
---|
| 4203 | +#ifdef CONFIG_SKB_EXTENSIONS |
---|
| 4204 | +enum skb_ext_id { |
---|
3913 | 4205 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
---|
3914 | | -static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) |
---|
| 4206 | + SKB_EXT_BRIDGE_NF, |
---|
| 4207 | +#endif |
---|
| 4208 | +#ifdef CONFIG_XFRM |
---|
| 4209 | + SKB_EXT_SEC_PATH, |
---|
| 4210 | +#endif |
---|
| 4211 | +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) |
---|
| 4212 | + TC_SKB_EXT, |
---|
| 4213 | +#endif |
---|
| 4214 | +#if IS_ENABLED(CONFIG_MPTCP) |
---|
| 4215 | + SKB_EXT_MPTCP, |
---|
| 4216 | +#endif |
---|
| 4217 | +#if IS_ENABLED(CONFIG_KCOV) |
---|
| 4218 | + SKB_EXT_KCOV_HANDLE, |
---|
| 4219 | +#endif |
---|
| 4220 | + SKB_EXT_NUM, /* must be last */ |
---|
| 4221 | +}; |
---|
| 4222 | + |
---|
| 4223 | +/** |
---|
| 4224 | + * struct skb_ext - sk_buff extensions |
---|
| 4225 | + * @refcnt: 1 on allocation, deallocated on 0 |
---|
| 4226 | + * @offset: offset to add to @data to obtain extension address |
---|
| 4227 | + * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units |
---|
| 4228 | + * @data: start of extension data, variable sized |
---|
| 4229 | + * |
---|
| 4230 | + * Note: offsets/lengths are stored in chunks of 8 bytes; this allows
---|
| 4231 | + * 'u8' types to be used while supporting up to 2KB of extension data.
---|
| 4232 | + */ |
---|
| 4233 | +struct skb_ext { |
---|
| 4234 | + refcount_t refcnt; |
---|
| 4235 | + u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ |
---|
| 4236 | + u8 chunks; /* same */ |
---|
| 4237 | + char data[] __aligned(8); |
---|
| 4238 | +}; |
---|
| 4239 | + |
---|
| 4240 | +struct skb_ext *__skb_ext_alloc(gfp_t flags); |
---|
| 4241 | +void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, |
---|
| 4242 | + struct skb_ext *ext); |
---|
| 4243 | +void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); |
---|
| 4244 | +void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); |
---|
| 4245 | +void __skb_ext_put(struct skb_ext *ext); |
---|
| 4246 | + |
---|
| 4247 | +static inline void skb_ext_put(struct sk_buff *skb) |
---|
3915 | 4248 | { |
---|
3916 | | - if (nf_bridge && refcount_dec_and_test(&nf_bridge->use)) |
---|
3917 | | - kfree(nf_bridge); |
---|
| 4249 | + if (skb->active_extensions) |
---|
| 4250 | + __skb_ext_put(skb->extensions); |
---|
3918 | 4251 | } |
---|
3919 | | -static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) |
---|
| 4252 | + |
---|
| 4253 | +static inline void __skb_ext_copy(struct sk_buff *dst, |
---|
| 4254 | + const struct sk_buff *src) |
---|
3920 | 4255 | { |
---|
3921 | | - if (nf_bridge) |
---|
3922 | | - refcount_inc(&nf_bridge->use); |
---|
| 4256 | + dst->active_extensions = src->active_extensions; |
---|
| 4257 | + |
---|
| 4258 | + if (src->active_extensions) { |
---|
| 4259 | + struct skb_ext *ext = src->extensions; |
---|
| 4260 | + |
---|
| 4261 | + refcount_inc(&ext->refcnt); |
---|
| 4262 | + dst->extensions = ext; |
---|
| 4263 | + } |
---|
3923 | 4264 | } |
---|
3924 | | -#endif /* CONFIG_BRIDGE_NETFILTER */ |
---|
3925 | | -static inline void nf_reset(struct sk_buff *skb) |
---|
| 4265 | + |
---|
| 4266 | +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) |
---|
| 4267 | +{ |
---|
| 4268 | + skb_ext_put(dst); |
---|
| 4269 | + __skb_ext_copy(dst, src); |
---|
| 4270 | +} |
---|
| 4271 | + |
---|
| 4272 | +static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) |
---|
| 4273 | +{ |
---|
| 4274 | + return !!ext->offset[i]; |
---|
| 4275 | +} |
---|
| 4276 | + |
---|
| 4277 | +static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) |
---|
| 4278 | +{ |
---|
| 4279 | + return skb->active_extensions & (1 << id); |
---|
| 4280 | +} |
---|
| 4281 | + |
---|
| 4282 | +static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) |
---|
| 4283 | +{ |
---|
| 4284 | + if (skb_ext_exist(skb, id)) |
---|
| 4285 | + __skb_ext_del(skb, id); |
---|
| 4286 | +} |
---|
| 4287 | + |
---|
| 4288 | +static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) |
---|
| 4289 | +{ |
---|
| 4290 | + if (skb_ext_exist(skb, id)) { |
---|
| 4291 | + struct skb_ext *ext = skb->extensions; |
---|
| 4292 | + |
---|
| 4293 | + return (void *)ext + (ext->offset[id] << 3); |
---|
| 4294 | + } |
---|
| 4295 | + |
---|
| 4296 | + return NULL; |
---|
| 4297 | +} |
---|
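Since offsets are u8 values counted in 8-byte chunks, an extension can sit at most 255 * 8 = 2040 bytes into the block, matching the "up to 2KB" note above. Typical usage pairs `skb_ext_add()` with `skb_ext_find()`; a hypothetical pair using the TC extension, assuming `struct tc_skb_ext` with a `chain` member as registered for `TC_SKB_EXT`:

```c
/* Hypothetical producer/consumer pair for the TC extension; assumes
 * struct tc_skb_ext has a 'chain' member as registered for TC_SKB_EXT.
 */
static void example_ext_set_chain(struct sk_buff *skb, u32 chain)
{
	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

	if (ext)
		ext->chain = chain;
}

static u32 example_ext_get_chain(const struct sk_buff *skb)
{
	struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

	return ext ? ext->chain : 0;
}
```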
| 4298 | + |
---|
| 4299 | +static inline void skb_ext_reset(struct sk_buff *skb) |
---|
| 4300 | +{ |
---|
| 4301 | + if (unlikely(skb->active_extensions)) { |
---|
| 4302 | + __skb_ext_put(skb->extensions); |
---|
| 4303 | + skb->active_extensions = 0; |
---|
| 4304 | + } |
---|
| 4305 | +} |
---|
| 4306 | + |
---|
| 4307 | +static inline bool skb_has_extensions(struct sk_buff *skb) |
---|
| 4308 | +{ |
---|
| 4309 | + return unlikely(skb->active_extensions); |
---|
| 4310 | +} |
---|
| 4311 | +#else |
---|
| 4312 | +static inline void skb_ext_put(struct sk_buff *skb) {} |
---|
| 4313 | +static inline void skb_ext_reset(struct sk_buff *skb) {} |
---|
| 4314 | +static inline void skb_ext_del(struct sk_buff *skb, int unused) {} |
---|
| 4315 | +static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} |
---|
| 4316 | +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} |
---|
| 4317 | +static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } |
---|
| 4318 | +#endif /* CONFIG_SKB_EXTENSIONS */ |
---|
| 4319 | + |
---|
| 4320 | +static inline void nf_reset_ct(struct sk_buff *skb) |
---|
3926 | 4321 | { |
---|
3927 | 4322 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
---|
3928 | 4323 | nf_conntrack_put(skb_nfct(skb)); |
---|
3929 | 4324 | skb->_nfct = 0; |
---|
3930 | 4325 | #endif |
---|
3931 | | -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
---|
3932 | | - nf_bridge_put(skb->nf_bridge); |
---|
3933 | | -#endif |
---|
3934 | | - skb->nf_bridge = NULL; |
---|
3935 | 4326 | } |
---|
3936 | 4327 | |
---|
3937 | 4328 | static inline void nf_reset_trace(struct sk_buff *skb) |
---|
.. | .. |
---|
3948 | 4339 | #endif |
---|
3949 | 4340 | } |
---|
3950 | 4341 | |
---|
3951 | | -/* Note: This doesn't put any conntrack and bridge info in dst. */ |
---|
| 4342 | +/* Note: This doesn't put any conntrack info in dst. */ |
---|
3952 | 4343 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, |
---|
3953 | 4344 | bool copy) |
---|
3954 | 4345 | { |
---|
3955 | 4346 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
---|
3956 | 4347 | dst->_nfct = src->_nfct; |
---|
3957 | 4348 | nf_conntrack_get(skb_nfct(src)); |
---|
3958 | | -#endif |
---|
3959 | | -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
---|
3960 | | - dst->nf_bridge = src->nf_bridge; |
---|
3961 | | - nf_bridge_get(src->nf_bridge); |
---|
3962 | 4349 | #endif |
---|
3963 | 4350 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) |
---|
3964 | 4351 | if (copy) |
---|
.. | .. |
---|
3970 | 4357 | { |
---|
3971 | 4358 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
---|
3972 | 4359 | nf_conntrack_put(skb_nfct(dst)); |
---|
3973 | | -#endif |
---|
3974 | | -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
---|
3975 | | - nf_bridge_put(dst->nf_bridge); |
---|
3976 | 4360 | #endif |
---|
3977 | 4361 | __nf_copy(dst, src, true); |
---|
3978 | 4362 | } |
---|
.. | .. |
---|
3995 | 4379 | { } |
---|
3996 | 4380 | #endif |
---|
3997 | 4381 | |
---|
| 4382 | +static inline int secpath_exists(const struct sk_buff *skb) |
---|
| 4383 | +{ |
---|
| 4384 | +#ifdef CONFIG_XFRM |
---|
| 4385 | + return skb_ext_exist(skb, SKB_EXT_SEC_PATH); |
---|
| 4386 | +#else |
---|
| 4387 | + return 0; |
---|
| 4388 | +#endif |
---|
| 4389 | +} |
---|
| 4390 | + |
---|
3998 | 4391 | static inline bool skb_irq_freeable(const struct sk_buff *skb) |
---|
3999 | 4392 | { |
---|
4000 | 4393 | return !skb->destructor && |
---|
4001 | | -#if IS_ENABLED(CONFIG_XFRM) |
---|
4002 | | - !skb->sp && |
---|
4003 | | -#endif |
---|
| 4394 | + !secpath_exists(skb) && |
---|
4004 | 4395 | !skb_nfct(skb) && |
---|
4005 | 4396 | !skb->_skb_refdst && |
---|
4006 | 4397 | !skb_has_frag_list(skb); |
---|
.. | .. |
---|
4046 | 4437 | return skb->dst_pending_confirm != 0; |
---|
4047 | 4438 | } |
---|
4048 | 4439 | |
---|
4049 | | -static inline struct sec_path *skb_sec_path(struct sk_buff *skb) |
---|
| 4440 | +static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) |
---|
4050 | 4441 | { |
---|
4051 | 4442 | #ifdef CONFIG_XFRM |
---|
4052 | | - return skb->sp; |
---|
| 4443 | + return skb_ext_find(skb, SKB_EXT_SEC_PATH); |
---|
4053 | 4444 | #else |
---|
4054 | 4445 | return NULL; |
---|
4055 | 4446 | #endif |
---|
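`skb_sec_path()` is now const-correct and backed by the `SKB_EXT_SEC_PATH` extension rather than a dedicated `skb->sp` pointer. A sketch of the usual check, assuming `struct sec_path` keeps its `len` member (number of applied transforms):

```c
/* Sketch: test whether IPsec transforms were applied on receive; assumes
 * struct sec_path keeps its 'len' member.
 */
static bool example_ipsec_seen(const struct sk_buff *skb)
{
	const struct sec_path *sp = skb_sec_path(skb);

	return sp && sp->len;
}
```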
.. | .. |
---|
4070 | 4461 | __wsum csum; |
---|
4071 | 4462 | __u16 csum_start; |
---|
4072 | 4463 | }; |
---|
4073 | | -#define SKB_SGO_CB_OFFSET 32 |
---|
4074 | | -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) |
---|
| 4464 | +#define SKB_GSO_CB_OFFSET 32 |
---|
| 4465 | +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) |
---|
4075 | 4466 | |
---|
4076 | 4467 | static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) |
---|
4077 | 4468 | { |
---|
.. | .. |
---|
4232 | 4623 | /* Local Checksum Offload. |
---|
4233 | 4624 | * Compute outer checksum based on the assumption that the |
---|
4234 | 4625 | * inner checksum will be offloaded later. |
---|
4235 | | - * See Documentation/networking/checksum-offloads.txt for |
---|
| 4626 | + * See Documentation/networking/checksum-offloads.rst for |
---|
4236 | 4627 | * explanation of how this works. |
---|
4237 | 4628 | * Fill in outer checksum adjustment (e.g. with sum of outer |
---|
4238 | 4629 | * pseudo-header) before calling. |
---|
.. | .. |
---|
4254 | 4645 | return csum_partial(l4_hdr, csum_start - l4_hdr, partial); |
---|
4255 | 4646 | } |
---|
4256 | 4647 | |
---|
| 4648 | +static inline bool skb_is_redirected(const struct sk_buff *skb) |
---|
| 4649 | +{ |
---|
| 4650 | +#ifdef CONFIG_NET_REDIRECT |
---|
| 4651 | + return skb->redirected; |
---|
| 4652 | +#else |
---|
| 4653 | + return false; |
---|
| 4654 | +#endif |
---|
| 4655 | +} |
---|
| 4656 | + |
---|
| 4657 | +static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) |
---|
| 4658 | +{ |
---|
| 4659 | +#ifdef CONFIG_NET_REDIRECT |
---|
| 4660 | + skb->redirected = 1; |
---|
| 4661 | + skb->from_ingress = from_ingress; |
---|
| 4662 | + if (skb->from_ingress) |
---|
| 4663 | + skb->tstamp = 0; |
---|
| 4664 | +#endif |
---|
| 4665 | +} |
---|
| 4666 | + |
---|
| 4667 | +static inline void skb_reset_redirect(struct sk_buff *skb) |
---|
| 4668 | +{ |
---|
| 4669 | +#ifdef CONFIG_NET_REDIRECT |
---|
| 4670 | + skb->redirected = 0; |
---|
| 4671 | +#endif |
---|
| 4672 | +} |
---|
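The redirected/from_ingress bits record that TC or eBPF rerouted the packet; zeroing `tstamp` on ingress redirects keeps a stale receive timestamp from being mistaken for a transmit deadline by EDT-style pacing. A hypothetical mirred-style action body:

```c
/* Hypothetical mirred-style redirect: clone and mark the clone as
 * redirected, recording whether it was taken from ingress.
 */
static struct sk_buff *example_redirect_clone(struct sk_buff *skb,
					      bool at_ingress)
{
	struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

	if (skb2)
		skb_set_redirected(skb2, at_ingress);
	return skb2;
}
```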
| 4673 | + |
---|
| 4674 | +#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS) |
---|
| 4675 | +static inline void skb_set_kcov_handle(struct sk_buff *skb, |
---|
| 4676 | + const u64 kcov_handle) |
---|
| 4677 | +{ |
---|
| 4678 | + /* Do not allocate skb extensions only to set kcov_handle to zero |
---|
| 4679 | + * (as it is zero by default). However, if the extensions are |
---|
| 4680 | + * already allocated, update kcov_handle anyway since |
---|
| 4681 | + * skb_set_kcov_handle can be called to zero a previously set |
---|
| 4682 | + * value. |
---|
| 4683 | + */ |
---|
| 4684 | + if (skb_has_extensions(skb) || kcov_handle) { |
---|
| 4685 | + u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE); |
---|
| 4686 | + |
---|
| 4687 | + if (kcov_handle_ptr) |
---|
| 4688 | + *kcov_handle_ptr = kcov_handle; |
---|
| 4689 | + } |
---|
| 4690 | +} |
---|
| 4691 | + |
---|
| 4692 | +static inline u64 skb_get_kcov_handle(struct sk_buff *skb) |
---|
| 4693 | +{ |
---|
| 4694 | + u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE); |
---|
| 4695 | + |
---|
| 4696 | + return kcov_handle ? *kcov_handle : 0; |
---|
| 4697 | +} |
---|
| 4698 | +#else |
---|
| 4699 | +static inline void skb_set_kcov_handle(struct sk_buff *skb, |
---|
| 4700 | + const u64 kcov_handle) { } |
---|
| 4701 | +static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; } |
---|
| 4702 | +#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */ |
---|
| 4703 | + |
---|
4257 | 4704 | #endif /* __KERNEL__ */ |
---|
4258 | 4705 | #endif /* _LINUX_SKBUFF_H */ |
---|