2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
--- a/kernel/include/linux/skbuff.h
+++ b/kernel/include/linux/skbuff.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *	Definitions for the 'struct sk_buff' memory handlers.
  *
  *	Authors:
  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  *		Florian La Roche, <rzsfl@rz.uni-sb.de>
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
  */
 
 #ifndef _LINUX_SKBUFF_H
@@ -18,6 +14,7 @@
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/bug.h>
+#include <linux/bvec.h>
 #include <linux/cache.h>
 #include <linux/rbtree.h>
 #include <linux/socket.h>
@@ -40,6 +37,11 @@
 #include <linux/in6.h>
 #include <linux/if_packet.h>
 #include <net/flow.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <linux/netfilter/nf_conntrack_common.h>
+#endif
+#include <linux/android_kabi.h>
+#include <linux/android_vendor.h>
 
 /* The interface for checksum offload between the stack and networking drivers
  * is as follows...
@@ -47,8 +49,8 @@
  * A. IP checksum related features
  *
  * Drivers advertise checksum offload capabilities in the features of a device.
- * From the stack's point of view these are capabilities offered by the driver,
- * a driver typically only advertises features that it is capable of offloading
+ * From the stack's point of view these are capabilities offered by the driver.
+ * A driver typically only advertises features that it is capable of offloading
  * to its device.
 *
 * The checksum related features are:
@@ -63,7 +65,7 @@
  *	TCP or UDP packets over IPv4. These are specifically
  *	unencapsulated packets of the form IPv4|TCP or
  *	IPv4|UDP where the Protocol field in the IPv4 header
- *	is TCP or UDP. The IPv4 header may contain IP options
+ *	is TCP or UDP. The IPv4 header may contain IP options.
  *	This feature cannot be set in features for a device
  *	with NETIF_F_HW_CSUM also set. This feature is being
  *	DEPRECATED (see below).
@@ -71,7 +73,7 @@
  * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
  *	TCP or UDP packets over IPv6. These are specifically
  *	unencapsulated packets of the form IPv6|TCP or
- *	IPv4|UDP where the Next Header field in the IPv6
+ *	IPv6|UDP where the Next Header field in the IPv6
  *	header is either TCP or UDP. IPv6 extension headers
  *	are not supported with this feature. This feature
  *	cannot be set in features for a device with
@@ -79,13 +81,13 @@
  *	DEPRECATED (see below).
  *
  * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- *	This flag is used only used to disable the RX checksum
+ *	This flag is only used to disable the RX checksum
  *	feature for a device. The stack will accept receive
  *	checksum indication in packets received on a device
  *	regardless of whether NETIF_F_RXCSUM is set.
 *
 * B. Checksumming of received packets by device. Indication of checksum
- *   verification is in set skb->ip_summed. Possible values are:
+ *   verification is set in skb->ip_summed. Possible values are:
 *
 * CHECKSUM_NONE:
 *
@@ -115,16 +117,16 @@
  *	the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
  *	For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
  *	and a device is able to verify the checksums for UDP (possibly zero),
- *	GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ *	GRE (checksum flag is set) and TCP, skb->csum_level would be set to
  *	two. If the device were only able to verify the UDP checksum and not
- *	GRE, either because it doesn't support GRE checksum of because GRE
+ *	GRE, either because it doesn't support GRE checksum or because GRE
  *	checksum is bad, skb->csum_level would be set to zero (TCP checksum is
  *	not considered in this case).
 *
 * CHECKSUM_COMPLETE:
 *
 *   This is the most generic way. The device supplied checksum of the _whole_
- *   packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+ *   packet as seen by netif_rx() and fills in skb->csum. This means the
 *   hardware doesn't need to parse L3/L4 headers to implement this.
 *
 * Notes:
@@ -153,8 +155,8 @@
  * from skb->csum_start up to the end, and to record/write the checksum at
  * offset skb->csum_start + skb->csum_offset. A driver may verify that the
  * csum_start and csum_offset values are valid values given the length and
- * offset of the packet, however they should not attempt to validate that the
- * checksum refers to a legitimate transport layer checksum-- it is the
+ * offset of the packet, but it should not attempt to validate that the
+ * checksum refers to a legitimate transport layer checksum -- it is the
 * purview of the stack to validate that csum_start and csum_offset are set
 * correctly.
 *
@@ -178,18 +180,18 @@
 *
 * CHECKSUM_UNNECESSARY:
 *
- *   This has the same meaning on as CHECKSUM_NONE for checksum offload on
+ *   This has the same meaning as CHECKSUM_NONE for checksum offload on
 *   output.
 *
 * CHECKSUM_COMPLETE:
 *   Not used in checksum output. If a driver observes a packet with this value
- *   set in skbuff, if should treat as CHECKSUM_NONE being set.
+ *   set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
 *
 * D. Non-IP checksum (CRC) offloads
 *
 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 *   offloading the SCTP CRC in a packet. To perform this offload the stack
- *   will set set csum_start and csum_offset accordingly, set ip_summed to
+ *   will set csum_start and csum_offset accordingly, set ip_summed to
 *   CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
 *   the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
 *   A driver that supports both IP checksum offload and SCTP CRC32c offload
@@ -200,10 +202,10 @@
 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 *   offloading the FCOE CRC in a packet. To perform this offload the stack
 *   will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- *   accordingly. Note the there is no indication in the skbuff that the
- *   CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
+ *   accordingly. Note that there is no indication in the skbuff that the
+ *   CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
 *   both IP checksum offload and FCOE CRC offload must verify which offload
- *   is configured for a packet presumably by inspecting packet headers.
+ *   is configured for a packet, presumably by inspecting packet headers.
 *
 * E. Checksumming on output with GSO.
 *
@@ -211,9 +213,9 @@
 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 * part of the GSO operation is implied. If a checksum is being offloaded
- * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
- * are set to refer to the outermost checksum being offload (two offloaded
- * checksums are possible with UDP encapsulation).
+ * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
+ * csum_offset are set to refer to the outermost checksum being offloaded
+ * (two offloaded checksums are possible with UDP encapsulation).
 */
 
 /* Don't change this without changing skb_csum_unnecessary! */
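
[annotation, not part of the patch] For section C above, a minimal sketch of
the driver-side handling of CHECKSUM_PARTIAL on transmit. example_hw_set_csum()
is a hypothetical device hook; the two skbuff helpers are real:

	static int example_xmit_csum(struct sk_buff *skb, bool hw_can_csum)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;		/* nothing to offload */

		if (!hw_can_csum)		/* fall back to software */
			return skb_checksum_help(skb);

		/* program the device with the offsets the stack filled in:
		 * where to start summing and where to write the result
		 */
		example_hw_set_csum(skb_checksum_start_offset(skb),
				    skb->csum_offset);
		return 0;
	}
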
@@ -238,21 +240,18 @@
 	SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
 	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+struct ahash_request;
 struct net_device;
 struct scatterlist;
 struct pipe_inode_info;
 struct iov_iter;
 struct napi_struct;
+struct bpf_prog;
+union bpf_attr;
+struct skb_ext;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-struct nf_conntrack {
-	atomic_t use;
-};
-#endif
-#include <linux/android_kabi.h>
-
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
-	refcount_t use;
 	enum {
 		BRNF_PROTO_UNCHANGED,
 		BRNF_PROTO_8021Q,
@@ -261,6 +260,7 @@
 	u8 pkt_otherhost:1;
 	u8 in_prerouting:1;
 	u8 bridged_dnat:1;
+	u8 sabotage_in_done:1;
 	__u16 frag_max_size;
 	struct net_device *physindev;
 
@@ -278,6 +278,18 @@
 		char neigh_header[8];
 	};
 };
+#endif
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+/* Chain in tc_skb_ext will be used to share the tc chain with
+ * ovs recirc_id. It will be set to the current chain by tc
+ * and read by ovs to recirc_id.
+ */
+struct tc_skb_ext {
+	__u32 chain;
+	__u16 mru;
+};
+#endif
 
 struct sk_buff_head {
 	/* These two members must be first. */
@@ -286,7 +298,6 @@
 
 	__u32 qlen;
 	spinlock_t lock;
-	raw_spinlock_t raw_lock;
 };
 
 struct sk_buff;
@@ -310,41 +321,51 @@
 */
 #define GSO_BY_FRAGS	0xFFFF
 
-typedef struct skb_frag_struct skb_frag_t;
+typedef struct bio_vec skb_frag_t;
 
-struct skb_frag_struct {
-	struct {
-		struct page *p;
-	} page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
-	__u32 size;
-#else
-	__u16 page_offset;
-	__u16 size;
-#endif
-};
-
+/**
+ * skb_frag_size() - Returns the size of a skb fragment
+ * @frag: skb fragment
+ */
 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 {
-	return frag->size;
+	return frag->bv_len;
 }
 
+/**
+ * skb_frag_size_set() - Sets the size of a skb fragment
+ * @frag: skb fragment
+ * @size: size of fragment
+ */
 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 {
-	frag->size = size;
+	frag->bv_len = size;
 }
 
+/**
+ * skb_frag_size_add() - Increments the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 {
-	frag->size += delta;
+	frag->bv_len += delta;
 }
 
+/**
+ * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to subtract
+ */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 {
-	frag->size -= delta;
+	frag->bv_len -= delta;
 }
 
+/**
+ * skb_frag_must_loop - Test if %p is a high memory page
+ * @p: fragment's page
+ */
 static inline bool skb_frag_must_loop(struct page *p)
 {
 #if defined(CONFIG_HIGHMEM)
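
[annotation, not part of the patch] With skb_frag_t now an alias for struct
bio_vec, callers are expected to use the accessors above instead of touching
bv_page/bv_offset/bv_len directly. A minimal sketch of walking the paged part
of a non-linear skb (hypothetical helper; linear data at skb->data not shown):

	static unsigned int example_frag_bytes(const struct sk_buff *skb)
	{
		unsigned int i, bytes = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			bytes += skb_frag_size(frag);	/* reads bv_len */
		}
		return bytes;	/* should equal skb->data_len */
	}
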
@@ -358,7 +379,7 @@
 * skb_frag_foreach_page - loop over pages in a fragment
 *
 * @f:		skb frag to operate on
- * @f_off:	offset from start of f->page.p
+ * @f_off:	offset from start of f->bv_page
 * @f_len:	length from f_off to loop over
 * @p:		(temp var) current page
 * @p_off:	(temp var) offset from start of current page,
@@ -479,10 +500,11 @@
 }
 
 void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
 
 void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
 
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 			     struct msghdr *msg, int len,
 			     struct ubuf_info *uarg);
@@ -511,6 +533,8 @@
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void *destructor_arg;
+
+	ANDROID_OEM_DATA_ARRAY(1, 3);
 
 	/* must be last field, see pskb_expand_head() */
 	skb_frag_t frags[MAX_SKB_FRAGS];
@@ -575,6 +599,8 @@
 	SKB_GSO_UDP = 1 << 16,
 
 	SKB_GSO_UDP_L4 = 1 << 17,
+
+	SKB_GSO_FRAGLIST = 1 << 18,
 };
 
 #if BITS_PER_LONG > 32
@@ -587,14 +613,20 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-/**
+/**
 * struct sk_buff - socket buffer
 * @next: Next buffer in list
 * @prev: Previous buffer in list
 * @tstamp: Time we arrived/left
+ * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
+ *	for retransmit timer
 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
+ * @list: queue head
 * @sk: Socket we are owned by
+ * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+ *	fragmentation management
 * @dev: Device we arrived on/are leaving by
+ * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
 * @cb: Control buffer. Free for use by every layer. Put private vars here
 * @_skb_refdst: destination entry (with norefcount bit)
 * @sp: the security path, used for xfrm
@@ -613,10 +645,15 @@
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @ipvs_property: skbuff is owned by ipvs
+ * @inner_protocol_type: whether the inner protocol is
+ *	ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
+ * @remcsum_offload: remote checksum offload is enabled
+ * @offload_fwd_mark: Packet was L2-forwarded in hardware
+ * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
 * @tc_skip_classify: do not classify packet. set by IFB device
 * @tc_at_ingress: used within tc_classify to distinguish in/egress
- * @tc_redirected: packet was redirected by a tc action
- * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ * @redirected: packet was redirected by packet classifier
+ * @from_ingress: packet was redirected from the ingress path
 * @peeked: this packet has been seen already, so stats have been
 *	done for it, don't do them again
 * @nf_trace: netfilter packet trace flag
@@ -629,8 +666,10 @@
 * @tc_index: Traffic control index
 * @hash: the packet hash
 * @queue_mapping: Queue mapping for multiqueue devices
- * @xmit_more: More SKBs are pending for this queue
+ * @head_frag: skb was allocated from page fragments,
+ *	not allocated by kmalloc() or vmalloc().
 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @active_extensions: active extensions (skb_ext_id types)
 * @ndisc_nodetype: router type (from link layer)
 * @ooo_okay: allow the mapping of a socket to a queue to be changed
 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -639,27 +678,43 @@
 * @wifi_acked_valid: wifi_acked was set
 * @wifi_acked: whether frame was acked on wifi or not
 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @encapsulation: indicates the inner headers in the skbuff are valid
+ * @encap_hdr_csum: software checksum is needed
+ * @csum_valid: checksum is already valid
 * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
+ * @csum_complete_sw: checksum was completed by software
+ * @csum_level: indicates the number of consecutive checksums found in
+ *	the packet minus one that have been verified as
+ *	CHECKSUM_UNNECESSARY (max 3)
+ * @scm_io_uring: SKB holds io_uring registered files
 * @dst_pending_confirm: need to confirm neighbour
 * @decrypted: Decrypted SKB
- * @napi_id: id of the NAPI struct this skb came from
+ * @napi_id: id of the NAPI struct this skb came from
+ * @sender_cpu: (aka @napi_id) source CPU in XPS
 * @secmark: security marking
 * @mark: Generic packet mark
+ * @reserved_tailroom: (aka @mark) number of bytes of free space available
+ *	at the tail of an sk_buff
+ * @vlan_present: VLAN tag is present
 * @vlan_proto: vlan encapsulation protocol
 * @vlan_tci: vlan tag control information
 * @inner_protocol: Protocol (encapsulation)
+ * @inner_ipproto: (aka @inner_protocol) stores ipproto when
+ *	skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
 * @inner_transport_header: Inner transport layer header (encapsulation)
 * @inner_network_header: Network layer header (encapsulation)
 * @inner_mac_header: Link layer header (encapsulation)
 * @transport_header: Transport layer header
 * @network_header: Network layer header
 * @mac_header: Link layer header
+ * @kcov_handle: KCOV remote handle for remote coverage collection
 * @tail: Tail pointer
 * @end: End pointer
 * @head: Head of buffer
 * @data: Data head pointer
 * @truesize: Buffer size
 * @users: User count - see {datagram,tcp}.c
+ * @extensions: allocated extensions, valid if active_extensions is nonzero
 */
 
 struct sk_buff {
@@ -689,7 +744,7 @@
 
 	union {
 		ktime_t tstamp;
-		u64 skb_mstamp;
+		u64 skb_mstamp_ns; /* earliest departure time */
 	};
 	/*
 	 * This is the control buffer. It is free to use for every
@@ -707,13 +762,9 @@
 		struct list_head tcp_tsorted_anchor;
 	};
 
-#ifdef CONFIG_XFRM
-	struct sec_path *sp;
-#endif
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	unsigned long _nfct;
 #endif
-	struct nf_bridge_info *nf_bridge;
 	unsigned int len,
 		data_len;
 	__u16 mac_len,
@@ -732,15 +783,18 @@
 #endif
 #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
 
+	/* private: */
 	__u8 __cloned_offset[0];
+	/* public: */
 	__u8 cloned:1,
 		nohdr:1,
 		fclone:2,
 		peeked:1,
 		head_frag:1,
-		xmit_more:1,
 		pfmemalloc:1;
-
+#ifdef CONFIG_SKB_EXTENSIONS
+	__u8 active_extensions;
+#endif
 	/* fields enclosed in headers_start/headers_end are copied
 	 * using a single memcpy() in __copy_skb_header()
 	 */
@@ -756,7 +810,9 @@
 #endif
 #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
 
+	/* private: */
 	__u8 __pkt_type_offset[0];
+	/* public: */
 	__u8 pkt_type:3;
 	__u8 ignore_df:1;
 	__u8 nf_trace:1;
@@ -773,6 +829,16 @@
 	__u8 encap_hdr_csum:1;
 	__u8 csum_valid:1;
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT	7
+#else
+#define PKT_VLAN_PRESENT_BIT	0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
+	/* private: */
+	__u8 __pkt_vlan_present_offset[0];
+	/* public: */
+	__u8 vlan_present:1;
 	__u8 csum_complete_sw:1;
 	__u8 csum_level:2;
 	__u8 csum_not_inet:1;
@@ -780,19 +846,21 @@
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8 ndisc_nodetype:2;
 #endif
-	__u8 ipvs_property:1;
 
+	__u8 ipvs_property:1;
 	__u8 inner_protocol_type:1;
 	__u8 remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
 	__u8 offload_fwd_mark:1;
-	__u8 offload_mr_fwd_mark:1;
+	__u8 offload_l3_fwd_mark:1;
 #endif
 #ifdef CONFIG_NET_CLS_ACT
 	__u8 tc_skip_classify:1;
 	__u8 tc_at_ingress:1;
-	__u8 tc_redirected:1;
-	__u8 tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+	__u8 redirected:1;
+	__u8 from_ingress:1;
 #endif
 #ifdef CONFIG_TLS_DEVICE
 	__u8 decrypted:1;
@@ -843,11 +911,29 @@
 	__u16 network_header;
 	__u16 mac_header;
 
+#ifdef CONFIG_KCOV
+	u64 kcov_handle;
+#endif
+
 	/* private: */
 	__u32 headers_end[0];
 	/* public: */
 
-	ANDROID_KABI_RESERVE(1);
+	/* Android KABI preservation.
+	 *
+	 * "open coded" version of ANDROID_KABI_USE() to pack more
+	 * fields/variables into the space that we have.
+	 *
+	 * scm_io_uring is from 04df9719df18 ("io_uring/af_unix: defer
	 * registered files gc to io_uring release")
+	 */
+	_ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(1),
+			      struct {
+				__u8	scm_io_uring:1;
+				__u8	android_kabi_reserved1_padding1;
+				__u16	android_kabi_reserved1_padding2;
+				__u32	android_kabi_reserved1_padding3;
+			      });
 	ANDROID_KABI_RESERVE(2);
 
 	/* These elements must be at the end, see alloc_skb() for details. */
@@ -857,6 +943,11 @@
 		*data;
 	unsigned int truesize;
 	refcount_t users;
+
+#ifdef CONFIG_SKB_EXTENSIONS
+	/* only useable after checking ->active_extensions != 0 */
+	struct skb_ext *extensions;
+#endif
 };
 
 #ifdef __KERNEL__
@@ -868,7 +959,10 @@
 #define SKB_ALLOC_RX	0x02
 #define SKB_ALLOC_NAPI	0x04
 
-/* Returns true if the skb was allocated from PFMEMALLOC reserves */
+/**
+ * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
+ * @skb: buffer
+ */
 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 {
 	return unlikely(skb->pfmemalloc);
@@ -881,7 +975,6 @@
 #define SKB_DST_NOREF	1UL
 #define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
 
-#define SKB_NFCT_PTRMASK	~(7UL)
 /**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
@@ -890,7 +983,7 @@
 */
 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 {
-	/* If refdst was not refcounted, check we still are in a 
+	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
@@ -937,6 +1030,10 @@
 	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 }
 
+/**
+ * skb_rtable - Returns the skb &rtable
+ * @skb: buffer
+ */
 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 {
 	return (struct rtable *)skb_dst(skb);
@@ -951,6 +1048,10 @@
 	return ptype <= PACKET_OTHERHOST;
 }
 
+/**
+ * skb_napi_id - Returns the skb's NAPI id
+ * @skb: buffer
+ */
 static inline unsigned int skb_napi_id(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -960,7 +1061,12 @@
 #endif
 }
 
-/* decrement the reference count and return true if we can free the skb */
+/**
+ * skb_unref - decrement the skb's reference count
+ * @skb: buffer
+ *
+ * Returns true if we can free the skb.
+ */
 static inline bool skb_unref(struct sk_buff *skb)
 {
 	if (unlikely(!skb))
@@ -976,8 +1082,18 @@
 void skb_release_head_state(struct sk_buff *skb);
 void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
+void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
 void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
 void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+	return kfree_skb(skb);
+}
+#endif
+
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
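
[annotation, not part of the patch] The kfree_skb()/consume_skb() split exists
because drop-monitoring hooks only fire for kfree_skb(); that is also why a
!CONFIG_TRACEPOINTS build can simply alias consume_skb() to kfree_skb() as the
hunk above does. Typical call sites:

	/* error path: the packet is being dropped, drop monitors fire */
	kfree_skb(skb);

	/* success path: the packet was consumed normally */
	consume_skb(skb);
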
@@ -990,6 +1106,16 @@
 				    int node);
 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *build_skb_around(struct sk_buff *skb,
+				 void *data, unsigned int frag_size);
+
+/**
+ * alloc_skb - allocate a network buffer
+ * @size: size to allocate
+ * @priority: allocation mask
+ *
+ * This function is a convenient wrapper around __alloc_skb().
+ */
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
@@ -1001,6 +1127,7 @@
 				     int max_page_order,
 				     int *errcode,
 				     gfp_t gfp_mask);
+struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
 
 /* Layout of fast clones : [skb1][skb2][fclone_ref] */
 struct sk_buff_fclones {
@@ -1032,6 +1159,13 @@
 	       fclones->skb2.sk == sk;
 }
 
+/**
+ * alloc_skb_fclone - allocate a network buffer from fclone cache
+ * @size: size to allocate
+ * @priority: allocation mask
+ *
+ * This function is a convenient wrapper around __alloc_skb().
+ */
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
@@ -1080,11 +1214,6 @@
 	return __skb_pad(skb, pad, true);
 }
 #define dev_kfree_skb(a)	consume_skb(a)
-
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-			    int getfrag(void *from, char *to, int offset,
-					int len, int odd, struct sk_buff *skb),
-			    void *from, int length);
 
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 			 int offset, size_t size);
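
[annotation, not part of the patch] A minimal sketch of the usual
alloc_skb()/skb_reserve()/skb_put() pattern the wrapper above supports
(example_build(), payload and len are hypothetical):

	static struct sk_buff *example_build(const void *payload,
					     unsigned int len)
	{
		struct sk_buff *skb = alloc_skb(NET_SKB_PAD + len, GFP_KERNEL);

		if (!skb)
			return NULL;
		skb_reserve(skb, NET_SKB_PAD);	 /* headroom for headers */
		skb_put_data(skb, payload, len); /* append payload at tail */
		return skb;
	}
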
@@ -1193,7 +1322,12 @@
 			   const struct flow_dissector_key *key,
 			   unsigned int key_count);
 
-bool __skb_flow_dissect(const struct sk_buff *skb,
+struct bpf_flow_dissector;
+bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+		      __be16 proto, int nhoff, int hlen, unsigned int flags);
+
+bool __skb_flow_dissect(const struct net *net,
+			const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
 			void *target_container,
 			void *data, __be16 proto, int nhoff, int hlen,
@@ -1203,8 +1337,8 @@
 				    struct flow_dissector *flow_dissector,
 				    void *target_container, unsigned int flags)
 {
-	return __skb_flow_dissect(skb, flow_dissector, target_container,
-				  NULL, 0, 0, 0, flags);
+	return __skb_flow_dissect(NULL, skb, flow_dissector,
+				  target_container, NULL, 0, 0, 0, flags);
 }
 
 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
@@ -1212,25 +1346,44 @@
 					      unsigned int flags)
 {
 	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
-				  NULL, 0, 0, 0, flags);
+	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
+				  flow, NULL, 0, 0, 0, flags);
 }
 
 static inline bool
-skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb,
+skb_flow_dissect_flow_keys_basic(const struct net *net,
+				 const struct sk_buff *skb,
 				 struct flow_keys_basic *flow, void *data,
 				 __be16 proto, int nhoff, int hlen,
 				 unsigned int flags)
 {
 	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow,
+	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
 				  data, proto, nhoff, hlen, flags);
 }
 
+void skb_flow_dissect_meta(const struct sk_buff *skb,
+			   struct flow_dissector *flow_dissector,
+			   void *target_container);
+
+/* Gets a skb connection tracking info, ctinfo map should be a
+ * map of mapsize to translate enum ip_conntrack_info states
+ * to user states.
+ */
+void
+skb_flow_dissect_ct(const struct sk_buff *skb,
+		    struct flow_dissector *flow_dissector,
+		    void *target_container,
+		    u16 *ctinfo_map,
+		    size_t mapsize);
 void
 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
 			     struct flow_dissector *flow_dissector,
 			     void *target_container);
+
+void skb_flow_dissect_hash(const struct sk_buff *skb,
+			   struct flow_dissector *flow_dissector,
+			   void *target_container);
 
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
@@ -1267,6 +1420,14 @@
 	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_copy_decrypted(struct sk_buff *to,
+				      const struct sk_buff *from)
+{
+#ifdef CONFIG_TLS_DEVICE
+	to->decrypted = from->decrypted;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
@@ -1277,6 +1438,11 @@
 {
 	return skb->end;
 }
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+	skb->end = offset;
+}
 #else
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
@@ -1286,6 +1452,11 @@
 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
 {
 	return skb->end - skb->head;
+}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+	skb->end = skb->head + offset;
 }
 #endif
 
@@ -1304,10 +1475,14 @@
 	return is_zcopy ? skb_uarg(skb) : NULL;
 }
 
-static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
+				 bool *have_ref)
 {
 	if (skb && uarg && !skb_zcopy(skb)) {
-		sock_zerocopy_get(uarg);
+		if (unlikely(have_ref && *have_ref))
+			*have_ref = false;
+		else
+			sock_zerocopy_get(uarg);
 		skb_shinfo(skb)->destructor_arg = uarg;
 		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
 	}
@@ -1354,7 +1529,7 @@
 	struct ubuf_info *uarg = skb_zcopy(skb);
 
 	if (uarg) {
-		sock_zerocopy_put_abort(uarg);
+		sock_zerocopy_put_abort(uarg, false);
 		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
 	}
 }
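
[annotation, not part of the patch] The new have_ref protocol lets a caller
that already holds a reference on the ubuf_info donate it instead of taking
another one; a short sketch (skb and uarg assumed to exist):

	bool have_ref = true;	/* caller's reference is donated */

	skb_zcopy_set(skb, uarg, &have_ref);
	/* have_ref is now false: skb_zcopy_set() consumed the reference
	 * rather than calling sock_zerocopy_get() again
	 */
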
@@ -1502,6 +1677,22 @@
 	return 0;
 }
 
+/* This variant of skb_unclone() makes sure skb->truesize
+ * and skb_end_offset() are not changed, whenever a new skb->head is needed.
+ *
+ * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X))
+ * when various debugging features are in place.
+ */
+int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+	might_sleep_if(gfpflags_allow_blocking(pri));
+
+	if (skb_cloned(skb))
+		return __skb_unclone_keeptruesize(skb, pri);
+	return 0;
+}
+
 /**
 * skb_header_cloned - is the header a clone
 * @skb: buffer to check
@@ -1642,6 +1833,17 @@
 }
 
 /**
+ * __skb_peek - peek at the head of a non-empty &sk_buff_head
+ * @list_: list to peek at
+ *
+ * Like skb_peek(), but the caller knows that the list is not empty.
+ */
+static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
+{
+	return list_->next;
+}
+
+/**
 * skb_peek_next - peek skb following the given one from a queue
 * @skb: skb to start from
 * @list_: list to peek at
@@ -1736,12 +1938,6 @@
 	__skb_queue_head_init(list);
 }
 
-static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
-{
-	raw_spin_lock_init(&list->raw_lock);
-	__skb_queue_head_init(list);
-}
-
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {
@@ -1755,8 +1951,6 @@
 * The "__skb_xxxx()" functions are the non-atomic ones that
 * can only be called with interrupts disabled.
 */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
-		struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
@@ -1886,12 +2080,12 @@
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
-void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
 	__skb_queue_after(list, (struct sk_buff *)list, newsk);
 }
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 
 /**
 *	__skb_queue_tail - queue a buffer at the list tail
@@ -1903,12 +2097,12 @@
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
-void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
 	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 }
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 
 /*
 * remove sk_buff from list. _Must_ be called atomically, and with
@@ -1935,7 +2129,6 @@
 * so must be used with appropriate locks held only. The head item is
 * returned or %NULL if the list is empty.
 */
-struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek(list);
@@ -1943,6 +2136,7 @@
 		__skb_unlink(skb, list);
 	return skb;
 }
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 
 /**
 *	__skb_dequeue_tail - remove from the tail of the queue
@@ -1952,7 +2146,6 @@
 * so must be used with appropriate locks held only. The tail item is
 * returned or %NULL if the list is empty.
 */
-struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
@@ -1960,6 +2153,7 @@
 		__skb_unlink(skb, list);
 	return skb;
 }
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 
 
 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
@@ -2009,8 +2203,8 @@
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
-	frag->page.p = page;
-	frag->page_offset = off;
+	frag->bv_page = page;
+	frag->bv_offset = off;
 	skb_frag_size_set(frag, size);
 
 	page = compound_head(page);
@@ -2045,8 +2239,6 @@
 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
 			  unsigned int truesize);
 
-#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
-#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
 #define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -2083,6 +2275,14 @@
 }
 
 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+	if (WARN_ONCE(!skb->len, "%s\n", __func__))
+		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
 
 /*
 *	Add data to an sk_buff
@@ -2181,12 +2381,12 @@
 	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
 }
 
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len <= skb_headlen(skb)))
-		return 1;
+		return true;
 	if (unlikely(len > skb->len))
-		return 0;
+		return false;
 	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }
@@ -2410,6 +2610,11 @@
 	return skb->mac_header != (typeof(skb->mac_header))~0U;
 }
 
+static inline void skb_unset_mac_header(struct sk_buff *skb)
+{
+	skb->mac_header = (typeof(skb->mac_header))~0U;
+}
+
 static inline void skb_reset_mac_header(struct sk_buff *skb)
 {
 	skb->mac_header = skb->data - skb->head;
@@ -2426,18 +2631,16 @@
 	skb->mac_header = skb->network_header;
 }
 
-static inline void skb_probe_transport_header(struct sk_buff *skb,
-					      const int offset_hint)
+static inline void skb_probe_transport_header(struct sk_buff *skb)
 {
 	struct flow_keys_basic keys;
 
 	if (skb_transport_header_was_set(skb))
 		return;
 
-	if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+					     NULL, 0, 0, 0, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
-	else if (offset_hint >= 0)
-		skb_set_transport_header(skb, offset_hint);
 }
 
 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
@@ -2531,7 +2734,7 @@
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only access one 64 bytes aligned block :
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
 #ifndef NET_SKB_PAD
@@ -2542,10 +2745,8 @@
 
 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
 {
-	if (unlikely(skb_is_nonlinear(skb))) {
-		WARN_ON(1);
+	if (WARN_ON(skb_is_nonlinear(skb)))
 		return;
-	}
 	skb->len = len;
 	skb_set_tail_pointer(skb, len);
 }
@@ -2653,13 +2854,13 @@
 * the list and one reference dropped. This function does not take the
 * list lock and the caller must hold the relevant locks to use it.
 */
-void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
 	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
+void skb_queue_purge(struct sk_buff_head *list);
 
 unsigned int skb_rbtree_purge(struct rb_root *root);
 
@@ -2801,7 +3002,38 @@
 */
 static inline unsigned int skb_frag_off(const skb_frag_t *frag)
 {
-	return frag->page_offset;
+	return frag->bv_offset;
+}
+
+/**
+ * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+	frag->bv_offset += delta;
+}
+
+/**
+ * skb_frag_off_set() - Sets the offset of a skb fragment
+ * @frag: skb fragment
+ * @offset: offset of fragment
+ */
+static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
+{
+	frag->bv_offset = offset;
+}
+
+/**
+ * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
+ * @fragto: skb fragment where offset is set
+ * @fragfrom: skb fragment offset is copied from
+ */
+static inline void skb_frag_off_copy(skb_frag_t *fragto,
+				     const skb_frag_t *fragfrom)
+{
+	fragto->bv_offset = fragfrom->bv_offset;
 }
 
 /**
@@ -2812,7 +3044,7 @@
 */
 static inline struct page *skb_frag_page(const skb_frag_t *frag)
 {
-	return frag->page.p;
+	return frag->bv_page;
 }
 
 /**
@@ -2870,7 +3102,7 @@
 */
 static inline void *skb_frag_address(const skb_frag_t *frag)
 {
-	return page_address(skb_frag_page(frag)) + frag->page_offset;
+	return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
 }
 
 /**
@@ -2886,7 +3118,18 @@
 	if (unlikely(!ptr))
 		return NULL;
 
-	return ptr + frag->page_offset;
+	return ptr + skb_frag_off(frag);
+}
+
+/**
+ * skb_frag_page_copy() - sets the page in a fragment from another fragment
+ * @fragto: skb fragment where page is set
+ * @fragfrom: skb fragment page is copied from
+ */
+static inline void skb_frag_page_copy(skb_frag_t *fragto,
+				      const skb_frag_t *fragfrom)
+{
+	fragto->bv_page = fragfrom->bv_page;
 }
 
 /**
@@ -2898,7 +3141,7 @@
 */
 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
 {
-	frag->page.p = page;
+	frag->bv_page = page;
 }
 
 /**
@@ -2934,7 +3177,7 @@
 					    enum dma_data_direction dir)
 {
 	return dma_map_page(dev, skb_frag_page(frag),
-			    frag->page_offset + offset, size, dir);
+			    skb_frag_off(frag) + offset, size, dir);
 }
 
 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
@@ -3037,7 +3280,7 @@
 }
 
 /**
- *	skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
@@ -3102,10 +3345,10 @@
 	if (skb_zcopy(skb))
 		return false;
 	if (i) {
-		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
 		return page == skb_frag_page(frag) &&
-		       off == frag->page_offset + skb_frag_size(frag);
+		       off == skb_frag_off(frag) + skb_frag_size(frag);
 	}
 	return false;
 }
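
[annotation, not part of the patch] skb_can_coalesce() is typically used when
appending page data, to grow the previous fragment instead of burning a new
fragment slot; a sketch along the lines of the TCP sendmsg path (i, page,
offset and copy are assumed from the surrounding loop):

	if (skb_can_coalesce(skb, i, page, offset))
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	else
		skb_fill_page_desc(skb, i, page, offset, copy);
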
@@ -3324,24 +3567,21 @@
 	     for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
 
-int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
+				int *err, long *timeo_p,
 				const struct sk_buff *skb);
 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 					  struct sk_buff_head *queue,
 					  unsigned int flags,
-					  void (*destructor)(struct sock *sk,
-							     struct sk_buff *skb),
-					  int *peeked, int *off, int *err,
+					  int *off, int *err,
 					  struct sk_buff **last);
-struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
-					void (*destructor)(struct sock *sk,
-							   struct sk_buff *skb),
-					int *peeked, int *off, int *err,
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
+					struct sk_buff_head *queue,
+					unsigned int flags, int *off, int *err,
 					struct sk_buff **last);
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-				    void (*destructor)(struct sock *sk,
-						       struct sk_buff *skb),
-				    int *peeked, int *off, int *err);
+struct sk_buff *__skb_recv_datagram(struct sock *sk,
+				    struct sk_buff_head *sk_queue,
+				    unsigned int flags, int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
 				  int *err);
 __poll_t datagram_poll(struct file *file, struct socket *sock,
@@ -3355,6 +3595,9 @@
 }
 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
 				   struct msghdr *msg);
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+				    struct iov_iter *to, int len,
+				    struct ahash_request *hash);
 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
 				struct iov_iter *from, int len);
 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@@ -3369,13 +3612,12 @@
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
-			      int len, __wsum csum);
+			      int len);
 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int len,
 		    unsigned int flags);
 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
 			 int len);
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3386,11 +3628,22 @@
 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+				 unsigned int offset);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+int skb_eth_pop(struct sk_buff *skb);
+int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
+		 const unsigned char *src);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+		  int mac_len, bool ethernet);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
+		 bool ethernet);
+int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
+int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
 			     gfp_t gfp);
 
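[annotation, not part of the patch] A minimal sketch of the new MPLS helper,
roughly what a tc/openvswitch push_mpls action would do (the label/TTL values
are illustrative; the MPLS_LS_* shifts come from <linux/mpls.h>):

	static int example_push_mpls(struct sk_buff *skb)
	{
		__be32 lse = cpu_to_be32((100 << MPLS_LS_LABEL_SHIFT) |
					 (1 << MPLS_LS_S_SHIFT) |
					 (64 << MPLS_LS_TTL_SHIFT));

		return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
				     skb->mac_len, true);
	}
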
@@ -3494,22 +3747,43 @@
 /**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
- * @stamp: pointer to struct timeval to store stamp in
+ * @stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
 static inline void skb_get_timestamp(const struct sk_buff *skb,
-				     struct timeval *stamp)
+				     struct __kernel_old_timeval *stamp)
 {
-	*stamp = ktime_to_timeval(skb->tstamp);
+	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
+}
+
+static inline void skb_get_new_timestamp(const struct sk_buff *skb,
+					 struct __kernel_sock_timeval *stamp)
+{
+	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+	stamp->tv_sec = ts.tv_sec;
+	stamp->tv_usec = ts.tv_nsec / 1000;
 }
 
 static inline void skb_get_timestampns(const struct sk_buff *skb,
-				       struct timespec *stamp)
+				       struct __kernel_old_timespec *stamp)
 {
-	*stamp = ktime_to_timespec(skb->tstamp);
+	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+	stamp->tv_sec = ts.tv_sec;
+	stamp->tv_nsec = ts.tv_nsec;
+}
+
+static inline void skb_get_new_timestampns(const struct sk_buff *skb,
+					   struct __kernel_timespec *stamp)
+{
+	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+	stamp->tv_sec = ts.tv_sec;
+	stamp->tv_nsec = ts.tv_nsec;
 }
 
 static inline void __net_timestamp(struct sk_buff *skb)
@@ -3551,13 +3825,19 @@
 #define __it(x, op) (x -= sizeof(u##op))
 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
	case 24: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
	case 16: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
	case 20: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
	case 12: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
@@ -3618,7 +3898,7 @@
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
- * @skb: clone of the the original outgoing packet
+ * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
@@ -3721,6 +4001,14 @@
 		skb->csum_level++;
 	} else if (skb->ip_summed == CHECKSUM_NONE) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = 0;
+	}
+}
+
+static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		skb->ip_summed = CHECKSUM_NONE;
 		skb->csum_level = 0;
 	}
 }
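
[annotation, not part of the patch] __skb_incr_checksum_unnecessary() above is
what a tunnel decap path calls after it has verified the outer checksum itself,
so the inner protocol can still consume a CHECKSUM_UNNECESSARY level; the new
__skb_reset_checksum_unnecessary() is the inverse, dropping the indication
entirely. A sketch of the decap side:

	/* outer (e.g. UDP tunnel) checksum was just validated in software;
	 * record one more verified level for the inner headers
	 */
	__skb_incr_checksum_unnecessary(skb);
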
@@ -3840,18 +4128,16 @@
 	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
 }
 
-static inline void __skb_checksum_convert(struct sk_buff *skb,
-					  __sum16 check, __wsum pseudo)
+static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
 {
 	skb->csum = ~pseudo;
 	skb->ip_summed = CHECKSUM_COMPLETE;
 }
 
-#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
+#define skb_checksum_try_convert(skb, proto, compute_pseudo)	\
 do {									\
 	if (__skb_checksum_convert_check(skb))				\
-		__skb_checksum_convert(skb, check,			\
-				       compute_pseudo(skb, proto));	\
+		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
 } while (0)
 
 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
@@ -3891,52 +4177,153 @@
 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+	return (void *)(skb->_nfct & NFCT_PTRMASK);
 #else
 	return NULL;
 #endif
 }
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-void nf_conntrack_destroy(struct nf_conntrack *nfct);
-static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
 {
-	if (nfct && atomic_dec_and_test(&nfct->use))
-		nf_conntrack_destroy(nfct);
-}
-static inline void nf_conntrack_get(struct nf_conntrack *nfct)
-{
-	if (nfct)
-		atomic_inc(&nfct->use);
-}
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	return skb->_nfct;
+#else
+	return 0UL;
 #endif
+}
+
+static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	skb->_nfct = nfct;
+#endif
+}
+
+#ifdef CONFIG_SKB_EXTENSIONS
+enum skb_ext_id {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+	SKB_EXT_BRIDGE_NF,
+#endif
+#ifdef CONFIG_XFRM
+	SKB_EXT_SEC_PATH,
+#endif
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+	TC_SKB_EXT,
+#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+	SKB_EXT_MPTCP,
+#endif
+	SKB_EXT_NUM, /* must be last */
+};
+
+/**
+ * struct skb_ext - sk_buff extensions
+ * @refcnt: 1 on allocation, deallocated on 0
+ * @offset: offset to add to @data to obtain extension address
+ * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
+ * @data: start of extension data, variable sized
+ *
+ * Note: offsets/lengths are stored in chunks of 8 bytes, this allows
+ * to use 'u8' types while allowing up to 2kb worth of extension data.
+ */
+struct skb_ext {
+	refcount_t refcnt;
+	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
+	u8 chunks;		/* same */
+	char data[] __aligned(8);
+};
+
+struct skb_ext *__skb_ext_alloc(gfp_t flags);
+void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
+		    struct skb_ext *ext);
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_put(struct skb_ext *ext);
+
+static inline void skb_ext_put(struct sk_buff *skb)
 {
-	if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
-		kfree(nf_bridge);
+	if (skb->active_extensions)
+		__skb_ext_put(skb->extensions);
 }
-static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+
+static inline void __skb_ext_copy(struct sk_buff *dst,
+				  const struct sk_buff *src)
 {
-	if (nf_bridge)
-		refcount_inc(&nf_bridge->use);
+	dst->active_extensions = src->active_extensions;
+
+	if (src->active_extensions) {
+		struct skb_ext *ext = src->extensions;
+
+		refcount_inc(&ext->refcnt);
+		dst->extensions = ext;
+	}
 }
-#endif /* CONFIG_BRIDGE_NETFILTER */
-static inline void nf_reset(struct sk_buff *skb)
+
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+	skb_ext_put(dst);
+	__skb_ext_copy(dst, src);
+}
+
+static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
+{
+	return !!ext->offset[i];
+}
+
+static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
+{
+	return skb->active_extensions & (1 << id);
+}
+
+static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
+{
+	if (skb_ext_exist(skb, id))
+		__skb_ext_del(skb, id);
+}
+
+static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
+{
+	if (skb_ext_exist(skb, id)) {
+		struct skb_ext *ext = skb->extensions;
+
+		return (void *)ext + (ext->offset[id] << 3);
+	}
+
+	return NULL;
+}
+
+static inline void skb_ext_reset(struct sk_buff *skb)
+{
+	if (unlikely(skb->active_extensions)) {
+		__skb_ext_put(skb->extensions);
+		skb->active_extensions = 0;
+	}
+}
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+	return unlikely(skb->active_extensions);
+}
+#else
+static inline void skb_ext_put(struct sk_buff *skb) {}
+static inline void skb_ext_reset(struct sk_buff *skb) {}
+static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
+static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
+#endif /* CONFIG_SKB_EXTENSIONS */
+
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb_nfct(skb));
 	skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	nf_bridge_put(skb->nf_bridge);
-#endif
-	skb->nf_bridge = NULL;
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
 {
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
 	skb->nf_trace = 0;
 #endif
 }
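
[annotation, not part of the patch] A minimal sketch of the extension API
added above, using the TC_SKB_EXT id from earlier in the patch (hypothetical
helper; skb_ext_add() can fail under memory pressure):

	#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	static void example_record_chain(struct sk_buff *skb, __u32 chain)
	{
		struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

		if (ext)
			ext->chain = chain; /* later read via skb_ext_find() */
	}
	#endif
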
@@ -3948,7 +4335,7 @@
 #endif
 }
 
-/* Note: This doesn't put any conntrack and bridge info in dst. */
+/* Note: This doesn't put any conntrack info in dst. */
 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 			     bool copy)
 {
@@ -3956,11 +4343,7 @@
 	dst->_nfct = src->_nfct;
 	nf_conntrack_get(skb_nfct(src));
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	dst->nf_bridge = src->nf_bridge;
-	nf_bridge_get(src->nf_bridge);
-#endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
 	if (copy)
 		dst->nf_trace = src->nf_trace;
 #endif
@@ -3970,9 +4353,6 @@
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb_nfct(dst));
-#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	nf_bridge_put(dst->nf_bridge);
 #endif
 	__nf_copy(dst, src, true);
 }
@@ -3995,12 +4375,19 @@
 { }
 #endif
 
+static inline int secpath_exists(const struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
+#else
+	return 0;
+#endif
+}
+
 static inline bool skb_irq_freeable(const struct sk_buff *skb)
 {
 	return !skb->destructor &&
-#if IS_ENABLED(CONFIG_XFRM)
-	       !skb->sp &&
-#endif
+	       !secpath_exists(skb) &&
 	       !skb_nfct(skb) &&
 	       !skb->_skb_refdst &&
 	       !skb_has_frag_list(skb);
@@ -4046,10 +4433,10 @@
 	return skb->dst_pending_confirm != 0;
 }
 
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
-	return skb->sp;
+	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
 #else
 	return NULL;
 #endif
@@ -4070,8 +4457,8 @@
 	__wsum csum;
 	__u16 csum_start;
 };
-#define SKB_SGO_CB_OFFSET	32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
+#define SKB_GSO_CB_OFFSET	32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
 
 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
 {
@@ -4232,7 +4619,7 @@
 /* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
- * See Documentation/networking/checksum-offloads.txt for
+ * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
@@ -4254,5 +4641,53 @@
 	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	return skb->redirected;
+#else
+	return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 1;
+	skb->from_ingress = from_ingress;
+	if (skb->from_ingress)
+		skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 0;
+#endif
+}
+
+static inline bool skb_csum_is_sctp(struct sk_buff *skb)
+{
+	return skb->csum_not_inet;
+}
+
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+				       const u64 kcov_handle)
+{
+#ifdef CONFIG_KCOV
+	skb->kcov_handle = kcov_handle;
+#endif
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+#ifdef CONFIG_KCOV
+	return skb->kcov_handle;
+#else
+	return 0;
+#endif
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */