commit 6778948f9de86c3cfaf36725a7c87dcff9ba247f
Date: 2023-12-11

diff --git a/kernel/include/linux/skbuff.h b/kernel/include/linux/skbuff.h
--- a/kernel/include/linux/skbuff.h
+++ b/kernel/include/linux/skbuff.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *	Definitions for the 'struct sk_buff' memory handlers.
  *
  *	Authors:
  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  *		Florian La Roche, <rzsfl@rz.uni-sb.de>
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
  */
 
 #ifndef _LINUX_SKBUFF_H
@@ -18,6 +14,7 @@
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/bug.h>
+#include <linux/bvec.h>
 #include <linux/cache.h>
 #include <linux/rbtree.h>
 #include <linux/socket.h>
@@ -40,6 +37,11 @@
 #include <linux/in6.h>
 #include <linux/if_packet.h>
 #include <net/flow.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <linux/netfilter/nf_conntrack_common.h>
+#endif
+#include <linux/android_kabi.h>
+#include <linux/android_vendor.h>
 
 /* The interface for checksum offload between the stack and networking drivers
  * is as follows...
@@ -47,8 +49,8 @@
 * A. IP checksum related features
 *
 * Drivers advertise checksum offload capabilities in the features of a device.
- * From the stack's point of view these are capabilities offered by the driver,
- * a driver typically only advertises features that it is capable of offloading
+ * From the stack's point of view these are capabilities offered by the driver.
+ * A driver typically only advertises features that it is capable of offloading
 * to its device.
 *
 * The checksum related features are:
@@ -63,7 +65,7 @@
 *	TCP or UDP packets over IPv4. These are specifically
 *	unencapsulated packets of the form IPv4|TCP or
 *	IPv4|UDP where the Protocol field in the IPv4 header
- *	is TCP or UDP. The IPv4 header may contain IP options
+ *	is TCP or UDP. The IPv4 header may contain IP options.
 *	This feature cannot be set in features for a device
 *	with NETIF_F_HW_CSUM also set. This feature is being
 *	DEPRECATED (see below).
@@ -71,7 +73,7 @@
 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
 *	TCP or UDP packets over IPv6. These are specifically
 *	unencapsulated packets of the form IPv6|TCP or
- *	IPv4|UDP where the Next Header field in the IPv6
+ *	IPv6|UDP where the Next Header field in the IPv6
 *	header is either TCP or UDP. IPv6 extension headers
 *	are not supported with this feature. This feature
 *	cannot be set in features for a device with
@@ -79,13 +81,13 @@
 *	DEPRECATED (see below).
 *
 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- *	This flag is used only used to disable the RX checksum
+ *	This flag is only used to disable the RX checksum
 *	feature for a device. The stack will accept receive
 *	checksum indication in packets received on a device
 *	regardless of whether NETIF_F_RXCSUM is set.
 *
 * B. Checksumming of received packets by device. Indication of checksum
- *	verification is in set skb->ip_summed. Possible values are:
+ *	verification is set in skb->ip_summed. Possible values are:
 *
 * CHECKSUM_NONE:
 *
@@ -115,16 +117,16 @@
 *	the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
 *	For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 *	and a device is able to verify the checksums for UDP (possibly zero),
- *	GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ *	GRE (checksum flag is set) and TCP, skb->csum_level would be set to
 *	two. If the device were only able to verify the UDP checksum and not
- *	GRE, either because it doesn't support GRE checksum of because GRE
+ *	GRE, either because it doesn't support GRE checksum or because GRE
 *	checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 *	not considered in this case).
 *
 * CHECKSUM_COMPLETE:
 *
 *	This is the most generic way. The device supplied checksum of the _whole_
- *	packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+ *	packet as seen by netif_rx() and fills in skb->csum. This means the
 *	hardware doesn't need to parse L3/L4 headers to implement this.
 *
 * Notes:
@@ -153,8 +155,8 @@
 *	from skb->csum_start up to the end, and to record/write the checksum at
 *	offset skb->csum_start + skb->csum_offset. A driver may verify that the
 *	csum_start and csum_offset values are valid values given the length and
- *	offset of the packet, however they should not attempt to validate that the
- *	checksum refers to a legitimate transport layer checksum-- it is the
+ *	offset of the packet, but it should not attempt to validate that the
+ *	checksum refers to a legitimate transport layer checksum -- it is the
 *	purview of the stack to validate that csum_start and csum_offset are set
 *	correctly.
 *
@@ -178,18 +180,18 @@
 *
 * CHECKSUM_UNNECESSARY:
 *
- *	This has the same meaning on as CHECKSUM_NONE for checksum offload on
+ *	This has the same meaning as CHECKSUM_NONE for checksum offload on
 *	output.
 *
 * CHECKSUM_COMPLETE:
 *	Not used in checksum output. If a driver observes a packet with this value
- *	set in skbuff, if should treat as CHECKSUM_NONE being set.
+ *	set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
 *
 * D. Non-IP checksum (CRC) offloads
 *
 *	NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 *	offloading the SCTP CRC in a packet. To perform this offload the stack
- *	will set set csum_start and csum_offset accordingly, set ip_summed to
+ *	will set csum_start and csum_offset accordingly, set ip_summed to
 *	CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
 *	the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
 *	A driver that supports both IP checksum offload and SCTP CRC32c offload
@@ -200,10 +202,10 @@
 *	NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 *	offloading the FCOE CRC in a packet. To perform this offload the stack
 *	will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- *	accordingly. Note the there is no indication in the skbuff that the
- *	CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
+ *	accordingly. Note that there is no indication in the skbuff that the
+ *	CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
 *	both IP checksum offload and FCOE CRC offload must verify which offload
- *	is configured for a packet presumably by inspecting packet headers.
+ *	is configured for a packet, presumably by inspecting packet headers.
 *
 * E. Checksumming on output with GSO.
 *
@@ -211,9 +213,9 @@
 *	is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 *	gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 *	part of the GSO operation is implied. If a checksum is being offloaded
- *	with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
- *	are set to refer to the outermost checksum being offload (two offloaded
- *	checksums are possible with UDP encapsulation).
+ *	with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
+ *	csum_offset are set to refer to the outermost checksum being offloaded
+ *	(two offloaded checksums are possible with UDP encapsulation).
 */
 
 /* Don't change this without changing skb_csum_unnecessary! */
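[Editor's note: the CHECKSUM_PARTIAL contract described above is easiest to see from the consuming side. A minimal sketch, illustrative only and not part of this diff -- the real stack-side fallback is skb_checksum_help(), and error handling/linearization are omitted here:

static int example_tx_resolve_csum(struct sk_buff *skb)
{
	int offset;
	__wsum csum;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;	/* nothing was deferred to us */

	/* checksum everything from csum_start to the end of the packet */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	/* store the folded result at csum_start + csum_offset */
	*(__sum16 *)(skb->data + offset + skb->csum_offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 0;
}
]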
@@ -238,21 +240,18 @@
 	SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
 	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+struct ahash_request;
 struct net_device;
 struct scatterlist;
 struct pipe_inode_info;
 struct iov_iter;
 struct napi_struct;
+struct bpf_prog;
+union bpf_attr;
+struct skb_ext;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-struct nf_conntrack {
-	atomic_t use;
-};
-#endif
-#include <linux/android_kabi.h>
-
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
-	refcount_t		use;
 	enum {
 		BRNF_PROTO_UNCHANGED,
 		BRNF_PROTO_8021Q,
@@ -278,6 +277,18 @@
 			char neigh_header[8];
 		};
 	};
+#endif
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+/* Chain in tc_skb_ext will be used to share the tc chain with
+ * ovs recirc_id. It will be set to the current chain by tc
+ * and read by ovs to recirc_id.
+ */
+struct tc_skb_ext {
+	__u32 chain;
+	__u16 mru;
+};
+#endif
 
 struct sk_buff_head {
 	/* These two members must be first. */
@@ -309,41 +320,51 @@
 */
 #define GSO_BY_FRAGS	0xFFFF
 
-typedef struct skb_frag_struct skb_frag_t;
+typedef struct bio_vec skb_frag_t;
 
-struct skb_frag_struct {
-	struct {
-		struct page *p;
-	} page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
-	__u32 size;
-#else
-	__u16 page_offset;
-	__u16 size;
-#endif
-};
-
+/**
+ * skb_frag_size() - Returns the size of a skb fragment
+ * @frag: skb fragment
+ */
 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 {
-	return frag->size;
+	return frag->bv_len;
 }
 
+/**
+ * skb_frag_size_set() - Sets the size of a skb fragment
+ * @frag: skb fragment
+ * @size: size of fragment
+ */
 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 {
-	frag->size = size;
+	frag->bv_len = size;
 }
 
+/**
+ * skb_frag_size_add() - Increments the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 {
-	frag->size += delta;
+	frag->bv_len += delta;
 }
 
+/**
+ * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to subtract
+ */
 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 {
-	frag->size -= delta;
+	frag->bv_len -= delta;
 }
 
+/**
+ * skb_frag_must_loop - Test if %p is a high memory page
+ * @p: fragment's page
+ */
 static inline bool skb_frag_must_loop(struct page *p)
 {
 #if defined(CONFIG_HIGHMEM)
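[Editor's note: with skb_frag_t becoming an alias for struct bio_vec, code that touched page.p/page_offset/size directly no longer compiles; the accessors are the stable interface. Illustrative sketch, not part of this diff (skb_frag_off() is introduced further down in this same diff):

static void example_dump_frags(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int i;

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		/* bv_page/bv_offset/bv_len, but only via the helpers */
		pr_debug("frag %u: page=%p off=%u len=%u\n", i,
			 skb_frag_page(frag), skb_frag_off(frag),
			 skb_frag_size(frag));
	}
}
]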
@@ -357,7 +378,7 @@
 * skb_frag_foreach_page - loop over pages in a fragment
 *
 * @f:		skb frag to operate on
- * @f_off:	offset from start of f->page.p
+ * @f_off:	offset from start of f->bv_page
 * @f_len:	length from f_off to loop over
 * @p:		(temp var) current page
 * @p_off:	(temp var) offset from start of current page,
@@ -478,10 +499,11 @@
 }
 
 void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
 
 void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
 
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);
@@ -510,6 +532,8 @@
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void		*destructor_arg;
+
+	ANDROID_OEM_DATA_ARRAY(1, 3);
 
 	/* must be last field, see pskb_expand_head() */
 	skb_frag_t	frags[MAX_SKB_FRAGS];
@@ -574,6 +598,8 @@
 	SKB_GSO_UDP = 1 << 16,
 
 	SKB_GSO_UDP_L4 = 1 << 17,
+
+	SKB_GSO_FRAGLIST = 1 << 18,
 };
 
 #if BITS_PER_LONG > 32
@@ -586,14 +612,20 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-/** 
+/**
 * struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived/left
+ *	@skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
+ *		for retransmit timer
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
+ *	@list: queue head
 *	@sk: Socket we are owned by
+ *	@ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+ *		fragmentation management
 *	@dev: Device we arrived on/are leaving by
+ *	@dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
@@ -612,10 +644,15 @@
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
+ *	@inner_protocol_type: whether the inner protocol is
+ *		ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
+ *	@remcsum_offload: remote checksum offload is enabled
+ *	@offload_fwd_mark: Packet was L2-forwarded in hardware
+ *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
 *	@tc_skip_classify: do not classify packet. set by IFB device
 *	@tc_at_ingress: used within tc_classify to distinguish in/egress
- *	@tc_redirected: packet was redirected by a tc action
- *	@tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ *	@redirected: packet was redirected by packet classifier
+ *	@from_ingress: packet was redirected from the ingress path
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
@@ -628,8 +665,10 @@
 *	@tc_index: Traffic control index
 *	@hash: the packet hash
 *	@queue_mapping: Queue mapping for multiqueue devices
- *	@xmit_more: More SKBs are pending for this queue
+ *	@head_frag: skb was allocated from page fragments,
+ *		not allocated by kmalloc() or vmalloc().
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ *	@active_extensions: active extensions (skb_ext_id types)
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -638,15 +677,29 @@
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
+ *	@encapsulation: indicates the inner headers in the skbuff are valid
+ *	@encap_hdr_csum: software checksum is needed
+ *	@csum_valid: checksum is already valid
 *	@csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
+ *	@csum_complete_sw: checksum was completed by software
+ *	@csum_level: indicates the number of consecutive checksums found in
+ *		the packet minus one that have been verified as
+ *		CHECKSUM_UNNECESSARY (max 3)
+ *	@scm_io_uring: SKB holds io_uring registered files
 *	@dst_pending_confirm: need to confirm neighbour
 *	@decrypted: Decrypted SKB
- *	@napi_id: id of the NAPI struct this skb came from
+ *	@napi_id: id of the NAPI struct this skb came from
+ *	@sender_cpu: (aka @napi_id) source CPU in XPS
 *	@secmark: security marking
 *	@mark: Generic packet mark
+ *	@reserved_tailroom: (aka @mark) number of bytes of free space available
+ *		at the tail of an sk_buff
+ *	@vlan_present: VLAN tag is present
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
+ *	@inner_ipproto: (aka @inner_protocol) stores ipproto when
+ *		skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
@@ -659,6 +712,7 @@
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
+ *	@extensions: allocated extensions, valid if active_extensions is nonzero
 */
 
 struct sk_buff {
@@ -688,7 +742,7 @@
 
 	union {
 		ktime_t		tstamp;
-		u64		skb_mstamp;
+		u64		skb_mstamp_ns; /* earliest departure time */
 	};
 	/*
 	 * This is the control buffer. It is free to use for every
@@ -706,13 +760,9 @@
 		struct list_head	tcp_tsorted_anchor;
 	};
 
-#ifdef CONFIG_XFRM
-	struct	sec_path	*sp;
-#endif
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	unsigned long		 _nfct;
 #endif
-	struct nf_bridge_info	*nf_bridge;
 	unsigned int		len,
 				data_len;
 	__u16			mac_len,
@@ -731,15 +781,18 @@
 #endif
 #define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)
 
+	/* private: */
 	__u8			__cloned_offset[0];
+	/* public: */
 	__u8			cloned:1,
 				nohdr:1,
 				fclone:2,
 				peeked:1,
 				head_frag:1,
-				xmit_more:1,
 				pfmemalloc:1;
-
+#ifdef CONFIG_SKB_EXTENSIONS
+	__u8			active_extensions;
+#endif
 	/* fields enclosed in headers_start/headers_end are copied
 	 * using a single memcpy() in __copy_skb_header()
 	 */
@@ -755,7 +808,9 @@
 #endif
 #define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)
 
+	/* private: */
 	__u8			__pkt_type_offset[0];
+	/* public: */
 	__u8			pkt_type:3;
 	__u8			ignore_df:1;
 	__u8			nf_trace:1;
@@ -772,6 +827,16 @@
 	__u8			encap_hdr_csum:1;
 	__u8			csum_valid:1;
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT	7
+#else
+#define PKT_VLAN_PRESENT_BIT	0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
+	/* private: */
+	__u8			__pkt_vlan_present_offset[0];
+	/* public: */
+	__u8			vlan_present:1;
 	__u8			csum_complete_sw:1;
 	__u8			csum_level:2;
 	__u8			csum_not_inet:1;
@@ -779,19 +844,21 @@
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
-	__u8			ipvs_property:1;
 
+	__u8			ipvs_property:1;
 	__u8			inner_protocol_type:1;
 	__u8			remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
 	__u8			offload_fwd_mark:1;
-	__u8			offload_mr_fwd_mark:1;
+	__u8			offload_l3_fwd_mark:1;
 #endif
 #ifdef CONFIG_NET_CLS_ACT
 	__u8			tc_skip_classify:1;
 	__u8			tc_at_ingress:1;
-	__u8			tc_redirected:1;
-	__u8			tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+	__u8			redirected:1;
+	__u8			from_ingress:1;
 #endif
 #ifdef CONFIG_TLS_DEVICE
 	__u8			decrypted:1;
@@ -846,7 +913,21 @@
 	__u32			headers_end[0];
 	/* public: */
 
-	ANDROID_KABI_RESERVE(1);
+	/* Android KABI preservation.
+	 *
+	 * "open coded" version of ANDROID_KABI_USE() to pack more
+	 * fields/variables into the space that we have.
+	 *
+	 * scm_io_uring is from 04df9719df18 ("io_uring/af_unix: defer
+	 * registered files gc to io_uring release")
+	 */
+	_ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(1),
+			      struct {
+					__u8	scm_io_uring:1;
+					__u8	android_kabi_reserved1_padding1;
+					__u16	android_kabi_reserved1_padding2;
+					__u32	android_kabi_reserved1_padding3;
+			      });
 	ANDROID_KABI_RESERVE(2);
 
 	/* These elements must be at the end, see alloc_skb() for details. */
@@ -856,6 +937,11 @@
 				*data;
 	unsigned int		truesize;
 	refcount_t		users;
+
+#ifdef CONFIG_SKB_EXTENSIONS
+	/* only useable after checking ->active_extensions != 0 */
+	struct skb_ext		*extensions;
+#endif
 };
 
 #ifdef __KERNEL__
@@ -867,7 +953,10 @@
 #define SKB_ALLOC_RX	0x02
 #define SKB_ALLOC_NAPI	0x04
 
-/* Returns true if the skb was allocated from PFMEMALLOC reserves */
+/**
+ * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
+ * @skb: buffer
+ */
 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 {
 	return unlikely(skb->pfmemalloc);
@@ -880,7 +969,6 @@
 #define SKB_DST_NOREF	1UL
 #define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
 
-#define SKB_NFCT_PTRMASK	~(7UL)
 /**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
@@ -889,7 +977,7 @@
 */
 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 {
-	/* If refdst was not refcounted, check we still are in a 
+	/* If refdst was not refcounted, check we still are in a
 	 * rcu_read_lock section
 	 */
 	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
@@ -936,6 +1024,10 @@
 	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 }
 
+/**
+ * skb_rtable - Returns the skb &rtable
+ * @skb: buffer
+ */
 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 {
 	return (struct rtable *)skb_dst(skb);
@@ -950,6 +1042,10 @@
 	return ptype <= PACKET_OTHERHOST;
 }
 
+/**
+ * skb_napi_id - Returns the skb's NAPI id
+ * @skb: buffer
+ */
 static inline unsigned int skb_napi_id(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -959,7 +1055,12 @@
 #endif
 }
 
-/* decrement the reference count and return true if we can free the skb */
+/**
+ * skb_unref - decrement the skb's reference count
+ * @skb: buffer
+ *
+ * Returns true if we can free the skb.
+ */
 static inline bool skb_unref(struct sk_buff *skb)
 {
 	if (unlikely(!skb))
@@ -975,8 +1076,18 @@
 void skb_release_head_state(struct sk_buff *skb);
 void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
+void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
 void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
 void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+	return kfree_skb(skb);
+}
+#endif
+
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
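[Editor's note: the #else branch above makes consume_skb() literally kfree_skb() when tracepoints are off; the distinction only matters for tracing and drop monitoring. Illustrative usage, not part of this diff:

static void example_tx_done(struct sk_buff *skb, bool sent)
{
	if (sent)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* a drop: fires the kfree_skb tracepoint */
}
]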
@@ -989,6 +1100,16 @@
 				     int node);
 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *build_skb_around(struct sk_buff *skb,
+				 void *data, unsigned int frag_size);
+
+/**
+ * alloc_skb - allocate a network buffer
+ * @size: size to allocate
+ * @priority: allocation mask
+ *
+ * This function is a convenient wrapper around __alloc_skb().
+ */
 static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
 {
@@ -1000,6 +1121,7 @@
 				     int max_page_order,
 				     int *errcode,
 				     gfp_t gfp_mask);
+struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
 
 /* Layout of fast clones : [skb1][skb2][fclone_ref] */
 struct sk_buff_fclones {
@@ -1031,6 +1153,13 @@
 	       fclones->skb2.sk == sk;
 }
 
+/**
+ * alloc_skb_fclone - allocate a network buffer from fclone cache
+ * @size: size to allocate
+ * @priority: allocation mask
+ *
+ * This function is a convenient wrapper around __alloc_skb().
+ */
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
 {
@@ -1079,11 +1208,6 @@
 	return __skb_pad(skb, pad, true);
 }
 #define dev_kfree_skb(a)	consume_skb(a)
-
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-			    int getfrag(void *from, char *to, int offset,
-					int len, int odd, struct sk_buff *skb),
-			    void *from, int length);
 
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);
@@ -1192,7 +1316,12 @@
 				    const struct flow_dissector_key *key,
 				    unsigned int key_count);
 
-bool __skb_flow_dissect(const struct sk_buff *skb,
+struct bpf_flow_dissector;
+bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+		      __be16 proto, int nhoff, int hlen, unsigned int flags);
+
+bool __skb_flow_dissect(const struct net *net,
+			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
@@ -1202,8 +1331,8 @@
			  struct flow_dissector *flow_dissector,
			  void *target_container, unsigned int flags)
 {
-	return __skb_flow_dissect(skb, flow_dissector, target_container,
-				  NULL, 0, 0, 0, flags);
+	return __skb_flow_dissect(NULL, skb, flow_dissector,
+				  target_container, NULL, 0, 0, 0, flags);
 }
 
 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
@@ -1211,25 +1340,44 @@
					      unsigned int flags)
 {
 	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
-				  NULL, 0, 0, 0, flags);
+	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
+				  flow, NULL, 0, 0, 0, flags);
 }
 
 static inline bool
-skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb,
+skb_flow_dissect_flow_keys_basic(const struct net *net,
+				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
 {
 	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow,
+	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
 }
 
+void skb_flow_dissect_meta(const struct sk_buff *skb,
+			   struct flow_dissector *flow_dissector,
+			   void *target_container);
+
+/* Gets a skb connection tracking info, ctinfo map should be a
+ * map of mapsize to translate enum ip_conntrack_info states
+ * to user states.
+ */
+void
+skb_flow_dissect_ct(const struct sk_buff *skb,
+		    struct flow_dissector *flow_dissector,
+		    void *target_container,
+		    u16 *ctinfo_map,
+		    size_t mapsize);
 void
 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);
+
+void skb_flow_dissect_hash(const struct sk_buff *skb,
+			   struct flow_dissector *flow_dissector,
+			   void *target_container);
 
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
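[Editor's note: all dissection now funnels through __skb_flow_dissect() with an explicit struct net; the inline wrappers above pass NULL and let the dissector derive the netns from skb->sk or skb->dev. A hypothetical caller, illustrative only:

static bool example_transport_offset(const struct sk_buff *skb, int *thoff)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					      NULL, 0, 0, 0, 0))
		return false;

	*thoff = keys.control.thoff;
	return true;
}
]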
@@ -1266,6 +1414,14 @@
 	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_copy_decrypted(struct sk_buff *to,
+				      const struct sk_buff *from)
+{
+#ifdef CONFIG_TLS_DEVICE
+	to->decrypted = from->decrypted;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
@@ -1276,6 +1432,11 @@
 {
 	return skb->end;
 }
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+	skb->end = offset;
+}
 #else
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
@@ -1285,6 +1446,11 @@
 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
 {
 	return skb->end - skb->head;
+}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+	skb->end = skb->head + offset;
 }
 #endif
 
@@ -1303,10 +1469,14 @@
 	return is_zcopy ? skb_uarg(skb) : NULL;
 }
 
-static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
+				 bool *have_ref)
 {
 	if (skb && uarg && !skb_zcopy(skb)) {
-		sock_zerocopy_get(uarg);
+		if (unlikely(have_ref && *have_ref))
+			*have_ref = false;
+		else
+			sock_zerocopy_get(uarg);
 		skb_shinfo(skb)->destructor_arg = uarg;
 		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
 	}
@@ -1353,7 +1523,7 @@
 	struct ubuf_info *uarg = skb_zcopy(skb);
 
 	if (uarg) {
-		sock_zerocopy_put_abort(uarg);
+		sock_zerocopy_put_abort(uarg, false);
 		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
 	}
 }
@@ -1501,6 +1671,22 @@
 	return 0;
 }
 
+/* This variant of skb_unclone() makes sure skb->truesize
+ * and skb_end_offset() are not changed, whenever a new skb->head is needed.
+ *
+ * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X))
+ * when various debugging features are in place.
+ */
+int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+	might_sleep_if(gfpflags_allow_blocking(pri));
+
+	if (skb_cloned(skb))
+		return __skb_unclone_keeptruesize(skb, pri);
+	return 0;
+}
+
 /**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
@@ -1641,6 +1827,17 @@
 }
 
 /**
+ *	__skb_peek - peek at the head of a non-empty &sk_buff_head
+ *	@list_: list to peek at
+ *
+ *	Like skb_peek(), but the caller knows that the list is not empty.
+ */
+static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
+{
+	return list_->next;
+}
+
+/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
@@ -1748,8 +1945,6 @@
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
-		struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
@@ -1879,12 +2074,12 @@
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
-void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
 {
 	__skb_queue_after(list, (struct sk_buff *)list, newsk);
 }
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 
 /**
 *	__skb_queue_tail - queue a buffer at the list tail
@@ -1896,12 +2091,12 @@
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
-void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
 {
 	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 }
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 
 /*
 * remove sk_buff from list. _Must_ be called atomically, and with
@@ -1928,7 +2123,6 @@
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
-struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek(list);
@@ -1936,6 +2130,7 @@
 		__skb_unlink(skb, list);
 	return skb;
 }
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 
 /**
 *	__skb_dequeue_tail - remove from the tail of the queue
@@ -1945,7 +2140,6 @@
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
-struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
@@ -1953,6 +2147,7 @@
 		__skb_unlink(skb, list);
 	return skb;
 }
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 
 
 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
@@ -2002,8 +2197,8 @@
 	 * that not all callers have unique ownership of the page but rely
 	 * on page_is_pfmemalloc doing the right thing(tm).
 	 */
-	frag->page.p		  = page;
-	frag->page_offset	  = off;
+	frag->bv_page		  = page;
+	frag->bv_offset		  = off;
 	skb_frag_size_set(frag, size);
 
 	page = compound_head(page);
@@ -2038,8 +2233,6 @@
 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);
 
-#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
-#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -2076,6 +2269,14 @@
 }
 
 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+	if (WARN_ONCE(!skb->len, "%s\n", __func__))
+		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
 
 /*
 *	Add data to an sk_buff
@@ -2174,12 +2375,12 @@
 	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
 }
 
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len <= skb_headlen(skb)))
-		return 1;
+		return true;
 	if (unlikely(len > skb->len))
-		return 0;
+		return false;
 	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }
 
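[Editor's note: the int-to-bool change above is cosmetic; the calling convention stays the same. Typical header-parse pattern, illustrative only (assumes skb->data already sits at the UDP header):

static int example_udp_len(struct sk_buff *skb)
{
	const struct udphdr *uh;

	if (!pskb_may_pull(skb, sizeof(*uh)))
		return -EINVAL;		/* header unavailable even after pulling */

	uh = (const struct udphdr *)skb->data;
	return ntohs(uh->len);
}
]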
@@ -2403,6 +2604,11 @@
 	return skb->mac_header != (typeof(skb->mac_header))~0U;
 }
 
+static inline void skb_unset_mac_header(struct sk_buff *skb)
+{
+	skb->mac_header = (typeof(skb->mac_header))~0U;
+}
+
 static inline void skb_reset_mac_header(struct sk_buff *skb)
 {
 	skb->mac_header = skb->data - skb->head;
@@ -2419,18 +2625,16 @@
 	skb->mac_header = skb->network_header;
 }
 
-static inline void skb_probe_transport_header(struct sk_buff *skb,
-					      const int offset_hint)
+static inline void skb_probe_transport_header(struct sk_buff *skb)
 {
 	struct flow_keys_basic keys;
 
 	if (skb_transport_header_was_set(skb))
 		return;
 
-	if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+					     NULL, 0, 0, 0, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
-	else if (offset_hint >= 0)
-		skb_set_transport_header(skb, offset_hint);
 }
 
 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
@@ -2524,7 +2728,7 @@
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only access one 64 bytes aligned block :
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
 #ifndef NET_SKB_PAD
@@ -2535,10 +2739,8 @@
 
 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
 {
-	if (unlikely(skb_is_nonlinear(skb))) {
-		WARN_ON(1);
+	if (WARN_ON(skb_is_nonlinear(skb)))
 		return;
-	}
 	skb->len = len;
 	skb_set_tail_pointer(skb, len);
 }
@@ -2646,13 +2848,13 @@
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
-void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
 	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
+void skb_queue_purge(struct sk_buff_head *list);
 
 unsigned int skb_rbtree_purge(struct rb_root *root);
 
@@ -2794,7 +2996,38 @@
 */
 static inline unsigned int skb_frag_off(const skb_frag_t *frag)
 {
-	return frag->page_offset;
+	return frag->bv_offset;
+}
+
+/**
+ * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+	frag->bv_offset += delta;
+}
+
+/**
+ * skb_frag_off_set() - Sets the offset of a skb fragment
+ * @frag: skb fragment
+ * @offset: offset of fragment
+ */
+static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
+{
+	frag->bv_offset = offset;
+}
+
+/**
+ * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
+ * @fragto: skb fragment where offset is set
+ * @fragfrom: skb fragment offset is copied from
+ */
+static inline void skb_frag_off_copy(skb_frag_t *fragto,
+				     const skb_frag_t *fragfrom)
+{
+	fragto->bv_offset = fragfrom->bv_offset;
 }
 
 /**
@@ -2805,7 +3038,7 @@
 */
 static inline struct page *skb_frag_page(const skb_frag_t *frag)
 {
-	return frag->page.p;
+	return frag->bv_page;
 }
 
 /**
@@ -2863,7 +3096,7 @@
 */
 static inline void *skb_frag_address(const skb_frag_t *frag)
 {
-	return page_address(skb_frag_page(frag)) + frag->page_offset;
+	return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
 }
 
 /**
@@ -2879,7 +3112,18 @@
 	if (unlikely(!ptr))
 		return NULL;
 
-	return ptr + frag->page_offset;
+	return ptr + skb_frag_off(frag);
+}
+
+/**
+ * skb_frag_page_copy() - sets the page in a fragment from another fragment
+ * @fragto: skb fragment where page is set
+ * @fragfrom: skb fragment page is copied from
+ */
+static inline void skb_frag_page_copy(skb_frag_t *fragto,
+				      const skb_frag_t *fragfrom)
+{
+	fragto->bv_page = fragfrom->bv_page;
 }
 
 /**
@@ -2891,7 +3135,7 @@
 */
 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
 {
-	frag->page.p = page;
+	frag->bv_page = page;
 }
 
 /**
@@ -2927,7 +3171,7 @@
					    enum dma_data_direction dir)
 {
 	return dma_map_page(dev, skb_frag_page(frag),
-			    frag->page_offset + offset, size, dir);
+			    skb_frag_off(frag) + offset, size, dir);
 }
 
 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
@@ -3030,7 +3274,7 @@
 }
 
 /**
- *	skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
@@ -3095,10 +3339,10 @@
 	if (skb_zcopy(skb))
 		return false;
 	if (i) {
-		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
 		return page == skb_frag_page(frag) &&
-		       off == frag->page_offset + skb_frag_size(frag);
+		       off == skb_frag_off(frag) + skb_frag_size(frag);
 	}
 	return false;
 }
@@ -3317,24 +3561,21 @@
	     for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
 
-int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
+				int *err, long *timeo_p,
				const struct sk_buff *skb);
 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
-					  void (*destructor)(struct sock *sk,
-							     struct sk_buff *skb),
-					  int *peeked, int *off, int *err,
+					  int *off, int *err,
					  struct sk_buff **last);
-struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
-					void (*destructor)(struct sock *sk,
-							   struct sk_buff *skb),
-					int *peeked, int *off, int *err,
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
+					struct sk_buff_head *queue,
+					unsigned int flags, int *off, int *err,
					struct sk_buff **last);
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-				    void (*destructor)(struct sock *sk,
-						       struct sk_buff *skb),
-				    int *peeked, int *off, int *err);
+struct sk_buff *__skb_recv_datagram(struct sock *sk,
+				    struct sk_buff_head *sk_queue,
+				    unsigned int flags, int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
 __poll_t datagram_poll(struct file *file, struct socket *sock,
@@ -3348,6 +3589,9 @@
 }
 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+				    struct iov_iter *to, int len,
+				    struct ahash_request *hash);
 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@@ -3362,13 +3606,12 @@
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
-			      int len, __wsum csum);
+			      int len);
 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3379,11 +3622,22 @@
 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+				 unsigned int offset);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+int skb_eth_pop(struct sk_buff *skb);
+int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
+		 const unsigned char *src);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+		  int mac_len, bool ethernet);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
+		 bool ethernet);
+int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
+int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);
 
@@ -3487,22 +3741,43 @@
 /**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
- *	@stamp: pointer to struct timeval to store stamp in
+ *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
 static inline void skb_get_timestamp(const struct sk_buff *skb,
-				     struct timeval *stamp)
+				     struct __kernel_old_timeval *stamp)
 {
-	*stamp = ktime_to_timeval(skb->tstamp);
+	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
+}
+
+static inline void skb_get_new_timestamp(const struct sk_buff *skb,
+					 struct __kernel_sock_timeval *stamp)
+{
+	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+	stamp->tv_sec = ts.tv_sec;
+	stamp->tv_usec = ts.tv_nsec / 1000;
 }
 
 static inline void skb_get_timestampns(const struct sk_buff *skb,
-				       struct timespec *stamp)
+				       struct __kernel_old_timespec *stamp)
 {
-	*stamp = ktime_to_timespec(skb->tstamp);
+	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+	stamp->tv_sec = ts.tv_sec;
+	stamp->tv_nsec = ts.tv_nsec;
+}
+
+static inline void skb_get_new_timestampns(const struct sk_buff *skb,
+					   struct __kernel_timespec *stamp)
+{
+	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+	stamp->tv_sec = ts.tv_sec;
+	stamp->tv_nsec = ts.tv_nsec;
 }
 
 static inline void __net_timestamp(struct sk_buff *skb)
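[Editor's note: the *_new_* variants above back the 2038-safe socket timestamp ABI; tv_sec is a full 64-bit value there, where __kernel_old_timeval truncates on 32-bit userspace. Illustrative only:

static void example_log_rx_stamp(const struct sk_buff *skb, bool time64_abi)
{
	if (time64_abi) {
		struct __kernel_sock_timeval tv;

		skb_get_new_timestamp(skb, &tv);
		pr_debug("rx %lld.%06lld\n", tv.tv_sec, tv.tv_usec);
	} else {
		struct __kernel_old_timeval tv;

		skb_get_timestamp(skb, &tv);
		pr_debug("rx %lld.%06lld\n", (long long)tv.tv_sec,
			 (long long)tv.tv_usec);
	}
}
]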
@@ -3544,13 +3819,19 @@
 #define __it(x, op) (x -= sizeof(u##op))
 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
 	case 32: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
 	case 24: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
 	case 16: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
 	case  8: diffs |= __it_diff(a, b, 64);
		break;
 	case 28: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
 	case 20: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
 	case 12: diffs |= __it_diff(a, b, 64);
+		 fallthrough;
 	case  4: diffs |= __it_diff(a, b, 32);
		break;
 	}
@@ -3611,7 +3892,7 @@
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
- * @skb: clone of the the original outgoing packet
+ * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
@@ -3714,6 +3995,14 @@
 		skb->csum_level++;
 	} else if (skb->ip_summed == CHECKSUM_NONE) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = 0;
+	}
+}
+
+static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		skb->ip_summed = CHECKSUM_NONE;
 		skb->csum_level = 0;
 	}
 }
@@ -3833,18 +4122,16 @@
 	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
 }
 
-static inline void __skb_checksum_convert(struct sk_buff *skb,
-					  __sum16 check, __wsum pseudo)
+static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
 {
 	skb->csum = ~pseudo;
 	skb->ip_summed = CHECKSUM_COMPLETE;
 }
 
-#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
+#define skb_checksum_try_convert(skb, proto, compute_pseudo)	\
 do {									\
 	if (__skb_checksum_convert_check(skb))				\
-		__skb_checksum_convert(skb, check,			\
-				       compute_pseudo(skb, proto));	\
+		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
 } while (0)
 
 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
@@ -3884,47 +4171,151 @@
 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+	return (void *)(skb->_nfct & NFCT_PTRMASK);
 #else
 	return NULL;
 #endif
 }
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-void nf_conntrack_destroy(struct nf_conntrack *nfct);
-static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
 {
-	if (nfct && atomic_dec_and_test(&nfct->use))
-		nf_conntrack_destroy(nfct);
-}
-static inline void nf_conntrack_get(struct nf_conntrack *nfct)
-{
-	if (nfct)
-		atomic_inc(&nfct->use);
-}
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	return skb->_nfct;
+#else
+	return 0UL;
 #endif
+}
+
+static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	skb->_nfct = nfct;
+#endif
+}
+
+#ifdef CONFIG_SKB_EXTENSIONS
+enum skb_ext_id {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+	SKB_EXT_BRIDGE_NF,
+#endif
+#ifdef CONFIG_XFRM
+	SKB_EXT_SEC_PATH,
+#endif
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+	TC_SKB_EXT,
+#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+	SKB_EXT_MPTCP,
+#endif
+#if IS_ENABLED(CONFIG_KCOV)
+	SKB_EXT_KCOV_HANDLE,
+#endif
+	SKB_EXT_NUM, /* must be last */
+};
+
+/**
+ * struct skb_ext - sk_buff extensions
+ * @refcnt: 1 on allocation, deallocated on 0
+ * @offset: offset to add to @data to obtain extension address
+ * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
+ * @data: start of extension data, variable sized
+ *
+ * Note: offsets/lengths are stored in chunks of 8 bytes, this allows
+ * to use 'u8' types while allowing up to 2kb worth of extension data.
+ */
+struct skb_ext {
+	refcount_t refcnt;
+	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
+	u8 chunks;		/* same */
+	char data[] __aligned(8);
+};
+
+struct skb_ext *__skb_ext_alloc(gfp_t flags);
+void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
+		    struct skb_ext *ext);
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_put(struct skb_ext *ext);
+
+static inline void skb_ext_put(struct sk_buff *skb)
 {
-	if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
-		kfree(nf_bridge);
+	if (skb->active_extensions)
+		__skb_ext_put(skb->extensions);
 }
-static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+
+static inline void __skb_ext_copy(struct sk_buff *dst,
+				  const struct sk_buff *src)
 {
-	if (nf_bridge)
-		refcount_inc(&nf_bridge->use);
+	dst->active_extensions = src->active_extensions;
+
+	if (src->active_extensions) {
+		struct skb_ext *ext = src->extensions;
+
+		refcount_inc(&ext->refcnt);
+		dst->extensions = ext;
+	}
 }
-#endif /* CONFIG_BRIDGE_NETFILTER */
-static inline void nf_reset(struct sk_buff *skb)
+
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+	skb_ext_put(dst);
+	__skb_ext_copy(dst, src);
+}
+
+static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
+{
+	return !!ext->offset[i];
+}
+
+static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
+{
+	return skb->active_extensions & (1 << id);
+}
+
+static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
+{
+	if (skb_ext_exist(skb, id))
+		__skb_ext_del(skb, id);
+}
+
+static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
+{
+	if (skb_ext_exist(skb, id)) {
+		struct skb_ext *ext = skb->extensions;
+
+		return (void *)ext + (ext->offset[id] << 3);
+	}
+
+	return NULL;
+}
+
+static inline void skb_ext_reset(struct sk_buff *skb)
+{
+	if (unlikely(skb->active_extensions)) {
+		__skb_ext_put(skb->extensions);
+		skb->active_extensions = 0;
+	}
+}
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+	return unlikely(skb->active_extensions);
+}
+#else
+static inline void skb_ext_put(struct sk_buff *skb) {}
+static inline void skb_ext_reset(struct sk_buff *skb) {}
+static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
+static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
+#endif /* CONFIG_SKB_EXTENSIONS */
+
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb_nfct(skb));
 	skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	nf_bridge_put(skb->nf_bridge);
-#endif
-	skb->nf_bridge = NULL;
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
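[Editor's note: the extension area above replaces dedicated sk_buff pointers (sp, nf_bridge) with one refcounted, copy-on-write blob. Typical producer/consumer pattern, illustrative only and not part of this diff:

static void example_set_chain(struct sk_buff *skb, __u32 chain)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

	if (ext)		/* allocation (or COW) can fail */
		ext->chain = chain;
#endif
}

static __u32 example_get_chain(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	const struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext)
		return ext->chain;
#endif
	return 0;
}
]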
@@ -3941,17 +4332,13 @@
 #endif
 }
 
-/* Note: This doesn't put any conntrack and bridge info in dst. */
+/* Note: This doesn't put any conntrack info in dst. */
 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	dst->_nfct = src->_nfct;
 	nf_conntrack_get(skb_nfct(src));
-#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	dst->nf_bridge = src->nf_bridge;
-	nf_bridge_get(src->nf_bridge);
 #endif
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
 	if (copy)
@@ -3963,9 +4350,6 @@
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb_nfct(dst));
-#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	nf_bridge_put(dst->nf_bridge);
 #endif
 	__nf_copy(dst, src, true);
 }
@@ -3988,12 +4372,19 @@
 { }
 #endif
 
+static inline int secpath_exists(const struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
+#else
+	return 0;
+#endif
+}
+
 static inline bool skb_irq_freeable(const struct sk_buff *skb)
 {
 	return !skb->destructor &&
-#if IS_ENABLED(CONFIG_XFRM)
-	       !skb->sp &&
-#endif
+	       !secpath_exists(skb) &&
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
@@ -4039,10 +4430,10 @@
 	return skb->dst_pending_confirm != 0;
 }
 
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
-	return skb->sp;
+	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
 #else
 	return NULL;
 #endif
@@ -4063,8 +4454,8 @@
 	__wsum	csum;
 	__u16	csum_start;
 };
-#define SKB_SGO_CB_OFFSET	32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
+#define SKB_GSO_CB_OFFSET	32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
 
 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
 {
@@ -4225,7 +4616,7 @@
 	/* Local Checksum Offload.
	 * Compute outer checksum based on the assumption that the
	 * inner checksum will be offloaded later.
-	 * See Documentation/networking/checksum-offloads.txt for
+	 * See Documentation/networking/checksum-offloads.rst for
	 * explanation of how this works.
	 * Fill in outer checksum adjustment (e.g. with sum of outer
	 * pseudo-header) before calling.
@@ -4247,5 +4638,61 @@
 	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	return skb->redirected;
+#else
+	return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 1;
+	skb->from_ingress = from_ingress;
+	if (skb->from_ingress)
+		skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 0;
+#endif
+}
+
+#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS)
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+				       const u64 kcov_handle)
+{
+	/* Do not allocate skb extensions only to set kcov_handle to zero
+	 * (as it is zero by default). However, if the extensions are
+	 * already allocated, update kcov_handle anyway since
+	 * skb_set_kcov_handle can be called to zero a previously set
+	 * value.
+	 */
+	if (skb_has_extensions(skb) || kcov_handle) {
+		u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE);
+
+		if (kcov_handle_ptr)
+			*kcov_handle_ptr = kcov_handle;
+	}
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+	u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE);
+
+	return kcov_handle ? *kcov_handle : 0;
+}
+#else
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+				       const u64 kcov_handle) { }
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; }
+#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
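[Editor's note: skb_set_redirected()/skb_is_redirected() in the final hunk replace the old tc_redirected/tc_from_ingress pair and are keyed off CONFIG_NET_REDIRECT rather than CONFIG_NET_CLS_ACT. How a redirecting action might use them, illustrative only:

static int example_redirect_to(struct sk_buff *skb, struct net_device *dev,
			       bool at_ingress)
{
	/* mark the skb; on the ingress path this also clears skb->tstamp */
	skb_set_redirected(skb, at_ingress);
	skb->dev = dev;
	return dev_queue_xmit(skb);
}
]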