.. | .. |
---|
291 | 291 | return skb; |
---|
292 | 292 | } |
---|
293 | 293 | |
---|
| 294 | +#ifdef CONFIG_NET_OOB |
---|
| 295 | + |
---|
| 296 | +struct sk_buff *__netdev_alloc_oob_skb(struct net_device *dev, size_t len, |
---|
| 297 | + size_t headroom, gfp_t gfp_mask) |
---|
| 298 | +{ |
---|
| 299 | + struct sk_buff *skb; |
---|
| 300 | + |
---|
| 301 | + headroom = ALIGN(NET_SKB_PAD + headroom, NET_SKB_PAD); |
---|
| 302 | + skb = __alloc_skb(len + headroom, gfp_mask, |
---|
| 303 | + SKB_ALLOC_RX, NUMA_NO_NODE); |
---|
| 304 | + if (!skb) |
---|
| 305 | + return NULL; |
---|
| 306 | + |
---|
| 307 | + skb_reserve(skb, headroom); |
---|
| 308 | + skb->dev = dev; |
---|
| 309 | + skb->oob = true; |
---|
| 310 | + |
---|
| 311 | + return skb; |
---|
| 312 | +} |
---|
| 313 | +EXPORT_SYMBOL_GPL(__netdev_alloc_oob_skb); |
---|
| 314 | + |
---|
| 315 | +void __netdev_free_oob_skb(struct net_device *dev, struct sk_buff *skb) |
---|
| 316 | +{ |
---|
| 317 | + skb->oob = false; |
---|
| 318 | + skb->oob_clone = false; |
---|
| 319 | + dev_kfree_skb(skb); |
---|
| 320 | +} |
---|
| 321 | +EXPORT_SYMBOL_GPL(__netdev_free_oob_skb); |
---|
| 322 | + |
---|
| 323 | +void netdev_reset_oob_skb(struct net_device *dev, struct sk_buff *skb, |
---|
| 324 | + size_t headroom) |
---|
| 325 | +{ |
---|
| 326 | + unsigned char *data = skb->head; /* Always from kmalloc_reserve(). */ |
---|
| 327 | + |
---|
| 328 | + if (WARN_ON_ONCE(!skb->oob || skb->oob_clone)) |
---|
| 329 | + return; |
---|
| 330 | + |
---|
| 331 | + memset(skb, 0, offsetof(struct sk_buff, tail)); |
---|
| 332 | + __build_skb_around(skb, data, 0); |
---|
| 333 | + headroom = ALIGN(NET_SKB_PAD + headroom, NET_SKB_PAD); |
---|
| 334 | + skb_reserve(skb, headroom); |
---|
| 335 | + skb->oob = true; |
---|
| 336 | + skb->dev = dev; |
---|
| 337 | +} |
---|
| 338 | +EXPORT_SYMBOL_GPL(netdev_reset_oob_skb); |
---|
| 339 | + |
---|
| 340 | +struct sk_buff *skb_alloc_oob_head(gfp_t gfp_mask) |
---|
| 341 | +{ |
---|
| 342 | + struct sk_buff *skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask); |
---|
| 343 | + |
---|
| 344 | + if (!skb) |
---|
| 345 | + return NULL; |
---|
| 346 | + |
---|
| 347 | + /* |
---|
| 348 | + * skb heads allocated for out-of-band traffic should be |
---|
| 349 | + * reserved for clones, so memset is extraneous in the sense |
---|
| 350 | + * that skb_morph_oob() should follow the allocation. |
---|
| 351 | + */ |
---|
| 352 | + memset(skb, 0, offsetof(struct sk_buff, tail)); |
---|
| 353 | + refcount_set(&skb->users, 1); |
---|
| 354 | + skb->oob_clone = true; |
---|
| 355 | + skb_set_kcov_handle(skb, kcov_common_handle()); |
---|
| 356 | + |
---|
| 357 | + return skb; |
---|
| 358 | +} |
---|
| 359 | +EXPORT_SYMBOL_GPL(skb_alloc_oob_head); |
---|
| 360 | + |
---|
| 361 | +static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb); |
---|
| 362 | + |
---|
| 363 | +void skb_morph_oob_skb(struct sk_buff *n, struct sk_buff *skb) |
---|
| 364 | +{ |
---|
| 365 | + __skb_clone(n, skb); |
---|
| 366 | + n->oob = true; |
---|
| 367 | + n->oob_clone = true; |
---|
| 368 | + skb->oob_cloned = true; |
---|
| 369 | +} |
---|
| 370 | +EXPORT_SYMBOL_GPL(skb_morph_oob_skb); |
---|
| 371 | + |
---|
| 372 | +bool skb_release_oob_skb(struct sk_buff *skb, int *dref) |
---|
| 373 | +{ |
---|
| 374 | + struct skb_shared_info *shinfo = skb_shinfo(skb); |
---|
| 375 | + |
---|
| 376 | + if (!skb_unref(skb)) |
---|
| 377 | + return false; |
---|
| 378 | + |
---|
| 379 | + /* |
---|
| 380 | + * ->nohdr is never set for oob shells, so we always refcount |
---|
| 381 | + * the full data (header + payload) when cloned. |
---|
| 382 | + */ |
---|
| 383 | + *dref = skb->cloned ? atomic_sub_return(1, &shinfo->dataref) : 0; |
---|
| 384 | + |
---|
| 385 | + return true; |
---|
| 386 | +} |
---|
| 387 | +EXPORT_SYMBOL_GPL(skb_release_oob_skb); |
---|
| 388 | + |
---|
| 389 | +__weak bool skb_oob_recycle(struct sk_buff *skb) |
---|
| 390 | +{ |
---|
| 391 | + return false; |
---|
| 392 | +} |
---|
| 393 | + |
---|
| 394 | +#endif /* CONFIG_NET_OOB */ |
---|
| 395 | + |
---|
294 | 396 | /** |
---|
295 | 397 | * __build_skb - build a network buffer |
---|
296 | 398 | * @data: data buffer provided by caller |
---|
.. | .. |
---|
691 | 793 | |
---|
void __kfree_skb(struct sk_buff *skb)
{
	/*
	 * Out-of-band buffers may be reclaimed by the oob core instead
	 * of going through the regular release path.
	 * NOTE(review): recycle_oob_skb() is presumably a header-level
	 * wrapper around the __weak skb_oob_recycle() — confirm.
	 */
	if (!recycle_oob_skb(skb)) {
		skb_release_all(skb);
		kfree_skbmem(skb);
	}
}
---|
.. | .. |
---|
884 | 989 | struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); |
---|
885 | 990 | |
---|
886 | 991 | /* drop skb->head and call any destructors for packet */ |
---|
| 992 | + if (recycle_oob_skb(skb)) |
---|
| 993 | + return; |
---|
| 994 | + |
---|
887 | 995 | skb_release_all(skb); |
---|
888 | 996 | |
---|
889 | 997 | /* record skb to CPU local list */ |
---|
.. | .. |
---|
903 | 1011 | } |
---|
void __kfree_skb_defer(struct sk_buff *skb)
{
	/* Oob buffers may be recycled instead of deferred-freed. */
	if (!recycle_oob_skb(skb))
		_kfree_skb_defer(skb);
}
---|
908 | 1019 | |
---|
.. | .. |
---|
926 | 1037 | return; |
---|
927 | 1038 | } |
---|
928 | 1039 | |
---|
| 1040 | + if (recycle_oob_skb(skb)) |
---|
| 1041 | + return; |
---|
| 1042 | + |
---|
929 | 1043 | _kfree_skb_defer(skb); |
---|
930 | 1044 | } |
---|
931 | 1045 | EXPORT_SYMBOL(napi_consume_skb); |
---|
.. | .. |
---|
946 | 1060 | skb_dst_copy(new, old); |
---|
947 | 1061 | __skb_ext_copy(new, old); |
---|
948 | 1062 | __nf_copy(new, old, false); |
---|
| 1063 | + __skb_oob_copy(new, old); |
---|
949 | 1064 | |
---|
950 | 1065 | /* Note : this field could be in headers_start/headers_end section |
---|
951 | 1066 | * It is not yet because we do not want to have a 16 bit hole |
---|