@@ -63,6 +63,7 @@
 #include <linux/errqueue.h>
 #include <linux/prefetch.h>
 #include <linux/if_vlan.h>
+#include <linux/locallock.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -330,6 +331,8 @@
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -337,10 +340,10 @@
 	unsigned long flags;
 	void *data;
 
-	local_irq_save(flags);
+	local_lock_irqsave(netdev_alloc_lock, flags);
 	nc = this_cpu_ptr(&netdev_alloc_cache);
 	data = page_frag_alloc(nc, fragsz, gfp_mask);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(netdev_alloc_lock, flags);
 	return data;
 }
 
@@ -361,9 +364,13 @@
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -416,13 +423,13 @@
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	local_irq_save(flags);
+	local_lock_irqsave(netdev_alloc_lock, flags);
 
 	nc = this_cpu_ptr(&netdev_alloc_cache);
 	data = page_frag_alloc(nc, len, gfp_mask);
 	pfmemalloc = nc->pfmemalloc;
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(netdev_alloc_lock, flags);
 
 	if (unlikely(!data))
 		return NULL;
@@ -466,6 +473,7 @@
 	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -488,7 +496,10 @@
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = page_frag_alloc(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -499,7 +510,7 @@
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -731,23 +742,26 @@
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -762,6 +776,7 @@
 			      nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {
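
The conversion follows one idiom throughout: critical sections that protected the per-CPU allocator caches with local_irq_save()/local_irq_restore() (or with implicit preemption disabling via this_cpu_ptr()) now go through named local locks. On a non-RT build the RT tree's locallock.h maps local_lock_irqsave() back to local_irq_save() and get_locked_var() to get_cpu_var(), so mainline behaviour should be unchanged; on PREEMPT_RT the lock becomes a per-CPU sleeping lock, keeping the section preemptible while still serializing access to this CPU's cache. Note also that __napi_alloc_skb now snapshots nc->page.pfmemalloc into a local variable before releasing the lock, since nc must not be dereferenced once put_locked_var() has run.

Below is a minimal sketch of the same idiom, assuming an RT-patched kernel that provides <linux/locallock.h>; the demo_cache structure and demo_inc() function are hypothetical, purely for illustration:

	#include <linux/percpu.h>
	#include <linux/locallock.h>

	/* Hypothetical per-CPU cache, for illustration only. */
	struct demo_cache {
		unsigned int count;
	};

	static DEFINE_PER_CPU(struct demo_cache, demo_cache);
	/* !PREEMPT_RT: the lock ops fall back to IRQ/preempt
	 * disabling; PREEMPT_RT: a per-CPU lock, so the critical
	 * section stays preemptible.
	 */
	static DEFINE_LOCAL_IRQ_LOCK(demo_lock);

	static unsigned int demo_inc(void)
	{
		struct demo_cache *dc;
		unsigned int old;

		/* get_locked_var() acquires demo_lock and yields this
		 * CPU's demo_cache instance; pairs with put_locked_var().
		 */
		dc = &get_locked_var(demo_lock, demo_cache);
		old = dc->count++;
		put_locked_var(demo_lock, demo_cache);
		return old;
	}

The two lock flavours mirror what they replace: get_locked_var()/put_locked_var() stand in where the code previously relied on this_cpu_ptr() alone, and local_lock_irqsave()/local_unlock_irqrestore() where it previously used local_irq_save()/local_irq_restore(). Mainline kernels from v5.8 onward provide a comparable facility as local_lock_t in <linux/local_lock.h>.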
---|