@@ -16,6 +16,7 @@
 #include <linux/overflow.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/percpu-refcount.h>
 
 
 /*
@@ -115,6 +116,10 @@
 /* Objects are reclaimable */
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
+
+/* Slab deactivation flag */
+#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
@@ -150,10 +155,6 @@
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 
-void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *);
-void memcg_destroy_kmem_caches(struct mem_cgroup *);
-
 /*
  * Please use this macro to create slab caches. Simply specify the
  * name of the structure and maybe some flags that are listed above.
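The KMEM_CACHE() macro referred to in the comment above wraps kmem_cache_create() so that the cache name, object size and alignment are all derived from the structure type. Objects then come from kmem_cache_alloc() and go back via kmem_cache_free(). A minimal usage sketch; struct my_object and the cache pointer are illustrative and not part of this patch:

struct my_object {
	unsigned long id;
	char name[32];
};

static struct kmem_cache *my_object_cache;

static int __init my_module_init(void)
{
	/* Name, size and alignment are all taken from struct my_object. */
	my_object_cache = KMEM_CACHE(my_object, SLAB_HWCACHE_ALIGN);
	if (!my_object_cache)
		return -ENOMEM;
	return 0;
}

static void __exit my_module_exit(void)
{
	/* All objects must have been returned to the cache by now. */
	kmem_cache_destroy(my_object_cache);
}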
@@ -180,10 +181,10 @@
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
-void kzfree(const void *);
+void kfree_sensitive(const void *);
+size_t __ksize(const void *);
 size_t ksize(const void *);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
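The kzfree() to kfree_sensitive() rename above makes the semantics explicit: the allocation is zeroed before it is handed back to the allocator, which is what callers holding key material rely on. A hedged usage sketch; the function and buffer names are invented:

static int use_session_key(const u8 *src, size_t len)
{
	u8 *key = kmalloc(len, GFP_KERNEL);

	if (!key)
		return -ENOMEM;

	memcpy(key, src, len);
	/* ... perform the crypto operation with the key ... */

	/* Clear the key bytes before the memory goes back to the slab. */
	kfree_sensitive(key);
	return 0;
}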
@@ -199,7 +200,7 @@
  * alignment larger than the alignment of a 64-bit integer.
  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  */
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 && !IS_ENABLED(CONFIG_ROCKCHIP_MINI_KERNEL)
 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
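For the #if above, an architecture opts in simply by defining ARCH_DMA_MINALIGN in one of its headers; slab.h then raises ARCH_KMALLOC_MINALIGN and KMALLOC_MIN_SIZE to match. A sketch of such an arch definition; the 128-byte value is only an example for a CPU with 128-byte cache lines and non-coherent DMA:

/* arch/<arch>/include/asm/cache.h (illustrative) */
#define L1_CACHE_SHIFT		7
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/* Non-coherent DMA: kmalloc() buffers must not share a cache line. */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES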
@@ -214,6 +215,18 @@
  */
 #ifndef ARCH_SLAB_MINALIGN
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline unsigned int arch_slab_minalign(void)
+{
+	return ARCH_SLAB_MINALIGN;
+}
 #endif
 
 /*
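Because the new hook is guarded by #ifndef arch_slab_minalign, an architecture overrides it by providing both the inline function and a same-named macro somewhere that is visible before this part of <linux/slab.h>. The sketch below is hypothetical: cache_line_size() is the generic helper from <linux/cache.h>, while dma_is_noncoherent is an invented flag standing in for whatever the arch detects at boot:

/* Hypothetical arch override (placement illustrative). */
#include <linux/cache.h>

extern bool dma_is_noncoherent;	/* invented: set during early boot */

static inline unsigned int arch_slab_minalign(void)
{
	/* Must return a power of two that is >= ARCH_SLAB_MINALIGN. */
	if (dma_is_noncoherent)
		return cache_line_size();
	return __alignof__(unsigned long long);
}
#define arch_slab_minalign arch_slab_minalign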
@@ -276,7 +289,7 @@
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
-/* Maximum order allocatable via the slab allocagtor */
+/* Maximum order allocatable via the slab allocator */
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 
 /*
@@ -488,48 +501,51 @@
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
  *
- * The @flags argument may be one of:
+ * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
+ * bytes. For @size of power of two bytes, the alignment is also guaranteed
+ * to be at least to the size.
  *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ * The @flags argument may be one of the GFP flags defined at
+ * include/linux/gfp.h and described at
+ * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
  *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ * The recommended usage of the @flags is described at
+ * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
  *
- * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
- *   For example, use this inside interrupt handlers.
+ * Below is a brief outline of the most useful GFP flags
  *
- * %GFP_HIGHUSER - Allocate pages from high memory.
+ * %GFP_KERNEL
+ *	Allocate normal kernel ram. May sleep.
  *
- * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ * %GFP_NOWAIT
+ *	Allocation will not sleep.
  *
- * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ * %GFP_ATOMIC
+ *	Allocation will not sleep. May use emergency pools.
  *
- * %GFP_NOWAIT - Allocation will not sleep.
- *
- * %__GFP_THISNODE - Allocate node-local memory only.
- *
- * %GFP_DMA - Allocation suitable for DMA.
- *   Should only be used for kmalloc() caches. Otherwise, use a
- *   slab created with SLAB_DMA.
+ * %GFP_HIGHUSER
+ *	Allocate memory from high memory on behalf of user.
  *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
  *
- * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ * %__GFP_HIGH
+ *	This allocation has high priority and may use emergency pools.
  *
- * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
- *   (think twice before using).
+ * %__GFP_NOFAIL
+ *	Indicate that this allocation is in no way allowed to fail
+ *	(think twice before using).
  *
- * %__GFP_NORETRY - If memory is not immediately available,
- *   then give up at once.
+ * %__GFP_NORETRY
+ *	If memory is not immediately available,
+ *	then give up at once.
  *
- * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ * %__GFP_NOWARN
+ *	If allocation fails, don't issue any warnings.
  *
- * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
- *   eventually.
- *
- * There are other flags available as well, but these are not intended
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
+ * %__GFP_RETRY_MAYFAIL
+ *	Try really hard to succeed the allocation but fail
+ *	eventually.
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
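As a concrete reading of the reworked kmalloc() documentation above: GFP_KERNEL is the usual choice in process context, GFP_ATOMIC is for contexts that must not sleep, and the new power-of-two guarantee means, for example, that a 256-byte request comes back at least 256-byte aligned. A brief sketch; struct foo and the wrapper functions are invented:

struct foo {
	u32 a;
	u64 b;
};

/* Process context: may sleep while memory is reclaimed. */
static struct foo *foo_create(void)
{
	return kmalloc(sizeof(struct foo), GFP_KERNEL);
}

/* Interrupt or spinlock-held context: must not sleep. */
static struct foo *foo_create_atomic(void)
{
	return kmalloc(sizeof(struct foo), GFP_ATOMIC);
}

/* Power-of-two sizes are aligned to at least the size itself. */
static void *alloc_ring_buffer(gfp_t gfp)
{
	return kmalloc(256, gfp);	/* at least 256-byte aligned */
}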
@@ -553,26 +569,6 @@
 	return __kmalloc(size, flags);
 }
 
-/*
- * Determine size used for the nth kmalloc cache.
- * return size or 0 if a kmalloc cache for that
- * size does not exist
- */
-static __always_inline unsigned int kmalloc_size(unsigned int n)
-{
-#ifndef CONFIG_SLOB
-	if (n > 2)
-		return 1U << n;
-
-	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
-		return 96;
-
-	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
-		return 192;
-#endif
-	return 0;
-}
-
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 #ifndef CONFIG_SLOB
@@ -590,69 +586,6 @@
 #endif
 	return __kmalloc_node(size, flags, node);
 }
-
-struct memcg_cache_array {
-	struct rcu_head rcu;
-	struct kmem_cache *entries[0];
-};
-
-/*
- * This is the main placeholder for memcg-related information in kmem caches.
- * Both the root cache and the child caches will have it. For the root cache,
- * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system. To allow the
- * array to be accessed without taking any locks, on relocation we free the old
- * version only after a grace period.
- *
- * Root and child caches hold different metadata.
- *
- * @root_cache:	Common to root and child caches. NULL for root, pointer to
- *		the root cache for children.
- *
- * The following fields are specific to root caches.
- *
- * @memcg_caches: kmemcg ID indexed table of child caches. This table is
- *		used to index child cachces during allocation and cleared
- *		early during shutdown.
- *
- * @root_caches_node: List node for slab_root_caches list.
- *
- * @children:	List of all child caches. While the child caches are also
- *		reachable through @memcg_caches, a child cache remains on
- *		this list until it is actually destroyed.
- *
- * The following fields are specific to child caches.
- *
- * @memcg:	Pointer to the memcg this cache belongs to.
- *
- * @children_node: List node for @root_cache->children list.
- *
- * @kmem_caches_node: List node for @memcg->kmem_caches list.
- */
-struct memcg_cache_params {
-	struct kmem_cache *root_cache;
-	union {
-		struct {
-			struct memcg_cache_array __rcu *memcg_caches;
-			struct list_head __root_caches_node;
-			struct list_head children;
-			bool dying;
-		};
-		struct {
-			struct mem_cgroup *memcg;
-			struct list_head children_node;
-			struct list_head kmem_caches_node;
-
-			void (*deact_fn)(struct kmem_cache *);
-			union {
-				struct rcu_head deact_rcu_head;
-				struct work_struct deact_work;
-			};
-		};
-	};
-};
-
-int memcg_update_all_caches(int num_memcgs);
 
 /**
  * kmalloc_array - allocate memory for an array.
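The kmalloc_array() helper whose kernel-doc begins above relies on the <linux/overflow.h> helpers included near the top of the file: the element count and element size are multiplied with overflow checking, so a huge count cannot silently wrap into a small allocation. A small usage sketch; the function and names are invented:

static u32 *alloc_table(size_t nr_entries)
{
	/* Returns NULL if nr_entries * sizeof(u32) would overflow. */
	u32 *table = kmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;
	return table;
}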