2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/mm/slab.c
....@@ -100,6 +100,7 @@
100100 #include <linux/seq_file.h>
101101 #include <linux/notifier.h>
102102 #include <linux/kallsyms.h>
103
+#include <linux/kfence.h>
103104 #include <linux/cpu.h>
104105 #include <linux/sysctl.h>
105106 #include <linux/module.h>
....@@ -362,29 +363,6 @@
362363
363364 #endif
364365
365
-#ifdef CONFIG_DEBUG_SLAB_LEAK
366
-
367
-static inline bool is_store_user_clean(struct kmem_cache *cachep)
368
-{
369
- return atomic_read(&cachep->store_user_clean) == 1;
370
-}
371
-
372
-static inline void set_store_user_clean(struct kmem_cache *cachep)
373
-{
374
- atomic_set(&cachep->store_user_clean, 1);
375
-}
376
-
377
-static inline void set_store_user_dirty(struct kmem_cache *cachep)
378
-{
379
- if (is_store_user_clean(cachep))
380
- atomic_set(&cachep->store_user_clean, 0);
381
-}
382
-
383
-#else
384
-static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
385
-
386
-#endif
387
-
388366 /*
389367 * Do not go above this order unless 0 objects fit into the slab or
390368 * overridden on the command line.
....@@ -393,12 +371,6 @@
393371 #define SLAB_MAX_ORDER_LO 0
394372 static int slab_max_order = SLAB_MAX_ORDER_LO;
395373 static bool slab_max_order_set __initdata;
396
-
397
-static inline struct kmem_cache *virt_to_cache(const void *obj)
398
-{
399
- struct page *page = virt_to_head_page(obj);
400
- return page->slab_cache;
401
-}
402374
403375 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
404376 unsigned int idx)
....@@ -617,6 +589,16 @@
617589 return nr;
618590 }
619591
592
+/* &alien->lock must be held by alien callers. */
593
+static __always_inline void __free_one(struct array_cache *ac, void *objp)
594
+{
595
+ /* Avoid trivial double-free. */
596
+ if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
597
+ WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
598
+ return;
599
+ ac->entry[ac->avail++] = objp;
600
+}
601
+
620602 #ifndef CONFIG_NUMA
621603
622604 #define drain_alien_cache(cachep, alien) do { } while (0)
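
The new __free_one() helper introduced in the hunk above rejects a free whose pointer matches the entry most recently pushed onto the per-CPU array, which catches the cheapest-to-detect double-free pattern. Below is a minimal userspace sketch of the same check; the struct and function names are illustrative only and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-CPU array cache of free objects. */
struct array_cache_sketch {
	unsigned int avail;
	void *entry[128];
};

/* Mirror of the guard: refuse a free whose pointer equals the last entry pushed. */
static bool free_one_sketch(struct array_cache_sketch *ac, void *objp)
{
	if (ac->avail > 0 && ac->entry[ac->avail - 1] == objp) {
		fprintf(stderr, "double free of %p detected\n", objp);
		return false;
	}
	ac->entry[ac->avail++] = objp;
	return true;
}

int main(void)
{
	struct array_cache_sketch ac = { .avail = 0 };
	int obj;

	free_one_sketch(&ac, &obj);	/* first free is accepted */
	free_one_sketch(&ac, &obj);	/* immediate re-free is caught and reported */
	return 0;
}
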
....@@ -677,12 +659,11 @@
677659 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
678660 {
679661 struct alien_cache **alc_ptr;
680
- size_t memsize = sizeof(void *) * nr_node_ids;
681662 int i;
682663
683664 if (limit > 1)
684665 limit = 12;
685
- alc_ptr = kzalloc_node(memsize, gfp, node);
666
+ alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
686667 if (!alc_ptr)
687668 return NULL;
688669
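
Replacing kzalloc_node(sizeof(void *) * nr_node_ids, ...) with kcalloc_node(nr_node_ids, sizeof(void *), ...) moves the count-times-size multiplication behind an overflow check. A small userspace sketch of that guard, assuming a GCC/Clang-style __builtin_mul_overflow(); the function name is made up for illustration.

#include <stddef.h>
#include <stdlib.h>

/*
 * Overflow-checked "allocate n zeroed elements", mirroring what kcalloc()
 * adds over an open-coded n * size handed to kzalloc().
 */
static void *calloc_checked_sketch(size_t n, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;		/* refuse a wrapped allocation size */
	return calloc(1, bytes);	/* zero-initialized, like kzalloc() */
}

int main(void)
{
	void *p = calloc_checked_sketch((size_t)-1, 8);		/* multiplication wraps -> NULL */

	free(calloc_checked_sketch(16, sizeof(void *)));	/* normal case succeeds */
	return p == NULL ? 0 : 1;
}
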
....@@ -797,7 +778,7 @@
797778 STATS_INC_ACOVERFLOW(cachep);
798779 __drain_alien_cache(cachep, ac, page_node, &list);
799780 }
800
- ac->entry[ac->avail++] = objp;
781
+ __free_one(ac, objp);
801782 spin_unlock(&alien->lock);
802783 slabs_destroy(cachep, &list);
803784 } else {
....@@ -952,10 +933,10 @@
952933 * To protect lockless access to n->shared during irq disabled context.
953934 * If n->shared isn't NULL in irq disabled context, accessing to it is
954935 * guaranteed to be valid until irq is re-enabled, because it will be
955
- * freed after synchronize_sched().
936
+ * freed after synchronize_rcu().
956937 */
957938 if (old_shared && force_change)
958
- synchronize_sched();
939
+ synchronize_rcu();
959940
960941 fail:
961942 kfree(old_shared);
....@@ -991,10 +972,8 @@
991972
992973 /* cpu is dead; no one can alloc from it. */
993974 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
994
- if (nc) {
995
- free_block(cachep, nc->entry, nc->avail, node, &list);
996
- nc->avail = 0;
997
- }
975
+ free_block(cachep, nc->entry, nc->avail, node, &list);
976
+ nc->avail = 0;
998977
999978 if (!cpumask_empty(mask)) {
1000979 spin_unlock_irq(&n->list_lock);
....@@ -1082,9 +1061,9 @@
10821061 * offline.
10831062 *
10841063 * Even if all the cpus of a node are down, we don't free the
1085
- * kmem_list3 of any cache. This to avoid a race between cpu_down, and
1064
+ * kmem_cache_node of any cache. This to avoid a race between cpu_down, and
10861065 * a kmalloc allocation from another cpu for memory from the node of
1087
- * the cpu going down. The list3 structure is usually allocated from
1066
+ * the cpu going down. The kmem_cache_node structure is usually allocated from
10881067 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
10891068 */
10901069 int slab_dead_cpu(unsigned int cpu)
....@@ -1238,7 +1217,7 @@
12381217 * page orders on machines with more than 32MB of memory if
12391218 * not overridden on the command line.
12401219 */
1241
- if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1220
+ if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
12421221 slab_max_order = SLAB_MAX_ORDER_HI;
12431222
12441223 /* Bootstrap is tricky, because several objects are allocated
....@@ -1271,7 +1250,6 @@
12711250 nr_node_ids * sizeof(struct kmem_cache_node *),
12721251 SLAB_HWCACHE_ALIGN, 0, 0);
12731252 list_add(&kmem_cache->list, &slab_caches);
1274
- memcg_link_cache(kmem_cache);
12751253 slab_state = PARTIAL;
12761254
12771255 /*
....@@ -1279,9 +1257,10 @@
12791257 * structures first. Without this, further allocations will bug.
12801258 */
12811259 kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1282
- kmalloc_info[INDEX_NODE].name,
1283
- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
1284
- 0, kmalloc_size(INDEX_NODE));
1260
+ kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1261
+ kmalloc_info[INDEX_NODE].size,
1262
+ ARCH_KMALLOC_FLAGS, 0,
1263
+ kmalloc_info[INDEX_NODE].size);
12851264 slab_state = PARTIAL_NODE;
12861265 setup_kmalloc_cache_index_table();
12871266
....@@ -1392,7 +1371,6 @@
13921371 int nodeid)
13931372 {
13941373 struct page *page;
1395
- int nr_pages;
13961374
13971375 flags |= cachep->allocflags;
13981376
....@@ -1402,17 +1380,7 @@
14021380 return NULL;
14031381 }
14041382
1405
- if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1406
- __free_pages(page, cachep->gfporder);
1407
- return NULL;
1408
- }
1409
-
1410
- nr_pages = (1 << cachep->gfporder);
1411
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1412
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
1413
- else
1414
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
1415
-
1383
+ account_slab_page(page, cachep->gfporder, cachep);
14161384 __SetPageSlab(page);
14171385 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
14181386 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
....@@ -1427,12 +1395,6 @@
14271395 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
14281396 {
14291397 int order = cachep->gfporder;
1430
- unsigned long nr_freed = (1 << order);
1431
-
1432
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1433
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
1434
- else
1435
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
14361398
14371399 BUG_ON(!PageSlab(page));
14381400 __ClearPageSlabPfmemalloc(page);
....@@ -1441,8 +1403,8 @@
14411403 page->mapping = NULL;
14421404
14431405 if (current->reclaim_state)
1444
- current->reclaim_state->reclaimed_slab += nr_freed;
1445
- memcg_uncharge_slab(page, order, cachep);
1406
+ current->reclaim_state->reclaimed_slab += 1 << order;
1407
+ unaccount_slab_page(page, order, cachep);
14461408 __free_pages(page, order);
14471409 }
14481410
....@@ -1460,7 +1422,7 @@
14601422 #if DEBUG
14611423 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
14621424 {
1463
- if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1425
+ if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
14641426 (cachep->size % PAGE_SIZE) == 0)
14651427 return true;
14661428
....@@ -1468,53 +1430,17 @@
14681430 }
14691431
14701432 #ifdef CONFIG_DEBUG_PAGEALLOC
1471
-static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1472
- unsigned long caller)
1473
-{
1474
- int size = cachep->object_size;
1475
-
1476
- addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1477
-
1478
- if (size < 5 * sizeof(unsigned long))
1479
- return;
1480
-
1481
- *addr++ = 0x12345678;
1482
- *addr++ = caller;
1483
- *addr++ = smp_processor_id();
1484
- size -= 3 * sizeof(unsigned long);
1485
- {
1486
- unsigned long *sptr = &caller;
1487
- unsigned long svalue;
1488
-
1489
- while (!kstack_end(sptr)) {
1490
- svalue = *sptr++;
1491
- if (kernel_text_address(svalue)) {
1492
- *addr++ = svalue;
1493
- size -= sizeof(unsigned long);
1494
- if (size <= sizeof(unsigned long))
1495
- break;
1496
- }
1497
- }
1498
-
1499
- }
1500
- *addr++ = 0x87654321;
1501
-}
1502
-
1503
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1504
- int map, unsigned long caller)
1433
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
15051434 {
15061435 if (!is_debug_pagealloc_cache(cachep))
15071436 return;
15081437
1509
- if (caller)
1510
- store_stackinfo(cachep, objp, caller);
1511
-
1512
- kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1438
+ __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
15131439 }
15141440
15151441 #else
15161442 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1517
- int map, unsigned long caller) {}
1443
+ int map) {}
15181444
15191445 #endif
15201446
....@@ -1662,7 +1588,7 @@
16621588
16631589 if (cachep->flags & SLAB_POISON) {
16641590 check_poison_obj(cachep, objp);
1665
- slab_kernel_map(cachep, objp, 1, 0);
1591
+ slab_kernel_map(cachep, objp, 1);
16661592 }
16671593 if (cachep->flags & SLAB_RED_ZONE) {
16681594 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
....@@ -1707,12 +1633,16 @@
17071633 kmem_cache_free(cachep->freelist_cache, freelist);
17081634 }
17091635
1636
+/*
1637
+ * Update the size of the caches before calling slabs_destroy as it may
1638
+ * recursively call kfree.
1639
+ */
17101640 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
17111641 {
17121642 struct page *page, *n;
17131643
1714
- list_for_each_entry_safe(page, n, list, lru) {
1715
- list_del(&page->lru);
1644
+ list_for_each_entry_safe(page, n, list, slab_list) {
1645
+ list_del(&page->slab_list);
17161646 slab_destroy(cachep, page);
17171647 }
17181648 }
....@@ -1728,6 +1658,8 @@
17281658 * This could be made much more intelligent. For now, try to avoid using
17291659 * high order pages for slabs. When the gfp() functions are more friendly
17301660 * towards high-order requests, this should be changed.
1661
+ *
1662
+ * Return: number of left-over bytes in a slab
17311663 */
17321664 static size_t calculate_slab_order(struct kmem_cache *cachep,
17331665 size_t size, slab_flags_t flags)
....@@ -1858,8 +1790,7 @@
18581790 }
18591791
18601792 slab_flags_t kmem_cache_flags(unsigned int object_size,
1861
- slab_flags_t flags, const char *name,
1862
- void (*ctor)(void *))
1793
+ slab_flags_t flags, const char *name)
18631794 {
18641795 return flags;
18651796 }
....@@ -1984,6 +1915,8 @@
19841915 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
19851916 * cacheline. This can be beneficial if you're counting cycles as closely
19861917 * as davem.
1918
+ *
1919
+ * Return: a pointer to the created cache or %NULL in case of error
19871920 */
19881921 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
19891922 {
....@@ -2084,7 +2017,7 @@
20842017 * to check size >= 256. It guarantees that all necessary small
20852018 * sized slab is initialized in current slab initialization sequence.
20862019 */
2087
- if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2020
+ if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
20882021 size >= 256 && cachep->object_size > cache_line_size()) {
20892022 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
20902023 size_t tmp_size = ALIGN(size, PAGE_SIZE);
....@@ -2224,8 +2157,8 @@
22242157 spin_lock(&n->list_lock);
22252158 free_block(cachep, ac->entry, ac->avail, node, &list);
22262159 spin_unlock(&n->list_lock);
2227
- slabs_destroy(cachep, &list);
22282160 ac->avail = 0;
2161
+ slabs_destroy(cachep, &list);
22292162 }
22302163
22312164 static void drain_cpu_caches(struct kmem_cache *cachep)
....@@ -2272,8 +2205,8 @@
22722205 goto out;
22732206 }
22742207
2275
- page = list_entry(p, struct page, lru);
2276
- list_del(&page->lru);
2208
+ page = list_entry(p, struct page, slab_list);
2209
+ list_del(&page->slab_list);
22772210 n->free_slabs--;
22782211 n->total_slabs--;
22792212 /*
....@@ -2318,13 +2251,6 @@
23182251 }
23192252 return (ret ? 1 : 0);
23202253 }
2321
-
2322
-#ifdef CONFIG_MEMCG
2323
-void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
2324
-{
2325
- __kmem_cache_shrink(cachep);
2326
-}
2327
-#endif
23282254
23292255 int __kmem_cache_shutdown(struct kmem_cache *cachep)
23302256 {
....@@ -2379,8 +2305,6 @@
23792305 /* Slab management obj is off-slab. */
23802306 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
23812307 local_flags, nodeid);
2382
- if (!freelist)
2383
- return NULL;
23842308 } else {
23852309 /* We will use last bytes at the slab for freelist */
23862310 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
....@@ -2438,7 +2362,7 @@
24382362 /* need to poison the objs? */
24392363 if (cachep->flags & SLAB_POISON) {
24402364 poison_obj(cachep, objp, POISON_FREE);
2441
- slab_kernel_map(cachep, objp, 0, 0);
2365
+ slab_kernel_map(cachep, objp, 0);
24422366 }
24432367 }
24442368 #endif
....@@ -2595,11 +2519,6 @@
25952519 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
25962520 page->active++;
25972521
2598
-#if DEBUG
2599
- if (cachep->flags & SLAB_STORE_USER)
2600
- set_store_user_dirty(cachep);
2601
-#endif
2602
-
26032522 return objp;
26042523 }
26052524
....@@ -2656,13 +2575,9 @@
26562575 * Be lazy and only check for valid flags here, keeping it out of the
26572576 * critical path in kmem_cache_alloc().
26582577 */
2659
- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2660
- gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2661
- flags &= ~GFP_SLAB_BUG_MASK;
2662
- pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2663
- invalid_mask, &invalid_mask, flags, &flags);
2664
- dump_stack();
2665
- }
2578
+ if (unlikely(flags & GFP_SLAB_BUG_MASK))
2579
+ flags = kmalloc_fix_flags(flags);
2580
+
26662581 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
26672582 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
26682583
....@@ -2732,13 +2647,13 @@
27322647 if (!page)
27332648 return;
27342649
2735
- INIT_LIST_HEAD(&page->lru);
2650
+ INIT_LIST_HEAD(&page->slab_list);
27362651 n = get_node(cachep, page_to_nid(page));
27372652
27382653 spin_lock(&n->list_lock);
27392654 n->total_slabs++;
27402655 if (!page->active) {
2741
- list_add_tail(&page->lru, &(n->slabs_free));
2656
+ list_add_tail(&page->slab_list, &n->slabs_free);
27422657 n->free_slabs++;
27432658 } else
27442659 fixup_slab_list(cachep, n, page, &list);
....@@ -2805,10 +2720,8 @@
28052720 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
28062721 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
28072722 }
2808
- if (cachep->flags & SLAB_STORE_USER) {
2809
- set_store_user_dirty(cachep);
2723
+ if (cachep->flags & SLAB_STORE_USER)
28102724 *dbg_userword(cachep, objp) = (void *)caller;
2811
- }
28122725
28132726 objnr = obj_to_index(cachep, page, objp);
28142727
....@@ -2817,7 +2730,7 @@
28172730
28182731 if (cachep->flags & SLAB_POISON) {
28192732 poison_obj(cachep, objp, POISON_FREE);
2820
- slab_kernel_map(cachep, objp, 0, caller);
2733
+ slab_kernel_map(cachep, objp, 0);
28212734 }
28222735 return objp;
28232736 }
....@@ -2847,9 +2760,9 @@
28472760 void **list)
28482761 {
28492762 /* move slabp to correct slabp list: */
2850
- list_del(&page->lru);
2763
+ list_del(&page->slab_list);
28512764 if (page->active == cachep->num) {
2852
- list_add(&page->lru, &n->slabs_full);
2765
+ list_add(&page->slab_list, &n->slabs_full);
28532766 if (OBJFREELIST_SLAB(cachep)) {
28542767 #if DEBUG
28552768 /* Poisoning will be done without holding the lock */
....@@ -2863,7 +2776,7 @@
28632776 page->freelist = NULL;
28642777 }
28652778 } else
2866
- list_add(&page->lru, &n->slabs_partial);
2779
+ list_add(&page->slab_list, &n->slabs_partial);
28672780 }
28682781
28692782 /* Try to find non-pfmemalloc slab if needed */
....@@ -2886,20 +2799,20 @@
28862799 }
28872800
28882801 /* Move pfmemalloc slab to the end of list to speed up next search */
2889
- list_del(&page->lru);
2802
+ list_del(&page->slab_list);
28902803 if (!page->active) {
2891
- list_add_tail(&page->lru, &n->slabs_free);
2804
+ list_add_tail(&page->slab_list, &n->slabs_free);
28922805 n->free_slabs++;
28932806 } else
2894
- list_add_tail(&page->lru, &n->slabs_partial);
2807
+ list_add_tail(&page->slab_list, &n->slabs_partial);
28952808
2896
- list_for_each_entry(page, &n->slabs_partial, lru) {
2809
+ list_for_each_entry(page, &n->slabs_partial, slab_list) {
28972810 if (!PageSlabPfmemalloc(page))
28982811 return page;
28992812 }
29002813
29012814 n->free_touched = 1;
2902
- list_for_each_entry(page, &n->slabs_free, lru) {
2815
+ list_for_each_entry(page, &n->slabs_free, slab_list) {
29032816 if (!PageSlabPfmemalloc(page)) {
29042817 n->free_slabs--;
29052818 return page;
....@@ -2914,11 +2827,12 @@
29142827 struct page *page;
29152828
29162829 assert_spin_locked(&n->list_lock);
2917
- page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
2830
+ page = list_first_entry_or_null(&n->slabs_partial, struct page,
2831
+ slab_list);
29182832 if (!page) {
29192833 n->free_touched = 1;
29202834 page = list_first_entry_or_null(&n->slabs_free, struct page,
2921
- lru);
2835
+ slab_list);
29222836 if (page)
29232837 n->free_slabs--;
29242838 }
....@@ -3081,7 +2995,7 @@
30812995 return objp;
30822996 if (cachep->flags & SLAB_POISON) {
30832997 check_poison_obj(cachep, objp);
3084
- slab_kernel_map(cachep, objp, 1, 0);
2998
+ slab_kernel_map(cachep, objp, 1);
30852999 poison_obj(cachep, objp, POISON_INUSE);
30863000 }
30873001 if (cachep->flags & SLAB_STORE_USER)
....@@ -3102,10 +3016,9 @@
31023016 objp += obj_offset(cachep);
31033017 if (cachep->ctor && cachep->flags & SLAB_POISON)
31043018 cachep->ctor(objp);
3105
- if (ARCH_SLAB_MINALIGN &&
3106
- ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3107
- pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3108
- objp, (int)ARCH_SLAB_MINALIGN);
3019
+ if ((unsigned long)objp & (arch_slab_minalign() - 1)) {
3020
+ pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp,
3021
+ arch_slab_minalign());
31093022 }
31103023 return objp;
31113024 }
....@@ -3184,7 +3097,7 @@
31843097 struct zonelist *zonelist;
31853098 struct zoneref *z;
31863099 struct zone *zone;
3187
- enum zone_type high_zoneidx = gfp_zone(flags);
3100
+ enum zone_type highest_zoneidx = gfp_zone(flags);
31883101 void *obj = NULL;
31893102 struct page *page;
31903103 int nid;
....@@ -3202,7 +3115,7 @@
32023115 * Look through allowed nodes for objects available
32033116 * from existing per node queues.
32043117 */
3205
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3118
+ for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
32063119 nid = zone_to_nid(zone);
32073120
32083121 if (cpuset_zone_allowed(zone, flags) &&
....@@ -3294,17 +3207,23 @@
32943207 }
32953208
32963209 static __always_inline void *
3297
-slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3210
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
32983211 unsigned long caller)
32993212 {
33003213 unsigned long save_flags;
33013214 void *ptr;
33023215 int slab_node = numa_mem_id();
3216
+ struct obj_cgroup *objcg = NULL;
3217
+ bool init = false;
33033218
33043219 flags &= gfp_allowed_mask;
3305
- cachep = slab_pre_alloc_hook(cachep, flags);
3220
+ cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
33063221 if (unlikely(!cachep))
33073222 return NULL;
3223
+
3224
+ ptr = kfence_alloc(cachep, orig_size, flags);
3225
+ if (unlikely(ptr))
3226
+ goto out_hooks;
33083227
33093228 cache_alloc_debugcheck_before(cachep, flags);
33103229 local_irq_save(save_flags);
....@@ -3334,11 +3253,10 @@
33343253 out:
33353254 local_irq_restore(save_flags);
33363255 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3256
+ init = slab_want_init_on_alloc(flags, cachep);
33373257
3338
- if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3339
- memset(ptr, 0, cachep->object_size);
3340
-
3341
- slab_post_alloc_hook(cachep, flags, 1, &ptr);
3258
+out_hooks:
3259
+ slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
33423260 return ptr;
33433261 }
33443262
....@@ -3375,15 +3293,21 @@
33753293 #endif /* CONFIG_NUMA */
33763294
33773295 static __always_inline void *
3378
-slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3296
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
33793297 {
33803298 unsigned long save_flags;
33813299 void *objp;
3300
+ struct obj_cgroup *objcg = NULL;
3301
+ bool init = false;
33823302
33833303 flags &= gfp_allowed_mask;
3384
- cachep = slab_pre_alloc_hook(cachep, flags);
3304
+ cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
33853305 if (unlikely(!cachep))
33863306 return NULL;
3307
+
3308
+ objp = kfence_alloc(cachep, orig_size, flags);
3309
+ if (unlikely(objp))
3310
+ goto out;
33873311
33883312 cache_alloc_debugcheck_before(cachep, flags);
33893313 local_irq_save(save_flags);
....@@ -3391,11 +3315,10 @@
33913315 local_irq_restore(save_flags);
33923316 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
33933317 prefetchw(objp);
3318
+ init = slab_want_init_on_alloc(flags, cachep);
33943319
3395
- if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3396
- memset(objp, 0, cachep->object_size);
3397
-
3398
- slab_post_alloc_hook(cachep, flags, 1, &objp);
3320
+out:
3321
+ slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
33993322 return objp;
34003323 }
34013324
....@@ -3419,29 +3342,29 @@
34193342 objp = objpp[i];
34203343
34213344 page = virt_to_head_page(objp);
3422
- list_del(&page->lru);
3345
+ list_del(&page->slab_list);
34233346 check_spinlock_acquired_node(cachep, node);
34243347 slab_put_obj(cachep, page, objp);
34253348 STATS_DEC_ACTIVE(cachep);
34263349
34273350 /* fixup slab chains */
34283351 if (page->active == 0) {
3429
- list_add(&page->lru, &n->slabs_free);
3352
+ list_add(&page->slab_list, &n->slabs_free);
34303353 n->free_slabs++;
34313354 } else {
34323355 /* Unconditionally move a slab to the end of the
34333356 * partial list on free - maximum time for the
34343357 * other objects to be freed, too.
34353358 */
3436
- list_add_tail(&page->lru, &n->slabs_partial);
3359
+ list_add_tail(&page->slab_list, &n->slabs_partial);
34373360 }
34383361 }
34393362
34403363 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
34413364 n->free_objects -= cachep->num;
34423365
3443
- page = list_last_entry(&n->slabs_free, struct page, lru);
3444
- list_move(&page->lru, list);
3366
+ page = list_last_entry(&n->slabs_free, struct page, slab_list);
3367
+ list_move(&page->slab_list, list);
34453368 n->free_slabs--;
34463369 n->total_slabs--;
34473370 }
....@@ -3479,7 +3402,7 @@
34793402 int i = 0;
34803403 struct page *page;
34813404
3482
- list_for_each_entry(page, &n->slabs_free, lru) {
3405
+ list_for_each_entry(page, &n->slabs_free, slab_list) {
34833406 BUG_ON(page->active);
34843407
34853408 i++;
....@@ -3488,9 +3411,9 @@
34883411 }
34893412 #endif
34903413 spin_unlock(&n->list_lock);
3491
- slabs_destroy(cachep, &list);
34923414 ac->avail -= batchcount;
34933415 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3416
+ slabs_destroy(cachep, &list);
34943417 }
34953418
34963419 /*
....@@ -3500,9 +3423,31 @@
35003423 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
35013424 unsigned long caller)
35023425 {
3503
- /* Put the object into the quarantine, don't touch it for now. */
3504
- if (kasan_slab_free(cachep, objp, _RET_IP_))
3426
+ bool init;
3427
+
3428
+ if (is_kfence_address(objp)) {
3429
+ kmemleak_free_recursive(objp, cachep->flags);
3430
+ memcg_slab_free_hook(cachep, &objp, 1);
3431
+ __kfence_free(objp);
35053432 return;
3433
+ }
3434
+
3435
+ /*
3436
+ * As memory initialization might be integrated into KASAN,
3437
+ * kasan_slab_free and initialization memset must be
3438
+ * kept together to avoid discrepancies in behavior.
3439
+ */
3440
+ init = slab_want_init_on_free(cachep);
3441
+ if (init && !kasan_has_integrated_init())
3442
+ memset(objp, 0, cachep->object_size);
3443
+ /* KASAN might put objp into memory quarantine, delaying its reuse. */
3444
+ if (kasan_slab_free(cachep, objp, init))
3445
+ return;
3446
+
3447
+ /* Use KCSAN to help debug racy use-after-free. */
3448
+ if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3449
+ __kcsan_check_access(objp, cachep->object_size,
3450
+ KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
35063451
35073452 ___cache_free(cachep, objp, caller);
35083453 }
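
Per the comment added in the hunk above, the init-on-free memset is kept immediately before kasan_slab_free(), so a quarantined object is already zeroed and behavior stays consistent whether or not memory initialization is integrated into KASAN. The ordering, sketched in plain C with stand-in hooks (none of these names are kernel APIs):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for a quarantine: returns true if the object is held back. */
static bool quarantine_sketch(void *obj, size_t size)
{
	(void)obj;
	(void)size;
	return false;	/* pretend nothing is quarantined in this sketch */
}

static void release_to_cache_sketch(void *obj)
{
	(void)obj;	/* would push onto the per-CPU free array here */
}

static void free_path_sketch(void *obj, size_t size, bool init_on_free)
{
	/* Zero first, so the quarantine sees the initialized object, not stale data. */
	if (init_on_free)
		memset(obj, 0, size);

	/* A quarantined object is not handed back to the allocator yet. */
	if (quarantine_sketch(obj, size))
		return;

	release_to_cache_sketch(obj);
}

int main(void)
{
	char buf[32] = "stale";

	free_path_sketch(buf, sizeof(buf), true);
	return buf[0];	/* zeroed by init-on-free, so this exits with 0 */
}
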
....@@ -3513,10 +3458,9 @@
35133458 struct array_cache *ac = cpu_cache_get(cachep);
35143459
35153460 check_irq_off();
3516
- if (unlikely(slab_want_init_on_free(cachep)))
3517
- memset(objp, 0, cachep->object_size);
35183461 kmemleak_free_recursive(objp, cachep->flags);
35193462 objp = cache_free_debugcheck(cachep, objp, caller);
3463
+ memcg_slab_free_hook(cachep, &objp, 1);
35203464
35213465 /*
35223466 * Skip calling cache_free_alien() when the platform is not numa.
....@@ -3544,7 +3488,7 @@
35443488 }
35453489 }
35463490
3547
- ac->entry[ac->avail++] = objp;
3491
+ __free_one(ac, objp);
35483492 }
35493493
35503494 /**
....@@ -3554,10 +3498,12 @@
35543498 *
35553499 * Allocate an object from this cache. The flags are only relevant
35563500 * if the cache has no available objects.
3501
+ *
3502
+ * Return: pointer to the new object or %NULL in case of error
35573503 */
35583504 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
35593505 {
3560
- void *ret = slab_alloc(cachep, flags, _RET_IP_);
3506
+ void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
35613507
35623508 trace_kmem_cache_alloc(_RET_IP_, ret,
35633509 cachep->object_size, cachep->size, flags);
....@@ -3580,8 +3526,9 @@
35803526 void **p)
35813527 {
35823528 size_t i;
3529
+ struct obj_cgroup *objcg = NULL;
35833530
3584
- s = slab_pre_alloc_hook(s, flags);
3531
+ s = slab_pre_alloc_hook(s, &objcg, size, flags);
35853532 if (!s)
35863533 return 0;
35873534
....@@ -3589,7 +3536,7 @@
35893536
35903537 local_irq_disable();
35913538 for (i = 0; i < size; i++) {
3592
- void *objp = __do_cache_alloc(s, flags);
3539
+ void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
35933540
35943541 if (unlikely(!objp))
35953542 goto error;
....@@ -3599,18 +3546,18 @@
35993546
36003547 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
36013548
3602
- /* Clear memory outside IRQ disabled section */
3603
- if (unlikely(slab_want_init_on_alloc(flags, s)))
3604
- for (i = 0; i < size; i++)
3605
- memset(p[i], 0, s->object_size);
3606
-
3607
- slab_post_alloc_hook(s, flags, size, p);
3549
+ /*
3550
+ * memcg and kmem_cache debug support and memory initialization.
3551
+ * Done outside of the IRQ disabled section.
3552
+ */
3553
+ slab_post_alloc_hook(s, objcg, flags, size, p,
3554
+ slab_want_init_on_alloc(flags, s));
36083555 /* FIXME: Trace call missing. Christoph would like a bulk variant */
36093556 return size;
36103557 error:
36113558 local_irq_enable();
36123559 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3613
- slab_post_alloc_hook(s, flags, i, p);
3560
+ slab_post_alloc_hook(s, objcg, flags, i, p, false);
36143561 __kmem_cache_free_bulk(s, i, p);
36153562 return 0;
36163563 }
....@@ -3622,7 +3569,7 @@
36223569 {
36233570 void *ret;
36243571
3625
- ret = slab_alloc(cachep, flags, _RET_IP_);
3572
+ ret = slab_alloc(cachep, flags, size, _RET_IP_);
36263573
36273574 ret = kasan_kmalloc(cachep, ret, size, flags);
36283575 trace_kmalloc(_RET_IP_, ret,
....@@ -3643,10 +3590,12 @@
36433590 * node, which can improve the performance for cpu bound structures.
36443591 *
36453592 * Fallback to other node is possible if __GFP_THISNODE is not set.
3593
+ *
3594
+ * Return: pointer to the new object or %NULL in case of error
36463595 */
36473596 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
36483597 {
3649
- void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3598
+ void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
36503599
36513600 trace_kmem_cache_alloc_node(_RET_IP_, ret,
36523601 cachep->object_size, cachep->size,
....@@ -3664,7 +3613,7 @@
36643613 {
36653614 void *ret;
36663615
3667
- ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3616
+ ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
36683617
36693618 ret = kasan_kmalloc(cachep, ret, size, flags);
36703619 trace_kmalloc_node(_RET_IP_, ret,
....@@ -3711,6 +3660,8 @@
37113660 * @size: how many bytes of memory are required.
37123661 * @flags: the type of memory to allocate (see kmalloc).
37133662 * @caller: function caller for debug tracking of the caller
3663
+ *
3664
+ * Return: pointer to the allocated memory or %NULL in case of error
37143665 */
37153666 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
37163667 unsigned long caller)
....@@ -3723,7 +3674,7 @@
37233674 cachep = kmalloc_slab(size, flags);
37243675 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
37253676 return cachep;
3726
- ret = slab_alloc(cachep, flags, caller);
3677
+ ret = slab_alloc(cachep, flags, size, caller);
37273678
37283679 ret = kasan_kmalloc(cachep, ret, size, flags);
37293680 trace_kmalloc(caller, ret,
....@@ -3783,6 +3734,8 @@
37833734 s = virt_to_cache(objp);
37843735 else
37853736 s = cache_from_obj(orig_s, objp);
3737
+ if (!s)
3738
+ continue;
37863739
37873740 debug_check_no_locks_freed(objp, s->object_size);
37883741 if (!(s->flags & SLAB_DEBUG_OBJECTS))
....@@ -3817,6 +3770,10 @@
38173770 local_irq_save(flags);
38183771 kfree_debugcheck(objp);
38193772 c = virt_to_cache(objp);
3773
+ if (!c) {
3774
+ local_irq_restore(flags);
3775
+ return;
3776
+ }
38203777 debug_check_no_locks_freed(objp, c->object_size);
38213778
38223779 debug_check_no_obj_freed(objp, c->object_size);
....@@ -3862,8 +3819,8 @@
38623819 }
38633820
38643821 /* Always called with the slab_mutex held */
3865
-static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3866
- int batchcount, int shared, gfp_t gfp)
3822
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3823
+ int batchcount, int shared, gfp_t gfp)
38673824 {
38683825 struct array_cache __percpu *cpu_cache, *prev;
38693826 int cpu;
....@@ -3908,29 +3865,6 @@
39083865 return setup_kmem_cache_nodes(cachep, gfp);
39093866 }
39103867
3911
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3912
- int batchcount, int shared, gfp_t gfp)
3913
-{
3914
- int ret;
3915
- struct kmem_cache *c;
3916
-
3917
- ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3918
-
3919
- if (slab_state < FULL)
3920
- return ret;
3921
-
3922
- if ((ret < 0) || !is_root_cache(cachep))
3923
- return ret;
3924
-
3925
- lockdep_assert_held(&slab_mutex);
3926
- for_each_memcg_cache(c, cachep) {
3927
- /* return value determined by the root cache only */
3928
- __do_tune_cpucache(c, limit, batchcount, shared, gfp);
3929
- }
3930
-
3931
- return ret;
3932
-}
3933
-
39343868 /* Called with slab_mutex held always */
39353869 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
39363870 {
....@@ -3942,13 +3876,6 @@
39423876 err = cache_random_seq_create(cachep, cachep->num, gfp);
39433877 if (err)
39443878 goto end;
3945
-
3946
- if (!is_root_cache(cachep)) {
3947
- struct kmem_cache *root = memcg_root_cache(cachep);
3948
- limit = root->limit;
3949
- shared = root->shared;
3950
- batchcount = root->batchcount;
3951
- }
39523879
39533880 if (limit && shared && batchcount)
39543881 goto skip_setup;
....@@ -4136,6 +4063,7 @@
41364063 sinfo->objects_per_slab = cachep->num;
41374064 sinfo->cache_order = cachep->gfporder;
41384065 }
4066
+EXPORT_SYMBOL_GPL(get_slabinfo);
41394067
41404068 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
41414069 {
....@@ -4176,6 +4104,8 @@
41764104 * @buffer: user buffer
41774105 * @count: data length
41784106 * @ppos: unused
4107
+ *
4108
+ * Return: %0 on success, negative error code otherwise.
41794109 */
41804110 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
41814111 size_t count, loff_t *ppos)
....@@ -4220,200 +4150,6 @@
42204150 return res;
42214151 }
42224152
4223
-#ifdef CONFIG_DEBUG_SLAB_LEAK
4224
-
4225
-static inline int add_caller(unsigned long *n, unsigned long v)
4226
-{
4227
- unsigned long *p;
4228
- int l;
4229
- if (!v)
4230
- return 1;
4231
- l = n[1];
4232
- p = n + 2;
4233
- while (l) {
4234
- int i = l/2;
4235
- unsigned long *q = p + 2 * i;
4236
- if (*q == v) {
4237
- q[1]++;
4238
- return 1;
4239
- }
4240
- if (*q > v) {
4241
- l = i;
4242
- } else {
4243
- p = q + 2;
4244
- l -= i + 1;
4245
- }
4246
- }
4247
- if (++n[1] == n[0])
4248
- return 0;
4249
- memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4250
- p[0] = v;
4251
- p[1] = 1;
4252
- return 1;
4253
-}
4254
-
4255
-static void handle_slab(unsigned long *n, struct kmem_cache *c,
4256
- struct page *page)
4257
-{
4258
- void *p;
4259
- int i, j;
4260
- unsigned long v;
4261
-
4262
- if (n[0] == n[1])
4263
- return;
4264
- for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4265
- bool active = true;
4266
-
4267
- for (j = page->active; j < c->num; j++) {
4268
- if (get_free_obj(page, j) == i) {
4269
- active = false;
4270
- break;
4271
- }
4272
- }
4273
-
4274
- if (!active)
4275
- continue;
4276
-
4277
- /*
4278
- * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
4279
- * mapping is established when actual object allocation and
4280
- * we could mistakenly access the unmapped object in the cpu
4281
- * cache.
4282
- */
4283
- if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4284
- continue;
4285
-
4286
- if (!add_caller(n, v))
4287
- return;
4288
- }
4289
-}
4290
-
4291
-static void show_symbol(struct seq_file *m, unsigned long address)
4292
-{
4293
-#ifdef CONFIG_KALLSYMS
4294
- unsigned long offset, size;
4295
- char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4296
-
4297
- if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4298
- seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4299
- if (modname[0])
4300
- seq_printf(m, " [%s]", modname);
4301
- return;
4302
- }
4303
-#endif
4304
- seq_printf(m, "%px", (void *)address);
4305
-}
4306
-
4307
-static int leaks_show(struct seq_file *m, void *p)
4308
-{
4309
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
4310
- root_caches_node);
4311
- struct page *page;
4312
- struct kmem_cache_node *n;
4313
- const char *name;
4314
- unsigned long *x = m->private;
4315
- int node;
4316
- int i;
4317
-
4318
- if (!(cachep->flags & SLAB_STORE_USER))
4319
- return 0;
4320
- if (!(cachep->flags & SLAB_RED_ZONE))
4321
- return 0;
4322
-
4323
- /*
4324
- * Set store_user_clean and start to grab stored user information
4325
- * for all objects on this cache. If some alloc/free requests comes
4326
- * during the processing, information would be wrong so restart
4327
- * whole processing.
4328
- */
4329
- do {
4330
- drain_cpu_caches(cachep);
4331
- /*
4332
- * drain_cpu_caches() could make kmemleak_object and
4333
- * debug_objects_cache dirty, so reset afterwards.
4334
- */
4335
- set_store_user_clean(cachep);
4336
-
4337
- x[1] = 0;
4338
-
4339
- for_each_kmem_cache_node(cachep, node, n) {
4340
-
4341
- check_irq_on();
4342
- spin_lock_irq(&n->list_lock);
4343
-
4344
- list_for_each_entry(page, &n->slabs_full, lru)
4345
- handle_slab(x, cachep, page);
4346
- list_for_each_entry(page, &n->slabs_partial, lru)
4347
- handle_slab(x, cachep, page);
4348
- spin_unlock_irq(&n->list_lock);
4349
- }
4350
- } while (!is_store_user_clean(cachep));
4351
-
4352
- name = cachep->name;
4353
- if (x[0] == x[1]) {
4354
- /* Increase the buffer size */
4355
- mutex_unlock(&slab_mutex);
4356
- m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
4357
- GFP_KERNEL);
4358
- if (!m->private) {
4359
- /* Too bad, we are really out */
4360
- m->private = x;
4361
- mutex_lock(&slab_mutex);
4362
- return -ENOMEM;
4363
- }
4364
- *(unsigned long *)m->private = x[0] * 2;
4365
- kfree(x);
4366
- mutex_lock(&slab_mutex);
4367
- /* Now make sure this entry will be retried */
4368
- m->count = m->size;
4369
- return 0;
4370
- }
4371
- for (i = 0; i < x[1]; i++) {
4372
- seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4373
- show_symbol(m, x[2*i+2]);
4374
- seq_putc(m, '\n');
4375
- }
4376
-
4377
- return 0;
4378
-}
4379
-
4380
-static const struct seq_operations slabstats_op = {
4381
- .start = slab_start,
4382
- .next = slab_next,
4383
- .stop = slab_stop,
4384
- .show = leaks_show,
4385
-};
4386
-
4387
-static int slabstats_open(struct inode *inode, struct file *file)
4388
-{
4389
- unsigned long *n;
4390
-
4391
- n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4392
- if (!n)
4393
- return -ENOMEM;
4394
-
4395
- *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4396
-
4397
- return 0;
4398
-}
4399
-
4400
-static const struct file_operations proc_slabstats_operations = {
4401
- .open = slabstats_open,
4402
- .read = seq_read,
4403
- .llseek = seq_lseek,
4404
- .release = seq_release_private,
4405
-};
4406
-#endif
4407
-
4408
-static int __init slab_proc_init(void)
4409
-{
4410
-#ifdef CONFIG_DEBUG_SLAB_LEAK
4411
- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4412
-#endif
4413
- return 0;
4414
-}
4415
-module_init(slab_proc_init);
4416
-
44174153 #ifdef CONFIG_HARDENED_USERCOPY
44184154 /*
44194155 * Rejects incorrectly sized objects and objects that are to be copied
....@@ -4438,7 +4174,10 @@
44384174 BUG_ON(objnr >= cachep->num);
44394175
44404176 /* Find offset within object. */
4441
- offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4177
+ if (is_kfence_address(ptr))
4178
+ offset = ptr - kfence_object_start(ptr);
4179
+ else
4180
+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
44424181
44434182 /* Allow address range falling entirely within usercopy region. */
44444183 if (offset >= cachep->useroffset &&
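
The hardened-usercopy hunk above derives the copy offset from the KFENCE object start when the pointer is a KFENCE address, and from the slab object otherwise; the context lines at the end of the hunk show the start of the whitelist test, which accepts copies falling entirely inside the cache's [useroffset, useroffset + usersize) window. Below is a self-contained version of that bounds check with illustrative parameter names; it is a sketch of the idea, not the kernel routine.

#include <stdbool.h>
#include <stddef.h>

/*
 * Whitelist test in isolation: a copy of 'n' bytes at 'offset' within an
 * object is allowed only if it lies entirely inside the usercopy region
 * [useroffset, useroffset + usersize). The middle comparison keeps the
 * final subtraction from wrapping.
 */
static bool usercopy_range_ok(size_t offset, size_t n,
			      size_t useroffset, size_t usersize)
{
	return offset >= useroffset &&
	       offset - useroffset <= usersize &&
	       n <= usersize - (offset - useroffset);
}

int main(void)
{
	/* window: bytes [16, 48) of the object may be copied to/from user space */
	return usercopy_range_ok(16, 32, 16, 32) &&	/* exactly the window: ok */
	       !usercopy_range_ok(8, 8, 16, 32) &&	/* starts before it: rejected */
	       !usercopy_range_ok(40, 16, 16, 32)	/* runs past it: rejected */
	       ? 0 : 1;
}
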
....@@ -4464,31 +4203,26 @@
44644203 #endif /* CONFIG_HARDENED_USERCOPY */
44654204
44664205 /**
4467
- * ksize - get the actual amount of memory allocated for a given object
4468
- * @objp: Pointer to the object
4206
+ * __ksize -- Uninstrumented ksize.
4207
+ * @objp: pointer to the object
44694208 *
4470
- * kmalloc may internally round up allocations and return more memory
4471
- * than requested. ksize() can be used to determine the actual amount of
4472
- * memory allocated. The caller may use this additional memory, even though
4473
- * a smaller amount of memory was initially specified with the kmalloc call.
4474
- * The caller must guarantee that objp points to a valid object previously
4475
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
4476
- * must not be freed during the duration of the call.
4209
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
4210
+ * safety checks as ksize() with KASAN instrumentation enabled.
4211
+ *
4212
+ * Return: size of the actual memory used by @objp in bytes
44774213 */
4478
-size_t ksize(const void *objp)
4214
+size_t __ksize(const void *objp)
44794215 {
4216
+ struct kmem_cache *c;
44804217 size_t size;
44814218
44824219 BUG_ON(!objp);
44834220 if (unlikely(objp == ZERO_SIZE_PTR))
44844221 return 0;
44854222
4486
- size = virt_to_cache(objp)->object_size;
4487
- /* We assume that ksize callers could use the whole allocated area,
4488
- * so we need to unpoison this area.
4489
- */
4490
- kasan_unpoison_shadow(objp, size);
4223
+ c = virt_to_cache(objp);
4224
+ size = c ? c->object_size : 0;
44914225
44924226 return size;
44934227 }
4494
-EXPORT_SYMBOL(ksize);
4228
+EXPORT_SYMBOL(__ksize);