From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] mm/slab.h: rework memcg slab accounting and SLUB debug helpers
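
Rework kernel/mm/slab.h:

* Include <linux/android_vendor.h>, move struct track and enum
  track_item into slab.h, reserve ANDROID_OEM_DATA(1) in struct track
  and expose get_each_object_track() for SLUB allocation tracking.
* Turn kmalloc_info.name into a per-kmalloc-type array and declare
  kmalloc_fix_flags().
* Replace the per-memcg child-cache machinery (is_root_cache(),
  for_each_memcg_cache(), cache_from_memcg_idx(), memcg_charge_slab()
  and friends) with per-object obj_cgroup accounting:
  page_obj_cgroups(), obj_full_size() and the
  memcg_slab_{pre,post}_alloc_hook()/memcg_slab_free_hook() helpers.
* Add the slub_debug_enabled static key, kmem_cache_debug_flags() and
  print_tracking(); drop the unused ctor argument from
  kmem_cache_flags().
* Add cache_vmstat_idx(), virt_to_cache() and
  account_slab_page()/unaccount_slab_page(); make cache_from_obj()
  depend on SLAB_FREELIST_HARDENED or consistency checks.
* Thread an obj_cgroup and an init flag through slab_pre_alloc_hook()
  and slab_post_alloc_hook() so KASAN tagging and memset
  initialization stay together.
* Drop the memcg_slab_* seq_file iterators and add
  debugfs_slab_release().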
---
kernel/mm/slab.h | 422 +++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 284 insertions(+), 138 deletions(-)
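
Notes for review:

The reworked slab_pre_alloc_hook()/slab_post_alloc_hook() pair now
threads an obj_cgroup through the allocation fast path. Below is a
minimal sketch of the expected calling convention; it assumes a
hypothetical alloc_object_from_cache() stand-in for the allocator
internals and the slab_want_init_on_alloc() helper from elsewhere in
this header:

static void *sketch_slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	struct obj_cgroup *objcg = NULL;
	void *object;

	/* May fail the allocation (failslab) or pre-charge one object. */
	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
	if (unlikely(!s))
		return NULL;

	object = alloc_object_from_cache(s, gfpflags);	/* hypothetical */

	/*
	 * KASAN tagging, optional zero-init and memcg accounting are
	 * kept together; for a NULL object the post hook rolls back
	 * the objcg pre-charge, so no extra cleanup is needed here.
	 */
	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
			     slab_want_init_on_alloc(gfpflags, s));
	return object;
}

page_obj_cgroups() strips a tag in bit 0 of page->obj_cgroups, so the
matching setter must set that bit. A sketch of the equivalent of the
real memcg_alloc_page_obj_cgroups() (implemented outside this header;
the objs_per_slab_page() and kcalloc() calls are assumptions):

static int sketch_alloc_obj_cgroups(struct page *page,
				    struct kmem_cache *s, gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc(objects, sizeof(struct obj_cgroup *), gfp);
	if (!vec)
		return -ENOMEM;

	/*
	 * Set bit 0 so page_has_obj_cgroups() can distinguish the
	 * vector from page->mem_cgroup, which shares the same word.
	 */
	page->obj_cgroups = (struct obj_cgroup **)((unsigned long)vec | 0x1UL);
	return 0;
}

get_each_object_track() is a new vendor-facing iterator. A hypothetical
consumer, assuming a callback return value of 0 means "continue":

static int sketch_count_track(const struct kmem_cache *s,
			      const void *object,
			      const struct track *t, void *private)
{
	unsigned long *n = private;

	if (t->addr)
		(*n)++;
	return 0;
}

static unsigned long sketch_alloc_tracks(struct kmem_cache *s,
					 struct page *page)
{
	unsigned long n = 0;

	get_each_object_track(s, page, TRACK_ALLOC, sketch_count_track, &n);
	return n;
}
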
diff --git a/kernel/mm/slab.h b/kernel/mm/slab.h
index 77a1a26..4738d19 100644
--- a/kernel/mm/slab.h
+++ b/kernel/mm/slab.h
@@ -46,6 +46,7 @@
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
+#include <linux/android_vendor.h>
/*
* State of the slab allocator.
@@ -76,7 +77,7 @@
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
- const char *name;
+ const char *name[NR_KMALLOC_TYPES];
unsigned int size;
} kmalloc_info[];
@@ -89,6 +90,28 @@
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
+gfp_t kmalloc_fix_flags(gfp_t flags);
+
+#ifdef CONFIG_SLUB
+/*
+ * Tracking user of a slab.
+ */
+#define TRACK_ADDRS_COUNT 16
+struct track {
+ unsigned long addr; /* Called from address */
+#ifdef CONFIG_STACKTRACE
+ unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
+#endif
+ int cpu; /* Was running on cpu */
+ int pid; /* Pid context */
+ unsigned long when; /* When did the operation occur */
+#ifdef CONFIG_STACKTRACE
+ ANDROID_OEM_DATA(1);
+#endif
+};
+
+enum track_item { TRACK_ALLOC, TRACK_FREE };
+#endif
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
@@ -109,8 +132,7 @@
slab_flags_t flags, void (*ctor)(void *));
slab_flags_t kmem_cache_flags(unsigned int object_size,
- slab_flags_t flags, const char *name,
- void (*ctor)(void *));
+ slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
@@ -118,8 +140,7 @@
{ return NULL; }
static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
- slab_flags_t flags, const char *name,
- void (*ctor)(void *))
+ slab_flags_t flags, const char *name)
{
return flags;
}
@@ -171,7 +192,6 @@
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
-void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
@@ -204,181 +224,294 @@
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+ return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
+}
+
+#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG_ON
+DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
+#else
+DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
+#endif
+extern void print_tracking(struct kmem_cache *s, void *object);
+extern unsigned long get_each_object_track(struct kmem_cache *s,
+ struct page *page, enum track_item alloc,
+ int (*fn)(const struct kmem_cache *, const void *,
+ const struct track *, void *), void *private);
+extern slab_flags_t slub_debug;
+static inline bool __slub_debug_enabled(void)
+{
+ return static_branch_unlikely(&slub_debug_enabled);
+}
+#else
+static inline void print_tracking(struct kmem_cache *s, void *object)
+{
+}
+static inline bool __slub_debug_enabled(void)
+{
+ return false;
+}
+#ifdef CONFIG_SLUB
+static inline unsigned long get_each_object_track(struct kmem_cache *s,
+ struct page *page, enum track_item alloc,
+ int (*fn)(const struct kmem_cache *, const void *,
+ const struct track *, void *), void *private)
+{
+ return 0;
+}
+#endif
+#endif
+
+/*
+ * Returns true if any of the specified slub_debug flags is enabled for the
+ * cache. Use only for flags parsed by setup_slub_debug() as it also enables
+ * the static key.
+ */
+static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
+{
+ if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+ VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+ if (__slub_debug_enabled())
+ return s->flags & flags;
+ return false;
+}
+
#ifdef CONFIG_MEMCG_KMEM
-
-/* List of all root caches. */
-extern struct list_head slab_root_caches;
-#define root_caches_node memcg_params.__root_caches_node
-
-/*
- * Iterate over all memcg caches of the given root cache. The caller must hold
- * slab_mutex.
- */
-#define for_each_memcg_cache(iter, root) \
- list_for_each_entry(iter, &(root)->memcg_params.children, \
- memcg_params.children_node)
-
-static inline bool is_root_cache(struct kmem_cache *s)
+static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
- return !s->memcg_params.root_cache;
+ /*
+ * page->mem_cgroup and page->obj_cgroups are sharing the same
+ * space. To distinguish between them in case we don't know for sure
+ * that the page is a slab page (e.g. page_cgroup_ino()), let's
+ * always set the lowest bit of obj_cgroups.
+ */
+ return (struct obj_cgroup **)
+ ((unsigned long)page->obj_cgroups & ~0x1UL);
}
-static inline bool slab_equal_or_root(struct kmem_cache *s,
- struct kmem_cache *p)
+static inline bool page_has_obj_cgroups(struct page *page)
{
- return p == s || p == s->memcg_params.root_cache;
+ return ((unsigned long)page->obj_cgroups & 0x1UL);
+}
+
+int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
+ gfp_t gfp);
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+ kfree(page_obj_cgroups(page));
+ page->obj_cgroups = NULL;
+}
+
+static inline size_t obj_full_size(struct kmem_cache *s)
+{
+ /*
+ * For each accounted object there is an extra space which is used
+ * to store obj_cgroup membership. Charge it too.
+ */
+ return s->size + sizeof(struct obj_cgroup *);
}
/*
- * We use suffixes to the name in memcg because we can't have caches
- * created in the system with the same name. But when we print them
- * locally, better refer to them with the base name
+ * Returns false if the allocation should fail.
*/
-static inline const char *cache_name(struct kmem_cache *s)
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup **objcgp,
+ size_t objects, gfp_t flags)
{
- if (!is_root_cache(s))
- s = s->memcg_params.root_cache;
- return s->name;
+ struct obj_cgroup *objcg;
+
+ if (!memcg_kmem_enabled())
+ return true;
+
+ if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
+ return true;
+
+ objcg = get_obj_cgroup_from_current();
+ if (!objcg)
+ return true;
+
+ if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
+ obj_cgroup_put(objcg);
+ return false;
+ }
+
+ *objcgp = objcg;
+ return true;
}
-/*
- * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away by either
- * taking a css reference to the owner cgroup, or holding the slab_mutex.
- */
-static inline struct kmem_cache *
-cache_from_memcg_idx(struct kmem_cache *s, int idx)
+static inline void mod_objcg_state(struct obj_cgroup *objcg,
+ struct pglist_data *pgdat,
+ int idx, int nr)
{
- struct kmem_cache *cachep;
- struct memcg_cache_array *arr;
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
rcu_read_lock();
- arr = rcu_dereference(s->memcg_params.memcg_caches);
-
- /*
- * Make sure we will access the up-to-date value. The code updating
- * memcg_caches issues a write barrier to match this (see
- * memcg_create_kmem_cache()).
- */
- cachep = READ_ONCE(arr->entries[idx]);
+ memcg = obj_cgroup_memcg(objcg);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ mod_memcg_lruvec_state(lruvec, idx, nr);
rcu_read_unlock();
-
- return cachep;
}
-static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ gfp_t flags, size_t size,
+ void **p)
{
- if (is_root_cache(s))
- return s;
- return s->memcg_params.root_cache;
+ struct page *page;
+ unsigned long off;
+ size_t i;
+
+ if (!memcg_kmem_enabled() || !objcg)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (likely(p[i])) {
+ page = virt_to_head_page(p[i]);
+
+ if (!page_has_obj_cgroups(page) &&
+ memcg_alloc_page_obj_cgroups(page, s, flags)) {
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ continue;
+ }
+
+ off = obj_to_index(s, page, p[i]);
+ obj_cgroup_get(objcg);
+ page_obj_cgroups(page)[off] = objcg;
+ mod_objcg_state(objcg, page_pgdat(page),
+ cache_vmstat_idx(s), obj_full_size(s));
+ } else {
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ }
+ }
+ obj_cgroup_put(objcg);
}
-static __always_inline int memcg_charge_slab(struct page *page,
- gfp_t gfp, int order,
- struct kmem_cache *s)
+static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+ void **p, int objects)
{
- if (!memcg_kmem_enabled())
- return 0;
- if (is_root_cache(s))
- return 0;
- return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
-}
+ struct kmem_cache *s;
+ struct obj_cgroup *objcg;
+ struct page *page;
+ unsigned int off;
+ int i;
-static __always_inline void memcg_uncharge_slab(struct page *page, int order,
- struct kmem_cache *s)
-{
if (!memcg_kmem_enabled())
return;
- memcg_kmem_uncharge(page, order);
-}
-extern void slab_init_memcg_params(struct kmem_cache *);
-extern void memcg_link_cache(struct kmem_cache *s);
-extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
- void (*deact_fn)(struct kmem_cache *));
+ for (i = 0; i < objects; i++) {
+ if (unlikely(!p[i]))
+ continue;
+
+ page = virt_to_head_page(p[i]);
+ if (!page_has_obj_cgroups(page))
+ continue;
+
+ if (!s_orig)
+ s = page->slab_cache;
+ else
+ s = s_orig;
+
+ off = obj_to_index(s, page, p[i]);
+ objcg = page_obj_cgroups(page)[off];
+ if (!objcg)
+ continue;
+
+ page_obj_cgroups(page)[off] = NULL;
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
+ -obj_full_size(s));
+ obj_cgroup_put(objcg);
+ }
+}
#else /* CONFIG_MEMCG_KMEM */
-
-/* If !memcg, all caches are root. */
-#define slab_root_caches slab_caches
-#define root_caches_node list
-
-#define for_each_memcg_cache(iter, root) \
- for ((void)(iter), (void)(root); 0; )
-
-static inline bool is_root_cache(struct kmem_cache *s)
+static inline bool page_has_obj_cgroups(struct page *page)
{
- return true;
+ return false;
}
-static inline bool slab_equal_or_root(struct kmem_cache *s,
- struct kmem_cache *p)
-{
- return true;
-}
-
-static inline const char *cache_name(struct kmem_cache *s)
-{
- return s->name;
-}
-
-static inline struct kmem_cache *
-cache_from_memcg_idx(struct kmem_cache *s, int idx)
+static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
return NULL;
}
-static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
-{
- return s;
-}
-
-static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
- struct kmem_cache *s)
+static inline int memcg_alloc_page_obj_cgroups(struct page *page,
+ struct kmem_cache *s, gfp_t gfp)
{
return 0;
}
-static inline void memcg_uncharge_slab(struct page *page, int order,
- struct kmem_cache *s)
+static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}
-static inline void slab_init_memcg_params(struct kmem_cache *s)
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup **objcgp,
+ size_t objects, gfp_t flags)
+{
+ return true;
+}
+
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ gfp_t flags, size_t size,
+ void **p)
{
}
-static inline void memcg_link_cache(struct kmem_cache *s)
+static inline void memcg_slab_free_hook(struct kmem_cache *s,
+ void **p, int objects)
{
}
-
#endif /* CONFIG_MEMCG_KMEM */
+
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+ struct page *page;
+
+ page = virt_to_head_page(obj);
+ if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
+ __func__))
+ return NULL;
+ return page->slab_cache;
+}
+
+static __always_inline void account_slab_page(struct page *page, int order,
+ struct kmem_cache *s)
+{
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ PAGE_SIZE << order);
+}
+
+static __always_inline void unaccount_slab_page(struct page *page, int order,
+ struct kmem_cache *s)
+{
+ if (memcg_kmem_enabled())
+ memcg_free_page_obj_cgroups(page);
+
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ -(PAGE_SIZE << order));
+}
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
struct kmem_cache *cachep;
- struct page *page;
- /*
- * When kmemcg is not being used, both assignments should return the
- * same value. but we don't want to pay the assignment price in that
- * case. If it is not compiled in, the compiler should be smart enough
- * to not do even the assignment. In that case, slab_equal_or_root
- * will also be a constant.
- */
- if (!memcg_kmem_enabled() &&
- !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
+ if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
+ !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
return s;
- page = virt_to_head_page(x);
- cachep = page->slab_cache;
- if (slab_equal_or_root(cachep, s))
- return cachep;
-
- pr_err("%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name);
- WARN_ON_ONCE(1);
- return s;
+ cachep = virt_to_cache(x);
+ if (WARN(cachep && cachep != s,
+ "%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name))
+ print_tracking(cachep, x);
+ return cachep;
}
static inline size_t slab_ksize(const struct kmem_cache *s)
@@ -412,7 +545,8 @@
}
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
- gfp_t flags)
+ struct obj_cgroup **objcgp,
+ size_t size, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -424,27 +558,36 @@
if (should_failslab(s, flags))
return NULL;
- if (memcg_kmem_enabled() &&
- ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
- return memcg_kmem_get_cache(s);
+ if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
+ return NULL;
return s;
}
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg, gfp_t flags,
+ size_t size, void **p, bool init)
{
size_t i;
flags &= gfp_allowed_mask;
+
+ /*
+ * As memory initialization might be integrated into KASAN,
+ * kasan_slab_alloc and initialization memset must be
+ * kept together to avoid discrepancies in behavior.
+ *
+ * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
+ */
for (i = 0; i < size; i++) {
- p[i] = kasan_slab_alloc(s, p[i], flags);
+ p[i] = kasan_slab_alloc(s, p[i], flags, init);
+ if (p[i] && init && !kasan_has_integrated_init())
+ memset(p[i], 0, s->object_size);
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
}
- if (memcg_kmem_enabled())
- memcg_kmem_put_cache(s);
+ memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
#ifndef CONFIG_SLOB
@@ -499,9 +642,6 @@
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
-void *memcg_slab_start(struct seq_file *m, loff_t *pos);
-void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
-void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
@@ -547,4 +687,10 @@
return false;
}
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
+void debugfs_slab_release(struct kmem_cache *);
+#else
+static inline void debugfs_slab_release(struct kmem_cache *s) { }
+#endif
+
#endif /* MM_SLAB_H */
--
Gitblit v1.6.2