From 1c055e55a242a33e574e48be530e06770a210dcd Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 19 Feb 2024 03:26:26 +0000
Subject: [PATCH] slub: add obj_to_index() helpers using reciprocal division
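
Add __obj_to_index()/obj_to_index() helpers that compute an object's
index within a slab page via reciprocal_divide(), using a reciprocal
of the object size cached in struct kmem_cache, so the lookup avoids
an integer division. obj_to_index() returns 0 for KFENCE-managed
objects, which are not laid out inside a regular slab page. Also
remove kobj_remove_work and the memcg cache parameters from struct
kmem_cache, and clean up a few comments.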
---
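Notes (not part of the commit message): the new helpers rely on the
kernel's reciprocal_div machinery, which replaces a runtime division
by a precomputed multiply-and-shift. The standalone userspace sketch
below mirrors the semantics of include/linux/reciprocal_div.h and of
the new __obj_to_index(); the fls32() helper and the demo main() are
illustrative stand-ins, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Precomputed constants for dividing by a fixed 32-bit divisor with a
 * multiply and two shifts (Granlund-Montgomery style, mirroring
 * include/linux/reciprocal_div.h).
 */
struct reciprocal_value {
	uint32_t m;
	uint8_t sh1, sh2;
};

/* Position of the most significant set bit, like the kernel's fls(). */
static int fls32(uint32_t v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

static struct reciprocal_value reciprocal_value(uint32_t d)
{
	struct reciprocal_value R;
	int l = fls32(d - 1);

	R.m = (uint32_t)(((1ULL << 32) * ((1ULL << l) - d)) / d + 1);
	R.sh1 = l > 1 ? 1 : l;	/* min(l, 1) */
	R.sh2 = l - R.sh1;
	return R;
}

/* a / d, where R was built from d by reciprocal_value(). */
static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

/* Sketch of __obj_to_index(): index = (obj - slab base) / object size. */
static unsigned int obj_to_index(const char *base, const char *obj,
				 struct reciprocal_value reciprocal_size)
{
	return reciprocal_divide((uint32_t)(obj - base), reciprocal_size);
}

int main(void)
{
	enum { OBJ_SIZE = 192, NR_OBJS = 20 };	/* arbitrary demo geometry */
	static char slab[OBJ_SIZE * NR_OBJS];
	struct reciprocal_value rs = reciprocal_value(OBJ_SIZE);
	unsigned int i;

	for (i = 0; i < NR_OBJS; i++)
		assert(obj_to_index(slab, slab + i * OBJ_SIZE, rs) == i);
	printf("all %d object indices computed without a division\n",
	       NR_OBJS);
	return 0;
}

The reciprocal_size field added to struct kmem_cache stores these
constants per cache, presumably filled in once when the cache geometry
is set up (that call site is outside this header-only diff), so the
multiply-and-shift constants are not recomputed on every lookup.
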
kernel/include/linux/slub_def.h | 44 +++++++++++++++++++++++++++++---------------
1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/kernel/include/linux/slub_def.h b/kernel/include/linux/slub_def.h
index 2dd927f..f2247fe 100644
--- a/kernel/include/linux/slub_def.h
+++ b/kernel/include/linux/slub_def.h
@@ -7,7 +7,9 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
+#include <linux/kfence.h>
#include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -81,12 +83,13 @@
*/
struct kmem_cache {
struct kmem_cache_cpu __percpu *cpu_slab;
- /* Used for retriving partial slabs etc */
+ /* Used for retrieving partial slabs, etc. */
slab_flags_t flags;
unsigned long min_partial;
- unsigned int size; /* The size of an object including meta data */
- unsigned int object_size;/* The size of an object without meta data */
- unsigned int offset; /* Free pointer offset. */
+ unsigned int size; /* The size of an object including metadata */
+ unsigned int object_size;/* The size of an object without metadata */
+ struct reciprocal_value reciprocal_size;
+ unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
@@ -106,17 +109,7 @@
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SLUB_SYSFS
struct kobject kobj; /* For sysfs */
- struct work_struct kobj_remove_work;
#endif
-#ifdef CONFIG_MEMCG
- struct memcg_cache_params memcg_params;
- /* for propagation, maximum size of a stored attr */
- unsigned int max_attr_size;
-#ifdef CONFIG_SLUB_SYSFS
- struct kset *memcg_kset;
-#endif
-#endif
-
#ifdef CONFIG_SLAB_FREELIST_HARDENED
unsigned long random;
#endif
@@ -151,7 +144,7 @@
#else
#define slub_cpu_partial(s) (0)
#define slub_set_cpu_partial(s, n)
-#endif // CONFIG_SLUB_CPU_PARTIAL
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
#ifdef CONFIG_SLUB_SYSFS
#define SLAB_SUPPORTS_SYSFS
@@ -182,4 +175,25 @@
return result;
}
+/* Compute an object's index within a slab page from its address */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct page *page, void *obj)
+{
+ if (is_kfence_address(obj))
+ return 0;
+ return __obj_to_index(cache, page_address(page), obj);
+}
+
+static inline int objs_per_slab_page(const struct kmem_cache *cache,
+ const struct page *page)
+{
+ return page->objects;
+}
#endif /* _LINUX_SLUB_DEF_H */
--
Gitblit v1.6.2