commit f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2 (2024-01-31)

diff --git a/kernel/include/linux/slub_def.h b/kernel/include/linux/slub_def.h
--- a/kernel/include/linux/slub_def.h
+++ b/kernel/include/linux/slub_def.h
@@ -7,7 +7,9 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
+#include <linux/kfence.h>
 #include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
 
 enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -81,12 +83,13 @@
  */
 struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
-	/* Used for retriving partial slabs etc */
+	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
-	unsigned int size;	/* The size of an object including meta data */
-	unsigned int object_size;/* The size of an object without meta data */
-	unsigned int offset;	/* Free pointer offset. */
+	unsigned int size;	/* The size of an object including metadata */
+	unsigned int object_size;/* The size of an object without metadata */
+	struct reciprocal_value reciprocal_size;
+	unsigned int offset;	/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
@@ -106,17 +109,7 @@
	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_SYSFS
	struct kobject kobj;	/* For sysfs */
-	struct work_struct kobj_remove_work;
 #endif
-#ifdef CONFIG_MEMCG
-	struct memcg_cache_params memcg_params;
-	/* for propagation, maximum size of a stored attr */
-	unsigned int max_attr_size;
-#ifdef CONFIG_SLUB_SYSFS
-	struct kset *memcg_kset;
-#endif
-#endif
-
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
 #endif
@@ -151,7 +144,7 @@
 #else
 #define slub_cpu_partial(s)		(0)
 #define slub_set_cpu_partial(s, n)
-#endif // CONFIG_SLUB_CPU_PARTIAL
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
 
 #ifdef CONFIG_SLUB_SYSFS
 #define SLAB_SUPPORTS_SYSFS
@@ -182,4 +175,25 @@
	return result;
 }
 
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+					  void *addr, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - addr,
+				 cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct page *page, void *obj)
+{
+	if (is_kfence_address(obj))
+		return 0;
+	return __obj_to_index(cache, page_address(page), obj);
+}
+
+static inline int objs_per_slab_page(const struct kmem_cache *cache,
+				     const struct page *page)
+{
+	return page->objects;
+}
 #endif /* _LINUX_SLUB_DEF_H */
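
A note on what the new reciprocal_size field and obj_to_index() helpers above are doing: an object's index within its slab page is (obj - page_base) / cache->size, and because size is fixed at cache-creation time, SLUB precomputes reciprocal_value(size) so the hot-path division becomes a multiply and two shifts via reciprocal_divide() from <linux/reciprocal_div.h>. Below is a minimal standalone C sketch of that technique; struct slab_like_cache, this obj_to_index() signature, and the main() harness are illustrative stand-ins (not kernel code), while the two reciprocal helpers mirror the algorithm in the kernel's lib/reciprocal_div.c.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Precomputed multiply-and-shift constants, as in <linux/reciprocal_div.h>. */
struct reciprocal_value {
	uint32_t m;
	uint8_t sh1, sh2;
};

/*
 * Build constants such that reciprocal_divide(a, R) == a / d for any
 * 32-bit a (Granlund-Montgomery; mirrors the kernel's lib/reciprocal_div.c).
 */
static struct reciprocal_value reciprocal_value(uint32_t d)
{
	struct reciprocal_value R;
	int l = 0;

	while ((1ULL << l) < d)		/* l = ceil(log2(d)) */
		l++;
	R.m = (uint32_t)((((1ULL << 32) * ((1ULL << l) - d)) / d) + 1);
	R.sh1 = (uint8_t)(l > 1 ? 1 : l);
	R.sh2 = (uint8_t)(l > 0 ? l - 1 : 0);
	return R;
}

/* a / d without a divide instruction: one multiply, one add, two shifts. */
static inline uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

/* Illustrative stand-in for the two kmem_cache fields used in the diff. */
struct slab_like_cache {
	unsigned int size;		/* object stride in bytes */
	struct reciprocal_value reciprocal_size;
};

/* Same shape as __obj_to_index(): index = (obj - base) / size. */
static unsigned int obj_to_index(const struct slab_like_cache *c,
				 const char *base, const char *obj)
{
	return reciprocal_divide((uint32_t)(obj - base), c->reciprocal_size);
}

int main(void)
{
	static char page[4096];		/* pretend slab page */
	struct slab_like_cache c = { .size = 192 };
	unsigned int i;

	/* Done once, at "cache creation"; every lookup afterwards is divide-free. */
	c.reciprocal_size = reciprocal_value(c.size);
	for (i = 0; (i + 1) * c.size <= sizeof(page); i++)
		assert(obj_to_index(&c, page, page + i * c.size) == i);
	printf("object at byte offset %u has index %u\n",
	       5 * c.size, obj_to_index(&c, page, page + 5 * c.size));
	return 0;
}

The is_kfence_address() early-out in the real obj_to_index() is what the new <linux/kfence.h> include is for: a KFENCE-backed object is the only object on its page, so it is reported as index 0 rather than being run through slab-page arithmetic that does not apply to it.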