--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -7,7 +7,9 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
+#include <linux/kfence.h>
 #include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -81,12 +83,13 @@
  */
 struct kmem_cache {
 	struct kmem_cache_cpu __percpu *cpu_slab;
-	/* Used for retriving partial slabs etc */
+	/* Used for retrieving partial slabs, etc. */
 	slab_flags_t flags;
 	unsigned long min_partial;
-	unsigned int size;		/* The size of an object including meta data */
-	unsigned int object_size;	/* The size of an object without meta data */
-	unsigned int offset;		/* Free pointer offset. */
+	unsigned int size;		/* The size of an object including metadata */
+	unsigned int object_size;	/* The size of an object without metadata */
+	struct reciprocal_value reciprocal_size;
+	unsigned int offset;		/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
 	unsigned int cpu_partial;
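The new reciprocal_size field caches a precomputed multiplicative inverse of size (see <linux/reciprocal_div.h>), so the per-object index computation added at the end of this patch can replace a hardware integer division with one multiply and a couple of shifts. Below is a minimal userspace sketch of that scheme: fls32() and the main() demo are stand-ins added here for illustration, while struct reciprocal_value, reciprocal_value() and reciprocal_divide() only mirror the kernel API they model.

#include <stdint.h>
#include <stdio.h>

/* fls() stand-in: position of the most significant set bit (0 for 0). */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

struct reciprocal_value {
	uint32_t m;
	uint8_t sh1, sh2;
};

/* Precompute once per divisor d (d > 0), as SLUB does once per cache. */
static struct reciprocal_value reciprocal_value(uint32_t d)
{
	struct reciprocal_value R;
	int l = fls32(d - 1);

	R.m = (uint32_t)(((1ULL << 32) * ((1ULL << l) - d)) / d + 1);
	R.sh1 = l > 1 ? 1 : l;
	R.sh2 = l > 1 ? l - 1 : 0;
	return R;
}

/* Divide a by d using only a multiply and shifts. */
static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	struct reciprocal_value rs = reciprocal_value(192); /* 192-byte objects */

	printf("%u\n", reciprocal_divide(5 * 192, rs)); /* prints 5 */
	return 0;
}

The point of the design is that the expensive division needed to build the inverse happens once, at cache-creation time, while the hot allocation/free paths only ever pay for the multiply-and-shift.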
@@ -106,17 +109,7 @@
 	struct list_head list;		/* List of slab caches */
 #ifdef CONFIG_SLUB_SYSFS
 	struct kobject kobj;		/* For sysfs */
-	struct work_struct kobj_remove_work;
 #endif
-#ifdef CONFIG_MEMCG
-	struct memcg_cache_params memcg_params;
-	/* for propagation, maximum size of a stored attr */
-	unsigned int max_attr_size;
-#ifdef CONFIG_SLUB_SYSFS
-	struct kset *memcg_kset;
-#endif
-#endif
-
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	unsigned long random;
 #endif
@@ -151,7 +144,7 @@
 #else
 #define slub_cpu_partial(s)		(0)
 #define slub_set_cpu_partial(s, n)
-#endif // CONFIG_SLUB_CPU_PARTIAL
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
 
 #ifdef CONFIG_SLUB_SYSFS
 #define SLAB_SUPPORTS_SYSFS
@@ -182,4 +175,25 @@
 	return result;
 }
 
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+					  void *addr, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - addr,
+				 cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct page *page, void *obj)
+{
+	if (is_kfence_address(obj))
+		return 0;
+	return __obj_to_index(cache, page_address(page), obj);
+}
+
+static inline int objs_per_slab_page(const struct kmem_cache *cache,
+				     const struct page *page)
+{
+	return page->objects;
+}
 #endif /* _LINUX_SLUB_DEF_H */
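These helpers are what consume reciprocal_size: __obj_to_index() converts an object pointer into its index within a slab page by dividing the byte offset by the object size (after kasan_reset_tag() strips the KASAN tag bits from the pointer), and obj_to_index() short-circuits KFENCE-managed objects to index 0, since those live in a dedicated KFENCE pool rather than a regular slab page. A hypothetical userspace model of the arithmetic follows: struct fake_cache and slab_base are illustrative stand-ins, and a plain division takes the place of reciprocal_divide(), which yields the same value.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct kmem_cache: only the field the
 * index computation needs. */
struct fake_cache {
	unsigned int size;	/* object size including metadata */
};

/* Model of __obj_to_index(): index = byte offset / object size.
 * The kernel performs this division with reciprocal_divide(); the
 * result is identical. */
static unsigned int obj_to_index(const struct fake_cache *c,
				 const char *slab_base, const void *obj)
{
	return (unsigned int)(((const char *)obj - slab_base) / c->size);
}

int main(void)
{
	struct fake_cache c = { .size = 192 };
	static char slab[8 * 192];	/* stands in for one slab page */
	void *obj = slab + 3 * 192;	/* fourth object in the slab */

	assert(obj_to_index(&c, slab, obj) == 3);
	printf("object index: %u\n", obj_to_index(&c, slab, obj));
	return 0;
}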