.. | .. |
---|
19 | 19 | #include <linux/slab.h> |
---|
20 | 20 | #include <linux/hash.h> |
---|
21 | 21 | #include <linux/kmemleak.h> |
---|
| 22 | +#include <linux/cpu.h> |
---|
22 | 23 | |
---|
23 | 24 | #define ODEBUG_HASH_BITS 14 |
---|
24 | 25 | #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) |
---|
25 | 26 | |
---|
26 | 27 | #define ODEBUG_POOL_SIZE 1024 |
---|
27 | 28 | #define ODEBUG_POOL_MIN_LEVEL 256 |
---|
| 29 | +#define ODEBUG_POOL_PERCPU_SIZE 64 |
---|
| 30 | +#define ODEBUG_BATCH_SIZE 16 |
---|
28 | 31 | |
---|
29 | 32 | #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT |
---|
30 | 33 | #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) |
---|
31 | 34 | #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) |
---|
32 | 35 | |
---|
| 36 | +/* |
---|
| 37 | + * We limit the freeing of debug objects via workqueue at a maximum |
---|
| 38 | + * frequency of 10Hz and about 1024 objects for each freeing operation. |
---|
| 39 | + * So it is freeing at most 10k debug objects per second. |
---|
| 40 | + */ |
---|
| 41 | +#define ODEBUG_FREE_WORK_MAX 1024 |
---|
| 42 | +#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10) |
---|
| 43 | + |
---|
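To make the "10Hz, about 1024 objects per run" claim above concrete, here is a small standalone check of the arithmetic (a sketch only; HZ=250 is assumed purely for illustration, the real value is configuration dependent):

```c
#include <stdio.h>

#define HZ                     250	/* assumed for illustration only */
#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
#define ODEBUG_FREE_WORK_MAX   1024
#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)

int main(void)
{
	int runs_per_sec = HZ / ODEBUG_FREE_WORK_DELAY;

	printf("work delay   : %d jiffies\n", ODEBUG_FREE_WORK_DELAY);           /* 25     */
	printf("runs per sec : ~%d\n", runs_per_sec);                            /* ~10    */
	printf("max objs/sec : ~%d\n", runs_per_sec * ODEBUG_FREE_WORK_MAX);     /* ~10240 */
	return 0;
}
```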
33 | 44 | struct debug_bucket { |
---|
34 | 45 | struct hlist_head list; |
---|
35 | 46 | raw_spinlock_t lock; |
---|
36 | 47 | }; |
---|
| 48 | + |
---|
| 49 | +/* |
---|
| 50 | + * Debug object percpu free list |
---|
| 51 | + * Access is protected by disabling irq |
---|
| 52 | + */ |
---|
| 53 | +struct debug_percpu_free { |
---|
| 54 | + struct hlist_head free_objs; |
---|
| 55 | + int obj_free; |
---|
| 56 | +}; |
---|
| 57 | + |
---|
| 58 | +static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool); |
---|
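Because each CPU only ever touches its own debug_percpu_free instance, and does so with interrupts disabled, the per-CPU list needs no spinlock. A minimal kernel-style sketch of that access pattern (it assumes the struct and per-CPU variable defined above; percpu_pool_push() is an invented helper name, the real fast path is in __free_object() further down):

```c
/*
 * Push a node onto this CPU's free list. Disabling interrupts is
 * sufficient because no other context can run on this CPU meanwhile.
 */
static void percpu_pool_push(struct hlist_node *node)
{
	struct debug_percpu_free *percpu_pool;
	unsigned long flags;

	local_irq_save(flags);
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	hlist_add_head(node, &percpu_pool->free_objs);
	percpu_pool->obj_free++;
	local_irq_restore(flags);
}
```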
37 | 59 | |
---|
38 | 60 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; |
---|
39 | 61 | |
---|
.. | .. |
---|
44 | 66 | static HLIST_HEAD(obj_pool); |
---|
45 | 67 | static HLIST_HEAD(obj_to_free); |
---|
46 | 68 | |
---|
| 69 | +/* |
---|
| 70 | + * Because of the presence of percpu free pools, obj_pool_free will |
---|
| 71 | + * under-count those in the percpu free pools. Similarly, obj_pool_used |
---|
| 72 | + * will over-count those in the percpu free pools. Adjustments will be |
---|
| 73 | + * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used |
---|
| 74 | + * can be off. |
---|
| 75 | + */ |
---|
47 | 76 | static int obj_pool_min_free = ODEBUG_POOL_SIZE; |
---|
48 | 77 | static int obj_pool_free = ODEBUG_POOL_SIZE; |
---|
49 | 78 | static int obj_pool_used; |
---|
50 | 79 | static int obj_pool_max_used; |
---|
| 80 | +static bool obj_freeing; |
---|
51 | 81 | /* The number of objs on the global free list */ |
---|
52 | 82 | static int obj_nr_tofree; |
---|
53 | | -static struct kmem_cache *obj_cache; |
---|
54 | 83 | |
---|
55 | 84 | static int debug_objects_maxchain __read_mostly; |
---|
56 | 85 | static int __maybe_unused debug_objects_maxchecked __read_mostly; |
---|
.. | .. |
---|
62 | 91 | = ODEBUG_POOL_SIZE; |
---|
63 | 92 | static int debug_objects_pool_min_level __read_mostly |
---|
64 | 93 | = ODEBUG_POOL_MIN_LEVEL; |
---|
65 | | -static struct debug_obj_descr *descr_test __read_mostly; |
---|
| 94 | +static const struct debug_obj_descr *descr_test __read_mostly; |
---|
| 95 | +static struct kmem_cache *obj_cache __read_mostly; |
---|
66 | 96 | |
---|
67 | 97 | /* |
---|
68 | 98 | * Track numbers of kmem_cache_alloc()/free() calls done. |
---|
.. | .. |
---|
71 | 101 | static int debug_objects_freed; |
---|
72 | 102 | |
---|
73 | 103 | static void free_obj_work(struct work_struct *work); |
---|
74 | | -static DECLARE_WORK(debug_obj_work, free_obj_work); |
---|
| 104 | +static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work); |
---|
75 | 105 | |
---|
76 | 106 | static int __init enable_object_debug(char *str) |
---|
77 | 107 | { |
---|
.. | .. |
---|
99 | 129 | |
---|
100 | 130 | static void fill_pool(void) |
---|
101 | 131 | { |
---|
102 | | - gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; |
---|
103 | | - struct debug_obj *new, *obj; |
---|
| 132 | + gfp_t gfp = __GFP_HIGH | __GFP_NOWARN; |
---|
| 133 | + struct debug_obj *obj; |
---|
104 | 134 | unsigned long flags; |
---|
105 | 135 | |
---|
106 | | - if (likely(obj_pool_free >= debug_objects_pool_min_level)) |
---|
| 136 | + if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level)) |
---|
107 | 137 | return; |
---|
108 | 138 | |
---|
109 | 139 | /* |
---|
110 | 140 | * Reuse objs from the global free list; they will be reinitialized |
---|
111 | 141 | * when allocating. |
---|
| 142 | + * |
---|
| 143 | + * Both obj_nr_tofree and obj_pool_free are checked locklessly; the |
---|
| 144 | + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical |
---|
| 145 | + * sections. |
---|
112 | 146 | */ |
---|
113 | | - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { |
---|
| 147 | + while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { |
---|
114 | 148 | raw_spin_lock_irqsave(&pool_lock, flags); |
---|
115 | 149 | /* |
---|
116 | 150 | * Recheck with the lock held as the worker thread might have |
---|
117 | 151 | * won the race and freed the global free list already. |
---|
118 | 152 | */ |
---|
119 | | - if (obj_nr_tofree) { |
---|
| 153 | + while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { |
---|
120 | 154 | obj = hlist_entry(obj_to_free.first, typeof(*obj), node); |
---|
121 | 155 | hlist_del(&obj->node); |
---|
122 | | - obj_nr_tofree--; |
---|
| 156 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); |
---|
123 | 157 | hlist_add_head(&obj->node, &obj_pool); |
---|
124 | | - obj_pool_free++; |
---|
| 158 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
125 | 159 | } |
---|
126 | 160 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
127 | 161 | } |
---|
.. | .. |
---|
129 | 163 | if (unlikely(!obj_cache)) |
---|
130 | 164 | return; |
---|
131 | 165 | |
---|
132 | | - while (obj_pool_free < debug_objects_pool_min_level) { |
---|
| 166 | + while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { |
---|
| 167 | + struct debug_obj *new[ODEBUG_BATCH_SIZE]; |
---|
| 168 | + int cnt; |
---|
133 | 169 | |
---|
134 | | - new = kmem_cache_zalloc(obj_cache, gfp); |
---|
135 | | - if (!new) |
---|
| 170 | + for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) { |
---|
| 171 | + new[cnt] = kmem_cache_zalloc(obj_cache, gfp); |
---|
| 172 | + if (!new[cnt]) |
---|
| 173 | + break; |
---|
| 174 | + } |
---|
| 175 | + if (!cnt) |
---|
136 | 176 | return; |
---|
137 | 177 | |
---|
138 | 178 | raw_spin_lock_irqsave(&pool_lock, flags); |
---|
139 | | - hlist_add_head(&new->node, &obj_pool); |
---|
140 | | - debug_objects_allocated++; |
---|
141 | | - obj_pool_free++; |
---|
| 179 | + while (cnt) { |
---|
| 180 | + hlist_add_head(&new[--cnt]->node, &obj_pool); |
---|
| 181 | + debug_objects_allocated++; |
---|
| 182 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
| 183 | + } |
---|
142 | 184 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
143 | 185 | } |
---|
144 | 186 | } |
---|
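The reworked refill loop is a batch-and-publish pattern: up to ODEBUG_BATCH_SIZE objects are allocated with pool_lock dropped, then spliced into obj_pool under a single lock acquisition, so kmem_cache_zalloc() latency never sits inside the critical section. A compressed userspace model of the same shape (a pthread mutex and a singly linked list stand in for pool_lock and the hlist; names are invented):

```c
#include <pthread.h>
#include <stdlib.h>

#define BATCH_SIZE 16

struct node { struct node *next; };

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool_head;
static int pool_free;

static void fill_pool_batch(void)
{
	struct node *new[BATCH_SIZE];
	int cnt;

	/* Allocate the whole batch before taking the lock. */
	for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
		new[cnt] = calloc(1, sizeof(*new[cnt]));
		if (!new[cnt])
			break;
	}
	if (!cnt)
		return;

	/* Publish everything that was allocated in one critical section. */
	pthread_mutex_lock(&pool_lock);
	while (cnt) {
		new[--cnt]->next = pool_head;
		pool_head = new[cnt];
		pool_free++;
	}
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	fill_pool_batch();
	return pool_free != BATCH_SIZE;	/* 0 when the batch was fully allocated */
}
```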
.. | .. |
---|
163 | 205 | } |
---|
164 | 206 | |
---|
165 | 207 | /* |
---|
166 | | - * Allocate a new object. If the pool is empty, switch off the debugger. |
---|
167 | | - * Must be called with interrupts disabled. |
---|
| 208 | + * Allocate a new object from the hlist |
---|
168 | 209 | */ |
---|
169 | | -static struct debug_obj * |
---|
170 | | -alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) |
---|
| 210 | +static struct debug_obj *__alloc_object(struct hlist_head *list) |
---|
171 | 211 | { |
---|
172 | 212 | struct debug_obj *obj = NULL; |
---|
173 | 213 | |
---|
174 | | - raw_spin_lock(&pool_lock); |
---|
175 | | - if (obj_pool.first) { |
---|
176 | | - obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
---|
177 | | - |
---|
178 | | - obj->object = addr; |
---|
179 | | - obj->descr = descr; |
---|
180 | | - obj->state = ODEBUG_STATE_NONE; |
---|
181 | | - obj->astate = 0; |
---|
| 214 | + if (list->first) { |
---|
| 215 | + obj = hlist_entry(list->first, typeof(*obj), node); |
---|
182 | 216 | hlist_del(&obj->node); |
---|
| 217 | + } |
---|
183 | 218 | |
---|
184 | | - hlist_add_head(&obj->node, &b->list); |
---|
| 219 | + return obj; |
---|
| 220 | +} |
---|
185 | 221 | |
---|
| 222 | +static struct debug_obj * |
---|
| 223 | +alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr) |
---|
| 224 | +{ |
---|
| 225 | + struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool); |
---|
| 226 | + struct debug_obj *obj; |
---|
| 227 | + |
---|
| 228 | + if (likely(obj_cache)) { |
---|
| 229 | + obj = __alloc_object(&percpu_pool->free_objs); |
---|
| 230 | + if (obj) { |
---|
| 231 | + percpu_pool->obj_free--; |
---|
| 232 | + goto init_obj; |
---|
| 233 | + } |
---|
| 234 | + } |
---|
| 235 | + |
---|
| 236 | + raw_spin_lock(&pool_lock); |
---|
| 237 | + obj = __alloc_object(&obj_pool); |
---|
| 238 | + if (obj) { |
---|
186 | 239 | obj_pool_used++; |
---|
| 240 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
---|
| 241 | + |
---|
| 242 | + /* |
---|
| 243 | + * Looking ahead, allocate one batch of debug objects and |
---|
| 244 | + * put them into the percpu free pool. |
---|
| 245 | + */ |
---|
| 246 | + if (likely(obj_cache)) { |
---|
| 247 | + int i; |
---|
| 248 | + |
---|
| 249 | + for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { |
---|
| 250 | + struct debug_obj *obj2; |
---|
| 251 | + |
---|
| 252 | + obj2 = __alloc_object(&obj_pool); |
---|
| 253 | + if (!obj2) |
---|
| 254 | + break; |
---|
| 255 | + hlist_add_head(&obj2->node, |
---|
| 256 | + &percpu_pool->free_objs); |
---|
| 257 | + percpu_pool->obj_free++; |
---|
| 258 | + obj_pool_used++; |
---|
| 259 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
---|
| 260 | + } |
---|
| 261 | + } |
---|
| 262 | + |
---|
187 | 263 | if (obj_pool_used > obj_pool_max_used) |
---|
188 | 264 | obj_pool_max_used = obj_pool_used; |
---|
189 | 265 | |
---|
190 | | - obj_pool_free--; |
---|
191 | 266 | if (obj_pool_free < obj_pool_min_free) |
---|
192 | 267 | obj_pool_min_free = obj_pool_free; |
---|
193 | 268 | } |
---|
194 | 269 | raw_spin_unlock(&pool_lock); |
---|
195 | 270 | |
---|
| 271 | +init_obj: |
---|
| 272 | + if (obj) { |
---|
| 273 | + obj->object = addr; |
---|
| 274 | + obj->descr = descr; |
---|
| 275 | + obj->state = ODEBUG_STATE_NONE; |
---|
| 276 | + obj->astate = 0; |
---|
| 277 | + hlist_add_head(&obj->node, &b->list); |
---|
| 278 | + } |
---|
196 | 279 | return obj; |
---|
197 | 280 | } |
---|
198 | 281 | |
---|
.. | .. |
---|
209 | 292 | unsigned long flags; |
---|
210 | 293 | HLIST_HEAD(tofree); |
---|
211 | 294 | |
---|
| 295 | + WRITE_ONCE(obj_freeing, false); |
---|
212 | 296 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) |
---|
213 | 297 | return; |
---|
| 298 | + |
---|
| 299 | + if (obj_pool_free >= debug_objects_pool_size) |
---|
| 300 | + goto free_objs; |
---|
214 | 301 | |
---|
215 | 302 | /* |
---|
216 | 303 | * The objs on the pool list might be allocated before the work is |
---|
217 | 304 | * run, so recheck if pool list it full or not, if not fill pool |
---|
218 | | - * list from the global free list |
---|
| 305 | + * list from the global free list. As it is likely that a workload |
---|
| 306 | + * may be gearing up to use more and more objects, don't free any |
---|
| 307 | + * of them until the next round. |
---|
219 | 308 | */ |
---|
220 | 309 | while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) { |
---|
221 | 310 | obj = hlist_entry(obj_to_free.first, typeof(*obj), node); |
---|
222 | 311 | hlist_del(&obj->node); |
---|
223 | 312 | hlist_add_head(&obj->node, &obj_pool); |
---|
224 | | - obj_pool_free++; |
---|
225 | | - obj_nr_tofree--; |
---|
| 313 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
| 314 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); |
---|
226 | 315 | } |
---|
| 316 | + raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
| 317 | + return; |
---|
227 | 318 | |
---|
| 319 | +free_objs: |
---|
228 | 320 | /* |
---|
229 | 321 | * Pool list is already full and there are still objs on the free |
---|
230 | 322 | * list. Move remaining free objs to a temporary list to free the |
---|
.. | .. |
---|
233 | 325 | if (obj_nr_tofree) { |
---|
234 | 326 | hlist_move_list(&obj_to_free, &tofree); |
---|
235 | 327 | debug_objects_freed += obj_nr_tofree; |
---|
236 | | - obj_nr_tofree = 0; |
---|
| 328 | + WRITE_ONCE(obj_nr_tofree, 0); |
---|
237 | 329 | } |
---|
238 | 330 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
239 | 331 | |
---|
.. | .. |
---|
243 | 335 | } |
---|
244 | 336 | } |
---|
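free_obj_work() keeps to the usual detach-then-free discipline: under pool_lock it only moves the surplus objects to a private list with hlist_move_list(), and the actual kmem_cache_free() calls happen after the lock is dropped (those lines are in the elided context just above). The added early return means that while the pool is still below debug_objects_pool_size nothing is released at all, on the assumption that a workload which just freed a burst of objects is likely to allocate again soon. A stripped-down sketch of the detach/free split, in the context of this file's statics (drain_free_list() is an invented name):

```c
static void drain_free_list(void)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	/* Detach the whole to-free list while holding the lock ... */
	raw_spin_lock_irqsave(&pool_lock, flags);
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	/* ... and do the expensive frees with the lock dropped. */
	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
```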
245 | 337 | |
---|
246 | | -static bool __free_object(struct debug_obj *obj) |
---|
| 338 | +static void __free_object(struct debug_obj *obj) |
---|
247 | 339 | { |
---|
| 340 | + struct debug_obj *objs[ODEBUG_BATCH_SIZE]; |
---|
| 341 | + struct debug_percpu_free *percpu_pool; |
---|
| 342 | + int lookahead_count = 0; |
---|
248 | 343 | unsigned long flags; |
---|
249 | 344 | bool work; |
---|
250 | 345 | |
---|
251 | | - raw_spin_lock_irqsave(&pool_lock, flags); |
---|
252 | | - work = (obj_pool_free > debug_objects_pool_size) && obj_cache; |
---|
| 346 | + local_irq_save(flags); |
---|
| 347 | + if (!obj_cache) |
---|
| 348 | + goto free_to_obj_pool; |
---|
| 349 | + |
---|
| 350 | + /* |
---|
| 351 | + * Try to free it into the percpu pool first. |
---|
| 352 | + */ |
---|
| 353 | + percpu_pool = this_cpu_ptr(&percpu_obj_pool); |
---|
| 354 | + if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) { |
---|
| 355 | + hlist_add_head(&obj->node, &percpu_pool->free_objs); |
---|
| 356 | + percpu_pool->obj_free++; |
---|
| 357 | + local_irq_restore(flags); |
---|
| 358 | + return; |
---|
| 359 | + } |
---|
| 360 | + |
---|
| 361 | + /* |
---|
| 362 | + * As the percpu pool is full, look ahead and pull out a batch |
---|
| 363 | + * of objects from the percpu pool and free them as well. |
---|
| 364 | + */ |
---|
| 365 | + for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) { |
---|
| 366 | + objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs); |
---|
| 367 | + if (!objs[lookahead_count]) |
---|
| 368 | + break; |
---|
| 369 | + percpu_pool->obj_free--; |
---|
| 370 | + } |
---|
| 371 | + |
---|
| 372 | +free_to_obj_pool: |
---|
| 373 | + raw_spin_lock(&pool_lock); |
---|
| 374 | + work = (obj_pool_free > debug_objects_pool_size) && obj_cache && |
---|
| 375 | + (obj_nr_tofree < ODEBUG_FREE_WORK_MAX); |
---|
253 | 376 | obj_pool_used--; |
---|
254 | 377 | |
---|
255 | 378 | if (work) { |
---|
256 | | - obj_nr_tofree++; |
---|
| 379 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); |
---|
257 | 380 | hlist_add_head(&obj->node, &obj_to_free); |
---|
| 381 | + if (lookahead_count) { |
---|
| 382 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count); |
---|
| 383 | + obj_pool_used -= lookahead_count; |
---|
| 384 | + while (lookahead_count) { |
---|
| 385 | + hlist_add_head(&objs[--lookahead_count]->node, |
---|
| 386 | + &obj_to_free); |
---|
| 387 | + } |
---|
| 388 | + } |
---|
| 389 | + |
---|
| 390 | + if ((obj_pool_free > debug_objects_pool_size) && |
---|
| 391 | + (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) { |
---|
| 392 | + int i; |
---|
| 393 | + |
---|
| 394 | + /* |
---|
| 395 | + * Free one more batch of objects from obj_pool. |
---|
| 396 | + */ |
---|
| 397 | + for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { |
---|
| 398 | + obj = __alloc_object(&obj_pool); |
---|
| 399 | + hlist_add_head(&obj->node, &obj_to_free); |
---|
| 400 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
---|
| 401 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); |
---|
| 402 | + } |
---|
| 403 | + } |
---|
258 | 404 | } else { |
---|
259 | | - obj_pool_free++; |
---|
| 405 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
260 | 406 | hlist_add_head(&obj->node, &obj_pool); |
---|
| 407 | + if (lookahead_count) { |
---|
| 408 | + WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count); |
---|
| 409 | + obj_pool_used -= lookahead_count; |
---|
| 410 | + while (lookahead_count) { |
---|
| 411 | + hlist_add_head(&objs[--lookahead_count]->node, |
---|
| 412 | + &obj_pool); |
---|
| 413 | + } |
---|
| 414 | + } |
---|
261 | 415 | } |
---|
262 | | - raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
263 | | - return work; |
---|
| 416 | + raw_spin_unlock(&pool_lock); |
---|
| 417 | + local_irq_restore(flags); |
---|
264 | 418 | } |
---|
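The free path now has a clear hierarchy: per-CPU pool first (no pool_lock at all), then the global free list if the pool already holds a surplus and the to-free backlog is below ODEBUG_FREE_WORK_MAX, otherwise back into the global pool for reuse. A pure-function model of that policy, with invented names and the thresholds from this file (runnable userspace C, just to make the decision table explicit):

```c
#include <stdbool.h>
#include <stdio.h>

#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_FREE_WORK_MAX    1024

enum target { PERCPU_POOL, GLOBAL_FREE_LIST, GLOBAL_POOL };

/* Where does a freed object end up? cache_ready models obj_cache != NULL. */
static enum target free_target(bool cache_ready, int percpu_free,
			       int pool_free, int pool_size, int nr_tofree)
{
	if (cache_ready && percpu_free < ODEBUG_POOL_PERCPU_SIZE)
		return PERCPU_POOL;		/* fast path, no pool_lock */
	if (cache_ready && pool_free > pool_size &&
	    nr_tofree < ODEBUG_FREE_WORK_MAX)
		return GLOBAL_FREE_LIST;	/* surplus, queued for kmem_cache_free() */
	return GLOBAL_POOL;			/* keep it around for reuse */
}

int main(void)
{
	printf("%d\n", free_target(true, 10, 2000, 1024, 0));	/* 0: PERCPU_POOL      */
	printf("%d\n", free_target(true, 64, 2000, 1024, 0));	/* 1: GLOBAL_FREE_LIST */
	printf("%d\n", free_target(true, 64,  500, 1024, 0));	/* 2: GLOBAL_POOL      */
	return 0;
}
```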
265 | 419 | |
---|
266 | 420 | /* |
---|
.. | .. |
---|
269 | 423 | */ |
---|
270 | 424 | static void free_object(struct debug_obj *obj) |
---|
271 | 425 | { |
---|
272 | | - if (__free_object(obj)) |
---|
273 | | - schedule_work(&debug_obj_work); |
---|
| 426 | + __free_object(obj); |
---|
| 427 | + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { |
---|
| 428 | + WRITE_ONCE(obj_freeing, true); |
---|
| 429 | + schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); |
---|
| 430 | + } |
---|
274 | 431 | } |
---|
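The obj_freeing flag ensures only one delayed work is pending at a time: the first freer to see work on obj_to_free arms the timer, everyone else observes obj_freeing == true and does nothing, and free_obj_work() clears the flag as it starts so a later free can arm it again. A minimal restatement of that arm-once pattern, using the names from this file:

```c
static void kick_free_work(void)
{
	/*
	 * Racy by design: READ_ONCE()/WRITE_ONCE() only prevent torn
	 * accesses. Losing the race costs at worst a redundant
	 * schedule_delayed_work() call, which is ignored while the
	 * delayed work is already pending.
	 */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
```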
| 432 | + |
---|
| 433 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 434 | +static int object_cpu_offline(unsigned int cpu) |
---|
| 435 | +{ |
---|
| 436 | + struct debug_percpu_free *percpu_pool; |
---|
| 437 | + struct hlist_node *tmp; |
---|
| 438 | + struct debug_obj *obj; |
---|
| 439 | + unsigned long flags; |
---|
| 440 | + |
---|
| 441 | + /* Remote access is safe as the CPU is dead already */ |
---|
| 442 | + percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); |
---|
| 443 | + hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) { |
---|
| 444 | + hlist_del(&obj->node); |
---|
| 445 | + kmem_cache_free(obj_cache, obj); |
---|
| 446 | + } |
---|
| 447 | + |
---|
| 448 | + raw_spin_lock_irqsave(&pool_lock, flags); |
---|
| 449 | + obj_pool_used -= percpu_pool->obj_free; |
---|
| 450 | + debug_objects_freed += percpu_pool->obj_free; |
---|
| 451 | + raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
| 452 | + |
---|
| 453 | + percpu_pool->obj_free = 0; |
---|
| 454 | + |
---|
| 455 | + return 0; |
---|
| 456 | +} |
---|
| 457 | +#endif |
---|
275 | 458 | |
---|
276 | 459 | /* |
---|
277 | 460 | * We run out of memory. That means we probably have tons of objects |
---|
.. | .. |
---|
315 | 498 | |
---|
316 | 499 | static void debug_print_object(struct debug_obj *obj, char *msg) |
---|
317 | 500 | { |
---|
318 | | - struct debug_obj_descr *descr = obj->descr; |
---|
| 501 | + const struct debug_obj_descr *descr = obj->descr; |
---|
319 | 502 | static int limit; |
---|
| 503 | + |
---|
| 504 | + /* |
---|
| 505 | + * Don't report if lookup_object_or_alloc() by the current thread |
---|
| 506 | + * failed because lookup_object_or_alloc()/debug_objects_oom() by a |
---|
| 507 | + * concurrent thread turned off debug_objects_enabled and cleared |
---|
| 508 | + * the hash buckets. |
---|
| 509 | + */ |
---|
| 510 | + if (!debug_objects_enabled) |
---|
| 511 | + return; |
---|
320 | 512 | |
---|
321 | 513 | if (limit < 5 && descr != descr_test) { |
---|
322 | 514 | void *hint = descr->debug_hint ? |
---|
.. | .. |
---|
368 | 560 | WARN_ON(1); |
---|
369 | 561 | } |
---|
370 | 562 | |
---|
| 563 | +static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b, |
---|
| 564 | + const struct debug_obj_descr *descr, |
---|
| 565 | + bool onstack, bool alloc_ifstatic) |
---|
| 566 | +{ |
---|
| 567 | + struct debug_obj *obj = lookup_object(addr, b); |
---|
| 568 | + enum debug_obj_state state = ODEBUG_STATE_NONE; |
---|
| 569 | + |
---|
| 570 | + if (likely(obj)) |
---|
| 571 | + return obj; |
---|
| 572 | + |
---|
| 573 | + /* |
---|
| 574 | + * debug_object_init() unconditionally allocates untracked |
---|
| 575 | + * objects. It does not matter whether it is a static object or |
---|
| 576 | + * not. |
---|
| 577 | + * |
---|
| 578 | + * debug_object_assert_init() and debug_object_activate() allow |
---|
| 579 | + * allocation only if the descriptor callback confirms that the |
---|
| 580 | + * object is static and considered initialized. For non-static |
---|
| 581 | + * objects the allocation needs to be done from the fixup callback. |
---|
| 582 | + */ |
---|
| 583 | + if (unlikely(alloc_ifstatic)) { |
---|
| 584 | + if (!descr->is_static_object || !descr->is_static_object(addr)) |
---|
| 585 | + return ERR_PTR(-ENOENT); |
---|
| 586 | + /* Statically allocated objects are considered initialized */ |
---|
| 587 | + state = ODEBUG_STATE_INIT; |
---|
| 588 | + } |
---|
| 589 | + |
---|
| 590 | + obj = alloc_object(addr, b, descr); |
---|
| 591 | + if (likely(obj)) { |
---|
| 592 | + obj->state = state; |
---|
| 593 | + debug_object_is_on_stack(addr, onstack); |
---|
| 594 | + return obj; |
---|
| 595 | + } |
---|
| 596 | + |
---|
| 597 | + /* Out of memory. Do the cleanup outside of the locked region */ |
---|
| 598 | + debug_objects_enabled = 0; |
---|
| 599 | + return NULL; |
---|
| 600 | +} |
---|
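The alloc_ifstatic case only fires when the descriptor can positively identify a statically initialized object. A hypothetical descriptor wired up for that (my_timer, the magic value and my_timer_debug_descr are invented for illustration; the debug_obj_descr fields and debug_object_activate() are the real API from <linux/debugobjects.h>):

```c
#include <linux/debugobjects.h>

struct my_timer {
	unsigned long magic;
	/* ... */
};

/* Set by static initializers only, never by dynamic allocation. */
#define MY_TIMER_STATIC_MAGIC	0x4d59544dUL

static bool my_timer_is_static(void *addr)
{
	struct my_timer *t = addr;

	return t->magic == MY_TIMER_STATIC_MAGIC;
}

static const struct debug_obj_descr my_timer_debug_descr = {
	.name			= "my_timer",
	.is_static_object	= my_timer_is_static,
};

/*
 * debug_object_activate() on an untracked address now consults
 * my_timer_is_static(); if it returns true the object is tracked in
 * ODEBUG_STATE_INIT and activation proceeds, otherwise the fixup runs.
 */
static void my_timer_start(struct my_timer *t)
{
	debug_object_activate(t, &my_timer_debug_descr);
	/* ... arm the timer ... */
}
```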
| 601 | + |
---|
| 602 | +static void debug_objects_fill_pool(void) |
---|
| 603 | +{ |
---|
| 604 | + /* |
---|
| 605 | + * On RT enabled kernels the pool refill must happen in preemptible |
---|
| 606 | + * context: |
---|
| 607 | + */ |
---|
| 608 | + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) |
---|
| 609 | + fill_pool(); |
---|
| 610 | +} |
---|
| 611 | + |
---|
371 | 612 | static void |
---|
372 | | -__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) |
---|
| 613 | +__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) |
---|
373 | 614 | { |
---|
374 | 615 | enum debug_obj_state state; |
---|
375 | 616 | struct debug_bucket *db; |
---|
376 | 617 | struct debug_obj *obj; |
---|
377 | 618 | unsigned long flags; |
---|
378 | 619 | |
---|
379 | | - fill_pool(); |
---|
| 620 | + debug_objects_fill_pool(); |
---|
380 | 621 | |
---|
381 | 622 | db = get_bucket((unsigned long) addr); |
---|
382 | 623 | |
---|
383 | 624 | raw_spin_lock_irqsave(&db->lock, flags); |
---|
384 | 625 | |
---|
385 | | - obj = lookup_object(addr, db); |
---|
386 | | - if (!obj) { |
---|
387 | | - obj = alloc_object(addr, db, descr); |
---|
388 | | - if (!obj) { |
---|
389 | | - debug_objects_enabled = 0; |
---|
390 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
391 | | - debug_objects_oom(); |
---|
392 | | - return; |
---|
393 | | - } |
---|
394 | | - debug_object_is_on_stack(addr, onstack); |
---|
| 626 | + obj = lookup_object_or_alloc(addr, db, descr, onstack, false); |
---|
| 627 | + if (unlikely(!obj)) { |
---|
| 628 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 629 | + debug_objects_oom(); |
---|
| 630 | + return; |
---|
395 | 631 | } |
---|
396 | 632 | |
---|
397 | 633 | switch (obj->state) { |
---|
.. | .. |
---|
402 | 638 | break; |
---|
403 | 639 | |
---|
404 | 640 | case ODEBUG_STATE_ACTIVE: |
---|
405 | | - debug_print_object(obj, "init"); |
---|
406 | 641 | state = obj->state; |
---|
407 | 642 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 643 | + debug_print_object(obj, "init"); |
---|
408 | 644 | debug_object_fixup(descr->fixup_init, addr, state); |
---|
409 | 645 | return; |
---|
410 | 646 | |
---|
411 | 647 | case ODEBUG_STATE_DESTROYED: |
---|
| 648 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
412 | 649 | debug_print_object(obj, "init"); |
---|
413 | | - break; |
---|
| 650 | + return; |
---|
414 | 651 | default: |
---|
415 | 652 | break; |
---|
416 | 653 | } |
---|
.. | .. |
---|
423 | 660 | * @addr: address of the object |
---|
424 | 661 | * @descr: pointer to an object specific debug description structure |
---|
425 | 662 | */ |
---|
426 | | -void debug_object_init(void *addr, struct debug_obj_descr *descr) |
---|
| 663 | +void debug_object_init(void *addr, const struct debug_obj_descr *descr) |
---|
427 | 664 | { |
---|
428 | 665 | if (!debug_objects_enabled) |
---|
429 | 666 | return; |
---|
.. | .. |
---|
438 | 675 | * @addr: address of the object |
---|
439 | 676 | * @descr: pointer to an object specific debug description structure |
---|
440 | 677 | */ |
---|
441 | | -void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) |
---|
| 678 | +void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) |
---|
442 | 679 | { |
---|
443 | 680 | if (!debug_objects_enabled) |
---|
444 | 681 | return; |
---|
.. | .. |
---|
453 | 690 | * @descr: pointer to an object specific debug description structure |
---|
454 | 691 | * Returns 0 for success, -EINVAL for check failed. |
---|
455 | 692 | */ |
---|
456 | | -int debug_object_activate(void *addr, struct debug_obj_descr *descr) |
---|
| 693 | +int debug_object_activate(void *addr, const struct debug_obj_descr *descr) |
---|
457 | 694 | { |
---|
| 695 | + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; |
---|
458 | 696 | enum debug_obj_state state; |
---|
459 | 697 | struct debug_bucket *db; |
---|
460 | 698 | struct debug_obj *obj; |
---|
461 | 699 | unsigned long flags; |
---|
462 | 700 | int ret; |
---|
463 | | - struct debug_obj o = { .object = addr, |
---|
464 | | - .state = ODEBUG_STATE_NOTAVAILABLE, |
---|
465 | | - .descr = descr }; |
---|
466 | 701 | |
---|
467 | 702 | if (!debug_objects_enabled) |
---|
468 | 703 | return 0; |
---|
| 704 | + |
---|
| 705 | + debug_objects_fill_pool(); |
---|
469 | 706 | |
---|
470 | 707 | db = get_bucket((unsigned long) addr); |
---|
471 | 708 | |
---|
472 | 709 | raw_spin_lock_irqsave(&db->lock, flags); |
---|
473 | 710 | |
---|
474 | | - obj = lookup_object(addr, db); |
---|
475 | | - if (obj) { |
---|
| 711 | + obj = lookup_object_or_alloc(addr, db, descr, false, true); |
---|
| 712 | + if (likely(!IS_ERR_OR_NULL(obj))) { |
---|
| 713 | + bool print_object = false; |
---|
| 714 | + |
---|
476 | 715 | switch (obj->state) { |
---|
477 | 716 | case ODEBUG_STATE_INIT: |
---|
478 | 717 | case ODEBUG_STATE_INACTIVE: |
---|
.. | .. |
---|
481 | 720 | break; |
---|
482 | 721 | |
---|
483 | 722 | case ODEBUG_STATE_ACTIVE: |
---|
484 | | - debug_print_object(obj, "activate"); |
---|
485 | 723 | state = obj->state; |
---|
486 | 724 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 725 | + debug_print_object(obj, "activate"); |
---|
487 | 726 | ret = debug_object_fixup(descr->fixup_activate, addr, state); |
---|
488 | 727 | return ret ? 0 : -EINVAL; |
---|
489 | 728 | |
---|
490 | 729 | case ODEBUG_STATE_DESTROYED: |
---|
491 | | - debug_print_object(obj, "activate"); |
---|
| 730 | + print_object = true; |
---|
492 | 731 | ret = -EINVAL; |
---|
493 | 732 | break; |
---|
494 | 733 | default: |
---|
.. | .. |
---|
496 | 735 | break; |
---|
497 | 736 | } |
---|
498 | 737 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 738 | + if (print_object) |
---|
| 739 | + debug_print_object(obj, "activate"); |
---|
499 | 740 | return ret; |
---|
500 | 741 | } |
---|
501 | 742 | |
---|
502 | 743 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
503 | | - /* |
---|
504 | | - * We are here when a static object is activated. We |
---|
505 | | - * let the type specific code confirm whether this is |
---|
506 | | - * true or not. if true, we just make sure that the |
---|
507 | | - * static object is tracked in the object tracker. If |
---|
508 | | - * not, this must be a bug, so we try to fix it up. |
---|
509 | | - */ |
---|
510 | | - if (descr->is_static_object && descr->is_static_object(addr)) { |
---|
511 | | - /* track this static object */ |
---|
512 | | - debug_object_init(addr, descr); |
---|
513 | | - debug_object_activate(addr, descr); |
---|
514 | | - } else { |
---|
515 | | - debug_print_object(&o, "activate"); |
---|
516 | | - ret = debug_object_fixup(descr->fixup_activate, addr, |
---|
517 | | - ODEBUG_STATE_NOTAVAILABLE); |
---|
518 | | - return ret ? 0 : -EINVAL; |
---|
| 744 | + |
---|
| 745 | + /* If NULL the allocation has hit OOM */ |
---|
| 746 | + if (!obj) { |
---|
| 747 | + debug_objects_oom(); |
---|
| 748 | + return 0; |
---|
519 | 749 | } |
---|
520 | | - return 0; |
---|
| 750 | + |
---|
| 751 | + /* Object is neither static nor tracked. It's not initialized */ |
---|
| 752 | + debug_print_object(&o, "activate"); |
---|
| 753 | + ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE); |
---|
| 754 | + return ret ? 0 : -EINVAL; |
---|
521 | 755 | } |
---|
522 | 756 | EXPORT_SYMBOL_GPL(debug_object_activate); |
---|
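A pattern that repeats throughout this patch: debug_print_object() is no longer called with the bucket lock held. Either the relevant state is captured and the lock dropped before printing (the ODEBUG_STATE_ACTIVE cases), or a print_object flag is set inside the switch and acted on after raw_spin_unlock_irqrestore(). That keeps printk() and the descriptor's debug_hint() callback out of the raw spinlock section. In shorthand (not compilable on its own, just the before/after shape):

```c
/* Before: printing while holding the raw spinlock. */
raw_spin_lock_irqsave(&db->lock, flags);
/* ... state checks ... */
debug_print_object(obj, "activate");		/* printk under db->lock */
raw_spin_unlock_irqrestore(&db->lock, flags);

/* After: record the decision, drop the lock, then print. */
raw_spin_lock_irqsave(&db->lock, flags);
/* ... state checks set print_object or capture obj->state ... */
raw_spin_unlock_irqrestore(&db->lock, flags);
if (print_object)
	debug_print_object(obj, "activate");	/* lock already dropped */
```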
523 | 757 | |
---|
.. | .. |
---|
526 | 760 | * @addr: address of the object |
---|
527 | 761 | * @descr: pointer to an object specific debug description structure |
---|
528 | 762 | */ |
---|
529 | | -void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) |
---|
| 763 | +void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) |
---|
530 | 764 | { |
---|
531 | 765 | struct debug_bucket *db; |
---|
532 | 766 | struct debug_obj *obj; |
---|
533 | 767 | unsigned long flags; |
---|
| 768 | + bool print_object = false; |
---|
534 | 769 | |
---|
535 | 770 | if (!debug_objects_enabled) |
---|
536 | 771 | return; |
---|
.. | .. |
---|
548 | 783 | if (!obj->astate) |
---|
549 | 784 | obj->state = ODEBUG_STATE_INACTIVE; |
---|
550 | 785 | else |
---|
551 | | - debug_print_object(obj, "deactivate"); |
---|
| 786 | + print_object = true; |
---|
552 | 787 | break; |
---|
553 | 788 | |
---|
554 | 789 | case ODEBUG_STATE_DESTROYED: |
---|
555 | | - debug_print_object(obj, "deactivate"); |
---|
| 790 | + print_object = true; |
---|
556 | 791 | break; |
---|
557 | 792 | default: |
---|
558 | 793 | break; |
---|
559 | 794 | } |
---|
560 | | - } else { |
---|
| 795 | + } |
---|
| 796 | + |
---|
| 797 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 798 | + if (!obj) { |
---|
561 | 799 | struct debug_obj o = { .object = addr, |
---|
562 | 800 | .state = ODEBUG_STATE_NOTAVAILABLE, |
---|
563 | 801 | .descr = descr }; |
---|
564 | 802 | |
---|
565 | 803 | debug_print_object(&o, "deactivate"); |
---|
| 804 | + } else if (print_object) { |
---|
| 805 | + debug_print_object(obj, "deactivate"); |
---|
566 | 806 | } |
---|
567 | | - |
---|
568 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
569 | 807 | } |
---|
570 | 808 | EXPORT_SYMBOL_GPL(debug_object_deactivate); |
---|
571 | 809 | |
---|
.. | .. |
---|
574 | 812 | * @addr: address of the object |
---|
575 | 813 | * @descr: pointer to an object specific debug description structure |
---|
576 | 814 | */ |
---|
577 | | -void debug_object_destroy(void *addr, struct debug_obj_descr *descr) |
---|
| 815 | +void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) |
---|
578 | 816 | { |
---|
579 | 817 | enum debug_obj_state state; |
---|
580 | 818 | struct debug_bucket *db; |
---|
581 | 819 | struct debug_obj *obj; |
---|
582 | 820 | unsigned long flags; |
---|
| 821 | + bool print_object = false; |
---|
583 | 822 | |
---|
584 | 823 | if (!debug_objects_enabled) |
---|
585 | 824 | return; |
---|
.. | .. |
---|
599 | 838 | obj->state = ODEBUG_STATE_DESTROYED; |
---|
600 | 839 | break; |
---|
601 | 840 | case ODEBUG_STATE_ACTIVE: |
---|
602 | | - debug_print_object(obj, "destroy"); |
---|
603 | 841 | state = obj->state; |
---|
604 | 842 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 843 | + debug_print_object(obj, "destroy"); |
---|
605 | 844 | debug_object_fixup(descr->fixup_destroy, addr, state); |
---|
606 | 845 | return; |
---|
607 | 846 | |
---|
608 | 847 | case ODEBUG_STATE_DESTROYED: |
---|
609 | | - debug_print_object(obj, "destroy"); |
---|
| 848 | + print_object = true; |
---|
610 | 849 | break; |
---|
611 | 850 | default: |
---|
612 | 851 | break; |
---|
613 | 852 | } |
---|
614 | 853 | out_unlock: |
---|
615 | 854 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 855 | + if (print_object) |
---|
| 856 | + debug_print_object(obj, "destroy"); |
---|
616 | 857 | } |
---|
617 | 858 | EXPORT_SYMBOL_GPL(debug_object_destroy); |
---|
618 | 859 | |
---|
.. | .. |
---|
621 | 862 | * @addr: address of the object |
---|
622 | 863 | * @descr: pointer to an object specific debug description structure |
---|
623 | 864 | */ |
---|
624 | | -void debug_object_free(void *addr, struct debug_obj_descr *descr) |
---|
| 865 | +void debug_object_free(void *addr, const struct debug_obj_descr *descr) |
---|
625 | 866 | { |
---|
626 | 867 | enum debug_obj_state state; |
---|
627 | 868 | struct debug_bucket *db; |
---|
.. | .. |
---|
641 | 882 | |
---|
642 | 883 | switch (obj->state) { |
---|
643 | 884 | case ODEBUG_STATE_ACTIVE: |
---|
644 | | - debug_print_object(obj, "free"); |
---|
645 | 885 | state = obj->state; |
---|
646 | 886 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 887 | + debug_print_object(obj, "free"); |
---|
647 | 888 | debug_object_fixup(descr->fixup_free, addr, state); |
---|
648 | 889 | return; |
---|
649 | 890 | default: |
---|
.. | .. |
---|
662 | 903 | * @addr: address of the object |
---|
663 | 904 | * @descr: pointer to an object specific debug description structure |
---|
664 | 905 | */ |
---|
665 | | -void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) |
---|
| 906 | +void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) |
---|
666 | 907 | { |
---|
| 908 | + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; |
---|
667 | 909 | struct debug_bucket *db; |
---|
668 | 910 | struct debug_obj *obj; |
---|
669 | 911 | unsigned long flags; |
---|
.. | .. |
---|
671 | 913 | if (!debug_objects_enabled) |
---|
672 | 914 | return; |
---|
673 | 915 | |
---|
| 916 | + debug_objects_fill_pool(); |
---|
| 917 | + |
---|
674 | 918 | db = get_bucket((unsigned long) addr); |
---|
675 | 919 | |
---|
676 | 920 | raw_spin_lock_irqsave(&db->lock, flags); |
---|
| 921 | + obj = lookup_object_or_alloc(addr, db, descr, false, true); |
---|
| 922 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 923 | + if (likely(!IS_ERR_OR_NULL(obj))) |
---|
| 924 | + return; |
---|
677 | 925 | |
---|
678 | | - obj = lookup_object(addr, db); |
---|
| 926 | + /* If NULL the allocation has hit OOM */ |
---|
679 | 927 | if (!obj) { |
---|
680 | | - struct debug_obj o = { .object = addr, |
---|
681 | | - .state = ODEBUG_STATE_NOTAVAILABLE, |
---|
682 | | - .descr = descr }; |
---|
683 | | - |
---|
684 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
685 | | - /* |
---|
686 | | - * Maybe the object is static, and we let the type specific |
---|
687 | | - * code confirm. Track this static object if true, else invoke |
---|
688 | | - * fixup. |
---|
689 | | - */ |
---|
690 | | - if (descr->is_static_object && descr->is_static_object(addr)) { |
---|
691 | | - /* Track this static object */ |
---|
692 | | - debug_object_init(addr, descr); |
---|
693 | | - } else { |
---|
694 | | - debug_print_object(&o, "assert_init"); |
---|
695 | | - debug_object_fixup(descr->fixup_assert_init, addr, |
---|
696 | | - ODEBUG_STATE_NOTAVAILABLE); |
---|
697 | | - } |
---|
| 928 | + debug_objects_oom(); |
---|
698 | 929 | return; |
---|
699 | 930 | } |
---|
700 | 931 | |
---|
701 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 932 | + /* Object is neither tracked nor static. It's not initialized. */ |
---|
| 933 | + debug_print_object(&o, "assert_init"); |
---|
| 934 | + debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE); |
---|
702 | 935 | } |
---|
703 | 936 | EXPORT_SYMBOL_GPL(debug_object_assert_init); |
---|
704 | 937 | |
---|
.. | .. |
---|
710 | 943 | * @next: state to move to if expected state is found |
---|
711 | 944 | */ |
---|
712 | 945 | void |
---|
713 | | -debug_object_active_state(void *addr, struct debug_obj_descr *descr, |
---|
| 946 | +debug_object_active_state(void *addr, const struct debug_obj_descr *descr, |
---|
714 | 947 | unsigned int expect, unsigned int next) |
---|
715 | 948 | { |
---|
716 | 949 | struct debug_bucket *db; |
---|
717 | 950 | struct debug_obj *obj; |
---|
718 | 951 | unsigned long flags; |
---|
| 952 | + bool print_object = false; |
---|
719 | 953 | |
---|
720 | 954 | if (!debug_objects_enabled) |
---|
721 | 955 | return; |
---|
.. | .. |
---|
731 | 965 | if (obj->astate == expect) |
---|
732 | 966 | obj->astate = next; |
---|
733 | 967 | else |
---|
734 | | - debug_print_object(obj, "active_state"); |
---|
| 968 | + print_object = true; |
---|
735 | 969 | break; |
---|
736 | 970 | |
---|
737 | 971 | default: |
---|
738 | | - debug_print_object(obj, "active_state"); |
---|
| 972 | + print_object = true; |
---|
739 | 973 | break; |
---|
740 | 974 | } |
---|
741 | | - } else { |
---|
| 975 | + } |
---|
| 976 | + |
---|
| 977 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 978 | + if (!obj) { |
---|
742 | 979 | struct debug_obj o = { .object = addr, |
---|
743 | 980 | .state = ODEBUG_STATE_NOTAVAILABLE, |
---|
744 | 981 | .descr = descr }; |
---|
745 | 982 | |
---|
746 | 983 | debug_print_object(&o, "active_state"); |
---|
| 984 | + } else if (print_object) { |
---|
| 985 | + debug_print_object(obj, "active_state"); |
---|
747 | 986 | } |
---|
748 | | - |
---|
749 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
750 | 987 | } |
---|
751 | 988 | EXPORT_SYMBOL_GPL(debug_object_active_state); |
---|
752 | 989 | |
---|
.. | .. |
---|
754 | 991 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
---|
755 | 992 | { |
---|
756 | 993 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
---|
757 | | - struct debug_obj_descr *descr; |
---|
| 994 | + const struct debug_obj_descr *descr; |
---|
758 | 995 | enum debug_obj_state state; |
---|
759 | 996 | struct debug_bucket *db; |
---|
760 | 997 | struct hlist_node *tmp; |
---|
761 | 998 | struct debug_obj *obj; |
---|
762 | 999 | int cnt, objs_checked = 0; |
---|
763 | | - bool work = false; |
---|
764 | 1000 | |
---|
765 | 1001 | saddr = (unsigned long) address; |
---|
766 | 1002 | eaddr = saddr + size; |
---|
.. | .. |
---|
782 | 1018 | |
---|
783 | 1019 | switch (obj->state) { |
---|
784 | 1020 | case ODEBUG_STATE_ACTIVE: |
---|
785 | | - debug_print_object(obj, "free"); |
---|
786 | 1021 | descr = obj->descr; |
---|
787 | 1022 | state = obj->state; |
---|
788 | 1023 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 1024 | + debug_print_object(obj, "free"); |
---|
789 | 1025 | debug_object_fixup(descr->fixup_free, |
---|
790 | 1026 | (void *) oaddr, state); |
---|
791 | 1027 | goto repeat; |
---|
792 | 1028 | default: |
---|
793 | 1029 | hlist_del(&obj->node); |
---|
794 | | - work |= __free_object(obj); |
---|
| 1030 | + __free_object(obj); |
---|
795 | 1031 | break; |
---|
796 | 1032 | } |
---|
797 | 1033 | } |
---|
.. | .. |
---|
807 | 1043 | debug_objects_maxchecked = objs_checked; |
---|
808 | 1044 | |
---|
809 | 1045 | /* Schedule work to actually kmem_cache_free() objects */ |
---|
810 | | - if (work) |
---|
811 | | - schedule_work(&debug_obj_work); |
---|
| 1046 | + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { |
---|
| 1047 | + WRITE_ONCE(obj_freeing, true); |
---|
| 1048 | + schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); |
---|
| 1049 | + } |
---|
812 | 1050 | } |
---|
813 | 1051 | |
---|
814 | 1052 | void debug_check_no_obj_freed(const void *address, unsigned long size) |
---|
.. | .. |
---|
822 | 1060 | |
---|
823 | 1061 | static int debug_stats_show(struct seq_file *m, void *v) |
---|
824 | 1062 | { |
---|
| 1063 | + int cpu, obj_percpu_free = 0; |
---|
| 1064 | + |
---|
| 1065 | + for_each_possible_cpu(cpu) |
---|
| 1066 | + obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu); |
---|
| 1067 | + |
---|
825 | 1068 | seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); |
---|
826 | 1069 | seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); |
---|
827 | 1070 | seq_printf(m, "warnings :%d\n", debug_objects_warnings); |
---|
828 | 1071 | seq_printf(m, "fixups :%d\n", debug_objects_fixups); |
---|
829 | | - seq_printf(m, "pool_free :%d\n", obj_pool_free); |
---|
| 1072 | + seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free); |
---|
| 1073 | + seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); |
---|
830 | 1074 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); |
---|
831 | | - seq_printf(m, "pool_used :%d\n", obj_pool_used); |
---|
| 1075 | + seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); |
---|
832 | 1076 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); |
---|
833 | | - seq_printf(m, "on_free_list :%d\n", obj_nr_tofree); |
---|
| 1077 | + seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree)); |
---|
834 | 1078 | seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); |
---|
835 | 1079 | seq_printf(m, "objs_freed :%d\n", debug_objects_freed); |
---|
836 | 1080 | return 0; |
---|
837 | 1081 | } |
---|
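This is where the counter skew described next to the obj_pool_free declaration gets corrected: objects parked in the per-CPU pools are still accounted as "used" by the global counters, so the report adds the summed per-CPU free count back into pool_free and subtracts it from pool_used. A tiny worked example with invented numbers:

```c
#include <stdio.h>

int main(void)
{
	/* Invented snapshot: 30 objects sitting in per-CPU free pools. */
	int obj_pool_free   = 700;	/* global pool only              */
	int obj_pool_used   = 380;	/* still counts the per-CPU objs */
	int obj_percpu_free = 30;	/* sum over all possible CPUs    */

	printf("pool_free     :%d\n", obj_pool_free + obj_percpu_free);	/* 730 */
	printf("pool_pcp_free :%d\n", obj_percpu_free);			/* 30  */
	printf("pool_used     :%d\n", obj_pool_used - obj_percpu_free);	/* 350 */
	return 0;
}
```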
838 | | - |
---|
839 | | -static int debug_stats_open(struct inode *inode, struct file *filp) |
---|
840 | | -{ |
---|
841 | | - return single_open(filp, debug_stats_show, NULL); |
---|
842 | | -} |
---|
843 | | - |
---|
844 | | -static const struct file_operations debug_stats_fops = { |
---|
845 | | - .open = debug_stats_open, |
---|
846 | | - .read = seq_read, |
---|
847 | | - .llseek = seq_lseek, |
---|
848 | | - .release = single_release, |
---|
849 | | -}; |
---|
| 1082 | +DEFINE_SHOW_ATTRIBUTE(debug_stats); |
---|
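DEFINE_SHOW_ATTRIBUTE(debug_stats) is the seq_file helper macro that generates essentially the boilerplate deleted above. Roughly, it expands to something like the following (an approximation from <linux/seq_file.h>, not the literal expansion):

```c
static int debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_stats_show, inode->i_private);
}

static const struct file_operations debug_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
```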
850 | 1083 | |
---|
851 | 1084 | static int __init debug_objects_init_debugfs(void) |
---|
852 | 1085 | { |
---|
853 | | - struct dentry *dbgdir, *dbgstats; |
---|
| 1086 | + struct dentry *dbgdir; |
---|
854 | 1087 | |
---|
855 | 1088 | if (!debug_objects_enabled) |
---|
856 | 1089 | return 0; |
---|
857 | 1090 | |
---|
858 | 1091 | dbgdir = debugfs_create_dir("debug_objects", NULL); |
---|
859 | | - if (!dbgdir) |
---|
860 | | - return -ENOMEM; |
---|
861 | 1092 | |
---|
862 | | - dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, |
---|
863 | | - &debug_stats_fops); |
---|
864 | | - if (!dbgstats) |
---|
865 | | - goto err; |
---|
| 1093 | + debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops); |
---|
866 | 1094 | |
---|
867 | 1095 | return 0; |
---|
868 | | - |
---|
869 | | -err: |
---|
870 | | - debugfs_remove(dbgdir); |
---|
871 | | - |
---|
872 | | - return -ENOMEM; |
---|
873 | 1096 | } |
---|
874 | 1097 | __initcall(debug_objects_init_debugfs); |
---|
875 | 1098 | |
---|
.. | .. |
---|
886 | 1109 | unsigned long dummy2[3]; |
---|
887 | 1110 | }; |
---|
888 | 1111 | |
---|
889 | | -static __initdata struct debug_obj_descr descr_type_test; |
---|
| 1112 | +static __initconst const struct debug_obj_descr descr_type_test; |
---|
890 | 1113 | |
---|
891 | 1114 | static bool __init is_static_object(void *addr) |
---|
892 | 1115 | { |
---|
.. | .. |
---|
1011 | 1234 | return res; |
---|
1012 | 1235 | } |
---|
1013 | 1236 | |
---|
1014 | | -static __initdata struct debug_obj_descr descr_type_test = { |
---|
| 1237 | +static __initconst const struct debug_obj_descr descr_type_test = { |
---|
1015 | 1238 | .name = "selftest", |
---|
1016 | 1239 | .is_static_object = is_static_object, |
---|
1017 | 1240 | .fixup_init = fixup_init, |
---|
.. | .. |
---|
1130 | 1353 | hlist_add_head(&obj->node, &objects); |
---|
1131 | 1354 | } |
---|
1132 | 1355 | |
---|
| 1356 | + debug_objects_allocated += i; |
---|
| 1357 | + |
---|
1133 | 1358 | /* |
---|
1134 | | - * When debug_objects_mem_init() is called we know that only |
---|
1135 | | - * one CPU is up, so disabling interrupts is enough |
---|
1136 | | - * protection. This avoids the lockdep hell of lock ordering. |
---|
| 1359 | + * debug_objects_mem_init() is now called early that only one CPU is up |
---|
| 1360 | + * and interrupts have been disabled, so it is safe to replace the |
---|
| 1361 | + * active object references. |
---|
1137 | 1362 | */ |
---|
1138 | | - local_irq_disable(); |
---|
1139 | 1363 | |
---|
1140 | 1364 | /* Remove the statically allocated objects from the pool */ |
---|
1141 | 1365 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) |
---|
.. | .. |
---|
1156 | 1380 | cnt++; |
---|
1157 | 1381 | } |
---|
1158 | 1382 | } |
---|
1159 | | - local_irq_enable(); |
---|
1160 | 1383 | |
---|
1161 | 1384 | pr_debug("%d of %d active objects replaced\n", |
---|
1162 | 1385 | cnt, obj_pool_used); |
---|
.. | .. |
---|
1177 | 1400 | */ |
---|
1178 | 1401 | void __init debug_objects_mem_init(void) |
---|
1179 | 1402 | { |
---|
| 1403 | + int cpu, extras; |
---|
| 1404 | + |
---|
1180 | 1405 | if (!debug_objects_enabled) |
---|
1181 | 1406 | return; |
---|
| 1407 | + |
---|
| 1408 | + /* |
---|
| 1409 | + * Initialize the percpu object pools |
---|
| 1410 | + * |
---|
| 1411 | + * Initialization is not strictly necessary, but was done for |
---|
| 1412 | + * completeness. |
---|
| 1413 | + */ |
---|
| 1414 | + for_each_possible_cpu(cpu) |
---|
| 1415 | + INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu)); |
---|
1182 | 1416 | |
---|
1183 | 1417 | obj_cache = kmem_cache_create("debug_objects_cache", |
---|
1184 | 1418 | sizeof (struct debug_obj), 0, |
---|
.. | .. |
---|
1189 | 1423 | debug_objects_enabled = 0; |
---|
1190 | 1424 | kmem_cache_destroy(obj_cache); |
---|
1191 | 1425 | pr_warn("out of memory.\n"); |
---|
| 1426 | + return; |
---|
1192 | 1427 | } else |
---|
1193 | 1428 | debug_objects_selftest(); |
---|
| 1429 | + |
---|
| 1430 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 1431 | + cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL, |
---|
| 1432 | + object_cpu_offline); |
---|
| 1433 | +#endif |
---|
1194 | 1434 | |
---|
1195 | 1435 | /* |
---|
1196 | 1436 | * Increase the thresholds for allocating and freeing objects |
---|
1197 | 1437 | * according to the number of possible CPUs available in the system. |
---|
1198 | 1438 | */ |
---|
1199 | | - debug_objects_pool_size += num_possible_cpus() * 32; |
---|
1200 | | - debug_objects_pool_min_level += num_possible_cpus() * 4; |
---|
| 1439 | + extras = num_possible_cpus() * ODEBUG_BATCH_SIZE; |
---|
| 1440 | + debug_objects_pool_size += extras; |
---|
| 1441 | + debug_objects_pool_min_level += extras; |
---|
1201 | 1442 | } |
---|
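With per-CPU pools in play, each possible CPU can hold on to free objects of its own, so the global thresholds now scale by ODEBUG_BATCH_SIZE per possible CPU rather than the previous hard-coded 32 and 4. For a feel of the numbers (standalone arithmetic; 8 possible CPUs assumed purely for illustration):

```c
#include <stdio.h>

#define ODEBUG_POOL_SIZE      1024
#define ODEBUG_POOL_MIN_LEVEL 256
#define ODEBUG_BATCH_SIZE     16

int main(void)
{
	int cpus = 8;				/* illustrative num_possible_cpus() */
	int extras = cpus * ODEBUG_BATCH_SIZE;	/* 128 */

	printf("pool_size      : %d\n", ODEBUG_POOL_SIZE + extras);	/* 1152 */
	printf("pool_min_level : %d\n", ODEBUG_POOL_MIN_LEVEL + extras);	/* 384 */
	return 0;
}
```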