.. | .. |
---|
19 | 19 | #include <linux/slab.h> |
---|
20 | 20 | #include <linux/hash.h> |
---|
21 | 21 | #include <linux/kmemleak.h> |
---|
| 22 | +#include <linux/cpu.h> |
---|
22 | 23 | |
---|
23 | 24 | #define ODEBUG_HASH_BITS 14 |
---|
24 | 25 | #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) |
---|
25 | 26 | |
---|
26 | 27 | #define ODEBUG_POOL_SIZE 1024 |
---|
27 | 28 | #define ODEBUG_POOL_MIN_LEVEL 256 |
---|
| 29 | +#define ODEBUG_POOL_PERCPU_SIZE 64 |
---|
| 30 | +#define ODEBUG_BATCH_SIZE 16 |
---|
28 | 31 | |
---|
29 | 32 | #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT |
---|
30 | 33 | #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) |
---|
31 | 34 | #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) |
---|
32 | 35 | |
---|
| 36 | +/* |
---|
| 37 | + * We limit the freeing of debug objects via workqueue at a maximum |
---|
| 38 | + * frequency of 10Hz and about 1024 objects for each freeing operation. |
---|
| 39 | + * So it is freeing at most 10k debug objects per second. |
---|
| 40 | + */ |
---|
| 41 | +#define ODEBUG_FREE_WORK_MAX 1024 |
---|
| 42 | +#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10) |
---|
| 43 | + |
---|
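The two new macros implement a simple rate limit: the free worker handles at most ODEBUG_FREE_WORK_MAX (1024) objects per invocation, and the work is queued with a delay of DIV_ROUND_UP(HZ, 10) jiffies, i.e. roughly every 100 ms independent of the HZ value, so at most about 10k objects per second are handed back to the slab cache. A minimal sketch of that throttling pattern, with hypothetical names (free_pending, request_free) standing in for the obj_freeing flag and free_object() logic that appear later in the patch:

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

static bool free_pending;
static void throttled_free_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(throttled_free_work, throttled_free_fn);

static void throttled_free_fn(struct work_struct *work)
{
	WRITE_ONCE(free_pending, false);
	/* free at most a bounded batch of objects here */
}

static void request_free(void)
{
	/* Only one delayed work in flight; re-armed at most every HZ/10. */
	if (!READ_ONCE(free_pending)) {
		WRITE_ONCE(free_pending, true);
		schedule_delayed_work(&throttled_free_work, DIV_ROUND_UP(HZ, 10));
	}
}
```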
33 | 44 | struct debug_bucket { |
---|
34 | 45 | struct hlist_head list; |
---|
35 | 46 | raw_spinlock_t lock; |
---|
36 | 47 | }; |
---|
| 48 | + |
---|
| 49 | +/* |
---|
| 50 | + * Debug object percpu free list |
---|
| 51 | + * Access is protected by disabling irq |
---|
| 52 | + */ |
---|
| 53 | +struct debug_percpu_free { |
---|
| 54 | + struct hlist_head free_objs; |
---|
| 55 | + int obj_free; |
---|
| 56 | +}; |
---|
| 57 | + |
---|
| 58 | +static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool); |
---|
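The percpu pool is a plain hlist plus a counter; because it is only ever touched by code running on the owning CPU, disabling interrupts is sufficient protection and no lock is needed. An illustrative access helper (hypothetical, not part of the patch) showing that discipline:

```c
static struct debug_obj *percpu_pool_pop(void)
{
	struct debug_percpu_free *pool;
	struct debug_obj *obj = NULL;
	unsigned long flags;

	local_irq_save(flags);
	pool = this_cpu_ptr(&percpu_obj_pool);
	if (pool->free_objs.first) {
		obj = hlist_entry(pool->free_objs.first, typeof(*obj), node);
		hlist_del(&obj->node);
		pool->obj_free--;
	}
	local_irq_restore(flags);
	return obj;
}
```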
37 | 59 | |
---|
38 | 60 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; |
---|
39 | 61 | |
---|
.. | .. |
---|
44 | 66 | static HLIST_HEAD(obj_pool); |
---|
45 | 67 | static HLIST_HEAD(obj_to_free); |
---|
46 | 68 | |
---|
| 69 | +/* |
---|
| 70 | + * Because of the presence of percpu free pools, obj_pool_free will |
---|
| 71 | + * under-count those in the percpu free pools. Similarly, obj_pool_used |
---|
| 72 | + * will over-count those in the percpu free pools. Adjustments will be |
---|
| 73 | + * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used |
---|
| 74 | + * can be off. |
---|
| 75 | + */ |
---|
47 | 76 | static int obj_pool_min_free = ODEBUG_POOL_SIZE; |
---|
48 | 77 | static int obj_pool_free = ODEBUG_POOL_SIZE; |
---|
49 | 78 | static int obj_pool_used; |
---|
50 | 79 | static int obj_pool_max_used; |
---|
| 80 | +static bool obj_freeing; |
---|
51 | 81 | /* The number of objs on the global free list */ |
---|
52 | 82 | static int obj_nr_tofree; |
---|
53 | | -static struct kmem_cache *obj_cache; |
---|
54 | 83 | |
---|
55 | 84 | static int debug_objects_maxchain __read_mostly; |
---|
56 | 85 | static int __maybe_unused debug_objects_maxchecked __read_mostly; |
---|
.. | .. |
---|
62 | 91 | = ODEBUG_POOL_SIZE; |
---|
63 | 92 | static int debug_objects_pool_min_level __read_mostly |
---|
64 | 93 | = ODEBUG_POOL_MIN_LEVEL; |
---|
65 | | -static struct debug_obj_descr *descr_test __read_mostly; |
---|
| 94 | +static const struct debug_obj_descr *descr_test __read_mostly; |
---|
| 95 | +static struct kmem_cache *obj_cache __read_mostly; |
---|
66 | 96 | |
---|
67 | 97 | /* |
---|
68 | 98 | * Track numbers of kmem_cache_alloc()/free() calls done. |
---|
.. | .. |
---|
71 | 101 | static int debug_objects_freed; |
---|
72 | 102 | |
---|
73 | 103 | static void free_obj_work(struct work_struct *work); |
---|
74 | | -static DECLARE_WORK(debug_obj_work, free_obj_work); |
---|
| 104 | +static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work); |
---|
75 | 105 | |
---|
76 | 106 | static int __init enable_object_debug(char *str) |
---|
77 | 107 | { |
---|
.. | .. |
---|
100 | 130 | static void fill_pool(void) |
---|
101 | 131 | { |
---|
102 | 132 | gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; |
---|
103 | | - struct debug_obj *new, *obj; |
---|
| 133 | + struct debug_obj *obj; |
---|
104 | 134 | unsigned long flags; |
---|
105 | 135 | |
---|
106 | | - if (likely(obj_pool_free >= debug_objects_pool_min_level)) |
---|
| 136 | + if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level)) |
---|
107 | 137 | return; |
---|
108 | 138 | |
---|
109 | 139 | /* |
---|
110 | 140 | * Reuse objs from the global free list; they will be reinitialized |
---|
111 | 141 | * when allocating. |
---|
| 142 | + * |
---|
| 143 | + * Both obj_nr_tofree and obj_pool_free are checked locklessly; the |
---|
| 144 | + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical |
---|
| 145 | + * sections. |
---|
112 | 146 | */ |
---|
113 | | - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { |
---|
| 147 | + while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { |
---|
114 | 148 | raw_spin_lock_irqsave(&pool_lock, flags); |
---|
115 | 149 | /* |
---|
116 | 150 | * Recheck with the lock held as the worker thread might have |
---|
117 | 151 | * won the race and freed the global free list already. |
---|
118 | 152 | */ |
---|
119 | | - if (obj_nr_tofree) { |
---|
| 153 | + while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { |
---|
120 | 154 | obj = hlist_entry(obj_to_free.first, typeof(*obj), node); |
---|
121 | 155 | hlist_del(&obj->node); |
---|
122 | | - obj_nr_tofree--; |
---|
| 156 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); |
---|
123 | 157 | hlist_add_head(&obj->node, &obj_pool); |
---|
124 | | - obj_pool_free++; |
---|
| 158 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
125 | 159 | } |
---|
126 | 160 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
127 | 161 | } |
---|
.. | .. |
---|
129 | 163 | if (unlikely(!obj_cache)) |
---|
130 | 164 | return; |
---|
131 | 165 | |
---|
132 | | - while (obj_pool_free < debug_objects_pool_min_level) { |
---|
| 166 | + while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { |
---|
| 167 | + struct debug_obj *new[ODEBUG_BATCH_SIZE]; |
---|
| 168 | + int cnt; |
---|
133 | 169 | |
---|
134 | | - new = kmem_cache_zalloc(obj_cache, gfp); |
---|
135 | | - if (!new) |
---|
| 170 | + for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) { |
---|
| 171 | + new[cnt] = kmem_cache_zalloc(obj_cache, gfp); |
---|
| 172 | + if (!new[cnt]) |
---|
| 173 | + break; |
---|
| 174 | + } |
---|
| 175 | + if (!cnt) |
---|
136 | 176 | return; |
---|
137 | 177 | |
---|
138 | 178 | raw_spin_lock_irqsave(&pool_lock, flags); |
---|
139 | | - hlist_add_head(&new->node, &obj_pool); |
---|
140 | | - debug_objects_allocated++; |
---|
141 | | - obj_pool_free++; |
---|
| 179 | + while (cnt) { |
---|
| 180 | + hlist_add_head(&new[--cnt]->node, &obj_pool); |
---|
| 181 | + debug_objects_allocated++; |
---|
| 182 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
| 183 | + } |
---|
142 | 184 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
143 | 185 | } |
---|
144 | 186 | } |
---|
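fill_pool() now reads obj_pool_free and obj_nr_tofree locklessly as a cheap hint and only trusts them again after taking pool_lock, while every writer updates the counters with WRITE_ONCE() inside the locked sections so the lockless readers never see torn values. The idiom, reduced to a generic sketch with made-up names (level, some_lock, top_up):

```c
#include <linux/spinlock.h>

static int level;				/* modified under some_lock only */
static DEFINE_RAW_SPINLOCK(some_lock);

static void top_up(int target)
{
	unsigned long flags;

	if (READ_ONCE(level) >= target)		/* lockless hint, may be stale */
		return;

	raw_spin_lock_irqsave(&some_lock, flags);
	while (level < target) {		/* authoritative recheck under the lock */
		/* ... produce or move one element ... */
		WRITE_ONCE(level, level + 1);
	}
	raw_spin_unlock_irqrestore(&some_lock, flags);
}
```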
.. | .. |
---|
163 | 205 | } |
---|
164 | 206 | |
---|
165 | 207 | /* |
---|
| 208 | + * Allocate a new object from the hlist |
---|
| 209 | + */ |
---|
| 210 | +static struct debug_obj *__alloc_object(struct hlist_head *list) |
---|
| 211 | +{ |
---|
| 212 | + struct debug_obj *obj = NULL; |
---|
| 213 | + |
---|
| 214 | + if (list->first) { |
---|
| 215 | + obj = hlist_entry(list->first, typeof(*obj), node); |
---|
| 216 | + hlist_del(&obj->node); |
---|
| 217 | + } |
---|
| 218 | + |
---|
| 219 | + return obj; |
---|
| 220 | +} |
---|
| 221 | + |
---|
| 222 | +/* |
---|
166 | 223 | * Allocate a new object. If the pool is empty, switch off the debugger. |
---|
167 | 224 | * Must be called with interrupts disabled. |
---|
168 | 225 | */ |
---|
169 | 226 | static struct debug_obj * |
---|
170 | | -alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) |
---|
| 227 | +alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr) |
---|
171 | 228 | { |
---|
172 | | - struct debug_obj *obj = NULL; |
---|
| 229 | + struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool); |
---|
| 230 | + struct debug_obj *obj; |
---|
| 231 | + |
---|
| 232 | + if (likely(obj_cache)) { |
---|
| 233 | + obj = __alloc_object(&percpu_pool->free_objs); |
---|
| 234 | + if (obj) { |
---|
| 235 | + percpu_pool->obj_free--; |
---|
| 236 | + goto init_obj; |
---|
| 237 | + } |
---|
| 238 | + } |
---|
173 | 239 | |
---|
174 | 240 | raw_spin_lock(&pool_lock); |
---|
175 | | - if (obj_pool.first) { |
---|
176 | | - obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
---|
177 | | - |
---|
178 | | - obj->object = addr; |
---|
179 | | - obj->descr = descr; |
---|
180 | | - obj->state = ODEBUG_STATE_NONE; |
---|
181 | | - obj->astate = 0; |
---|
182 | | - hlist_del(&obj->node); |
---|
183 | | - |
---|
184 | | - hlist_add_head(&obj->node, &b->list); |
---|
185 | | - |
---|
| 241 | + obj = __alloc_object(&obj_pool); |
---|
| 242 | + if (obj) { |
---|
186 | 243 | obj_pool_used++; |
---|
| 244 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
---|
| 245 | + |
---|
| 246 | + /* |
---|
| 247 | + * Looking ahead, allocate one batch of debug objects and |
---|
| 248 | + * put them into the percpu free pool. |
---|
| 249 | + */ |
---|
| 250 | + if (likely(obj_cache)) { |
---|
| 251 | + int i; |
---|
| 252 | + |
---|
| 253 | + for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { |
---|
| 254 | + struct debug_obj *obj2; |
---|
| 255 | + |
---|
| 256 | + obj2 = __alloc_object(&obj_pool); |
---|
| 257 | + if (!obj2) |
---|
| 258 | + break; |
---|
| 259 | + hlist_add_head(&obj2->node, |
---|
| 260 | + &percpu_pool->free_objs); |
---|
| 261 | + percpu_pool->obj_free++; |
---|
| 262 | + obj_pool_used++; |
---|
| 263 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
---|
| 264 | + } |
---|
| 265 | + } |
---|
| 266 | + |
---|
187 | 267 | if (obj_pool_used > obj_pool_max_used) |
---|
188 | 268 | obj_pool_max_used = obj_pool_used; |
---|
189 | 269 | |
---|
190 | | - obj_pool_free--; |
---|
191 | 270 | if (obj_pool_free < obj_pool_min_free) |
---|
192 | 271 | obj_pool_min_free = obj_pool_free; |
---|
193 | 272 | } |
---|
194 | 273 | raw_spin_unlock(&pool_lock); |
---|
195 | 274 | |
---|
| 275 | +init_obj: |
---|
| 276 | + if (obj) { |
---|
| 277 | + obj->object = addr; |
---|
| 278 | + obj->descr = descr; |
---|
| 279 | + obj->state = ODEBUG_STATE_NONE; |
---|
| 280 | + obj->astate = 0; |
---|
| 281 | + hlist_add_head(&obj->node, &b->list); |
---|
| 282 | + } |
---|
196 | 283 | return obj; |
---|
197 | 284 | } |
---|
198 | 285 | |
---|
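alloc_object() is now a two-level allocator: the interrupt-disabled fast path pops from the CPU-local pool without touching pool_lock, and the slow path, while already holding pool_lock, pulls a look-ahead batch of up to ODEBUG_BATCH_SIZE objects into the percpu pool so that subsequent allocations stay lock-free. A stripped-down view of that shape (statistics and the hash-bucket insertion omitted; callers have interrupts disabled, as the debug_object_*() entry points guarantee):

```c
static struct debug_obj *two_level_alloc(void)
{
	struct debug_percpu_free *cache = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;
	int i;

	obj = __alloc_object(&cache->free_objs);	/* lock-free fast path */
	if (obj) {
		cache->obj_free--;
		return obj;
	}

	raw_spin_lock(&pool_lock);			/* global slow path */
	obj = __alloc_object(&obj_pool);
	for (i = 0; obj && i < ODEBUG_BATCH_SIZE; i++) {
		struct debug_obj *extra = __alloc_object(&obj_pool);

		if (!extra)
			break;
		/* Look ahead: stash a batch for the next lock-free allocations. */
		hlist_add_head(&extra->node, &cache->free_objs);
		cache->obj_free++;
	}
	raw_spin_unlock(&pool_lock);
	return obj;
}
```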
.. | .. |
---|
209 | 296 | unsigned long flags; |
---|
210 | 297 | HLIST_HEAD(tofree); |
---|
211 | 298 | |
---|
| 299 | + WRITE_ONCE(obj_freeing, false); |
---|
212 | 300 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) |
---|
213 | 301 | return; |
---|
| 302 | + |
---|
| 303 | + if (obj_pool_free >= debug_objects_pool_size) |
---|
| 304 | + goto free_objs; |
---|
214 | 305 | |
---|
215 | 306 | /* |
---|
216 | 307 | * The objs on the pool list might be allocated before the work is |
---|
217 | 308 | * run, so recheck if pool list it full or not, if not fill pool |
---|
218 | | - * list from the global free list |
---|
| 309 | + * list from the global free list. As it is likely that a workload |
---|
| 310 | + * may be gearing up to use more and more objects, don't free any |
---|
| 311 | + * of them until the next round. |
---|
219 | 312 | */ |
---|
220 | 313 | while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) { |
---|
221 | 314 | obj = hlist_entry(obj_to_free.first, typeof(*obj), node); |
---|
222 | 315 | hlist_del(&obj->node); |
---|
223 | 316 | hlist_add_head(&obj->node, &obj_pool); |
---|
224 | | - obj_pool_free++; |
---|
225 | | - obj_nr_tofree--; |
---|
| 317 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
| 318 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); |
---|
226 | 319 | } |
---|
| 320 | + raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
| 321 | + return; |
---|
227 | 322 | |
---|
| 323 | +free_objs: |
---|
228 | 324 | /* |
---|
229 | 325 | * Pool list is already full and there are still objs on the free |
---|
230 | 326 | * list. Move remaining free objs to a temporary list to free the |
---|
.. | .. |
---|
233 | 329 | if (obj_nr_tofree) { |
---|
234 | 330 | hlist_move_list(&obj_to_free, &tofree); |
---|
235 | 331 | debug_objects_freed += obj_nr_tofree; |
---|
236 | | - obj_nr_tofree = 0; |
---|
| 332 | + WRITE_ONCE(obj_nr_tofree, 0); |
---|
237 | 333 | } |
---|
238 | 334 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
239 | 335 | |
---|
.. | .. |
---|
243 | 339 | } |
---|
244 | 340 | } |
---|
245 | 341 | |
---|
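free_obj_work() now clears obj_freeing first so another round can be armed, refills the global pool from obj_to_free and simply returns while the pool is still below debug_objects_pool_size, and only falls through to the free_objs path when the pool is already full. The actual kmem_cache_free() loop sits in a part of the function outside the hunks shown; it follows the usual detach-under-lock, free-outside-lock shape sketched below (illustration only):

```c
static void drain_free_list(void)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	raw_spin_lock_irqsave(&pool_lock, flags);
	hlist_move_list(&obj_to_free, &tofree);	/* detach under the lock */
	WRITE_ONCE(obj_nr_tofree, 0);
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);	/* free outside the lock */
	}
}
```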
246 | | -static bool __free_object(struct debug_obj *obj) |
---|
| 342 | +static void __free_object(struct debug_obj *obj) |
---|
247 | 343 | { |
---|
| 344 | + struct debug_obj *objs[ODEBUG_BATCH_SIZE]; |
---|
| 345 | + struct debug_percpu_free *percpu_pool; |
---|
| 346 | + int lookahead_count = 0; |
---|
248 | 347 | unsigned long flags; |
---|
249 | 348 | bool work; |
---|
250 | 349 | |
---|
251 | | - raw_spin_lock_irqsave(&pool_lock, flags); |
---|
252 | | - work = (obj_pool_free > debug_objects_pool_size) && obj_cache; |
---|
| 350 | + local_irq_save(flags); |
---|
| 351 | + if (!obj_cache) |
---|
| 352 | + goto free_to_obj_pool; |
---|
| 353 | + |
---|
| 354 | + /* |
---|
| 355 | + * Try to free it into the percpu pool first. |
---|
| 356 | + */ |
---|
| 357 | + percpu_pool = this_cpu_ptr(&percpu_obj_pool); |
---|
| 358 | + if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) { |
---|
| 359 | + hlist_add_head(&obj->node, &percpu_pool->free_objs); |
---|
| 360 | + percpu_pool->obj_free++; |
---|
| 361 | + local_irq_restore(flags); |
---|
| 362 | + return; |
---|
| 363 | + } |
---|
| 364 | + |
---|
| 365 | + /* |
---|
| 366 | + * As the percpu pool is full, look ahead and pull out a batch |
---|
| 367 | + * of objects from the percpu pool and free them as well. |
---|
| 368 | + */ |
---|
| 369 | + for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) { |
---|
| 370 | + objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs); |
---|
| 371 | + if (!objs[lookahead_count]) |
---|
| 372 | + break; |
---|
| 373 | + percpu_pool->obj_free--; |
---|
| 374 | + } |
---|
| 375 | + |
---|
| 376 | +free_to_obj_pool: |
---|
| 377 | + raw_spin_lock(&pool_lock); |
---|
| 378 | + work = (obj_pool_free > debug_objects_pool_size) && obj_cache && |
---|
| 379 | + (obj_nr_tofree < ODEBUG_FREE_WORK_MAX); |
---|
253 | 380 | obj_pool_used--; |
---|
254 | 381 | |
---|
255 | 382 | if (work) { |
---|
256 | | - obj_nr_tofree++; |
---|
| 383 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); |
---|
257 | 384 | hlist_add_head(&obj->node, &obj_to_free); |
---|
| 385 | + if (lookahead_count) { |
---|
| 386 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count); |
---|
| 387 | + obj_pool_used -= lookahead_count; |
---|
| 388 | + while (lookahead_count) { |
---|
| 389 | + hlist_add_head(&objs[--lookahead_count]->node, |
---|
| 390 | + &obj_to_free); |
---|
| 391 | + } |
---|
| 392 | + } |
---|
| 393 | + |
---|
| 394 | + if ((obj_pool_free > debug_objects_pool_size) && |
---|
| 395 | + (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) { |
---|
| 396 | + int i; |
---|
| 397 | + |
---|
| 398 | + /* |
---|
| 399 | + * Free one more batch of objects from obj_pool. |
---|
| 400 | + */ |
---|
| 401 | + for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { |
---|
| 402 | + obj = __alloc_object(&obj_pool); |
---|
| 403 | + hlist_add_head(&obj->node, &obj_to_free); |
---|
| 404 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
---|
| 405 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); |
---|
| 406 | + } |
---|
| 407 | + } |
---|
258 | 408 | } else { |
---|
259 | | - obj_pool_free++; |
---|
| 409 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
---|
260 | 410 | hlist_add_head(&obj->node, &obj_pool); |
---|
| 411 | + if (lookahead_count) { |
---|
| 412 | + WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count); |
---|
| 413 | + obj_pool_used -= lookahead_count; |
---|
| 414 | + while (lookahead_count) { |
---|
| 415 | + hlist_add_head(&objs[--lookahead_count]->node, |
---|
| 416 | + &obj_pool); |
---|
| 417 | + } |
---|
| 418 | + } |
---|
261 | 419 | } |
---|
262 | | - raw_spin_unlock_irqrestore(&pool_lock, flags); |
---|
263 | | - return work; |
---|
| 420 | + raw_spin_unlock(&pool_lock); |
---|
| 421 | + local_irq_restore(flags); |
---|
264 | 422 | } |
---|
265 | 423 | |
---|
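__free_object() mirrors the allocation side: first try to park the object in the CPU-local pool with only interrupts disabled; when that pool is full, drain a look-ahead batch of up to ODEBUG_BATCH_SIZE objects from it, take pool_lock, and push the whole batch either onto obj_to_free (if the global pool is above its target and the free-work backlog is below ODEBUG_FREE_WORK_MAX) or back onto obj_pool. A deliberately condensed view of those decisions, with the batching and the obj_pool_used accounting elided:

```c
static void free_one(struct debug_obj *obj)
{
	struct debug_percpu_free *cache;
	unsigned long flags;

	local_irq_save(flags);
	cache = this_cpu_ptr(&percpu_obj_pool);
	if (obj_cache && cache->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		/* 1) Room in the CPU-local cache: no lock needed. */
		hlist_add_head(&obj->node, &cache->free_objs);
		cache->obj_free++;
	} else {
		/*
		 * 2) Otherwise fall back to the global pool under pool_lock.
		 * The real __free_object() additionally drains a look-ahead
		 * batch from the percpu pool here and may divert everything
		 * to obj_to_free for the delayed work instead.
		 */
		raw_spin_lock(&pool_lock);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		raw_spin_unlock(&pool_lock);
	}
	local_irq_restore(flags);
}
```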
266 | 424 | /* |
---|
.. | .. |
---|
269 | 427 | */ |
---|
270 | 428 | static void free_object(struct debug_obj *obj) |
---|
271 | 429 | { |
---|
272 | | - if (__free_object(obj)) |
---|
273 | | - schedule_work(&debug_obj_work); |
---|
| 430 | + __free_object(obj); |
---|
| 431 | + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { |
---|
| 432 | + WRITE_ONCE(obj_freeing, true); |
---|
| 433 | + schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); |
---|
| 434 | + } |
---|
274 | 435 | } |
---|
| 436 | + |
---|
| 437 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 438 | +static int object_cpu_offline(unsigned int cpu) |
---|
| 439 | +{ |
---|
| 440 | + struct debug_percpu_free *percpu_pool; |
---|
| 441 | + struct hlist_node *tmp; |
---|
| 442 | + struct debug_obj *obj; |
---|
| 443 | + |
---|
| 444 | + /* Remote access is safe as the CPU is dead already */ |
---|
| 445 | + percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); |
---|
| 446 | + hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) { |
---|
| 447 | + hlist_del(&obj->node); |
---|
| 448 | + kmem_cache_free(obj_cache, obj); |
---|
| 449 | + } |
---|
| 450 | + percpu_pool->obj_free = 0; |
---|
| 451 | + |
---|
| 452 | + return 0; |
---|
| 453 | +} |
---|
| 454 | +#endif |
---|
275 | 455 | |
---|
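object_cpu_offline() runs after the CPU is already dead, so it can walk the remote percpu pool without synchronization and hand its objects back to the slab cache. The patch registers it from debug_objects_mem_init() near the end of this diff; the standalone wrapper below is hypothetical and only shows what that registration looks like:

```c
#ifdef CONFIG_HOTPLUG_CPU
static int __init debug_objects_hotplug_init(void)
{
	/*
	 * The teardown callback for this state is invoked on a control CPU
	 * once the target CPU is gone, which is why the remote percpu
	 * access in object_cpu_offline() is safe.
	 */
	return cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline",
					 NULL, object_cpu_offline);
}
#endif
```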
276 | 456 | /* |
---|
277 | 457 | * We run out of memory. That means we probably have tons of objects |
---|
.. | .. |
---|
315 | 495 | |
---|
316 | 496 | static void debug_print_object(struct debug_obj *obj, char *msg) |
---|
317 | 497 | { |
---|
318 | | - struct debug_obj_descr *descr = obj->descr; |
---|
| 498 | + const struct debug_obj_descr *descr = obj->descr; |
---|
319 | 499 | static int limit; |
---|
320 | 500 | |
---|
321 | 501 | if (limit < 5 && descr != descr_test) { |
---|
.. | .. |
---|
369 | 549 | } |
---|
370 | 550 | |
---|
371 | 551 | static void |
---|
372 | | -__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) |
---|
| 552 | +__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) |
---|
373 | 553 | { |
---|
374 | 554 | enum debug_obj_state state; |
---|
| 555 | + bool check_stack = false; |
---|
375 | 556 | struct debug_bucket *db; |
---|
376 | 557 | struct debug_obj *obj; |
---|
377 | 558 | unsigned long flags; |
---|
.. | .. |
---|
391 | 572 | debug_objects_oom(); |
---|
392 | 573 | return; |
---|
393 | 574 | } |
---|
394 | | - debug_object_is_on_stack(addr, onstack); |
---|
| 575 | + check_stack = true; |
---|
395 | 576 | } |
---|
396 | 577 | |
---|
397 | 578 | switch (obj->state) { |
---|
.. | .. |
---|
402 | 583 | break; |
---|
403 | 584 | |
---|
404 | 585 | case ODEBUG_STATE_ACTIVE: |
---|
405 | | - debug_print_object(obj, "init"); |
---|
406 | 586 | state = obj->state; |
---|
407 | 587 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 588 | + debug_print_object(obj, "init"); |
---|
408 | 589 | debug_object_fixup(descr->fixup_init, addr, state); |
---|
409 | 590 | return; |
---|
410 | 591 | |
---|
411 | 592 | case ODEBUG_STATE_DESTROYED: |
---|
| 593 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
412 | 594 | debug_print_object(obj, "init"); |
---|
413 | | - break; |
---|
| 595 | + return; |
---|
414 | 596 | default: |
---|
415 | 597 | break; |
---|
416 | 598 | } |
---|
417 | 599 | |
---|
418 | 600 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 601 | + if (check_stack) |
---|
| 602 | + debug_object_is_on_stack(addr, onstack); |
---|
419 | 603 | } |
---|
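__debug_object_init() and the other entry points now only record, while holding db->lock, whether a warning is due (the print_object and check_stack flags) and perform the actual debug_print_object() or debug_object_is_on_stack() call after dropping the lock, keeping printk() out of the bucket-lock critical section. The generic shape of that change, with a made-up function name:

```c
static void report_outside_lock(struct debug_bucket *db, struct debug_obj *obj)
{
	unsigned long flags;
	bool print_object = false;

	raw_spin_lock_irqsave(&db->lock, flags);
	if (obj->state == ODEBUG_STATE_DESTROYED)
		print_object = true;		/* only record the decision */
	raw_spin_unlock_irqrestore(&db->lock, flags);

	if (print_object)			/* printk() without db->lock held */
		debug_print_object(obj, "example");
}
```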
420 | 604 | |
---|
421 | 605 | /** |
---|
.. | .. |
---|
423 | 607 | * @addr: address of the object |
---|
424 | 608 | * @descr: pointer to an object specific debug description structure |
---|
425 | 609 | */ |
---|
426 | | -void debug_object_init(void *addr, struct debug_obj_descr *descr) |
---|
| 610 | +void debug_object_init(void *addr, const struct debug_obj_descr *descr) |
---|
427 | 611 | { |
---|
428 | 612 | if (!debug_objects_enabled) |
---|
429 | 613 | return; |
---|
.. | .. |
---|
438 | 622 | * @addr: address of the object |
---|
439 | 623 | * @descr: pointer to an object specific debug description structure |
---|
440 | 624 | */ |
---|
441 | | -void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) |
---|
| 625 | +void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) |
---|
442 | 626 | { |
---|
443 | 627 | if (!debug_objects_enabled) |
---|
444 | 628 | return; |
---|
.. | .. |
---|
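All public entry points now take a const struct debug_obj_descr *, so callers can place their descriptors in rodata. A hypothetical user (my_work, my_work_fixup_init and my_work_setup are made-up names) to show the effect:

```c
#include <linux/debugobjects.h>

struct my_work { int pending; };

static bool my_work_fixup_init(void *addr, enum debug_obj_state state)
{
	return false;	/* nothing to fix up in this toy example */
}

static const struct debug_obj_descr my_work_debug_descr = {
	.name		= "my_work",
	.fixup_init	= my_work_fixup_init,
};

static void my_work_setup(struct my_work *w)
{
	debug_object_init(w, &my_work_debug_descr);
	debug_object_activate(w, &my_work_debug_descr);
}
```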
453 | 637 | * @descr: pointer to an object specific debug description structure |
---|
454 | 638 | * Returns 0 for success, -EINVAL for check failed. |
---|
455 | 639 | */ |
---|
456 | | -int debug_object_activate(void *addr, struct debug_obj_descr *descr) |
---|
| 640 | +int debug_object_activate(void *addr, const struct debug_obj_descr *descr) |
---|
457 | 641 | { |
---|
458 | 642 | enum debug_obj_state state; |
---|
459 | 643 | struct debug_bucket *db; |
---|
.. | .. |
---|
473 | 657 | |
---|
474 | 658 | obj = lookup_object(addr, db); |
---|
475 | 659 | if (obj) { |
---|
| 660 | + bool print_object = false; |
---|
| 661 | + |
---|
476 | 662 | switch (obj->state) { |
---|
477 | 663 | case ODEBUG_STATE_INIT: |
---|
478 | 664 | case ODEBUG_STATE_INACTIVE: |
---|
.. | .. |
---|
481 | 667 | break; |
---|
482 | 668 | |
---|
483 | 669 | case ODEBUG_STATE_ACTIVE: |
---|
484 | | - debug_print_object(obj, "activate"); |
---|
485 | 670 | state = obj->state; |
---|
486 | 671 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 672 | + debug_print_object(obj, "activate"); |
---|
487 | 673 | ret = debug_object_fixup(descr->fixup_activate, addr, state); |
---|
488 | 674 | return ret ? 0 : -EINVAL; |
---|
489 | 675 | |
---|
490 | 676 | case ODEBUG_STATE_DESTROYED: |
---|
491 | | - debug_print_object(obj, "activate"); |
---|
| 677 | + print_object = true; |
---|
492 | 678 | ret = -EINVAL; |
---|
493 | 679 | break; |
---|
494 | 680 | default: |
---|
.. | .. |
---|
496 | 682 | break; |
---|
497 | 683 | } |
---|
498 | 684 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 685 | + if (print_object) |
---|
| 686 | + debug_print_object(obj, "activate"); |
---|
499 | 687 | return ret; |
---|
500 | 688 | } |
---|
501 | 689 | |
---|
502 | 690 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 691 | + |
---|
503 | 692 | /* |
---|
504 | 693 | * We are here when a static object is activated. We |
---|
505 | 694 | * let the type specific code confirm whether this is |
---|
.. | .. |
---|
526 | 715 | * @addr: address of the object |
---|
527 | 716 | * @descr: pointer to an object specific debug description structure |
---|
528 | 717 | */ |
---|
529 | | -void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) |
---|
| 718 | +void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) |
---|
530 | 719 | { |
---|
531 | 720 | struct debug_bucket *db; |
---|
532 | 721 | struct debug_obj *obj; |
---|
533 | 722 | unsigned long flags; |
---|
| 723 | + bool print_object = false; |
---|
534 | 724 | |
---|
535 | 725 | if (!debug_objects_enabled) |
---|
536 | 726 | return; |
---|
.. | .. |
---|
548 | 738 | if (!obj->astate) |
---|
549 | 739 | obj->state = ODEBUG_STATE_INACTIVE; |
---|
550 | 740 | else |
---|
551 | | - debug_print_object(obj, "deactivate"); |
---|
| 741 | + print_object = true; |
---|
552 | 742 | break; |
---|
553 | 743 | |
---|
554 | 744 | case ODEBUG_STATE_DESTROYED: |
---|
555 | | - debug_print_object(obj, "deactivate"); |
---|
| 745 | + print_object = true; |
---|
556 | 746 | break; |
---|
557 | 747 | default: |
---|
558 | 748 | break; |
---|
559 | 749 | } |
---|
560 | | - } else { |
---|
| 750 | + } |
---|
| 751 | + |
---|
| 752 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 753 | + if (!obj) { |
---|
561 | 754 | struct debug_obj o = { .object = addr, |
---|
562 | 755 | .state = ODEBUG_STATE_NOTAVAILABLE, |
---|
563 | 756 | .descr = descr }; |
---|
564 | 757 | |
---|
565 | 758 | debug_print_object(&o, "deactivate"); |
---|
| 759 | + } else if (print_object) { |
---|
| 760 | + debug_print_object(obj, "deactivate"); |
---|
566 | 761 | } |
---|
567 | | - |
---|
568 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
569 | 762 | } |
---|
570 | 763 | EXPORT_SYMBOL_GPL(debug_object_deactivate); |
---|
571 | 764 | |
---|
.. | .. |
---|
574 | 767 | * @addr: address of the object |
---|
575 | 768 | * @descr: pointer to an object specific debug description structure |
---|
576 | 769 | */ |
---|
577 | | -void debug_object_destroy(void *addr, struct debug_obj_descr *descr) |
---|
| 770 | +void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) |
---|
578 | 771 | { |
---|
579 | 772 | enum debug_obj_state state; |
---|
580 | 773 | struct debug_bucket *db; |
---|
581 | 774 | struct debug_obj *obj; |
---|
582 | 775 | unsigned long flags; |
---|
| 776 | + bool print_object = false; |
---|
583 | 777 | |
---|
584 | 778 | if (!debug_objects_enabled) |
---|
585 | 779 | return; |
---|
.. | .. |
---|
599 | 793 | obj->state = ODEBUG_STATE_DESTROYED; |
---|
600 | 794 | break; |
---|
601 | 795 | case ODEBUG_STATE_ACTIVE: |
---|
602 | | - debug_print_object(obj, "destroy"); |
---|
603 | 796 | state = obj->state; |
---|
604 | 797 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 798 | + debug_print_object(obj, "destroy"); |
---|
605 | 799 | debug_object_fixup(descr->fixup_destroy, addr, state); |
---|
606 | 800 | return; |
---|
607 | 801 | |
---|
608 | 802 | case ODEBUG_STATE_DESTROYED: |
---|
609 | | - debug_print_object(obj, "destroy"); |
---|
| 803 | + print_object = true; |
---|
610 | 804 | break; |
---|
611 | 805 | default: |
---|
612 | 806 | break; |
---|
613 | 807 | } |
---|
614 | 808 | out_unlock: |
---|
615 | 809 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 810 | + if (print_object) |
---|
| 811 | + debug_print_object(obj, "destroy"); |
---|
616 | 812 | } |
---|
617 | 813 | EXPORT_SYMBOL_GPL(debug_object_destroy); |
---|
618 | 814 | |
---|
.. | .. |
---|
621 | 817 | * @addr: address of the object |
---|
622 | 818 | * @descr: pointer to an object specific debug description structure |
---|
623 | 819 | */ |
---|
624 | | -void debug_object_free(void *addr, struct debug_obj_descr *descr) |
---|
| 820 | +void debug_object_free(void *addr, const struct debug_obj_descr *descr) |
---|
625 | 821 | { |
---|
626 | 822 | enum debug_obj_state state; |
---|
627 | 823 | struct debug_bucket *db; |
---|
.. | .. |
---|
641 | 837 | |
---|
642 | 838 | switch (obj->state) { |
---|
643 | 839 | case ODEBUG_STATE_ACTIVE: |
---|
644 | | - debug_print_object(obj, "free"); |
---|
645 | 840 | state = obj->state; |
---|
646 | 841 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 842 | + debug_print_object(obj, "free"); |
---|
647 | 843 | debug_object_fixup(descr->fixup_free, addr, state); |
---|
648 | 844 | return; |
---|
649 | 845 | default: |
---|
.. | .. |
---|
662 | 858 | * @addr: address of the object |
---|
663 | 859 | * @descr: pointer to an object specific debug description structure |
---|
664 | 860 | */ |
---|
665 | | -void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) |
---|
| 861 | +void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) |
---|
666 | 862 | { |
---|
667 | 863 | struct debug_bucket *db; |
---|
668 | 864 | struct debug_obj *obj; |
---|
.. | .. |
---|
710 | 906 | * @next: state to move to if expected state is found |
---|
711 | 907 | */ |
---|
712 | 908 | void |
---|
713 | | -debug_object_active_state(void *addr, struct debug_obj_descr *descr, |
---|
| 909 | +debug_object_active_state(void *addr, const struct debug_obj_descr *descr, |
---|
714 | 910 | unsigned int expect, unsigned int next) |
---|
715 | 911 | { |
---|
716 | 912 | struct debug_bucket *db; |
---|
717 | 913 | struct debug_obj *obj; |
---|
718 | 914 | unsigned long flags; |
---|
| 915 | + bool print_object = false; |
---|
719 | 916 | |
---|
720 | 917 | if (!debug_objects_enabled) |
---|
721 | 918 | return; |
---|
.. | .. |
---|
731 | 928 | if (obj->astate == expect) |
---|
732 | 929 | obj->astate = next; |
---|
733 | 930 | else |
---|
734 | | - debug_print_object(obj, "active_state"); |
---|
| 931 | + print_object = true; |
---|
735 | 932 | break; |
---|
736 | 933 | |
---|
737 | 934 | default: |
---|
738 | | - debug_print_object(obj, "active_state"); |
---|
| 935 | + print_object = true; |
---|
739 | 936 | break; |
---|
740 | 937 | } |
---|
741 | | - } else { |
---|
| 938 | + } |
---|
| 939 | + |
---|
| 940 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 941 | + if (!obj) { |
---|
742 | 942 | struct debug_obj o = { .object = addr, |
---|
743 | 943 | .state = ODEBUG_STATE_NOTAVAILABLE, |
---|
744 | 944 | .descr = descr }; |
---|
745 | 945 | |
---|
746 | 946 | debug_print_object(&o, "active_state"); |
---|
| 947 | + } else if (print_object) { |
---|
| 948 | + debug_print_object(obj, "active_state"); |
---|
747 | 949 | } |
---|
748 | | - |
---|
749 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
750 | 950 | } |
---|
751 | 951 | EXPORT_SYMBOL_GPL(debug_object_active_state); |
---|
752 | 952 | |
---|
.. | .. |
---|
754 | 954 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
---|
755 | 955 | { |
---|
756 | 956 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
---|
757 | | - struct debug_obj_descr *descr; |
---|
| 957 | + const struct debug_obj_descr *descr; |
---|
758 | 958 | enum debug_obj_state state; |
---|
759 | 959 | struct debug_bucket *db; |
---|
760 | 960 | struct hlist_node *tmp; |
---|
761 | 961 | struct debug_obj *obj; |
---|
762 | 962 | int cnt, objs_checked = 0; |
---|
763 | | - bool work = false; |
---|
764 | 963 | |
---|
765 | 964 | saddr = (unsigned long) address; |
---|
766 | 965 | eaddr = saddr + size; |
---|
.. | .. |
---|
782 | 981 | |
---|
783 | 982 | switch (obj->state) { |
---|
784 | 983 | case ODEBUG_STATE_ACTIVE: |
---|
785 | | - debug_print_object(obj, "free"); |
---|
786 | 984 | descr = obj->descr; |
---|
787 | 985 | state = obj->state; |
---|
788 | 986 | raw_spin_unlock_irqrestore(&db->lock, flags); |
---|
| 987 | + debug_print_object(obj, "free"); |
---|
789 | 988 | debug_object_fixup(descr->fixup_free, |
---|
790 | 989 | (void *) oaddr, state); |
---|
791 | 990 | goto repeat; |
---|
792 | 991 | default: |
---|
793 | 992 | hlist_del(&obj->node); |
---|
794 | | - work |= __free_object(obj); |
---|
| 993 | + __free_object(obj); |
---|
795 | 994 | break; |
---|
796 | 995 | } |
---|
797 | 996 | } |
---|
.. | .. |
---|
807 | 1006 | debug_objects_maxchecked = objs_checked; |
---|
808 | 1007 | |
---|
809 | 1008 | /* Schedule work to actually kmem_cache_free() objects */ |
---|
810 | | - if (work) |
---|
811 | | - schedule_work(&debug_obj_work); |
---|
| 1009 | + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { |
---|
| 1010 | + WRITE_ONCE(obj_freeing, true); |
---|
| 1011 | + schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); |
---|
| 1012 | + } |
---|
812 | 1013 | } |
---|
813 | 1014 | |
---|
814 | 1015 | void debug_check_no_obj_freed(const void *address, unsigned long size) |
---|
.. | .. |
---|
822 | 1023 | |
---|
823 | 1024 | static int debug_stats_show(struct seq_file *m, void *v) |
---|
824 | 1025 | { |
---|
| 1026 | + int cpu, obj_percpu_free = 0; |
---|
| 1027 | + |
---|
| 1028 | + for_each_possible_cpu(cpu) |
---|
| 1029 | + obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu); |
---|
| 1030 | + |
---|
825 | 1031 | seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); |
---|
826 | 1032 | seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); |
---|
827 | 1033 | seq_printf(m, "warnings :%d\n", debug_objects_warnings); |
---|
828 | 1034 | seq_printf(m, "fixups :%d\n", debug_objects_fixups); |
---|
829 | | - seq_printf(m, "pool_free :%d\n", obj_pool_free); |
---|
| 1035 | + seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free); |
---|
| 1036 | + seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); |
---|
830 | 1037 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); |
---|
831 | | - seq_printf(m, "pool_used :%d\n", obj_pool_used); |
---|
| 1038 | + seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); |
---|
832 | 1039 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); |
---|
833 | | - seq_printf(m, "on_free_list :%d\n", obj_nr_tofree); |
---|
| 1040 | + seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree)); |
---|
834 | 1041 | seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); |
---|
835 | 1042 | seq_printf(m, "objs_freed :%d\n", debug_objects_freed); |
---|
836 | 1043 | return 0; |
---|
837 | 1044 | } |
---|
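Because objects parked in the percpu pools are still counted as "used" by the global accounting, debug_stats_show() folds the summed percpu counts back in: pool_free gains obj_percpu_free and pool_used loses it. A worked example with hypothetical numbers:

```c
/*
 * Assume obj_pool_free = 300, obj_pool_used = 700 and two CPUs each
 * holding 64 objects in their percpu pools (obj_percpu_free = 128).
 * The stats file then reports:
 *
 *   pool_free     : 300 + 128 = 428
 *   pool_pcp_free : 128
 *   pool_used     : 700 - 128 = 572
 */
```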
838 | | - |
---|
839 | | -static int debug_stats_open(struct inode *inode, struct file *filp) |
---|
840 | | -{ |
---|
841 | | - return single_open(filp, debug_stats_show, NULL); |
---|
842 | | -} |
---|
843 | | - |
---|
844 | | -static const struct file_operations debug_stats_fops = { |
---|
845 | | - .open = debug_stats_open, |
---|
846 | | - .read = seq_read, |
---|
847 | | - .llseek = seq_lseek, |
---|
848 | | - .release = single_release, |
---|
849 | | -}; |
---|
| 1045 | +DEFINE_SHOW_ATTRIBUTE(debug_stats); |
---|
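DEFINE_SHOW_ATTRIBUTE(debug_stats) replaces the hand-written open handler and file_operations removed just above. Roughly what the macro (from include/linux/seq_file.h) expands to here:

```c
static int debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_stats_show, inode->i_private);
}

static const struct file_operations debug_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = debug_stats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
```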
850 | 1046 | |
---|
851 | 1047 | static int __init debug_objects_init_debugfs(void) |
---|
852 | 1048 | { |
---|
853 | | - struct dentry *dbgdir, *dbgstats; |
---|
| 1049 | + struct dentry *dbgdir; |
---|
854 | 1050 | |
---|
855 | 1051 | if (!debug_objects_enabled) |
---|
856 | 1052 | return 0; |
---|
857 | 1053 | |
---|
858 | 1054 | dbgdir = debugfs_create_dir("debug_objects", NULL); |
---|
859 | | - if (!dbgdir) |
---|
860 | | - return -ENOMEM; |
---|
861 | 1055 | |
---|
862 | | - dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, |
---|
863 | | - &debug_stats_fops); |
---|
864 | | - if (!dbgstats) |
---|
865 | | - goto err; |
---|
| 1056 | + debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops); |
---|
866 | 1057 | |
---|
867 | 1058 | return 0; |
---|
868 | | - |
---|
869 | | -err: |
---|
870 | | - debugfs_remove(dbgdir); |
---|
871 | | - |
---|
872 | | - return -ENOMEM; |
---|
873 | 1059 | } |
---|
874 | 1060 | __initcall(debug_objects_init_debugfs); |
---|
875 | 1061 | |
---|
.. | .. |
---|
886 | 1072 | unsigned long dummy2[3]; |
---|
887 | 1073 | }; |
---|
888 | 1074 | |
---|
889 | | -static __initdata struct debug_obj_descr descr_type_test; |
---|
| 1075 | +static __initconst const struct debug_obj_descr descr_type_test; |
---|
890 | 1076 | |
---|
891 | 1077 | static bool __init is_static_object(void *addr) |
---|
892 | 1078 | { |
---|
.. | .. |
---|
1011 | 1197 | return res; |
---|
1012 | 1198 | } |
---|
1013 | 1199 | |
---|
1014 | | -static __initdata struct debug_obj_descr descr_type_test = { |
---|
| 1200 | +static __initconst const struct debug_obj_descr descr_type_test = { |
---|
1015 | 1201 | .name = "selftest", |
---|
1016 | 1202 | .is_static_object = is_static_object, |
---|
1017 | 1203 | .fixup_init = fixup_init, |
---|
.. | .. |
---|
1131 | 1317 | } |
---|
1132 | 1318 | |
---|
1133 | 1319 | /* |
---|
1134 | | - * When debug_objects_mem_init() is called we know that only |
---|
1135 | | - * one CPU is up, so disabling interrupts is enough |
---|
1136 | | - * protection. This avoids the lockdep hell of lock ordering. |
---|
| 1320 | + * debug_objects_mem_init() is now called early that only one CPU is up |
---|
| 1321 | + * and interrupts have been disabled, so it is safe to replace the |
---|
| 1322 | + * active object references. |
---|
1137 | 1323 | */ |
---|
1138 | | - local_irq_disable(); |
---|
1139 | 1324 | |
---|
1140 | 1325 | /* Remove the statically allocated objects from the pool */ |
---|
1141 | 1326 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) |
---|
.. | .. |
---|
1156 | 1341 | cnt++; |
---|
1157 | 1342 | } |
---|
1158 | 1343 | } |
---|
1159 | | - local_irq_enable(); |
---|
1160 | 1344 | |
---|
1161 | 1345 | pr_debug("%d of %d active objects replaced\n", |
---|
1162 | 1346 | cnt, obj_pool_used); |
---|
.. | .. |
---|
1177 | 1361 | */ |
---|
1178 | 1362 | void __init debug_objects_mem_init(void) |
---|
1179 | 1363 | { |
---|
| 1364 | + int cpu, extras; |
---|
| 1365 | + |
---|
1180 | 1366 | if (!debug_objects_enabled) |
---|
1181 | 1367 | return; |
---|
| 1368 | + |
---|
| 1369 | + /* |
---|
| 1370 | + * Initialize the percpu object pools |
---|
| 1371 | + * |
---|
| 1372 | + * Initialization is not strictly necessary, but was done for |
---|
| 1373 | + * completeness. |
---|
| 1374 | + */ |
---|
| 1375 | + for_each_possible_cpu(cpu) |
---|
| 1376 | + INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu)); |
---|
1182 | 1377 | |
---|
1183 | 1378 | obj_cache = kmem_cache_create("debug_objects_cache", |
---|
1184 | 1379 | sizeof (struct debug_obj), 0, |
---|
.. | .. |
---|
1192 | 1387 | } else |
---|
1193 | 1388 | debug_objects_selftest(); |
---|
1194 | 1389 | |
---|
| 1390 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 1391 | + cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL, |
---|
| 1392 | + object_cpu_offline); |
---|
| 1393 | +#endif |
---|
| 1394 | + |
---|
1195 | 1395 | /* |
---|
1196 | 1396 | * Increase the thresholds for allocating and freeing objects |
---|
1197 | 1397 | * according to the number of possible CPUs available in the system. |
---|
1198 | 1398 | */ |
---|
1199 | | - debug_objects_pool_size += num_possible_cpus() * 32; |
---|
1200 | | - debug_objects_pool_min_level += num_possible_cpus() * 4; |
---|
| 1399 | + extras = num_possible_cpus() * ODEBUG_BATCH_SIZE; |
---|
| 1400 | + debug_objects_pool_size += extras; |
---|
| 1401 | + debug_objects_pool_min_level += extras; |
---|
1201 | 1402 | } |
---|