| .. | .. |
|---|
| 19 | 19 | #include <linux/slab.h> |
|---|
| 20 | 20 | #include <linux/hash.h> |
|---|
| 21 | 21 | #include <linux/kmemleak.h> |
|---|
| 22 | +#include <linux/cpu.h> |
|---|
| 22 | 23 | |
|---|
| 23 | 24 | #define ODEBUG_HASH_BITS 14 |
|---|
| 24 | 25 | #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) |
|---|
| 25 | 26 | |
|---|
| 26 | 27 | #define ODEBUG_POOL_SIZE 1024 |
|---|
| 27 | 28 | #define ODEBUG_POOL_MIN_LEVEL 256 |
|---|
| 29 | +#define ODEBUG_POOL_PERCPU_SIZE 64 |
|---|
| 30 | +#define ODEBUG_BATCH_SIZE 16 |
|---|
| 28 | 31 | |
|---|
| 29 | 32 | #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT |
|---|
| 30 | 33 | #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) |
|---|
| 31 | 34 | #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) |
|---|
| 32 | 35 | |
|---|
| 36 | +/* |
|---|
| 37 | + * We limit the freeing of debug objects via workqueue to a maximum |
|---|
| 38 | + * frequency of 10Hz and about 1024 objects per freeing operation, |
|---|
| 39 | + * so it frees at most ~10k debug objects per second. |
|---|
| 40 | + */ |
|---|
| 41 | +#define ODEBUG_FREE_WORK_MAX 1024 |
|---|
| 42 | +#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10) |
|---|
| 43 | + |
|---|
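For reference, the arithmetic behind that cap, as a minimal userspace model; the `HZ` value here is an assumed config value and `DIV_ROUND_UP` is re-derived locally rather than pulled from kernel headers:

```c
#include <stdio.h>

#define HZ                     250  /* assumed; real configs use 100..1000 */
#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
#define ODEBUG_FREE_WORK_MAX   1024
#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)

int main(void)
{
	/* The worker can fire at most HZ / ODEBUG_FREE_WORK_DELAY times/sec. */
	int runs_per_sec = HZ / ODEBUG_FREE_WORK_DELAY;

	printf("delay         : %d jiffies (%d ms)\n",
	       ODEBUG_FREE_WORK_DELAY, ODEBUG_FREE_WORK_DELAY * 1000 / HZ);
	printf("max freed/sec : %d\n", runs_per_sec * ODEBUG_FREE_WORK_MAX);
	return 0;
}
```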
| 33 | 44 | struct debug_bucket { |
|---|
| 34 | 45 | struct hlist_head list; |
|---|
| 35 | 46 | raw_spinlock_t lock; |
|---|
| 36 | 47 | }; |
|---|
| 48 | + |
|---|
| 49 | +/* |
|---|
| 50 | + * Debug object percpu free list |
|---|
| 51 | + * Access is protected by disabling irq |
|---|
| 52 | + */ |
|---|
| 53 | +struct debug_percpu_free { |
|---|
| 54 | + struct hlist_head free_objs; |
|---|
| 55 | + int obj_free; |
|---|
| 56 | +}; |
|---|
| 57 | + |
|---|
| 58 | +static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool); |
|---|
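The percpu pool is only ever touched on the local CPU with interrupts disabled, which is why the struct needs no lock field. A minimal sketch of that access discipline; `percpu_pool_pop()` is a hypothetical helper, not part of the patch:

```c
/* Sketch only: pop one object off the local CPU's free list. */
static struct debug_obj *percpu_pool_pop(void)
{
	struct debug_percpu_free *pool;
	struct debug_obj *obj = NULL;
	unsigned long flags;

	local_irq_save(flags);                  /* excludes local irq users  */
	pool = this_cpu_ptr(&percpu_obj_pool);  /* no remote CPU ever looks  */
	if (pool->free_objs.first) {
		obj = hlist_entry(pool->free_objs.first, typeof(*obj), node);
		hlist_del(&obj->node);
		pool->obj_free--;
	}
	local_irq_restore(flags);
	return obj;
}
```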
| 37 | 59 | |
|---|
| 38 | 60 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; |
|---|
| 39 | 61 | |
|---|
| .. | .. |
|---|
| 44 | 66 | static HLIST_HEAD(obj_pool); |
|---|
| 45 | 67 | static HLIST_HEAD(obj_to_free); |
|---|
| 46 | 68 | |
|---|
| 69 | +/* |
|---|
| 70 | + * Because of the presence of percpu free pools, obj_pool_free will |
|---|
| 71 | + * under-count those in the percpu free pools. Similarly, obj_pool_used |
|---|
| 72 | + * will over-count those in the percpu free pools. Adjustments will be |
|---|
| 73 | + * made in debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used |
|---|
| 74 | + * can be off. |
|---|
| 75 | + */ |
|---|
| 47 | 76 | static int obj_pool_min_free = ODEBUG_POOL_SIZE; |
|---|
| 48 | 77 | static int obj_pool_free = ODEBUG_POOL_SIZE; |
|---|
| 49 | 78 | static int obj_pool_used; |
|---|
| 50 | 79 | static int obj_pool_max_used; |
|---|
| 80 | +static bool obj_freeing; |
|---|
| 51 | 81 | /* The number of objs on the global free list */ |
|---|
| 52 | 82 | static int obj_nr_tofree; |
|---|
| 53 | | -static struct kmem_cache *obj_cache; |
|---|
| 54 | 83 | |
|---|
| 55 | 84 | static int debug_objects_maxchain __read_mostly; |
|---|
| 56 | 85 | static int __maybe_unused debug_objects_maxchecked __read_mostly; |
|---|
| .. | .. |
|---|
| 62 | 91 | = ODEBUG_POOL_SIZE; |
|---|
| 63 | 92 | static int debug_objects_pool_min_level __read_mostly |
|---|
| 64 | 93 | = ODEBUG_POOL_MIN_LEVEL; |
|---|
| 65 | | -static struct debug_obj_descr *descr_test __read_mostly; |
|---|
| 94 | +static const struct debug_obj_descr *descr_test __read_mostly; |
|---|
| 95 | +static struct kmem_cache *obj_cache __read_mostly; |
|---|
| 66 | 96 | |
|---|
| 67 | 97 | /* |
|---|
| 68 | 98 | * Track numbers of kmem_cache_alloc()/free() calls done. |
|---|
| .. | .. |
|---|
| 71 | 101 | static int debug_objects_freed; |
|---|
| 72 | 102 | |
|---|
| 73 | 103 | static void free_obj_work(struct work_struct *work); |
|---|
| 74 | | -static DECLARE_WORK(debug_obj_work, free_obj_work); |
|---|
| 104 | +static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work); |
|---|
| 75 | 105 | |
|---|
| 76 | 106 | static int __init enable_object_debug(char *str) |
|---|
| 77 | 107 | { |
|---|
| .. | .. |
|---|
| 99 | 129 | |
|---|
| 100 | 130 | static void fill_pool(void) |
|---|
| 101 | 131 | { |
|---|
| 102 | | - gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; |
|---|
| 103 | | - struct debug_obj *new, *obj; |
|---|
| 132 | + gfp_t gfp = __GFP_HIGH | __GFP_NOWARN; |
|---|
| 133 | + struct debug_obj *obj; |
|---|
| 104 | 134 | unsigned long flags; |
|---|
| 105 | 135 | |
|---|
| 106 | | - if (likely(obj_pool_free >= debug_objects_pool_min_level)) |
|---|
| 136 | + if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level)) |
|---|
| 107 | 137 | return; |
|---|
| 108 | 138 | |
|---|
| 109 | 139 | /* |
|---|
| 110 | 140 | * Reuse objs from the global free list; they will be reinitialized |
|---|
| 111 | 141 | * when allocating. |
|---|
| 142 | + * |
|---|
| 143 | + * Both obj_nr_tofree and obj_pool_free are checked locklessly; the |
|---|
| 144 | + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical |
|---|
| 145 | + * sections. |
|---|
| 112 | 146 | */ |
|---|
| 113 | | - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { |
|---|
| 147 | + while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { |
|---|
| 114 | 148 | raw_spin_lock_irqsave(&pool_lock, flags); |
|---|
| 115 | 149 | /* |
|---|
| 116 | 150 | * Recheck with the lock held as the worker thread might have |
|---|
| 117 | 151 | * won the race and freed the global free list already. |
|---|
| 118 | 152 | */ |
|---|
| 119 | | - if (obj_nr_tofree) { |
|---|
| 153 | + while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { |
|---|
| 120 | 154 | obj = hlist_entry(obj_to_free.first, typeof(*obj), node); |
|---|
| 121 | 155 | hlist_del(&obj->node); |
|---|
| 122 | | - obj_nr_tofree--; |
|---|
| 156 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); |
|---|
| 123 | 157 | hlist_add_head(&obj->node, &obj_pool); |
|---|
| 124 | | - obj_pool_free++; |
|---|
| 158 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
|---|
| 125 | 159 | } |
|---|
| 126 | 160 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
|---|
| 127 | 161 | } |
|---|
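This is the annotation pattern used throughout the patch: the counters are still only modified under `pool_lock`, but every write becomes `WRITE_ONCE()` so the lockless `READ_ONCE()` level checks above can never observe torn values and the compiler cannot fuse or refetch the loads. A condensed sketch with illustrative names:

```c
static int pool_level;		/* illustrative stand-in for obj_pool_free */

/* Lockless reader: the value may be stale, but never torn. */
static int pool_level_hint(void)
{
	return READ_ONCE(pool_level);
}

/* Writer: called with pool_lock held. */
static void pool_level_inc_locked(void)
{
	WRITE_ONCE(pool_level, pool_level + 1);
}
```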
| .. | .. |
|---|
| 129 | 163 | if (unlikely(!obj_cache)) |
|---|
| 130 | 164 | return; |
|---|
| 131 | 165 | |
|---|
| 132 | | - while (obj_pool_free < debug_objects_pool_min_level) { |
|---|
| 166 | + while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { |
|---|
| 167 | + struct debug_obj *new[ODEBUG_BATCH_SIZE]; |
|---|
| 168 | + int cnt; |
|---|
| 133 | 169 | |
|---|
| 134 | | - new = kmem_cache_zalloc(obj_cache, gfp); |
|---|
| 135 | | - if (!new) |
|---|
| 170 | + for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) { |
|---|
| 171 | + new[cnt] = kmem_cache_zalloc(obj_cache, gfp); |
|---|
| 172 | + if (!new[cnt]) |
|---|
| 173 | + break; |
|---|
| 174 | + } |
|---|
| 175 | + if (!cnt) |
|---|
| 136 | 176 | return; |
|---|
| 137 | 177 | |
|---|
| 138 | 178 | raw_spin_lock_irqsave(&pool_lock, flags); |
|---|
| 139 | | - hlist_add_head(&new->node, &obj_pool); |
|---|
| 140 | | - debug_objects_allocated++; |
|---|
| 141 | | - obj_pool_free++; |
|---|
| 179 | + while (cnt) { |
|---|
| 180 | + hlist_add_head(&new[--cnt]->node, &obj_pool); |
|---|
| 181 | + debug_objects_allocated++; |
|---|
| 182 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
|---|
| 183 | + } |
|---|
| 142 | 184 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
|---|
| 143 | 185 | } |
|---|
| 144 | 186 | } |
|---|
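The refill loop now amortizes lock traffic: the `ODEBUG_BATCH_SIZE` allocations happen with `pool_lock` dropped, and the whole batch is inserted in one lock round-trip. A self-contained userspace model of the same shape (all names illustrative, not from the kernel):

```c
#include <pthread.h>
#include <stdlib.h>

#define BATCH_SIZE 16

struct node { struct node *next; };

static struct node *pool_head;
static int pool_free;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void fill_pool_batched(int min_level)
{
	while (pool_free < min_level) {	/* racy read, like the kernel's */
		struct node *batch[BATCH_SIZE];
		int cnt;

		/* Allocate outside the lock; stop early on failure. */
		for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
			batch[cnt] = calloc(1, sizeof(struct node));
			if (!batch[cnt])
				break;
		}
		if (!cnt)
			return;

		/* One lock round-trip per batch instead of per object. */
		pthread_mutex_lock(&pool_lock);
		while (cnt) {
			batch[--cnt]->next = pool_head;
			pool_head = batch[cnt];
			pool_free++;
		}
		pthread_mutex_unlock(&pool_lock);
	}
}
```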
| .. | .. |
|---|
| 163 | 205 | } |
|---|
| 164 | 206 | |
|---|
| 165 | 207 | /* |
|---|
| 166 | | - * Allocate a new object. If the pool is empty, switch off the debugger. |
|---|
| 167 | | - * Must be called with interrupts disabled. |
|---|
| 208 | + * Allocate a new object from the hlist |
|---|
| 168 | 209 | */ |
|---|
| 169 | | -static struct debug_obj * |
|---|
| 170 | | -alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) |
|---|
| 210 | +static struct debug_obj *__alloc_object(struct hlist_head *list) |
|---|
| 171 | 211 | { |
|---|
| 172 | 212 | struct debug_obj *obj = NULL; |
|---|
| 173 | 213 | |
|---|
| 174 | | - raw_spin_lock(&pool_lock); |
|---|
| 175 | | - if (obj_pool.first) { |
|---|
| 176 | | - obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
|---|
| 177 | | - |
|---|
| 178 | | - obj->object = addr; |
|---|
| 179 | | - obj->descr = descr; |
|---|
| 180 | | - obj->state = ODEBUG_STATE_NONE; |
|---|
| 181 | | - obj->astate = 0; |
|---|
| 214 | + if (list->first) { |
|---|
| 215 | + obj = hlist_entry(list->first, typeof(*obj), node); |
|---|
| 182 | 216 | hlist_del(&obj->node); |
|---|
| 217 | + } |
|---|
| 183 | 218 | |
|---|
| 184 | | - hlist_add_head(&obj->node, &b->list); |
|---|
| 219 | + return obj; |
|---|
| 220 | +} |
|---|
| 185 | 221 | |
|---|
| 222 | +static struct debug_obj * |
|---|
| 223 | +alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr) |
|---|
| 224 | +{ |
|---|
| 225 | + struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool); |
|---|
| 226 | + struct debug_obj *obj; |
|---|
| 227 | + |
|---|
| 228 | + if (likely(obj_cache)) { |
|---|
| 229 | + obj = __alloc_object(&percpu_pool->free_objs); |
|---|
| 230 | + if (obj) { |
|---|
| 231 | + percpu_pool->obj_free--; |
|---|
| 232 | + goto init_obj; |
|---|
| 233 | + } |
|---|
| 234 | + } |
|---|
| 235 | + |
|---|
| 236 | + raw_spin_lock(&pool_lock); |
|---|
| 237 | + obj = __alloc_object(&obj_pool); |
|---|
| 238 | + if (obj) { |
|---|
| 186 | 239 | obj_pool_used++; |
|---|
| 240 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
|---|
| 241 | + |
|---|
| 242 | + /* |
|---|
| 243 | + * Looking ahead, allocate one batch of debug objects and |
|---|
| 244 | + * put them into the percpu free pool. |
|---|
| 245 | + */ |
|---|
| 246 | + if (likely(obj_cache)) { |
|---|
| 247 | + int i; |
|---|
| 248 | + |
|---|
| 249 | + for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { |
|---|
| 250 | + struct debug_obj *obj2; |
|---|
| 251 | + |
|---|
| 252 | + obj2 = __alloc_object(&obj_pool); |
|---|
| 253 | + if (!obj2) |
|---|
| 254 | + break; |
|---|
| 255 | + hlist_add_head(&obj2->node, |
|---|
| 256 | + &percpu_pool->free_objs); |
|---|
| 257 | + percpu_pool->obj_free++; |
|---|
| 258 | + obj_pool_used++; |
|---|
| 259 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
|---|
| 260 | + } |
|---|
| 261 | + } |
|---|
| 262 | + |
|---|
| 187 | 263 | if (obj_pool_used > obj_pool_max_used) |
|---|
| 188 | 264 | obj_pool_max_used = obj_pool_used; |
|---|
| 189 | 265 | |
|---|
| 190 | | - obj_pool_free--; |
|---|
| 191 | 266 | if (obj_pool_free < obj_pool_min_free) |
|---|
| 192 | 267 | obj_pool_min_free = obj_pool_free; |
|---|
| 193 | 268 | } |
|---|
| 194 | 269 | raw_spin_unlock(&pool_lock); |
|---|
| 195 | 270 | |
|---|
| 271 | +init_obj: |
|---|
| 272 | + if (obj) { |
|---|
| 273 | + obj->object = addr; |
|---|
| 274 | + obj->descr = descr; |
|---|
| 275 | + obj->state = ODEBUG_STATE_NONE; |
|---|
| 276 | + obj->astate = 0; |
|---|
| 277 | + hlist_add_head(&obj->node, &b->list); |
|---|
| 278 | + } |
|---|
| 196 | 279 | return obj; |
|---|
| 197 | 280 | } |
|---|
| 198 | 281 | |
|---|
| .. | .. |
|---|
| 209 | 292 | unsigned long flags; |
|---|
| 210 | 293 | HLIST_HEAD(tofree); |
|---|
| 211 | 294 | |
|---|
| 295 | + WRITE_ONCE(obj_freeing, false); |
|---|
| 212 | 296 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) |
|---|
| 213 | 297 | return; |
|---|
| 298 | + |
|---|
| 299 | + if (obj_pool_free >= debug_objects_pool_size) |
|---|
| 300 | + goto free_objs; |
|---|
| 214 | 301 | |
|---|
| 215 | 302 | /* |
|---|
| 216 | 303 | * The objs on the pool list might be allocated before the work is |
|---|
| 217 | 304 | * run, so recheck if the pool list is full or not; if not, fill the pool |
|---|
| 218 | | - * list from the global free list |
|---|
| 305 | + * list from the global free list. As it is likely that a workload |
|---|
| 306 | + * may be gearing up to use more and more objects, don't free any |
|---|
| 307 | + * of them until the next round. |
|---|
| 219 | 308 | */ |
|---|
| 220 | 309 | while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) { |
|---|
| 221 | 310 | obj = hlist_entry(obj_to_free.first, typeof(*obj), node); |
|---|
| 222 | 311 | hlist_del(&obj->node); |
|---|
| 223 | 312 | hlist_add_head(&obj->node, &obj_pool); |
|---|
| 224 | | - obj_pool_free++; |
|---|
| 225 | | - obj_nr_tofree--; |
|---|
| 313 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
|---|
| 314 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); |
|---|
| 226 | 315 | } |
|---|
| 316 | + raw_spin_unlock_irqrestore(&pool_lock, flags); |
|---|
| 317 | + return; |
|---|
| 227 | 318 | |
|---|
| 319 | +free_objs: |
|---|
| 228 | 320 | /* |
|---|
| 229 | 321 | * Pool list is already full and there are still objs on the free |
|---|
| 230 | 322 | * list. Move remaining free objs to a temporary list to free the |
|---|
| .. | .. |
|---|
| 233 | 325 | if (obj_nr_tofree) { |
|---|
| 234 | 326 | hlist_move_list(&obj_to_free, &tofree); |
|---|
| 235 | 327 | debug_objects_freed += obj_nr_tofree; |
|---|
| 236 | | - obj_nr_tofree = 0; |
|---|
| 328 | + WRITE_ONCE(obj_nr_tofree, 0); |
|---|
| 237 | 329 | } |
|---|
| 238 | 330 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
|---|
| 239 | 331 | |
|---|
| .. | .. |
|---|
| 243 | 335 | } |
|---|
| 244 | 336 | } |
|---|
| 245 | 337 | |
|---|
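free_obj_work() now takes one of two mutually exclusive paths: below `debug_objects_pool_size` it only recycles objects from `obj_to_free` back into `obj_pool` and frees nothing this round; otherwise it detaches the whole free list under the lock and does the actual `kmem_cache_free()` calls with the lock dropped. A userspace model of the detach-then-free half (illustrative names):

```c
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *tofree_head;	/* stands in for obj_to_free */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_work_model(void)
{
	struct node *tofree;

	/* Detach the whole list in O(1) while holding the lock. */
	pthread_mutex_lock(&list_lock);
	tofree = tofree_head;
	tofree_head = NULL;
	pthread_mutex_unlock(&list_lock);

	/* The potentially slow frees run with the lock dropped. */
	while (tofree) {
		struct node *next = tofree->next;

		free(tofree);
		tofree = next;
	}
}
```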
| 246 | | -static bool __free_object(struct debug_obj *obj) |
|---|
| 338 | +static void __free_object(struct debug_obj *obj) |
|---|
| 247 | 339 | { |
|---|
| 340 | + struct debug_obj *objs[ODEBUG_BATCH_SIZE]; |
|---|
| 341 | + struct debug_percpu_free *percpu_pool; |
|---|
| 342 | + int lookahead_count = 0; |
|---|
| 248 | 343 | unsigned long flags; |
|---|
| 249 | 344 | bool work; |
|---|
| 250 | 345 | |
|---|
| 251 | | - raw_spin_lock_irqsave(&pool_lock, flags); |
|---|
| 252 | | - work = (obj_pool_free > debug_objects_pool_size) && obj_cache; |
|---|
| 346 | + local_irq_save(flags); |
|---|
| 347 | + if (!obj_cache) |
|---|
| 348 | + goto free_to_obj_pool; |
|---|
| 349 | + |
|---|
| 350 | + /* |
|---|
| 351 | + * Try to free it into the percpu pool first. |
|---|
| 352 | + */ |
|---|
| 353 | + percpu_pool = this_cpu_ptr(&percpu_obj_pool); |
|---|
| 354 | + if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) { |
|---|
| 355 | + hlist_add_head(&obj->node, &percpu_pool->free_objs); |
|---|
| 356 | + percpu_pool->obj_free++; |
|---|
| 357 | + local_irq_restore(flags); |
|---|
| 358 | + return; |
|---|
| 359 | + } |
|---|
| 360 | + |
|---|
| 361 | + /* |
|---|
| 362 | + * As the percpu pool is full, look ahead and pull out a batch |
|---|
| 363 | + * of objects from the percpu pool and free them as well. |
|---|
| 364 | + */ |
|---|
| 365 | + for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) { |
|---|
| 366 | + objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs); |
|---|
| 367 | + if (!objs[lookahead_count]) |
|---|
| 368 | + break; |
|---|
| 369 | + percpu_pool->obj_free--; |
|---|
| 370 | + } |
|---|
| 371 | + |
|---|
| 372 | +free_to_obj_pool: |
|---|
| 373 | + raw_spin_lock(&pool_lock); |
|---|
| 374 | + work = (obj_pool_free > debug_objects_pool_size) && obj_cache && |
|---|
| 375 | + (obj_nr_tofree < ODEBUG_FREE_WORK_MAX); |
|---|
| 253 | 376 | obj_pool_used--; |
|---|
| 254 | 377 | |
|---|
| 255 | 378 | if (work) { |
|---|
| 256 | | - obj_nr_tofree++; |
|---|
| 379 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); |
|---|
| 257 | 380 | hlist_add_head(&obj->node, &obj_to_free); |
|---|
| 381 | + if (lookahead_count) { |
|---|
| 382 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count); |
|---|
| 383 | + obj_pool_used -= lookahead_count; |
|---|
| 384 | + while (lookahead_count) { |
|---|
| 385 | + hlist_add_head(&objs[--lookahead_count]->node, |
|---|
| 386 | + &obj_to_free); |
|---|
| 387 | + } |
|---|
| 388 | + } |
|---|
| 389 | + |
|---|
| 390 | + if ((obj_pool_free > debug_objects_pool_size) && |
|---|
| 391 | + (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) { |
|---|
| 392 | + int i; |
|---|
| 393 | + |
|---|
| 394 | + /* |
|---|
| 395 | + * Free one more batch of objects from obj_pool. |
|---|
| 396 | + */ |
|---|
| 397 | + for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { |
|---|
| 398 | + obj = __alloc_object(&obj_pool); |
|---|
| 399 | + hlist_add_head(&obj->node, &obj_to_free); |
|---|
| 400 | + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); |
|---|
| 401 | + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); |
|---|
| 402 | + } |
|---|
| 403 | + } |
|---|
| 258 | 404 | } else { |
|---|
| 259 | | - obj_pool_free++; |
|---|
| 405 | + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); |
|---|
| 260 | 406 | hlist_add_head(&obj->node, &obj_pool); |
|---|
| 407 | + if (lookahead_count) { |
|---|
| 408 | + WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count); |
|---|
| 409 | + obj_pool_used -= lookahead_count; |
|---|
| 410 | + while (lookahead_count) { |
|---|
| 411 | + hlist_add_head(&objs[--lookahead_count]->node, |
|---|
| 412 | + &obj_pool); |
|---|
| 413 | + } |
|---|
| 414 | + } |
|---|
| 261 | 415 | } |
|---|
| 262 | | - raw_spin_unlock_irqrestore(&pool_lock, flags); |
|---|
| 263 | | - return work; |
|---|
| 416 | + raw_spin_unlock(&pool_lock); |
|---|
| 417 | + local_irq_restore(flags); |
|---|
| 264 | 418 | } |
|---|
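__free_object() mirrors the allocation side: the common case is an irq-disabled, lock-free push onto the local percpu pool, and only when that pool is full (`ODEBUG_POOL_PERCPU_SIZE`) does it pull a lookahead batch back out and return everything to the global lists under a single `pool_lock` acquisition. A compact userspace model of the spill decision (names and sizes illustrative):

```c
#include <pthread.h>

#define CACHE_MAX 64	/* like ODEBUG_POOL_PERCPU_SIZE */
#define BATCH     16	/* like ODEBUG_BATCH_SIZE       */

struct node { struct node *next; };
struct local_cache { struct node *head; int count; };

static struct node *global_head;
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static void cache_free(struct local_cache *c, struct node *n)
{
	if (c->count < CACHE_MAX) {
		/* Fast path: no shared lock taken at all. */
		n->next = c->head;
		c->head = n;
		c->count++;
		return;
	}

	/* Slow path: spill the freed node plus a batch from the cache. */
	pthread_mutex_lock(&global_lock);
	n->next = global_head;
	global_head = n;
	for (int i = 0; i < BATCH && c->head; i++) {
		struct node *m = c->head;

		c->head = m->next;
		c->count--;
		m->next = global_head;
		global_head = m;
	}
	pthread_mutex_unlock(&global_lock);
}
```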
| 265 | 419 | |
|---|
| 266 | 420 | /* |
|---|
| .. | .. |
|---|
| 269 | 423 | */ |
|---|
| 270 | 424 | static void free_object(struct debug_obj *obj) |
|---|
| 271 | 425 | { |
|---|
| 272 | | - if (__free_object(obj)) |
|---|
| 273 | | - schedule_work(&debug_obj_work); |
|---|
| 426 | + __free_object(obj); |
|---|
| 427 | + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { |
|---|
| 428 | + WRITE_ONCE(obj_freeing, true); |
|---|
| 429 | + schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); |
|---|
| 430 | + } |
|---|
| 274 | 431 | } |
|---|
| 432 | + |
|---|
| 433 | +#ifdef CONFIG_HOTPLUG_CPU |
|---|
| 434 | +static int object_cpu_offline(unsigned int cpu) |
|---|
| 435 | +{ |
|---|
| 436 | + struct debug_percpu_free *percpu_pool; |
|---|
| 437 | + struct hlist_node *tmp; |
|---|
| 438 | + struct debug_obj *obj; |
|---|
| 439 | + unsigned long flags; |
|---|
| 440 | + |
|---|
| 441 | + /* Remote access is safe as the CPU is dead already */ |
|---|
| 442 | + percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); |
|---|
| 443 | + hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) { |
|---|
| 444 | + hlist_del(&obj->node); |
|---|
| 445 | + kmem_cache_free(obj_cache, obj); |
|---|
| 446 | + } |
|---|
| 447 | + |
|---|
| 448 | + raw_spin_lock_irqsave(&pool_lock, flags); |
|---|
| 449 | + obj_pool_used -= percpu_pool->obj_free; |
|---|
| 450 | + debug_objects_freed += percpu_pool->obj_free; |
|---|
| 451 | + raw_spin_unlock_irqrestore(&pool_lock, flags); |
|---|
| 452 | + |
|---|
| 453 | + percpu_pool->obj_free = 0; |
|---|
| 454 | + |
|---|
| 455 | + return 0; |
|---|
| 456 | +} |
|---|
| 457 | +#endif |
|---|
| 275 | 458 | |
|---|
| 276 | 459 | /* |
|---|
| 277 | 460 | * We run out of memory. That means we probably have tons of objects |
|---|
| .. | .. |
|---|
| 315 | 498 | |
|---|
| 316 | 499 | static void debug_print_object(struct debug_obj *obj, char *msg) |
|---|
| 317 | 500 | { |
|---|
| 318 | | - struct debug_obj_descr *descr = obj->descr; |
|---|
| 501 | + const struct debug_obj_descr *descr = obj->descr; |
|---|
| 319 | 502 | static int limit; |
|---|
| 503 | + |
|---|
| 504 | + /* |
|---|
| 505 | + * Don't report if lookup_object_or_alloc() by the current thread |
|---|
| 506 | + * failed because lookup_object_or_alloc()/debug_objects_oom() by a |
|---|
| 507 | + * concurrent thread turned off debug_objects_enabled and cleared |
|---|
| 508 | + * the hash buckets. |
|---|
| 509 | + */ |
|---|
| 510 | + if (!debug_objects_enabled) |
|---|
| 511 | + return; |
|---|
| 320 | 512 | |
|---|
| 321 | 513 | if (limit < 5 && descr != descr_test) { |
|---|
| 322 | 514 | void *hint = descr->debug_hint ? |
|---|
| .. | .. |
|---|
| 368 | 560 | WARN_ON(1); |
|---|
| 369 | 561 | } |
|---|
| 370 | 562 | |
|---|
| 563 | +static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b, |
|---|
| 564 | + const struct debug_obj_descr *descr, |
|---|
| 565 | + bool onstack, bool alloc_ifstatic) |
|---|
| 566 | +{ |
|---|
| 567 | + struct debug_obj *obj = lookup_object(addr, b); |
|---|
| 568 | + enum debug_obj_state state = ODEBUG_STATE_NONE; |
|---|
| 569 | + |
|---|
| 570 | + if (likely(obj)) |
|---|
| 571 | + return obj; |
|---|
| 572 | + |
|---|
| 573 | + /* |
|---|
| 574 | + * debug_object_init() unconditionally allocates untracked |
|---|
| 575 | + * objects. It does not matter whether it is a static object or |
|---|
| 576 | + * not. |
|---|
| 577 | + * |
|---|
| 578 | + * debug_object_assert_init() and debug_object_activate() allow |
|---|
| 579 | + * allocation only if the descriptor callback confirms that the |
|---|
| 580 | + * object is static and considered initialized. For non-static |
|---|
| 581 | + * objects the allocation needs to be done from the fixup callback. |
|---|
| 582 | + */ |
|---|
| 583 | + if (unlikely(alloc_ifstatic)) { |
|---|
| 584 | + if (!descr->is_static_object || !descr->is_static_object(addr)) |
|---|
| 585 | + return ERR_PTR(-ENOENT); |
|---|
| 586 | + /* Statically allocated objects are considered initialized */ |
|---|
| 587 | + state = ODEBUG_STATE_INIT; |
|---|
| 588 | + } |
|---|
| 589 | + |
|---|
| 590 | + obj = alloc_object(addr, b, descr); |
|---|
| 591 | + if (likely(obj)) { |
|---|
| 592 | + obj->state = state; |
|---|
| 593 | + debug_object_is_on_stack(addr, onstack); |
|---|
| 594 | + return obj; |
|---|
| 595 | + } |
|---|
| 596 | + |
|---|
| 597 | + /* Out of memory. Do the cleanup outside of the locked region */ |
|---|
| 598 | + debug_objects_enabled = 0; |
|---|
| 599 | + return NULL; |
|---|
| 600 | +} |
|---|
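lookup_object_or_alloc() folds the old open-coded "maybe it is a static object" handling into one helper with a three-way return convention: a valid pointer (already tracked, or a static object that was just put under tracking), NULL (allocation failed and debugging was turned off), or ERR_PTR(-ENOENT) (untracked and not static). Condensed from the callers below:

```c
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (likely(!IS_ERR_OR_NULL(obj))) {
		/* Tracked object: run the normal state machine. */
	} else if (!obj) {
		/* NULL: OOM, debug_objects_enabled was cleared. */
		debug_objects_oom();
	} else {
		/* ERR_PTR(-ENOENT): not initialized, invoke the fixup. */
	}
```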
| 601 | + |
|---|
| 602 | +static void debug_objects_fill_pool(void) |
|---|
| 603 | +{ |
|---|
| 604 | + /* |
|---|
| 605 | + * On RT enabled kernels the pool refill must happen in preemptible |
|---|
| 606 | + * context: |
|---|
| 607 | + */ |
|---|
| 608 | + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) |
|---|
| 609 | + fill_pool(); |
|---|
| 610 | +} |
|---|
| 611 | + |
|---|
| 371 | 612 | static void |
|---|
| 372 | | -__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) |
|---|
| 613 | +__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) |
|---|
| 373 | 614 | { |
|---|
| 374 | 615 | enum debug_obj_state state; |
|---|
| 375 | 616 | struct debug_bucket *db; |
|---|
| 376 | 617 | struct debug_obj *obj; |
|---|
| 377 | 618 | unsigned long flags; |
|---|
| 378 | 619 | |
|---|
| 379 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
|---|
| 380 | | - if (preempt_count() == 0 && !irqs_disabled()) |
|---|
| 381 | | -#endif |
|---|
| 382 | | - fill_pool(); |
|---|
| 620 | + debug_objects_fill_pool(); |
|---|
| 383 | 621 | |
|---|
| 384 | 622 | db = get_bucket((unsigned long) addr); |
|---|
| 385 | 623 | |
|---|
| 386 | 624 | raw_spin_lock_irqsave(&db->lock, flags); |
|---|
| 387 | 625 | |
|---|
| 388 | | - obj = lookup_object(addr, db); |
|---|
| 389 | | - if (!obj) { |
|---|
| 390 | | - obj = alloc_object(addr, db, descr); |
|---|
| 391 | | - if (!obj) { |
|---|
| 392 | | - debug_objects_enabled = 0; |
|---|
| 393 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 394 | | - debug_objects_oom(); |
|---|
| 395 | | - return; |
|---|
| 396 | | - } |
|---|
| 397 | | - debug_object_is_on_stack(addr, onstack); |
|---|
| 626 | + obj = lookup_object_or_alloc(addr, db, descr, onstack, false); |
|---|
| 627 | + if (unlikely(!obj)) { |
|---|
| 628 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 629 | + debug_objects_oom(); |
|---|
| 630 | + return; |
|---|
| 398 | 631 | } |
|---|
| 399 | 632 | |
|---|
| 400 | 633 | switch (obj->state) { |
|---|
| .. | .. |
|---|
| 405 | 638 | break; |
|---|
| 406 | 639 | |
|---|
| 407 | 640 | case ODEBUG_STATE_ACTIVE: |
|---|
| 408 | | - debug_print_object(obj, "init"); |
|---|
| 409 | 641 | state = obj->state; |
|---|
| 410 | 642 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 643 | + debug_print_object(obj, "init"); |
|---|
| 411 | 644 | debug_object_fixup(descr->fixup_init, addr, state); |
|---|
| 412 | 645 | return; |
|---|
| 413 | 646 | |
|---|
| 414 | 647 | case ODEBUG_STATE_DESTROYED: |
|---|
| 648 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 415 | 649 | debug_print_object(obj, "init"); |
|---|
| 416 | | - break; |
|---|
| 650 | + return; |
|---|
| 417 | 651 | default: |
|---|
| 418 | 652 | break; |
|---|
| 419 | 653 | } |
|---|
| .. | .. |
|---|
| 426 | 660 | * @addr: address of the object |
|---|
| 427 | 661 | * @descr: pointer to an object specific debug description structure |
|---|
| 428 | 662 | */ |
|---|
| 429 | | -void debug_object_init(void *addr, struct debug_obj_descr *descr) |
|---|
| 663 | +void debug_object_init(void *addr, const struct debug_obj_descr *descr) |
|---|
| 430 | 664 | { |
|---|
| 431 | 665 | if (!debug_objects_enabled) |
|---|
| 432 | 666 | return; |
|---|
| .. | .. |
|---|
| 441 | 675 | * @addr: address of the object |
|---|
| 442 | 676 | * @descr: pointer to an object specific debug description structure |
|---|
| 443 | 677 | */ |
|---|
| 444 | | -void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) |
|---|
| 678 | +void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) |
|---|
| 445 | 679 | { |
|---|
| 446 | 680 | if (!debug_objects_enabled) |
|---|
| 447 | 681 | return; |
|---|
| .. | .. |
|---|
| 456 | 690 | * @descr: pointer to an object specific debug description structure |
|---|
| 457 | 691 | * Returns 0 for success, -EINVAL for check failed. |
|---|
| 458 | 692 | */ |
|---|
| 459 | | -int debug_object_activate(void *addr, struct debug_obj_descr *descr) |
|---|
| 693 | +int debug_object_activate(void *addr, const struct debug_obj_descr *descr) |
|---|
| 460 | 694 | { |
|---|
| 695 | + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; |
|---|
| 461 | 696 | enum debug_obj_state state; |
|---|
| 462 | 697 | struct debug_bucket *db; |
|---|
| 463 | 698 | struct debug_obj *obj; |
|---|
| 464 | 699 | unsigned long flags; |
|---|
| 465 | 700 | int ret; |
|---|
| 466 | | - struct debug_obj o = { .object = addr, |
|---|
| 467 | | - .state = ODEBUG_STATE_NOTAVAILABLE, |
|---|
| 468 | | - .descr = descr }; |
|---|
| 469 | 701 | |
|---|
| 470 | 702 | if (!debug_objects_enabled) |
|---|
| 471 | 703 | return 0; |
|---|
| 704 | + |
|---|
| 705 | + debug_objects_fill_pool(); |
|---|
| 472 | 706 | |
|---|
| 473 | 707 | db = get_bucket((unsigned long) addr); |
|---|
| 474 | 708 | |
|---|
| 475 | 709 | raw_spin_lock_irqsave(&db->lock, flags); |
|---|
| 476 | 710 | |
|---|
| 477 | | - obj = lookup_object(addr, db); |
|---|
| 478 | | - if (obj) { |
|---|
| 711 | + obj = lookup_object_or_alloc(addr, db, descr, false, true); |
|---|
| 712 | + if (likely(!IS_ERR_OR_NULL(obj))) { |
|---|
| 713 | + bool print_object = false; |
|---|
| 714 | + |
|---|
| 479 | 715 | switch (obj->state) { |
|---|
| 480 | 716 | case ODEBUG_STATE_INIT: |
|---|
| 481 | 717 | case ODEBUG_STATE_INACTIVE: |
|---|
| .. | .. |
|---|
| 484 | 720 | break; |
|---|
| 485 | 721 | |
|---|
| 486 | 722 | case ODEBUG_STATE_ACTIVE: |
|---|
| 487 | | - debug_print_object(obj, "activate"); |
|---|
| 488 | 723 | state = obj->state; |
|---|
| 489 | 724 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 725 | + debug_print_object(obj, "activate"); |
|---|
| 490 | 726 | ret = debug_object_fixup(descr->fixup_activate, addr, state); |
|---|
| 491 | 727 | return ret ? 0 : -EINVAL; |
|---|
| 492 | 728 | |
|---|
| 493 | 729 | case ODEBUG_STATE_DESTROYED: |
|---|
| 494 | | - debug_print_object(obj, "activate"); |
|---|
| 730 | + print_object = true; |
|---|
| 495 | 731 | ret = -EINVAL; |
|---|
| 496 | 732 | break; |
|---|
| 497 | 733 | default: |
|---|
| .. | .. |
|---|
| 499 | 735 | break; |
|---|
| 500 | 736 | } |
|---|
| 501 | 737 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 738 | + if (print_object) |
|---|
| 739 | + debug_print_object(obj, "activate"); |
|---|
| 502 | 740 | return ret; |
|---|
| 503 | 741 | } |
|---|
| 504 | 742 | |
|---|
| 505 | 743 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 506 | | - /* |
|---|
| 507 | | - * We are here when a static object is activated. We |
|---|
| 508 | | - * let the type specific code confirm whether this is |
|---|
| 509 | | - * true or not. if true, we just make sure that the |
|---|
| 510 | | - * static object is tracked in the object tracker. If |
|---|
| 511 | | - * not, this must be a bug, so we try to fix it up. |
|---|
| 512 | | - */ |
|---|
| 513 | | - if (descr->is_static_object && descr->is_static_object(addr)) { |
|---|
| 514 | | - /* track this static object */ |
|---|
| 515 | | - debug_object_init(addr, descr); |
|---|
| 516 | | - debug_object_activate(addr, descr); |
|---|
| 517 | | - } else { |
|---|
| 518 | | - debug_print_object(&o, "activate"); |
|---|
| 519 | | - ret = debug_object_fixup(descr->fixup_activate, addr, |
|---|
| 520 | | - ODEBUG_STATE_NOTAVAILABLE); |
|---|
| 521 | | - return ret ? 0 : -EINVAL; |
|---|
| 744 | + |
|---|
| 745 | + /* If NULL the allocation has hit OOM */ |
|---|
| 746 | + if (!obj) { |
|---|
| 747 | + debug_objects_oom(); |
|---|
| 748 | + return 0; |
|---|
| 522 | 749 | } |
|---|
| 523 | | - return 0; |
|---|
| 750 | + |
|---|
| 751 | + /* Object is neither static nor tracked. It's not initialized */ |
|---|
| 752 | + debug_print_object(&o, "activate"); |
|---|
| 753 | + ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE); |
|---|
| 754 | + return ret ? 0 : -EINVAL; |
|---|
| 524 | 755 | } |
|---|
| 525 | 756 | EXPORT_SYMBOL_GPL(debug_object_activate); |
|---|
| 526 | 757 | |
|---|
| .. | .. |
|---|
| 529 | 760 | * @addr: address of the object |
|---|
| 530 | 761 | * @descr: pointer to an object specific debug description structure |
|---|
| 531 | 762 | */ |
|---|
| 532 | | -void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) |
|---|
| 763 | +void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) |
|---|
| 533 | 764 | { |
|---|
| 534 | 765 | struct debug_bucket *db; |
|---|
| 535 | 766 | struct debug_obj *obj; |
|---|
| 536 | 767 | unsigned long flags; |
|---|
| 768 | + bool print_object = false; |
|---|
| 537 | 769 | |
|---|
| 538 | 770 | if (!debug_objects_enabled) |
|---|
| 539 | 771 | return; |
|---|
| .. | .. |
|---|
| 551 | 783 | if (!obj->astate) |
|---|
| 552 | 784 | obj->state = ODEBUG_STATE_INACTIVE; |
|---|
| 553 | 785 | else |
|---|
| 554 | | - debug_print_object(obj, "deactivate"); |
|---|
| 786 | + print_object = true; |
|---|
| 555 | 787 | break; |
|---|
| 556 | 788 | |
|---|
| 557 | 789 | case ODEBUG_STATE_DESTROYED: |
|---|
| 558 | | - debug_print_object(obj, "deactivate"); |
|---|
| 790 | + print_object = true; |
|---|
| 559 | 791 | break; |
|---|
| 560 | 792 | default: |
|---|
| 561 | 793 | break; |
|---|
| 562 | 794 | } |
|---|
| 563 | | - } else { |
|---|
| 795 | + } |
|---|
| 796 | + |
|---|
| 797 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 798 | + if (!obj) { |
|---|
| 564 | 799 | struct debug_obj o = { .object = addr, |
|---|
| 565 | 800 | .state = ODEBUG_STATE_NOTAVAILABLE, |
|---|
| 566 | 801 | .descr = descr }; |
|---|
| 567 | 802 | |
|---|
| 568 | 803 | debug_print_object(&o, "deactivate"); |
|---|
| 804 | + } else if (print_object) { |
|---|
| 805 | + debug_print_object(obj, "deactivate"); |
|---|
| 569 | 806 | } |
|---|
| 570 | | - |
|---|
| 571 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 572 | 807 | } |
|---|
| 573 | 808 | EXPORT_SYMBOL_GPL(debug_object_deactivate); |
|---|
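The same transformation recurs in the deactivate, destroy, free, active_state and init/activate paths: the decision to report is recorded under `db->lock`, but `debug_print_object()` itself only runs after the lock is dropped, so the slow print path (which can end up taking console/printk locks) never nests inside the bucket lock. The distilled shape:

```c
	bool print_object = false;

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object(addr, db);
	if (obj && obj->state == ODEBUG_STATE_DESTROYED)
		print_object = true;		/* decide under the lock */
	raw_spin_unlock_irqrestore(&db->lock, flags);

	if (print_object)			/* report after unlock */
		debug_print_object(obj, "deactivate");
```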
| 574 | 809 | |
|---|
| .. | .. |
|---|
| 577 | 812 | * @addr: address of the object |
|---|
| 578 | 813 | * @descr: pointer to an object specific debug description structure |
|---|
| 579 | 814 | */ |
|---|
| 580 | | -void debug_object_destroy(void *addr, struct debug_obj_descr *descr) |
|---|
| 815 | +void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) |
|---|
| 581 | 816 | { |
|---|
| 582 | 817 | enum debug_obj_state state; |
|---|
| 583 | 818 | struct debug_bucket *db; |
|---|
| 584 | 819 | struct debug_obj *obj; |
|---|
| 585 | 820 | unsigned long flags; |
|---|
| 821 | + bool print_object = false; |
|---|
| 586 | 822 | |
|---|
| 587 | 823 | if (!debug_objects_enabled) |
|---|
| 588 | 824 | return; |
|---|
| .. | .. |
|---|
| 602 | 838 | obj->state = ODEBUG_STATE_DESTROYED; |
|---|
| 603 | 839 | break; |
|---|
| 604 | 840 | case ODEBUG_STATE_ACTIVE: |
|---|
| 605 | | - debug_print_object(obj, "destroy"); |
|---|
| 606 | 841 | state = obj->state; |
|---|
| 607 | 842 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 843 | + debug_print_object(obj, "destroy"); |
|---|
| 608 | 844 | debug_object_fixup(descr->fixup_destroy, addr, state); |
|---|
| 609 | 845 | return; |
|---|
| 610 | 846 | |
|---|
| 611 | 847 | case ODEBUG_STATE_DESTROYED: |
|---|
| 612 | | - debug_print_object(obj, "destroy"); |
|---|
| 848 | + print_object = true; |
|---|
| 613 | 849 | break; |
|---|
| 614 | 850 | default: |
|---|
| 615 | 851 | break; |
|---|
| 616 | 852 | } |
|---|
| 617 | 853 | out_unlock: |
|---|
| 618 | 854 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 855 | + if (print_object) |
|---|
| 856 | + debug_print_object(obj, "destroy"); |
|---|
| 619 | 857 | } |
|---|
| 620 | 858 | EXPORT_SYMBOL_GPL(debug_object_destroy); |
|---|
| 621 | 859 | |
|---|
| .. | .. |
|---|
| 624 | 862 | * @addr: address of the object |
|---|
| 625 | 863 | * @descr: pointer to an object specific debug description structure |
|---|
| 626 | 864 | */ |
|---|
| 627 | | -void debug_object_free(void *addr, struct debug_obj_descr *descr) |
|---|
| 865 | +void debug_object_free(void *addr, const struct debug_obj_descr *descr) |
|---|
| 628 | 866 | { |
|---|
| 629 | 867 | enum debug_obj_state state; |
|---|
| 630 | 868 | struct debug_bucket *db; |
|---|
| .. | .. |
|---|
| 644 | 882 | |
|---|
| 645 | 883 | switch (obj->state) { |
|---|
| 646 | 884 | case ODEBUG_STATE_ACTIVE: |
|---|
| 647 | | - debug_print_object(obj, "free"); |
|---|
| 648 | 885 | state = obj->state; |
|---|
| 649 | 886 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 887 | + debug_print_object(obj, "free"); |
|---|
| 650 | 888 | debug_object_fixup(descr->fixup_free, addr, state); |
|---|
| 651 | 889 | return; |
|---|
| 652 | 890 | default: |
|---|
| .. | .. |
|---|
| 665 | 903 | * @addr: address of the object |
|---|
| 666 | 904 | * @descr: pointer to an object specific debug description structure |
|---|
| 667 | 905 | */ |
|---|
| 668 | | -void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) |
|---|
| 906 | +void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) |
|---|
| 669 | 907 | { |
|---|
| 908 | + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; |
|---|
| 670 | 909 | struct debug_bucket *db; |
|---|
| 671 | 910 | struct debug_obj *obj; |
|---|
| 672 | 911 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 674 | 913 | if (!debug_objects_enabled) |
|---|
| 675 | 914 | return; |
|---|
| 676 | 915 | |
|---|
| 916 | + debug_objects_fill_pool(); |
|---|
| 917 | + |
|---|
| 677 | 918 | db = get_bucket((unsigned long) addr); |
|---|
| 678 | 919 | |
|---|
| 679 | 920 | raw_spin_lock_irqsave(&db->lock, flags); |
|---|
| 921 | + obj = lookup_object_or_alloc(addr, db, descr, false, true); |
|---|
| 922 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 923 | + if (likely(!IS_ERR_OR_NULL(obj))) |
|---|
| 924 | + return; |
|---|
| 680 | 925 | |
|---|
| 681 | | - obj = lookup_object(addr, db); |
|---|
| 926 | + /* If NULL the allocation has hit OOM */ |
|---|
| 682 | 927 | if (!obj) { |
|---|
| 683 | | - struct debug_obj o = { .object = addr, |
|---|
| 684 | | - .state = ODEBUG_STATE_NOTAVAILABLE, |
|---|
| 685 | | - .descr = descr }; |
|---|
| 686 | | - |
|---|
| 687 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 688 | | - /* |
|---|
| 689 | | - * Maybe the object is static, and we let the type specific |
|---|
| 690 | | - * code confirm. Track this static object if true, else invoke |
|---|
| 691 | | - * fixup. |
|---|
| 692 | | - */ |
|---|
| 693 | | - if (descr->is_static_object && descr->is_static_object(addr)) { |
|---|
| 694 | | - /* Track this static object */ |
|---|
| 695 | | - debug_object_init(addr, descr); |
|---|
| 696 | | - } else { |
|---|
| 697 | | - debug_print_object(&o, "assert_init"); |
|---|
| 698 | | - debug_object_fixup(descr->fixup_assert_init, addr, |
|---|
| 699 | | - ODEBUG_STATE_NOTAVAILABLE); |
|---|
| 700 | | - } |
|---|
| 928 | + debug_objects_oom(); |
|---|
| 701 | 929 | return; |
|---|
| 702 | 930 | } |
|---|
| 703 | 931 | |
|---|
| 704 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 932 | + /* Object is neither tracked nor static. It's not initialized. */ |
|---|
| 933 | + debug_print_object(&o, "assert_init"); |
|---|
| 934 | + debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE); |
|---|
| 705 | 935 | } |
|---|
| 706 | 936 | EXPORT_SYMBOL_GPL(debug_object_assert_init); |
|---|
| 707 | 937 | |
|---|
| .. | .. |
|---|
| 713 | 943 | * @next: state to move to if expected state is found |
|---|
| 714 | 944 | */ |
|---|
| 715 | 945 | void |
|---|
| 716 | | -debug_object_active_state(void *addr, struct debug_obj_descr *descr, |
|---|
| 946 | +debug_object_active_state(void *addr, const struct debug_obj_descr *descr, |
|---|
| 717 | 947 | unsigned int expect, unsigned int next) |
|---|
| 718 | 948 | { |
|---|
| 719 | 949 | struct debug_bucket *db; |
|---|
| 720 | 950 | struct debug_obj *obj; |
|---|
| 721 | 951 | unsigned long flags; |
|---|
| 952 | + bool print_object = false; |
|---|
| 722 | 953 | |
|---|
| 723 | 954 | if (!debug_objects_enabled) |
|---|
| 724 | 955 | return; |
|---|
| .. | .. |
|---|
| 734 | 965 | if (obj->astate == expect) |
|---|
| 735 | 966 | obj->astate = next; |
|---|
| 736 | 967 | else |
|---|
| 737 | | - debug_print_object(obj, "active_state"); |
|---|
| 968 | + print_object = true; |
|---|
| 738 | 969 | break; |
|---|
| 739 | 970 | |
|---|
| 740 | 971 | default: |
|---|
| 741 | | - debug_print_object(obj, "active_state"); |
|---|
| 972 | + print_object = true; |
|---|
| 742 | 973 | break; |
|---|
| 743 | 974 | } |
|---|
| 744 | | - } else { |
|---|
| 975 | + } |
|---|
| 976 | + |
|---|
| 977 | + raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 978 | + if (!obj) { |
|---|
| 745 | 979 | struct debug_obj o = { .object = addr, |
|---|
| 746 | 980 | .state = ODEBUG_STATE_NOTAVAILABLE, |
|---|
| 747 | 981 | .descr = descr }; |
|---|
| 748 | 982 | |
|---|
| 749 | 983 | debug_print_object(&o, "active_state"); |
|---|
| 984 | + } else if (print_object) { |
|---|
| 985 | + debug_print_object(obj, "active_state"); |
|---|
| 750 | 986 | } |
|---|
| 751 | | - |
|---|
| 752 | | - raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 753 | 987 | } |
|---|
| 754 | 988 | EXPORT_SYMBOL_GPL(debug_object_active_state); |
|---|
| 755 | 989 | |
|---|
| .. | .. |
|---|
| 757 | 991 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
|---|
| 758 | 992 | { |
|---|
| 759 | 993 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
|---|
| 760 | | - struct debug_obj_descr *descr; |
|---|
| 994 | + const struct debug_obj_descr *descr; |
|---|
| 761 | 995 | enum debug_obj_state state; |
|---|
| 762 | 996 | struct debug_bucket *db; |
|---|
| 763 | 997 | struct hlist_node *tmp; |
|---|
| 764 | 998 | struct debug_obj *obj; |
|---|
| 765 | 999 | int cnt, objs_checked = 0; |
|---|
| 766 | | - bool work = false; |
|---|
| 767 | 1000 | |
|---|
| 768 | 1001 | saddr = (unsigned long) address; |
|---|
| 769 | 1002 | eaddr = saddr + size; |
|---|
| .. | .. |
|---|
| 785 | 1018 | |
|---|
| 786 | 1019 | switch (obj->state) { |
|---|
| 787 | 1020 | case ODEBUG_STATE_ACTIVE: |
|---|
| 788 | | - debug_print_object(obj, "free"); |
|---|
| 789 | 1021 | descr = obj->descr; |
|---|
| 790 | 1022 | state = obj->state; |
|---|
| 791 | 1023 | raw_spin_unlock_irqrestore(&db->lock, flags); |
|---|
| 1024 | + debug_print_object(obj, "free"); |
|---|
| 792 | 1025 | debug_object_fixup(descr->fixup_free, |
|---|
| 793 | 1026 | (void *) oaddr, state); |
|---|
| 794 | 1027 | goto repeat; |
|---|
| 795 | 1028 | default: |
|---|
| 796 | 1029 | hlist_del(&obj->node); |
|---|
| 797 | | - work |= __free_object(obj); |
|---|
| 1030 | + __free_object(obj); |
|---|
| 798 | 1031 | break; |
|---|
| 799 | 1032 | } |
|---|
| 800 | 1033 | } |
|---|
| .. | .. |
|---|
| 810 | 1043 | debug_objects_maxchecked = objs_checked; |
|---|
| 811 | 1044 | |
|---|
| 812 | 1045 | /* Schedule work to actually kmem_cache_free() objects */ |
|---|
| 813 | | - if (work) |
|---|
| 814 | | - schedule_work(&debug_obj_work); |
|---|
| 1046 | + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { |
|---|
| 1047 | + WRITE_ONCE(obj_freeing, true); |
|---|
| 1048 | + schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); |
|---|
| 1049 | + } |
|---|
| 815 | 1050 | } |
|---|
| 816 | 1051 | |
|---|
| 817 | 1052 | void debug_check_no_obj_freed(const void *address, unsigned long size) |
|---|
| .. | .. |
|---|
| 825 | 1060 | |
|---|
| 826 | 1061 | static int debug_stats_show(struct seq_file *m, void *v) |
|---|
| 827 | 1062 | { |
|---|
| 1063 | + int cpu, obj_percpu_free = 0; |
|---|
| 1064 | + |
|---|
| 1065 | + for_each_possible_cpu(cpu) |
|---|
| 1066 | + obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu); |
|---|
| 1067 | + |
|---|
| 828 | 1068 | seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); |
|---|
| 829 | 1069 | seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); |
|---|
| 830 | 1070 | seq_printf(m, "warnings :%d\n", debug_objects_warnings); |
|---|
| 831 | 1071 | seq_printf(m, "fixups :%d\n", debug_objects_fixups); |
|---|
| 832 | | - seq_printf(m, "pool_free :%d\n", obj_pool_free); |
|---|
| 1072 | + seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free); |
|---|
| 1073 | + seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); |
|---|
| 833 | 1074 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); |
|---|
| 834 | | - seq_printf(m, "pool_used :%d\n", obj_pool_used); |
|---|
| 1075 | + seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); |
|---|
| 835 | 1076 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); |
|---|
| 836 | | - seq_printf(m, "on_free_list :%d\n", obj_nr_tofree); |
|---|
| 1077 | + seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree)); |
|---|
| 837 | 1078 | seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); |
|---|
| 838 | 1079 | seq_printf(m, "objs_freed :%d\n", debug_objects_freed); |
|---|
| 839 | 1080 | return 0; |
|---|
| 840 | 1081 | } |
|---|
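A worked example of the adjustment described in the earlier comment: `obj_pool_free` ignores percpu-cached objects and `obj_pool_used` still counts them, so the displayed values add and subtract the percpu total. Numbers below are purely illustrative:

```c
#include <stdio.h>

int main(void)
{
	int obj_pool_free   = 300;	/* global pool only       */
	int obj_pool_used   = 150;	/* includes percpu-cached */
	int obj_percpu_free = 20;	/* summed over all CPUs   */

	printf("pool_free :%d\n", obj_pool_free + obj_percpu_free);	/* 320 */
	printf("pool_used :%d\n", obj_pool_used - obj_percpu_free);	/* 130 */
	return 0;
}
```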
| 841 | | - |
|---|
| 842 | | -static int debug_stats_open(struct inode *inode, struct file *filp) |
|---|
| 843 | | -{ |
|---|
| 844 | | - return single_open(filp, debug_stats_show, NULL); |
|---|
| 845 | | -} |
|---|
| 846 | | - |
|---|
| 847 | | -static const struct file_operations debug_stats_fops = { |
|---|
| 848 | | - .open = debug_stats_open, |
|---|
| 849 | | - .read = seq_read, |
|---|
| 850 | | - .llseek = seq_lseek, |
|---|
| 851 | | - .release = single_release, |
|---|
| 852 | | -}; |
|---|
| 1082 | +DEFINE_SHOW_ATTRIBUTE(debug_stats); |
|---|
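DEFINE_SHOW_ATTRIBUTE(debug_stats) generates the open function and file_operations that were previously written by hand. From include/linux/seq_file.h (reproduced from memory, so treat as approximate), the expansion is roughly:

```c
static int debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_stats_show, inode->i_private);
}

static const struct file_operations debug_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = debug_stats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
```

The only behavioral difference from the removed code is that the seq_file data comes from inode->i_private rather than a hard-coded NULL, which is equivalent here because the debugfs file is created with NULL data.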
| 853 | 1083 | |
|---|
| 854 | 1084 | static int __init debug_objects_init_debugfs(void) |
|---|
| 855 | 1085 | { |
|---|
| 856 | | - struct dentry *dbgdir, *dbgstats; |
|---|
| 1086 | + struct dentry *dbgdir; |
|---|
| 857 | 1087 | |
|---|
| 858 | 1088 | if (!debug_objects_enabled) |
|---|
| 859 | 1089 | return 0; |
|---|
| 860 | 1090 | |
|---|
| 861 | 1091 | dbgdir = debugfs_create_dir("debug_objects", NULL); |
|---|
| 862 | | - if (!dbgdir) |
|---|
| 863 | | - return -ENOMEM; |
|---|
| 864 | 1092 | |
|---|
| 865 | | - dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, |
|---|
| 866 | | - &debug_stats_fops); |
|---|
| 867 | | - if (!dbgstats) |
|---|
| 868 | | - goto err; |
|---|
| 1093 | + debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops); |
|---|
| 869 | 1094 | |
|---|
| 870 | 1095 | return 0; |
|---|
| 871 | | - |
|---|
| 872 | | -err: |
|---|
| 873 | | - debugfs_remove(dbgdir); |
|---|
| 874 | | - |
|---|
| 875 | | - return -ENOMEM; |
|---|
| 876 | 1096 | } |
|---|
| 877 | 1097 | __initcall(debug_objects_init_debugfs); |
|---|
| 878 | 1098 | |
|---|
| .. | .. |
|---|
| 889 | 1109 | unsigned long dummy2[3]; |
|---|
| 890 | 1110 | }; |
|---|
| 891 | 1111 | |
|---|
| 892 | | -static __initdata struct debug_obj_descr descr_type_test; |
|---|
| 1112 | +static __initconst const struct debug_obj_descr descr_type_test; |
|---|
| 893 | 1113 | |
|---|
| 894 | 1114 | static bool __init is_static_object(void *addr) |
|---|
| 895 | 1115 | { |
|---|
| .. | .. |
|---|
| 1014 | 1234 | return res; |
|---|
| 1015 | 1235 | } |
|---|
| 1016 | 1236 | |
|---|
| 1017 | | -static __initdata struct debug_obj_descr descr_type_test = { |
|---|
| 1237 | +static __initconst const struct debug_obj_descr descr_type_test = { |
|---|
| 1018 | 1238 | .name = "selftest", |
|---|
| 1019 | 1239 | .is_static_object = is_static_object, |
|---|
| 1020 | 1240 | .fixup_init = fixup_init, |
|---|
| .. | .. |
|---|
| 1133 | 1353 | hlist_add_head(&obj->node, &objects); |
|---|
| 1134 | 1354 | } |
|---|
| 1135 | 1355 | |
|---|
| 1356 | + debug_objects_allocated += i; |
|---|
| 1357 | + |
|---|
| 1136 | 1358 | /* |
|---|
| 1137 | | - * When debug_objects_mem_init() is called we know that only |
|---|
| 1138 | | - * one CPU is up, so disabling interrupts is enough |
|---|
| 1139 | | - * protection. This avoids the lockdep hell of lock ordering. |
|---|
| 1359 | + * debug_objects_mem_init() is now called early, while only one CPU is up |
|---|
| 1360 | + * and interrupts are disabled, so it is safe to replace the |
|---|
| 1361 | + * active object references. |
|---|
| 1140 | 1362 | */ |
|---|
| 1141 | | - local_irq_disable(); |
|---|
| 1142 | 1363 | |
|---|
| 1143 | 1364 | /* Remove the statically allocated objects from the pool */ |
|---|
| 1144 | 1365 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) |
|---|
| .. | .. |
|---|
| 1159 | 1380 | cnt++; |
|---|
| 1160 | 1381 | } |
|---|
| 1161 | 1382 | } |
|---|
| 1162 | | - local_irq_enable(); |
|---|
| 1163 | 1383 | |
|---|
| 1164 | 1384 | pr_debug("%d of %d active objects replaced\n", |
|---|
| 1165 | 1385 | cnt, obj_pool_used); |
|---|
| .. | .. |
|---|
| 1180 | 1400 | */ |
|---|
| 1181 | 1401 | void __init debug_objects_mem_init(void) |
|---|
| 1182 | 1402 | { |
|---|
| 1403 | + int cpu, extras; |
|---|
| 1404 | + |
|---|
| 1183 | 1405 | if (!debug_objects_enabled) |
|---|
| 1184 | 1406 | return; |
|---|
| 1407 | + |
|---|
| 1408 | + /* |
|---|
| 1409 | + * Initialize the percpu object pools |
|---|
| 1410 | + * |
|---|
| 1411 | + * Initialization is not strictly necessary, but is done for |
|---|
| 1412 | + * completeness. |
|---|
| 1413 | + */ |
|---|
| 1414 | + for_each_possible_cpu(cpu) |
|---|
| 1415 | + INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu)); |
|---|
| 1185 | 1416 | |
|---|
| 1186 | 1417 | obj_cache = kmem_cache_create("debug_objects_cache", |
|---|
| 1187 | 1418 | sizeof (struct debug_obj), 0, |
|---|
| .. | .. |
|---|
| 1192 | 1423 | debug_objects_enabled = 0; |
|---|
| 1193 | 1424 | kmem_cache_destroy(obj_cache); |
|---|
| 1194 | 1425 | pr_warn("out of memory.\n"); |
|---|
| 1426 | + return; |
|---|
| 1195 | 1427 | } else |
|---|
| 1196 | 1428 | debug_objects_selftest(); |
|---|
| 1429 | + |
|---|
| 1430 | +#ifdef CONFIG_HOTPLUG_CPU |
|---|
| 1431 | + cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL, |
|---|
| 1432 | + object_cpu_offline); |
|---|
| 1433 | +#endif |
|---|
| 1197 | 1434 | |
|---|
| 1198 | 1435 | /* |
|---|
| 1199 | 1436 | * Increase the thresholds for allocating and freeing objects |
|---|
| 1200 | 1437 | * according to the number of possible CPUs available in the system. |
|---|
| 1201 | 1438 | */ |
|---|
| 1202 | | - debug_objects_pool_size += num_possible_cpus() * 32; |
|---|
| 1203 | | - debug_objects_pool_min_level += num_possible_cpus() * 4; |
|---|
| 1439 | + extras = num_possible_cpus() * ODEBUG_BATCH_SIZE; |
|---|
| 1440 | + debug_objects_pool_size += extras; |
|---|
| 1441 | + debug_objects_pool_min_level += extras; |
|---|
| 1204 | 1442 | } |
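The per-CPU scaling is now expressed with a single constant: every possible CPU can cache up to one batch of objects on both the alloc and free sides, so the thresholds grow by `num_possible_cpus() * ODEBUG_BATCH_SIZE` instead of the old ad-hoc 32/4 split. Illustrative arithmetic, assuming 8 possible CPUs:

```c
#include <stdio.h>

#define ODEBUG_POOL_SIZE      1024
#define ODEBUG_POOL_MIN_LEVEL  256
#define ODEBUG_BATCH_SIZE       16

int main(void)
{
	int cpus   = 8;				/* assumed for illustration */
	int extras = cpus * ODEBUG_BATCH_SIZE;	/* 128 */

	printf("pool_size     : %d\n", ODEBUG_POOL_SIZE + extras);	/* 1152 */
	printf("pool_min_level: %d\n", ODEBUG_POOL_MIN_LEVEL + extras);	/*  384 */
	return 0;
}
```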
|---|