.. | .. |
1091 | 1091 | 	struct ftrace_page	*next;
1092 | 1092 | 	struct dyn_ftrace	*records;
1093 | 1093 | 	int			index;
1094 |      | -	int			size;
     | 1094 | +	int			order;
1095 | 1095 | };
1096 | 1096 | 
1097 | 1097 | #define ENTRY_SIZE sizeof(struct dyn_ftrace)
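
The new field stores the page-allocation order of the group's record buffer, so the group's footprint can always be rederived instead of caching an entry count: `1 << order` pages, `PAGE_SIZE << order` bytes, and `(PAGE_SIZE << order) / ENTRY_SIZE` records. A stand-alone sketch of that arithmetic, using made-up constants rather than the kernel's PAGE_SIZE or dyn_ftrace size:

#include <stdio.h>

#define SKETCH_PAGE_SIZE  4096UL	/* stand-in for the kernel's PAGE_SIZE */
#define SKETCH_ENTRY_SIZE 16UL		/* stand-in for sizeof(struct dyn_ftrace) */

int main(void)
{
	for (int order = 0; order <= 3; order++) {
		unsigned long pages   = 1UL << order;			/* pages backing the group */
		unsigned long bytes   = SKETCH_PAGE_SIZE << order;	/* total buffer size */
		unsigned long entries = bytes / SKETCH_ENTRY_SIZE;	/* records that fit */

		printf("order %d: %lu pages, %lu bytes, %lu entries\n",
		       order, pages, bytes, entries);
	}
	return 0;
}
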
.. | .. |
1538 | 1538 | 	key.flags = end;	/* overload flags, as it is unsigned long */
1539 | 1539 | 
1540 | 1540 | 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1541 |      | -		if (end < pg->records[0].ip ||
     | 1541 | +		if (pg->index == 0 ||
     | 1542 | +		    end < pg->records[0].ip ||
1542 | 1543 | 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1543 | 1544 | 			continue;
1544 | 1545 | 		rec = bsearch(&key, pg->records, pg->index,
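
The added `pg->index == 0` test only keeps the range comparison from reading `records[0]` of a group that holds no entries yet. A hedged sketch of the same guard with invented names (`chunk`, `find_candidate`), not the kernel's:

#include <stdio.h>

struct item {
	unsigned long ip;
};

struct chunk {
	struct chunk *next;
	struct item *items;
	int count;			/* number of valid entries in items[] */
};

/* Return the first chunk whose entries could cover [start, end], else NULL. */
static struct chunk *find_candidate(struct chunk *head,
				    unsigned long start, unsigned long end)
{
	struct chunk *c;

	for (c = head; c; c = c->next) {
		if (c->count == 0 ||		/* empty group: items[0] is not valid data */
		    end < c->items[0].ip ||
		    start > c->items[c->count - 1].ip)
			continue;
		return c;
	}
	return NULL;
}

int main(void)
{
	struct item one = { .ip = 0x100 };
	struct chunk full = { .items = &one, .count = 1 };
	struct chunk empty = { .next = &full };	/* count == 0, items unset */

	printf("candidate count: %d\n", find_candidate(&empty, 0x100, 0x104)->count);
	return 0;
}
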
.. | .. |
3187 | 3188 | 	ftrace_number_of_groups++;
3188 | 3189 | 
3189 | 3190 | 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3190 |      | -	pg->size = cnt;
     | 3191 | +	pg->order = order;
3191 | 3192 | 
3192 | 3193 | 	if (cnt > count)
3193 | 3194 | 		cnt = count;
.. | .. |
3195 | 3196 | 	return cnt;
3196 | 3197 | }
3197 | 3198 | 
     | 3199 | +static void ftrace_free_pages(struct ftrace_page *pages)
     | 3200 | +{
     | 3201 | +	struct ftrace_page *pg = pages;
     | 3202 | +
     | 3203 | +	while (pg) {
     | 3204 | +		if (pg->records) {
     | 3205 | +			free_pages((unsigned long)pg->records, pg->order);
     | 3206 | +			ftrace_number_of_pages -= 1 << pg->order;
     | 3207 | +		}
     | 3208 | +		pages = pg->next;
     | 3209 | +		kfree(pg);
     | 3210 | +		pg = pages;
     | 3211 | +		ftrace_number_of_groups--;
     | 3212 | +	}
     | 3213 | +}
     | 3214 | +
3198 | 3215 | static struct ftrace_page *
3199 | 3216 | ftrace_allocate_pages(unsigned long num_to_init)
3200 | 3217 | {
3201 | 3218 | 	struct ftrace_page *start_pg;
3202 | 3219 | 	struct ftrace_page *pg;
3203 |      | -	int order;
3204 | 3220 | 	int cnt;
3205 | 3221 | 
3206 | 3222 | 	if (!num_to_init)
.. | .. |
3234 | 3250 | 	return start_pg;
3235 | 3251 | 
3236 | 3252 |  free_pages:
3237 |      | -	pg = start_pg;
3238 |      | -	while (pg) {
3239 |      | -		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3240 |      | -		if (order >= 0)
3241 |      | -			free_pages((unsigned long)pg->records, order);
3242 |      | -		start_pg = pg->next;
3243 |      | -		kfree(pg);
3244 |      | -		pg = start_pg;
3245 |      | -		ftrace_number_of_pages -= 1 << order;
3246 |      | -		ftrace_number_of_groups--;
3247 |      | -	}
     | 3253 | +	ftrace_free_pages(start_pg);
3248 | 3254 | 	pr_info("ftrace: FAILED to allocate memory for functions\n");
3249 | 3255 | 	return NULL;
3250 | 3256 | }
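
A note on the helper added above: because each group remembers the order it was allocated with, the error path here (and the later teardown paths) can free the pages and undo the accounting without recomputing the order from an entry count. A user-space analogue as a hedged sketch, with invented names (`group`, `group_free_list`) standing in for the kernel structures:

#include <stdlib.h>

static long pages_accounted;	/* analogue of ftrace_number_of_pages */
static long groups_accounted;	/* analogue of ftrace_number_of_groups */

struct group {
	struct group *next;
	void *records;
	int order;		/* allocation order recorded when the buffer was made */
};

/* Free a whole list of groups, undoing the accounting with the stored order. */
static void group_free_list(struct group *groups)
{
	struct group *g = groups;

	while (g) {
		struct group *next = g->next;

		if (g->records) {
			free(g->records);	/* the kernel frees with free_pages(..., g->order) */
			pages_accounted -= 1L << g->order;
		}
		free(g);
		groups_accounted--;
		g = next;
	}
}

int main(void)
{
	struct group *g = calloc(1, sizeof(*g));

	g->order = 1;				/* pretend a two-page buffer */
	g->records = malloc(2 * 4096);
	pages_accounted += 1L << g->order;
	groups_accounted++;

	group_free_list(g);			/* counters return to zero */
	return 0;
}
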
.. | .. |
5389 | 5395 | 		ret = 0;
5390 | 5396 | 	}
5391 | 5397 | 
5392 |      | -	if (unlikely(ret && new_direct)) {
5393 |      | -		direct->count++;
5394 |      | -		list_del_rcu(&new_direct->next);
5395 |      | -		synchronize_rcu_tasks();
5396 |      | -		kfree(new_direct);
5397 |      | -		ftrace_direct_func_count--;
     | 5398 | +	if (ret) {
     | 5399 | +		direct->addr = old_addr;
     | 5400 | +		if (unlikely(new_direct)) {
     | 5401 | +			direct->count++;
     | 5402 | +			list_del_rcu(&new_direct->next);
     | 5403 | +			synchronize_rcu_tasks();
     | 5404 | +			kfree(new_direct);
     | 5405 | +			ftrace_direct_func_count--;
     | 5406 | +		}
5398 | 5407 | 	}
5399 | 5408 | 
5400 | 5409 |  out_unlock:
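
The reworked error path restores the previous address in `direct->addr` whenever the modification fails, and only then does the extra cleanup for a freshly allocated direct entry. A hedged, self-contained sketch of that save-then-restore shape, with invented names (`entry`, `apply_change`):

#include <stdio.h>

struct entry {
	unsigned long addr;
};

/* Stand-in for the step that can fail; returns 0 on success. */
static int apply_change(struct entry *e)
{
	return e->addr ? 0 : -1;	/* pretend a zero target is rejected */
}

static int modify_entry(struct entry *e, unsigned long new_addr)
{
	unsigned long old_addr = e->addr;
	int ret;

	e->addr = new_addr;		/* publish the new target first */
	ret = apply_change(e);
	if (ret)
		e->addr = old_addr;	/* failed: leave the entry as callers last saw it */
	return ret;
}

int main(void)
{
	struct entry e = { .addr = 0x1000 };

	modify_entry(&e, 0);		/* fails, so addr stays 0x1000 */
	printf("addr after failed update: %#lx\n", e.addr);
	return 0;
}
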
.. | .. |
6187 | 6196 | 			       unsigned long *start,
6188 | 6197 | 			       unsigned long *end)
6189 | 6198 | {
     | 6199 | +	struct ftrace_page *pg_unuse = NULL;
6190 | 6200 | 	struct ftrace_page *start_pg;
6191 | 6201 | 	struct ftrace_page *pg;
6192 | 6202 | 	struct dyn_ftrace *rec;
     | 6203 | +	unsigned long skipped = 0;
6193 | 6204 | 	unsigned long count;
6194 | 6205 | 	unsigned long *p;
6195 | 6206 | 	unsigned long addr;
.. | .. |
6235 | 6246 | 	p = start;
6236 | 6247 | 	pg = start_pg;
6237 | 6248 | 	while (p < end) {
     | 6249 | +		unsigned long end_offset;
6238 | 6250 | 		addr = ftrace_call_adjust(*p++);
6239 | 6251 | 		/*
6240 | 6252 | 		 * Some architecture linkers will pad between
.. | .. |
6242 | 6254 | 		 * object files to satisfy alignments.
6243 | 6255 | 		 * Skip any NULL pointers.
6244 | 6256 | 		 */
6245 |      | -		if (!addr)
     | 6257 | +		if (!addr) {
     | 6258 | +			skipped++;
6246 | 6259 | 			continue;
     | 6260 | +		}
6247 | 6261 | 
6248 |      | -		if (pg->index == pg->size) {
     | 6262 | +		end_offset = (pg->index+1) * sizeof(pg->records[0]);
     | 6263 | +		if (end_offset > PAGE_SIZE << pg->order) {
6249 | 6264 | 			/* We should have allocated enough */
6250 | 6265 | 			if (WARN_ON(!pg->next))
6251 | 6266 | 				break;
.. | .. |
6256 | 6271 | 		rec->ip = addr;
6257 | 6272 | 	}
6258 | 6273 | 
6259 |      | -	/* We should have used all pages */
6260 |      | -	WARN_ON(pg->next);
     | 6274 | +	if (pg->next) {
     | 6275 | +		pg_unuse = pg->next;
     | 6276 | +		pg->next = NULL;
     | 6277 | +	}
6261 | 6278 | 
6262 | 6279 | 	/* Assign the last page to ftrace_pages */
6263 | 6280 | 	ftrace_pages = pg;
.. | .. |
6279 | 6296 |  out:
6280 | 6297 | 	mutex_unlock(&ftrace_lock);
6281 | 6298 | 
     | 6299 | +	/* We should have used all pages unless we skipped some */
     | 6300 | +	if (pg_unuse) {
     | 6301 | +		WARN_ON(!skipped);
     | 6302 | +		ftrace_free_pages(pg_unuse);
     | 6303 | +	}
6282 | 6304 | 	return ret;
6283 | 6305 | }
6284 | 6306 | 
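
Two details in this hunk: whether a group is full is now derived from the stored order (would one more record run past `PAGE_SIZE << pg->order` bytes?), and NULL entries are counted so that trailing, never-used groups can be handed to ftrace_free_pages() instead of tripping the old unconditional WARN_ON. A hedged sketch of just the capacity arithmetic, with stand-in names and constants:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL		/* stand-in for PAGE_SIZE */

struct record {
	unsigned long ip;
};

struct grp {
	struct record *records;
	int index;			/* entries already stored */
	int order;			/* group holds PAGE_SIZE << order bytes */
};

/* True if storing one more record would overflow the group's allocation. */
static bool grp_full(const struct grp *g)
{
	unsigned long end_offset = (g->index + 1) * sizeof(g->records[0]);

	return end_offset > (SKETCH_PAGE_SIZE << g->order);
}

int main(void)
{
	struct grp g = { .index = 0, .order = 0 };
	unsigned long capacity = (SKETCH_PAGE_SIZE << g.order) / sizeof(struct record);

	g.index = (int)capacity;	/* pretend the group is completely used */
	printf("capacity %lu, full now? %s\n", capacity, grp_full(&g) ? "yes" : "no");
	return 0;
}
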
.. | .. |
6414 | 6436 | 	struct ftrace_page **last_pg;
6415 | 6437 | 	struct ftrace_page *tmp_page = NULL;
6416 | 6438 | 	struct ftrace_page *pg;
6417 |      | -	int order;
6418 | 6439 | 
6419 | 6440 | 	mutex_lock(&ftrace_lock);
6420 | 6441 | 
.. | .. |
6465 | 6486 | 		/* Needs to be called outside of ftrace_lock */
6466 | 6487 | 		clear_mod_from_hashes(pg);
6467 | 6488 | 
6468 |      | -		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6469 |      | -		if (order >= 0)
6470 |      | -			free_pages((unsigned long)pg->records, order);
     | 6489 | +		if (pg->records) {
     | 6490 | +			free_pages((unsigned long)pg->records, pg->order);
     | 6491 | +			ftrace_number_of_pages -= 1 << pg->order;
     | 6492 | +		}
6471 | 6493 | 		tmp_page = pg->next;
6472 | 6494 | 		kfree(pg);
6473 |      | -		ftrace_number_of_pages -= 1 << order;
6474 | 6495 | 		ftrace_number_of_groups--;
6475 | 6496 | 	}
6476 | 6497 | }
.. | .. |
6788 | 6809 | 	struct ftrace_mod_map *mod_map = NULL;
6789 | 6810 | 	struct ftrace_init_func *func, *func_next;
6790 | 6811 | 	struct list_head clear_hash;
6791 |      | -	int order;
6792 | 6812 | 
6793 | 6813 | 	INIT_LIST_HEAD(&clear_hash);
6794 | 6814 | 
.. | .. |
6826 | 6846 | 		ftrace_update_tot_cnt--;
6827 | 6847 | 		if (!pg->index) {
6828 | 6848 | 			*last_pg = pg->next;
6829 |      | -			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6830 |      | -			if (order >= 0)
6831 |      | -				free_pages((unsigned long)pg->records, order);
6832 |      | -			ftrace_number_of_pages -= 1 << order;
     | 6849 | +			if (pg->records) {
     | 6850 | +				free_pages((unsigned long)pg->records, pg->order);
     | 6851 | +				ftrace_number_of_pages -= 1 << pg->order;
     | 6852 | +			}
6833 | 6853 | 			ftrace_number_of_groups--;
6834 | 6854 | 			kfree(pg);
6835 | 6855 | 			pg = container_of(last_pg, struct ftrace_page, next);
---|