| .. | .. |
|---|
| 18 | 18 | #include <linux/clocksource.h> |
|---|
| 19 | 19 | #include <linux/sched/task.h> |
|---|
| 20 | 20 | #include <linux/kallsyms.h> |
|---|
| 21 | +#include <linux/security.h> |
|---|
| 21 | 22 | #include <linux/seq_file.h> |
|---|
| 22 | | -#include <linux/suspend.h> |
|---|
| 23 | 23 | #include <linux/tracefs.h> |
|---|
| 24 | 24 | #include <linux/hardirq.h> |
|---|
| 25 | 25 | #include <linux/kthread.h> |
|---|
| .. | .. |
|---|
| 41 | 41 | #include <asm/sections.h> |
|---|
| 42 | 42 | #include <asm/setup.h> |
|---|
| 43 | 43 | |
|---|
| 44 | +#include "ftrace_internal.h" |
|---|
| 44 | 45 | #include "trace_output.h" |
|---|
| 45 | 46 | #include "trace_stat.h" |
|---|
| 46 | 47 | |
|---|
| .. | .. |
|---|
| 61 | 62 | }) |
|---|
| 62 | 63 | |
|---|
| 63 | 64 | /* hash bits for specific function selection */ |
|---|
| 64 | | -#define FTRACE_HASH_BITS 7 |
|---|
| 65 | | -#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
|---|
| 66 | 65 | #define FTRACE_HASH_DEFAULT_BITS 10 |
|---|
| 67 | 66 | #define FTRACE_HASH_MAX_BITS 12 |
|---|
| 68 | 67 | |
|---|
| .. | .. |
|---|
| 70 | 69 | #define INIT_OPS_HASH(opsname) \ |
|---|
| 71 | 70 | .func_hash = &opsname.local_hash, \ |
|---|
| 72 | 71 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), |
|---|
| 73 | | -#define ASSIGN_OPS_HASH(opsname, val) \ |
|---|
| 74 | | - .func_hash = val, \ |
|---|
| 75 | | - .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), |
|---|
| 76 | 72 | #else |
|---|
| 77 | 73 | #define INIT_OPS_HASH(opsname) |
|---|
| 78 | | -#define ASSIGN_OPS_HASH(opsname, val) |
|---|
| 79 | 74 | #endif |
|---|
| 80 | 75 | |
|---|
| 81 | | -static struct ftrace_ops ftrace_list_end __read_mostly = { |
|---|
| 76 | +enum { |
|---|
| 77 | + FTRACE_MODIFY_ENABLE_FL = (1 << 0), |
|---|
| 78 | + FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), |
|---|
| 79 | +}; |
|---|
| 80 | + |
|---|
| 81 | +struct ftrace_ops ftrace_list_end __read_mostly = { |
|---|
| 82 | 82 | .func = ftrace_stub, |
|---|
| 83 | 83 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, |
|---|
| 84 | 84 | INIT_OPS_HASH(ftrace_list_end) |
|---|
| .. | .. |
|---|
| 102 | 102 | |
|---|
| 103 | 103 | tr = ops->private; |
|---|
| 104 | 104 | |
|---|
| 105 | | - return tr->function_pids != NULL; |
|---|
| 105 | + return tr->function_pids != NULL || tr->function_no_pids != NULL; |
|---|
| 106 | 106 | } |
|---|
| 107 | 107 | |
|---|
| 108 | 108 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
|---|
| .. | .. |
|---|
| 113 | 113 | */ |
|---|
| 114 | 114 | static int ftrace_disabled __read_mostly; |
|---|
| 115 | 115 | |
|---|
| 116 | | -static DEFINE_MUTEX(ftrace_lock); |
|---|
| 116 | +DEFINE_MUTEX(ftrace_lock); |
|---|
| 117 | 117 | |
|---|
| 118 | | -static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
|---|
| 118 | +struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
|---|
| 119 | 119 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
|---|
| 120 | | -static struct ftrace_ops global_ops; |
|---|
| 120 | +struct ftrace_ops global_ops; |
|---|
| 121 | 121 | |
|---|
| 122 | 122 | #if ARCH_SUPPORTS_FTRACE_OPS |
|---|
| 123 | 123 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
|---|
| 124 | 124 | struct ftrace_ops *op, struct pt_regs *regs); |
|---|
| 125 | 125 | #else |
|---|
| 126 | 126 | /* See comment below, where ftrace_ops_list_func is defined */ |
|---|
| 127 | | -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip, |
|---|
| 128 | | - struct ftrace_ops *op, struct pt_regs *regs); |
|---|
| 129 | | -#define ftrace_ops_list_func ftrace_ops_no_ops |
|---|
| 127 | +static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); |
|---|
| 128 | +#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) |
|---|
| 130 | 129 | #endif |
|---|
| 131 | | - |
|---|
| 132 | | -/* |
|---|
| 133 | | - * Traverse the ftrace_global_list, invoking all entries. The reason that we |
|---|
| 134 | | - * can use rcu_dereference_raw_notrace() is that elements removed from this list |
|---|
| 135 | | - * are simply leaked, so there is no need to interact with a grace-period |
|---|
| 136 | | - * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle |
|---|
| 137 | | - * concurrent insertions into the ftrace_global_list. |
|---|
| 138 | | - * |
|---|
| 139 | | - * Silly Alpha and silly pointer-speculation compiler optimizations! |
|---|
| 140 | | - */ |
|---|
| 141 | | -#define do_for_each_ftrace_op(op, list) \ |
|---|
| 142 | | - op = rcu_dereference_raw_notrace(list); \ |
|---|
| 143 | | - do |
|---|
| 144 | | - |
|---|
| 145 | | -/* |
|---|
| 146 | | - * Optimized for just a single item in the list (as that is the normal case). |
|---|
| 147 | | - */ |
|---|
| 148 | | -#define while_for_each_ftrace_op(op) \ |
|---|
| 149 | | - while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \ |
|---|
| 150 | | - unlikely((op) != &ftrace_list_end)) |
|---|
| 151 | 130 | |
|---|
| 152 | 131 | static inline void ftrace_ops_init(struct ftrace_ops *ops) |
|---|
| 153 | 132 | { |
|---|
| .. | .. |
|---|
| 164 | 143 | struct ftrace_ops *op, struct pt_regs *regs) |
|---|
| 165 | 144 | { |
|---|
| 166 | 145 | struct trace_array *tr = op->private; |
|---|
| 146 | + int pid; |
|---|
| 167 | 147 | |
|---|
| 168 | | - if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid)) |
|---|
| 169 | | - return; |
|---|
| 148 | + if (tr) { |
|---|
| 149 | + pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); |
|---|
| 150 | + if (pid == FTRACE_PID_IGNORE) |
|---|
| 151 | + return; |
|---|
| 152 | + if (pid != FTRACE_PID_TRACE && |
|---|
| 153 | + pid != current->pid) |
|---|
| 154 | + return; |
|---|
| 155 | + } |
|---|
| 170 | 156 | |
|---|
| 171 | 157 | op->saved_func(ip, parent_ip, op, regs); |
|---|
| 172 | | -} |
|---|
| 173 | | - |
|---|
| 174 | | -static void ftrace_sync(struct work_struct *work) |
|---|
| 175 | | -{ |
|---|
| 176 | | - /* |
|---|
| 177 | | - * This function is just a stub to implement a hard force |
|---|
| 178 | | - * of synchronize_sched(). This requires synchronizing |
|---|
| 179 | | - * tasks even in userspace and idle. |
|---|
| 180 | | - * |
|---|
| 181 | | - * Yes, function tracing is rude. |
|---|
| 182 | | - */ |
|---|
| 183 | 158 | } |
|---|
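
The rewritten check above turns the old per-CPU boolean into a tri-state value. A standalone sketch of the same decision, with the sentinel values assumed from kernel/trace/trace.h:

```c
#include <stdbool.h>

/* Sentinels assumed from kernel/trace/trace.h */
enum { FTRACE_PID_IGNORE = -1, FTRACE_PID_TRACE = -2 };

/*
 * Mirrors ftrace_pid_func(): the per-CPU value either filters the
 * current task out entirely, traces every task, or names the one
 * pid that is allowed through.
 */
static bool pid_may_trace(int percpu_pid, int current_pid)
{
	if (percpu_pid == FTRACE_PID_IGNORE)
		return false;
	if (percpu_pid != FTRACE_PID_TRACE && percpu_pid != current_pid)
		return false;
	return true;
}
```
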
| 184 | 159 | |
|---|
| 185 | 160 | static void ftrace_sync_ipi(void *data) |
|---|
| .. | .. |
|---|
| 187 | 162 | /* Probably not needed, but do it anyway */ |
|---|
| 188 | 163 | smp_rmb(); |
|---|
| 189 | 164 | } |
|---|
| 190 | | - |
|---|
| 191 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| 192 | | -static void update_function_graph_func(void); |
|---|
| 193 | | - |
|---|
| 194 | | -/* Both enabled by default (can be cleared by function_graph tracer flags */ |
|---|
| 195 | | -static bool fgraph_sleep_time = true; |
|---|
| 196 | | -static bool fgraph_graph_time = true; |
|---|
| 197 | | - |
|---|
| 198 | | -#else |
|---|
| 199 | | -static inline void update_function_graph_func(void) { } |
|---|
| 200 | | -#endif |
|---|
| 201 | | - |
|---|
| 202 | 165 | |
|---|
| 203 | 166 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) |
|---|
| 204 | 167 | { |
|---|
| .. | .. |
|---|
| 267 | 230 | /* |
|---|
| 268 | 231 | * For static tracing, we need to be a bit more careful. |
|---|
| 269 | 232 | * The function change takes affect immediately. Thus, |
|---|
| 270 | | - * we need to coorditate the setting of the function_trace_ops |
|---|
| 233 | + * we need to coordinate the setting of the function_trace_ops |
|---|
| 271 | 234 | * with the setting of the ftrace_trace_function. |
|---|
| 272 | 235 | * |
|---|
| 273 | 236 | * Set the function to the list ops, which will call the |
|---|
| .. | .. |
|---|
| 279 | 242 | * Make sure all CPUs see this. Yes this is slow, but static |
|---|
| 280 | 243 | * tracing is slow and nasty to have enabled. |
|---|
| 281 | 244 | */ |
|---|
| 282 | | - schedule_on_each_cpu(ftrace_sync); |
|---|
| 245 | + synchronize_rcu_tasks_rude(); |
|---|
| 283 | 246 | /* Now all cpus are using the list ops. */ |
|---|
| 284 | 247 | function_trace_op = set_function_trace_op; |
|---|
| 285 | 248 | /* Make sure the function_trace_op is visible on all CPUs */ |
|---|
| .. | .. |
|---|
| 336 | 299 | |
|---|
| 337 | 300 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
|---|
| 338 | 301 | |
|---|
| 339 | | -static int __register_ftrace_function(struct ftrace_ops *ops) |
|---|
| 302 | +int __register_ftrace_function(struct ftrace_ops *ops) |
|---|
| 340 | 303 | { |
|---|
| 341 | 304 | if (ops->flags & FTRACE_OPS_FL_DELETED) |
|---|
| 342 | 305 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 357 | 320 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) |
|---|
| 358 | 321 | ops->flags |= FTRACE_OPS_FL_SAVE_REGS; |
|---|
| 359 | 322 | #endif |
|---|
| 323 | + if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) |
|---|
| 324 | + return -EBUSY; |
|---|
| 360 | 325 | |
|---|
| 361 | 326 | if (!core_kernel_data((unsigned long)ops)) |
|---|
| 362 | 327 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
|---|
| .. | .. |
|---|
| 377 | 342 | return 0; |
|---|
| 378 | 343 | } |
|---|
| 379 | 344 | |
|---|
| 380 | | -static int __unregister_ftrace_function(struct ftrace_ops *ops) |
|---|
| 345 | +int __unregister_ftrace_function(struct ftrace_ops *ops) |
|---|
| 381 | 346 | { |
|---|
| 382 | 347 | int ret; |
|---|
| 383 | 348 | |
|---|
| .. | .. |
|---|
| 494 | 459 | |
|---|
| 495 | 460 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| 496 | 461 | /* function graph compares on total time */ |
|---|
| 497 | | -static int function_stat_cmp(void *p1, void *p2) |
|---|
| 462 | +static int function_stat_cmp(const void *p1, const void *p2) |
|---|
| 498 | 463 | { |
|---|
| 499 | | - struct ftrace_profile *a = p1; |
|---|
| 500 | | - struct ftrace_profile *b = p2; |
|---|
| 464 | + const struct ftrace_profile *a = p1; |
|---|
| 465 | + const struct ftrace_profile *b = p2; |
|---|
| 501 | 466 | |
|---|
| 502 | 467 | if (a->time < b->time) |
|---|
| 503 | 468 | return -1; |
|---|
| .. | .. |
|---|
| 508 | 473 | } |
|---|
| 509 | 474 | #else |
|---|
| 510 | 475 | /* not function graph compares against hits */ |
|---|
| 511 | | -static int function_stat_cmp(void *p1, void *p2) |
|---|
| 476 | +static int function_stat_cmp(const void *p1, const void *p2) |
|---|
| 512 | 477 | { |
|---|
| 513 | | - struct ftrace_profile *a = p1; |
|---|
| 514 | | - struct ftrace_profile *b = p2; |
|---|
| 478 | + const struct ftrace_profile *a = p1; |
|---|
| 479 | + const struct ftrace_profile *b = p2; |
|---|
| 515 | 480 | |
|---|
| 516 | 481 | if (a->counter < b->counter) |
|---|
| 517 | 482 | return -1; |
|---|
| .. | .. |
|---|
| 817 | 782 | } |
|---|
| 818 | 783 | |
|---|
| 819 | 784 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| 785 | +static bool fgraph_graph_time = true; |
|---|
| 786 | + |
|---|
| 787 | +void ftrace_graph_graph_time_control(bool enable) |
|---|
| 788 | +{ |
|---|
| 789 | + fgraph_graph_time = enable; |
|---|
| 790 | +} |
|---|
| 791 | + |
|---|
| 820 | 792 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
|---|
| 821 | 793 | { |
|---|
| 822 | | - int index = current->curr_ret_stack; |
|---|
| 794 | + struct ftrace_ret_stack *ret_stack; |
|---|
| 823 | 795 | |
|---|
| 824 | 796 | function_profile_call(trace->func, 0, NULL, NULL); |
|---|
| 825 | 797 | |
|---|
| .. | .. |
|---|
| 827 | 799 | if (!current->ret_stack) |
|---|
| 828 | 800 | return 0; |
|---|
| 829 | 801 | |
|---|
| 830 | | - if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) |
|---|
| 831 | | - current->ret_stack[index].subtime = 0; |
|---|
| 802 | + ret_stack = ftrace_graph_get_ret_stack(current, 0); |
|---|
| 803 | + if (ret_stack) |
|---|
| 804 | + ret_stack->subtime = 0; |
|---|
| 832 | 805 | |
|---|
| 833 | 806 | return 1; |
|---|
| 834 | 807 | } |
|---|
| 835 | 808 | |
|---|
| 836 | 809 | static void profile_graph_return(struct ftrace_graph_ret *trace) |
|---|
| 837 | 810 | { |
|---|
| 811 | + struct ftrace_ret_stack *ret_stack; |
|---|
| 838 | 812 | struct ftrace_profile_stat *stat; |
|---|
| 839 | 813 | unsigned long long calltime; |
|---|
| 840 | 814 | struct ftrace_profile *rec; |
|---|
| .. | .. |
|---|
| 852 | 826 | calltime = trace->rettime - trace->calltime; |
|---|
| 853 | 827 | |
|---|
| 854 | 828 | if (!fgraph_graph_time) { |
|---|
| 855 | | - int index; |
|---|
| 856 | | - |
|---|
| 857 | | - index = current->curr_ret_stack; |
|---|
| 858 | 829 | |
|---|
| 859 | 830 | /* Append this call time to the parent time to subtract */ |
|---|
| 860 | | - if (index) |
|---|
| 861 | | - current->ret_stack[index - 1].subtime += calltime; |
|---|
| 831 | + ret_stack = ftrace_graph_get_ret_stack(current, 1); |
|---|
| 832 | + if (ret_stack) |
|---|
| 833 | + ret_stack->subtime += calltime; |
|---|
| 862 | 834 | |
|---|
| 863 | | - if (current->ret_stack[index].subtime < calltime) |
|---|
| 864 | | - calltime -= current->ret_stack[index].subtime; |
|---|
| 835 | + ret_stack = ftrace_graph_get_ret_stack(current, 0); |
|---|
| 836 | + if (ret_stack && ret_stack->subtime < calltime) |
|---|
| 837 | + calltime -= ret_stack->subtime; |
|---|
| 865 | 838 | else |
|---|
| 866 | 839 | calltime = 0; |
|---|
| 867 | 840 | } |
|---|
| .. | .. |
|---|
| 876 | 849 | local_irq_restore(flags); |
|---|
| 877 | 850 | } |
|---|
| 878 | 851 | |
|---|
| 852 | +static struct fgraph_ops fprofiler_ops = { |
|---|
| 853 | + .entryfunc = &profile_graph_entry, |
|---|
| 854 | + .retfunc = &profile_graph_return, |
|---|
| 855 | +}; |
|---|
| 856 | + |
|---|
| 879 | 857 | static int register_ftrace_profiler(void) |
|---|
| 880 | 858 | { |
|---|
| 881 | | - return register_ftrace_graph(&profile_graph_return, |
|---|
| 882 | | - &profile_graph_entry); |
|---|
| 859 | + return register_ftrace_graph(&fprofiler_ops); |
|---|
| 883 | 860 | } |
|---|
| 884 | 861 | |
|---|
| 885 | 862 | static void unregister_ftrace_profiler(void) |
|---|
| 886 | 863 | { |
|---|
| 887 | | - unregister_ftrace_graph(); |
|---|
| 864 | + unregister_ftrace_graph(&fprofiler_ops); |
|---|
| 888 | 865 | } |
|---|
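
Note the shape of the new function-graph API used here: register_ftrace_graph() now takes a single struct fgraph_ops bundling both handlers instead of two bare function pointers. A minimal sketch of a client, mirroring fprofiler_ops above:

```c
#include <linux/ftrace.h>

/* Entry handler: a nonzero return asks for the matching return event. */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

/* Return handler: trace->rettime - trace->calltime is the inclusive time. */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_graph_entry,
	.retfunc	= my_graph_return,
};

/* register_ftrace_graph(&my_gops) attaches both handlers;
 * unregister_ftrace_graph(&my_gops) detaches them. */
```
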
| 889 | 866 | #else |
|---|
| 890 | 867 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
|---|
| .. | .. |
|---|
| 936 | 913 | ftrace_profile_enabled = 0; |
|---|
| 937 | 914 | /* |
|---|
| 938 | 915 | * unregister_ftrace_profiler calls stop_machine |
|---|
| 939 | | - * so this acts like an synchronize_sched. |
|---|
| 916 | + * so this acts like a synchronize_rcu. |
|---|
| 940 | 917 | */ |
|---|
| 941 | 918 | unregister_ftrace_profiler(); |
|---|
| 942 | 919 | } |
|---|
| .. | .. |
|---|
| 1023 | 1000 | } |
|---|
| 1024 | 1001 | #endif /* CONFIG_FUNCTION_PROFILER */ |
|---|
| 1025 | 1002 | |
|---|
| 1026 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| 1027 | | -static int ftrace_graph_active; |
|---|
| 1028 | | -#else |
|---|
| 1029 | | -# define ftrace_graph_active 0 |
|---|
| 1030 | | -#endif |
|---|
| 1031 | | - |
|---|
| 1032 | 1003 | #ifdef CONFIG_DYNAMIC_FTRACE |
|---|
| 1033 | 1004 | |
|---|
| 1034 | 1005 | static struct ftrace_ops *removed_ops; |
|---|
| .. | .. |
|---|
| 1042 | 1013 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
|---|
| 1043 | 1014 | # error Dynamic ftrace depends on MCOUNT_RECORD |
|---|
| 1044 | 1015 | #endif |
|---|
| 1045 | | - |
|---|
| 1046 | | -struct ftrace_func_entry { |
|---|
| 1047 | | - struct hlist_node hlist; |
|---|
| 1048 | | - unsigned long ip; |
|---|
| 1049 | | -}; |
|---|
| 1050 | 1016 | |
|---|
| 1051 | 1017 | struct ftrace_func_probe { |
|---|
| 1052 | 1018 | struct ftrace_probe_ops *probe_ops; |
|---|
| .. | .. |
|---|
| 1069 | 1035 | }; |
|---|
| 1070 | 1036 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) |
|---|
| 1071 | 1037 | |
|---|
| 1072 | | -static struct ftrace_ops global_ops = { |
|---|
| 1038 | +struct ftrace_ops global_ops = { |
|---|
| 1073 | 1039 | .func = ftrace_stub, |
|---|
| 1074 | 1040 | .local_hash.notrace_hash = EMPTY_HASH, |
|---|
| 1075 | 1041 | .local_hash.filter_hash = EMPTY_HASH, |
|---|
| .. | .. |
|---|
| 1088 | 1054 | |
|---|
| 1089 | 1055 | /* |
|---|
| 1090 | 1056 | * Some of the ops may be dynamically allocated, |
|---|
| 1091 | | - * they are freed after a synchronize_sched(). |
|---|
| 1057 | + * they are freed after a synchronize_rcu(). |
|---|
| 1092 | 1058 | */ |
|---|
| 1093 | 1059 | preempt_disable_notrace(); |
|---|
| 1094 | 1060 | |
|---|
| .. | .. |
|---|
| 1130 | 1096 | |
|---|
| 1131 | 1097 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) |
|---|
| 1132 | 1098 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) |
|---|
| 1133 | | - |
|---|
| 1134 | | -/* estimate from running different kernels */ |
|---|
| 1135 | | -#define NR_TO_INIT 10000 |
|---|
| 1136 | 1099 | |
|---|
| 1137 | 1100 | static struct ftrace_page *ftrace_pages_start; |
|---|
| 1138 | 1101 | static struct ftrace_page *ftrace_pages; |
|---|
| .. | .. |
|---|
| 1288 | 1251 | { |
|---|
| 1289 | 1252 | if (!hash || hash == EMPTY_HASH) |
|---|
| 1290 | 1253 | return; |
|---|
| 1291 | | - call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); |
|---|
| 1254 | + call_rcu(&hash->rcu, __free_ftrace_hash_rcu); |
|---|
| 1292 | 1255 | } |
|---|
| 1293 | 1256 | |
|---|
| 1294 | 1257 | void ftrace_free_filter(struct ftrace_ops *ops) |
|---|
| .. | .. |
|---|
| 1332 | 1295 | if (!ftrace_mod) |
|---|
| 1333 | 1296 | return -ENOMEM; |
|---|
| 1334 | 1297 | |
|---|
| 1298 | + INIT_LIST_HEAD(&ftrace_mod->list); |
|---|
| 1335 | 1299 | ftrace_mod->func = kstrdup(func, GFP_KERNEL); |
|---|
| 1336 | 1300 | ftrace_mod->module = kstrdup(module, GFP_KERNEL); |
|---|
| 1337 | 1301 | ftrace_mod->enable = enable; |
|---|
| .. | .. |
|---|
| 1395 | 1359 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, |
|---|
| 1396 | 1360 | struct ftrace_hash *new_hash); |
|---|
| 1397 | 1361 | |
|---|
| 1398 | | -static struct ftrace_hash * |
|---|
| 1399 | | -__ftrace_hash_move(struct ftrace_hash *src) |
|---|
| 1362 | +static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size) |
|---|
| 1400 | 1363 | { |
|---|
| 1401 | 1364 | struct ftrace_func_entry *entry; |
|---|
| 1402 | | - struct hlist_node *tn; |
|---|
| 1403 | | - struct hlist_head *hhd; |
|---|
| 1404 | 1365 | struct ftrace_hash *new_hash; |
|---|
| 1405 | | - int size = src->count; |
|---|
| 1366 | + struct hlist_head *hhd; |
|---|
| 1367 | + struct hlist_node *tn; |
|---|
| 1406 | 1368 | int bits = 0; |
|---|
| 1407 | 1369 | int i; |
|---|
| 1408 | 1370 | |
|---|
| 1409 | 1371 | /* |
|---|
| 1410 | | - * If the new source is empty, just return the empty_hash. |
|---|
| 1372 | + * Use around half the size (max bit of it), but |
|---|
| 1373 | + * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits). |
|---|
| 1411 | 1374 | */ |
|---|
| 1412 | | - if (ftrace_hash_empty(src)) |
|---|
| 1413 | | - return EMPTY_HASH; |
|---|
| 1414 | | - |
|---|
| 1415 | | - /* |
|---|
| 1416 | | - * Make the hash size about 1/2 the # found |
|---|
| 1417 | | - */ |
|---|
| 1418 | | - for (size /= 2; size; size >>= 1) |
|---|
| 1419 | | - bits++; |
|---|
| 1375 | + bits = fls(size / 2); |
|---|
| 1420 | 1376 | |
|---|
| 1421 | 1377 | /* Don't allocate too much */ |
|---|
| 1422 | 1378 | if (bits > FTRACE_HASH_MAX_BITS) |
|---|
| .. | .. |
|---|
| 1436 | 1392 | __add_hash_entry(new_hash, entry); |
|---|
| 1437 | 1393 | } |
|---|
| 1438 | 1394 | } |
|---|
| 1439 | | - |
|---|
| 1440 | 1395 | return new_hash; |
|---|
| 1396 | +} |
|---|
| 1397 | + |
|---|
| 1398 | +static struct ftrace_hash * |
|---|
| 1399 | +__ftrace_hash_move(struct ftrace_hash *src) |
|---|
| 1400 | +{ |
|---|
| 1401 | + int size = src->count; |
|---|
| 1402 | + |
|---|
| 1403 | + /* |
|---|
| 1404 | + * If the new source is empty, just return the empty_hash. |
|---|
| 1405 | + */ |
|---|
| 1406 | + if (ftrace_hash_empty(src)) |
|---|
| 1407 | + return EMPTY_HASH; |
|---|
| 1408 | + |
|---|
| 1409 | + return dup_hash(src, size); |
|---|
| 1441 | 1410 | } |
|---|
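
The sizing rule that dup_hash() now applies: give the duplicate roughly half as many buckets as there are entries, rounded via fls() to a power of two and capped at FTRACE_HASH_MAX_BITS. A userspace sketch of just that arithmetic (fls() modeled with a GCC builtin):

```c
/* fls(x): 1-based position of the most significant set bit; 0 for x == 0 */
static int fls_(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int hash_bits(int count, int max_bits)
{
	int bits = fls_(count / 2);	/* e.g. count = 10 -> fls(5) = 3 -> 8 buckets */

	return bits > max_bits ? max_bits : bits;	/* "Don't allocate too much" */
}
```
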
| 1442 | 1411 | |
|---|
| 1443 | 1412 | static int |
|---|
| .. | .. |
|---|
| 1483 | 1452 | { |
|---|
| 1484 | 1453 | /* |
|---|
| 1485 | 1454 | * The function record is a match if it exists in the filter |
|---|
| 1486 | | - * hash and not in the notrace hash. Note, an emty hash is |
|---|
| 1455 | + * hash and not in the notrace hash. Note, an empty hash is |
|---|
| 1487 | 1456 | * considered a match for the filter hash, but an empty |
|---|
| 1488 | 1457 | * notrace hash is considered not in the notrace hash. |
|---|
| 1489 | 1458 | */ |
|---|
| .. | .. |
|---|
| 1503 | 1472 | * the ip is not in the ops->notrace_hash. |
|---|
| 1504 | 1473 | * |
|---|
| 1505 | 1474 | * This needs to be called with preemption disabled as |
|---|
| 1506 | | - * the hashes are freed with call_rcu_sched(). |
|---|
| 1475 | + * the hashes are freed with call_rcu(). |
|---|
| 1507 | 1476 | */ |
|---|
| 1508 | | -static int |
|---|
| 1477 | +int |
|---|
| 1509 | 1478 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
|---|
| 1510 | 1479 | { |
|---|
| 1511 | 1480 | struct ftrace_ops_hash hash; |
|---|
| .. | .. |
|---|
| 1559 | 1528 | return 0; |
|---|
| 1560 | 1529 | } |
|---|
| 1561 | 1530 | |
|---|
| 1531 | +static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) |
|---|
| 1532 | +{ |
|---|
| 1533 | + struct ftrace_page *pg; |
|---|
| 1534 | + struct dyn_ftrace *rec = NULL; |
|---|
| 1535 | + struct dyn_ftrace key; |
|---|
| 1536 | + |
|---|
| 1537 | + key.ip = start; |
|---|
| 1538 | + key.flags = end; /* overload flags, as it is unsigned long */ |
|---|
| 1539 | + |
|---|
| 1540 | + for (pg = ftrace_pages_start; pg; pg = pg->next) { |
|---|
| 1541 | + if (end < pg->records[0].ip || |
|---|
| 1542 | + start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
|---|
| 1543 | + continue; |
|---|
| 1544 | + rec = bsearch(&key, pg->records, pg->index, |
|---|
| 1545 | + sizeof(struct dyn_ftrace), |
|---|
| 1546 | + ftrace_cmp_recs); |
|---|
| 1547 | + if (rec) |
|---|
| 1548 | + break; |
|---|
| 1549 | + } |
|---|
| 1550 | + return rec; |
|---|
| 1551 | +} |
|---|
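
lookup_rec() factors the binary search out of ftrace_location_range() (see the next hunk) so later code can get at the dyn_ftrace record itself, not just its ip. Records within a page are sorted by ip, and the comparator treats the key as a [start, end] range with end overloaded into the otherwise-unused flags field. A userspace sketch of that range search:

```c
#include <stdlib.h>

struct rec { unsigned long ip; unsigned long flags; };

#define INSN_SIZE 5	/* stand-in for MCOUNT_INSN_SIZE, purely illustrative */

/* The key's .ip is the range start and .flags the range end. */
static int cmp_recs(const void *a, const void *b)
{
	const struct rec *key = a;
	const struct rec *rec = b;

	if (key->flags < rec->ip)		/* range ends before this rec */
		return -1;
	if (key->ip >= rec->ip + INSN_SIZE)	/* range starts after this rec */
		return 1;
	return 0;				/* range touches this rec */
}

static struct rec *range_lookup(struct rec *recs, size_t n,
				unsigned long start, unsigned long end)
{
	struct rec key = { .ip = start, .flags = end };

	return bsearch(&key, recs, n, sizeof(*recs), cmp_recs);
}
```
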
| 1552 | + |
|---|
| 1562 | 1553 | /** |
|---|
| 1563 | 1554 | * ftrace_location_range - return the first address of a traced location |
|---|
| 1564 | 1555 | * if it touches the given ip range |
|---|
| .. | .. |
|---|
| 1573 | 1564 | */ |
|---|
| 1574 | 1565 | unsigned long ftrace_location_range(unsigned long start, unsigned long end) |
|---|
| 1575 | 1566 | { |
|---|
| 1576 | | - struct ftrace_page *pg; |
|---|
| 1577 | 1567 | struct dyn_ftrace *rec; |
|---|
| 1578 | | - struct dyn_ftrace key; |
|---|
| 1579 | 1568 | |
|---|
| 1580 | | - key.ip = start; |
|---|
| 1581 | | - key.flags = end; /* overload flags, as it is unsigned long */ |
|---|
| 1582 | | - |
|---|
| 1583 | | - for (pg = ftrace_pages_start; pg; pg = pg->next) { |
|---|
| 1584 | | - if (end < pg->records[0].ip || |
|---|
| 1585 | | - start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
|---|
| 1586 | | - continue; |
|---|
| 1587 | | - rec = bsearch(&key, pg->records, pg->index, |
|---|
| 1588 | | - sizeof(struct dyn_ftrace), |
|---|
| 1589 | | - ftrace_cmp_recs); |
|---|
| 1590 | | - if (rec) |
|---|
| 1591 | | - return rec->ip; |
|---|
| 1592 | | - } |
|---|
| 1569 | + rec = lookup_rec(start, end); |
|---|
| 1570 | + if (rec) |
|---|
| 1571 | + return rec->ip; |
|---|
| 1593 | 1572 | |
|---|
| 1594 | 1573 | return 0; |
|---|
| 1595 | 1574 | } |
|---|
| .. | .. |
|---|
| 1742 | 1721 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
|---|
| 1743 | 1722 | return false; |
|---|
| 1744 | 1723 | |
|---|
| 1724 | + if (ops->flags & FTRACE_OPS_FL_DIRECT) |
|---|
| 1725 | + rec->flags |= FTRACE_FL_DIRECT; |
|---|
| 1726 | + |
|---|
| 1745 | 1727 | /* |
|---|
| 1746 | 1728 | * If there's only a single callback registered to a |
|---|
| 1747 | 1729 | * function, and the ops has a trampoline registered |
|---|
| .. | .. |
|---|
| 1768 | 1750 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
|---|
| 1769 | 1751 | return false; |
|---|
| 1770 | 1752 | rec->flags--; |
|---|
| 1753 | + |
|---|
| 1754 | + /* |
|---|
| 1755 | + * Only the internal direct_ops should have the |
|---|
| 1756 | + * DIRECT flag set. Thus, if it is removing a |
|---|
| 1757 | + * function, then that function should no longer |
|---|
| 1758 | + * be direct. |
|---|
| 1759 | + */ |
|---|
| 1760 | + if (ops->flags & FTRACE_OPS_FL_DIRECT) |
|---|
| 1761 | + rec->flags &= ~FTRACE_FL_DIRECT; |
|---|
| 1771 | 1762 | |
|---|
| 1772 | 1763 | /* |
|---|
| 1773 | 1764 | * If the rec had REGS enabled and the ops that is |
|---|
| .. | .. |
|---|
| 1803 | 1794 | count++; |
|---|
| 1804 | 1795 | |
|---|
| 1805 | 1796 | /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ |
|---|
| 1806 | | - update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE; |
|---|
| 1797 | + update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE; |
|---|
| 1807 | 1798 | |
|---|
| 1808 | 1799 | /* Shortcut, if we handled all records, we are done. */ |
|---|
| 1809 | 1800 | if (!all && count == hash->count) |
|---|
| .. | .. |
|---|
| 1981 | 1972 | char ins[MCOUNT_INSN_SIZE]; |
|---|
| 1982 | 1973 | int i; |
|---|
| 1983 | 1974 | |
|---|
| 1984 | | - if (probe_kernel_read(ins, p, MCOUNT_INSN_SIZE)) { |
|---|
| 1975 | + if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { |
|---|
| 1985 | 1976 | printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); |
|---|
| 1986 | 1977 | return; |
|---|
| 1987 | 1978 | } |
|---|
| .. | .. |
|---|
| 2025 | 2016 | * modifying the code. @failed should be one of either: |
|---|
| 2026 | 2017 | * EFAULT - if the problem happens on reading the @ip address |
|---|
| 2027 | 2018 | * EINVAL - if what is read at @ip is not what was expected |
|---|
| 2028 | | - * EPERM - if the problem happens on writting to the @ip address |
|---|
| 2019 | + * EPERM - if the problem happens on writing to the @ip address |
|---|
| 2029 | 2020 | */ |
|---|
| 2030 | 2021 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
|---|
| 2031 | 2022 | { |
|---|
| 2032 | 2023 | unsigned long ip = rec ? rec->ip : 0; |
|---|
| 2033 | 2024 | |
|---|
| 2025 | + pr_info("------------[ ftrace bug ]------------\n"); |
|---|
| 2026 | + |
|---|
| 2034 | 2027 | switch (failed) { |
|---|
| 2035 | 2028 | case -EFAULT: |
|---|
| 2036 | | - FTRACE_WARN_ON_ONCE(1); |
|---|
| 2037 | 2029 | pr_info("ftrace faulted on modifying "); |
|---|
| 2038 | | - print_ip_sym(ip); |
|---|
| 2030 | + print_ip_sym(KERN_INFO, ip); |
|---|
| 2039 | 2031 | break; |
|---|
| 2040 | 2032 | case -EINVAL: |
|---|
| 2041 | | - FTRACE_WARN_ON_ONCE(1); |
|---|
| 2042 | 2033 | pr_info("ftrace failed to modify "); |
|---|
| 2043 | | - print_ip_sym(ip); |
|---|
| 2034 | + print_ip_sym(KERN_INFO, ip); |
|---|
| 2044 | 2035 | print_ip_ins(" actual: ", (unsigned char *)ip); |
|---|
| 2045 | 2036 | pr_cont("\n"); |
|---|
| 2046 | 2037 | if (ftrace_expected) { |
|---|
| .. | .. |
|---|
| 2049 | 2040 | } |
|---|
| 2050 | 2041 | break; |
|---|
| 2051 | 2042 | case -EPERM: |
|---|
| 2052 | | - FTRACE_WARN_ON_ONCE(1); |
|---|
| 2053 | 2043 | pr_info("ftrace faulted on writing "); |
|---|
| 2054 | | - print_ip_sym(ip); |
|---|
| 2044 | + print_ip_sym(KERN_INFO, ip); |
|---|
| 2055 | 2045 | break; |
|---|
| 2056 | 2046 | default: |
|---|
| 2057 | | - FTRACE_WARN_ON_ONCE(1); |
|---|
| 2058 | 2047 | pr_info("ftrace faulted on unknown error "); |
|---|
| 2059 | | - print_ip_sym(ip); |
|---|
| 2048 | + print_ip_sym(KERN_INFO, ip); |
|---|
| 2060 | 2049 | } |
|---|
| 2061 | 2050 | print_bug_type(); |
|---|
| 2062 | 2051 | if (rec) { |
|---|
| .. | .. |
|---|
| 2081 | 2070 | ip = ftrace_get_addr_curr(rec); |
|---|
| 2082 | 2071 | pr_cont("\n expected tramp: %lx\n", ip); |
|---|
| 2083 | 2072 | } |
|---|
| 2073 | + |
|---|
| 2074 | + FTRACE_WARN_ON_ONCE(1); |
|---|
| 2084 | 2075 | } |
|---|
| 2085 | 2076 | |
|---|
| 2086 | | -static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) |
|---|
| 2077 | +static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) |
|---|
| 2087 | 2078 | { |
|---|
| 2088 | 2079 | unsigned long flag = 0UL; |
|---|
| 2089 | 2080 | |
|---|
| .. | .. |
|---|
| 2110 | 2101 | * If enabling and the REGS flag does not match the REGS_EN, or |
|---|
| 2111 | 2102 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore |
|---|
| 2112 | 2103 | * this record. Set flags to fail the compare against ENABLED. |
|---|
| 2104 | + * Same for direct calls. |
|---|
| 2113 | 2105 | */ |
|---|
| 2114 | 2106 | if (flag) { |
|---|
| 2115 | | - if (!(rec->flags & FTRACE_FL_REGS) != |
|---|
| 2107 | + if (!(rec->flags & FTRACE_FL_REGS) != |
|---|
| 2116 | 2108 | !(rec->flags & FTRACE_FL_REGS_EN)) |
|---|
| 2117 | 2109 | flag |= FTRACE_FL_REGS; |
|---|
| 2118 | 2110 | |
|---|
| 2119 | | - if (!(rec->flags & FTRACE_FL_TRAMP) != |
|---|
| 2111 | + if (!(rec->flags & FTRACE_FL_TRAMP) != |
|---|
| 2120 | 2112 | !(rec->flags & FTRACE_FL_TRAMP_EN)) |
|---|
| 2121 | 2113 | flag |= FTRACE_FL_TRAMP; |
|---|
| 2114 | + |
|---|
| 2115 | + /* |
|---|
| 2116 | + * Direct calls are special, as count matters. |
|---|
| 2117 | + * We must test the record for direct, if the |
|---|
| 2118 | + * DIRECT and DIRECT_EN do not match, but only |
|---|
| 2119 | + * if the count is 1. That's because, if the |
|---|
| 2120 | + * count is something other than one, we do not |
|---|
| 2121 | + * want the direct enabled (it will be done via the |
|---|
| 2122 | + * direct helper). But if DIRECT_EN is set, and |
|---|
| 2123 | + * the count is not one, we need to clear it. |
|---|
| 2124 | + */ |
|---|
| 2125 | + if (ftrace_rec_count(rec) == 1) { |
|---|
| 2126 | + if (!(rec->flags & FTRACE_FL_DIRECT) != |
|---|
| 2127 | + !(rec->flags & FTRACE_FL_DIRECT_EN)) |
|---|
| 2128 | + flag |= FTRACE_FL_DIRECT; |
|---|
| 2129 | + } else if (rec->flags & FTRACE_FL_DIRECT_EN) { |
|---|
| 2130 | + flag |= FTRACE_FL_DIRECT; |
|---|
| 2131 | + } |
|---|
| 2122 | 2132 | } |
|---|
| 2123 | 2133 | |
|---|
| 2124 | 2134 | /* If the state of this record hasn't changed, then do nothing */ |
|---|
| .. | .. |
|---|
| 2142 | 2152 | rec->flags |= FTRACE_FL_TRAMP_EN; |
|---|
| 2143 | 2153 | else |
|---|
| 2144 | 2154 | rec->flags &= ~FTRACE_FL_TRAMP_EN; |
|---|
| 2155 | + } |
|---|
| 2156 | + if (flag & FTRACE_FL_DIRECT) { |
|---|
| 2157 | + /* |
|---|
| 2158 | + * If there's only one user (direct_ops helper) |
|---|
| 2159 | + * then we can call the direct function |
|---|
| 2160 | + * directly (no ftrace trampoline). |
|---|
| 2161 | + */ |
|---|
| 2162 | + if (ftrace_rec_count(rec) == 1) { |
|---|
| 2163 | + if (rec->flags & FTRACE_FL_DIRECT) |
|---|
| 2164 | + rec->flags |= FTRACE_FL_DIRECT_EN; |
|---|
| 2165 | + else |
|---|
| 2166 | + rec->flags &= ~FTRACE_FL_DIRECT_EN; |
|---|
| 2167 | + } else { |
|---|
| 2168 | + /* |
|---|
| 2169 | + * Can only call directly if there's |
|---|
| 2170 | + * only one callback to the function. |
|---|
| 2171 | + */ |
|---|
| 2172 | + rec->flags &= ~FTRACE_FL_DIRECT_EN; |
|---|
| 2173 | + } |
|---|
| 2145 | 2174 | } |
|---|
| 2146 | 2175 | } |
|---|
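
The DIRECT logic above hinges on the record's reference count: the call site may jump straight to the direct caller only while exactly one callback is attached; the moment a second ops attaches, the site must go back through a trampoline and let the direct_ops helper (added further down) do the dispatch. Reduced to a sketch, with illustrative bit values rather than the kernel's:

```c
#define FL_DIRECT	(1UL << 0)	/* a direct caller is registered here */
#define FL_DIRECT_EN	(1UL << 1)	/* the direct call is actually patched in */

static unsigned long update_direct_en(unsigned long flags, int ref_count)
{
	if (ref_count == 1 && (flags & FL_DIRECT))
		flags |= FL_DIRECT_EN;	/* jump straight to the direct caller */
	else
		flags &= ~FL_DIRECT_EN;	/* >1 user: dispatch via the trampoline */
	return flags;
}
```
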
| 2147 | 2176 | |
|---|
| .. | .. |
|---|
| 2172 | 2201 | * and REGS states. The _EN flags must be disabled though. |
|---|
| 2173 | 2202 | */ |
|---|
| 2174 | 2203 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | |
|---|
| 2175 | | - FTRACE_FL_REGS_EN); |
|---|
| 2204 | + FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN); |
|---|
| 2176 | 2205 | } |
|---|
| 2177 | 2206 | |
|---|
| 2178 | 2207 | ftrace_bug_type = FTRACE_BUG_NOP; |
|---|
| .. | .. |
|---|
| 2182 | 2211 | /** |
|---|
| 2183 | 2212 | * ftrace_update_record, set a record that now is tracing or not |
|---|
| 2184 | 2213 | * @rec: the record to update |
|---|
| 2185 | | - * @enable: set to 1 if the record is tracing, zero to force disable |
|---|
| 2214 | + * @enable: set to true if the record is tracing, false to force disable |
|---|
| 2186 | 2215 | * |
|---|
| 2187 | 2216 | * The records that represent all functions that can be traced need |
|---|
| 2188 | 2217 | * to be updated when tracing has been enabled. |
|---|
| 2189 | 2218 | */ |
|---|
| 2190 | | -int ftrace_update_record(struct dyn_ftrace *rec, int enable) |
|---|
| 2219 | +int ftrace_update_record(struct dyn_ftrace *rec, bool enable) |
|---|
| 2191 | 2220 | { |
|---|
| 2192 | | - return ftrace_check_record(rec, enable, 1); |
|---|
| 2221 | + return ftrace_check_record(rec, enable, true); |
|---|
| 2193 | 2222 | } |
|---|
| 2194 | 2223 | |
|---|
| 2195 | 2224 | /** |
|---|
| 2196 | 2225 | * ftrace_test_record, check if the record has been enabled or not |
|---|
| 2197 | 2226 | * @rec: the record to test |
|---|
| 2198 | | - * @enable: set to 1 to check if enabled, 0 if it is disabled |
|---|
| 2227 | + * @enable: set to true to check if enabled, false if it is disabled |
|---|
| 2199 | 2228 | * |
|---|
| 2200 | 2229 | * The arch code may need to test if a record is already set to |
|---|
| 2201 | 2230 | * tracing to determine how to modify the function code that it |
|---|
| 2202 | 2231 | * represents. |
|---|
| 2203 | 2232 | */ |
|---|
| 2204 | | -int ftrace_test_record(struct dyn_ftrace *rec, int enable) |
|---|
| 2233 | +int ftrace_test_record(struct dyn_ftrace *rec, bool enable) |
|---|
| 2205 | 2234 | { |
|---|
| 2206 | | - return ftrace_check_record(rec, enable, 0); |
|---|
| 2235 | + return ftrace_check_record(rec, enable, false); |
|---|
| 2207 | 2236 | } |
|---|
| 2208 | 2237 | |
|---|
| 2209 | 2238 | static struct ftrace_ops * |
|---|
| .. | .. |
|---|
| 2255 | 2284 | |
|---|
| 2256 | 2285 | if (hash_contains_ip(ip, op->func_hash)) |
|---|
| 2257 | 2286 | return op; |
|---|
| 2258 | | - } |
|---|
| 2287 | + } |
|---|
| 2259 | 2288 | |
|---|
| 2260 | 2289 | return NULL; |
|---|
| 2261 | 2290 | } |
|---|
| .. | .. |
|---|
| 2345 | 2374 | return NULL; |
|---|
| 2346 | 2375 | } |
|---|
| 2347 | 2376 | |
|---|
| 2377 | +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
|---|
| 2378 | +/* Protected by rcu_tasks for reading, and direct_mutex for writing */ |
|---|
| 2379 | +static struct ftrace_hash *direct_functions = EMPTY_HASH; |
|---|
| 2380 | +static DEFINE_MUTEX(direct_mutex); |
|---|
| 2381 | +int ftrace_direct_func_count; |
|---|
| 2382 | + |
|---|
| 2383 | +/* |
|---|
| 2384 | + * Search the direct_functions hash to see if the given instruction pointer |
|---|
| 2385 | + * has a direct caller attached to it. |
|---|
| 2386 | + */ |
|---|
| 2387 | +unsigned long ftrace_find_rec_direct(unsigned long ip) |
|---|
| 2388 | +{ |
|---|
| 2389 | + struct ftrace_func_entry *entry; |
|---|
| 2390 | + |
|---|
| 2391 | + entry = __ftrace_lookup_ip(direct_functions, ip); |
|---|
| 2392 | + if (!entry) |
|---|
| 2393 | + return 0; |
|---|
| 2394 | + |
|---|
| 2395 | + return entry->direct; |
|---|
| 2396 | +} |
|---|
| 2397 | + |
|---|
| 2398 | +static void call_direct_funcs(unsigned long ip, unsigned long pip, |
|---|
| 2399 | + struct ftrace_ops *ops, struct pt_regs *regs) |
|---|
| 2400 | +{ |
|---|
| 2401 | + unsigned long addr; |
|---|
| 2402 | + |
|---|
| 2403 | + addr = ftrace_find_rec_direct(ip); |
|---|
| 2404 | + if (!addr) |
|---|
| 2405 | + return; |
|---|
| 2406 | + |
|---|
| 2407 | + arch_ftrace_set_direct_caller(regs, addr); |
|---|
| 2408 | +} |
|---|
| 2409 | + |
|---|
| 2410 | +struct ftrace_ops direct_ops = { |
|---|
| 2411 | + .func = call_direct_funcs, |
|---|
| 2412 | + .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE |
|---|
| 2413 | + | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS |
|---|
| 2414 | + | FTRACE_OPS_FL_PERMANENT, |
|---|
| 2415 | + /* |
|---|
| 2416 | + * By declaring the main trampoline as this trampoline |
|---|
| 2417 | + * it will never have one allocated for it. Allocated |
|---|
| 2418 | + * trampolines should not call direct functions. |
|---|
| 2419 | + * The direct_ops should only be called by the builtin |
|---|
| 2420 | + * ftrace_regs_caller trampoline. |
|---|
| 2421 | + */ |
|---|
| 2422 | + .trampoline = FTRACE_REGS_ADDR, |
|---|
| 2423 | +}; |
|---|
| 2424 | +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
|---|
| 2425 | + |
|---|
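
From a module's point of view, this machinery is driven through register_ftrace_direct(). A hedged sketch modeled on the kernel's samples/ftrace/ftrace-direct.c; my_tramp stands for an architecture-specific assembly stub (not shown) that saves registers, calls my_direct_func(), and returns:

```c
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

void my_direct_func(struct task_struct *p)
{
	trace_printk("waking up %s-%d\n", p->comm, p->pid);
}

extern void my_tramp(void *);	/* assumed arch asm stub calling my_direct_func() */

static int __init direct_example_init(void)
{
	/* Attach the stub directly to wake_up_process()'s fentry site */
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit direct_example_exit(void)
{
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(direct_example_init);
module_exit(direct_example_exit);
MODULE_LICENSE("GPL");
```
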
| 2348 | 2426 | /** |
|---|
| 2349 | 2427 | * ftrace_get_addr_new - Get the call address to set to |
|---|
| 2350 | 2428 | * @rec: The ftrace record descriptor |
|---|
| 2351 | 2429 | * |
|---|
| 2352 | 2430 | * If the record has the FTRACE_FL_REGS set, that means that it |
|---|
| 2353 | 2431 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS |
|---|
| 2354 | | - * is not not set, then it wants to convert to the normal callback. |
|---|
| 2432 | + * is not set, then it wants to convert to the normal callback. |
|---|
| 2355 | 2433 | * |
|---|
| 2356 | 2434 | * Returns the address of the trampoline to set to |
|---|
| 2357 | 2435 | */ |
|---|
| 2358 | 2436 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) |
|---|
| 2359 | 2437 | { |
|---|
| 2360 | 2438 | struct ftrace_ops *ops; |
|---|
| 2439 | + unsigned long addr; |
|---|
| 2440 | + |
|---|
| 2441 | + if ((rec->flags & FTRACE_FL_DIRECT) && |
|---|
| 2442 | + (ftrace_rec_count(rec) == 1)) { |
|---|
| 2443 | + addr = ftrace_find_rec_direct(rec->ip); |
|---|
| 2444 | + if (addr) |
|---|
| 2445 | + return addr; |
|---|
| 2446 | + WARN_ON_ONCE(1); |
|---|
| 2447 | + } |
|---|
| 2361 | 2448 | |
|---|
| 2362 | 2449 | /* Trampolines take precedence over regs */ |
|---|
| 2363 | 2450 | if (rec->flags & FTRACE_FL_TRAMP) { |
|---|
| .. | .. |
|---|
| 2390 | 2477 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) |
|---|
| 2391 | 2478 | { |
|---|
| 2392 | 2479 | struct ftrace_ops *ops; |
|---|
| 2480 | + unsigned long addr; |
|---|
| 2481 | + |
|---|
| 2482 | + /* Direct calls take precedence over trampolines */ |
|---|
| 2483 | + if (rec->flags & FTRACE_FL_DIRECT_EN) { |
|---|
| 2484 | + addr = ftrace_find_rec_direct(rec->ip); |
|---|
| 2485 | + if (addr) |
|---|
| 2486 | + return addr; |
|---|
| 2487 | + WARN_ON_ONCE(1); |
|---|
| 2488 | + } |
|---|
| 2393 | 2489 | |
|---|
| 2394 | 2490 | /* Trampolines take precedence over regs */ |
|---|
| 2395 | 2491 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
|---|
| .. | .. |
|---|
| 2410 | 2506 | } |
|---|
| 2411 | 2507 | |
|---|
| 2412 | 2508 | static int |
|---|
| 2413 | | -__ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
|---|
| 2509 | +__ftrace_replace_code(struct dyn_ftrace *rec, bool enable) |
|---|
| 2414 | 2510 | { |
|---|
| 2415 | 2511 | unsigned long ftrace_old_addr; |
|---|
| 2416 | 2512 | unsigned long ftrace_addr; |
|---|
| .. | .. |
|---|
| 2442 | 2538 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
|---|
| 2443 | 2539 | } |
|---|
| 2444 | 2540 | |
|---|
| 2445 | | - return -1; /* unknow ftrace bug */ |
|---|
| 2541 | + return -1; /* unknown ftrace bug */ |
|---|
| 2446 | 2542 | } |
|---|
| 2447 | 2543 | |
|---|
| 2448 | | -void __weak ftrace_replace_code(int enable) |
|---|
| 2544 | +void __weak ftrace_replace_code(int mod_flags) |
|---|
| 2449 | 2545 | { |
|---|
| 2450 | 2546 | struct dyn_ftrace *rec; |
|---|
| 2451 | 2547 | struct ftrace_page *pg; |
|---|
| 2548 | + bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; |
|---|
| 2549 | + int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; |
|---|
| 2452 | 2550 | int failed; |
|---|
| 2453 | 2551 | |
|---|
| 2454 | 2552 | if (unlikely(ftrace_disabled)) |
|---|
| .. | .. |
|---|
| 2465 | 2563 | /* Stop processing */ |
|---|
| 2466 | 2564 | return; |
|---|
| 2467 | 2565 | } |
|---|
| 2566 | + if (schedulable) |
|---|
| 2567 | + cond_resched(); |
|---|
| 2468 | 2568 | } while_for_each_ftrace_rec(); |
|---|
| 2469 | 2569 | } |
|---|
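
The argument to ftrace_replace_code() is no longer a bare enable flag but a mask built from the FTRACE_MODIFY_* enum added near the top of this file, so callers can also say whether the (possibly very long) record walk is allowed to reschedule. In sketch form:

```c
enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

/* Caller packs (cf. ftrace_modify_all_code() below):
 *	replace_code(FTRACE_MODIFY_MAY_SLEEP_FL | FTRACE_MODIFY_ENABLE_FL);
 * Callee unpacks: */
static void replace_code(int mod_flags)
{
	int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;

	/* patch every record toward 'enable', calling cond_resched()
	 * between records when 'schedulable' is set */
	(void)enable;
	(void)schedulable;
}
```
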
| 2470 | 2570 | |
|---|
| .. | .. |
|---|
| 2541 | 2641 | } |
|---|
| 2542 | 2642 | |
|---|
| 2543 | 2643 | static int |
|---|
| 2544 | | -ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
|---|
| 2644 | +ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) |
|---|
| 2545 | 2645 | { |
|---|
| 2546 | 2646 | int ret; |
|---|
| 2547 | 2647 | |
|---|
| 2548 | 2648 | if (unlikely(ftrace_disabled)) |
|---|
| 2549 | 2649 | return 0; |
|---|
| 2550 | 2650 | |
|---|
| 2551 | | - ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
|---|
| 2651 | + ret = ftrace_init_nop(mod, rec); |
|---|
| 2552 | 2652 | if (ret) { |
|---|
| 2553 | 2653 | ftrace_bug_type = FTRACE_BUG_INIT; |
|---|
| 2554 | 2654 | ftrace_bug(ret, rec); |
|---|
| .. | .. |
|---|
| 2578 | 2678 | void ftrace_modify_all_code(int command) |
|---|
| 2579 | 2679 | { |
|---|
| 2580 | 2680 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
|---|
| 2681 | + int mod_flags = 0; |
|---|
| 2581 | 2682 | int err = 0; |
|---|
| 2683 | + |
|---|
| 2684 | + if (command & FTRACE_MAY_SLEEP) |
|---|
| 2685 | + mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; |
|---|
| 2582 | 2686 | |
|---|
| 2583 | 2687 | /* |
|---|
| 2584 | 2688 | * If the ftrace_caller calls a ftrace_ops func directly, |
|---|
| .. | .. |
|---|
| 2597 | 2701 | } |
|---|
| 2598 | 2702 | |
|---|
| 2599 | 2703 | if (command & FTRACE_UPDATE_CALLS) |
|---|
| 2600 | | - ftrace_replace_code(1); |
|---|
| 2704 | + ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); |
|---|
| 2601 | 2705 | else if (command & FTRACE_DISABLE_CALLS) |
|---|
| 2602 | | - ftrace_replace_code(0); |
|---|
| 2706 | + ftrace_replace_code(mod_flags); |
|---|
| 2603 | 2707 | |
|---|
| 2604 | 2708 | if (update && ftrace_trace_function != ftrace_ops_list_func) { |
|---|
| 2605 | 2709 | function_trace_op = set_function_trace_op; |
|---|
| .. | .. |
|---|
| 2692 | 2796 | { |
|---|
| 2693 | 2797 | } |
|---|
| 2694 | 2798 | |
|---|
| 2799 | +/* List of trace_ops that have allocated trampolines */ |
|---|
| 2800 | +static LIST_HEAD(ftrace_ops_trampoline_list); |
|---|
| 2801 | + |
|---|
| 2802 | +static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) |
|---|
| 2803 | +{ |
|---|
| 2804 | + lockdep_assert_held(&ftrace_lock); |
|---|
| 2805 | + list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); |
|---|
| 2806 | +} |
|---|
| 2807 | + |
|---|
| 2808 | +static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) |
|---|
| 2809 | +{ |
|---|
| 2810 | + lockdep_assert_held(&ftrace_lock); |
|---|
| 2811 | + list_del_rcu(&ops->list); |
|---|
| 2812 | + synchronize_rcu(); |
|---|
| 2813 | +} |
|---|
| 2814 | + |
|---|
| 2815 | +/* |
|---|
| 2816 | + * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols |
|---|
| 2817 | + * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is |
|---|
| 2818 | + * not a module. |
|---|
| 2819 | + */ |
|---|
| 2820 | +#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" |
|---|
| 2821 | +#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" |
|---|
| 2822 | + |
|---|
| 2823 | +static void ftrace_trampoline_free(struct ftrace_ops *ops) |
|---|
| 2824 | +{ |
|---|
| 2825 | + if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && |
|---|
| 2826 | + ops->trampoline) { |
|---|
| 2827 | + /* |
|---|
| 2828 | + * Record the text poke event before the ksymbol unregister |
|---|
| 2829 | + * event. |
|---|
| 2830 | + */ |
|---|
| 2831 | + perf_event_text_poke((void *)ops->trampoline, |
|---|
| 2832 | + (void *)ops->trampoline, |
|---|
| 2833 | + ops->trampoline_size, NULL, 0); |
|---|
| 2834 | + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
|---|
| 2835 | + ops->trampoline, ops->trampoline_size, |
|---|
| 2836 | + true, FTRACE_TRAMPOLINE_SYM); |
|---|
| 2837 | + /* Remove from kallsyms after the perf events */ |
|---|
| 2838 | + ftrace_remove_trampoline_from_kallsyms(ops); |
|---|
| 2839 | + } |
|---|
| 2840 | + |
|---|
| 2841 | + arch_ftrace_trampoline_free(ops); |
|---|
| 2842 | +} |
|---|
| 2843 | + |
|---|
| 2695 | 2844 | static void ftrace_startup_enable(int command) |
|---|
| 2696 | 2845 | { |
|---|
| 2697 | 2846 | if (saved_ftrace_func != ftrace_trace_function) { |
|---|
| .. | .. |
|---|
| 2712 | 2861 | update_all_ops = false; |
|---|
| 2713 | 2862 | } |
|---|
| 2714 | 2863 | |
|---|
| 2715 | | -static int ftrace_startup(struct ftrace_ops *ops, int command) |
|---|
| 2864 | +int ftrace_startup(struct ftrace_ops *ops, int command) |
|---|
| 2716 | 2865 | { |
|---|
| 2717 | 2866 | int ret; |
|---|
| 2718 | 2867 | |
|---|
| .. | .. |
|---|
| 2741 | 2890 | __unregister_ftrace_function(ops); |
|---|
| 2742 | 2891 | ftrace_start_up--; |
|---|
| 2743 | 2892 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
|---|
| 2893 | + if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
|---|
| 2894 | + ftrace_trampoline_free(ops); |
|---|
| 2744 | 2895 | return ret; |
|---|
| 2745 | 2896 | } |
|---|
| 2746 | 2897 | |
|---|
| .. | .. |
|---|
| 2749 | 2900 | |
|---|
| 2750 | 2901 | ftrace_startup_enable(command); |
|---|
| 2751 | 2902 | |
|---|
| 2903 | + /* |
|---|
| 2904 | + * If ftrace is in an undefined state, we just remove ops from list |
|---|
| 2905 | + * to prevent the NULL pointer, instead of totally rolling it back and |
|---|
| 2906 | + * free trampoline, because those actions could cause further damage. |
|---|
| 2907 | + */ |
|---|
| 2908 | + if (unlikely(ftrace_disabled)) { |
|---|
| 2909 | + __unregister_ftrace_function(ops); |
|---|
| 2910 | + return -ENODEV; |
|---|
| 2911 | + } |
|---|
| 2912 | + |
|---|
| 2752 | 2913 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
|---|
| 2753 | 2914 | |
|---|
| 2754 | 2915 | return 0; |
|---|
| 2755 | 2916 | } |
|---|
| 2756 | 2917 | |
|---|
| 2757 | | -static int ftrace_shutdown(struct ftrace_ops *ops, int command) |
|---|
| 2918 | +int ftrace_shutdown(struct ftrace_ops *ops, int command) |
|---|
| 2758 | 2919 | { |
|---|
| 2759 | 2920 | int ret; |
|---|
| 2760 | 2921 | |
|---|
| .. | .. |
|---|
| 2786 | 2947 | command |= FTRACE_UPDATE_TRACE_FUNC; |
|---|
| 2787 | 2948 | } |
|---|
| 2788 | 2949 | |
|---|
| 2789 | | - if (!command || !ftrace_enabled) { |
|---|
| 2790 | | - /* |
|---|
| 2791 | | - * If these are dynamic or per_cpu ops, they still |
|---|
| 2792 | | - * need their data freed. Since, function tracing is |
|---|
| 2793 | | - * not currently active, we can just free them |
|---|
| 2794 | | - * without synchronizing all CPUs. |
|---|
| 2795 | | - */ |
|---|
| 2796 | | - if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
|---|
| 2797 | | - goto free_ops; |
|---|
| 2798 | | - |
|---|
| 2799 | | - return 0; |
|---|
| 2800 | | - } |
|---|
| 2950 | + if (!command || !ftrace_enabled) |
|---|
| 2951 | + goto out; |
|---|
| 2801 | 2952 | |
|---|
| 2802 | 2953 | /* |
|---|
| 2803 | 2954 | * If the ops uses a trampoline, then it needs to be |
|---|
| .. | .. |
|---|
| 2834 | 2985 | removed_ops = NULL; |
|---|
| 2835 | 2986 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; |
|---|
| 2836 | 2987 | |
|---|
| 2988 | +out: |
|---|
| 2837 | 2989 | /* |
|---|
| 2838 | 2990 | * Dynamic ops may be freed, we must make sure that all |
|---|
| 2839 | 2991 | * callers are done before leaving this function. |
|---|
| .. | .. |
|---|
| 2849 | 3001 | * infrastructure to do the synchronization, thus we must do it |
|---|
| 2850 | 3002 | * ourselves. |
|---|
| 2851 | 3003 | */ |
|---|
| 2852 | | - schedule_on_each_cpu(ftrace_sync); |
|---|
| 3004 | + synchronize_rcu_tasks_rude(); |
|---|
| 2853 | 3005 | |
|---|
| 2854 | 3006 | /* |
|---|
| 2855 | | - * When the kernel is preeptive, tasks can be preempted |
|---|
| 3007 | + * When the kernel is preemptive, tasks can be preempted |
|---|
| 2856 | 3008 | * while on a ftrace trampoline. Just scheduling a task on |
|---|
| 2857 | 3009 | * a CPU is not good enough to flush them. Calling |
|---|
| 2858 | 3010 | * synchronize_rcu_tasks() will wait for those tasks to |
|---|
| 2859 | 3011 | * execute and either schedule voluntarily or enter user space. |
|---|
| 2860 | 3012 | */ |
|---|
| 2861 | | - if (IS_ENABLED(CONFIG_PREEMPT)) |
|---|
| 3013 | + if (IS_ENABLED(CONFIG_PREEMPTION)) |
|---|
| 2862 | 3014 | synchronize_rcu_tasks(); |
|---|
| 2863 | 3015 | |
|---|
| 2864 | | - free_ops: |
|---|
| 2865 | | - arch_ftrace_trampoline_free(ops); |
|---|
| 3016 | + ftrace_trampoline_free(ops); |
|---|
| 2866 | 3017 | } |
|---|
| 2867 | 3018 | |
|---|
| 2868 | 3019 | return 0; |
|---|
| .. | .. |
|---|
| 2904 | 3055 | |
|---|
| 2905 | 3056 | static u64 ftrace_update_time; |
|---|
| 2906 | 3057 | unsigned long ftrace_update_tot_cnt; |
|---|
| 3058 | +unsigned long ftrace_number_of_pages; |
|---|
| 3059 | +unsigned long ftrace_number_of_groups; |
|---|
| 2907 | 3060 | |
|---|
| 2908 | 3061 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
|---|
| 2909 | 3062 | { |
|---|
| .. | .. |
|---|
| 2986 | 3139 | * to the NOP instructions. |
|---|
| 2987 | 3140 | */ |
|---|
| 2988 | 3141 | if (!__is_defined(CC_USING_NOP_MCOUNT) && |
|---|
| 2989 | | - !ftrace_code_disable(mod, p)) |
|---|
| 3142 | + !ftrace_nop_initialize(mod, p)) |
|---|
| 2990 | 3143 | break; |
|---|
| 2991 | 3144 | |
|---|
| 2992 | 3145 | update_cnt++; |
|---|
| .. | .. |
|---|
| 3003 | 3156 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
|---|
| 3004 | 3157 | { |
|---|
| 3005 | 3158 | int order; |
|---|
| 3159 | + int pages; |
|---|
| 3006 | 3160 | int cnt; |
|---|
| 3007 | 3161 | |
|---|
| 3008 | 3162 | if (WARN_ON(!count)) |
|---|
| 3009 | 3163 | return -EINVAL; |
|---|
| 3010 | 3164 | |
|---|
| 3011 | | - order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
|---|
| 3165 | + pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); |
|---|
| 3166 | + order = get_count_order(pages); |
|---|
| 3012 | 3167 | |
|---|
| 3013 | 3168 | /* |
|---|
| 3014 | 3169 | * We want to fill as much as possible. No more than a page |
|---|
| 3015 | 3170 | * may be empty. |
|---|
| 3016 | 3171 | */ |
|---|
| 3017 | | - while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) |
|---|
| 3172 | + if (!is_power_of_2(pages)) |
|---|
| 3018 | 3173 | order--; |
|---|
| 3019 | 3174 | |
|---|
| 3020 | 3175 | again: |
|---|
| .. | .. |
|---|
| 3024 | 3179 | /* if we can't allocate this size, try something smaller */ |
|---|
| 3025 | 3180 | if (!order) |
|---|
| 3026 | 3181 | return -ENOMEM; |
|---|
| 3027 | | - order >>= 1; |
|---|
| 3182 | + order--; |
|---|
| 3028 | 3183 | goto again; |
|---|
| 3029 | 3184 | } |
|---|
| 3185 | + |
|---|
| 3186 | + ftrace_number_of_pages += 1 << order; |
|---|
| 3187 | + ftrace_number_of_groups++; |
|---|
| 3030 | 3188 | |
|---|
| 3031 | 3189 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
|---|
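
The allocation sizing swaps a trial-division loop for get_count_order() plus a power-of-two check, and the new counters expose ftrace's memory footprint. A userspace sketch of the order computation with assumed sizes (the caller loops, so an allocation that holds fewer than count records simply chains another group of pages):

```c
#include <stdio.h>

#define PAGE_SIZE	4096
#define ENTRY_SIZE	24	/* assumed sizeof(struct dyn_ftrace), illustrative */
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* Reimplementation of the kernel's get_count_order() helper */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	int count = 1000;	/* records we would like to hold */
	int pages = (count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	int order = get_count_order(pages);

	/* Round down unless pages is a power of two: never leave
	 * more than one page of the group empty. */
	if (pages & (pages - 1))
		order--;

	printf("count=%d pages=%d order=%d -> holds %d entries\n",
	       count, pages, order, (PAGE_SIZE << order) / ENTRY_SIZE);
	return 0;
}
```
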
| 3032 | 3190 | pg->size = cnt; |
|---|
| .. | .. |
|---|
| 3046 | 3204 | int cnt; |
|---|
| 3047 | 3205 | |
|---|
| 3048 | 3206 | if (!num_to_init) |
|---|
| 3049 | | - return 0; |
|---|
| 3207 | + return NULL; |
|---|
| 3050 | 3208 | |
|---|
| 3051 | 3209 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); |
|---|
| 3052 | 3210 | if (!pg) |
|---|
| .. | .. |
|---|
| 3079 | 3237 | pg = start_pg; |
|---|
| 3080 | 3238 | while (pg) { |
|---|
| 3081 | 3239 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
|---|
| 3082 | | - free_pages((unsigned long)pg->records, order); |
|---|
| 3240 | + if (order >= 0) |
|---|
| 3241 | + free_pages((unsigned long)pg->records, order); |
|---|
| 3083 | 3242 | start_pg = pg->next; |
|---|
| 3084 | 3243 | kfree(pg); |
|---|
| 3085 | 3244 | pg = start_pg; |
|---|
| 3245 | + ftrace_number_of_pages -= 1 << order; |
|---|
| 3246 | + ftrace_number_of_groups--; |
|---|
| 3086 | 3247 | } |
|---|
| 3087 | 3248 | pr_info("ftrace: FAILED to allocate memory for functions\n"); |
|---|
| 3088 | 3249 | return NULL; |
|---|
| .. | .. |
|---|
| 3493 | 3654 | if (iter->flags & FTRACE_ITER_ENABLED) { |
|---|
| 3494 | 3655 | struct ftrace_ops *ops; |
|---|
| 3495 | 3656 | |
|---|
| 3496 | | - seq_printf(m, " (%ld)%s%s", |
|---|
| 3657 | + seq_printf(m, " (%ld)%s%s%s", |
|---|
| 3497 | 3658 | ftrace_rec_count(rec), |
|---|
| 3498 | 3659 | rec->flags & FTRACE_FL_REGS ? " R" : " ", |
|---|
| 3499 | | - rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); |
|---|
| 3660 | + rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", |
|---|
| 3661 | + rec->flags & FTRACE_FL_DIRECT ? " D" : " "); |
|---|
| 3500 | 3662 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
|---|
| 3501 | 3663 | ops = ftrace_find_tramp_ops_any(rec); |
|---|
| 3502 | 3664 | if (ops) { |
|---|
| .. | .. |
|---|
| 3512 | 3674 | } else { |
|---|
| 3513 | 3675 | add_trampoline_func(m, NULL, rec); |
|---|
| 3514 | 3676 | } |
|---|
| 3515 | | - } |
|---|
| 3677 | + if (rec->flags & FTRACE_FL_DIRECT) { |
|---|
| 3678 | + unsigned long direct; |
|---|
| 3679 | + |
|---|
| 3680 | + direct = ftrace_find_rec_direct(rec->ip); |
|---|
| 3681 | + if (direct) |
|---|
| 3682 | + seq_printf(m, "\n\tdirect-->%pS", (void *)direct); |
|---|
| 3683 | + } |
|---|
| 3684 | + } |
|---|
| 3516 | 3685 | |
|---|
| 3517 | 3686 | seq_putc(m, '\n'); |
|---|
| 3518 | 3687 | |
|---|
| .. | .. |
|---|
| 3530 | 3699 | ftrace_avail_open(struct inode *inode, struct file *file) |
|---|
| 3531 | 3700 | { |
|---|
| 3532 | 3701 | struct ftrace_iterator *iter; |
|---|
| 3702 | + int ret; |
|---|
| 3703 | + |
|---|
| 3704 | + ret = security_locked_down(LOCKDOWN_TRACEFS); |
|---|
| 3705 | + if (ret) |
|---|
| 3706 | + return ret; |
|---|
| 3533 | 3707 | |
|---|
| 3534 | 3708 | if (unlikely(ftrace_disabled)) |
|---|
| 3535 | 3709 | return -ENODEV; |
|---|
| .. | .. |
|---|
| 3548 | 3722 | ftrace_enabled_open(struct inode *inode, struct file *file) |
|---|
| 3549 | 3723 | { |
|---|
| 3550 | 3724 | struct ftrace_iterator *iter; |
|---|
| 3725 | + |
|---|
| 3726 | + /* |
|---|
| 3727 | + * This shows us what functions are currently being |
|---|
| 3728 | + * traced and by what. Not sure if we want lockdown |
|---|
| 3729 | + * to hide such critical information for an admin. |
|---|
| 3730 | + * Although, perhaps it can show information we don't |
|---|
| 3731 | + * want people to see, but if something is tracing |
|---|
| 3732 | + * something, we probably want to know about it. |
|---|
| 3733 | + */ |
|---|
| 3551 | 3734 | |
|---|
| 3552 | 3735 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
|---|
| 3553 | 3736 | if (!iter) |
|---|
| .. | .. |
|---|
| 3591 | 3774 | if (unlikely(ftrace_disabled)) |
|---|
| 3592 | 3775 | return -ENODEV; |
|---|
| 3593 | 3776 | |
|---|
| 3594 | | - if (tr && trace_array_get(tr) < 0) |
|---|
| 3777 | + if (tracing_check_open_get_tr(tr)) |
|---|
| 3595 | 3778 | return -ENODEV; |
|---|
| 3596 | 3779 | |
|---|
| 3597 | 3780 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 3669 | 3852 | { |
|---|
| 3670 | 3853 | struct ftrace_ops *ops = inode->i_private; |
|---|
| 3671 | 3854 | |
|---|
| 3855 | + /* Checks for tracefs lockdown */ |
|---|
| 3672 | 3856 | return ftrace_regex_open(ops, |
|---|
| 3673 | 3857 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, |
|---|
| 3674 | 3858 | inode, file); |
|---|
| .. | .. |
|---|
| 3679 | 3863 | { |
|---|
| 3680 | 3864 | struct ftrace_ops *ops = inode->i_private; |
|---|
| 3681 | 3865 | |
|---|
| 3866 | + /* Checks for tracefs lockdown */ |
|---|
| 3682 | 3867 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, |
|---|
| 3683 | 3868 | inode, file); |
|---|
| 3684 | 3869 | } |
|---|
| .. | .. |
|---|
| 3759 | 3944 | } |
|---|
| 3760 | 3945 | |
|---|
| 3761 | 3946 | static int |
|---|
| 3947 | +add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, |
|---|
| 3948 | + int clear_filter) |
|---|
| 3949 | +{ |
|---|
| 3950 | + long index = simple_strtoul(func_g->search, NULL, 0); |
|---|
| 3951 | + struct ftrace_page *pg; |
|---|
| 3952 | + struct dyn_ftrace *rec; |
|---|
| 3953 | + |
|---|
| 3954 | + /* The index starts at 1 */ |
|---|
| 3955 | + if (--index < 0) |
|---|
| 3956 | + return 0; |
|---|
| 3957 | + |
|---|
| 3958 | + do_for_each_ftrace_rec(pg, rec) { |
|---|
| 3959 | + if (pg->index <= index) { |
|---|
| 3960 | + index -= pg->index; |
|---|
| 3961 | + /* this is a double loop, break goes to the next page */ |
|---|
| 3962 | + break; |
|---|
| 3963 | + } |
|---|
| 3964 | + rec = &pg->records[index]; |
|---|
| 3965 | + enter_record(hash, rec, clear_filter); |
|---|
| 3966 | + return 1; |
|---|
| 3967 | + } while_for_each_ftrace_rec(); |
|---|
| 3968 | + return 0; |
|---|
| 3969 | +} |
|---|
| 3970 | + |
|---|
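
add_rec_by_index() backs the new MATCH_INDEX glob type: writing a plain number such as `2` into set_ftrace_filter picks the second entry of available_filter_functions. Since the records live in a chain of pages, the 1-based index is consumed page by page; a standalone sketch:

```c
#include <stddef.h>

/* Each page holds 'index' live records; pages are chained. */
struct page_sk {
	int index;
	int records[8];
	struct page_sk *next;
};

static int *find_by_index(struct page_sk *start, long index)
{
	struct page_sk *pg;

	if (--index < 0)	/* the user-visible index starts at 1 */
		return NULL;

	for (pg = start; pg; pg = pg->next) {
		if (pg->index <= index) {
			index -= pg->index;	/* skip past this whole page */
			continue;
		}
		return &pg->records[index];
	}
	return NULL;
}
```
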
| 3971 | +static int |
|---|
| 3762 | 3972 | ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, |
|---|
| 3763 | 3973 | struct ftrace_glob *mod_g, int exclude_mod) |
|---|
| 3764 | 3974 | { |
|---|
| .. | .. |
|---|
| 3825 | 4035 | |
|---|
| 3826 | 4036 | if (unlikely(ftrace_disabled)) |
|---|
| 3827 | 4037 | goto out_unlock; |
|---|
| 4038 | + |
|---|
| 4039 | + if (func_g.type == MATCH_INDEX) { |
|---|
| 4040 | + found = add_rec_by_index(hash, &func_g, clear_filter); |
|---|
| 4041 | + goto out_unlock; |
|---|
| 4042 | + } |
|---|
| 3828 | 4043 | |
|---|
| 3829 | 4044 | do_for_each_ftrace_rec(pg, rec) { |
|---|
| 3830 | 4045 | |
|---|
| .. | .. |
|---|
| 3906 | 4121 | static bool module_exists(const char *module) |
|---|
| 3907 | 4122 | { |
|---|
| 3908 | 4123 | /* All modules have the symbol __this_module */ |
|---|
| 3909 | | - const char this_mod[] = "__this_module"; |
|---|
| 4124 | + static const char this_mod[] = "__this_module"; |
|---|
| 3910 | 4125 | char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; |
|---|
| 3911 | 4126 | unsigned long val; |
|---|
| 3912 | 4127 | int n; |
|---|
| .. | .. |
|---|
| 4183 | 4398 | * @ip: The instruction pointer address to map @data to |
|---|
| 4184 | 4399 | * @data: The data to map to @ip |
|---|
| 4185 | 4400 | * |
|---|
| 4186 | | - * Returns 0 on succes otherwise an error. |
|---|
| 4401 | + * Returns 0 on success otherwise an error. |
|---|
| 4187 | 4402 | */ |
|---|
| 4188 | 4403 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, |
|---|
| 4189 | 4404 | unsigned long ip, void *data) |
|---|
| .. | .. |
|---|
| 4213 | 4428 | * @ip: The instruction pointer address to remove the data from |
|---|
| 4214 | 4429 | * |
|---|
| 4215 | 4430 | * Returns the data if it is found, otherwise NULL. |
|---|
| 4216 | | - * Note, if the data pointer is used as the data itself, (see
|---|
| 4431 | + * Note, if the data pointer is used as the data itself (see
|---|
| 4217 | 4432 | * ftrace_func_mapper_find_ip()), then the return value may be
|---|
| 4218 | 4433 | * meaningless if the data pointer was set to zero.
|---|
| 4219 | 4434 | */ |
|---|
| .. | .. |
|---|
| 4351 | 4566 | |
|---|
| 4352 | 4567 | /* |
|---|
| 4353 | 4568 | * Note, there's a small window here that the func_hash->filter_hash |
|---|
| 4354 | | - * may be NULL or empty. Need to be carefule when reading the loop. |
|---|
| 4569 | + * may be NULL or empty. Need to be careful when reading the loop. |
|---|
| 4355 | 4570 | */ |
|---|
| 4356 | 4571 | mutex_lock(&probe->ops.func_hash->regex_lock); |
|---|
| 4357 | 4572 | |
|---|
| .. | .. |
|---|
| 4552 | 4767 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
|---|
| 4553 | 4768 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
|---|
| 4554 | 4769 | &old_hash_ops); |
|---|
| 4555 | | - synchronize_sched(); |
|---|
| 4770 | + synchronize_rcu(); |
|---|
| 4556 | 4771 | |
|---|
| 4557 | 4772 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
|---|
| 4558 | 4773 | hlist_del(&entry->hlist); |
|---|
| .. | .. |
|---|
| 4794 | 5009 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, |
|---|
| 4795 | 5010 | int reset, int enable) |
|---|
| 4796 | 5011 | { |
|---|
| 4797 | | - return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); |
|---|
| 5012 | + return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); |
|---|
| 4798 | 5013 | } |
|---|
| 5014 | + |
|---|
| 5015 | +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
|---|
| 5016 | + |
|---|
| 5017 | +struct ftrace_direct_func { |
|---|
| 5018 | + struct list_head next; |
|---|
| 5019 | + unsigned long addr; |
|---|
| 5020 | + int count; |
|---|
| 5021 | +}; |
|---|
| 5022 | + |
|---|
| 5023 | +static LIST_HEAD(ftrace_direct_funcs); |
|---|
| 5024 | + |
|---|
| 5025 | +/** |
|---|
| 5026 | + * ftrace_find_direct_func - check if an address is a registered direct caller
|---|
| 5027 | + * @addr: The address of a registered direct caller |
|---|
| 5028 | + * |
|---|
| 5029 | + * This searches to see if a ftrace direct caller has been registered |
|---|
| 5030 | + * at a specific address, and if so, it returns a descriptor for it. |
|---|
| 5031 | + * |
|---|
| 5032 | + * This can be used by architecture code to see if an address is |
|---|
| 5033 | + * a direct caller (trampoline) attached to a fentry/mcount location. |
|---|
| 5034 | + * This is useful for the function_graph tracer, as it may need to |
|---|
| 5035 | + * do adjustments if it traced a location that also has a direct |
|---|
| 5036 | + * trampoline attached to it. |
|---|
| 5037 | + */ |
|---|
| 5038 | +struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) |
|---|
| 5039 | +{ |
|---|
| 5040 | + struct ftrace_direct_func *entry; |
|---|
| 5041 | + bool found = false; |
|---|
| 5042 | + |
|---|
| 5043 | + /* May be called by fgraph trampoline (protected by rcu tasks) */ |
|---|
| 5044 | + list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) { |
|---|
| 5045 | + if (entry->addr == addr) { |
|---|
| 5046 | + found = true; |
|---|
| 5047 | + break; |
|---|
| 5048 | + } |
|---|
| 5049 | + } |
|---|
| 5050 | + if (found) |
|---|
| 5051 | + return entry; |
|---|
| 5052 | + |
|---|
| 5053 | + return NULL; |
|---|
| 5054 | +} |
|---|
| 5055 | + |
|---|
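As the comment above notes, ftrace_find_direct_func() is intended for callers such as the function_graph tracer that may encounter a direct trampoline's address where a traced function was expected. A hedged sketch of that pattern; the helper name is hypothetical, not part of this API:

```c
#include <linux/ftrace.h>

/*
 * Hypothetical kernel-side helper: decide whether @addr is one of
 * the registered direct trampolines, so a tracer can adjust for it.
 * Callers must be protected the same way as the fgraph trampoline
 * (RCU-tasks), since the list is walked with list_for_each_entry_rcu().
 */
static bool addr_is_direct_trampoline(unsigned long addr)
{
	return ftrace_find_direct_func(addr) != NULL;
}
```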
| 5056 | +static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr) |
|---|
| 5057 | +{ |
|---|
| 5058 | + struct ftrace_direct_func *direct; |
|---|
| 5059 | + |
|---|
| 5060 | + direct = kmalloc(sizeof(*direct), GFP_KERNEL); |
|---|
| 5061 | + if (!direct) |
|---|
| 5062 | + return NULL; |
|---|
| 5063 | + direct->addr = addr; |
|---|
| 5064 | + direct->count = 0; |
|---|
| 5065 | + list_add_rcu(&direct->next, &ftrace_direct_funcs); |
|---|
| 5066 | + ftrace_direct_func_count++; |
|---|
| 5067 | + return direct; |
|---|
| 5068 | +} |
|---|
| 5069 | + |
|---|
| 5070 | +/** |
|---|
| 5071 | + * register_ftrace_direct - Call a custom trampoline directly |
|---|
| 5072 | + * @ip: The address of the nop at the beginning of a function |
|---|
| 5073 | + * @addr: The address of the trampoline to call at @ip |
|---|
| 5074 | + * |
|---|
| 5075 | + * This is used to connect a direct call from the nop location (@ip) |
|---|
| 5076 | + * at the start of ftrace traced functions. The location that it calls |
|---|
| 5077 | + * (@addr) must be able to handle a direct call, save the parameters
|---|
| 5078 | + * of the function being traced, and restore them (or inject new ones
|---|
| 5079 | + * if needed) before returning.
|---|
| 5080 | + * |
|---|
| 5081 | + * Returns: |
|---|
| 5082 | + * 0 on success |
|---|
| 5083 | + * -EBUSY - Another direct function is already attached (there can be only one) |
|---|
| 5084 | + * -ENODEV - @ip does not point to a ftrace nop location (or not supported) |
|---|
| 5085 | + * -ENOMEM - There was an allocation failure. |
|---|
| 5086 | + */ |
|---|
| 5087 | +int register_ftrace_direct(unsigned long ip, unsigned long addr) |
|---|
| 5088 | +{ |
|---|
| 5089 | + struct ftrace_direct_func *direct; |
|---|
| 5090 | + struct ftrace_func_entry *entry; |
|---|
| 5091 | + struct ftrace_hash *free_hash = NULL; |
|---|
| 5092 | + struct dyn_ftrace *rec; |
|---|
| 5093 | + int ret = -EBUSY; |
|---|
| 5094 | + |
|---|
| 5095 | + mutex_lock(&direct_mutex); |
|---|
| 5096 | + |
|---|
| 5097 | + /* See if there's a direct function at @ip already */ |
|---|
| 5098 | + if (ftrace_find_rec_direct(ip)) |
|---|
| 5099 | + goto out_unlock; |
|---|
| 5100 | + |
|---|
| 5101 | + ret = -ENODEV; |
|---|
| 5102 | + rec = lookup_rec(ip, ip); |
|---|
| 5103 | + if (!rec) |
|---|
| 5104 | + goto out_unlock; |
|---|
| 5105 | + |
|---|
| 5106 | + /* |
|---|
| 5107 | + * Does the rec say it has a direct call, even though we
|---|
| 5108 | + * didn't find one earlier? That would be a bug.
|---|
| 5109 | + */ |
|---|
| 5110 | + if (WARN_ON(rec->flags & FTRACE_FL_DIRECT)) |
|---|
| 5111 | + goto out_unlock; |
|---|
| 5112 | + |
|---|
| 5113 | + /* Make sure the ip points to the exact record */ |
|---|
| 5114 | + if (ip != rec->ip) { |
|---|
| 5115 | + ip = rec->ip; |
|---|
| 5116 | + /* Need to check this ip for a direct. */ |
|---|
| 5117 | + if (ftrace_find_rec_direct(ip)) |
|---|
| 5118 | + goto out_unlock; |
|---|
| 5119 | + } |
|---|
| 5120 | + |
|---|
| 5121 | + ret = -ENOMEM; |
|---|
| 5122 | + if (ftrace_hash_empty(direct_functions) || |
|---|
| 5123 | + direct_functions->count > 2 * (1 << direct_functions->size_bits)) { |
|---|
| 5124 | + struct ftrace_hash *new_hash; |
|---|
| 5125 | + int size = ftrace_hash_empty(direct_functions) ? 0 : |
|---|
| 5126 | + direct_functions->count + 1; |
|---|
| 5127 | + |
|---|
| 5128 | + if (size < 32) |
|---|
| 5129 | + size = 32; |
|---|
| 5130 | + |
|---|
| 5131 | + new_hash = dup_hash(direct_functions, size); |
|---|
| 5132 | + if (!new_hash) |
|---|
| 5133 | + goto out_unlock; |
|---|
| 5134 | + |
|---|
| 5135 | + free_hash = direct_functions; |
|---|
| 5136 | + direct_functions = new_hash; |
|---|
| 5137 | + } |
|---|
| 5138 | + |
|---|
| 5139 | + entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
|---|
| 5140 | + if (!entry) |
|---|
| 5141 | + goto out_unlock; |
|---|
| 5142 | + |
|---|
| 5143 | + direct = ftrace_find_direct_func(addr); |
|---|
| 5144 | + if (!direct) { |
|---|
| 5145 | + direct = ftrace_alloc_direct_func(addr); |
|---|
| 5146 | + if (!direct) { |
|---|
| 5147 | + kfree(entry); |
|---|
| 5148 | + goto out_unlock; |
|---|
| 5149 | + } |
|---|
| 5150 | + } |
|---|
| 5151 | + |
|---|
| 5152 | + entry->ip = ip; |
|---|
| 5153 | + entry->direct = addr; |
|---|
| 5154 | + __add_hash_entry(direct_functions, entry); |
|---|
| 5155 | + |
|---|
| 5156 | + ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); |
|---|
| 5157 | + |
|---|
| 5158 | + if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) { |
|---|
| 5159 | + ret = register_ftrace_function(&direct_ops); |
|---|
| 5160 | + if (ret) |
|---|
| 5161 | + ftrace_set_filter_ip(&direct_ops, ip, 1, 0); |
|---|
| 5162 | + } |
|---|
| 5163 | + |
|---|
| 5164 | + if (ret) { |
|---|
| 5165 | + remove_hash_entry(direct_functions, entry); |
|---|
| 5166 | + kfree(entry); |
|---|
| 5167 | + if (!direct->count) { |
|---|
| 5168 | + list_del_rcu(&direct->next); |
|---|
| 5169 | + synchronize_rcu_tasks(); |
|---|
| 5170 | + kfree(direct); |
|---|
| 5171 | + if (free_hash) |
|---|
| 5172 | + free_ftrace_hash(free_hash); |
|---|
| 5173 | + free_hash = NULL; |
|---|
| 5174 | + ftrace_direct_func_count--; |
|---|
| 5175 | + } |
|---|
| 5176 | + } else { |
|---|
| 5177 | + direct->count++; |
|---|
| 5178 | + } |
|---|
| 5179 | + out_unlock: |
|---|
| 5180 | + mutex_unlock(&direct_mutex); |
|---|
| 5181 | + |
|---|
| 5182 | + if (free_hash) { |
|---|
| 5183 | + synchronize_rcu_tasks(); |
|---|
| 5184 | + free_ftrace_hash(free_hash); |
|---|
| 5185 | + } |
|---|
| 5186 | + |
|---|
| 5187 | + return ret; |
|---|
| 5188 | +} |
|---|
| 5189 | +EXPORT_SYMBOL_GPL(register_ftrace_direct); |
|---|
| 5190 | + |
|---|
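A condensed sketch of the intended use, loosely modeled on the kernel's samples/ftrace/ direct-call examples. The trampoline must be architecture-specific assembly that saves and restores the traced function's arguments; here it is only declared, and my_tramp/my_direct_func are illustrative names, not part of this API:

```c
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

/* Called from the assembly trampoline with the traced function's arg */
void my_direct_func(struct task_struct *p)
{
	trace_printk("waking up %s-%d\n", p->comm, p->pid);
}

/* Arch-specific asm stub (not shown): saves the args, calls
 * my_direct_func(), restores the args, then returns to the caller. */
extern void my_tramp(void);

static int __init ftrace_direct_init(void)
{
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit ftrace_direct_exit(void)
{
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(ftrace_direct_init);
module_exit(ftrace_direct_exit);
MODULE_LICENSE("GPL");
```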
| 5191 | +static struct ftrace_func_entry *find_direct_entry(unsigned long *ip, |
|---|
| 5192 | + struct dyn_ftrace **recp) |
|---|
| 5193 | +{ |
|---|
| 5194 | + struct ftrace_func_entry *entry; |
|---|
| 5195 | + struct dyn_ftrace *rec; |
|---|
| 5196 | + |
|---|
| 5197 | + rec = lookup_rec(*ip, *ip); |
|---|
| 5198 | + if (!rec) |
|---|
| 5199 | + return NULL; |
|---|
| 5200 | + |
|---|
| 5201 | + entry = __ftrace_lookup_ip(direct_functions, rec->ip); |
|---|
| 5202 | + if (!entry) { |
|---|
| 5203 | + WARN_ON(rec->flags & FTRACE_FL_DIRECT); |
|---|
| 5204 | + return NULL; |
|---|
| 5205 | + } |
|---|
| 5206 | + |
|---|
| 5207 | + WARN_ON(!(rec->flags & FTRACE_FL_DIRECT)); |
|---|
| 5208 | + |
|---|
| 5209 | + /* Passed in ip just needs to be on the call site */ |
|---|
| 5210 | + *ip = rec->ip; |
|---|
| 5211 | + |
|---|
| 5212 | + if (recp) |
|---|
| 5213 | + *recp = rec; |
|---|
| 5214 | + |
|---|
| 5215 | + return entry; |
|---|
| 5216 | +} |
|---|
| 5217 | + |
|---|
| 5218 | +int unregister_ftrace_direct(unsigned long ip, unsigned long addr) |
|---|
| 5219 | +{ |
|---|
| 5220 | + struct ftrace_direct_func *direct; |
|---|
| 5221 | + struct ftrace_func_entry *entry; |
|---|
| 5222 | + int ret = -ENODEV; |
|---|
| 5223 | + |
|---|
| 5224 | + mutex_lock(&direct_mutex); |
|---|
| 5225 | + |
|---|
| 5226 | + entry = find_direct_entry(&ip, NULL); |
|---|
| 5227 | + if (!entry) |
|---|
| 5228 | + goto out_unlock; |
|---|
| 5229 | + |
|---|
| 5230 | + if (direct_functions->count == 1) |
|---|
| 5231 | + unregister_ftrace_function(&direct_ops); |
|---|
| 5232 | + |
|---|
| 5233 | + ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0); |
|---|
| 5234 | + |
|---|
| 5235 | + WARN_ON(ret); |
|---|
| 5236 | + |
|---|
| 5237 | + remove_hash_entry(direct_functions, entry); |
|---|
| 5238 | + |
|---|
| 5239 | + direct = ftrace_find_direct_func(addr); |
|---|
| 5240 | + if (!WARN_ON(!direct)) { |
|---|
| 5241 | + /* This is the good path (see the ! before WARN) */ |
|---|
| 5242 | + direct->count--; |
|---|
| 5243 | + WARN_ON(direct->count < 0); |
|---|
| 5244 | + if (!direct->count) { |
|---|
| 5245 | + list_del_rcu(&direct->next); |
|---|
| 5246 | + synchronize_rcu_tasks(); |
|---|
| 5247 | + kfree(direct); |
|---|
| 5248 | + kfree(entry); |
|---|
| 5249 | + ftrace_direct_func_count--; |
|---|
| 5250 | + } |
|---|
| 5251 | + } |
|---|
| 5252 | + out_unlock: |
|---|
| 5253 | + mutex_unlock(&direct_mutex); |
|---|
| 5254 | + |
|---|
| 5255 | + return ret; |
|---|
| 5256 | +} |
|---|
| 5257 | +EXPORT_SYMBOL_GPL(unregister_ftrace_direct); |
|---|
| 5258 | + |
|---|
| 5259 | +static struct ftrace_ops stub_ops = { |
|---|
| 5260 | + .func = ftrace_stub, |
|---|
| 5261 | +}; |
|---|
| 5262 | + |
|---|
| 5263 | +/** |
|---|
| 5264 | + * ftrace_modify_direct_caller - modify ftrace nop directly |
|---|
| 5265 | + * @entry: The ftrace hash entry of the direct helper for @rec |
|---|
| 5266 | + * @rec: The record representing the function site to patch |
|---|
| 5267 | + * @old_addr: The location that the site at @rec->ip currently calls |
|---|
| 5268 | + * @new_addr: The location that the site at @rec->ip should call |
|---|
| 5269 | + * |
|---|
| 5270 | + * An architecture may overwrite this function to optimize the |
|---|
| 5271 | + * changing of the direct callback on an ftrace nop location. |
|---|
| 5272 | + * This is called with the ftrace_lock mutex held, and no other |
|---|
| 5273 | + * ftrace callbacks are on the associated record (@rec). Thus, |
|---|
| 5274 | + * it is safe to modify the ftrace record, where it should be |
|---|
| 5275 | + * currently calling @old_addr directly, to call @new_addr. |
|---|
| 5276 | + * |
|---|
| 5277 | + * Safety checks should be made to make sure that the code at |
|---|
| 5278 | + * @rec->ip is currently calling @old_addr, and this function
|---|
| 5279 | + * must also update entry->direct to @new_addr.
|---|
| 5280 | + */ |
|---|
| 5281 | +int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry, |
|---|
| 5282 | + struct dyn_ftrace *rec, |
|---|
| 5283 | + unsigned long old_addr, |
|---|
| 5284 | + unsigned long new_addr) |
|---|
| 5285 | +{ |
|---|
| 5286 | + unsigned long ip = rec->ip; |
|---|
| 5287 | + int ret; |
|---|
| 5288 | + |
|---|
| 5289 | + /* |
|---|
| 5290 | + * The ftrace_lock was used to determine if the record |
|---|
| 5291 | + * had more than one registered user to it. If it did, |
|---|
| 5292 | + * we needed to prevent that from changing to do the quick |
|---|
| 5293 | + * switch. But if it did not (only a direct caller was attached) |
|---|
| 5294 | + * then this function is called. This function can deal with
|---|
| 5295 | + * other callers being attached to the rec we care about, and
|---|
| 5296 | + * since this function uses standard ftrace calls that take |
|---|
| 5297 | + * the ftrace_lock mutex, we need to release it. |
|---|
| 5298 | + */ |
|---|
| 5299 | + mutex_unlock(&ftrace_lock); |
|---|
| 5300 | + |
|---|
| 5301 | + /* |
|---|
| 5302 | + * By setting a stub function at the same address, we force |
|---|
| 5303 | + * the code to call the iterator and the direct_ops helper. |
|---|
| 5304 | + * This means that @ip no longer makes the direct call, and
|---|
| 5305 | + * we can simply modify it. |
|---|
| 5306 | + */ |
|---|
| 5307 | + ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0); |
|---|
| 5308 | + if (ret) |
|---|
| 5309 | + goto out_lock; |
|---|
| 5310 | + |
|---|
| 5311 | + ret = register_ftrace_function(&stub_ops); |
|---|
| 5312 | + if (ret) { |
|---|
| 5313 | + ftrace_set_filter_ip(&stub_ops, ip, 1, 0); |
|---|
| 5314 | + goto out_lock; |
|---|
| 5315 | + } |
|---|
| 5316 | + |
|---|
| 5317 | + entry->direct = new_addr; |
|---|
| 5318 | + |
|---|
| 5319 | + /* |
|---|
| 5320 | + * By removing the stub, we put back the direct call, calling |
|---|
| 5321 | + * the @new_addr. |
|---|
| 5322 | + */ |
|---|
| 5323 | + unregister_ftrace_function(&stub_ops); |
|---|
| 5324 | + ftrace_set_filter_ip(&stub_ops, ip, 1, 0); |
|---|
| 5325 | + |
|---|
| 5326 | + out_lock: |
|---|
| 5327 | + mutex_lock(&ftrace_lock); |
|---|
| 5328 | + |
|---|
| 5329 | + return ret; |
|---|
| 5330 | +} |
|---|
| 5331 | + |
|---|
| 5332 | +/** |
|---|
| 5333 | + * modify_ftrace_direct - Modify an existing direct call to call something else |
|---|
| 5334 | + * @ip: The instruction pointer to modify |
|---|
| 5335 | + * @old_addr: The address that the current @ip calls directly |
|---|
| 5336 | + * @new_addr: The address that the @ip should call |
|---|
| 5337 | + * |
|---|
| 5338 | + * This modifies a ftrace direct caller at an instruction pointer without |
|---|
| 5339 | + * having to disable it first. The direct call will switch over to the |
|---|
| 5340 | + * @new_addr without missing anything. |
|---|
| 5341 | + * |
|---|
| 5342 | + * Returns: zero on success. Non zero on error, which includes: |
|---|
| 5343 | + * -ENODEV : the @ip given has no direct caller attached |
|---|
| 5344 | + * -EINVAL : the @old_addr does not match the current direct caller |
|---|
| 5345 | + */ |
|---|
| 5346 | +int modify_ftrace_direct(unsigned long ip, |
|---|
| 5347 | + unsigned long old_addr, unsigned long new_addr) |
|---|
| 5348 | +{ |
|---|
| 5349 | + struct ftrace_direct_func *direct, *new_direct = NULL; |
|---|
| 5350 | + struct ftrace_func_entry *entry; |
|---|
| 5351 | + struct dyn_ftrace *rec; |
|---|
| 5352 | + int ret = -ENODEV; |
|---|
| 5353 | + |
|---|
| 5354 | + mutex_lock(&direct_mutex); |
|---|
| 5355 | + |
|---|
| 5356 | + mutex_lock(&ftrace_lock); |
|---|
| 5357 | + entry = find_direct_entry(&ip, &rec); |
|---|
| 5358 | + if (!entry) |
|---|
| 5359 | + goto out_unlock; |
|---|
| 5360 | + |
|---|
| 5361 | + ret = -EINVAL; |
|---|
| 5362 | + if (entry->direct != old_addr) |
|---|
| 5363 | + goto out_unlock; |
|---|
| 5364 | + |
|---|
| 5365 | + direct = ftrace_find_direct_func(old_addr); |
|---|
| 5366 | + if (WARN_ON(!direct)) |
|---|
| 5367 | + goto out_unlock; |
|---|
| 5368 | + if (direct->count > 1) { |
|---|
| 5369 | + ret = -ENOMEM; |
|---|
| 5370 | + new_direct = ftrace_alloc_direct_func(new_addr); |
|---|
| 5371 | + if (!new_direct) |
|---|
| 5372 | + goto out_unlock; |
|---|
| 5373 | + direct->count--; |
|---|
| 5374 | + new_direct->count++; |
|---|
| 5375 | + } else { |
|---|
| 5376 | + direct->addr = new_addr; |
|---|
| 5377 | + } |
|---|
| 5378 | + |
|---|
| 5379 | + /* |
|---|
| 5380 | + * If there's no other ftrace callback on the rec->ip location, |
|---|
| 5381 | + * then it can be changed directly by the architecture. |
|---|
| 5382 | + * If there is another caller, then we just need to change the |
|---|
| 5383 | + * direct caller helper to point to @new_addr. |
|---|
| 5384 | + */ |
|---|
| 5385 | + if (ftrace_rec_count(rec) == 1) { |
|---|
| 5386 | + ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr); |
|---|
| 5387 | + } else { |
|---|
| 5388 | + entry->direct = new_addr; |
|---|
| 5389 | + ret = 0; |
|---|
| 5390 | + } |
|---|
| 5391 | + |
|---|
| 5392 | + if (unlikely(ret && new_direct)) { |
|---|
| 5393 | + direct->count++; |
|---|
| 5394 | + list_del_rcu(&new_direct->next); |
|---|
| 5395 | + synchronize_rcu_tasks(); |
|---|
| 5396 | + kfree(new_direct); |
|---|
| 5397 | + ftrace_direct_func_count--; |
|---|
| 5398 | + } |
|---|
| 5399 | + |
|---|
| 5400 | + out_unlock: |
|---|
| 5401 | + mutex_unlock(&ftrace_lock); |
|---|
| 5402 | + mutex_unlock(&direct_mutex); |
|---|
| 5403 | + return ret; |
|---|
| 5404 | +} |
|---|
| 5405 | +EXPORT_SYMBOL_GPL(modify_ftrace_direct); |
|---|
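Continuing the module sketch above: once a direct call is attached, it can be re-pointed atomically, with no window in which the call site falls back to a plain nop. Assuming a second hypothetical trampoline my_tramp2 exists alongside my_tramp:

```c
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp(void);	/* currently attached (hypothetical) */
extern void my_tramp2(void);	/* replacement trampoline (hypothetical) */

/* Swap the live direct call on wake_up_process() from my_tramp to
 * my_tramp2 without ever dropping back to an untraced nop. */
static int switch_trampoline(void)
{
	return modify_ftrace_direct((unsigned long)wake_up_process,
				    (unsigned long)my_tramp,
				    (unsigned long)my_tramp2);
}
```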
| 5406 | +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
|---|
| 4799 | 5407 | |
|---|
| 4800 | 5408 | /** |
|---|
| 4801 | 5409 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address |
|---|
| .. | .. |
|---|
| 4967 | 5575 | struct ftrace_hash *hash; |
|---|
| 4968 | 5576 | |
|---|
| 4969 | 5577 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
|---|
| 4970 | | - if (WARN_ON(!hash)) |
|---|
| 5578 | + if (MEM_FAIL(!hash, "Failed to allocate hash\n")) |
|---|
| 4971 | 5579 | return; |
|---|
| 4972 | 5580 | |
|---|
| 4973 | 5581 | while (buf) { |
|---|
| .. | .. |
|---|
| 5045 | 5653 | |
|---|
| 5046 | 5654 | if (filter_hash) { |
|---|
| 5047 | 5655 | orig_hash = &iter->ops->func_hash->filter_hash; |
|---|
| 5048 | | - if (iter->tr && !list_empty(&iter->tr->mod_trace)) |
|---|
| 5049 | | - iter->hash->flags |= FTRACE_HASH_FL_MOD; |
|---|
| 5656 | + if (iter->tr) { |
|---|
| 5657 | + if (list_empty(&iter->tr->mod_trace)) |
|---|
| 5658 | + iter->hash->flags &= ~FTRACE_HASH_FL_MOD; |
|---|
| 5659 | + else |
|---|
| 5660 | + iter->hash->flags |= FTRACE_HASH_FL_MOD; |
|---|
| 5661 | + } |
|---|
| 5050 | 5662 | } else |
|---|
| 5051 | 5663 | orig_hash = &iter->ops->func_hash->notrace_hash; |
|---|
| 5052 | 5664 | |
|---|
| .. | .. |
|---|
| 5220 | 5832 | __ftrace_graph_open(struct inode *inode, struct file *file, |
|---|
| 5221 | 5833 | struct ftrace_graph_data *fgd) |
|---|
| 5222 | 5834 | { |
|---|
| 5223 | | - int ret = 0; |
|---|
| 5835 | + int ret; |
|---|
| 5224 | 5836 | struct ftrace_hash *new_hash = NULL; |
|---|
| 5837 | + |
|---|
| 5838 | + ret = security_locked_down(LOCKDOWN_TRACEFS); |
|---|
| 5839 | + if (ret) |
|---|
| 5840 | + return ret; |
|---|
| 5225 | 5841 | |
|---|
| 5226 | 5842 | if (file->f_mode & FMODE_WRITE) { |
|---|
| 5227 | 5843 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
|---|
| .. | .. |
|---|
| 5382 | 5998 | * infrastructure to do the synchronization, thus we must do it |
|---|
| 5383 | 5999 | * ourselves. |
|---|
| 5384 | 6000 | */ |
|---|
| 5385 | | - schedule_on_each_cpu(ftrace_sync); |
|---|
| 6001 | + synchronize_rcu_tasks_rude(); |
|---|
| 5386 | 6002 | |
|---|
| 5387 | 6003 | free_ftrace_hash(old_hash); |
|---|
| 5388 | 6004 | } |
|---|
| .. | .. |
|---|
| 5514 | 6130 | |
|---|
| 5515 | 6131 | /* |
|---|
| 5516 | 6132 | * The name "destroy_filter_files" is really a misnomer. Although |
|---|
| 5517 | | - * in the future, it may actualy delete the files, but this is |
|---|
| 6133 | + * in the future, it may actually delete the files, but this is |
|---|
| 5518 | 6134 | * really intended to make sure the ops passed in are disabled |
|---|
| 5519 | 6135 | * and that when this function returns, the caller is free to |
|---|
| 5520 | 6136 | * free the ops. |
|---|
| .. | .. |
|---|
| 5567 | 6183 | return 0; |
|---|
| 5568 | 6184 | } |
|---|
| 5569 | 6185 | |
|---|
| 5570 | | -static int __norecordmcount ftrace_process_locs(struct module *mod, |
|---|
| 5571 | | - unsigned long *start, |
|---|
| 5572 | | - unsigned long *end) |
|---|
| 6186 | +static int ftrace_process_locs(struct module *mod, |
|---|
| 6187 | + unsigned long *start, |
|---|
| 6188 | + unsigned long *end) |
|---|
| 5573 | 6189 | { |
|---|
| 5574 | 6190 | struct ftrace_page *start_pg; |
|---|
| 5575 | 6191 | struct ftrace_page *pg; |
|---|
| .. | .. |
|---|
| 5683 | 6299 | unsigned int num_funcs; |
|---|
| 5684 | 6300 | }; |
|---|
| 5685 | 6301 | |
|---|
| 6302 | +static int ftrace_get_trampoline_kallsym(unsigned int symnum, |
|---|
| 6303 | + unsigned long *value, char *type, |
|---|
| 6304 | + char *name, char *module_name, |
|---|
| 6305 | + int *exported) |
|---|
| 6306 | +{ |
|---|
| 6307 | + struct ftrace_ops *op; |
|---|
| 6308 | + |
|---|
| 6309 | + list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { |
|---|
| 6310 | + if (!op->trampoline || symnum--) |
|---|
| 6311 | + continue; |
|---|
| 6312 | + *value = op->trampoline; |
|---|
| 6313 | + *type = 't'; |
|---|
| 6314 | + strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); |
|---|
| 6315 | + strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); |
|---|
| 6316 | + *exported = 0; |
|---|
| 6317 | + return 0; |
|---|
| 6318 | + } |
|---|
| 6319 | + |
|---|
| 6320 | + return -ERANGE; |
|---|
| 6321 | +} |
|---|
| 6322 | + |
|---|
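ftrace_get_trampoline_kallsym() makes dynamically allocated trampolines visible through the kallsyms iterator, so tools such as perf can resolve samples that land inside them. A small user-space check, assuming the entries appear in /proc/kallsyms under the FTRACE_TRAMPOLINE_SYM name used above:

```c
#include <stdio.h>
#include <string.h>

/* Print any ftrace trampoline symbols exported via kallsyms */
int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "ftrace_trampoline"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```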
| 5686 | 6323 | #ifdef CONFIG_MODULES |
|---|
| 5687 | 6324 | |
|---|
| 5688 | 6325 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) |
|---|
| .. | .. |
|---|
| 5696 | 6333 | |
|---|
| 5697 | 6334 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { |
|---|
| 5698 | 6335 | if (ops_references_rec(ops, rec)) { |
|---|
| 6336 | + if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) |
|---|
| 6337 | + continue; |
|---|
| 6338 | + if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
|---|
| 6339 | + continue; |
|---|
| 5699 | 6340 | cnt++; |
|---|
| 5700 | 6341 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
|---|
| 5701 | 6342 | rec->flags |= FTRACE_FL_REGS; |
|---|
| 6343 | + if (cnt == 1 && ops->trampoline) |
|---|
| 6344 | + rec->flags |= FTRACE_FL_TRAMP; |
|---|
| 6345 | + else |
|---|
| 6346 | + rec->flags &= ~FTRACE_FL_TRAMP; |
|---|
| 5702 | 6347 | } |
|---|
| 5703 | 6348 | } |
|---|
| 5704 | 6349 | |
|---|
| .. | .. |
|---|
| 5779 | 6424 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
|---|
| 5780 | 6425 | if (mod_map->mod == mod) { |
|---|
| 5781 | 6426 | list_del_rcu(&mod_map->list); |
|---|
| 5782 | | - call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map); |
|---|
| 6427 | + call_rcu(&mod_map->rcu, ftrace_free_mod_map); |
|---|
| 5783 | 6428 | break; |
|---|
| 5784 | 6429 | } |
|---|
| 5785 | 6430 | } |
|---|
| .. | .. |
|---|
| 5821 | 6466 | clear_mod_from_hashes(pg); |
|---|
| 5822 | 6467 | |
|---|
| 5823 | 6468 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
|---|
| 5824 | | - free_pages((unsigned long)pg->records, order); |
|---|
| 6469 | + if (order >= 0) |
|---|
| 6470 | + free_pages((unsigned long)pg->records, order); |
|---|
| 5825 | 6471 | tmp_page = pg->next; |
|---|
| 5826 | 6472 | kfree(pg); |
|---|
| 6473 | + ftrace_number_of_pages -= 1 << order; |
|---|
| 6474 | + ftrace_number_of_groups--; |
|---|
| 5827 | 6475 | } |
|---|
| 5828 | 6476 | } |
|---|
| 5829 | 6477 | |
|---|
| .. | .. |
|---|
| 5840 | 6488 | /* |
|---|
| 5841 | 6489 | * If the tracing is enabled, go ahead and enable the record. |
|---|
| 5842 | 6490 | * |
|---|
| 5843 | | - * The reason not to enable the record immediatelly is the |
|---|
| 6491 | + * The reason not to enable the record immediately is the |
|---|
| 5844 | 6492 | * inherent check of ftrace_make_nop/ftrace_make_call for |
|---|
| 5845 | 6493 | * correct previous instructions. Making first the NOP |
|---|
| 5846 | 6494 | * conversion puts the module to the correct state, thus |
|---|
| .. | .. |
|---|
| 5999 | 6647 | struct ftrace_mod_map *mod_map; |
|---|
| 6000 | 6648 | const char *ret = NULL; |
|---|
| 6001 | 6649 | |
|---|
| 6002 | | - /* mod_map is freed via call_rcu_sched() */ |
|---|
| 6650 | + /* mod_map is freed via call_rcu() */ |
|---|
| 6003 | 6651 | preempt_disable(); |
|---|
| 6004 | 6652 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
|---|
| 6005 | 6653 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); |
|---|
| .. | .. |
|---|
| 6020 | 6668 | { |
|---|
| 6021 | 6669 | struct ftrace_mod_map *mod_map; |
|---|
| 6022 | 6670 | struct ftrace_mod_func *mod_func; |
|---|
| 6671 | + int ret; |
|---|
| 6023 | 6672 | |
|---|
| 6024 | 6673 | preempt_disable(); |
|---|
| 6025 | 6674 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
|---|
| .. | .. |
|---|
| 6046 | 6695 | WARN_ON(1); |
|---|
| 6047 | 6696 | break; |
|---|
| 6048 | 6697 | } |
|---|
| 6698 | + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
|---|
| 6699 | + module_name, exported); |
|---|
| 6049 | 6700 | preempt_enable(); |
|---|
| 6050 | | - return -ERANGE; |
|---|
| 6701 | + return ret; |
|---|
| 6051 | 6702 | } |
|---|
| 6052 | 6703 | |
|---|
| 6053 | 6704 | #else |
|---|
| .. | .. |
|---|
| 6058 | 6709 | unsigned long start, unsigned long end) |
|---|
| 6059 | 6710 | { |
|---|
| 6060 | 6711 | return NULL; |
|---|
| 6712 | +} |
|---|
| 6713 | +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
|---|
| 6714 | + char *type, char *name, char *module_name, |
|---|
| 6715 | + int *exported) |
|---|
| 6716 | +{ |
|---|
| 6717 | + int ret; |
|---|
| 6718 | + |
|---|
| 6719 | + preempt_disable(); |
|---|
| 6720 | + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
|---|
| 6721 | + module_name, exported); |
|---|
| 6722 | + preempt_enable(); |
|---|
| 6723 | + return ret; |
|---|
| 6061 | 6724 | } |
|---|
| 6062 | 6725 | #endif /* CONFIG_MODULES */ |
|---|
| 6063 | 6726 | |
|---|
| .. | .. |
|---|
| 6072 | 6735 | { |
|---|
| 6073 | 6736 | struct ftrace_func_entry *entry; |
|---|
| 6074 | 6737 | |
|---|
| 6075 | | - if (ftrace_hash_empty(hash)) |
|---|
| 6076 | | - return; |
|---|
| 6077 | | - |
|---|
| 6078 | | - entry = __ftrace_lookup_ip(hash, func->ip); |
|---|
| 6079 | | - |
|---|
| 6738 | + entry = ftrace_lookup_ip(hash, func->ip); |
|---|
| 6080 | 6739 | /* |
|---|
| 6081 | 6740 | * Do not allow this rec to match again. |
|---|
| 6082 | 6741 | * Yeah, it may waste some memory, but will be removed |
|---|
| .. | .. |
|---|
| 6110 | 6769 | |
|---|
| 6111 | 6770 | func = kmalloc(sizeof(*func), GFP_KERNEL); |
|---|
| 6112 | 6771 | if (!func) { |
|---|
| 6113 | | - WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); |
|---|
| 6772 | + MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); |
|---|
| 6114 | 6773 | return; |
|---|
| 6115 | 6774 | } |
|---|
| 6116 | 6775 | |
|---|
| .. | .. |
|---|
| 6168 | 6827 | if (!pg->index) { |
|---|
| 6169 | 6828 | *last_pg = pg->next; |
|---|
| 6170 | 6829 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
|---|
| 6171 | | - free_pages((unsigned long)pg->records, order); |
|---|
| 6830 | + if (order >= 0) |
|---|
| 6831 | + free_pages((unsigned long)pg->records, order); |
|---|
| 6832 | + ftrace_number_of_pages -= 1 << order; |
|---|
| 6833 | + ftrace_number_of_groups--; |
|---|
| 6172 | 6834 | kfree(pg); |
|---|
| 6173 | 6835 | pg = container_of(last_pg, struct ftrace_page, next); |
|---|
| 6174 | 6836 | if (!(*last_pg)) |
|---|
| .. | .. |
|---|
| 6216 | 6878 | } |
|---|
| 6217 | 6879 | |
|---|
| 6218 | 6880 | pr_info("ftrace: allocating %ld entries in %ld pages\n", |
|---|
| 6219 | | - count, count / ENTRIES_PER_PAGE + 1); |
|---|
| 6881 | + count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
|---|
| 6220 | 6882 | |
|---|
| 6221 | 6883 | last_ftrace_enabled = ftrace_enabled = 1; |
|---|
| 6222 | 6884 | |
|---|
| 6223 | 6885 | ret = ftrace_process_locs(NULL, |
|---|
| 6224 | 6886 | __start_mcount_loc, |
|---|
| 6225 | 6887 | __stop_mcount_loc); |
|---|
| 6888 | + |
|---|
| 6889 | + pr_info("ftrace: allocated %ld pages with %ld groups\n", |
|---|
| 6890 | + ftrace_number_of_pages, ftrace_number_of_groups); |
|---|
| 6226 | 6891 | |
|---|
| 6227 | 6892 | set_ftrace_early_filters(); |
|---|
| 6228 | 6893 | |
|---|
| .. | .. |
|---|
| 6238 | 6903 | |
|---|
| 6239 | 6904 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
|---|
| 6240 | 6905 | { |
|---|
| 6906 | + unsigned long trampoline = ops->trampoline; |
|---|
| 6907 | + |
|---|
| 6241 | 6908 | arch_ftrace_update_trampoline(ops); |
|---|
| 6909 | + if (ops->trampoline && ops->trampoline != trampoline && |
|---|
| 6910 | + (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { |
|---|
| 6911 | + /* Add to kallsyms before the perf events */ |
|---|
| 6912 | + ftrace_add_trampoline_to_kallsyms(ops); |
|---|
| 6913 | + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
|---|
| 6914 | + ops->trampoline, ops->trampoline_size, false, |
|---|
| 6915 | + FTRACE_TRAMPOLINE_SYM); |
|---|
| 6916 | + /* |
|---|
| 6917 | + * Record the perf text poke event after the ksymbol register |
|---|
| 6918 | + * event. |
|---|
| 6919 | + */ |
|---|
| 6920 | + perf_event_text_poke((void *)ops->trampoline, NULL, 0, |
|---|
| 6921 | + (void *)ops->trampoline, |
|---|
| 6922 | + ops->trampoline_size); |
|---|
| 6923 | + } |
|---|
| 6242 | 6924 | } |
|---|
| 6243 | 6925 | |
|---|
| 6244 | 6926 | void ftrace_init_trace_array(struct trace_array *tr) |
|---|
| .. | .. |
|---|
| 6249 | 6931 | } |
|---|
| 6250 | 6932 | #else |
|---|
| 6251 | 6933 | |
|---|
| 6252 | | -static struct ftrace_ops global_ops = { |
|---|
| 6934 | +struct ftrace_ops global_ops = { |
|---|
| 6253 | 6935 | .func = ftrace_stub, |
|---|
| 6254 | 6936 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
|---|
| 6255 | 6937 | FTRACE_OPS_FL_INITIALIZED | |
|---|
| .. | .. |
|---|
| 6266 | 6948 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
|---|
| 6267 | 6949 | static inline void ftrace_startup_enable(int command) { } |
|---|
| 6268 | 6950 | static inline void ftrace_startup_all(int command) { } |
|---|
| 6269 | | -/* Keep as macros so we do not need to define the commands */ |
|---|
| 6270 | | -# define ftrace_startup(ops, command) \ |
|---|
| 6271 | | - ({ \ |
|---|
| 6272 | | - int ___ret = __register_ftrace_function(ops); \ |
|---|
| 6273 | | - if (!___ret) \ |
|---|
| 6274 | | - (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ |
|---|
| 6275 | | - ___ret; \ |
|---|
| 6276 | | - }) |
|---|
| 6277 | | -# define ftrace_shutdown(ops, command) \ |
|---|
| 6278 | | - ({ \ |
|---|
| 6279 | | - int ___ret = __unregister_ftrace_function(ops); \ |
|---|
| 6280 | | - if (!___ret) \ |
|---|
| 6281 | | - (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ |
|---|
| 6282 | | - ___ret; \ |
|---|
| 6283 | | - }) |
|---|
| 6284 | 6951 | |
|---|
| 6285 | 6952 | # define ftrace_startup_sysctl() do { } while (0) |
|---|
| 6286 | 6953 | # define ftrace_shutdown_sysctl() do { } while (0) |
|---|
| 6287 | | - |
|---|
| 6288 | | -static inline int |
|---|
| 6289 | | -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
|---|
| 6290 | | -{ |
|---|
| 6291 | | - return 1; |
|---|
| 6292 | | -} |
|---|
| 6293 | 6954 | |
|---|
| 6294 | 6955 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
|---|
| 6295 | 6956 | { |
|---|
| .. | .. |
|---|
| 6334 | 6995 | |
|---|
| 6335 | 6996 | /* |
|---|
| 6336 | 6997 | * Some of the ops may be dynamically allocated, |
|---|
| 6337 | | - * they must be freed after a synchronize_sched(). |
|---|
| 6998 | + * they must be freed after a synchronize_rcu(). |
|---|
| 6338 | 6999 | */ |
|---|
| 6339 | 7000 | preempt_disable_notrace(); |
|---|
| 6340 | 7001 | |
|---|
| 6341 | 7002 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
|---|
| 7003 | + /* Stub functions don't need to be called or tested */
|---|
| 7004 | + if (op->flags & FTRACE_OPS_FL_STUB) |
|---|
| 7005 | + continue; |
|---|
| 6342 | 7006 | /* |
|---|
| 6343 | 7007 | * Check the following for each ops before calling their func: |
|---|
| 6344 | 7008 | * if RCU flag is set, then rcu_is_watching() must be true |
|---|
| .. | .. |
|---|
| 6383 | 7047 | } |
|---|
| 6384 | 7048 | NOKPROBE_SYMBOL(ftrace_ops_list_func); |
|---|
| 6385 | 7049 | #else |
|---|
| 6386 | | -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip, |
|---|
| 6387 | | - struct ftrace_ops *op, struct pt_regs *regs) |
|---|
| 7050 | +static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) |
|---|
| 6388 | 7051 | { |
|---|
| 6389 | 7052 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
|---|
| 6390 | 7053 | } |
|---|
| .. | .. |
|---|
| 6445 | 7108 | { |
|---|
| 6446 | 7109 | struct trace_array *tr = data; |
|---|
| 6447 | 7110 | struct trace_pid_list *pid_list; |
|---|
| 7111 | + struct trace_pid_list *no_pid_list; |
|---|
| 6448 | 7112 | |
|---|
| 6449 | 7113 | pid_list = rcu_dereference_sched(tr->function_pids); |
|---|
| 7114 | + no_pid_list = rcu_dereference_sched(tr->function_no_pids); |
|---|
| 6450 | 7115 | |
|---|
| 6451 | | - this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, |
|---|
| 6452 | | - trace_ignore_this_task(pid_list, next)); |
|---|
| 7116 | + if (trace_ignore_this_task(pid_list, no_pid_list, next)) |
|---|
| 7117 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
|---|
| 7118 | + FTRACE_PID_IGNORE); |
|---|
| 7119 | + else |
|---|
| 7120 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
|---|
| 7121 | + next->pid); |
|---|
| 6453 | 7122 | } |
|---|
| 6454 | 7123 | |
|---|
| 6455 | 7124 | static void |
|---|
| .. | .. |
|---|
| 6462 | 7131 | |
|---|
| 6463 | 7132 | pid_list = rcu_dereference_sched(tr->function_pids); |
|---|
| 6464 | 7133 | trace_filter_add_remove_task(pid_list, self, task); |
|---|
| 7134 | + |
|---|
| 7135 | + pid_list = rcu_dereference_sched(tr->function_no_pids); |
|---|
| 7136 | + trace_filter_add_remove_task(pid_list, self, task); |
|---|
| 6465 | 7137 | } |
|---|
| 6466 | 7138 | |
|---|
| 6467 | 7139 | static void |
|---|
| .. | .. |
|---|
| 6471 | 7143 | struct trace_array *tr = data; |
|---|
| 6472 | 7144 | |
|---|
| 6473 | 7145 | pid_list = rcu_dereference_sched(tr->function_pids); |
|---|
| 7146 | + trace_filter_add_remove_task(pid_list, NULL, task); |
|---|
| 7147 | + |
|---|
| 7148 | + pid_list = rcu_dereference_sched(tr->function_no_pids); |
|---|
| 6474 | 7149 | trace_filter_add_remove_task(pid_list, NULL, task); |
|---|
| 6475 | 7150 | } |
|---|
| 6476 | 7151 | |
|---|
| .. | .. |
|---|
| 6489 | 7164 | } |
|---|
| 6490 | 7165 | } |
|---|
| 6491 | 7166 | |
|---|
| 6492 | | -static void clear_ftrace_pids(struct trace_array *tr) |
|---|
| 7167 | +static void clear_ftrace_pids(struct trace_array *tr, int type) |
|---|
| 6493 | 7168 | { |
|---|
| 6494 | 7169 | struct trace_pid_list *pid_list; |
|---|
| 7170 | + struct trace_pid_list *no_pid_list; |
|---|
| 6495 | 7171 | int cpu; |
|---|
| 6496 | 7172 | |
|---|
| 6497 | 7173 | pid_list = rcu_dereference_protected(tr->function_pids, |
|---|
| 6498 | 7174 | lockdep_is_held(&ftrace_lock)); |
|---|
| 6499 | | - if (!pid_list) |
|---|
| 7175 | + no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
|---|
| 7176 | + lockdep_is_held(&ftrace_lock)); |
|---|
| 7177 | + |
|---|
| 7178 | + /* Make sure there's something to do */ |
|---|
| 7179 | + if (!pid_type_enabled(type, pid_list, no_pid_list)) |
|---|
| 6500 | 7180 | return; |
|---|
| 6501 | 7181 | |
|---|
| 6502 | | - unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
|---|
| 7182 | + /* See if the pids still need to be checked after this */ |
|---|
| 7183 | + if (!still_need_pid_events(type, pid_list, no_pid_list)) { |
|---|
| 7184 | + unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
|---|
| 7185 | + for_each_possible_cpu(cpu) |
|---|
| 7186 | + per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; |
|---|
| 7187 | + } |
|---|
| 6503 | 7188 | |
|---|
| 6504 | | - for_each_possible_cpu(cpu) |
|---|
| 6505 | | - per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; |
|---|
| 7189 | + if (type & TRACE_PIDS) |
|---|
| 7190 | + rcu_assign_pointer(tr->function_pids, NULL); |
|---|
| 6506 | 7191 | |
|---|
| 6507 | | - rcu_assign_pointer(tr->function_pids, NULL); |
|---|
| 7192 | + if (type & TRACE_NO_PIDS) |
|---|
| 7193 | + rcu_assign_pointer(tr->function_no_pids, NULL); |
|---|
| 6508 | 7194 | |
|---|
| 6509 | 7195 | /* Wait till all users are no longer using pid filtering */ |
|---|
| 6510 | | - synchronize_sched(); |
|---|
| 7196 | + synchronize_rcu(); |
|---|
| 6511 | 7197 | |
|---|
| 6512 | | - trace_free_pid_list(pid_list); |
|---|
| 7198 | + if ((type & TRACE_PIDS) && pid_list) |
|---|
| 7199 | + trace_free_pid_list(pid_list); |
|---|
| 7200 | + |
|---|
| 7201 | + if ((type & TRACE_NO_PIDS) && no_pid_list) |
|---|
| 7202 | + trace_free_pid_list(no_pid_list); |
|---|
| 6513 | 7203 | } |
|---|
| 6514 | 7204 | |
|---|
| 6515 | 7205 | void ftrace_clear_pids(struct trace_array *tr) |
|---|
| 6516 | 7206 | { |
|---|
| 6517 | 7207 | mutex_lock(&ftrace_lock); |
|---|
| 6518 | 7208 | |
|---|
| 6519 | | - clear_ftrace_pids(tr); |
|---|
| 7209 | + clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); |
|---|
| 6520 | 7210 | |
|---|
| 6521 | 7211 | mutex_unlock(&ftrace_lock); |
|---|
| 6522 | 7212 | } |
|---|
| 6523 | 7213 | |
|---|
| 6524 | | -static void ftrace_pid_reset(struct trace_array *tr) |
|---|
| 7214 | +static void ftrace_pid_reset(struct trace_array *tr, int type) |
|---|
| 6525 | 7215 | { |
|---|
| 6526 | 7216 | mutex_lock(&ftrace_lock); |
|---|
| 6527 | | - clear_ftrace_pids(tr); |
|---|
| 7217 | + clear_ftrace_pids(tr, type); |
|---|
| 6528 | 7218 | |
|---|
| 6529 | 7219 | ftrace_update_pid_func(); |
|---|
| 6530 | 7220 | ftrace_startup_all(0); |
|---|
| .. | .. |
|---|
| 6588 | 7278 | .show = fpid_show, |
|---|
| 6589 | 7279 | }; |
|---|
| 6590 | 7280 | |
|---|
| 6591 | | -static int |
|---|
| 6592 | | -ftrace_pid_open(struct inode *inode, struct file *file) |
|---|
| 7281 | +static void *fnpid_start(struct seq_file *m, loff_t *pos) |
|---|
| 7282 | + __acquires(RCU) |
|---|
| 6593 | 7283 | { |
|---|
| 7284 | + struct trace_pid_list *pid_list; |
|---|
| 7285 | + struct trace_array *tr = m->private; |
|---|
| 7286 | + |
|---|
| 7287 | + mutex_lock(&ftrace_lock); |
|---|
| 7288 | + rcu_read_lock_sched(); |
|---|
| 7289 | + |
|---|
| 7290 | + pid_list = rcu_dereference_sched(tr->function_no_pids); |
|---|
| 7291 | + |
|---|
| 7292 | + if (!pid_list) |
|---|
| 7293 | + return !(*pos) ? FTRACE_NO_PIDS : NULL; |
|---|
| 7294 | + |
|---|
| 7295 | + return trace_pid_start(pid_list, pos); |
|---|
| 7296 | +} |
|---|
| 7297 | + |
|---|
| 7298 | +static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) |
|---|
| 7299 | +{ |
|---|
| 7300 | + struct trace_array *tr = m->private; |
|---|
| 7301 | + struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); |
|---|
| 7302 | + |
|---|
| 7303 | + if (v == FTRACE_NO_PIDS) { |
|---|
| 7304 | + (*pos)++; |
|---|
| 7305 | + return NULL; |
|---|
| 7306 | + } |
|---|
| 7307 | + return trace_pid_next(pid_list, v, pos); |
|---|
| 7308 | +} |
|---|
| 7309 | + |
|---|
| 7310 | +static const struct seq_operations ftrace_no_pid_sops = { |
|---|
| 7311 | + .start = fnpid_start, |
|---|
| 7312 | + .next = fnpid_next, |
|---|
| 7313 | + .stop = fpid_stop, |
|---|
| 7314 | + .show = fpid_show, |
|---|
| 7315 | +}; |
|---|
| 7316 | + |
|---|
| 7317 | +static int pid_open(struct inode *inode, struct file *file, int type) |
|---|
| 7318 | +{ |
|---|
| 7319 | + const struct seq_operations *seq_ops; |
|---|
| 6594 | 7320 | struct trace_array *tr = inode->i_private; |
|---|
| 6595 | 7321 | struct seq_file *m; |
|---|
| 6596 | 7322 | int ret = 0; |
|---|
| 6597 | 7323 | |
|---|
| 6598 | | - if (trace_array_get(tr) < 0) |
|---|
| 6599 | | - return -ENODEV; |
|---|
| 7324 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 7325 | + if (ret) |
|---|
| 7326 | + return ret; |
|---|
| 6600 | 7327 | |
|---|
| 6601 | 7328 | if ((file->f_mode & FMODE_WRITE) && |
|---|
| 6602 | 7329 | (file->f_flags & O_TRUNC)) |
|---|
| 6603 | | - ftrace_pid_reset(tr); |
|---|
| 7330 | + ftrace_pid_reset(tr, type); |
|---|
| 6604 | 7331 | |
|---|
| 6605 | | - ret = seq_open(file, &ftrace_pid_sops); |
|---|
| 7332 | + switch (type) { |
|---|
| 7333 | + case TRACE_PIDS: |
|---|
| 7334 | + seq_ops = &ftrace_pid_sops; |
|---|
| 7335 | + break; |
|---|
| 7336 | + case TRACE_NO_PIDS: |
|---|
| 7337 | + seq_ops = &ftrace_no_pid_sops; |
|---|
| 7338 | + break; |
|---|
| 7339 | + default: |
|---|
| 7340 | + trace_array_put(tr); |
|---|
| 7341 | + WARN_ON_ONCE(1); |
|---|
| 7342 | + return -EINVAL; |
|---|
| 7343 | + } |
|---|
| 7344 | + |
|---|
| 7345 | + ret = seq_open(file, seq_ops); |
|---|
| 6606 | 7346 | if (ret < 0) { |
|---|
| 6607 | 7347 | trace_array_put(tr); |
|---|
| 6608 | 7348 | } else { |
|---|
| .. | .. |
|---|
| 6614 | 7354 | return ret; |
|---|
| 6615 | 7355 | } |
|---|
| 6616 | 7356 | |
|---|
| 7357 | +static int |
|---|
| 7358 | +ftrace_pid_open(struct inode *inode, struct file *file) |
|---|
| 7359 | +{ |
|---|
| 7360 | + return pid_open(inode, file, TRACE_PIDS); |
|---|
| 7361 | +} |
|---|
| 7362 | + |
|---|
| 7363 | +static int |
|---|
| 7364 | +ftrace_no_pid_open(struct inode *inode, struct file *file) |
|---|
| 7365 | +{ |
|---|
| 7366 | + return pid_open(inode, file, TRACE_NO_PIDS); |
|---|
| 7367 | +} |
|---|
| 7368 | + |
|---|
| 6617 | 7369 | static void ignore_task_cpu(void *data) |
|---|
| 6618 | 7370 | { |
|---|
| 6619 | 7371 | struct trace_array *tr = data; |
|---|
| 6620 | 7372 | struct trace_pid_list *pid_list; |
|---|
| 7373 | + struct trace_pid_list *no_pid_list; |
|---|
| 6621 | 7374 | |
|---|
| 6622 | 7375 | /* |
|---|
| 6623 | 7376 | * This function is called by on_each_cpu() while the |
|---|
| .. | .. |
|---|
| 6625 | 7378 | */ |
|---|
| 6626 | 7379 | pid_list = rcu_dereference_protected(tr->function_pids, |
|---|
| 6627 | 7380 | mutex_is_locked(&ftrace_lock)); |
|---|
| 7381 | + no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
|---|
| 7382 | + mutex_is_locked(&ftrace_lock)); |
|---|
| 6628 | 7383 | |
|---|
| 6629 | | - this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, |
|---|
| 6630 | | - trace_ignore_this_task(pid_list, current)); |
|---|
| 7384 | + if (trace_ignore_this_task(pid_list, no_pid_list, current)) |
|---|
| 7385 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
|---|
| 7386 | + FTRACE_PID_IGNORE); |
|---|
| 7387 | + else |
|---|
| 7388 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
|---|
| 7389 | + current->pid); |
|---|
| 6631 | 7390 | } |
|---|
| 6632 | 7391 | |
|---|
| 6633 | 7392 | static ssize_t |
|---|
| 6634 | | -ftrace_pid_write(struct file *filp, const char __user *ubuf, |
|---|
| 6635 | | - size_t cnt, loff_t *ppos) |
|---|
| 7393 | +pid_write(struct file *filp, const char __user *ubuf, |
|---|
| 7394 | + size_t cnt, loff_t *ppos, int type) |
|---|
| 6636 | 7395 | { |
|---|
| 6637 | 7396 | struct seq_file *m = filp->private_data; |
|---|
| 6638 | 7397 | struct trace_array *tr = m->private; |
|---|
| 6639 | | - struct trace_pid_list *filtered_pids = NULL; |
|---|
| 7398 | + struct trace_pid_list *filtered_pids; |
|---|
| 7399 | + struct trace_pid_list *other_pids; |
|---|
| 6640 | 7400 | struct trace_pid_list *pid_list; |
|---|
| 6641 | 7401 | ssize_t ret; |
|---|
| 6642 | 7402 | |
|---|
| .. | .. |
|---|
| 6645 | 7405 | |
|---|
| 6646 | 7406 | mutex_lock(&ftrace_lock); |
|---|
| 6647 | 7407 | |
|---|
| 6648 | | - filtered_pids = rcu_dereference_protected(tr->function_pids, |
|---|
| 7408 | + switch (type) { |
|---|
| 7409 | + case TRACE_PIDS: |
|---|
| 7410 | + filtered_pids = rcu_dereference_protected(tr->function_pids, |
|---|
| 6649 | 7411 | lockdep_is_held(&ftrace_lock)); |
|---|
| 7412 | + other_pids = rcu_dereference_protected(tr->function_no_pids, |
|---|
| 7413 | + lockdep_is_held(&ftrace_lock)); |
|---|
| 7414 | + break; |
|---|
| 7415 | + case TRACE_NO_PIDS: |
|---|
| 7416 | + filtered_pids = rcu_dereference_protected(tr->function_no_pids, |
|---|
| 7417 | + lockdep_is_held(&ftrace_lock)); |
|---|
| 7418 | + other_pids = rcu_dereference_protected(tr->function_pids, |
|---|
| 7419 | + lockdep_is_held(&ftrace_lock)); |
|---|
| 7420 | + break; |
|---|
| 7421 | + default: |
|---|
| 7422 | + ret = -EINVAL; |
|---|
| 7423 | + WARN_ON_ONCE(1); |
|---|
| 7424 | + goto out; |
|---|
| 7425 | + } |
|---|
| 6650 | 7426 | |
|---|
| 6651 | 7427 | ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); |
|---|
| 6652 | 7428 | if (ret < 0) |
|---|
| 6653 | 7429 | goto out; |
|---|
| 6654 | 7430 | |
|---|
| 6655 | | - rcu_assign_pointer(tr->function_pids, pid_list); |
|---|
| 7431 | + switch (type) { |
|---|
| 7432 | + case TRACE_PIDS: |
|---|
| 7433 | + rcu_assign_pointer(tr->function_pids, pid_list); |
|---|
| 7434 | + break; |
|---|
| 7435 | + case TRACE_NO_PIDS: |
|---|
| 7436 | + rcu_assign_pointer(tr->function_no_pids, pid_list); |
|---|
| 7437 | + break; |
|---|
| 7438 | + } |
|---|
| 7439 | + |
|---|
| 6656 | 7440 | |
|---|
| 6657 | 7441 | if (filtered_pids) { |
|---|
| 6658 | | - synchronize_sched(); |
|---|
| 7442 | + synchronize_rcu(); |
|---|
| 6659 | 7443 | trace_free_pid_list(filtered_pids); |
|---|
| 6660 | | - } else if (pid_list) { |
|---|
| 7444 | + } else if (pid_list && !other_pids) { |
|---|
| 6661 | 7445 | /* Register a probe to set whether to ignore the tracing of a task */ |
|---|
| 6662 | 7446 | register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
|---|
| 6663 | 7447 | } |
|---|
| .. | .. |
|---|
| 6680 | 7464 | return ret; |
|---|
| 6681 | 7465 | } |
|---|
| 6682 | 7466 | |
|---|
| 7467 | +static ssize_t |
|---|
| 7468 | +ftrace_pid_write(struct file *filp, const char __user *ubuf, |
|---|
| 7469 | + size_t cnt, loff_t *ppos) |
|---|
| 7470 | +{ |
|---|
| 7471 | + return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS); |
|---|
| 7472 | +} |
|---|
| 7473 | + |
|---|
| 7474 | +static ssize_t |
|---|
| 7475 | +ftrace_no_pid_write(struct file *filp, const char __user *ubuf, |
|---|
| 7476 | + size_t cnt, loff_t *ppos) |
|---|
| 7477 | +{ |
|---|
| 7478 | + return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS); |
|---|
| 7479 | +} |
|---|
| 7480 | + |
|---|
| 6683 | 7481 | static int |
|---|
| 6684 | 7482 | ftrace_pid_release(struct inode *inode, struct file *file) |
|---|
| 6685 | 7483 | { |
|---|
| .. | .. |
|---|
| 6698 | 7496 | .release = ftrace_pid_release, |
|---|
| 6699 | 7497 | }; |
|---|
| 6700 | 7498 | |
|---|
| 7499 | +static const struct file_operations ftrace_no_pid_fops = { |
|---|
| 7500 | + .open = ftrace_no_pid_open, |
|---|
| 7501 | + .write = ftrace_no_pid_write, |
|---|
| 7502 | + .read = seq_read, |
|---|
| 7503 | + .llseek = tracing_lseek, |
|---|
| 7504 | + .release = ftrace_pid_release, |
|---|
| 7505 | +}; |
|---|
| 7506 | + |
|---|
| 6701 | 7507 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
|---|
| 6702 | 7508 | { |
|---|
| 6703 | 7509 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
|---|
| 6704 | 7510 | tr, &ftrace_pid_fops); |
|---|
| 7511 | + trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer, |
|---|
| 7512 | + tr, &ftrace_no_pid_fops); |
|---|
| 6705 | 7513 | } |
|---|
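The new set_ftrace_notrace_pid file is the inverse of set_ftrace_pid: the listed PIDs are excluded from function tracing rather than selected for it. A minimal sketch that excludes the current process, assuming tracefs is mounted at /sys/kernel/tracing:

```c
#include <stdio.h>
#include <unistd.h>

/* Tell the function tracer to ignore this process */
int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_ftrace_notrace_pid", "w");

	if (!f) {
		perror("set_ftrace_notrace_pid");
		return 1;
	}
	fprintf(f, "%d\n", getpid());
	fclose(f);
	return 0;
}
```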
| 6706 | 7514 | |
|---|
| 6707 | 7515 | void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, |
|---|
| .. | .. |
|---|
| 6781 | 7589 | } |
|---|
| 6782 | 7590 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
|---|
| 6783 | 7591 | |
|---|
| 7592 | +static bool is_permanent_ops_registered(void) |
|---|
| 7593 | +{ |
|---|
| 7594 | + struct ftrace_ops *op; |
|---|
| 7595 | + |
|---|
| 7596 | + do_for_each_ftrace_op(op, ftrace_ops_list) { |
|---|
| 7597 | + if (op->flags & FTRACE_OPS_FL_PERMANENT) |
|---|
| 7598 | + return true; |
|---|
| 7599 | + } while_for_each_ftrace_op(op); |
|---|
| 7600 | + |
|---|
| 7601 | + return false; |
|---|
| 7602 | +} |
|---|
| 7603 | + |
|---|
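is_permanent_ops_registered() backs the sysctl change below: while any ops with FTRACE_OPS_FL_PERMANENT is registered (live patching is the motivating user), writing 0 to the ftrace_enabled sysctl is refused with -EBUSY. A user-space sketch of observing that behavior:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Try to disable ftrace; expect EBUSY if a permanent ops is live */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");

	if (!f) {
		perror("ftrace_enabled");
		return 1;
	}
	/* The error surfaces on flush, once the write hits the kernel */
	if (fputs("0\n", f) == EOF || fflush(f) == EOF)
		printf("disable refused: %s\n", strerror(errno));
	else
		printf("ftrace disabled\n");
	fclose(f);
	return 0;
}
```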
| 6784 | 7604 | int |
|---|
| 6785 | 7605 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
|---|
| 6786 | | - void __user *buffer, size_t *lenp, |
|---|
| 6787 | | - loff_t *ppos) |
|---|
| 7606 | + void *buffer, size_t *lenp, loff_t *ppos) |
|---|
| 6788 | 7607 | { |
|---|
| 6789 | 7608 | int ret = -ENODEV; |
|---|
| 6790 | 7609 | |
|---|
| .. | .. |
|---|
| 6798 | 7617 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
|---|
| 6799 | 7618 | goto out; |
|---|
| 6800 | 7619 | |
|---|
| 6801 | | - last_ftrace_enabled = !!ftrace_enabled; |
|---|
| 6802 | | - |
|---|
| 6803 | 7620 | if (ftrace_enabled) { |
|---|
| 6804 | 7621 | |
|---|
| 6805 | 7622 | /* we are starting ftrace again */ |
|---|
| .. | .. |
|---|
| 6810 | 7627 | ftrace_startup_sysctl(); |
|---|
| 6811 | 7628 | |
|---|
| 6812 | 7629 | } else { |
|---|
| 7630 | + if (is_permanent_ops_registered()) { |
|---|
| 7631 | + ftrace_enabled = true; |
|---|
| 7632 | + ret = -EBUSY; |
|---|
| 7633 | + goto out; |
|---|
| 7634 | + } |
|---|
| 7635 | + |
|---|
| 6813 | 7636 | /* stopping ftrace calls (just send to ftrace_stub) */ |
|---|
| 6814 | 7637 | ftrace_trace_function = ftrace_stub; |
|---|
| 6815 | 7638 | |
|---|
| 6816 | 7639 | ftrace_shutdown_sysctl(); |
|---|
| 6817 | 7640 | } |
|---|
| 6818 | 7641 | |
|---|
| 7642 | + last_ftrace_enabled = !!ftrace_enabled; |
|---|
| 6819 | 7643 | out: |
|---|
| 6820 | 7644 | mutex_unlock(&ftrace_lock); |
|---|
| 6821 | 7645 | return ret; |
|---|
| 6822 | 7646 | } |
|---|
| 6823 | | - |
|---|
| 6824 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| 6825 | | - |
|---|
| 6826 | | -static struct ftrace_ops graph_ops = { |
|---|
| 6827 | | - .func = ftrace_stub, |
|---|
| 6828 | | - .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
|---|
| 6829 | | - FTRACE_OPS_FL_INITIALIZED | |
|---|
| 6830 | | - FTRACE_OPS_FL_PID | |
|---|
| 6831 | | - FTRACE_OPS_FL_STUB, |
|---|
| 6832 | | -#ifdef FTRACE_GRAPH_TRAMP_ADDR |
|---|
| 6833 | | - .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |
|---|
| 6834 | | - /* trampoline_size is only needed for dynamically allocated tramps */ |
|---|
| 6835 | | -#endif |
|---|
| 6836 | | - ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
|---|
| 6837 | | -}; |
|---|
| 6838 | | - |
|---|
| 6839 | | -void ftrace_graph_sleep_time_control(bool enable) |
|---|
| 6840 | | -{ |
|---|
| 6841 | | - fgraph_sleep_time = enable; |
|---|
| 6842 | | -} |
|---|
| 6843 | | - |
|---|
| 6844 | | -void ftrace_graph_graph_time_control(bool enable) |
|---|
| 6845 | | -{ |
|---|
| 6846 | | - fgraph_graph_time = enable; |
|---|
| 6847 | | -} |
|---|
| 6848 | | - |
|---|
| 6849 | | -void ftrace_graph_return_stub(struct ftrace_graph_ret *trace) |
|---|
| 6850 | | -{ |
|---|
| 6851 | | -} |
|---|
| 6852 | | - |
|---|
| 6853 | | -int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
|---|
| 6854 | | -{ |
|---|
| 6855 | | - return 0; |
|---|
| 6856 | | -} |
|---|
| 6857 | | - |
|---|
| 6858 | | -/* The callbacks that hook a function */ |
|---|
| 6859 | | -trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub; |
|---|
| 6860 | | -trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; |
|---|
| 6861 | | -static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; |
|---|
| 6862 | | - |
|---|
| 6863 | | -/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ |
|---|
| 6864 | | -static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
|---|
| 6865 | | -{ |
|---|
| 6866 | | - int i; |
|---|
| 6867 | | - int ret = 0; |
|---|
| 6868 | | - int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; |
|---|
| 6869 | | - struct task_struct *g, *t; |
|---|
| 6870 | | - |
|---|
| 6871 | | - for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { |
|---|
| 6872 | | - ret_stack_list[i] = |
|---|
| 6873 | | - kmalloc_array(FTRACE_RETFUNC_DEPTH, |
|---|
| 6874 | | - sizeof(struct ftrace_ret_stack), |
|---|
| 6875 | | - GFP_KERNEL); |
|---|
| 6876 | | - if (!ret_stack_list[i]) { |
|---|
| 6877 | | - start = 0; |
|---|
| 6878 | | - end = i; |
|---|
| 6879 | | - ret = -ENOMEM; |
|---|
| 6880 | | - goto free; |
|---|
| 6881 | | - } |
|---|
| 6882 | | - } |
|---|
| 6883 | | - |
|---|
| 6884 | | - read_lock(&tasklist_lock); |
|---|
| 6885 | | - do_each_thread(g, t) { |
|---|
| 6886 | | - if (start == end) { |
|---|
| 6887 | | - ret = -EAGAIN; |
|---|
| 6888 | | - goto unlock; |
|---|
| 6889 | | - } |
|---|
| 6890 | | - |
|---|
| 6891 | | - if (t->ret_stack == NULL) { |
|---|
| 6892 | | - atomic_set(&t->trace_overrun, 0); |
|---|
| 6893 | | - t->curr_ret_stack = -1; |
|---|
| 6894 | | - t->curr_ret_depth = -1; |
|---|
| 6895 | | - /* Make sure the tasks see the -1 first: */ |
|---|
| 6896 | | - smp_wmb(); |
|---|
| 6897 | | - t->ret_stack = ret_stack_list[start++]; |
|---|
| 6898 | | - } |
|---|
| 6899 | | - } while_each_thread(g, t); |
|---|
| 6900 | | - |
|---|
| 6901 | | -unlock: |
|---|
| 6902 | | - read_unlock(&tasklist_lock); |
|---|
| 6903 | | -free: |
|---|
| 6904 | | - for (i = start; i < end; i++) |
|---|
| 6905 | | - kfree(ret_stack_list[i]); |
|---|
| 6906 | | - return ret; |
|---|
| 6907 | | -} |
|---|
| 6908 | | - |
|---|
| 6909 | | -static void |
|---|
| 6910 | | -ftrace_graph_probe_sched_switch(void *ignore, bool preempt, |
|---|
| 6911 | | - struct task_struct *prev, struct task_struct *next) |
|---|
| 6912 | | -{ |
|---|
| 6913 | | - unsigned long long timestamp; |
|---|
| 6914 | | - int index; |
|---|
| 6915 | | - |
|---|
| 6916 | | - /* |
|---|
| 6917 | | - * Does the user want to count the time a function was asleep. |
|---|
| 6918 | | - * If so, do not update the time stamps. |
|---|
| 6919 | | - */ |
|---|
| 6920 | | - if (fgraph_sleep_time) |
|---|
| 6921 | | - return; |
|---|
| 6922 | | - |
|---|
| 6923 | | - timestamp = trace_clock_local(); |
|---|
| 6924 | | - |
|---|
| 6925 | | - prev->ftrace_timestamp = timestamp; |
|---|
| 6926 | | - |
|---|
| 6927 | | - /* only process tasks that we timestamped */ |
|---|
| 6928 | | - if (!next->ftrace_timestamp) |
|---|
| 6929 | | - return; |
|---|
| 6930 | | - |
|---|
| 6931 | | - /* |
|---|
| 6932 | | - * Update all the counters in next to make up for the |
|---|
| 6933 | | - * time next was sleeping. |
|---|
| 6934 | | - */ |
|---|
| 6935 | | - timestamp -= next->ftrace_timestamp; |
|---|
| 6936 | | - |
|---|
| 6937 | | - for (index = next->curr_ret_stack; index >= 0; index--) |
|---|
| 6938 | | - next->ret_stack[index].calltime += timestamp; |
|---|
| 6939 | | -} |
|---|
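
The arithmetic in this probe is what keeps sleep time out of reported durations: when sleep time is *not* to be charged, every entry still pending on `next`'s return stack has its `calltime` pushed forward by exactly the interval the task spent off-CPU, so the eventual `rettime - calltime` covers on-CPU time only. In numbers, with hypothetical values:

```c
/* Toy illustration of the calltime adjustment above. */
#include <stdio.h>

int main(void)
{
	unsigned long long calltime = 100;	/* function entered at t=100 */
	unsigned long long slept    = 30;	/* task was off-CPU for 30 */
	unsigned long long rettime  = 150;	/* function returns at t=150 */

	calltime += slept;			/* same as the loop above */
	printf("reported duration: %llu\n", rettime - calltime);	/* 20 */
	return 0;
}
```
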
| 6940 | | - |
|---|
| 6941 | | -/* Allocate a return stack for each task */ |
|---|
| 6942 | | -static int start_graph_tracing(void) |
|---|
| 6943 | | -{ |
|---|
| 6944 | | - struct ftrace_ret_stack **ret_stack_list; |
|---|
| 6945 | | - int ret, cpu; |
|---|
| 6946 | | - |
|---|
| 6947 | | - ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE, |
|---|
| 6948 | | - sizeof(struct ftrace_ret_stack *), |
|---|
| 6949 | | - GFP_KERNEL); |
|---|
| 6950 | | - |
|---|
| 6951 | | - if (!ret_stack_list) |
|---|
| 6952 | | - return -ENOMEM; |
|---|
| 6953 | | - |
|---|
| 6954 | | - /* The boot CPU idle task's init ret_stack will never be freed */
|---|
| 6955 | | - for_each_online_cpu(cpu) { |
|---|
| 6956 | | - if (!idle_task(cpu)->ret_stack) |
|---|
| 6957 | | - ftrace_graph_init_idle_task(idle_task(cpu), cpu); |
|---|
| 6958 | | - } |
|---|
| 6959 | | - |
|---|
| 6960 | | - do { |
|---|
| 6961 | | - ret = alloc_retstack_tasklist(ret_stack_list); |
|---|
| 6962 | | - } while (ret == -EAGAIN); |
|---|
| 6963 | | - |
|---|
| 6964 | | - if (!ret) { |
|---|
| 6965 | | - ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
|---|
| 6966 | | - if (ret) |
|---|
| 6967 | | - pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
|---|
| 6969 | | - } |
|---|
| 6970 | | - |
|---|
| 6971 | | - kfree(ret_stack_list); |
|---|
| 6972 | | - return ret; |
|---|
| 6973 | | -} |
|---|
| 6974 | | - |
|---|
| 6975 | | -/* |
|---|
| 6976 | | - * Hibernation protection. |
|---|
| 6977 | | - * The state of the current task is too unstable during
|---|
| 6978 | | - * suspend/restore to disk. We want to protect against that. |
|---|
| 6979 | | - */ |
|---|
| 6980 | | -static int |
|---|
| 6981 | | -ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, |
|---|
| 6982 | | - void *unused) |
|---|
| 6983 | | -{ |
|---|
| 6984 | | - switch (state) { |
|---|
| 6985 | | - case PM_HIBERNATION_PREPARE: |
|---|
| 6986 | | - pause_graph_tracing(); |
|---|
| 6987 | | - break; |
|---|
| 6988 | | - |
|---|
| 6989 | | - case PM_POST_HIBERNATION: |
|---|
| 6990 | | - unpause_graph_tracing(); |
|---|
| 6991 | | - break; |
|---|
| 6992 | | - } |
|---|
| 6993 | | - return NOTIFY_DONE; |
|---|
| 6994 | | -} |
|---|
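
This notifier is the stock PM pattern: react to the hibernation prepare/post events, ignore everything else, and report `NOTIFY_DONE`; the hook-up happens through `register_pm_notifier()`/`unregister_pm_notifier()`, as seen in the registration paths below. A hedged module-context sketch of the same shape (callback and block names hypothetical):

```c
/*
 * Minimal sketch, assuming a kernel-module context, of a PM
 * notifier that quiesces work across hibernation. Only the
 * two hibernation transitions are handled.
 */
#include <linux/notifier.h>
#include <linux/suspend.h>

static int my_pm_callback(struct notifier_block *nb,
			  unsigned long state, void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		/* quiesce before the image is written */
		break;
	case PM_POST_HIBERNATION:
		/* resume normal operation */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_pm_nb = {
	.notifier_call = my_pm_callback,
};

/* register_pm_notifier(&my_pm_nb) at init,
 * unregister_pm_notifier(&my_pm_nb) at teardown. */
```
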
| 6995 | | - |
|---|
| 6996 | | -static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) |
|---|
| 6997 | | -{ |
|---|
| 6998 | | - if (!ftrace_ops_test(&global_ops, trace->func, NULL)) |
|---|
| 6999 | | - return 0; |
|---|
| 7000 | | - return __ftrace_graph_entry(trace); |
|---|
| 7001 | | -} |
|---|
| 7002 | | - |
|---|
| 7003 | | -/* |
|---|
| 7004 | | - * The function graph tracer should only trace the functions defined |
|---|
| 7005 | | - * by set_ftrace_filter and set_ftrace_notrace. If another function |
|---|
| 7006 | | - * tracer ops is registered, the graph tracer requires testing the |
|---|
| 7007 | | - * function against the global ops, and not just tracing any function
|---|
| 7008 | | - * that any ftrace_ops has registered.
|---|
| 7009 | | - */ |
|---|
| 7010 | | -static void update_function_graph_func(void) |
|---|
| 7011 | | -{ |
|---|
| 7012 | | - struct ftrace_ops *op; |
|---|
| 7013 | | - bool do_test = false; |
|---|
| 7014 | | - |
|---|
| 7015 | | - /* |
|---|
| 7016 | | - * The graph and global ops share the same set of functions |
|---|
| 7017 | | - * to test. If any other ops is on the list, then |
|---|
| 7018 | | - * the graph tracing needs to test if it's the function
|---|
| 7019 | | - * it should call. |
|---|
| 7020 | | - */ |
|---|
| 7021 | | - do_for_each_ftrace_op(op, ftrace_ops_list) { |
|---|
| 7022 | | - if (op != &global_ops && op != &graph_ops && |
|---|
| 7023 | | - op != &ftrace_list_end) { |
|---|
| 7024 | | - do_test = true; |
|---|
| 7025 | | - /* in double loop, break out with goto */ |
|---|
| 7026 | | - goto out; |
|---|
| 7027 | | - } |
|---|
| 7028 | | - } while_for_each_ftrace_op(op); |
|---|
| 7029 | | - out: |
|---|
| 7030 | | - if (do_test) |
|---|
| 7031 | | - ftrace_graph_entry = ftrace_graph_entry_test; |
|---|
| 7032 | | - else |
|---|
| 7033 | | - ftrace_graph_entry = __ftrace_graph_entry; |
|---|
| 7034 | | -} |
|---|
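
The effect of `update_function_graph_func()` is a fast-path/slow-path choice: while only `graph_ops` and `global_ops` are registered, every function reaching the graph hook has already passed the right filter, so the raw entry callback runs directly; once any other ops joins the list, calls detour through `ftrace_graph_entry_test()`, which checks `global_ops` first. The same selection in a user-space miniature (names and the filter predicate are hypothetical):

```c
#include <stdbool.h>
#include <stdio.h>

typedef int (*entry_fn)(int func);

static int real_entry(int func) { return printf("trace %d\n", func); }

/* the "remembered" callback, like __ftrace_graph_entry above */
static entry_fn __entry = real_entry;

static bool func_is_filtered(int func) { return func % 2 == 0; }

/* the filtering wrapper, like ftrace_graph_entry_test() */
static int entry_test(int func)
{
	if (!func_is_filtered(func))
		return 0;		/* not one of ours: skip */
	return __entry(func);
}

/* what the hot path actually calls, like ftrace_graph_entry */
static entry_fn entry = real_entry;

static void update_entry(bool other_ops_registered)
{
	entry = other_ops_registered ? entry_test : __entry;
}

int main(void)
{
	update_entry(true);
	entry(1);		/* odd: filtered out, nothing printed */
	entry(2);		/* even: traced */
	update_entry(false);
	entry(3);		/* direct call, traced unconditionally */
	return 0;
}
```
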
| 7035 | | - |
|---|
| 7036 | | -static struct notifier_block ftrace_suspend_notifier = { |
|---|
| 7037 | | - .notifier_call = ftrace_suspend_notifier_call, |
|---|
| 7038 | | -}; |
|---|
| 7039 | | - |
|---|
| 7040 | | -int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
|---|
| 7041 | | - trace_func_graph_ent_t entryfunc) |
|---|
| 7042 | | -{ |
|---|
| 7043 | | - int ret = 0; |
|---|
| 7044 | | - |
|---|
| 7045 | | - mutex_lock(&ftrace_lock); |
|---|
| 7046 | | - |
|---|
| 7047 | | - /* we currently allow only one tracer registered at a time */ |
|---|
| 7048 | | - if (ftrace_graph_active) { |
|---|
| 7049 | | - ret = -EBUSY; |
|---|
| 7050 | | - goto out; |
|---|
| 7051 | | - } |
|---|
| 7052 | | - |
|---|
| 7053 | | - register_pm_notifier(&ftrace_suspend_notifier); |
|---|
| 7054 | | - |
|---|
| 7055 | | - ftrace_graph_active++; |
|---|
| 7056 | | - ret = start_graph_tracing(); |
|---|
| 7057 | | - if (ret) { |
|---|
| 7058 | | - ftrace_graph_active--; |
|---|
| 7059 | | - goto out; |
|---|
| 7060 | | - } |
|---|
| 7061 | | - |
|---|
| 7062 | | - ftrace_graph_return = retfunc; |
|---|
| 7063 | | - |
|---|
| 7064 | | - /* |
|---|
| 7065 | | - * Point the indirect __ftrace_graph_entry at the entryfunc,
|---|
| 7066 | | - * and point the hook that actually gets called at entry_test
|---|
| 7067 | | - * first. Then run the update function to determine whether
|---|
| 7068 | | - * the entryfunc can be called directly or not.
|---|
| 7069 | | - */ |
|---|
| 7070 | | - __ftrace_graph_entry = entryfunc; |
|---|
| 7071 | | - ftrace_graph_entry = ftrace_graph_entry_test; |
|---|
| 7072 | | - update_function_graph_func(); |
|---|
| 7073 | | - |
|---|
| 7074 | | - ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
|---|
| 7075 | | -out: |
|---|
| 7076 | | - mutex_unlock(&ftrace_lock); |
|---|
| 7077 | | - return ret; |
|---|
| 7078 | | -} |
|---|
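
For context, this pair of hooks is the whole client-facing surface as declared here: a tracer supplies one return handler and one entry handler, and gets `-EBUSY` if another graph tracer is already live, since both hooks are installed under `ftrace_lock`. A hedged sketch of a caller, with hypothetical callback bodies (kernel context assumed):

```c
/*
 * Sketch of a client of the interface above. The callback
 * bodies are hypothetical; the signatures match the removed
 * declarations in this file.
 */
#include <linux/ftrace.h>

static int my_entry(struct ftrace_graph_ent *trace)
{
	/* return 0 to tell fgraph not to trace this call */
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the duration */
}

/* somewhere in init code:
 *	ret = register_ftrace_graph(my_return, my_entry);
 * and on teardown:
 *	unregister_ftrace_graph();
 */
```
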
| 7079 | | - |
|---|
| 7080 | | -void unregister_ftrace_graph(void) |
|---|
| 7081 | | -{ |
|---|
| 7082 | | - mutex_lock(&ftrace_lock); |
|---|
| 7083 | | - |
|---|
| 7084 | | - if (unlikely(!ftrace_graph_active)) |
|---|
| 7085 | | - goto out; |
|---|
| 7086 | | - |
|---|
| 7087 | | - ftrace_graph_active--; |
|---|
| 7088 | | - ftrace_graph_return = ftrace_graph_return_stub; |
|---|
| 7089 | | - ftrace_graph_entry = ftrace_graph_entry_stub; |
|---|
| 7090 | | - __ftrace_graph_entry = ftrace_graph_entry_stub; |
|---|
| 7091 | | - ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); |
|---|
| 7092 | | - unregister_pm_notifier(&ftrace_suspend_notifier); |
|---|
| 7093 | | - unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
|---|
| 7094 | | - |
|---|
| 7095 | | - out: |
|---|
| 7096 | | - mutex_unlock(&ftrace_lock); |
|---|
| 7097 | | -} |
|---|
| 7098 | | - |
|---|
| 7099 | | -static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); |
|---|
| 7100 | | - |
|---|
| 7101 | | -static void |
|---|
| 7102 | | -graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) |
|---|
| 7103 | | -{ |
|---|
| 7104 | | - atomic_set(&t->trace_overrun, 0); |
|---|
| 7105 | | - t->ftrace_timestamp = 0; |
|---|
| 7106 | | - /* make curr_ret_stack visible before we add the ret_stack */ |
|---|
| 7107 | | - smp_wmb(); |
|---|
| 7108 | | - t->ret_stack = ret_stack; |
|---|
| 7109 | | -} |
|---|
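
`graph_init_task()` is a publish-after-init sequence: all per-task fields are written first, `smp_wmb()` orders those writes, and only then does `ret_stack` become non-NULL, so any code that tests `t->ret_stack` observes fully initialized state. A user-space analogue using C11 release/acquire (hypothetical names; the release store plays the role of `smp_wmb()` plus the pointer assignment):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct stack {
	int depth;
	int overrun;
};

static _Atomic(struct stack *) published;

static void publish(void)
{
	struct stack *s = malloc(sizeof(*s));

	if (!s)
		return;
	s->depth = -1;		/* init everything first ... */
	s->overrun = 0;
	/* ... then publish; release pairs with the reader's acquire */
	atomic_store_explicit(&published, s, memory_order_release);
}

static struct stack *lookup(void)
{
	return atomic_load_explicit(&published, memory_order_acquire);
}

int main(void)
{
	publish();
	struct stack *s = lookup();
	return s ? s->depth + 1 : 0;	/* -1 + 1 == 0 on success */
}
```
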
| 7110 | | - |
|---|
| 7111 | | -/* |
|---|
| 7112 | | - * Allocate a return stack for the idle task. May be the first |
|---|
| 7113 | | - * time through, or it may be done by CPU hotplug online. |
|---|
| 7114 | | - */ |
|---|
| 7115 | | -void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) |
|---|
| 7116 | | -{ |
|---|
| 7117 | | - t->curr_ret_stack = -1; |
|---|
| 7118 | | - t->curr_ret_depth = -1; |
|---|
| 7119 | | - /* |
|---|
| 7120 | | - * The idle task has no parent, it either has its own |
|---|
| 7121 | | - * stack or no stack at all. |
|---|
| 7122 | | - */ |
|---|
| 7123 | | - if (t->ret_stack) |
|---|
| 7124 | | - WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); |
|---|
| 7125 | | - |
|---|
| 7126 | | - if (ftrace_graph_active) { |
|---|
| 7127 | | - struct ftrace_ret_stack *ret_stack; |
|---|
| 7128 | | - |
|---|
| 7129 | | - ret_stack = per_cpu(idle_ret_stack, cpu); |
|---|
| 7130 | | - if (!ret_stack) { |
|---|
| 7131 | | - ret_stack = |
|---|
| 7132 | | - kmalloc_array(FTRACE_RETFUNC_DEPTH, |
|---|
| 7133 | | - sizeof(struct ftrace_ret_stack), |
|---|
| 7134 | | - GFP_KERNEL); |
|---|
| 7135 | | - if (!ret_stack) |
|---|
| 7136 | | - return; |
|---|
| 7137 | | - per_cpu(idle_ret_stack, cpu) = ret_stack; |
|---|
| 7138 | | - } |
|---|
| 7139 | | - graph_init_task(t, ret_stack); |
|---|
| 7140 | | - } |
|---|
| 7141 | | -} |
|---|
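
Because idle tasks are never freed, their return stacks are worth caching: `idle_ret_stack` is a per-CPU pointer that is allocated once on first use and simply rebound to the idle task on every later CPU-online event. The same lazy per-slot cache as a user-space toy (hypothetical names and sizes):

```c
#include <stdlib.h>

#define NR_SLOTS 8

/* models the DEFINE_PER_CPU(..., idle_ret_stack) variable above */
static void *slot_cache[NR_SLOTS];

static void *get_slot_buffer(int slot)
{
	if (!slot_cache[slot])
		slot_cache[slot] = malloc(4096);	/* first use only */
	return slot_cache[slot];	/* reused on every later "online" */
}

int main(void)
{
	/* two "online" events for slot 0 return the same buffer */
	return get_slot_buffer(0) == get_slot_buffer(0) ? 0 : 1;
}
```
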
| 7142 | | - |
|---|
| 7143 | | -/* Allocate a return stack for newly created task */ |
|---|
| 7144 | | -void ftrace_graph_init_task(struct task_struct *t) |
|---|
| 7145 | | -{ |
|---|
| 7146 | | - /* Make sure we do not use the parent ret_stack */ |
|---|
| 7147 | | - t->ret_stack = NULL; |
|---|
| 7148 | | - t->curr_ret_stack = -1; |
|---|
| 7149 | | - t->curr_ret_depth = -1; |
|---|
| 7150 | | - |
|---|
| 7151 | | - if (ftrace_graph_active) { |
|---|
| 7152 | | - struct ftrace_ret_stack *ret_stack; |
|---|
| 7153 | | - |
|---|
| 7154 | | - ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, |
|---|
| 7155 | | - sizeof(struct ftrace_ret_stack), |
|---|
| 7156 | | - GFP_KERNEL); |
|---|
| 7157 | | - if (!ret_stack) |
|---|
| 7158 | | - return; |
|---|
| 7159 | | - graph_init_task(t, ret_stack); |
|---|
| 7160 | | - } |
|---|
| 7161 | | -} |
|---|
| 7162 | | - |
|---|
| 7163 | | -void ftrace_graph_exit_task(struct task_struct *t) |
|---|
| 7164 | | -{ |
|---|
| 7165 | | - struct ftrace_ret_stack *ret_stack = t->ret_stack; |
|---|
| 7166 | | - |
|---|
| 7167 | | - t->ret_stack = NULL; |
|---|
| 7168 | | - /* NULL must become visible to IRQs before we free it: */ |
|---|
| 7169 | | - barrier(); |
|---|
| 7170 | | - |
|---|
| 7171 | | - kfree(ret_stack); |
|---|
| 7172 | | -} |
|---|
| 7173 | | -#endif |
|---|