.. | .. |
---|
18 | 18 | #include <linux/clocksource.h> |
---|
19 | 19 | #include <linux/sched/task.h> |
---|
20 | 20 | #include <linux/kallsyms.h> |
---|
| 21 | +#include <linux/security.h> |
---|
21 | 22 | #include <linux/seq_file.h> |
---|
22 | | -#include <linux/suspend.h> |
---|
23 | 23 | #include <linux/tracefs.h> |
---|
24 | 24 | #include <linux/hardirq.h> |
---|
25 | 25 | #include <linux/kthread.h> |
---|
.. | .. |
---|
41 | 41 | #include <asm/sections.h> |
---|
42 | 42 | #include <asm/setup.h> |
---|
43 | 43 | |
---|
| 44 | +#include "ftrace_internal.h" |
---|
44 | 45 | #include "trace_output.h" |
---|
45 | 46 | #include "trace_stat.h" |
---|
46 | 47 | |
---|
.. | .. |
---|
61 | 62 | }) |
---|
62 | 63 | |
---|
63 | 64 | /* hash bits for specific function selection */ |
---|
64 | | -#define FTRACE_HASH_BITS 7 |
---|
65 | | -#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
---|
66 | 65 | #define FTRACE_HASH_DEFAULT_BITS 10 |
---|
67 | 66 | #define FTRACE_HASH_MAX_BITS 12 |
---|
68 | 67 | |
---|
.. | .. |
---|
70 | 69 | #define INIT_OPS_HASH(opsname) \ |
---|
71 | 70 | .func_hash = &opsname.local_hash, \ |
---|
72 | 71 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), |
---|
73 | | -#define ASSIGN_OPS_HASH(opsname, val) \ |
---|
74 | | - .func_hash = val, \ |
---|
75 | | - .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), |
---|
76 | 72 | #else |
---|
77 | 73 | #define INIT_OPS_HASH(opsname) |
---|
78 | | -#define ASSIGN_OPS_HASH(opsname, val) |
---|
79 | 74 | #endif |
---|
80 | 75 | |
---|
81 | | -static struct ftrace_ops ftrace_list_end __read_mostly = { |
---|
| 76 | +enum { |
---|
| 77 | + FTRACE_MODIFY_ENABLE_FL = (1 << 0), |
---|
| 78 | + FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), |
---|
| 79 | +}; |
---|
| 80 | + |
---|
| 81 | +struct ftrace_ops ftrace_list_end __read_mostly = { |
---|
82 | 82 | .func = ftrace_stub, |
---|
83 | 83 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, |
---|
84 | 84 | INIT_OPS_HASH(ftrace_list_end) |
---|
.. | .. |
---|
102 | 102 | |
---|
103 | 103 | tr = ops->private; |
---|
104 | 104 | |
---|
105 | | - return tr->function_pids != NULL; |
---|
| 105 | + return tr->function_pids != NULL || tr->function_no_pids != NULL; |
---|
106 | 106 | } |
---|
107 | 107 | |
---|
108 | 108 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
---|
.. | .. |
---|
113 | 113 | */ |
---|
114 | 114 | static int ftrace_disabled __read_mostly; |
---|
115 | 115 | |
---|
116 | | -static DEFINE_MUTEX(ftrace_lock); |
---|
| 116 | +DEFINE_MUTEX(ftrace_lock); |
---|
117 | 117 | |
---|
118 | | -static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
---|
| 118 | +struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
---|
119 | 119 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
---|
120 | | -static struct ftrace_ops global_ops; |
---|
| 120 | +struct ftrace_ops global_ops; |
---|
121 | 121 | |
---|
122 | 122 | #if ARCH_SUPPORTS_FTRACE_OPS |
---|
123 | 123 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
---|
124 | 124 | struct ftrace_ops *op, struct pt_regs *regs); |
---|
125 | 125 | #else |
---|
126 | 126 | /* See comment below, where ftrace_ops_list_func is defined */ |
---|
127 | | -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip, |
---|
128 | | - struct ftrace_ops *op, struct pt_regs *regs); |
---|
129 | | -#define ftrace_ops_list_func ftrace_ops_no_ops |
---|
| 127 | +static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); |
---|
| 128 | +#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) |
---|
130 | 129 | #endif |
---|
131 | | - |
---|
132 | | -/* |
---|
133 | | - * Traverse the ftrace_global_list, invoking all entries. The reason that we |
---|
134 | | - * can use rcu_dereference_raw_notrace() is that elements removed from this list |
---|
135 | | - * are simply leaked, so there is no need to interact with a grace-period |
---|
136 | | - * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle |
---|
137 | | - * concurrent insertions into the ftrace_global_list. |
---|
138 | | - * |
---|
139 | | - * Silly Alpha and silly pointer-speculation compiler optimizations! |
---|
140 | | - */ |
---|
141 | | -#define do_for_each_ftrace_op(op, list) \ |
---|
142 | | - op = rcu_dereference_raw_notrace(list); \ |
---|
143 | | - do |
---|
144 | | - |
---|
145 | | -/* |
---|
146 | | - * Optimized for just a single item in the list (as that is the normal case). |
---|
147 | | - */ |
---|
148 | | -#define while_for_each_ftrace_op(op) \ |
---|
149 | | - while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \ |
---|
150 | | - unlikely((op) != &ftrace_list_end)) |
---|
151 | 130 | |
---|
152 | 131 | static inline void ftrace_ops_init(struct ftrace_ops *ops) |
---|
153 | 132 | { |
---|
.. | .. |
---|
164 | 143 | struct ftrace_ops *op, struct pt_regs *regs) |
---|
165 | 144 | { |
---|
166 | 145 | struct trace_array *tr = op->private; |
---|
| 146 | + int pid; |
---|
167 | 147 | |
---|
168 | | - if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid)) |
---|
169 | | - return; |
---|
| 148 | + if (tr) { |
---|
| 149 | + pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); |
---|
| 150 | + if (pid == FTRACE_PID_IGNORE) |
---|
| 151 | + return; |
---|
| 152 | + if (pid != FTRACE_PID_TRACE && |
---|
| 153 | + pid != current->pid) |
---|
| 154 | + return; |
---|
| 155 | + } |
---|
170 | 156 | |
---|
171 | 157 | op->saved_func(ip, parent_ip, op, regs); |
---|
172 | | -} |
---|
173 | | - |
---|
174 | | -static void ftrace_sync(struct work_struct *work) |
---|
175 | | -{ |
---|
176 | | - /* |
---|
177 | | - * This function is just a stub to implement a hard force |
---|
178 | | - * of synchronize_sched(). This requires synchronizing |
---|
179 | | - * tasks even in userspace and idle. |
---|
180 | | - * |
---|
181 | | - * Yes, function tracing is rude. |
---|
182 | | - */ |
---|
183 | 158 | } |
---|
184 | 159 | |
---|
185 | 160 | static void ftrace_sync_ipi(void *data) |
---|
.. | .. |
---|
187 | 162 | /* Probably not needed, but do it anyway */ |
---|
188 | 163 | smp_rmb(); |
---|
189 | 164 | } |
---|
190 | | - |
---|
191 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
192 | | -static void update_function_graph_func(void); |
---|
193 | | - |
---|
194 | | -/* Both enabled by default (can be cleared by function_graph tracer flags */ |
---|
195 | | -static bool fgraph_sleep_time = true; |
---|
196 | | -static bool fgraph_graph_time = true; |
---|
197 | | - |
---|
198 | | -#else |
---|
199 | | -static inline void update_function_graph_func(void) { } |
---|
200 | | -#endif |
---|
201 | | - |
---|
202 | 165 | |
---|
203 | 166 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) |
---|
204 | 167 | { |
---|
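The reworked ftrace_pid_func() above turns the old boolean "ignore this CPU" test into a three-way decision on a per-CPU pid value. Below is a minimal userspace sketch of that decision, assuming the sentinel names FTRACE_PID_IGNORE and FTRACE_PID_TRACE behave as the new branches suggest (their actual definitions live outside this hunk); it is an illustration, not kernel code.

```c
/* Userspace model of the pid filter in ftrace_pid_func(); not kernel code. */
#include <assert.h>
#include <stdbool.h>

/* Assumed sentinel values; the kernel defines these elsewhere. */
#define FTRACE_PID_IGNORE	(-1)	/* nothing on this CPU should be traced */
#define FTRACE_PID_TRACE	(-2)	/* trace every task on this CPU */

/* Return true when the callback should run for @current_pid. */
static bool pid_allows_trace(int percpu_pid, int current_pid)
{
	if (percpu_pid == FTRACE_PID_IGNORE)
		return false;			/* filtering says: skip everything */
	if (percpu_pid != FTRACE_PID_TRACE &&
	    percpu_pid != current_pid)
		return false;			/* only the recorded pid may trace */
	return true;
}

int main(void)
{
	assert(!pid_allows_trace(FTRACE_PID_IGNORE, 100));	/* always skipped */
	assert(pid_allows_trace(FTRACE_PID_TRACE, 100));	/* always traced */
	assert(pid_allows_trace(100, 100));			/* matching pid */
	assert(!pid_allows_trace(42, 100));			/* non-matching pid */
	return 0;
}
```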
.. | .. |
---|
267 | 230 | /* |
---|
268 | 231 | * For static tracing, we need to be a bit more careful. |
---|
269 | 232 | * The function change takes affect immediately. Thus, |
---|
270 | | - * we need to coorditate the setting of the function_trace_ops |
---|
| 233 | + * we need to coordinate the setting of the function_trace_ops |
---|
271 | 234 | * with the setting of the ftrace_trace_function. |
---|
272 | 235 | * |
---|
273 | 236 | * Set the function to the list ops, which will call the |
---|
.. | .. |
---|
279 | 242 | * Make sure all CPUs see this. Yes this is slow, but static |
---|
280 | 243 | * tracing is slow and nasty to have enabled. |
---|
281 | 244 | */ |
---|
282 | | - schedule_on_each_cpu(ftrace_sync); |
---|
| 245 | + synchronize_rcu_tasks_rude(); |
---|
283 | 246 | /* Now all cpus are using the list ops. */ |
---|
284 | 247 | function_trace_op = set_function_trace_op; |
---|
285 | 248 | /* Make sure the function_trace_op is visible on all CPUs */ |
---|
.. | .. |
---|
336 | 299 | |
---|
337 | 300 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
---|
338 | 301 | |
---|
339 | | -static int __register_ftrace_function(struct ftrace_ops *ops) |
---|
| 302 | +int __register_ftrace_function(struct ftrace_ops *ops) |
---|
340 | 303 | { |
---|
341 | 304 | if (ops->flags & FTRACE_OPS_FL_DELETED) |
---|
342 | 305 | return -EINVAL; |
---|
.. | .. |
---|
357 | 320 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) |
---|
358 | 321 | ops->flags |= FTRACE_OPS_FL_SAVE_REGS; |
---|
359 | 322 | #endif |
---|
| 323 | + if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) |
---|
| 324 | + return -EBUSY; |
---|
360 | 325 | |
---|
361 | 326 | if (!core_kernel_data((unsigned long)ops)) |
---|
362 | 327 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
---|
.. | .. |
---|
377 | 342 | return 0; |
---|
378 | 343 | } |
---|
379 | 344 | |
---|
380 | | -static int __unregister_ftrace_function(struct ftrace_ops *ops) |
---|
| 345 | +int __unregister_ftrace_function(struct ftrace_ops *ops) |
---|
381 | 346 | { |
---|
382 | 347 | int ret; |
---|
383 | 348 | |
---|
.. | .. |
---|
494 | 459 | |
---|
495 | 460 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
496 | 461 | /* function graph compares on total time */ |
---|
497 | | -static int function_stat_cmp(void *p1, void *p2) |
---|
| 462 | +static int function_stat_cmp(const void *p1, const void *p2) |
---|
498 | 463 | { |
---|
499 | | - struct ftrace_profile *a = p1; |
---|
500 | | - struct ftrace_profile *b = p2; |
---|
| 464 | + const struct ftrace_profile *a = p1; |
---|
| 465 | + const struct ftrace_profile *b = p2; |
---|
501 | 466 | |
---|
502 | 467 | if (a->time < b->time) |
---|
503 | 468 | return -1; |
---|
.. | .. |
---|
508 | 473 | } |
---|
509 | 474 | #else |
---|
510 | 475 | /* not function graph compares against hits */ |
---|
511 | | -static int function_stat_cmp(void *p1, void *p2) |
---|
| 476 | +static int function_stat_cmp(const void *p1, const void *p2) |
---|
512 | 477 | { |
---|
513 | | - struct ftrace_profile *a = p1; |
---|
514 | | - struct ftrace_profile *b = p2; |
---|
| 478 | + const struct ftrace_profile *a = p1; |
---|
| 479 | + const struct ftrace_profile *b = p2; |
---|
515 | 480 | |
---|
516 | 481 | if (a->counter < b->counter) |
---|
517 | 482 | return -1; |
---|
.. | .. |
---|
817 | 782 | } |
---|
818 | 783 | |
---|
819 | 784 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
| 785 | +static bool fgraph_graph_time = true; |
---|
| 786 | + |
---|
| 787 | +void ftrace_graph_graph_time_control(bool enable) |
---|
| 788 | +{ |
---|
| 789 | + fgraph_graph_time = enable; |
---|
| 790 | +} |
---|
| 791 | + |
---|
820 | 792 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
---|
821 | 793 | { |
---|
822 | | - int index = current->curr_ret_stack; |
---|
| 794 | + struct ftrace_ret_stack *ret_stack; |
---|
823 | 795 | |
---|
824 | 796 | function_profile_call(trace->func, 0, NULL, NULL); |
---|
825 | 797 | |
---|
.. | .. |
---|
827 | 799 | if (!current->ret_stack) |
---|
828 | 800 | return 0; |
---|
829 | 801 | |
---|
830 | | - if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) |
---|
831 | | - current->ret_stack[index].subtime = 0; |
---|
| 802 | + ret_stack = ftrace_graph_get_ret_stack(current, 0); |
---|
| 803 | + if (ret_stack) |
---|
| 804 | + ret_stack->subtime = 0; |
---|
832 | 805 | |
---|
833 | 806 | return 1; |
---|
834 | 807 | } |
---|
835 | 808 | |
---|
836 | 809 | static void profile_graph_return(struct ftrace_graph_ret *trace) |
---|
837 | 810 | { |
---|
| 811 | + struct ftrace_ret_stack *ret_stack; |
---|
838 | 812 | struct ftrace_profile_stat *stat; |
---|
839 | 813 | unsigned long long calltime; |
---|
840 | 814 | struct ftrace_profile *rec; |
---|
.. | .. |
---|
852 | 826 | calltime = trace->rettime - trace->calltime; |
---|
853 | 827 | |
---|
854 | 828 | if (!fgraph_graph_time) { |
---|
855 | | - int index; |
---|
856 | | - |
---|
857 | | - index = current->curr_ret_stack; |
---|
858 | 829 | |
---|
859 | 830 | /* Append this call time to the parent time to subtract */ |
---|
860 | | - if (index) |
---|
861 | | - current->ret_stack[index - 1].subtime += calltime; |
---|
| 831 | + ret_stack = ftrace_graph_get_ret_stack(current, 1); |
---|
| 832 | + if (ret_stack) |
---|
| 833 | + ret_stack->subtime += calltime; |
---|
862 | 834 | |
---|
863 | | - if (current->ret_stack[index].subtime < calltime) |
---|
864 | | - calltime -= current->ret_stack[index].subtime; |
---|
| 835 | + ret_stack = ftrace_graph_get_ret_stack(current, 0); |
---|
| 836 | + if (ret_stack && ret_stack->subtime < calltime) |
---|
| 837 | + calltime -= ret_stack->subtime; |
---|
865 | 838 | else |
---|
866 | 839 | calltime = 0; |
---|
867 | 840 | } |
---|
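profile_graph_entry() and profile_graph_return() now reach the return-stack frame through ftrace_graph_get_ret_stack() instead of indexing current->ret_stack, but the accounting is unchanged: when fgraph_graph_time is off, a call's time is folded into the parent frame's subtime and each frame subtracts its own accumulated subtime on return, leaving self-time only. A small userspace model of that bookkeeping follows; the struct and helper are illustrative stand-ins, not the kernel's types.

```c
/* Userspace model of the "subtract child time" bookkeeping used when
 * fgraph_graph_time is disabled; frames stand in for ftrace_ret_stack. */
#include <assert.h>

struct frame {
	unsigned long long calltime;	/* timestamp at function entry */
	unsigned long long subtime;	/* time already attributed to children */
};

/* On return: charge this call to the parent and report own self-time. */
static unsigned long long
frame_return(struct frame *self, struct frame *parent,
	     unsigned long long rettime)
{
	unsigned long long calltime = rettime - self->calltime;

	if (parent)
		parent->subtime += calltime;	/* parent subtracts us later */

	if (self->subtime < calltime)
		calltime -= self->subtime;	/* drop time spent in children */
	else
		calltime = 0;

	return calltime;			/* self-time only */
}

int main(void)
{
	/* parent runs 0..100, child runs 10..40 */
	struct frame parent = { .calltime = 0,  .subtime = 0 };
	struct frame child  = { .calltime = 10, .subtime = 0 };

	assert(frame_return(&child, &parent, 40) == 30);	/* child self-time */
	assert(frame_return(&parent, NULL, 100) == 70);		/* 100 - 30 */
	return 0;
}
```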
.. | .. |
---|
876 | 849 | local_irq_restore(flags); |
---|
877 | 850 | } |
---|
878 | 851 | |
---|
| 852 | +static struct fgraph_ops fprofiler_ops = { |
---|
| 853 | + .entryfunc = &profile_graph_entry, |
---|
| 854 | + .retfunc = &profile_graph_return, |
---|
| 855 | +}; |
---|
| 856 | + |
---|
879 | 857 | static int register_ftrace_profiler(void) |
---|
880 | 858 | { |
---|
881 | | - return register_ftrace_graph(&profile_graph_return, |
---|
882 | | - &profile_graph_entry); |
---|
| 859 | + return register_ftrace_graph(&fprofiler_ops); |
---|
883 | 860 | } |
---|
884 | 861 | |
---|
885 | 862 | static void unregister_ftrace_profiler(void) |
---|
886 | 863 | { |
---|
887 | | - unregister_ftrace_graph(); |
---|
| 864 | + unregister_ftrace_graph(&fprofiler_ops); |
---|
888 | 865 | } |
---|
889 | 866 | #else |
---|
890 | 867 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
---|
.. | .. |
---|
936 | 913 | ftrace_profile_enabled = 0; |
---|
937 | 914 | /* |
---|
938 | 915 | * unregister_ftrace_profiler calls stop_machine |
---|
939 | | - * so this acts like an synchronize_sched. |
---|
| 916 | + * so this acts like a synchronize_rcu. |
---|
940 | 917 | */ |
---|
941 | 918 | unregister_ftrace_profiler(); |
---|
942 | 919 | } |
---|
.. | .. |
---|
1023 | 1000 | } |
---|
1024 | 1001 | #endif /* CONFIG_FUNCTION_PROFILER */ |
---|
1025 | 1002 | |
---|
1026 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
1027 | | -static int ftrace_graph_active; |
---|
1028 | | -#else |
---|
1029 | | -# define ftrace_graph_active 0 |
---|
1030 | | -#endif |
---|
1031 | | - |
---|
1032 | 1003 | #ifdef CONFIG_DYNAMIC_FTRACE |
---|
1033 | 1004 | |
---|
1034 | 1005 | static struct ftrace_ops *removed_ops; |
---|
.. | .. |
---|
1042 | 1013 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
---|
1043 | 1014 | # error Dynamic ftrace depends on MCOUNT_RECORD |
---|
1044 | 1015 | #endif |
---|
1045 | | - |
---|
1046 | | -struct ftrace_func_entry { |
---|
1047 | | - struct hlist_node hlist; |
---|
1048 | | - unsigned long ip; |
---|
1049 | | -}; |
---|
1050 | 1016 | |
---|
1051 | 1017 | struct ftrace_func_probe { |
---|
1052 | 1018 | struct ftrace_probe_ops *probe_ops; |
---|
.. | .. |
---|
1069 | 1035 | }; |
---|
1070 | 1036 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) |
---|
1071 | 1037 | |
---|
1072 | | -static struct ftrace_ops global_ops = { |
---|
| 1038 | +struct ftrace_ops global_ops = { |
---|
1073 | 1039 | .func = ftrace_stub, |
---|
1074 | 1040 | .local_hash.notrace_hash = EMPTY_HASH, |
---|
1075 | 1041 | .local_hash.filter_hash = EMPTY_HASH, |
---|
.. | .. |
---|
1088 | 1054 | |
---|
1089 | 1055 | /* |
---|
1090 | 1056 | * Some of the ops may be dynamically allocated, |
---|
1091 | | - * they are freed after a synchronize_sched(). |
---|
| 1057 | + * they are freed after a synchronize_rcu(). |
---|
1092 | 1058 | */ |
---|
1093 | 1059 | preempt_disable_notrace(); |
---|
1094 | 1060 | |
---|
.. | .. |
---|
1125 | 1091 | struct ftrace_page *next; |
---|
1126 | 1092 | struct dyn_ftrace *records; |
---|
1127 | 1093 | int index; |
---|
1128 | | - int size; |
---|
| 1094 | + int order; |
---|
1129 | 1095 | }; |
---|
1130 | 1096 | |
---|
1131 | 1097 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) |
---|
1132 | 1098 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) |
---|
1133 | | - |
---|
1134 | | -/* estimate from running different kernels */ |
---|
1135 | | -#define NR_TO_INIT 10000 |
---|
1136 | 1099 | |
---|
1137 | 1100 | static struct ftrace_page *ftrace_pages_start; |
---|
1138 | 1101 | static struct ftrace_page *ftrace_pages; |
---|
.. | .. |
---|
1288 | 1251 | { |
---|
1289 | 1252 | if (!hash || hash == EMPTY_HASH) |
---|
1290 | 1253 | return; |
---|
1291 | | - call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); |
---|
| 1254 | + call_rcu(&hash->rcu, __free_ftrace_hash_rcu); |
---|
1292 | 1255 | } |
---|
1293 | 1256 | |
---|
1294 | 1257 | void ftrace_free_filter(struct ftrace_ops *ops) |
---|
.. | .. |
---|
1332 | 1295 | if (!ftrace_mod) |
---|
1333 | 1296 | return -ENOMEM; |
---|
1334 | 1297 | |
---|
| 1298 | + INIT_LIST_HEAD(&ftrace_mod->list); |
---|
1335 | 1299 | ftrace_mod->func = kstrdup(func, GFP_KERNEL); |
---|
1336 | 1300 | ftrace_mod->module = kstrdup(module, GFP_KERNEL); |
---|
1337 | 1301 | ftrace_mod->enable = enable; |
---|
.. | .. |
---|
1395 | 1359 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, |
---|
1396 | 1360 | struct ftrace_hash *new_hash); |
---|
1397 | 1361 | |
---|
1398 | | -static struct ftrace_hash * |
---|
1399 | | -__ftrace_hash_move(struct ftrace_hash *src) |
---|
| 1362 | +static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size) |
---|
1400 | 1363 | { |
---|
1401 | 1364 | struct ftrace_func_entry *entry; |
---|
1402 | | - struct hlist_node *tn; |
---|
1403 | | - struct hlist_head *hhd; |
---|
1404 | 1365 | struct ftrace_hash *new_hash; |
---|
1405 | | - int size = src->count; |
---|
| 1366 | + struct hlist_head *hhd; |
---|
| 1367 | + struct hlist_node *tn; |
---|
1406 | 1368 | int bits = 0; |
---|
1407 | 1369 | int i; |
---|
1408 | 1370 | |
---|
1409 | 1371 | /* |
---|
1410 | | - * If the new source is empty, just return the empty_hash. |
---|
| 1372 | + * Use around half the size (max bit of it), but |
---|
| 1373 | + * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits). |
---|
1411 | 1374 | */ |
---|
1412 | | - if (ftrace_hash_empty(src)) |
---|
1413 | | - return EMPTY_HASH; |
---|
1414 | | - |
---|
1415 | | - /* |
---|
1416 | | - * Make the hash size about 1/2 the # found |
---|
1417 | | - */ |
---|
1418 | | - for (size /= 2; size; size >>= 1) |
---|
1419 | | - bits++; |
---|
| 1375 | + bits = fls(size / 2); |
---|
1420 | 1376 | |
---|
1421 | 1377 | /* Don't allocate too much */ |
---|
1422 | 1378 | if (bits > FTRACE_HASH_MAX_BITS) |
---|
.. | .. |
---|
1436 | 1392 | __add_hash_entry(new_hash, entry); |
---|
1437 | 1393 | } |
---|
1438 | 1394 | } |
---|
1439 | | - |
---|
1440 | 1395 | return new_hash; |
---|
| 1396 | +} |
---|
| 1397 | + |
---|
| 1398 | +static struct ftrace_hash * |
---|
| 1399 | +__ftrace_hash_move(struct ftrace_hash *src) |
---|
| 1400 | +{ |
---|
| 1401 | + int size = src->count; |
---|
| 1402 | + |
---|
| 1403 | + /* |
---|
| 1404 | + * If the new source is empty, just return the empty_hash. |
---|
| 1405 | + */ |
---|
| 1406 | + if (ftrace_hash_empty(src)) |
---|
| 1407 | + return EMPTY_HASH; |
---|
| 1408 | + |
---|
| 1409 | + return dup_hash(src, size); |
---|
1441 | 1410 | } |
---|
1442 | 1411 | |
---|
1443 | 1412 | static int |
---|
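dup_hash() replaces the old halving loop with bits = fls(size / 2). The two are equivalent: the loop counted how many bits remain in size / 2, which is exactly what find-last-set returns. A quick userspace check of that equivalence, with fls() modeled via __builtin_clz (the kernel supplies its own):

```c
/* Demonstrates that fls(size / 2) equals the halving loop it replaced. */
#include <assert.h>

/* Model of the kernel's fls(): index of the highest set bit, 1-based; 0 for 0. */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int old_bits(unsigned int size)
{
	int bits = 0;

	/* The loop removed from __ftrace_hash_move(). */
	for (size /= 2; size; size >>= 1)
		bits++;
	return bits;
}

int main(void)
{
	for (unsigned int size = 0; size < 100000; size++)
		assert(old_bits(size) == fls32(size / 2));
	return 0;
}
```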
.. | .. |
---|
1483 | 1452 | { |
---|
1484 | 1453 | /* |
---|
1485 | 1454 | * The function record is a match if it exists in the filter |
---|
1486 | | - * hash and not in the notrace hash. Note, an emty hash is |
---|
| 1455 | + * hash and not in the notrace hash. Note, an empty hash is |
---|
1487 | 1456 | * considered a match for the filter hash, but an empty |
---|
1488 | 1457 | * notrace hash is considered not in the notrace hash. |
---|
1489 | 1458 | */ |
---|
.. | .. |
---|
1503 | 1472 | * the ip is not in the ops->notrace_hash. |
---|
1504 | 1473 | * |
---|
1505 | 1474 | * This needs to be called with preemption disabled as |
---|
1506 | | - * the hashes are freed with call_rcu_sched(). |
---|
| 1475 | + * the hashes are freed with call_rcu(). |
---|
1507 | 1476 | */ |
---|
1508 | | -static int |
---|
| 1477 | +int |
---|
1509 | 1478 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
---|
1510 | 1479 | { |
---|
1511 | 1480 | struct ftrace_ops_hash hash; |
---|
.. | .. |
---|
1559 | 1528 | return 0; |
---|
1560 | 1529 | } |
---|
1561 | 1530 | |
---|
| 1531 | +static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) |
---|
| 1532 | +{ |
---|
| 1533 | + struct ftrace_page *pg; |
---|
| 1534 | + struct dyn_ftrace *rec = NULL; |
---|
| 1535 | + struct dyn_ftrace key; |
---|
| 1536 | + |
---|
| 1537 | + key.ip = start; |
---|
| 1538 | + key.flags = end; /* overload flags, as it is unsigned long */ |
---|
| 1539 | + |
---|
| 1540 | + for (pg = ftrace_pages_start; pg; pg = pg->next) { |
---|
| 1541 | + if (pg->index == 0 || |
---|
| 1542 | + end < pg->records[0].ip || |
---|
| 1543 | + start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
---|
| 1544 | + continue; |
---|
| 1545 | + rec = bsearch(&key, pg->records, pg->index, |
---|
| 1546 | + sizeof(struct dyn_ftrace), |
---|
| 1547 | + ftrace_cmp_recs); |
---|
| 1548 | + if (rec) |
---|
| 1549 | + break; |
---|
| 1550 | + } |
---|
| 1551 | + return rec; |
---|
| 1552 | +} |
---|
| 1553 | + |
---|
1562 | 1554 | /** |
---|
1563 | 1555 | * ftrace_location_range - return the first address of a traced location |
---|
1564 | 1556 | * if it touches the given ip range |
---|
.. | .. |
---|
1573 | 1565 | */ |
---|
1574 | 1566 | unsigned long ftrace_location_range(unsigned long start, unsigned long end) |
---|
1575 | 1567 | { |
---|
1576 | | - struct ftrace_page *pg; |
---|
1577 | 1568 | struct dyn_ftrace *rec; |
---|
1578 | | - struct dyn_ftrace key; |
---|
1579 | 1569 | |
---|
1580 | | - key.ip = start; |
---|
1581 | | - key.flags = end; /* overload flags, as it is unsigned long */ |
---|
1582 | | - |
---|
1583 | | - for (pg = ftrace_pages_start; pg; pg = pg->next) { |
---|
1584 | | - if (end < pg->records[0].ip || |
---|
1585 | | - start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
---|
1586 | | - continue; |
---|
1587 | | - rec = bsearch(&key, pg->records, pg->index, |
---|
1588 | | - sizeof(struct dyn_ftrace), |
---|
1589 | | - ftrace_cmp_recs); |
---|
1590 | | - if (rec) |
---|
1591 | | - return rec->ip; |
---|
1592 | | - } |
---|
| 1570 | + rec = lookup_rec(start, end); |
---|
| 1571 | + if (rec) |
---|
| 1572 | + return rec->ip; |
---|
1593 | 1573 | |
---|
1594 | 1574 | return 0; |
---|
1595 | 1575 | } |
---|
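lookup_rec() factors the bsearch() out of ftrace_location_range(): the search key overloads .flags to carry the end of the range, and the comparator (ftrace_cmp_recs(), defined outside this hunk) treats a record whose instruction lies inside [start, end] as a match. Below is a userspace sketch of the same trick; the comparator and sizes are written here for illustration, not copied from the kernel.

```c
/* Range lookup over a sorted record table via bsearch(), modelling
 * ftrace_location_range()/lookup_rec(). Types and sizes are illustrative. */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define INSN_SIZE 4UL		/* stand-in for MCOUNT_INSN_SIZE */

struct rec {
	unsigned long ip;	/* address of the traced instruction */
	unsigned long flags;	/* in the search key: end of the range */
};

/* Match when the key's [ip, flags] range touches [rec->ip, rec->ip + INSN_SIZE). */
static int cmp_recs(const void *a, const void *b)
{
	const struct rec *key = a;
	const struct rec *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + INSN_SIZE)
		return 1;
	return 0;
}

static unsigned long location_range(const struct rec *recs, size_t nr,
				    unsigned long start, unsigned long end)
{
	struct rec key = { .ip = start, .flags = end };
	const struct rec *found;

	found = bsearch(&key, recs, nr, sizeof(*recs), cmp_recs);
	return found ? found->ip : 0;
}

int main(void)
{
	const struct rec recs[] = {	/* must be sorted by ip */
		{ 0x1000, 0 }, { 0x2000, 0 }, { 0x3000, 0 },
	};

	assert(location_range(recs, 3, 0x2000, 0x2003) == 0x2000); /* exact hit */
	assert(location_range(recs, 3, 0x1ff0, 0x2001) == 0x2000); /* range touches */
	assert(location_range(recs, 3, 0x2500, 0x2600) == 0);	    /* no record */
	return 0;
}
```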
.. | .. |
---|
1742 | 1722 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
---|
1743 | 1723 | return false; |
---|
1744 | 1724 | |
---|
| 1725 | + if (ops->flags & FTRACE_OPS_FL_DIRECT) |
---|
| 1726 | + rec->flags |= FTRACE_FL_DIRECT; |
---|
| 1727 | + |
---|
1745 | 1728 | /* |
---|
1746 | 1729 | * If there's only a single callback registered to a |
---|
1747 | 1730 | * function, and the ops has a trampoline registered |
---|
.. | .. |
---|
1768 | 1751 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
---|
1769 | 1752 | return false; |
---|
1770 | 1753 | rec->flags--; |
---|
| 1754 | + |
---|
| 1755 | + /* |
---|
| 1756 | + * Only the internal direct_ops should have the |
---|
| 1757 | + * DIRECT flag set. Thus, if it is removing a |
---|
| 1758 | + * function, then that function should no longer |
---|
| 1759 | + * be direct. |
---|
| 1760 | + */ |
---|
| 1761 | + if (ops->flags & FTRACE_OPS_FL_DIRECT) |
---|
| 1762 | + rec->flags &= ~FTRACE_FL_DIRECT; |
---|
1771 | 1763 | |
---|
1772 | 1764 | /* |
---|
1773 | 1765 | * If the rec had REGS enabled and the ops that is |
---|
.. | .. |
---|
1803 | 1795 | count++; |
---|
1804 | 1796 | |
---|
1805 | 1797 | /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ |
---|
1806 | | - update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE; |
---|
| 1798 | + update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE; |
---|
1807 | 1799 | |
---|
1808 | 1800 | /* Shortcut, if we handled all records, we are done. */ |
---|
1809 | 1801 | if (!all && count == hash->count) |
---|
.. | .. |
---|
1981 | 1973 | char ins[MCOUNT_INSN_SIZE]; |
---|
1982 | 1974 | int i; |
---|
1983 | 1975 | |
---|
1984 | | - if (probe_kernel_read(ins, p, MCOUNT_INSN_SIZE)) { |
---|
| 1976 | + if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { |
---|
1985 | 1977 | printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); |
---|
1986 | 1978 | return; |
---|
1987 | 1979 | } |
---|
.. | .. |
---|
2025 | 2017 | * modifying the code. @failed should be one of either: |
---|
2026 | 2018 | * EFAULT - if the problem happens on reading the @ip address |
---|
2027 | 2019 | * EINVAL - if what is read at @ip is not what was expected |
---|
2028 | | - * EPERM - if the problem happens on writting to the @ip address |
---|
| 2020 | + * EPERM - if the problem happens on writing to the @ip address |
---|
2029 | 2021 | */ |
---|
2030 | 2022 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
---|
2031 | 2023 | { |
---|
2032 | 2024 | unsigned long ip = rec ? rec->ip : 0; |
---|
2033 | 2025 | |
---|
| 2026 | + pr_info("------------[ ftrace bug ]------------\n"); |
---|
| 2027 | + |
---|
2034 | 2028 | switch (failed) { |
---|
2035 | 2029 | case -EFAULT: |
---|
2036 | | - FTRACE_WARN_ON_ONCE(1); |
---|
2037 | 2030 | pr_info("ftrace faulted on modifying "); |
---|
2038 | | - print_ip_sym(ip); |
---|
| 2031 | + print_ip_sym(KERN_INFO, ip); |
---|
2039 | 2032 | break; |
---|
2040 | 2033 | case -EINVAL: |
---|
2041 | | - FTRACE_WARN_ON_ONCE(1); |
---|
2042 | 2034 | pr_info("ftrace failed to modify "); |
---|
2043 | | - print_ip_sym(ip); |
---|
| 2035 | + print_ip_sym(KERN_INFO, ip); |
---|
2044 | 2036 | print_ip_ins(" actual: ", (unsigned char *)ip); |
---|
2045 | 2037 | pr_cont("\n"); |
---|
2046 | 2038 | if (ftrace_expected) { |
---|
.. | .. |
---|
2049 | 2041 | } |
---|
2050 | 2042 | break; |
---|
2051 | 2043 | case -EPERM: |
---|
2052 | | - FTRACE_WARN_ON_ONCE(1); |
---|
2053 | 2044 | pr_info("ftrace faulted on writing "); |
---|
2054 | | - print_ip_sym(ip); |
---|
| 2045 | + print_ip_sym(KERN_INFO, ip); |
---|
2055 | 2046 | break; |
---|
2056 | 2047 | default: |
---|
2057 | | - FTRACE_WARN_ON_ONCE(1); |
---|
2058 | 2048 | pr_info("ftrace faulted on unknown error "); |
---|
2059 | | - print_ip_sym(ip); |
---|
| 2049 | + print_ip_sym(KERN_INFO, ip); |
---|
2060 | 2050 | } |
---|
2061 | 2051 | print_bug_type(); |
---|
2062 | 2052 | if (rec) { |
---|
.. | .. |
---|
2081 | 2071 | ip = ftrace_get_addr_curr(rec); |
---|
2082 | 2072 | pr_cont("\n expected tramp: %lx\n", ip); |
---|
2083 | 2073 | } |
---|
| 2074 | + |
---|
| 2075 | + FTRACE_WARN_ON_ONCE(1); |
---|
2084 | 2076 | } |
---|
2085 | 2077 | |
---|
2086 | | -static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) |
---|
| 2078 | +static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) |
---|
2087 | 2079 | { |
---|
2088 | 2080 | unsigned long flag = 0UL; |
---|
2089 | 2081 | |
---|
.. | .. |
---|
2110 | 2102 | * If enabling and the REGS flag does not match the REGS_EN, or |
---|
2111 | 2103 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore |
---|
2112 | 2104 | * this record. Set flags to fail the compare against ENABLED. |
---|
| 2105 | + * Same for direct calls. |
---|
2113 | 2106 | */ |
---|
2114 | 2107 | if (flag) { |
---|
2115 | | - if (!(rec->flags & FTRACE_FL_REGS) != |
---|
| 2108 | + if (!(rec->flags & FTRACE_FL_REGS) != |
---|
2116 | 2109 | !(rec->flags & FTRACE_FL_REGS_EN)) |
---|
2117 | 2110 | flag |= FTRACE_FL_REGS; |
---|
2118 | 2111 | |
---|
2119 | | - if (!(rec->flags & FTRACE_FL_TRAMP) != |
---|
| 2112 | + if (!(rec->flags & FTRACE_FL_TRAMP) != |
---|
2120 | 2113 | !(rec->flags & FTRACE_FL_TRAMP_EN)) |
---|
2121 | 2114 | flag |= FTRACE_FL_TRAMP; |
---|
| 2115 | + |
---|
| 2116 | + /* |
---|
| 2117 | + * Direct calls are special, as count matters. |
---|
| 2118 | + * We must test the record for direct, if the |
---|
| 2119 | + * DIRECT and DIRECT_EN do not match, but only |
---|
| 2120 | + * if the count is 1. That's because, if the |
---|
| 2121 | + * count is something other than one, we do not |
---|
| 2122 | + * want the direct enabled (it will be done via the |
---|
| 2123 | + * direct helper). But if DIRECT_EN is set, and |
---|
| 2124 | + * the count is not one, we need to clear it. |
---|
| 2125 | + */ |
---|
| 2126 | + if (ftrace_rec_count(rec) == 1) { |
---|
| 2127 | + if (!(rec->flags & FTRACE_FL_DIRECT) != |
---|
| 2128 | + !(rec->flags & FTRACE_FL_DIRECT_EN)) |
---|
| 2129 | + flag |= FTRACE_FL_DIRECT; |
---|
| 2130 | + } else if (rec->flags & FTRACE_FL_DIRECT_EN) { |
---|
| 2131 | + flag |= FTRACE_FL_DIRECT; |
---|
| 2132 | + } |
---|
2122 | 2133 | } |
---|
2123 | 2134 | |
---|
2124 | 2135 | /* If the state of this record hasn't changed, then do nothing */ |
---|
.. | .. |
---|
2142 | 2153 | rec->flags |= FTRACE_FL_TRAMP_EN; |
---|
2143 | 2154 | else |
---|
2144 | 2155 | rec->flags &= ~FTRACE_FL_TRAMP_EN; |
---|
| 2156 | + } |
---|
| 2157 | + if (flag & FTRACE_FL_DIRECT) { |
---|
| 2158 | + /* |
---|
| 2159 | + * If there's only one user (direct_ops helper) |
---|
| 2160 | + * then we can call the direct function |
---|
| 2161 | + * directly (no ftrace trampoline). |
---|
| 2162 | + */ |
---|
| 2163 | + if (ftrace_rec_count(rec) == 1) { |
---|
| 2164 | + if (rec->flags & FTRACE_FL_DIRECT) |
---|
| 2165 | + rec->flags |= FTRACE_FL_DIRECT_EN; |
---|
| 2166 | + else |
---|
| 2167 | + rec->flags &= ~FTRACE_FL_DIRECT_EN; |
---|
| 2168 | + } else { |
---|
| 2169 | + /* |
---|
| 2170 | + * Can only call directly if there's |
---|
| 2171 | + * only one callback to the function. |
---|
| 2172 | + */ |
---|
| 2173 | + rec->flags &= ~FTRACE_FL_DIRECT_EN; |
---|
| 2174 | + } |
---|
2145 | 2175 | } |
---|
2146 | 2176 | } |
---|
2147 | 2177 | |
---|
.. | .. |
---|
2172 | 2202 | * and REGS states. The _EN flags must be disabled though. |
---|
2173 | 2203 | */ |
---|
2174 | 2204 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | |
---|
2175 | | - FTRACE_FL_REGS_EN); |
---|
| 2205 | + FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN); |
---|
2176 | 2206 | } |
---|
2177 | 2207 | |
---|
2178 | 2208 | ftrace_bug_type = FTRACE_BUG_NOP; |
---|
.. | .. |
---|
2182 | 2212 | /** |
---|
2183 | 2213 | * ftrace_update_record, set a record that now is tracing or not |
---|
2184 | 2214 | * @rec: the record to update |
---|
2185 | | - * @enable: set to 1 if the record is tracing, zero to force disable |
---|
| 2215 | + * @enable: set to true if the record is tracing, false to force disable |
---|
2186 | 2216 | * |
---|
2187 | 2217 | * The records that represent all functions that can be traced need |
---|
2188 | 2218 | * to be updated when tracing has been enabled. |
---|
2189 | 2219 | */ |
---|
2190 | | -int ftrace_update_record(struct dyn_ftrace *rec, int enable) |
---|
| 2220 | +int ftrace_update_record(struct dyn_ftrace *rec, bool enable) |
---|
2191 | 2221 | { |
---|
2192 | | - return ftrace_check_record(rec, enable, 1); |
---|
| 2222 | + return ftrace_check_record(rec, enable, true); |
---|
2193 | 2223 | } |
---|
2194 | 2224 | |
---|
2195 | 2225 | /** |
---|
2196 | 2226 | * ftrace_test_record, check if the record has been enabled or not |
---|
2197 | 2227 | * @rec: the record to test |
---|
2198 | | - * @enable: set to 1 to check if enabled, 0 if it is disabled |
---|
| 2228 | + * @enable: set to true to check if enabled, false if it is disabled |
---|
2199 | 2229 | * |
---|
2200 | 2230 | * The arch code may need to test if a record is already set to |
---|
2201 | 2231 | * tracing to determine how to modify the function code that it |
---|
2202 | 2232 | * represents. |
---|
2203 | 2233 | */ |
---|
2204 | | -int ftrace_test_record(struct dyn_ftrace *rec, int enable) |
---|
| 2234 | +int ftrace_test_record(struct dyn_ftrace *rec, bool enable) |
---|
2205 | 2235 | { |
---|
2206 | | - return ftrace_check_record(rec, enable, 0); |
---|
| 2236 | + return ftrace_check_record(rec, enable, false); |
---|
2207 | 2237 | } |
---|
2208 | 2238 | |
---|
2209 | 2239 | static struct ftrace_ops * |
---|
.. | .. |
---|
2255 | 2285 | |
---|
2256 | 2286 | if (hash_contains_ip(ip, op->func_hash)) |
---|
2257 | 2287 | return op; |
---|
2258 | | - } |
---|
| 2288 | + } |
---|
2259 | 2289 | |
---|
2260 | 2290 | return NULL; |
---|
2261 | 2291 | } |
---|
.. | .. |
---|
2345 | 2375 | return NULL; |
---|
2346 | 2376 | } |
---|
2347 | 2377 | |
---|
| 2378 | +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
---|
| 2379 | +/* Protected by rcu_tasks for reading, and direct_mutex for writing */ |
---|
| 2380 | +static struct ftrace_hash *direct_functions = EMPTY_HASH; |
---|
| 2381 | +static DEFINE_MUTEX(direct_mutex); |
---|
| 2382 | +int ftrace_direct_func_count; |
---|
| 2383 | + |
---|
| 2384 | +/* |
---|
| 2385 | + * Search the direct_functions hash to see if the given instruction pointer |
---|
| 2386 | + * has a direct caller attached to it. |
---|
| 2387 | + */ |
---|
| 2388 | +unsigned long ftrace_find_rec_direct(unsigned long ip) |
---|
| 2389 | +{ |
---|
| 2390 | + struct ftrace_func_entry *entry; |
---|
| 2391 | + |
---|
| 2392 | + entry = __ftrace_lookup_ip(direct_functions, ip); |
---|
| 2393 | + if (!entry) |
---|
| 2394 | + return 0; |
---|
| 2395 | + |
---|
| 2396 | + return entry->direct; |
---|
| 2397 | +} |
---|
| 2398 | + |
---|
| 2399 | +static void call_direct_funcs(unsigned long ip, unsigned long pip, |
---|
| 2400 | + struct ftrace_ops *ops, struct pt_regs *regs) |
---|
| 2401 | +{ |
---|
| 2402 | + unsigned long addr; |
---|
| 2403 | + |
---|
| 2404 | + addr = ftrace_find_rec_direct(ip); |
---|
| 2405 | + if (!addr) |
---|
| 2406 | + return; |
---|
| 2407 | + |
---|
| 2408 | + arch_ftrace_set_direct_caller(regs, addr); |
---|
| 2409 | +} |
---|
| 2410 | + |
---|
| 2411 | +struct ftrace_ops direct_ops = { |
---|
| 2412 | + .func = call_direct_funcs, |
---|
| 2413 | + .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE |
---|
| 2414 | + | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS |
---|
| 2415 | + | FTRACE_OPS_FL_PERMANENT, |
---|
| 2416 | + /* |
---|
| 2417 | + * By declaring the main trampoline as this trampoline |
---|
| 2418 | + * it will never have one allocated for it. Allocated |
---|
| 2419 | + * trampolines should not call direct functions. |
---|
| 2420 | + * The direct_ops should only be called by the builtin |
---|
| 2421 | + * ftrace_regs_caller trampoline. |
---|
| 2422 | + */ |
---|
| 2423 | + .trampoline = FTRACE_REGS_ADDR, |
---|
| 2424 | +}; |
---|
| 2425 | +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
---|
| 2426 | + |
---|
2348 | 2427 | /** |
---|
2349 | 2428 | * ftrace_get_addr_new - Get the call address to set to |
---|
2350 | 2429 | * @rec: The ftrace record descriptor |
---|
2351 | 2430 | * |
---|
2352 | 2431 | * If the record has the FTRACE_FL_REGS set, that means that it |
---|
2353 | 2432 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS |
---|
2354 | | - * is not not set, then it wants to convert to the normal callback. |
---|
| 2433 | + * is not set, then it wants to convert to the normal callback. |
---|
2355 | 2434 | * |
---|
2356 | 2435 | * Returns the address of the trampoline to set to |
---|
2357 | 2436 | */ |
---|
2358 | 2437 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) |
---|
2359 | 2438 | { |
---|
2360 | 2439 | struct ftrace_ops *ops; |
---|
| 2440 | + unsigned long addr; |
---|
| 2441 | + |
---|
| 2442 | + if ((rec->flags & FTRACE_FL_DIRECT) && |
---|
| 2443 | + (ftrace_rec_count(rec) == 1)) { |
---|
| 2444 | + addr = ftrace_find_rec_direct(rec->ip); |
---|
| 2445 | + if (addr) |
---|
| 2446 | + return addr; |
---|
| 2447 | + WARN_ON_ONCE(1); |
---|
| 2448 | + } |
---|
2361 | 2449 | |
---|
2362 | 2450 | /* Trampolines take precedence over regs */ |
---|
2363 | 2451 | if (rec->flags & FTRACE_FL_TRAMP) { |
---|
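With direct calls added, ftrace_get_addr_new() and ftrace_get_addr_curr() gain one more level of precedence: a direct trampoline (only when the record has a single user) wins over an ops trampoline, which the existing comment already says wins over the regs-saving caller. A toy userspace model of that selection order; the address values and the plain/regs caller names are invented, only the flag names follow the patch.

```c
/* Toy model of the call-address precedence in ftrace_get_addr_new().
 * Flag names follow the kernel; the address values are invented. */
#include <assert.h>

#define FL_DIRECT	(1 << 0)
#define FL_TRAMP	(1 << 1)
#define FL_REGS		(1 << 2)

#define DIRECT_ADDR	0xd000UL	/* caller-supplied direct trampoline */
#define TRAMP_ADDR	0xc000UL	/* ops-specific trampoline */
#define REGS_CALLER	0xb000UL	/* regs-saving caller stand-in */
#define PLAIN_CALLER	0xa000UL	/* plain caller stand-in */

static unsigned long pick_addr(unsigned int flags, int ref_count)
{
	/* A direct call may only be used when it is the record's sole user. */
	if ((flags & FL_DIRECT) && ref_count == 1)
		return DIRECT_ADDR;
	if (flags & FL_TRAMP)		/* trampolines beat regs */
		return TRAMP_ADDR;
	if (flags & FL_REGS)
		return REGS_CALLER;
	return PLAIN_CALLER;
}

int main(void)
{
	assert(pick_addr(FL_DIRECT | FL_TRAMP | FL_REGS, 1) == DIRECT_ADDR);
	assert(pick_addr(FL_DIRECT | FL_TRAMP, 2) == TRAMP_ADDR); /* shared rec */
	assert(pick_addr(FL_REGS, 1) == REGS_CALLER);
	assert(pick_addr(0, 1) == PLAIN_CALLER);
	return 0;
}
```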
.. | .. |
---|
2390 | 2478 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) |
---|
2391 | 2479 | { |
---|
2392 | 2480 | struct ftrace_ops *ops; |
---|
| 2481 | + unsigned long addr; |
---|
| 2482 | + |
---|
| 2483 | + /* Direct calls take precedence over trampolines */ |
---|
| 2484 | + if (rec->flags & FTRACE_FL_DIRECT_EN) { |
---|
| 2485 | + addr = ftrace_find_rec_direct(rec->ip); |
---|
| 2486 | + if (addr) |
---|
| 2487 | + return addr; |
---|
| 2488 | + WARN_ON_ONCE(1); |
---|
| 2489 | + } |
---|
2393 | 2490 | |
---|
2394 | 2491 | /* Trampolines take precedence over regs */ |
---|
2395 | 2492 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
---|
.. | .. |
---|
2410 | 2507 | } |
---|
2411 | 2508 | |
---|
2412 | 2509 | static int |
---|
2413 | | -__ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
---|
| 2510 | +__ftrace_replace_code(struct dyn_ftrace *rec, bool enable) |
---|
2414 | 2511 | { |
---|
2415 | 2512 | unsigned long ftrace_old_addr; |
---|
2416 | 2513 | unsigned long ftrace_addr; |
---|
.. | .. |
---|
2442 | 2539 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
---|
2443 | 2540 | } |
---|
2444 | 2541 | |
---|
2445 | | - return -1; /* unknow ftrace bug */ |
---|
| 2542 | + return -1; /* unknown ftrace bug */ |
---|
2446 | 2543 | } |
---|
2447 | 2544 | |
---|
2448 | | -void __weak ftrace_replace_code(int enable) |
---|
| 2545 | +void __weak ftrace_replace_code(int mod_flags) |
---|
2449 | 2546 | { |
---|
2450 | 2547 | struct dyn_ftrace *rec; |
---|
2451 | 2548 | struct ftrace_page *pg; |
---|
| 2549 | + bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; |
---|
| 2550 | + int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; |
---|
2452 | 2551 | int failed; |
---|
2453 | 2552 | |
---|
2454 | 2553 | if (unlikely(ftrace_disabled)) |
---|
.. | .. |
---|
2465 | 2564 | /* Stop processing */ |
---|
2466 | 2565 | return; |
---|
2467 | 2566 | } |
---|
| 2567 | + if (schedulable) |
---|
| 2568 | + cond_resched(); |
---|
2468 | 2569 | } while_for_each_ftrace_rec(); |
---|
2469 | 2570 | } |
---|
2470 | 2571 | |
---|
.. | .. |
---|
2541 | 2642 | } |
---|
2542 | 2643 | |
---|
2543 | 2644 | static int |
---|
2544 | | -ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
---|
| 2645 | +ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) |
---|
2545 | 2646 | { |
---|
2546 | 2647 | int ret; |
---|
2547 | 2648 | |
---|
2548 | 2649 | if (unlikely(ftrace_disabled)) |
---|
2549 | 2650 | return 0; |
---|
2550 | 2651 | |
---|
2551 | | - ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
---|
| 2652 | + ret = ftrace_init_nop(mod, rec); |
---|
2552 | 2653 | if (ret) { |
---|
2553 | 2654 | ftrace_bug_type = FTRACE_BUG_INIT; |
---|
2554 | 2655 | ftrace_bug(ret, rec); |
---|
.. | .. |
---|
2578 | 2679 | void ftrace_modify_all_code(int command) |
---|
2579 | 2680 | { |
---|
2580 | 2681 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
---|
| 2682 | + int mod_flags = 0; |
---|
2581 | 2683 | int err = 0; |
---|
| 2684 | + |
---|
| 2685 | + if (command & FTRACE_MAY_SLEEP) |
---|
| 2686 | + mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; |
---|
2582 | 2687 | |
---|
2583 | 2688 | /* |
---|
2584 | 2689 | * If the ftrace_caller calls a ftrace_ops func directly, |
---|
.. | .. |
---|
2597 | 2702 | } |
---|
2598 | 2703 | |
---|
2599 | 2704 | if (command & FTRACE_UPDATE_CALLS) |
---|
2600 | | - ftrace_replace_code(1); |
---|
| 2705 | + ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); |
---|
2601 | 2706 | else if (command & FTRACE_DISABLE_CALLS) |
---|
2602 | | - ftrace_replace_code(0); |
---|
| 2707 | + ftrace_replace_code(mod_flags); |
---|
2603 | 2708 | |
---|
2604 | 2709 | if (update && ftrace_trace_function != ftrace_ops_list_func) { |
---|
2605 | 2710 | function_trace_op = set_function_trace_op; |
---|
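ftrace_replace_code() now takes a flags word instead of a bare enable int: ftrace_modify_all_code() packs FTRACE_MODIFY_ENABLE_FL and FTRACE_MODIFY_MAY_SLEEP_FL (defined near the top of this patch) and the callee unpacks them again. The shape of that interface, reduced to a standalone sketch with placeholder globals standing in for the real work:

```c
/* Standalone sketch of the pack/unpack protocol between
 * ftrace_modify_all_code() and ftrace_replace_code(). */
#include <assert.h>
#include <stdbool.h>

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

static bool last_enable;
static bool last_schedulable;

/* Callee: recover both booleans from the single flags argument. */
static void replace_code(int mod_flags)
{
	last_enable	 = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	last_schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
}

/* Caller: mirror of the branches in ftrace_modify_all_code(). */
static void modify_all_code(bool update_calls, bool may_sleep)
{
	int mod_flags = may_sleep ? FTRACE_MODIFY_MAY_SLEEP_FL : 0;

	if (update_calls)
		replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else
		replace_code(mod_flags);
}

int main(void)
{
	modify_all_code(true, true);
	assert(last_enable && last_schedulable);

	modify_all_code(false, false);
	assert(!last_enable && !last_schedulable);
	return 0;
}
```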
.. | .. |
---|
2692 | 2797 | { |
---|
2693 | 2798 | } |
---|
2694 | 2799 | |
---|
| 2800 | +/* List of trace_ops that have allocated trampolines */ |
---|
| 2801 | +static LIST_HEAD(ftrace_ops_trampoline_list); |
---|
| 2802 | + |
---|
| 2803 | +static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) |
---|
| 2804 | +{ |
---|
| 2805 | + lockdep_assert_held(&ftrace_lock); |
---|
| 2806 | + list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); |
---|
| 2807 | +} |
---|
| 2808 | + |
---|
| 2809 | +static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) |
---|
| 2810 | +{ |
---|
| 2811 | + lockdep_assert_held(&ftrace_lock); |
---|
| 2812 | + list_del_rcu(&ops->list); |
---|
| 2813 | + synchronize_rcu(); |
---|
| 2814 | +} |
---|
| 2815 | + |
---|
| 2816 | +/* |
---|
| 2817 | + * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols |
---|
| 2818 | + * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is |
---|
| 2819 | + * not a module. |
---|
| 2820 | + */ |
---|
| 2821 | +#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" |
---|
| 2822 | +#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" |
---|
| 2823 | + |
---|
| 2824 | +static void ftrace_trampoline_free(struct ftrace_ops *ops) |
---|
| 2825 | +{ |
---|
| 2826 | + if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && |
---|
| 2827 | + ops->trampoline) { |
---|
| 2828 | + /* |
---|
| 2829 | + * Record the text poke event before the ksymbol unregister |
---|
| 2830 | + * event. |
---|
| 2831 | + */ |
---|
| 2832 | + perf_event_text_poke((void *)ops->trampoline, |
---|
| 2833 | + (void *)ops->trampoline, |
---|
| 2834 | + ops->trampoline_size, NULL, 0); |
---|
| 2835 | + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
---|
| 2836 | + ops->trampoline, ops->trampoline_size, |
---|
| 2837 | + true, FTRACE_TRAMPOLINE_SYM); |
---|
| 2838 | + /* Remove from kallsyms after the perf events */ |
---|
| 2839 | + ftrace_remove_trampoline_from_kallsyms(ops); |
---|
| 2840 | + } |
---|
| 2841 | + |
---|
| 2842 | + arch_ftrace_trampoline_free(ops); |
---|
| 2843 | +} |
---|
| 2844 | + |
---|
2695 | 2845 | static void ftrace_startup_enable(int command) |
---|
2696 | 2846 | { |
---|
2697 | 2847 | if (saved_ftrace_func != ftrace_trace_function) { |
---|
.. | .. |
---|
2712 | 2862 | update_all_ops = false; |
---|
2713 | 2863 | } |
---|
2714 | 2864 | |
---|
2715 | | -static int ftrace_startup(struct ftrace_ops *ops, int command) |
---|
| 2865 | +int ftrace_startup(struct ftrace_ops *ops, int command) |
---|
2716 | 2866 | { |
---|
2717 | 2867 | int ret; |
---|
2718 | 2868 | |
---|
.. | .. |
---|
2741 | 2891 | __unregister_ftrace_function(ops); |
---|
2742 | 2892 | ftrace_start_up--; |
---|
2743 | 2893 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
---|
| 2894 | + if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
---|
| 2895 | + ftrace_trampoline_free(ops); |
---|
2744 | 2896 | return ret; |
---|
2745 | 2897 | } |
---|
2746 | 2898 | |
---|
.. | .. |
---|
2749 | 2901 | |
---|
2750 | 2902 | ftrace_startup_enable(command); |
---|
2751 | 2903 | |
---|
| 2904 | + /* |
---|
| 2905 | + * If ftrace is in an undefined state, we just remove ops from list |
---|
| 2906 | + * to prevent the NULL pointer, instead of totally rolling it back and |
---|
| 2907 | + * free trampoline, because those actions could cause further damage. |
---|
| 2908 | + */ |
---|
| 2909 | + if (unlikely(ftrace_disabled)) { |
---|
| 2910 | + __unregister_ftrace_function(ops); |
---|
| 2911 | + return -ENODEV; |
---|
| 2912 | + } |
---|
| 2913 | + |
---|
2752 | 2914 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
---|
2753 | 2915 | |
---|
2754 | 2916 | return 0; |
---|
2755 | 2917 | } |
---|
2756 | 2918 | |
---|
2757 | | -static int ftrace_shutdown(struct ftrace_ops *ops, int command) |
---|
| 2919 | +int ftrace_shutdown(struct ftrace_ops *ops, int command) |
---|
2758 | 2920 | { |
---|
2759 | 2921 | int ret; |
---|
2760 | 2922 | |
---|
.. | .. |
---|
2786 | 2948 | command |= FTRACE_UPDATE_TRACE_FUNC; |
---|
2787 | 2949 | } |
---|
2788 | 2950 | |
---|
2789 | | - if (!command || !ftrace_enabled) { |
---|
2790 | | - /* |
---|
2791 | | - * If these are dynamic or per_cpu ops, they still |
---|
2792 | | - * need their data freed. Since, function tracing is |
---|
2793 | | - * not currently active, we can just free them |
---|
2794 | | - * without synchronizing all CPUs. |
---|
2795 | | - */ |
---|
2796 | | - if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
---|
2797 | | - goto free_ops; |
---|
2798 | | - |
---|
2799 | | - return 0; |
---|
2800 | | - } |
---|
| 2951 | + if (!command || !ftrace_enabled) |
---|
| 2952 | + goto out; |
---|
2801 | 2953 | |
---|
2802 | 2954 | /* |
---|
2803 | 2955 | * If the ops uses a trampoline, then it needs to be |
---|
.. | .. |
---|
2834 | 2986 | removed_ops = NULL; |
---|
2835 | 2987 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; |
---|
2836 | 2988 | |
---|
| 2989 | +out: |
---|
2837 | 2990 | /* |
---|
2838 | 2991 | * Dynamic ops may be freed, we must make sure that all |
---|
2839 | 2992 | * callers are done before leaving this function. |
---|
.. | .. |
---|
2849 | 3002 | * infrastructure to do the synchronization, thus we must do it |
---|
2850 | 3003 | * ourselves. |
---|
2851 | 3004 | */ |
---|
2852 | | - schedule_on_each_cpu(ftrace_sync); |
---|
| 3005 | + synchronize_rcu_tasks_rude(); |
---|
2853 | 3006 | |
---|
2854 | 3007 | /* |
---|
2855 | | - * When the kernel is preeptive, tasks can be preempted |
---|
| 3008 | + * When the kernel is preemptive, tasks can be preempted |
---|
2856 | 3009 | * while on a ftrace trampoline. Just scheduling a task on |
---|
2857 | 3010 | * a CPU is not good enough to flush them. Calling |
---|
2858 | 3011 | * synchronize_rcu_tasks() will wait for those tasks to |
---|
2859 | 3012 | * execute and either schedule voluntarily or enter user space. |
---|
2860 | 3013 | */ |
---|
2861 | | - if (IS_ENABLED(CONFIG_PREEMPT)) |
---|
| 3014 | + if (IS_ENABLED(CONFIG_PREEMPTION)) |
---|
2862 | 3015 | synchronize_rcu_tasks(); |
---|
2863 | 3016 | |
---|
2864 | | - free_ops: |
---|
2865 | | - arch_ftrace_trampoline_free(ops); |
---|
| 3017 | + ftrace_trampoline_free(ops); |
---|
2866 | 3018 | } |
---|
2867 | 3019 | |
---|
2868 | 3020 | return 0; |
---|
.. | .. |
---|
2904 | 3056 | |
---|
2905 | 3057 | static u64 ftrace_update_time; |
---|
2906 | 3058 | unsigned long ftrace_update_tot_cnt; |
---|
| 3059 | +unsigned long ftrace_number_of_pages; |
---|
| 3060 | +unsigned long ftrace_number_of_groups; |
---|
2907 | 3061 | |
---|
2908 | 3062 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
---|
2909 | 3063 | { |
---|
.. | .. |
---|
2986 | 3140 | * to the NOP instructions. |
---|
2987 | 3141 | */ |
---|
2988 | 3142 | if (!__is_defined(CC_USING_NOP_MCOUNT) && |
---|
2989 | | - !ftrace_code_disable(mod, p)) |
---|
| 3143 | + !ftrace_nop_initialize(mod, p)) |
---|
2990 | 3144 | break; |
---|
2991 | 3145 | |
---|
2992 | 3146 | update_cnt++; |
---|
.. | .. |
---|
3003 | 3157 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
---|
3004 | 3158 | { |
---|
3005 | 3159 | int order; |
---|
| 3160 | + int pages; |
---|
3006 | 3161 | int cnt; |
---|
3007 | 3162 | |
---|
3008 | 3163 | if (WARN_ON(!count)) |
---|
3009 | 3164 | return -EINVAL; |
---|
3010 | 3165 | |
---|
3011 | | - order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
---|
| 3166 | + pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); |
---|
| 3167 | + order = get_count_order(pages); |
---|
3012 | 3168 | |
---|
3013 | 3169 | /* |
---|
3014 | 3170 | * We want to fill as much as possible. No more than a page |
---|
3015 | 3171 | * may be empty. |
---|
3016 | 3172 | */ |
---|
3017 | | - while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) |
---|
| 3173 | + if (!is_power_of_2(pages)) |
---|
3018 | 3174 | order--; |
---|
3019 | 3175 | |
---|
3020 | 3176 | again: |
---|
.. | .. |
---|
3024 | 3180 | /* if we can't allocate this size, try something smaller */ |
---|
3025 | 3181 | if (!order) |
---|
3026 | 3182 | return -ENOMEM; |
---|
3027 | | - order >>= 1; |
---|
| 3183 | + order--; |
---|
3028 | 3184 | goto again; |
---|
3029 | 3185 | } |
---|
3030 | 3186 | |
---|
| 3187 | + ftrace_number_of_pages += 1 << order; |
---|
| 3188 | + ftrace_number_of_groups++; |
---|
| 3189 | + |
---|
3031 | 3190 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
---|
3032 | | - pg->size = cnt; |
---|
| 3191 | + pg->order = order; |
---|
3033 | 3192 | |
---|
3034 | 3193 | if (cnt > count) |
---|
3035 | 3194 | cnt = count; |
---|
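ftrace_allocate_records() now derives the allocation order from the page count directly: round the record count up to pages, take the count order, and step one order down when the page count is not a power of two, so at most one page is left empty and whatever does not fit spills into the next group allocated by the caller. A userspace model of that sizing rule; get_count_order() and is_power_of_2() are re-implemented here for illustration, and ENTRY_SIZE is a made-up value.

```c
/* Userspace model of the page-order sizing in ftrace_allocate_records(). */
#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE	 4096UL
#define ENTRY_SIZE	 64UL		/* illustrative record size */
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Model of get_count_order(): smallest order with (1 << order) >= count. */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1U << order) < count)
		order++;
	return order;
}

static int record_group_order(unsigned int count)
{
	unsigned int pages = (count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	int order = get_count_order(pages);

	/* Prefer the smaller block when rounding up would waste pages. */
	if (!is_power_of_2(pages))
		order--;
	return order;
}

int main(void)
{
	assert(record_group_order(ENTRIES_PER_PAGE) == 0);	/* one page */
	assert(record_group_order(ENTRIES_PER_PAGE * 2) == 1);	/* two pages */
	assert(record_group_order(ENTRIES_PER_PAGE * 3) == 1);	/* 3 pages: keep 2, spill 1 */
	return 0;
}
```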
.. | .. |
---|
3037 | 3196 | return cnt; |
---|
3038 | 3197 | } |
---|
3039 | 3198 | |
---|
| 3199 | +static void ftrace_free_pages(struct ftrace_page *pages) |
---|
| 3200 | +{ |
---|
| 3201 | + struct ftrace_page *pg = pages; |
---|
| 3202 | + |
---|
| 3203 | + while (pg) { |
---|
| 3204 | + if (pg->records) { |
---|
| 3205 | + free_pages((unsigned long)pg->records, pg->order); |
---|
| 3206 | + ftrace_number_of_pages -= 1 << pg->order; |
---|
| 3207 | + } |
---|
| 3208 | + pages = pg->next; |
---|
| 3209 | + kfree(pg); |
---|
| 3210 | + pg = pages; |
---|
| 3211 | + ftrace_number_of_groups--; |
---|
| 3212 | + } |
---|
| 3213 | +} |
---|
| 3214 | + |
---|
3040 | 3215 | static struct ftrace_page * |
---|
3041 | 3216 | ftrace_allocate_pages(unsigned long num_to_init) |
---|
3042 | 3217 | { |
---|
3043 | 3218 | struct ftrace_page *start_pg; |
---|
3044 | 3219 | struct ftrace_page *pg; |
---|
3045 | | - int order; |
---|
3046 | 3220 | int cnt; |
---|
3047 | 3221 | |
---|
3048 | 3222 | if (!num_to_init) |
---|
3049 | | - return 0; |
---|
| 3223 | + return NULL; |
---|
3050 | 3224 | |
---|
3051 | 3225 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); |
---|
3052 | 3226 | if (!pg) |
---|
.. | .. |
---|
3076 | 3250 | return start_pg; |
---|
3077 | 3251 | |
---|
3078 | 3252 | free_pages: |
---|
3079 | | - pg = start_pg; |
---|
3080 | | - while (pg) { |
---|
3081 | | - order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
---|
3082 | | - free_pages((unsigned long)pg->records, order); |
---|
3083 | | - start_pg = pg->next; |
---|
3084 | | - kfree(pg); |
---|
3085 | | - pg = start_pg; |
---|
3086 | | - } |
---|
| 3253 | + ftrace_free_pages(start_pg); |
---|
3087 | 3254 | pr_info("ftrace: FAILED to allocate memory for functions\n"); |
---|
3088 | 3255 | return NULL; |
---|
3089 | 3256 | } |
---|
.. | .. |
---|
3493 | 3660 | if (iter->flags & FTRACE_ITER_ENABLED) { |
---|
3494 | 3661 | struct ftrace_ops *ops; |
---|
3495 | 3662 | |
---|
3496 | | - seq_printf(m, " (%ld)%s%s", |
---|
| 3663 | + seq_printf(m, " (%ld)%s%s%s", |
---|
3497 | 3664 | ftrace_rec_count(rec), |
---|
3498 | 3665 | rec->flags & FTRACE_FL_REGS ? " R" : " ", |
---|
3499 | | - rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); |
---|
| 3666 | + rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", |
---|
| 3667 | + rec->flags & FTRACE_FL_DIRECT ? " D" : " "); |
---|
3500 | 3668 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
---|
3501 | 3669 | ops = ftrace_find_tramp_ops_any(rec); |
---|
3502 | 3670 | if (ops) { |
---|
.. | .. |
---|
3512 | 3680 | } else { |
---|
3513 | 3681 | add_trampoline_func(m, NULL, rec); |
---|
3514 | 3682 | } |
---|
3515 | | - } |
---|
| 3683 | + if (rec->flags & FTRACE_FL_DIRECT) { |
---|
| 3684 | + unsigned long direct; |
---|
| 3685 | + |
---|
| 3686 | + direct = ftrace_find_rec_direct(rec->ip); |
---|
| 3687 | + if (direct) |
---|
| 3688 | + seq_printf(m, "\n\tdirect-->%pS", (void *)direct); |
---|
| 3689 | + } |
---|
| 3690 | + } |
---|
3516 | 3691 | |
---|
3517 | 3692 | seq_putc(m, '\n'); |
---|
3518 | 3693 | |
---|
.. | .. |
---|
3530 | 3705 | ftrace_avail_open(struct inode *inode, struct file *file) |
---|
3531 | 3706 | { |
---|
3532 | 3707 | struct ftrace_iterator *iter; |
---|
| 3708 | + int ret; |
---|
| 3709 | + |
---|
| 3710 | + ret = security_locked_down(LOCKDOWN_TRACEFS); |
---|
| 3711 | + if (ret) |
---|
| 3712 | + return ret; |
---|
3533 | 3713 | |
---|
3534 | 3714 | if (unlikely(ftrace_disabled)) |
---|
3535 | 3715 | return -ENODEV; |
---|
.. | .. |
---|
3548 | 3728 | ftrace_enabled_open(struct inode *inode, struct file *file) |
---|
3549 | 3729 | { |
---|
3550 | 3730 | struct ftrace_iterator *iter; |
---|
| 3731 | + |
---|
| 3732 | + /* |
---|
| 3733 | + * This shows us what functions are currently being |
---|
| 3734 | + * traced and by what. Not sure if we want lockdown |
---|
| 3735 | + * to hide such critical information for an admin. |
---|
| 3736 | + * Although, perhaps it can show information we don't |
---|
| 3737 | + * want people to see, but if something is tracing |
---|
| 3738 | + * something, we probably want to know about it. |
---|
| 3739 | + */ |
---|
3551 | 3740 | |
---|
3552 | 3741 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
---|
3553 | 3742 | if (!iter) |
---|
.. | .. |
---|
3591 | 3780 | if (unlikely(ftrace_disabled)) |
---|
3592 | 3781 | return -ENODEV; |
---|
3593 | 3782 | |
---|
3594 | | - if (tr && trace_array_get(tr) < 0) |
---|
| 3783 | + if (tracing_check_open_get_tr(tr)) |
---|
3595 | 3784 | return -ENODEV; |
---|
3596 | 3785 | |
---|
3597 | 3786 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
---|
.. | .. |
---|
3669 | 3858 | { |
---|
3670 | 3859 | struct ftrace_ops *ops = inode->i_private; |
---|
3671 | 3860 | |
---|
| 3861 | + /* Checks for tracefs lockdown */ |
---|
3672 | 3862 | return ftrace_regex_open(ops, |
---|
3673 | 3863 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, |
---|
3674 | 3864 | inode, file); |
---|
.. | .. |
---|
3679 | 3869 | { |
---|
3680 | 3870 | struct ftrace_ops *ops = inode->i_private; |
---|
3681 | 3871 | |
---|
| 3872 | + /* Checks for tracefs lockdown */ |
---|
3682 | 3873 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, |
---|
3683 | 3874 | inode, file); |
---|
3684 | 3875 | } |
---|
.. | .. |
---|
3759 | 3950 | } |
---|
3760 | 3951 | |
---|
3761 | 3952 | static int |
---|
| 3953 | +add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, |
---|
| 3954 | + int clear_filter) |
---|
| 3955 | +{ |
---|
| 3956 | + long index = simple_strtoul(func_g->search, NULL, 0); |
---|
| 3957 | + struct ftrace_page *pg; |
---|
| 3958 | + struct dyn_ftrace *rec; |
---|
| 3959 | + |
---|
| 3960 | + /* The index starts at 1 */ |
---|
| 3961 | + if (--index < 0) |
---|
| 3962 | + return 0; |
---|
| 3963 | + |
---|
| 3964 | + do_for_each_ftrace_rec(pg, rec) { |
---|
| 3965 | + if (pg->index <= index) { |
---|
| 3966 | + index -= pg->index; |
---|
| 3967 | + /* this is a double loop, break goes to the next page */ |
---|
| 3968 | + break; |
---|
| 3969 | + } |
---|
| 3970 | + rec = &pg->records[index]; |
---|
| 3971 | + enter_record(hash, rec, clear_filter); |
---|
| 3972 | + return 1; |
---|
| 3973 | + } while_for_each_ftrace_rec(); |
---|
| 3974 | + return 0; |
---|
| 3975 | +} |
---|
| 3976 | + |
---|
| 3977 | +static int |
---|
3762 | 3978 | ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, |
---|
3763 | 3979 | struct ftrace_glob *mod_g, int exclude_mod) |
---|
3764 | 3980 | { |
---|
.. | .. |
---|
3825 | 4041 | |
---|
3826 | 4042 | if (unlikely(ftrace_disabled)) |
---|
3827 | 4043 | goto out_unlock; |
---|
| 4044 | + |
---|
| 4045 | + if (func_g.type == MATCH_INDEX) { |
---|
| 4046 | + found = add_rec_by_index(hash, &func_g, clear_filter); |
---|
| 4047 | + goto out_unlock; |
---|
| 4048 | + } |
---|
3828 | 4049 | |
---|
3829 | 4050 | do_for_each_ftrace_rec(pg, rec) { |
---|
3830 | 4051 | |
---|
.. | .. |
---|
3906 | 4127 | static bool module_exists(const char *module) |
---|
3907 | 4128 | { |
---|
3908 | 4129 | /* All modules have the symbol __this_module */ |
---|
3909 | | - const char this_mod[] = "__this_module"; |
---|
| 4130 | + static const char this_mod[] = "__this_module"; |
---|
3910 | 4131 | char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; |
---|
3911 | 4132 | unsigned long val; |
---|
3912 | 4133 | int n; |
---|
.. | .. |
---|
4183 | 4404 | * @ip: The instruction pointer address to map @data to |
---|
4184 | 4405 | * @data: The data to map to @ip |
---|
4185 | 4406 | * |
---|
4186 | | - * Returns 0 on succes otherwise an error. |
---|
| 4407 | + * Returns 0 on success otherwise an error. |
---|
4187 | 4408 | */ |
---|
4188 | 4409 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, |
---|
4189 | 4410 | unsigned long ip, void *data) |
---|
.. | .. |
---|
4213 | 4434 | * @ip: The instruction pointer address to remove the data from |
---|
4214 | 4435 | * |
---|
4215 | 4436 | * Returns the data if it is found, otherwise NULL. |
---|
4216 | | - * Note, if the data pointer is used as the data itself, (see |
---|
| 4437 | + * Note, if the data pointer is used as the data itself, (see |
---|
4217 | 4438 | * ftrace_func_mapper_find_ip(), then the return value may be meaningless, |
---|
4218 | 4439 | * if the data pointer was set to zero. |
---|
4219 | 4440 | */ |
---|
.. | .. |
---|
4351 | 4572 | |
---|
4352 | 4573 | /* |
---|
4353 | 4574 | * Note, there's a small window here that the func_hash->filter_hash |
---|
4354 | | - * may be NULL or empty. Need to be carefule when reading the loop. |
---|
| 4575 | + * may be NULL or empty. Need to be careful when reading the loop. |
---|
4355 | 4576 | */ |
---|
4356 | 4577 | mutex_lock(&probe->ops.func_hash->regex_lock); |
---|
4357 | 4578 | |
---|
.. | .. |
---|
4552 | 4773 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
---|
4553 | 4774 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
---|
4554 | 4775 | &old_hash_ops); |
---|
4555 | | - synchronize_sched(); |
---|
| 4776 | + synchronize_rcu(); |
---|
4556 | 4777 | |
---|
4557 | 4778 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
---|
4558 | 4779 | hlist_del(&entry->hlist); |
---|
.. | .. |
---|
4794 | 5015 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, |
---|
4795 | 5016 | int reset, int enable) |
---|
4796 | 5017 | { |
---|
4797 | | - return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); |
---|
| 5018 | + return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); |
---|
4798 | 5019 | } |
---|
| 5020 | + |
---|
| 5021 | +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
---|
| 5022 | + |
---|
| 5023 | +struct ftrace_direct_func { |
---|
| 5024 | + struct list_head next; |
---|
| 5025 | + unsigned long addr; |
---|
| 5026 | + int count; |
---|
| 5027 | +}; |
---|
| 5028 | + |
---|
| 5029 | +static LIST_HEAD(ftrace_direct_funcs); |
---|
| 5030 | + |
---|
| 5031 | +/** |
---|
| 5032 | + * ftrace_find_direct_func - check whether an address is a registered direct caller
---|
| 5033 | + * @addr: The address of a registered direct caller |
---|
| 5034 | + * |
---|
| 5035 | + * This searches to see if a ftrace direct caller has been registered |
---|
| 5036 | + * at a specific address, and if so, it returns a descriptor for it. |
---|
| 5037 | + * |
---|
| 5038 | + * This can be used by architecture code to see if an address is |
---|
| 5039 | + * a direct caller (trampoline) attached to a fentry/mcount location. |
---|
| 5040 | + * This is useful for the function_graph tracer, as it may need to |
---|
| 5041 | + * do adjustments if it traced a location that also has a direct |
---|
| 5042 | + * trampoline attached to it. |
---|
| 5043 | + */ |
---|
| 5044 | +struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) |
---|
| 5045 | +{ |
---|
| 5046 | + struct ftrace_direct_func *entry; |
---|
| 5047 | + bool found = false; |
---|
| 5048 | + |
---|
| 5049 | + /* May be called by fgraph trampoline (protected by rcu tasks) */ |
---|
| 5050 | + list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) { |
---|
| 5051 | + if (entry->addr == addr) { |
---|
| 5052 | + found = true; |
---|
| 5053 | + break; |
---|
| 5054 | + } |
---|
| 5055 | + } |
---|
| 5056 | + if (found) |
---|
| 5057 | + return entry; |
---|
| 5058 | + |
---|
| 5059 | + return NULL; |
---|
| 5060 | +} |
---|
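A hedged sketch of the intended call pattern for the lookup above: architecture or function_graph code asking whether an address it is about to adjust belongs to a registered direct trampoline (the wrapper name is hypothetical, only ftrace_find_direct_func() comes from this patch):

	#include <linux/ftrace.h>

	/* Hypothetical helper: is 'addr' one of the registered direct trampolines? */
	static bool addr_is_direct_trampoline(unsigned long addr)
	{
		struct ftrace_direct_func *direct;

		direct = ftrace_find_direct_func(addr);
		return direct != NULL;		/* direct->addr == addr when found */
	}
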
| 5061 | + |
---|
| 5062 | +static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr) |
---|
| 5063 | +{ |
---|
| 5064 | + struct ftrace_direct_func *direct; |
---|
| 5065 | + |
---|
| 5066 | + direct = kmalloc(sizeof(*direct), GFP_KERNEL); |
---|
| 5067 | + if (!direct) |
---|
| 5068 | + return NULL; |
---|
| 5069 | + direct->addr = addr; |
---|
| 5070 | + direct->count = 0; |
---|
| 5071 | + list_add_rcu(&direct->next, &ftrace_direct_funcs); |
---|
| 5072 | + ftrace_direct_func_count++; |
---|
| 5073 | + return direct; |
---|
| 5074 | +} |
---|
| 5075 | + |
---|
| 5076 | +/** |
---|
| 5077 | + * register_ftrace_direct - Call a custom trampoline directly |
---|
| 5078 | + * @ip: The address of the nop at the beginning of a function |
---|
| 5079 | + * @addr: The address of the trampoline to call at @ip |
---|
| 5080 | + * |
---|
| 5081 | + * This is used to attach a direct call at the nop location (@ip) placed
---|
| 5082 | + * at the start of an ftrace-traced function. The location it calls
---|
| 5083 | + * (@addr) must be able to handle a direct call: it must save the
---|
| 5084 | + * parameters of the function being traced and restore them (or inject
---|
| 5085 | + * new ones if needed) before returning.
---|
| 5086 | + * |
---|
| 5087 | + * Returns: |
---|
| 5088 | + * 0 on success |
---|
| 5089 | + * -EBUSY - Another direct function is already attached (there can be only one) |
---|
| 5090 | + * -ENODEV - @ip does not point to a ftrace nop location (or not supported) |
---|
| 5091 | + * -ENOMEM - There was an allocation failure. |
---|
| 5092 | + */ |
---|
| 5093 | +int register_ftrace_direct(unsigned long ip, unsigned long addr) |
---|
| 5094 | +{ |
---|
| 5095 | + struct ftrace_direct_func *direct; |
---|
| 5096 | + struct ftrace_func_entry *entry; |
---|
| 5097 | + struct ftrace_hash *free_hash = NULL; |
---|
| 5098 | + struct dyn_ftrace *rec; |
---|
| 5099 | + int ret = -EBUSY; |
---|
| 5100 | + |
---|
| 5101 | + mutex_lock(&direct_mutex); |
---|
| 5102 | + |
---|
| 5103 | + /* See if there's a direct function at @ip already */ |
---|
| 5104 | + if (ftrace_find_rec_direct(ip)) |
---|
| 5105 | + goto out_unlock; |
---|
| 5106 | + |
---|
| 5107 | + ret = -ENODEV; |
---|
| 5108 | + rec = lookup_rec(ip, ip); |
---|
| 5109 | + if (!rec) |
---|
| 5110 | + goto out_unlock; |
---|
| 5111 | + |
---|
| 5112 | + /* |
---|
| 5113 | + * Check whether the rec claims to have a direct call even though
---|
| 5114 | + * we did not find one above (that would be a bug, hence the WARN).
---|
| 5115 | + */ |
---|
| 5116 | + if (WARN_ON(rec->flags & FTRACE_FL_DIRECT)) |
---|
| 5117 | + goto out_unlock; |
---|
| 5118 | + |
---|
| 5119 | + /* Make sure the ip points to the exact record */ |
---|
| 5120 | + if (ip != rec->ip) { |
---|
| 5121 | + ip = rec->ip; |
---|
| 5122 | + /* Need to check this ip for a direct. */ |
---|
| 5123 | + if (ftrace_find_rec_direct(ip)) |
---|
| 5124 | + goto out_unlock; |
---|
| 5125 | + } |
---|
| 5126 | + |
---|
| 5127 | + ret = -ENOMEM; |
---|
| 5128 | + if (ftrace_hash_empty(direct_functions) || |
---|
| 5129 | + direct_functions->count > 2 * (1 << direct_functions->size_bits)) { |
---|
| 5130 | + struct ftrace_hash *new_hash; |
---|
| 5131 | + int size = ftrace_hash_empty(direct_functions) ? 0 : |
---|
| 5132 | + direct_functions->count + 1; |
---|
| 5133 | + |
---|
| 5134 | + if (size < 32) |
---|
| 5135 | + size = 32; |
---|
| 5136 | + |
---|
| 5137 | + new_hash = dup_hash(direct_functions, size); |
---|
| 5138 | + if (!new_hash) |
---|
| 5139 | + goto out_unlock; |
---|
| 5140 | + |
---|
| 5141 | + free_hash = direct_functions; |
---|
| 5142 | + direct_functions = new_hash; |
---|
| 5143 | + } |
---|
| 5144 | + |
---|
| 5145 | + entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
---|
| 5146 | + if (!entry) |
---|
| 5147 | + goto out_unlock; |
---|
| 5148 | + |
---|
| 5149 | + direct = ftrace_find_direct_func(addr); |
---|
| 5150 | + if (!direct) { |
---|
| 5151 | + direct = ftrace_alloc_direct_func(addr); |
---|
| 5152 | + if (!direct) { |
---|
| 5153 | + kfree(entry); |
---|
| 5154 | + goto out_unlock; |
---|
| 5155 | + } |
---|
| 5156 | + } |
---|
| 5157 | + |
---|
| 5158 | + entry->ip = ip; |
---|
| 5159 | + entry->direct = addr; |
---|
| 5160 | + __add_hash_entry(direct_functions, entry); |
---|
| 5161 | + |
---|
| 5162 | + ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); |
---|
| 5163 | + |
---|
| 5164 | + if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) { |
---|
| 5165 | + ret = register_ftrace_function(&direct_ops); |
---|
| 5166 | + if (ret) |
---|
| 5167 | + ftrace_set_filter_ip(&direct_ops, ip, 1, 0); |
---|
| 5168 | + } |
---|
| 5169 | + |
---|
| 5170 | + if (ret) { |
---|
| 5171 | + remove_hash_entry(direct_functions, entry); |
---|
| 5172 | + kfree(entry); |
---|
| 5173 | + if (!direct->count) { |
---|
| 5174 | + list_del_rcu(&direct->next); |
---|
| 5175 | + synchronize_rcu_tasks(); |
---|
| 5176 | + kfree(direct); |
---|
| 5177 | + if (free_hash) |
---|
| 5178 | + free_ftrace_hash(free_hash); |
---|
| 5179 | + free_hash = NULL; |
---|
| 5180 | + ftrace_direct_func_count--; |
---|
| 5181 | + } |
---|
| 5182 | + } else { |
---|
| 5183 | + direct->count++; |
---|
| 5184 | + } |
---|
| 5185 | + out_unlock: |
---|
| 5186 | + mutex_unlock(&direct_mutex); |
---|
| 5187 | + |
---|
| 5188 | + if (free_hash) { |
---|
| 5189 | + synchronize_rcu_tasks(); |
---|
| 5190 | + free_ftrace_hash(free_hash); |
---|
| 5191 | + } |
---|
| 5192 | + |
---|
| 5193 | + return ret; |
---|
| 5194 | +} |
---|
| 5195 | +EXPORT_SYMBOL_GPL(register_ftrace_direct); |
---|
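A minimal sketch of how a module is expected to use this interface, loosely modelled on the in-tree samples/ftrace/ftrace-direct.c sample; the assembly trampoline my_tramp is only declared here, wake_up_process() is just an example target, and the matching unregister_ftrace_direct(), defined further below, undoes the attachment on exit:

	#include <linux/module.h>
	#include <linux/ftrace.h>
	#include <linux/sched.h>

	void my_direct_func(struct task_struct *p)
	{
		trace_printk("waking up %s-%d\n", p->comm, p->pid);
	}

	/* Arch-specific assembly stub (assumed): saves the argument registers,
	 * calls my_direct_func(), restores them and returns to the tracee. */
	extern void my_tramp(void *);

	static int __init ftrace_direct_example_init(void)
	{
		/* @ip is the fentry/mcount site of the traced function */
		return register_ftrace_direct((unsigned long)wake_up_process,
					      (unsigned long)my_tramp);
	}

	static void __exit ftrace_direct_example_exit(void)
	{
		unregister_ftrace_direct((unsigned long)wake_up_process,
					 (unsigned long)my_tramp);
	}

	module_init(ftrace_direct_example_init);
	module_exit(ftrace_direct_example_exit);
	MODULE_LICENSE("GPL");
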
| 5196 | + |
---|
| 5197 | +static struct ftrace_func_entry *find_direct_entry(unsigned long *ip, |
---|
| 5198 | + struct dyn_ftrace **recp) |
---|
| 5199 | +{ |
---|
| 5200 | + struct ftrace_func_entry *entry; |
---|
| 5201 | + struct dyn_ftrace *rec; |
---|
| 5202 | + |
---|
| 5203 | + rec = lookup_rec(*ip, *ip); |
---|
| 5204 | + if (!rec) |
---|
| 5205 | + return NULL; |
---|
| 5206 | + |
---|
| 5207 | + entry = __ftrace_lookup_ip(direct_functions, rec->ip); |
---|
| 5208 | + if (!entry) { |
---|
| 5209 | + WARN_ON(rec->flags & FTRACE_FL_DIRECT); |
---|
| 5210 | + return NULL; |
---|
| 5211 | + } |
---|
| 5212 | + |
---|
| 5213 | + WARN_ON(!(rec->flags & FTRACE_FL_DIRECT)); |
---|
| 5214 | + |
---|
| 5215 | + /* Passed in ip just needs to be on the call site */ |
---|
| 5216 | + *ip = rec->ip; |
---|
| 5217 | + |
---|
| 5218 | + if (recp) |
---|
| 5219 | + *recp = rec; |
---|
| 5220 | + |
---|
| 5221 | + return entry; |
---|
| 5222 | +} |
---|
| 5223 | + |
---|
| 5224 | +int unregister_ftrace_direct(unsigned long ip, unsigned long addr) |
---|
| 5225 | +{ |
---|
| 5226 | + struct ftrace_direct_func *direct; |
---|
| 5227 | + struct ftrace_func_entry *entry; |
---|
| 5228 | + int ret = -ENODEV; |
---|
| 5229 | + |
---|
| 5230 | + mutex_lock(&direct_mutex); |
---|
| 5231 | + |
---|
| 5232 | + entry = find_direct_entry(&ip, NULL); |
---|
| 5233 | + if (!entry) |
---|
| 5234 | + goto out_unlock; |
---|
| 5235 | + |
---|
| 5236 | + if (direct_functions->count == 1) |
---|
| 5237 | + unregister_ftrace_function(&direct_ops); |
---|
| 5238 | + |
---|
| 5239 | + ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0); |
---|
| 5240 | + |
---|
| 5241 | + WARN_ON(ret); |
---|
| 5242 | + |
---|
| 5243 | + remove_hash_entry(direct_functions, entry); |
---|
| 5244 | + |
---|
| 5245 | + direct = ftrace_find_direct_func(addr); |
---|
| 5246 | + if (!WARN_ON(!direct)) { |
---|
| 5247 | + /* This is the good path (see the ! before WARN) */ |
---|
| 5248 | + direct->count--; |
---|
| 5249 | + WARN_ON(direct->count < 0); |
---|
| 5250 | + if (!direct->count) { |
---|
| 5251 | + list_del_rcu(&direct->next); |
---|
| 5252 | + synchronize_rcu_tasks(); |
---|
| 5253 | + kfree(direct); |
---|
| 5254 | + kfree(entry); |
---|
| 5255 | + ftrace_direct_func_count--; |
---|
| 5256 | + } |
---|
| 5257 | + } |
---|
| 5258 | + out_unlock: |
---|
| 5259 | + mutex_unlock(&direct_mutex); |
---|
| 5260 | + |
---|
| 5261 | + return ret; |
---|
| 5262 | +} |
---|
| 5263 | +EXPORT_SYMBOL_GPL(unregister_ftrace_direct); |
---|
| 5264 | + |
---|
| 5265 | +static struct ftrace_ops stub_ops = { |
---|
| 5266 | + .func = ftrace_stub, |
---|
| 5267 | +}; |
---|
| 5268 | + |
---|
| 5269 | +/** |
---|
| 5270 | + * ftrace_modify_direct_caller - modify ftrace nop directly |
---|
| 5271 | + * @entry: The ftrace hash entry of the direct helper for @rec |
---|
| 5272 | + * @rec: The record representing the function site to patch |
---|
| 5273 | + * @old_addr: The location that the site at @rec->ip currently calls |
---|
| 5274 | + * @new_addr: The location that the site at @rec->ip should call |
---|
| 5275 | + * |
---|
| 5276 | + * An architecture may override this function to optimize the
---|
| 5277 | + * changing of the direct callback on an ftrace nop location. |
---|
| 5278 | + * This is called with the ftrace_lock mutex held, and no other |
---|
| 5279 | + * ftrace callbacks are on the associated record (@rec). Thus, |
---|
| 5280 | + * it is safe to modify the ftrace record, where it should be |
---|
| 5281 | + * currently calling @old_addr directly, to call @new_addr. |
---|
| 5282 | + * |
---|
| 5283 | + * Safety checks should be made to make sure that the code at |
---|
| 5284 | + * @rec->ip is currently calling @old_addr. And this must |
---|
| 5285 | + * also update entry->direct to @new_addr. |
---|
| 5286 | + */ |
---|
| 5287 | +int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry, |
---|
| 5288 | + struct dyn_ftrace *rec, |
---|
| 5289 | + unsigned long old_addr, |
---|
| 5290 | + unsigned long new_addr) |
---|
| 5291 | +{ |
---|
| 5292 | + unsigned long ip = rec->ip; |
---|
| 5293 | + int ret; |
---|
| 5294 | + |
---|
| 5295 | + /* |
---|
| 5296 | + * The ftrace_lock was used to determine if the record |
---|
| 5297 | + * had more than one registered user to it. If it did, |
---|
| 5298 | + * we needed to prevent that from changing to do the quick |
---|
| 5299 | + * switch. But if it did not (only a direct caller was attached) |
---|
| 5300 | + * then this function is called. But this function can deal |
---|
| 5301 | + * with attached callers to the rec that we care about, and |
---|
| 5302 | + * since this function uses standard ftrace calls that take |
---|
| 5303 | + * the ftrace_lock mutex, we need to release it. |
---|
| 5304 | + */ |
---|
| 5305 | + mutex_unlock(&ftrace_lock); |
---|
| 5306 | + |
---|
| 5307 | + /* |
---|
| 5308 | + * By setting a stub function at the same address, we force |
---|
| 5309 | + * the code to call the iterator and the direct_ops helper. |
---|
| 5310 | + * This means the site at @ip no longer calls the direct
---|
| 5311 | + * trampoline, so we can simply modify it.
---|
| 5312 | + */ |
---|
| 5313 | + ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0); |
---|
| 5314 | + if (ret) |
---|
| 5315 | + goto out_lock; |
---|
| 5316 | + |
---|
| 5317 | + ret = register_ftrace_function(&stub_ops); |
---|
| 5318 | + if (ret) { |
---|
| 5319 | + ftrace_set_filter_ip(&stub_ops, ip, 1, 0); |
---|
| 5320 | + goto out_lock; |
---|
| 5321 | + } |
---|
| 5322 | + |
---|
| 5323 | + entry->direct = new_addr; |
---|
| 5324 | + |
---|
| 5325 | + /* |
---|
| 5326 | + * By removing the stub, we put back the direct call, calling |
---|
| 5327 | + * the @new_addr. |
---|
| 5328 | + */ |
---|
| 5329 | + unregister_ftrace_function(&stub_ops); |
---|
| 5330 | + ftrace_set_filter_ip(&stub_ops, ip, 1, 0); |
---|
| 5331 | + |
---|
| 5332 | + out_lock: |
---|
| 5333 | + mutex_lock(&ftrace_lock); |
---|
| 5334 | + |
---|
| 5335 | + return ret; |
---|
| 5336 | +} |
---|
| 5337 | + |
---|
| 5338 | +/** |
---|
| 5339 | + * modify_ftrace_direct - Modify an existing direct call to call something else |
---|
| 5340 | + * @ip: The instruction pointer to modify |
---|
| 5341 | + * @old_addr: The address that the current @ip calls directly |
---|
| 5342 | + * @new_addr: The address that the @ip should call |
---|
| 5343 | + * |
---|
| 5344 | + * This modifies a ftrace direct caller at an instruction pointer without |
---|
| 5345 | + * having to disable it first. The direct call will switch over to the |
---|
| 5346 | + * @new_addr without missing anything. |
---|
| 5347 | + * |
---|
| 5348 | + * Returns: zero on success. Non-zero on error, which includes:
---|
| 5349 | + * -ENODEV : the @ip given has no direct caller attached |
---|
| 5350 | + * -EINVAL : the @old_addr does not match the current direct caller |
---|
| 5351 | + */ |
---|
| 5352 | +int modify_ftrace_direct(unsigned long ip, |
---|
| 5353 | + unsigned long old_addr, unsigned long new_addr) |
---|
| 5354 | +{ |
---|
| 5355 | + struct ftrace_direct_func *direct, *new_direct = NULL; |
---|
| 5356 | + struct ftrace_func_entry *entry; |
---|
| 5357 | + struct dyn_ftrace *rec; |
---|
| 5358 | + int ret = -ENODEV; |
---|
| 5359 | + |
---|
| 5360 | + mutex_lock(&direct_mutex); |
---|
| 5361 | + |
---|
| 5362 | + mutex_lock(&ftrace_lock); |
---|
| 5363 | + entry = find_direct_entry(&ip, &rec); |
---|
| 5364 | + if (!entry) |
---|
| 5365 | + goto out_unlock; |
---|
| 5366 | + |
---|
| 5367 | + ret = -EINVAL; |
---|
| 5368 | + if (entry->direct != old_addr) |
---|
| 5369 | + goto out_unlock; |
---|
| 5370 | + |
---|
| 5371 | + direct = ftrace_find_direct_func(old_addr); |
---|
| 5372 | + if (WARN_ON(!direct)) |
---|
| 5373 | + goto out_unlock; |
---|
| 5374 | + if (direct->count > 1) { |
---|
| 5375 | + ret = -ENOMEM; |
---|
| 5376 | + new_direct = ftrace_alloc_direct_func(new_addr); |
---|
| 5377 | + if (!new_direct) |
---|
| 5378 | + goto out_unlock; |
---|
| 5379 | + direct->count--; |
---|
| 5380 | + new_direct->count++; |
---|
| 5381 | + } else { |
---|
| 5382 | + direct->addr = new_addr; |
---|
| 5383 | + } |
---|
| 5384 | + |
---|
| 5385 | + /* |
---|
| 5386 | + * If there's no other ftrace callback on the rec->ip location, |
---|
| 5387 | + * then it can be changed directly by the architecture. |
---|
| 5388 | + * If there is another caller, then we just need to change the |
---|
| 5389 | + * direct caller helper to point to @new_addr. |
---|
| 5390 | + */ |
---|
| 5391 | + if (ftrace_rec_count(rec) == 1) { |
---|
| 5392 | + ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr); |
---|
| 5393 | + } else { |
---|
| 5394 | + entry->direct = new_addr; |
---|
| 5395 | + ret = 0; |
---|
| 5396 | + } |
---|
| 5397 | + |
---|
| 5398 | + if (ret) { |
---|
| 5399 | + direct->addr = old_addr; |
---|
| 5400 | + if (unlikely(new_direct)) { |
---|
| 5401 | + direct->count++; |
---|
| 5402 | + list_del_rcu(&new_direct->next); |
---|
| 5403 | + synchronize_rcu_tasks(); |
---|
| 5404 | + kfree(new_direct); |
---|
| 5405 | + ftrace_direct_func_count--; |
---|
| 5406 | + } |
---|
| 5407 | + } |
---|
| 5408 | + |
---|
| 5409 | + out_unlock: |
---|
| 5410 | + mutex_unlock(&ftrace_lock); |
---|
| 5411 | + mutex_unlock(&direct_mutex); |
---|
| 5412 | + return ret; |
---|
| 5413 | +} |
---|
| 5414 | +EXPORT_SYMBOL_GPL(modify_ftrace_direct); |
---|
| 5415 | +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
---|
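Continuing the hypothetical module sketch from above (my_tramp and my_tramp2 are assumed assembly trampolines, not part of this patch), an attached direct caller can be retargeted live, with no window in which the fentry site falls back to a plain nop:

	extern void my_tramp2(void *);	/* second assembly trampoline (assumed) */

	static int switch_trampoline(void)
	{
		/* Move the fentry site of wake_up_process() from my_tramp to
		 * my_tramp2 without disabling the direct call in between. */
		return modify_ftrace_direct((unsigned long)wake_up_process,
					    (unsigned long)my_tramp,
					    (unsigned long)my_tramp2);
	}
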
4799 | 5416 | |
---|
4800 | 5417 | /** |
---|
4801 | 5418 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address |
---|
.. | .. |
---|
4967 | 5584 | struct ftrace_hash *hash; |
---|
4968 | 5585 | |
---|
4969 | 5586 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
---|
4970 | | - if (WARN_ON(!hash)) |
---|
| 5587 | + if (MEM_FAIL(!hash, "Failed to allocate hash\n")) |
---|
4971 | 5588 | return; |
---|
4972 | 5589 | |
---|
4973 | 5590 | while (buf) { |
---|
.. | .. |
---|
5045 | 5662 | |
---|
5046 | 5663 | if (filter_hash) { |
---|
5047 | 5664 | orig_hash = &iter->ops->func_hash->filter_hash; |
---|
5048 | | - if (iter->tr && !list_empty(&iter->tr->mod_trace)) |
---|
5049 | | - iter->hash->flags |= FTRACE_HASH_FL_MOD; |
---|
| 5665 | + if (iter->tr) { |
---|
| 5666 | + if (list_empty(&iter->tr->mod_trace)) |
---|
| 5667 | + iter->hash->flags &= ~FTRACE_HASH_FL_MOD; |
---|
| 5668 | + else |
---|
| 5669 | + iter->hash->flags |= FTRACE_HASH_FL_MOD; |
---|
| 5670 | + } |
---|
5050 | 5671 | } else |
---|
5051 | 5672 | orig_hash = &iter->ops->func_hash->notrace_hash; |
---|
5052 | 5673 | |
---|
.. | .. |
---|
5220 | 5841 | __ftrace_graph_open(struct inode *inode, struct file *file, |
---|
5221 | 5842 | struct ftrace_graph_data *fgd) |
---|
5222 | 5843 | { |
---|
5223 | | - int ret = 0; |
---|
| 5844 | + int ret; |
---|
5224 | 5845 | struct ftrace_hash *new_hash = NULL; |
---|
| 5846 | + |
---|
| 5847 | + ret = security_locked_down(LOCKDOWN_TRACEFS); |
---|
| 5848 | + if (ret) |
---|
| 5849 | + return ret; |
---|
5225 | 5850 | |
---|
5226 | 5851 | if (file->f_mode & FMODE_WRITE) { |
---|
5227 | 5852 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
---|
.. | .. |
---|
5382 | 6007 | * infrastructure to do the synchronization, thus we must do it |
---|
5383 | 6008 | * ourselves. |
---|
5384 | 6009 | */ |
---|
5385 | | - schedule_on_each_cpu(ftrace_sync); |
---|
| 6010 | + synchronize_rcu_tasks_rude(); |
---|
5386 | 6011 | |
---|
5387 | 6012 | free_ftrace_hash(old_hash); |
---|
5388 | 6013 | } |
---|
.. | .. |
---|
5514 | 6139 | |
---|
5515 | 6140 | /* |
---|
5516 | 6141 | * The name "destroy_filter_files" is really a misnomer. Although |
---|
5517 | | - * in the future, it may actualy delete the files, but this is |
---|
| 6142 | + * in the future, it may actually delete the files, but this is |
---|
5518 | 6143 | * really intended to make sure the ops passed in are disabled |
---|
5519 | 6144 | * and that when this function returns, the caller is free to |
---|
5520 | 6145 | * free the ops. |
---|
.. | .. |
---|
5567 | 6192 | return 0; |
---|
5568 | 6193 | } |
---|
5569 | 6194 | |
---|
5570 | | -static int __norecordmcount ftrace_process_locs(struct module *mod, |
---|
5571 | | - unsigned long *start, |
---|
5572 | | - unsigned long *end) |
---|
| 6195 | +static int ftrace_process_locs(struct module *mod, |
---|
| 6196 | + unsigned long *start, |
---|
| 6197 | + unsigned long *end) |
---|
5573 | 6198 | { |
---|
| 6199 | + struct ftrace_page *pg_unuse = NULL; |
---|
5574 | 6200 | struct ftrace_page *start_pg; |
---|
5575 | 6201 | struct ftrace_page *pg; |
---|
5576 | 6202 | struct dyn_ftrace *rec; |
---|
| 6203 | + unsigned long skipped = 0; |
---|
5577 | 6204 | unsigned long count; |
---|
5578 | 6205 | unsigned long *p; |
---|
5579 | 6206 | unsigned long addr; |
---|
.. | .. |
---|
5619 | 6246 | p = start; |
---|
5620 | 6247 | pg = start_pg; |
---|
5621 | 6248 | while (p < end) { |
---|
| 6249 | + unsigned long end_offset; |
---|
5622 | 6250 | addr = ftrace_call_adjust(*p++); |
---|
5623 | 6251 | /* |
---|
5624 | 6252 | * Some architecture linkers will pad between |
---|
.. | .. |
---|
5626 | 6254 | * object files to satisfy alignments. |
---|
5627 | 6255 | * Skip any NULL pointers. |
---|
5628 | 6256 | */ |
---|
5629 | | - if (!addr) |
---|
| 6257 | + if (!addr) { |
---|
| 6258 | + skipped++; |
---|
5630 | 6259 | continue; |
---|
| 6260 | + } |
---|
5631 | 6261 | |
---|
5632 | | - if (pg->index == pg->size) { |
---|
| 6262 | + end_offset = (pg->index+1) * sizeof(pg->records[0]); |
---|
| 6263 | + if (end_offset > PAGE_SIZE << pg->order) { |
---|
5633 | 6264 | /* We should have allocated enough */ |
---|
5634 | 6265 | if (WARN_ON(!pg->next)) |
---|
5635 | 6266 | break; |
---|
.. | .. |
---|
5640 | 6271 | rec->ip = addr; |
---|
5641 | 6272 | } |
---|
5642 | 6273 | |
---|
5643 | | - /* We should have used all pages */ |
---|
5644 | | - WARN_ON(pg->next); |
---|
| 6274 | + if (pg->next) { |
---|
| 6275 | + pg_unuse = pg->next; |
---|
| 6276 | + pg->next = NULL; |
---|
| 6277 | + } |
---|
5645 | 6278 | |
---|
5646 | 6279 | /* Assign the last page to ftrace_pages */ |
---|
5647 | 6280 | ftrace_pages = pg; |
---|
.. | .. |
---|
5663 | 6296 | out: |
---|
5664 | 6297 | mutex_unlock(&ftrace_lock); |
---|
5665 | 6298 | |
---|
| 6299 | + /* We should have used all pages unless we skipped some */ |
---|
| 6300 | + if (pg_unuse) { |
---|
| 6301 | + WARN_ON(!skipped); |
---|
| 6302 | + ftrace_free_pages(pg_unuse); |
---|
| 6303 | + } |
---|
5666 | 6304 | return ret; |
---|
5667 | 6305 | } |
---|
5668 | 6306 | |
---|
.. | .. |
---|
5683 | 6321 | unsigned int num_funcs; |
---|
5684 | 6322 | }; |
---|
5685 | 6323 | |
---|
| 6324 | +static int ftrace_get_trampoline_kallsym(unsigned int symnum, |
---|
| 6325 | + unsigned long *value, char *type, |
---|
| 6326 | + char *name, char *module_name, |
---|
| 6327 | + int *exported) |
---|
| 6328 | +{ |
---|
| 6329 | + struct ftrace_ops *op; |
---|
| 6330 | + |
---|
| 6331 | + list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { |
---|
| 6332 | + if (!op->trampoline || symnum--) |
---|
| 6333 | + continue; |
---|
| 6334 | + *value = op->trampoline; |
---|
| 6335 | + *type = 't'; |
---|
| 6336 | + strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); |
---|
| 6337 | + strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); |
---|
| 6338 | + *exported = 0; |
---|
| 6339 | + return 0; |
---|
| 6340 | + } |
---|
| 6341 | + |
---|
| 6342 | + return -ERANGE; |
---|
| 6343 | +} |
---|
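A hedged illustration of the effect: once a dynamically allocated trampoline is live, the kallsyms walk is expected to surface it under the FTRACE_TRAMPOLINE_SYM / FTRACE_TRAMPOLINE_MOD names, roughly like the line below (the address and the exact symbol and pseudo-module names are assumptions here, not taken from this hunk):

	/* Expected /proc/kallsyms style entry for an ftrace trampoline:
	 *
	 *   ffffffffc0547000 t ftrace_trampoline	[__builtin__ftrace]
	 */
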
| 6344 | + |
---|
5686 | 6345 | #ifdef CONFIG_MODULES |
---|
5687 | 6346 | |
---|
5688 | 6347 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) |
---|
.. | .. |
---|
5696 | 6355 | |
---|
5697 | 6356 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { |
---|
5698 | 6357 | if (ops_references_rec(ops, rec)) { |
---|
| 6358 | + if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) |
---|
| 6359 | + continue; |
---|
| 6360 | + if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
---|
| 6361 | + continue; |
---|
5699 | 6362 | cnt++; |
---|
5700 | 6363 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
---|
5701 | 6364 | rec->flags |= FTRACE_FL_REGS; |
---|
| 6365 | + if (cnt == 1 && ops->trampoline) |
---|
| 6366 | + rec->flags |= FTRACE_FL_TRAMP; |
---|
| 6367 | + else |
---|
| 6368 | + rec->flags &= ~FTRACE_FL_TRAMP; |
---|
5702 | 6369 | } |
---|
5703 | 6370 | } |
---|
5704 | 6371 | |
---|
.. | .. |
---|
5769 | 6436 | struct ftrace_page **last_pg; |
---|
5770 | 6437 | struct ftrace_page *tmp_page = NULL; |
---|
5771 | 6438 | struct ftrace_page *pg; |
---|
5772 | | - int order; |
---|
5773 | 6439 | |
---|
5774 | 6440 | mutex_lock(&ftrace_lock); |
---|
5775 | 6441 | |
---|
.. | .. |
---|
5779 | 6445 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
---|
5780 | 6446 | if (mod_map->mod == mod) { |
---|
5781 | 6447 | list_del_rcu(&mod_map->list); |
---|
5782 | | - call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map); |
---|
| 6448 | + call_rcu(&mod_map->rcu, ftrace_free_mod_map); |
---|
5783 | 6449 | break; |
---|
5784 | 6450 | } |
---|
5785 | 6451 | } |
---|
.. | .. |
---|
5820 | 6486 | /* Needs to be called outside of ftrace_lock */ |
---|
5821 | 6487 | clear_mod_from_hashes(pg); |
---|
5822 | 6488 | |
---|
5823 | | - order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
---|
5824 | | - free_pages((unsigned long)pg->records, order); |
---|
| 6489 | + if (pg->records) { |
---|
| 6490 | + free_pages((unsigned long)pg->records, pg->order); |
---|
| 6491 | + ftrace_number_of_pages -= 1 << pg->order; |
---|
| 6492 | + } |
---|
5825 | 6493 | tmp_page = pg->next; |
---|
5826 | 6494 | kfree(pg); |
---|
| 6495 | + ftrace_number_of_groups--; |
---|
5827 | 6496 | } |
---|
5828 | 6497 | } |
---|
5829 | 6498 | |
---|
.. | .. |
---|
5840 | 6509 | /* |
---|
5841 | 6510 | * If the tracing is enabled, go ahead and enable the record. |
---|
5842 | 6511 | * |
---|
5843 | | - * The reason not to enable the record immediatelly is the |
---|
| 6512 | + * The reason not to enable the record immediately is the |
---|
5844 | 6513 | * inherent check of ftrace_make_nop/ftrace_make_call for |
---|
5845 | 6514 | * correct previous instructions. Making first the NOP |
---|
5846 | 6515 | * conversion puts the module to the correct state, thus |
---|
.. | .. |
---|
5999 | 6668 | struct ftrace_mod_map *mod_map; |
---|
6000 | 6669 | const char *ret = NULL; |
---|
6001 | 6670 | |
---|
6002 | | - /* mod_map is freed via call_rcu_sched() */ |
---|
| 6671 | + /* mod_map is freed via call_rcu() */ |
---|
6003 | 6672 | preempt_disable(); |
---|
6004 | 6673 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
---|
6005 | 6674 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); |
---|
.. | .. |
---|
6020 | 6689 | { |
---|
6021 | 6690 | struct ftrace_mod_map *mod_map; |
---|
6022 | 6691 | struct ftrace_mod_func *mod_func; |
---|
| 6692 | + int ret; |
---|
6023 | 6693 | |
---|
6024 | 6694 | preempt_disable(); |
---|
6025 | 6695 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
---|
.. | .. |
---|
6046 | 6716 | WARN_ON(1); |
---|
6047 | 6717 | break; |
---|
6048 | 6718 | } |
---|
| 6719 | + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
---|
| 6720 | + module_name, exported); |
---|
6049 | 6721 | preempt_enable(); |
---|
6050 | | - return -ERANGE; |
---|
| 6722 | + return ret; |
---|
6051 | 6723 | } |
---|
6052 | 6724 | |
---|
6053 | 6725 | #else |
---|
.. | .. |
---|
6058 | 6730 | unsigned long start, unsigned long end) |
---|
6059 | 6731 | { |
---|
6060 | 6732 | return NULL; |
---|
| 6733 | +} |
---|
| 6734 | +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
---|
| 6735 | + char *type, char *name, char *module_name, |
---|
| 6736 | + int *exported) |
---|
| 6737 | +{ |
---|
| 6738 | + int ret; |
---|
| 6739 | + |
---|
| 6740 | + preempt_disable(); |
---|
| 6741 | + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
---|
| 6742 | + module_name, exported); |
---|
| 6743 | + preempt_enable(); |
---|
| 6744 | + return ret; |
---|
6061 | 6745 | } |
---|
6062 | 6746 | #endif /* CONFIG_MODULES */ |
---|
6063 | 6747 | |
---|
.. | .. |
---|
6072 | 6756 | { |
---|
6073 | 6757 | struct ftrace_func_entry *entry; |
---|
6074 | 6758 | |
---|
6075 | | - if (ftrace_hash_empty(hash)) |
---|
6076 | | - return; |
---|
6077 | | - |
---|
6078 | | - entry = __ftrace_lookup_ip(hash, func->ip); |
---|
6079 | | - |
---|
| 6759 | + entry = ftrace_lookup_ip(hash, func->ip); |
---|
6080 | 6760 | /* |
---|
6081 | 6761 | * Do not allow this rec to match again. |
---|
6082 | 6762 | * Yeah, it may waste some memory, but will be removed |
---|
.. | .. |
---|
6110 | 6790 | |
---|
6111 | 6791 | func = kmalloc(sizeof(*func), GFP_KERNEL); |
---|
6112 | 6792 | if (!func) { |
---|
6113 | | - WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); |
---|
| 6793 | + MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); |
---|
6114 | 6794 | return; |
---|
6115 | 6795 | } |
---|
6116 | 6796 | |
---|
.. | .. |
---|
6129 | 6809 | struct ftrace_mod_map *mod_map = NULL; |
---|
6130 | 6810 | struct ftrace_init_func *func, *func_next; |
---|
6131 | 6811 | struct list_head clear_hash; |
---|
6132 | | - int order; |
---|
6133 | 6812 | |
---|
6134 | 6813 | INIT_LIST_HEAD(&clear_hash); |
---|
6135 | 6814 | |
---|
.. | .. |
---|
6167 | 6846 | ftrace_update_tot_cnt--; |
---|
6168 | 6847 | if (!pg->index) { |
---|
6169 | 6848 | *last_pg = pg->next; |
---|
6170 | | - order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
---|
6171 | | - free_pages((unsigned long)pg->records, order); |
---|
| 6849 | + if (pg->records) { |
---|
| 6850 | + free_pages((unsigned long)pg->records, pg->order); |
---|
| 6851 | + ftrace_number_of_pages -= 1 << pg->order; |
---|
| 6852 | + } |
---|
| 6853 | + ftrace_number_of_groups--; |
---|
6172 | 6854 | kfree(pg); |
---|
6173 | 6855 | pg = container_of(last_pg, struct ftrace_page, next); |
---|
6174 | 6856 | if (!(*last_pg)) |
---|
.. | .. |
---|
6216 | 6898 | } |
---|
6217 | 6899 | |
---|
6218 | 6900 | pr_info("ftrace: allocating %ld entries in %ld pages\n", |
---|
6219 | | - count, count / ENTRIES_PER_PAGE + 1); |
---|
| 6901 | + count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
---|
6220 | 6902 | |
---|
6221 | 6903 | last_ftrace_enabled = ftrace_enabled = 1; |
---|
6222 | 6904 | |
---|
6223 | 6905 | ret = ftrace_process_locs(NULL, |
---|
6224 | 6906 | __start_mcount_loc, |
---|
6225 | 6907 | __stop_mcount_loc); |
---|
| 6908 | + |
---|
| 6909 | + pr_info("ftrace: allocated %ld pages with %ld groups\n", |
---|
| 6910 | + ftrace_number_of_pages, ftrace_number_of_groups); |
---|
6226 | 6911 | |
---|
6227 | 6912 | set_ftrace_early_filters(); |
---|
6228 | 6913 | |
---|
.. | .. |
---|
6238 | 6923 | |
---|
6239 | 6924 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
---|
6240 | 6925 | { |
---|
| 6926 | + unsigned long trampoline = ops->trampoline; |
---|
| 6927 | + |
---|
6241 | 6928 | arch_ftrace_update_trampoline(ops); |
---|
| 6929 | + if (ops->trampoline && ops->trampoline != trampoline && |
---|
| 6930 | + (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { |
---|
| 6931 | + /* Add to kallsyms before the perf events */ |
---|
| 6932 | + ftrace_add_trampoline_to_kallsyms(ops); |
---|
| 6933 | + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
---|
| 6934 | + ops->trampoline, ops->trampoline_size, false, |
---|
| 6935 | + FTRACE_TRAMPOLINE_SYM); |
---|
| 6936 | + /* |
---|
| 6937 | + * Record the perf text poke event after the ksymbol register |
---|
| 6938 | + * event. |
---|
| 6939 | + */ |
---|
| 6940 | + perf_event_text_poke((void *)ops->trampoline, NULL, 0, |
---|
| 6941 | + (void *)ops->trampoline, |
---|
| 6942 | + ops->trampoline_size); |
---|
| 6943 | + } |
---|
6242 | 6944 | } |
---|
6243 | 6945 | |
---|
6244 | 6946 | void ftrace_init_trace_array(struct trace_array *tr) |
---|
.. | .. |
---|
6249 | 6951 | } |
---|
6250 | 6952 | #else |
---|
6251 | 6953 | |
---|
6252 | | -static struct ftrace_ops global_ops = { |
---|
| 6954 | +struct ftrace_ops global_ops = { |
---|
6253 | 6955 | .func = ftrace_stub, |
---|
6254 | 6956 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
---|
6255 | 6957 | FTRACE_OPS_FL_INITIALIZED | |
---|
.. | .. |
---|
6266 | 6968 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
---|
6267 | 6969 | static inline void ftrace_startup_enable(int command) { } |
---|
6268 | 6970 | static inline void ftrace_startup_all(int command) { } |
---|
6269 | | -/* Keep as macros so we do not need to define the commands */ |
---|
6270 | | -# define ftrace_startup(ops, command) \ |
---|
6271 | | - ({ \ |
---|
6272 | | - int ___ret = __register_ftrace_function(ops); \ |
---|
6273 | | - if (!___ret) \ |
---|
6274 | | - (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ |
---|
6275 | | - ___ret; \ |
---|
6276 | | - }) |
---|
6277 | | -# define ftrace_shutdown(ops, command) \ |
---|
6278 | | - ({ \ |
---|
6279 | | - int ___ret = __unregister_ftrace_function(ops); \ |
---|
6280 | | - if (!___ret) \ |
---|
6281 | | - (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ |
---|
6282 | | - ___ret; \ |
---|
6283 | | - }) |
---|
6284 | 6971 | |
---|
6285 | 6972 | # define ftrace_startup_sysctl() do { } while (0) |
---|
6286 | 6973 | # define ftrace_shutdown_sysctl() do { } while (0) |
---|
6287 | | - |
---|
6288 | | -static inline int |
---|
6289 | | -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
---|
6290 | | -{ |
---|
6291 | | - return 1; |
---|
6292 | | -} |
---|
6293 | 6974 | |
---|
6294 | 6975 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
---|
6295 | 6976 | { |
---|
.. | .. |
---|
6334 | 7015 | |
---|
6335 | 7016 | /* |
---|
6336 | 7017 | * Some of the ops may be dynamically allocated, |
---|
6337 | | - * they must be freed after a synchronize_sched(). |
---|
| 7018 | + * they must be freed after a synchronize_rcu(). |
---|
6338 | 7019 | */ |
---|
6339 | 7020 | preempt_disable_notrace(); |
---|
6340 | 7021 | |
---|
6341 | 7022 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
---|
| 7023 | + /* Stub functions don't need to be called or tested */
---|
| 7024 | + if (op->flags & FTRACE_OPS_FL_STUB) |
---|
| 7025 | + continue; |
---|
6342 | 7026 | /* |
---|
6343 | 7027 | * Check the following for each ops before calling their func: |
---|
6344 | 7028 | * if RCU flag is set, then rcu_is_watching() must be true |
---|
.. | .. |
---|
6383 | 7067 | } |
---|
6384 | 7068 | NOKPROBE_SYMBOL(ftrace_ops_list_func); |
---|
6385 | 7069 | #else |
---|
6386 | | -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip, |
---|
6387 | | - struct ftrace_ops *op, struct pt_regs *regs) |
---|
| 7070 | +static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) |
---|
6388 | 7071 | { |
---|
6389 | 7072 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
---|
6390 | 7073 | } |
---|
.. | .. |
---|
6445 | 7128 | { |
---|
6446 | 7129 | struct trace_array *tr = data; |
---|
6447 | 7130 | struct trace_pid_list *pid_list; |
---|
| 7131 | + struct trace_pid_list *no_pid_list; |
---|
6448 | 7132 | |
---|
6449 | 7133 | pid_list = rcu_dereference_sched(tr->function_pids); |
---|
| 7134 | + no_pid_list = rcu_dereference_sched(tr->function_no_pids); |
---|
6450 | 7135 | |
---|
6451 | | - this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, |
---|
6452 | | - trace_ignore_this_task(pid_list, next)); |
---|
| 7136 | + if (trace_ignore_this_task(pid_list, no_pid_list, next)) |
---|
| 7137 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
---|
| 7138 | + FTRACE_PID_IGNORE); |
---|
| 7139 | + else |
---|
| 7140 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
---|
| 7141 | + next->pid); |
---|
6453 | 7142 | } |
---|
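The probe now has to honour both lists: a task is skipped when a set_ftrace_pid list exists and the task is not on it, or when a set_ftrace_notrace_pid list exists and the task is on it. A small standalone model of that decision (plain C, not the kernel's trace_ignore_this_task() implementation; the membership callback is hypothetical):

	#include <stdbool.h>

	struct pid_filter {
		bool active;			/* a non-empty list was written */
		bool (*contains)(int pid);	/* membership test (hypothetical) */
	};

	static bool ignore_task(const struct pid_filter *pids,
				const struct pid_filter *no_pids, int pid)
	{
		if (pids->active && !pids->contains(pid))
			return true;		/* not on the allow list */
		if (no_pids->active && no_pids->contains(pid))
			return true;		/* explicitly excluded */
		return false;			/* trace it */
	}
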
6454 | 7143 | |
---|
6455 | 7144 | static void |
---|
.. | .. |
---|
6462 | 7151 | |
---|
6463 | 7152 | pid_list = rcu_dereference_sched(tr->function_pids); |
---|
6464 | 7153 | trace_filter_add_remove_task(pid_list, self, task); |
---|
| 7154 | + |
---|
| 7155 | + pid_list = rcu_dereference_sched(tr->function_no_pids); |
---|
| 7156 | + trace_filter_add_remove_task(pid_list, self, task); |
---|
6465 | 7157 | } |
---|
6466 | 7158 | |
---|
6467 | 7159 | static void |
---|
.. | .. |
---|
6471 | 7163 | struct trace_array *tr = data; |
---|
6472 | 7164 | |
---|
6473 | 7165 | pid_list = rcu_dereference_sched(tr->function_pids); |
---|
| 7166 | + trace_filter_add_remove_task(pid_list, NULL, task); |
---|
| 7167 | + |
---|
| 7168 | + pid_list = rcu_dereference_sched(tr->function_no_pids); |
---|
6474 | 7169 | trace_filter_add_remove_task(pid_list, NULL, task); |
---|
6475 | 7170 | } |
---|
6476 | 7171 | |
---|
.. | .. |
---|
6489 | 7184 | } |
---|
6490 | 7185 | } |
---|
6491 | 7186 | |
---|
6492 | | -static void clear_ftrace_pids(struct trace_array *tr) |
---|
| 7187 | +static void clear_ftrace_pids(struct trace_array *tr, int type) |
---|
6493 | 7188 | { |
---|
6494 | 7189 | struct trace_pid_list *pid_list; |
---|
| 7190 | + struct trace_pid_list *no_pid_list; |
---|
6495 | 7191 | int cpu; |
---|
6496 | 7192 | |
---|
6497 | 7193 | pid_list = rcu_dereference_protected(tr->function_pids, |
---|
6498 | 7194 | lockdep_is_held(&ftrace_lock)); |
---|
6499 | | - if (!pid_list) |
---|
| 7195 | + no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
---|
| 7196 | + lockdep_is_held(&ftrace_lock)); |
---|
| 7197 | + |
---|
| 7198 | + /* Make sure there's something to do */ |
---|
| 7199 | + if (!pid_type_enabled(type, pid_list, no_pid_list)) |
---|
6500 | 7200 | return; |
---|
6501 | 7201 | |
---|
6502 | | - unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
---|
| 7202 | + /* See if the pids still need to be checked after this */ |
---|
| 7203 | + if (!still_need_pid_events(type, pid_list, no_pid_list)) { |
---|
| 7204 | + unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
---|
| 7205 | + for_each_possible_cpu(cpu) |
---|
| 7206 | + per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; |
---|
| 7207 | + } |
---|
6503 | 7208 | |
---|
6504 | | - for_each_possible_cpu(cpu) |
---|
6505 | | - per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; |
---|
| 7209 | + if (type & TRACE_PIDS) |
---|
| 7210 | + rcu_assign_pointer(tr->function_pids, NULL); |
---|
6506 | 7211 | |
---|
6507 | | - rcu_assign_pointer(tr->function_pids, NULL); |
---|
| 7212 | + if (type & TRACE_NO_PIDS) |
---|
| 7213 | + rcu_assign_pointer(tr->function_no_pids, NULL); |
---|
6508 | 7214 | |
---|
6509 | 7215 | /* Wait till all users are no longer using pid filtering */ |
---|
6510 | | - synchronize_sched(); |
---|
| 7216 | + synchronize_rcu(); |
---|
6511 | 7217 | |
---|
6512 | | - trace_free_pid_list(pid_list); |
---|
| 7218 | + if ((type & TRACE_PIDS) && pid_list) |
---|
| 7219 | + trace_free_pid_list(pid_list); |
---|
| 7220 | + |
---|
| 7221 | + if ((type & TRACE_NO_PIDS) && no_pid_list) |
---|
| 7222 | + trace_free_pid_list(no_pid_list); |
---|
6513 | 7223 | } |
---|
6514 | 7224 | |
---|
6515 | 7225 | void ftrace_clear_pids(struct trace_array *tr) |
---|
6516 | 7226 | { |
---|
6517 | 7227 | mutex_lock(&ftrace_lock); |
---|
6518 | 7228 | |
---|
6519 | | - clear_ftrace_pids(tr); |
---|
| 7229 | + clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); |
---|
6520 | 7230 | |
---|
6521 | 7231 | mutex_unlock(&ftrace_lock); |
---|
6522 | 7232 | } |
---|
6523 | 7233 | |
---|
6524 | | -static void ftrace_pid_reset(struct trace_array *tr) |
---|
| 7234 | +static void ftrace_pid_reset(struct trace_array *tr, int type) |
---|
6525 | 7235 | { |
---|
6526 | 7236 | mutex_lock(&ftrace_lock); |
---|
6527 | | - clear_ftrace_pids(tr); |
---|
| 7237 | + clear_ftrace_pids(tr, type); |
---|
6528 | 7238 | |
---|
6529 | 7239 | ftrace_update_pid_func(); |
---|
6530 | 7240 | ftrace_startup_all(0); |
---|
.. | .. |
---|
6588 | 7298 | .show = fpid_show, |
---|
6589 | 7299 | }; |
---|
6590 | 7300 | |
---|
6591 | | -static int |
---|
6592 | | -ftrace_pid_open(struct inode *inode, struct file *file) |
---|
| 7301 | +static void *fnpid_start(struct seq_file *m, loff_t *pos) |
---|
| 7302 | + __acquires(RCU) |
---|
6593 | 7303 | { |
---|
| 7304 | + struct trace_pid_list *pid_list; |
---|
| 7305 | + struct trace_array *tr = m->private; |
---|
| 7306 | + |
---|
| 7307 | + mutex_lock(&ftrace_lock); |
---|
| 7308 | + rcu_read_lock_sched(); |
---|
| 7309 | + |
---|
| 7310 | + pid_list = rcu_dereference_sched(tr->function_no_pids); |
---|
| 7311 | + |
---|
| 7312 | + if (!pid_list) |
---|
| 7313 | + return !(*pos) ? FTRACE_NO_PIDS : NULL; |
---|
| 7314 | + |
---|
| 7315 | + return trace_pid_start(pid_list, pos); |
---|
| 7316 | +} |
---|
| 7317 | + |
---|
| 7318 | +static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) |
---|
| 7319 | +{ |
---|
| 7320 | + struct trace_array *tr = m->private; |
---|
| 7321 | + struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); |
---|
| 7322 | + |
---|
| 7323 | + if (v == FTRACE_NO_PIDS) { |
---|
| 7324 | + (*pos)++; |
---|
| 7325 | + return NULL; |
---|
| 7326 | + } |
---|
| 7327 | + return trace_pid_next(pid_list, v, pos); |
---|
| 7328 | +} |
---|
| 7329 | + |
---|
| 7330 | +static const struct seq_operations ftrace_no_pid_sops = { |
---|
| 7331 | + .start = fnpid_start, |
---|
| 7332 | + .next = fnpid_next, |
---|
| 7333 | + .stop = fpid_stop, |
---|
| 7334 | + .show = fpid_show, |
---|
| 7335 | +}; |
---|
| 7336 | + |
---|
| 7337 | +static int pid_open(struct inode *inode, struct file *file, int type) |
---|
| 7338 | +{ |
---|
| 7339 | + const struct seq_operations *seq_ops; |
---|
6594 | 7340 | struct trace_array *tr = inode->i_private; |
---|
6595 | 7341 | struct seq_file *m; |
---|
6596 | 7342 | int ret = 0; |
---|
6597 | 7343 | |
---|
6598 | | - if (trace_array_get(tr) < 0) |
---|
6599 | | - return -ENODEV; |
---|
| 7344 | + ret = tracing_check_open_get_tr(tr); |
---|
| 7345 | + if (ret) |
---|
| 7346 | + return ret; |
---|
6600 | 7347 | |
---|
6601 | 7348 | if ((file->f_mode & FMODE_WRITE) && |
---|
6602 | 7349 | (file->f_flags & O_TRUNC)) |
---|
6603 | | - ftrace_pid_reset(tr); |
---|
| 7350 | + ftrace_pid_reset(tr, type); |
---|
6604 | 7351 | |
---|
6605 | | - ret = seq_open(file, &ftrace_pid_sops); |
---|
| 7352 | + switch (type) { |
---|
| 7353 | + case TRACE_PIDS: |
---|
| 7354 | + seq_ops = &ftrace_pid_sops; |
---|
| 7355 | + break; |
---|
| 7356 | + case TRACE_NO_PIDS: |
---|
| 7357 | + seq_ops = &ftrace_no_pid_sops; |
---|
| 7358 | + break; |
---|
| 7359 | + default: |
---|
| 7360 | + trace_array_put(tr); |
---|
| 7361 | + WARN_ON_ONCE(1); |
---|
| 7362 | + return -EINVAL; |
---|
| 7363 | + } |
---|
| 7364 | + |
---|
| 7365 | + ret = seq_open(file, seq_ops); |
---|
6606 | 7366 | if (ret < 0) { |
---|
6607 | 7367 | trace_array_put(tr); |
---|
6608 | 7368 | } else { |
---|
.. | .. |
---|
6614 | 7374 | return ret; |
---|
6615 | 7375 | } |
---|
6616 | 7376 | |
---|
| 7377 | +static int |
---|
| 7378 | +ftrace_pid_open(struct inode *inode, struct file *file) |
---|
| 7379 | +{ |
---|
| 7380 | + return pid_open(inode, file, TRACE_PIDS); |
---|
| 7381 | +} |
---|
| 7382 | + |
---|
| 7383 | +static int |
---|
| 7384 | +ftrace_no_pid_open(struct inode *inode, struct file *file) |
---|
| 7385 | +{ |
---|
| 7386 | + return pid_open(inode, file, TRACE_NO_PIDS); |
---|
| 7387 | +} |
---|
| 7388 | + |
---|
6617 | 7389 | static void ignore_task_cpu(void *data) |
---|
6618 | 7390 | { |
---|
6619 | 7391 | struct trace_array *tr = data; |
---|
6620 | 7392 | struct trace_pid_list *pid_list; |
---|
| 7393 | + struct trace_pid_list *no_pid_list; |
---|
6621 | 7394 | |
---|
6622 | 7395 | /* |
---|
6623 | 7396 | * This function is called by on_each_cpu() while the |
---|
.. | .. |
---|
6625 | 7398 | */ |
---|
6626 | 7399 | pid_list = rcu_dereference_protected(tr->function_pids, |
---|
6627 | 7400 | mutex_is_locked(&ftrace_lock)); |
---|
| 7401 | + no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
---|
| 7402 | + mutex_is_locked(&ftrace_lock)); |
---|
6628 | 7403 | |
---|
6629 | | - this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, |
---|
6630 | | - trace_ignore_this_task(pid_list, current)); |
---|
| 7404 | + if (trace_ignore_this_task(pid_list, no_pid_list, current)) |
---|
| 7405 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
---|
| 7406 | + FTRACE_PID_IGNORE); |
---|
| 7407 | + else |
---|
| 7408 | + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
---|
| 7409 | + current->pid); |
---|
6631 | 7410 | } |
---|
6632 | 7411 | |
---|
6633 | 7412 | static ssize_t |
---|
6634 | | -ftrace_pid_write(struct file *filp, const char __user *ubuf, |
---|
6635 | | - size_t cnt, loff_t *ppos) |
---|
| 7413 | +pid_write(struct file *filp, const char __user *ubuf, |
---|
| 7414 | + size_t cnt, loff_t *ppos, int type) |
---|
6636 | 7415 | { |
---|
6637 | 7416 | struct seq_file *m = filp->private_data; |
---|
6638 | 7417 | struct trace_array *tr = m->private; |
---|
6639 | | - struct trace_pid_list *filtered_pids = NULL; |
---|
| 7418 | + struct trace_pid_list *filtered_pids; |
---|
| 7419 | + struct trace_pid_list *other_pids; |
---|
6640 | 7420 | struct trace_pid_list *pid_list; |
---|
6641 | 7421 | ssize_t ret; |
---|
6642 | 7422 | |
---|
.. | .. |
---|
6645 | 7425 | |
---|
6646 | 7426 | mutex_lock(&ftrace_lock); |
---|
6647 | 7427 | |
---|
6648 | | - filtered_pids = rcu_dereference_protected(tr->function_pids, |
---|
| 7428 | + switch (type) { |
---|
| 7429 | + case TRACE_PIDS: |
---|
| 7430 | + filtered_pids = rcu_dereference_protected(tr->function_pids, |
---|
6649 | 7431 | lockdep_is_held(&ftrace_lock)); |
---|
| 7432 | + other_pids = rcu_dereference_protected(tr->function_no_pids, |
---|
| 7433 | + lockdep_is_held(&ftrace_lock)); |
---|
| 7434 | + break; |
---|
| 7435 | + case TRACE_NO_PIDS: |
---|
| 7436 | + filtered_pids = rcu_dereference_protected(tr->function_no_pids, |
---|
| 7437 | + lockdep_is_held(&ftrace_lock)); |
---|
| 7438 | + other_pids = rcu_dereference_protected(tr->function_pids, |
---|
| 7439 | + lockdep_is_held(&ftrace_lock)); |
---|
| 7440 | + break; |
---|
| 7441 | + default: |
---|
| 7442 | + ret = -EINVAL; |
---|
| 7443 | + WARN_ON_ONCE(1); |
---|
| 7444 | + goto out; |
---|
| 7445 | + } |
---|
6650 | 7446 | |
---|
6651 | 7447 | ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); |
---|
6652 | 7448 | if (ret < 0) |
---|
6653 | 7449 | goto out; |
---|
6654 | 7450 | |
---|
6655 | | - rcu_assign_pointer(tr->function_pids, pid_list); |
---|
| 7451 | + switch (type) { |
---|
| 7452 | + case TRACE_PIDS: |
---|
| 7453 | + rcu_assign_pointer(tr->function_pids, pid_list); |
---|
| 7454 | + break; |
---|
| 7455 | + case TRACE_NO_PIDS: |
---|
| 7456 | + rcu_assign_pointer(tr->function_no_pids, pid_list); |
---|
| 7457 | + break; |
---|
| 7458 | + } |
---|
| 7459 | + |
---|
6656 | 7460 | |
---|
6657 | 7461 | if (filtered_pids) { |
---|
6658 | | - synchronize_sched(); |
---|
| 7462 | + synchronize_rcu(); |
---|
6659 | 7463 | trace_free_pid_list(filtered_pids); |
---|
6660 | | - } else if (pid_list) { |
---|
| 7464 | + } else if (pid_list && !other_pids) { |
---|
6661 | 7465 | /* Register a probe to set whether to ignore the tracing of a task */ |
---|
6662 | 7466 | register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
---|
6663 | 7467 | } |
---|
.. | .. |
---|
6680 | 7484 | return ret; |
---|
6681 | 7485 | } |
---|
6682 | 7486 | |
---|
| 7487 | +static ssize_t |
---|
| 7488 | +ftrace_pid_write(struct file *filp, const char __user *ubuf, |
---|
| 7489 | + size_t cnt, loff_t *ppos) |
---|
| 7490 | +{ |
---|
| 7491 | + return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS); |
---|
| 7492 | +} |
---|
| 7493 | + |
---|
| 7494 | +static ssize_t |
---|
| 7495 | +ftrace_no_pid_write(struct file *filp, const char __user *ubuf, |
---|
| 7496 | + size_t cnt, loff_t *ppos) |
---|
| 7497 | +{ |
---|
| 7498 | + return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS); |
---|
| 7499 | +} |
---|
| 7500 | + |
---|
6683 | 7501 | static int |
---|
6684 | 7502 | ftrace_pid_release(struct inode *inode, struct file *file) |
---|
6685 | 7503 | { |
---|
.. | .. |
---|
6698 | 7516 | .release = ftrace_pid_release, |
---|
6699 | 7517 | }; |
---|
6700 | 7518 | |
---|
| 7519 | +static const struct file_operations ftrace_no_pid_fops = { |
---|
| 7520 | + .open = ftrace_no_pid_open, |
---|
| 7521 | + .write = ftrace_no_pid_write, |
---|
| 7522 | + .read = seq_read, |
---|
| 7523 | + .llseek = tracing_lseek, |
---|
| 7524 | + .release = ftrace_pid_release, |
---|
| 7525 | +}; |
---|
| 7526 | + |
---|
6701 | 7527 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
---|
6702 | 7528 | { |
---|
6703 | 7529 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
---|
6704 | 7530 | tr, &ftrace_pid_fops); |
---|
| 7531 | + trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer, |
---|
| 7532 | + tr, &ftrace_no_pid_fops); |
---|
6705 | 7533 | } |
---|
6706 | 7534 | |
---|
6707 | 7535 | void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, |
---|
.. | .. |
---|
6781 | 7609 | } |
---|
6782 | 7610 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
---|
6783 | 7611 | |
---|
| 7612 | +static bool is_permanent_ops_registered(void) |
---|
| 7613 | +{ |
---|
| 7614 | + struct ftrace_ops *op; |
---|
| 7615 | + |
---|
| 7616 | + do_for_each_ftrace_op(op, ftrace_ops_list) { |
---|
| 7617 | + if (op->flags & FTRACE_OPS_FL_PERMANENT) |
---|
| 7618 | + return true; |
---|
| 7619 | + } while_for_each_ftrace_op(op); |
---|
| 7620 | + |
---|
| 7621 | + return false; |
---|
| 7622 | +} |
---|
| 7623 | + |
---|
6784 | 7624 | int |
---|
6785 | 7625 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
---|
6786 | | - void __user *buffer, size_t *lenp, |
---|
6787 | | - loff_t *ppos) |
---|
| 7626 | + void *buffer, size_t *lenp, loff_t *ppos) |
---|
6788 | 7627 | { |
---|
6789 | 7628 | int ret = -ENODEV; |
---|
6790 | 7629 | |
---|
.. | .. |
---|
6798 | 7637 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
---|
6799 | 7638 | goto out; |
---|
6800 | 7639 | |
---|
6801 | | - last_ftrace_enabled = !!ftrace_enabled; |
---|
6802 | | - |
---|
6803 | 7640 | if (ftrace_enabled) { |
---|
6804 | 7641 | |
---|
6805 | 7642 | /* we are starting ftrace again */ |
---|
.. | .. |
---|
6810 | 7647 | ftrace_startup_sysctl(); |
---|
6811 | 7648 | |
---|
6812 | 7649 | } else { |
---|
| 7650 | + if (is_permanent_ops_registered()) { |
---|
| 7651 | + ftrace_enabled = true; |
---|
| 7652 | + ret = -EBUSY; |
---|
| 7653 | + goto out; |
---|
| 7654 | + } |
---|
| 7655 | + |
---|
6813 | 7656 | /* stopping ftrace calls (just send to ftrace_stub) */ |
---|
6814 | 7657 | ftrace_trace_function = ftrace_stub; |
---|
6815 | 7658 | |
---|
6816 | 7659 | ftrace_shutdown_sysctl(); |
---|
6817 | 7660 | } |
---|
6818 | 7661 | |
---|
| 7662 | + last_ftrace_enabled = !!ftrace_enabled; |
---|
6819 | 7663 | out: |
---|
6820 | 7664 | mutex_unlock(&ftrace_lock); |
---|
6821 | 7665 | return ret; |
---|
6822 | 7666 | } |
---|
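A short sketch of what FTRACE_OPS_FL_PERMANENT means for a callback owner (the callback and the ops below are hypothetical): while such an ops is registered, writing 0 to the ftrace_enabled sysctl now fails with -EBUSY instead of silently disabling the callback.

	#include <linux/ftrace.h>

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* ... must keep running even if ftrace_enabled is toggled ... */
	}

	static struct ftrace_ops my_permanent_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_PERMANENT,
	};

	/* After register_ftrace_function(&my_permanent_ops),
	 * "sysctl kernel.ftrace_enabled=0" returns -EBUSY until the ops
	 * is unregistered again. */
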
6823 | | - |
---|
6824 | | -#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
6825 | | - |
---|
6826 | | -static struct ftrace_ops graph_ops = { |
---|
6827 | | - .func = ftrace_stub, |
---|
6828 | | - .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
---|
6829 | | - FTRACE_OPS_FL_INITIALIZED | |
---|
6830 | | - FTRACE_OPS_FL_PID | |
---|
6831 | | - FTRACE_OPS_FL_STUB, |
---|
6832 | | -#ifdef FTRACE_GRAPH_TRAMP_ADDR |
---|
6833 | | - .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |
---|
6834 | | - /* trampoline_size is only needed for dynamically allocated tramps */ |
---|
6835 | | -#endif |
---|
6836 | | - ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
---|
6837 | | -}; |
---|
6838 | | - |
---|
6839 | | -void ftrace_graph_sleep_time_control(bool enable) |
---|
6840 | | -{ |
---|
6841 | | - fgraph_sleep_time = enable; |
---|
6842 | | -} |
---|
6843 | | - |
---|
6844 | | -void ftrace_graph_graph_time_control(bool enable) |
---|
6845 | | -{ |
---|
6846 | | - fgraph_graph_time = enable; |
---|
6847 | | -} |
---|
6848 | | - |
---|
6849 | | -void ftrace_graph_return_stub(struct ftrace_graph_ret *trace) |
---|
6850 | | -{ |
---|
6851 | | -} |
---|
6852 | | - |
---|
6853 | | -int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
---|
6854 | | -{ |
---|
6855 | | - return 0; |
---|
6856 | | -} |
---|
6857 | | - |
---|
6858 | | -/* The callbacks that hook a function */ |
---|
6859 | | -trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub; |
---|
6860 | | -trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; |
---|
6861 | | -static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; |
---|
6862 | | - |
---|
6863 | | -/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ |
---|
6864 | | -static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
---|
6865 | | -{ |
---|
6866 | | - int i; |
---|
6867 | | - int ret = 0; |
---|
6868 | | - int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; |
---|
6869 | | - struct task_struct *g, *t; |
---|
6870 | | - |
---|
6871 | | - for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { |
---|
6872 | | - ret_stack_list[i] = |
---|
6873 | | - kmalloc_array(FTRACE_RETFUNC_DEPTH, |
---|
6874 | | - sizeof(struct ftrace_ret_stack), |
---|
6875 | | - GFP_KERNEL); |
---|
6876 | | - if (!ret_stack_list[i]) { |
---|
6877 | | - start = 0; |
---|
6878 | | - end = i; |
---|
6879 | | - ret = -ENOMEM; |
---|
6880 | | - goto free; |
---|
6881 | | - } |
---|
6882 | | - } |
---|
6883 | | - |
---|
6884 | | - read_lock(&tasklist_lock); |
---|
6885 | | - do_each_thread(g, t) { |
---|
6886 | | - if (start == end) { |
---|
6887 | | - ret = -EAGAIN; |
---|
6888 | | - goto unlock; |
---|
6889 | | - } |
---|
6890 | | - |
---|
6891 | | - if (t->ret_stack == NULL) { |
---|
6892 | | - atomic_set(&t->trace_overrun, 0); |
---|
6893 | | - t->curr_ret_stack = -1; |
---|
6894 | | - t->curr_ret_depth = -1; |
---|
6895 | | - /* Make sure the tasks see the -1 first: */ |
---|
6896 | | - smp_wmb(); |
---|
6897 | | - t->ret_stack = ret_stack_list[start++]; |
---|
6898 | | - } |
---|
6899 | | - } while_each_thread(g, t); |
---|
6900 | | - |
---|
6901 | | -unlock: |
---|
6902 | | - read_unlock(&tasklist_lock); |
---|
6903 | | -free: |
---|
6904 | | - for (i = start; i < end; i++) |
---|
6905 | | - kfree(ret_stack_list[i]); |
---|
6906 | | - return ret; |
---|
6907 | | -} |
---|
6908 | | - |
---|
6909 | | -static void |
---|
6910 | | -ftrace_graph_probe_sched_switch(void *ignore, bool preempt, |
---|
6911 | | - struct task_struct *prev, struct task_struct *next) |
---|
6912 | | -{ |
---|
6913 | | - unsigned long long timestamp; |
---|
6914 | | - int index; |
---|
6915 | | - |
---|
6916 | | - /* |
---|
6917 | | - * Does the user want to count the time a function was asleep. |
---|
6918 | | - * If so, do not update the time stamps. |
---|
6919 | | - */ |
---|
6920 | | - if (fgraph_sleep_time) |
---|
6921 | | - return; |
---|
6922 | | - |
---|
6923 | | - timestamp = trace_clock_local(); |
---|
6924 | | - |
---|
6925 | | - prev->ftrace_timestamp = timestamp; |
---|
6926 | | - |
---|
6927 | | - /* only process tasks that we timestamped */ |
---|
6928 | | - if (!next->ftrace_timestamp) |
---|
6929 | | - return; |
---|
6930 | | - |
---|
6931 | | - /* |
---|
6932 | | - * Update all the counters in next to make up for the |
---|
6933 | | - * time next was sleeping. |
---|
6934 | | - */ |
---|
6935 | | - timestamp -= next->ftrace_timestamp; |
---|
6936 | | - |
---|
6937 | | - for (index = next->curr_ret_stack; index >= 0; index--) |
---|
6938 | | - next->ret_stack[index].calltime += timestamp; |
---|
6939 | | -} |
---|
6940 | | - |
---|
6941 | | -/* Allocate a return stack for each task */ |
---|
6942 | | -static int start_graph_tracing(void) |
---|
6943 | | -{ |
---|
6944 | | - struct ftrace_ret_stack **ret_stack_list; |
---|
6945 | | - int ret, cpu; |
---|
6946 | | - |
---|
6947 | | - ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE, |
---|
6948 | | - sizeof(struct ftrace_ret_stack *), |
---|
6949 | | - GFP_KERNEL); |
---|
6950 | | - |
---|
6951 | | - if (!ret_stack_list) |
---|
6952 | | - return -ENOMEM; |
---|
6953 | | - |
---|
6954 | | - /* The cpu_boot init_task->ret_stack will never be freed */ |
---|
6955 | | - for_each_online_cpu(cpu) { |
---|
6956 | | - if (!idle_task(cpu)->ret_stack) |
---|
6957 | | - ftrace_graph_init_idle_task(idle_task(cpu), cpu); |
---|
6958 | | - } |
---|
6959 | | - |
---|
6960 | | - do { |
---|
6961 | | - ret = alloc_retstack_tasklist(ret_stack_list); |
---|
6962 | | - } while (ret == -EAGAIN); |
---|
6963 | | - |
---|
6964 | | - if (!ret) { |
---|
6965 | | - ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
---|
6966 | | - if (ret) |
---|
6967 | | - pr_info("ftrace_graph: Couldn't activate tracepoint" |
---|
6968 | | - " probe to kernel_sched_switch\n"); |
---|
6969 | | - } |
---|
6970 | | - |
---|
6971 | | - kfree(ret_stack_list); |
---|
6972 | | - return ret; |
---|
6973 | | -} |
---|
6974 | | - |
---|
6975 | | -/* |
---|
6976 | | - * Hibernation protection. |
---|
6977 | | - * The state of the current task is too much unstable during |
---|
6978 | | - * suspend/restore to disk. We want to protect against that. |
---|
6979 | | - */ |
---|
6980 | | -static int |
---|
6981 | | -ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, |
---|
6982 | | - void *unused) |
---|
6983 | | -{ |
---|
6984 | | - switch (state) { |
---|
6985 | | - case PM_HIBERNATION_PREPARE: |
---|
6986 | | - pause_graph_tracing(); |
---|
6987 | | - break; |
---|
6988 | | - |
---|
6989 | | - case PM_POST_HIBERNATION: |
---|
6990 | | - unpause_graph_tracing(); |
---|
6991 | | - break; |
---|
6992 | | - } |
---|
6993 | | - return NOTIFY_DONE; |
---|
6994 | | -} |
---|
6995 | | - |
---|
6996 | | -static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) |
---|
6997 | | -{ |
---|
6998 | | - if (!ftrace_ops_test(&global_ops, trace->func, NULL)) |
---|
6999 | | - return 0; |
---|
7000 | | - return __ftrace_graph_entry(trace); |
---|
7001 | | -} |
---|
7002 | | - |
---|
7003 | | -/* |
---|
7004 | | - * The function graph tracer should only trace the functions defined |
---|
7005 | | - * by set_ftrace_filter and set_ftrace_notrace. If another function |
---|
7006 | | - * tracer ops is registered, the graph tracer requires testing the |
---|
7007 | | - * function against the global ops, and not just trace any function |
---|
7008 | | - * that any ftrace_ops registered. |
---|
7009 | | - */ |
---|
7010 | | -static void update_function_graph_func(void) |
---|
7011 | | -{ |
---|
7012 | | - struct ftrace_ops *op; |
---|
7013 | | - bool do_test = false; |
---|
7014 | | - |
---|
7015 | | - /* |
---|
7016 | | - * The graph and global ops share the same set of functions |
---|
7017 | | - * to test. If any other ops is on the list, then |
---|
7018 | | - * the graph tracing needs to test if its the function |
---|
7019 | | - * it should call. |
---|
7020 | | - */ |
---|
7021 | | - do_for_each_ftrace_op(op, ftrace_ops_list) { |
---|
7022 | | - if (op != &global_ops && op != &graph_ops && |
---|
7023 | | - op != &ftrace_list_end) { |
---|
7024 | | - do_test = true; |
---|
7025 | | - /* in double loop, break out with goto */ |
---|
7026 | | - goto out; |
---|
7027 | | - } |
---|
7028 | | - } while_for_each_ftrace_op(op); |
---|
7029 | | - out: |
---|
7030 | | - if (do_test) |
---|
7031 | | - ftrace_graph_entry = ftrace_graph_entry_test; |
---|
7032 | | - else |
---|
7033 | | - ftrace_graph_entry = __ftrace_graph_entry; |
---|
7034 | | -} |
---|
7035 | | - |
---|
7036 | | -static struct notifier_block ftrace_suspend_notifier = { |
---|
7037 | | - .notifier_call = ftrace_suspend_notifier_call, |
---|
7038 | | -}; |
---|
7039 | | - |
---|
7040 | | -int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
---|
7041 | | - trace_func_graph_ent_t entryfunc) |
---|
7042 | | -{ |
---|
7043 | | - int ret = 0; |
---|
7044 | | - |
---|
7045 | | - mutex_lock(&ftrace_lock); |
---|
7046 | | - |
---|
7047 | | - /* we currently allow only one tracer registered at a time */ |
---|
7048 | | - if (ftrace_graph_active) { |
---|
7049 | | - ret = -EBUSY; |
---|
7050 | | - goto out; |
---|
7051 | | - } |
---|
7052 | | - |
---|
7053 | | - register_pm_notifier(&ftrace_suspend_notifier); |
---|
7054 | | - |
---|
7055 | | - ftrace_graph_active++; |
---|
7056 | | - ret = start_graph_tracing(); |
---|
7057 | | - if (ret) { |
---|
7058 | | - ftrace_graph_active--; |
---|
7059 | | - goto out; |
---|
7060 | | - } |
---|
7061 | | - |
---|
7062 | | - ftrace_graph_return = retfunc; |
---|
7063 | | - |
---|
7064 | | - /* |
---|
7065 | | - * Update the indirect function to the entryfunc, and the |
---|
7066 | | - * function that gets called to the entry_test first. Then |
---|
7067 | | - * call the update fgraph entry function to determine if |
---|
7068 | | - * the entryfunc should be called directly or not. |
---|
7069 | | - */ |
---|
7070 | | - __ftrace_graph_entry = entryfunc; |
---|
7071 | | - ftrace_graph_entry = ftrace_graph_entry_test; |
---|
7072 | | - update_function_graph_func(); |
---|
7073 | | - |
---|
7074 | | - ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
---|
7075 | | -out: |
---|
7076 | | - mutex_unlock(&ftrace_lock); |
---|
7077 | | - return ret; |
---|
7078 | | -} |
---|
7079 | | - |
---|
7080 | | -void unregister_ftrace_graph(void) |
---|
7081 | | -{ |
---|
7082 | | - mutex_lock(&ftrace_lock); |
---|
7083 | | - |
---|
7084 | | - if (unlikely(!ftrace_graph_active)) |
---|
7085 | | - goto out; |
---|
7086 | | - |
---|
7087 | | - ftrace_graph_active--; |
---|
7088 | | - ftrace_graph_return = ftrace_graph_return_stub; |
---|
7089 | | - ftrace_graph_entry = ftrace_graph_entry_stub; |
---|
7090 | | - __ftrace_graph_entry = ftrace_graph_entry_stub; |
---|
7091 | | - ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); |
---|
7092 | | - unregister_pm_notifier(&ftrace_suspend_notifier); |
---|
7093 | | - unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
---|
7094 | | - |
---|
7095 | | - out: |
---|
7096 | | - mutex_unlock(&ftrace_lock); |
---|
7097 | | -} |
---|
7098 | | - |
---|
7099 | | -static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); |
---|
7100 | | - |
---|
7101 | | -static void |
---|
7102 | | -graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) |
---|
7103 | | -{ |
---|
7104 | | - atomic_set(&t->trace_overrun, 0); |
---|
7105 | | - t->ftrace_timestamp = 0; |
---|
7106 | | - /* make curr_ret_stack visible before we add the ret_stack */ |
---|
7107 | | - smp_wmb(); |
---|
7108 | | - t->ret_stack = ret_stack; |
---|
7109 | | -} |
---|
7110 | | - |
---|
7111 | | -/* |
---|
7112 | | - * Allocate a return stack for the idle task. May be the first |
---|
7113 | | - * time through, or it may be done by CPU hotplug online. |
---|
7114 | | - */ |
---|
7115 | | -void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) |
---|
7116 | | -{ |
---|
7117 | | - t->curr_ret_stack = -1; |
---|
7118 | | - t->curr_ret_depth = -1; |
---|
7119 | | - /* |
---|
7120 | | - * The idle task has no parent, it either has its own |
---|
7121 | | - * stack or no stack at all. |
---|
7122 | | - */ |
---|
7123 | | - if (t->ret_stack) |
---|
7124 | | - WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); |
---|
7125 | | - |
---|
7126 | | - if (ftrace_graph_active) { |
---|
7127 | | - struct ftrace_ret_stack *ret_stack; |
---|
7128 | | - |
---|
7129 | | - ret_stack = per_cpu(idle_ret_stack, cpu); |
---|
7130 | | - if (!ret_stack) { |
---|
7131 | | - ret_stack = |
---|
7132 | | - kmalloc_array(FTRACE_RETFUNC_DEPTH, |
---|
7133 | | - sizeof(struct ftrace_ret_stack), |
---|
7134 | | - GFP_KERNEL); |
---|
7135 | | - if (!ret_stack) |
---|
7136 | | - return; |
---|
7137 | | - per_cpu(idle_ret_stack, cpu) = ret_stack; |
---|
7138 | | - } |
---|
7139 | | - graph_init_task(t, ret_stack); |
---|
7140 | | - } |
---|
7141 | | -} |
---|
7142 | | - |
---|
7143 | | -/* Allocate a return stack for newly created task */ |
---|
7144 | | -void ftrace_graph_init_task(struct task_struct *t) |
---|
7145 | | -{ |
---|
7146 | | - /* Make sure we do not use the parent ret_stack */ |
---|
7147 | | - t->ret_stack = NULL; |
---|
7148 | | - t->curr_ret_stack = -1; |
---|
7149 | | - t->curr_ret_depth = -1; |
---|
7150 | | - |
---|
7151 | | - if (ftrace_graph_active) { |
---|
7152 | | - struct ftrace_ret_stack *ret_stack; |
---|
7153 | | - |
---|
7154 | | - ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, |
---|
7155 | | - sizeof(struct ftrace_ret_stack), |
---|
7156 | | - GFP_KERNEL); |
---|
7157 | | - if (!ret_stack) |
---|
7158 | | - return; |
---|
7159 | | - graph_init_task(t, ret_stack); |
---|
7160 | | - } |
---|
7161 | | -} |
---|
7162 | | - |
---|
7163 | | -void ftrace_graph_exit_task(struct task_struct *t) |
---|
7164 | | -{ |
---|
7165 | | - struct ftrace_ret_stack *ret_stack = t->ret_stack; |
---|
7166 | | - |
---|
7167 | | - t->ret_stack = NULL; |
---|
7168 | | - /* NULL must become visible to IRQs before we free it: */ |
---|
7169 | | - barrier(); |
---|
7170 | | - |
---|
7171 | | - kfree(ret_stack); |
---|
7172 | | -} |
---|
7173 | | -#endif |
---|
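
The removed hunks above cover the function-graph return-stack machinery that previously lived in ftrace.c: per-task ret_stack allocation (alloc_retstack_tasklist(), start_graph_tracing()), the sched_switch probe that discounts time spent sleeping, the hibernation notifier, and the register/unregister entry points. One detail worth calling out is the allocation strategy in alloc_retstack_tasklist(): GFP_KERNEL allocations may sleep and tasklist_lock is a spinning rwlock, so the return stacks are allocated in batches of FTRACE_RETSTACK_ALLOC_SIZE before the lock is taken, handed out during the locked walk of all threads, and the whole pass is retried when a batch runs out (-EAGAIN). A minimal user-space sketch of that batch-and-retry pattern, using made-up names (fake_task, assign_batch, BATCH, DEPTH, NTASKS) rather than any kernel API, might look like:

	/*
	 * Hypothetical, self-contained illustration of the batch-and-retry
	 * pattern used by alloc_retstack_tasklist() above.  All names here
	 * (fake_task, assign_batch, BATCH, DEPTH, NTASKS) are inventions for
	 * this sketch, not kernel API.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BATCH  4	/* stands in for FTRACE_RETSTACK_ALLOC_SIZE */
	#define DEPTH  8	/* stands in for FTRACE_RETFUNC_DEPTH */
	#define NTASKS 10

	struct fake_task {
		long *ret_stack;	/* NULL until a stack has been assigned */
	};

	static struct fake_task tasks[NTASKS];

	/* Allocate one batch and hand it out; -EAGAIN means another pass is needed. */
	static int assign_batch(void)
	{
		long *batch[BATCH];
		int start = 0, end = BATCH;
		int ret = 0;
		int i;

		/* Allocate the whole batch before "taking the lock". */
		for (i = 0; i < BATCH; i++) {
			batch[i] = calloc(DEPTH, sizeof(long));
			if (!batch[i]) {
				end = i;
				ret = -ENOMEM;
				goto free;
			}
		}

		/* In the kernel this walk runs under read_lock(&tasklist_lock). */
		for (i = 0; i < NTASKS; i++) {
			if (tasks[i].ret_stack)
				continue;
			if (start == end) {
				ret = -EAGAIN;	/* batch exhausted, caller retries */
				goto free;
			}
			tasks[i].ret_stack = batch[start++];
		}
	free:
		/* Free only the entries that were never handed out. */
		for (i = start; i < end; i++)
			free(batch[i]);
		return ret;
	}

	int main(void)
	{
		int ret, i;

		do {
			ret = assign_batch();
		} while (ret == -EAGAIN);

		printf("assignment finished: %d\n", ret);

		for (i = 0; i < NTASKS; i++)
			free(tasks[i].ret_stack);
		return ret ? 1 : 0;
	}

As in the kernel code, a pass that exhausts its batch hands out every entry, so the cleanup loop from start to end frees nothing on the -EAGAIN path and only releases genuinely unused stacks after the final pass; that is why the original tracks start and end the way it does.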