--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -177,7 +177,7 @@
 int tracing_set_tracer(struct trace_array *tr, const char *buf);
 static void ftrace_trace_userstack(struct trace_array *tr,
                                    struct trace_buffer *buffer,
-                                   unsigned int trace_ctx);
+                                   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE        100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
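Throughout this diff, the single packed `trace_ctx` argument is replaced by the older two-argument convention: an `unsigned long flags` word captured from the CPU plus an `int pc` preemption count. As a minimal caller-side sketch (mirroring what `__trace_puts()` and `__trace_bputs()` do below; illustrative, not a line of this patch):

/* Illustrative: how callers on the "+" side obtain the two arguments. */
unsigned long flags;
int pc;

local_save_flags(flags);        /* snapshot of this CPU's irq state */
pc = preempt_count();           /* preempt/softirq/hardirq/NMI nesting */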
@@ -910,23 +910,23 @@
 
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                 unsigned int trace_ctx,
-                                 int skip, struct pt_regs *regs);
+                                 unsigned long flags,
+                                 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                       struct trace_buffer *buffer,
-                                      unsigned int trace_ctx,
-                                      int skip, struct pt_regs *regs);
+                                      unsigned long flags,
+                                      int skip, int pc, struct pt_regs *regs);
 
 #else
 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                        unsigned int trace_ctx,
-                                        int skip, struct pt_regs *regs)
+                                        unsigned long flags,
+                                        int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                       struct trace_buffer *buffer,
-                                      unsigned long trace_ctx,
-                                      int skip, struct pt_regs *regs)
+                                      unsigned long flags,
+                                      int skip, int pc, struct pt_regs *regs)
 {
 }
 
@@ -934,24 +934,24 @@
 
 static __always_inline void
 trace_event_setup(struct ring_buffer_event *event,
-                  int type, unsigned int trace_ctx)
+                  int type, unsigned long flags, int pc)
 {
         struct trace_entry *ent = ring_buffer_event_data(event);
 
-        tracing_generic_entry_update(ent, type, trace_ctx);
+        tracing_generic_entry_update(ent, type, flags, pc);
 }
 
 static __always_inline struct ring_buffer_event *
 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
                             int type,
                             unsigned long len,
-                            unsigned int trace_ctx)
+                            unsigned long flags, int pc)
 {
         struct ring_buffer_event *event;
 
         event = ring_buffer_lock_reserve(buffer, len);
         if (event != NULL)
-                trace_event_setup(event, type, trace_ctx);
+                trace_event_setup(event, type, flags, pc);
 
         return event;
 }
@@ -1012,22 +1012,25 @@
         struct ring_buffer_event *event;
         struct trace_buffer *buffer;
         struct print_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long irq_flags;
         int alloc;
+        int pc;
 
         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                 return 0;
+
+        pc = preempt_count();
 
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
 
         alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
-        trace_ctx = tracing_gen_ctx();
+        local_save_flags(irq_flags);
         buffer = global_trace.array_buffer.buffer;
         ring_buffer_nest_start(buffer);
-        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
-                                            trace_ctx);
+        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+                                            irq_flags, pc);
         if (!event) {
                 size = 0;
                 goto out;
@@ -1046,7 +1049,7 @@
         entry->buf[size] = '\0';
 
         __buffer_unlock_commit(buffer, event);
-        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
         ring_buffer_nest_end(buffer);
         return size;
@@ -1063,22 +1066,25 @@
         struct ring_buffer_event *event;
         struct trace_buffer *buffer;
         struct bputs_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long irq_flags;
         int size = sizeof(struct bputs_entry);
         int ret = 0;
+        int pc;
 
         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                 return 0;
 
+        pc = preempt_count();
+
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
 
-        trace_ctx = tracing_gen_ctx();
+        local_save_flags(irq_flags);
         buffer = global_trace.array_buffer.buffer;
 
         ring_buffer_nest_start(buffer);
         event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                            trace_ctx);
+                                            irq_flags, pc);
         if (!event)
                 goto out;
 
@@ -1087,7 +1093,7 @@
         entry->str = str;
 
         __buffer_unlock_commit(buffer, event);
-        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
         ret = 1;
 out:
.. | .. |
---|
1877 | 1883 | * place on this CPU. We fail to record, but we reset |
---|
1878 | 1884 | * the max trace buffer (no one writes directly to it) |
---|
1879 | 1885 | * and flag that it failed. |
---|
| 1886 | + * Another reason is resize is in progress. |
---|
1880 | 1887 | */ |
---|
1881 | 1888 | trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, |
---|
1882 | | - "Failed to swap buffers due to commit in progress\n"); |
---|
| 1889 | + "Failed to swap buffers due to commit or resize in progress\n"); |
---|
1883 | 1890 | } |
---|
1884 | 1891 | |
---|
1885 | 1892 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); |
---|
@@ -2173,9 +2180,11 @@
 }
 
 /* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
 {
         struct trace_array *tr;
+
+        lockdep_assert_held(&trace_types_lock);
 
         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                 if (!tr->clear_trace)
@@ -2186,6 +2195,13 @@
                 tracing_reset_online_cpus(&tr->max_buffer);
 #endif
         }
+}
+
+void tracing_reset_all_online_cpus(void)
+{
+        mutex_lock(&trace_types_lock);
+        tracing_reset_all_online_cpus_unlocked();
+        mutex_unlock(&trace_types_lock);
 }
 
 /*
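The rename turns the old function into a lock-asserting helper and reintroduces the original name as a self-locking wrapper. A hypothetical caller that already holds `trace_types_lock` must use the `_unlocked` variant, since the mutex is not recursive:

/* Hypothetical caller, for illustration only. */
static void example_reset_under_lock(void)
{
        mutex_lock(&trace_types_lock);
        /* ... other work that requires the lock ... */
        tracing_reset_all_online_cpus_unlocked();
        mutex_unlock(&trace_types_lock);
}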
@@ -2608,52 +2624,36 @@
 }
 EXPORT_SYMBOL_GPL(trace_handle_return);
 
-static unsigned short migration_disable_value(void)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+                             unsigned long flags, int pc)
 {
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
-        return current->migration_disabled;
+        struct task_struct *tsk = current;
+
+        entry->preempt_count = pc & 0xff;
+        entry->pid = (tsk) ? tsk->pid : 0;
+        entry->type = type;
+        entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 #else
-        return 0;
+                TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
+                ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
+                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+                ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
-
-unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
-{
-        unsigned int trace_flags = irqs_status;
-        unsigned int pc;
-
-        pc = preempt_count();
-
-        if (pc & NMI_MASK)
-                trace_flags |= TRACE_FLAG_NMI;
-        if (pc & HARDIRQ_MASK)
-                trace_flags |= TRACE_FLAG_HARDIRQ;
-        if (in_serving_softirq())
-                trace_flags |= TRACE_FLAG_SOFTIRQ;
-
-        if (tif_need_resched())
-                trace_flags |= TRACE_FLAG_NEED_RESCHED;
-        if (test_preempt_need_resched())
-                trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-
-#ifdef CONFIG_PREEMPT_LAZY
-        if (need_resched_lazy())
-                trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
-#endif
-
-        return (pc & 0xff) |
-                (migration_disable_value() & 0xff) << 8 |
-                (preempt_lazy_count() & 0xff) << 16 |
-                (trace_flags << 24);
-}
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
                           int type,
                           unsigned long len,
-                          unsigned int trace_ctx)
+                          unsigned long flags, int pc)
 {
-        return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
+        return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
 }
 
 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
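For orientation: the removed `tracing_gen_ctx_irq_test()` shows how the packed `trace_ctx` word was laid out. The layout below is read directly off its return statement; the two accessors are illustrative helpers, not kernel API:

/*
 * Packed trace_ctx layout, per the removed return statement:
 *   bits  0..7   preempt_count() & 0xff
 *   bits  8..15  migration-disable depth (SMP && PREEMPT_RT only)
 *   bits 16..23  lazy-preemption count (CONFIG_PREEMPT_LAZY only)
 *   bits 24..31  TRACE_FLAG_* bits (irqs-off, NMI, hardirq, softirq, ...)
 */
static inline unsigned int example_ctx_pc(unsigned int trace_ctx)
{
        return trace_ctx & 0xff;        /* preemption depth */
}

static inline unsigned int example_ctx_flags(unsigned int trace_ctx)
{
        return trace_ctx >> 24;         /* TRACE_FLAG_* bits */
}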
@@ -2773,7 +2773,7 @@
 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
                                 struct trace_event_file *trace_file,
                                 int type, unsigned long len,
-                                unsigned int trace_ctx)
+                                unsigned long flags, int pc)
 {
         struct ring_buffer_event *entry;
         int val;
@@ -2786,7 +2786,7 @@
         /* Try to use the per cpu buffer first */
         val = this_cpu_inc_return(trace_buffered_event_cnt);
         if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
-                trace_event_setup(entry, type, trace_ctx);
+                trace_event_setup(entry, type, flags, pc);
                 entry->array[0] = len;
                 return entry;
         }
@@ -2794,7 +2794,7 @@
         }
 
         entry = __trace_buffer_lock_reserve(*current_rb,
-                                            type, len, trace_ctx);
+                                            type, len, flags, pc);
         /*
          * If tracing is off, but we have triggers enabled
          * we still need to look at the event data. Use the temp_buffer
@@ -2803,8 +2803,8 @@
          */
         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
                 *current_rb = temp_buffer;
-                entry = __trace_buffer_lock_reserve(*current_rb, type, len,
-                                                    trace_ctx);
+                entry = __trace_buffer_lock_reserve(*current_rb,
+                                                    type, len, flags, pc);
         }
         return entry;
 }
@@ -2890,6 +2890,6 @@
                 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
         event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
                                          fbuffer->event, fbuffer->entry,
-                                         fbuffer->trace_ctx, fbuffer->regs);
+                                         fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
@@ -2906,7 +2906,7 @@
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      struct ring_buffer_event *event,
-                                     unsigned int trace_ctx,
+                                     unsigned long flags, int pc,
                                      struct pt_regs *regs)
 {
         __buffer_unlock_commit(buffer, event);
@@ -2917,8 +2917,8 @@
          * and mmiotrace, but that's ok if they lose a function or
          * two. They are not that meaningful.
          */
-        ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
-        ftrace_trace_userstack(tr, buffer, trace_ctx);
+        ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+        ftrace_trace_userstack(tr, buffer, flags, pc);
 }
 
 /*
@@ -2932,8 +2932,9 @@
 }
 
 void
-trace_function(struct trace_array *tr, unsigned long ip, unsigned long
-               parent_ip, unsigned int trace_ctx)
+trace_function(struct trace_array *tr,
+               unsigned long ip, unsigned long parent_ip, unsigned long flags,
+               int pc)
 {
         struct trace_event_call *call = &event_function;
         struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2941,7 +2942,7 @@
         struct ftrace_entry *entry;
 
         event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
@@ -2975,8 +2976,8 @@
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                 unsigned int trace_ctx,
-                                 int skip, struct pt_regs *regs)
+                                 unsigned long flags,
+                                 int skip, int pc, struct pt_regs *regs)
 {
         struct trace_event_call *call = &event_kernel_stack;
         struct ring_buffer_event *event;
@@ -3024,7 +3025,7 @@
         size = nr_entries * sizeof(unsigned long);
         event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                             (sizeof(*entry) - sizeof(entry->caller)) + size,
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
@@ -3045,22 +3046,22 @@
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                       struct trace_buffer *buffer,
-                                      unsigned int trace_ctx,
-                                      int skip, struct pt_regs *regs)
+                                      unsigned long flags,
+                                      int skip, int pc, struct pt_regs *regs)
 {
         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
                 return;
 
-        __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
+        __ftrace_trace_stack(buffer, flags, skip, pc, regs);
 }
 
-void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
-                   int skip)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+                   int pc)
 {
         struct trace_buffer *buffer = tr->array_buffer.buffer;
 
         if (rcu_is_watching()) {
-                __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+                __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
                 return;
         }
 
@@ -3074,7 +3075,7 @@
                 return;
 
         rcu_irq_enter_irqson();
-        __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
         rcu_irq_exit_irqson();
 }
 
@@ -3084,15 +3085,19 @@
  */
 void trace_dump_stack(int skip)
 {
+        unsigned long flags;
+
         if (tracing_disabled || tracing_selftest_running)
                 return;
+
+        local_save_flags(flags);
 
 #ifndef CONFIG_UNWINDER_ORC
         /* Skip 1 to skip this function. */
         skip++;
 #endif
         __ftrace_trace_stack(global_trace.array_buffer.buffer,
-                             tracing_gen_ctx(), skip, NULL);
+                             flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
@@ -3101,7 +3106,7 @@
 
 static void
 ftrace_trace_userstack(struct trace_array *tr,
-                       struct trace_buffer *buffer, unsigned int trace_ctx)
+                       struct trace_buffer *buffer, unsigned long flags, int pc)
 {
         struct trace_event_call *call = &event_user_stack;
         struct ring_buffer_event *event;
@@ -3128,7 +3133,7 @@
         __this_cpu_inc(user_stack_count);
 
         event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-                                            sizeof(*entry), trace_ctx);
+                                            sizeof(*entry), flags, pc);
         if (!event)
                 goto out_drop_count;
         entry = ring_buffer_event_data(event);
@@ -3148,7 +3153,7 @@
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
 static void ftrace_trace_userstack(struct trace_array *tr,
                                    struct trace_buffer *buffer,
-                                   unsigned int trace_ctx)
+                                   unsigned long flags, int pc)
 {
 }
 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
@@ -3278,9 +3283,9 @@
         struct trace_buffer *buffer;
         struct trace_array *tr = &global_trace;
         struct bprint_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long flags;
         char *tbuffer;
-        int len = 0, size;
+        int len = 0, size, pc;
 
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
@@ -3288,7 +3293,7 @@
         /* Don't pollute graph traces with trace_vprintk internals */
         pause_graph_tracing();
 
-        trace_ctx = tracing_gen_ctx();
+        pc = preempt_count();
         preempt_disable_notrace();
 
         tbuffer = get_trace_buf();
@@ -3302,11 +3307,12 @@
         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
                 goto out_put;
 
+        local_save_flags(flags);
         size = sizeof(*entry) + sizeof(u32) * len;
         buffer = tr->array_buffer.buffer;
         ring_buffer_nest_start(buffer);
         event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
@@ -3316,7 +3322,7 @@
         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
         if (!call_filter_check_discard(call, entry, buffer, event)) {
                 __buffer_unlock_commit(buffer, event);
-                ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+                ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
         }
 
 out:
@@ -3339,9 +3345,9 @@
 {
         struct trace_event_call *call = &event_print;
         struct ring_buffer_event *event;
-        int len = 0, size;
+        int len = 0, size, pc;
         struct print_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long flags;
         char *tbuffer;
 
         if (tracing_disabled || tracing_selftest_running)
@@ -3350,7 +3356,7 @@
         /* Don't pollute graph traces with trace_vprintk internals */
         pause_graph_tracing();
 
-        trace_ctx = tracing_gen_ctx();
+        pc = preempt_count();
         preempt_disable_notrace();
 
 
@@ -3362,10 +3368,11 @@
 
         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
+        local_save_flags(flags);
         size = sizeof(*entry) + len + 1;
         ring_buffer_nest_start(buffer);
         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
@@ -3374,7 +3381,7 @@
         memcpy(&entry->buf, tbuffer, len + 1);
         if (!call_filter_check_discard(call, entry, buffer, event)) {
                 __buffer_unlock_commit(buffer, event);
-                ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+                ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
         }
 
 out:
@@ -3720,8 +3727,15 @@
          * will point to the same string as current_trace->name.
          */
         mutex_lock(&trace_types_lock);
-        if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+        if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+                /* Close iter->trace before switching to the new current tracer */
+                if (iter->trace->close)
+                        iter->trace->close(iter);
                 *iter->trace = *tr->current_trace;
+                /* Reopen the new current tracer */
+                if (iter->trace->open)
+                        iter->trace->open(iter);
+        }
         mutex_unlock(&trace_types_lock);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
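Copying `*tr->current_trace` over `*iter->trace` swaps the tracer underneath an open trace file; the new close/open bracket keeps the tracer's per-iterator lifecycle balanced, so state set up by the old tracer's `open()` is torn down before the copy. For orientation, the two callbacks involved, abridged from `struct tracer` in kernel/trace/trace.h:

/* Abridged sketch of struct tracer; only the relevant callbacks shown. */
struct tracer {
        const char      *name;
        /* ... */
        void            (*open)(struct trace_iterator *iter);
        void            (*close)(struct trace_iterator *iter);
        /* ... many other ops ... */
};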
@@ -3840,17 +3854,14 @@
 
 static void print_lat_help_header(struct seq_file *m)
 {
-        seq_puts(m, "#                   _--------=> CPU#              \n"
-                    "#                  / _-------=> irqs-off          \n"
-                    "#                 | / _------=> need-resched      \n"
-                    "#                 || / _-----=> need-resched-lazy \n"
-                    "#                 ||| / _----=> hardirq/softirq   \n"
-                    "#                 |||| / _---=> preempt-depth     \n"
-                    "#                 ||||| / _--=> preempt-lazy-depth\n"
-                    "#                 |||||| / _-=> migrate-disable   \n"
-                    "#                 ||||||| /     delay             \n"
-                    "#  cmd     pid    |||||||| time  |   caller       \n"
-                    "#     \\   /       ||||||||  \\    |   /          \n");
+        seq_puts(m, "#                  _------=> CPU#            \n"
+                    "#                 / _-----=> irqs-off        \n"
+                    "#                | / _----=> need-resched    \n"
+                    "#                || / _---=> hardirq/softirq \n"
+                    "#                ||| / _--=> preempt-depth   \n"
+                    "#                |||| /     delay            \n"
+                    "#  cmd     pid   ||||| time  |   caller      \n"
+                    "#     \\   /      |||||  \\    |   /         \n");
 }
 
 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -3884,16 +3895,13 @@
 
         print_event_info(buf, m);
 
-        seq_printf(m, "# %.*s  _-------=> irqs-off\n", prec, space);
-        seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
-        seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
-        seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
-        seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space);
-        seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
-        seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space);
-        seq_printf(m, "# %.*s|||||| /    delay\n", prec, space);
-        seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||||  TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
-        seq_printf(m, "#              | |    %.*s   |   |||||||     |         |\n", prec, "     |    ");
+        seq_printf(m, "# %.*s  _-----=> irqs-off\n", prec, space);
+        seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+        seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+        seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+        seq_printf(m, "# %.*s||| /     delay\n", prec, space);
+        seq_printf(m, "#           TASK-PID  %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
+        seq_printf(m, "#              | |    %.*s   |   ||||      |         |\n", prec, "     |    ");
 }
 
 void
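The header columns above line up with the per-event fields filled in by `tracing_generic_entry_update()` earlier in this diff. A loose, illustrative decode of one column block follows; the kernel's real printer is `trace_print_lat_fmt()` in kernel/trace/trace_output.c, whose exact character set differs slightly:

/* Illustrative only: map entry fields to the four header columns. */
static void example_lat_chars(const struct trace_entry *e, char out[5])
{
        out[0] = (e->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';  /* irqs-off */
        out[1] = (e->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
        out[2] = (e->flags & TRACE_FLAG_HARDIRQ) ? 'h' :
                 (e->flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.';
        out[3] = e->preempt_count ? hex_asc[e->preempt_count & 0xf] : '.';
        out[4] = '\0';
}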
@@ -4486,6 +4494,33 @@
         return 0;
 }
 
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated to it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+        struct trace_event_file *file = inode->i_private;
+        int ret;
+
+        ret = tracing_check_open_get_tr(file->tr);
+        if (ret)
+                return ret;
+
+        filp->private_data = inode->i_private;
+
+        return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+        struct trace_event_file *file = inode->i_private;
+
+        trace_array_put(file->tr);
+
+        return 0;
+}
+
 static int tracing_release(struct inode *inode, struct file *file)
 {
         struct trace_array *tr = inode->i_private;
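These helpers pin the owning `trace_array` for as long as the file stays open. For reference, `tracing_check_open_get_tr()` (defined elsewhere in this file) is roughly the following; quoted from memory, so treat it as a sketch rather than the exact body:

/* Approximate body of tracing_check_open_get_tr(), for orientation. */
int tracing_check_open_get_tr(struct trace_array *tr)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if (tracing_disabled)
                return -ENODEV;

        if (tr && trace_array_get(tr) < 0)
                return -ENODEV;

        return 0;
}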
@@ -4715,6 +4750,8 @@
 static const struct file_operations tracing_fops = {
         .open           = tracing_open,
         .read           = seq_read,
+        .read_iter      = seq_read_iter,
+        .splice_read    = generic_file_splice_read,
         .write          = tracing_write_stub,
         .llseek         = tracing_lseek,
         .release        = tracing_release,
@@ -4774,11 +4811,17 @@
                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
                         ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+                        ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#endif
                 }
                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                         atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
                         ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+                        ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#endif
                 }
         }
         arch_spin_unlock(&tr->max_lock);
@@ -6249,6 +6292,7 @@
         mutex_unlock(&trace_types_lock);
 
         free_cpumask_var(iter->started);
+        kfree(iter->temp);
         mutex_destroy(&iter->mutex);
         kfree(iter);
 
@@ -6381,7 +6425,20 @@
 
                 ret = print_trace_line(iter);
                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
-                        /* don't print partial lines */
+                        /*
+                         * If one print_trace_line() fills the entire trace_seq
+                         * in one shot, trace_seq_to_user() will return -EBUSY
+                         * because save_len == 0. In this case, we need to
+                         * consume it; otherwise the loop will peek this event
+                         * next time, resulting in an infinite loop.
+                         */
+                        if (save_len == 0) {
+                                iter->seq.full = 0;
+                                trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+                                trace_consume(iter);
+                                break;
+                        }
+
+                        /* In other cases, don't print partial lines */
                         iter->seq.seq.len = save_len;
                         break;
                 }
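The bug being fixed: with `save_len == 0`, restoring the sequence length after a partial line leaves the trace_seq empty, `trace_seq_to_user()` reports -EBUSY, and since nothing was consumed the same oversized event is peeked again on the next pass. Sketched below (not the kernel's exact code):

/* Sketch of the pre-fix infinite loop on an oversized line. */
for (;;) {
        int save_len = iter->seq.seq.len;       /* 0: seq starts empty  */

        if (print_trace_line(iter) == TRACE_TYPE_PARTIAL_LINE) {
                iter->seq.seq.len = save_len;   /* discard partial line */
                break;                          /* seq is empty again   */
        }
        trace_consume(iter);                    /* never reached for it */
}
/* trace_seq_to_user() then sees an empty seq, returns -EBUSY, and the
 * caller loops back to peek the very same event. */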
@@ -6698,6 +6755,7 @@
         enum event_trigger_type tt = ETT_NONE;
         struct trace_buffer *buffer;
         struct print_entry *entry;
+        unsigned long irq_flags;
         ssize_t written;
         int size;
         int len;
@@ -6717,6 +6775,7 @@
 
         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+        local_save_flags(irq_flags);
         size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
 
         /* If less than "<faulted>", then make sure we can still add that */
@@ -6725,7 +6784,7 @@
 
         buffer = tr->array_buffer.buffer;
         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                            tracing_gen_ctx());
+                                            irq_flags, preempt_count());
         if (unlikely(!event))
                 /* Ring buffer disabled, return as if not open for write */
                 return -EBADF;
@@ -6777,6 +6836,7 @@
         struct ring_buffer_event *event;
         struct trace_buffer *buffer;
         struct raw_data_entry *entry;
+        unsigned long irq_flags;
         ssize_t written;
         int size;
         int len;
@@ -6798,13 +6858,14 @@
 
         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+        local_save_flags(irq_flags);
         size = sizeof(*entry) + cnt;
         if (cnt < FAULT_SIZE_ID)
                 size += FAULT_SIZE_ID - cnt;
 
         buffer = tr->array_buffer.buffer;
         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
-                                            tracing_gen_ctx());
+                                            irq_flags, preempt_count());
         if (!event)
                 /* Ring buffer disabled, return as if not open for write */
                 return -EBADF;
@@ -7032,6 +7093,11 @@
         return ret;
 }
 
+static void tracing_swap_cpu_buffer(void *tr)
+{
+        update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
+}
+
 static ssize_t
 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                        loff_t *ppos)
@@ -7090,13 +7156,15 @@
                 ret = tracing_alloc_snapshot_instance(tr);
                 if (ret < 0)
                         break;
-                local_irq_disable();
                 /* Now, we're going to swap */
-                if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+                if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+                        local_irq_disable();
                         update_max_tr(tr, current, smp_processor_id(), NULL);
-                else
-                        update_max_tr_single(tr, current, iter->cpu_file);
-                local_irq_enable();
+                        local_irq_enable();
+                } else {
+                        smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
+                                                 (void *)tr, 1);
+                }
                 break;
         default:
                 if (tr->allocated_snapshot) {
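A per-CPU snapshot swap must execute on the CPU whose buffer is being swapped, so the single-CPU branch now rides an IPI instead of running locally with interrupts off. `smp_call_function_single(cpu, func, info, wait)` runs `func(info)` on the chosen CPU and, with `wait` set, blocks until it completes. The general usage shape (illustrative):

/* Illustrative: the general shape of an smp_call_function_single() call. */
static void example_on_cpu(void *info)
{
        pr_info("running on CPU %d\n", smp_processor_id());
}

static void example_run_on(int cpu)
{
        smp_call_function_single(cpu, example_on_cpu, NULL, 1);
}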
@@ -7185,10 +7253,11 @@
 #endif
 
 static const struct file_operations set_tracer_fops = {
-        .open           = tracing_open_generic,
+        .open           = tracing_open_generic_tr,
         .read           = tracing_set_trace_read,
         .write          = tracing_set_trace_write,
         .llseek         = generic_file_llseek,
+        .release        = tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -7511,7 +7580,7 @@
         .open           = tracing_err_log_open,
         .write          = tracing_err_log_write,
         .read           = seq_read,
-        .llseek         = seq_lseek,
+        .llseek         = tracing_lseek,
         .release        = tracing_err_log_release,
 };
 
@@ -8227,12 +8296,33 @@
         return cnt;
 }
 
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+        struct trace_option_dentry *topt = inode->i_private;
+        int ret;
+
+        ret = tracing_check_open_get_tr(topt->tr);
+        if (ret)
+                return ret;
+
+        filp->private_data = inode->i_private;
+        return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+        struct trace_option_dentry *topt = file->private_data;
+
+        trace_array_put(topt->tr);
+        return 0;
+}
 
 static const struct file_operations trace_options_fops = {
-        .open           = tracing_open_generic,
+        .open           = tracing_open_options,
         .read           = trace_options_read,
         .write          = trace_options_write,
         .llseek         = generic_file_llseek,
+        .release        = tracing_release_options,
 };
 
 /*
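The two fops fixes above follow the same rule: a tracefs `.open` that takes a reference on a `trace_array` needs a matching `.release` that drops it, or removing a trace instance can race with files that are still open. Template of the pairing (illustrative):

/* Illustrative template of the get/put pairing used above. */
static const struct file_operations example_tr_fops = {
        .open           = tracing_open_generic_tr,      /* takes a tr reference */
        .release        = tracing_release_generic_tr,   /* drops it */
        /* .read/.write/.llseek as appropriate for the file */
};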
@@ -8562,9 +8652,6 @@
         if (val > 100)
                 return -EINVAL;
 
-        if (!val)
-                val = 1;
-
         tr->buffer_percent = val;
 
         (*ppos)++;
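Dropping the clamp makes 0 a valid setting. Per the tracefs `buffer_percent` semantics, 0 means a waiting reader is woken as soon as any data is present, while larger values defer the wakeup until that much of the buffer is dirty. A sketch of the policy (illustrative; the real check lives in the ring-buffer wait code, not here):

/* Illustrative wakeup policy controlled by buffer_percent. */
static bool example_should_wake(unsigned long dirty_pages,
                                unsigned long total_pages,
                                int buffer_percent)
{
        if (!buffer_percent)
                return dirty_pages != 0;        /* 0: wake on any data */
        return dirty_pages * 100 >= (unsigned long)buffer_percent * total_pages;
}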
@@ -8889,6 +8976,7 @@
         ftrace_destroy_function_files(tr);
         tracefs_remove(tr->dir);
         free_trace_buffers(tr);
+        clear_tracing_err_log(tr);
 
         for (i = 0; i < tr->nr_topts; i++) {
                 kfree(tr->topts[i].topts);
@@ -9391,6 +9479,7 @@
         tracing_off();
 
         local_irq_save(flags);
+        printk_nmi_direct_enter();
 
         /* Simulate the iterator */
         trace_init_global_iter(&iter);
@@ -9478,6 +9567,7 @@
                 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
         }
         atomic_dec(&dump_running);
+        printk_nmi_direct_exit();
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ftrace_dump);
@@ -9709,6 +9799,8 @@
                 static_key_enable(&tracepoint_printk_key.key);
         }
         tracer_alloc_buffers();
+
+        init_events();
 }
 
 void __init trace_init(void)
---|