```diff
@@ -177,7 +177,7 @@
 int tracing_set_tracer(struct trace_array *tr, const char *buf);
 static void ftrace_trace_userstack(struct trace_array *tr,
                                    struct trace_buffer *buffer,
-                                   unsigned int trace_ctx);
+                                   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE         100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
```
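Taken together, the hunks in this patch revert the tracing core from a single packed `trace_ctx` word back to passing the saved interrupt flags and the preemption count as two explicit parameters. The restored call-site pattern recurs throughout the hunks below: capture `irq_flags` with `local_save_flags()` and `pc` with `preempt_count()`, then hand the pair down to the reserve helpers. A minimal kernel-context sketch of that pattern (the surrounding function is hypothetical; `__trace_buffer_lock_reserve()` is the static helper changed below):

```c
/* Hypothetical caller illustrating the restored flags/pc convention. */
#include <linux/irqflags.h>     /* local_save_flags() */
#include <linux/preempt.h>      /* preempt_count() */

static void example_record_event(struct trace_buffer *buffer)
{
        struct ring_buffer_event *event;
        unsigned long irq_flags;
        int pc;

        local_save_flags(irq_flags);    /* snapshot IRQ state, don't disable */
        pc = preempt_count();           /* preempt/irq nesting at this point */

        /* every reserve helper now takes the pair explicitly */
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT,
                                            sizeof(struct print_entry),
                                            irq_flags, pc);
        if (!event)
                return;
        __buffer_unlock_commit(buffer, event);
}
```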
|---|
```diff
@@ -910,23 +910,23 @@
 
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                 unsigned int trace_ctx,
-                                 int skip, struct pt_regs *regs);
+                                 unsigned long flags,
+                                 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                       struct trace_buffer *buffer,
-                                      unsigned int trace_ctx,
-                                      int skip, struct pt_regs *regs);
+                                      unsigned long flags,
+                                      int skip, int pc, struct pt_regs *regs);
 
 #else
 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                        unsigned int trace_ctx,
-                                        int skip, struct pt_regs *regs)
+                                        unsigned long flags,
+                                        int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                       struct trace_buffer *buffer,
-                                      unsigned long trace_ctx,
-                                      int skip, struct pt_regs *regs)
+                                      unsigned long flags,
+                                      int skip, int pc, struct pt_regs *regs)
 {
 }
 
@@ -934,24 +934,24 @@
 
 static __always_inline void
 trace_event_setup(struct ring_buffer_event *event,
-                  int type, unsigned int trace_ctx)
+                  int type, unsigned long flags, int pc)
 {
         struct trace_entry *ent = ring_buffer_event_data(event);
 
-        tracing_generic_entry_update(ent, type, trace_ctx);
+        tracing_generic_entry_update(ent, type, flags, pc);
 }
 
 static __always_inline struct ring_buffer_event *
 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
                             int type,
                             unsigned long len,
-                            unsigned int trace_ctx)
+                            unsigned long flags, int pc)
 {
         struct ring_buffer_event *event;
 
         event = ring_buffer_lock_reserve(buffer, len);
         if (event != NULL)
-                trace_event_setup(event, type, trace_ctx);
+                trace_event_setup(event, type, flags, pc);
 
         return event;
 }
```
|---|
```diff
@@ -1012,22 +1012,25 @@
         struct ring_buffer_event *event;
         struct trace_buffer *buffer;
         struct print_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long irq_flags;
         int alloc;
+        int pc;
 
         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                 return 0;
+
+        pc = preempt_count();
 
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
 
         alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
-        trace_ctx = tracing_gen_ctx();
+        local_save_flags(irq_flags);
         buffer = global_trace.array_buffer.buffer;
         ring_buffer_nest_start(buffer);
-        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
-                                            trace_ctx);
+        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+                                            irq_flags, pc);
         if (!event) {
                 size = 0;
                 goto out;
@@ -1046,7 +1049,7 @@
         entry->buf[size] = '\0';
 
         __buffer_unlock_commit(buffer, event);
-        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
         ring_buffer_nest_end(buffer);
         return size;
```
|---|
```diff
@@ -1063,22 +1066,25 @@
         struct ring_buffer_event *event;
         struct trace_buffer *buffer;
         struct bputs_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long irq_flags;
         int size = sizeof(struct bputs_entry);
         int ret = 0;
+        int pc;
 
         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                 return 0;
 
+        pc = preempt_count();
+
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
 
-        trace_ctx = tracing_gen_ctx();
+        local_save_flags(irq_flags);
         buffer = global_trace.array_buffer.buffer;
 
         ring_buffer_nest_start(buffer);
         event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                            trace_ctx);
+                                            irq_flags, pc);
         if (!event)
                 goto out;
 
@@ -1087,7 +1093,7 @@
         entry->str = str;
 
         __buffer_unlock_commit(buffer, event);
-        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
         ret = 1;
 out:
```
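`__trace_puts()` and `__trace_bputs()` above are the plain-string and constant-string back ends of the `trace_puts()` macro; the revert only changes how they stamp context onto the event, not the external interface. A usage sketch (the module init function is hypothetical):

```c
#include <linux/kernel.h>       /* trace_puts() */

static int __init example_init(void)
{
        /* a constant string takes the fast path: a TRACE_BPUTS event
         * via __trace_bputs(), stamped with the irq_flags/pc pair */
        trace_puts("example module loaded\n");
        return 0;
}
```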
|---|
```diff
@@ -2608,52 +2614,36 @@
 }
 EXPORT_SYMBOL_GPL(trace_handle_return);
 
-static unsigned short migration_disable_value(void)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+                             unsigned long flags, int pc)
 {
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
-        return current->migration_disabled;
+        struct task_struct *tsk = current;
+
+        entry->preempt_count    = pc & 0xff;
+        entry->pid              = (tsk) ? tsk->pid : 0;
+        entry->type             = type;
+        entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 #else
-        return 0;
+                TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
+                ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
+                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+                ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
-
-unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
-{
-        unsigned int trace_flags = irqs_status;
-        unsigned int pc;
-
-        pc = preempt_count();
-
-        if (pc & NMI_MASK)
-                trace_flags |= TRACE_FLAG_NMI;
-        if (pc & HARDIRQ_MASK)
-                trace_flags |= TRACE_FLAG_HARDIRQ;
-        if (in_serving_softirq())
-                trace_flags |= TRACE_FLAG_SOFTIRQ;
-
-        if (tif_need_resched())
-                trace_flags |= TRACE_FLAG_NEED_RESCHED;
-        if (test_preempt_need_resched())
-                trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-
-#ifdef CONFIG_PREEMPT_LAZY
-        if (need_resched_lazy())
-                trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
-#endif
-
-        return (pc & 0xff) |
-                (migration_disable_value() & 0xff) << 8 |
-                (preempt_lazy_count() & 0xff) << 16 |
-                (trace_flags << 24);
-}
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
                           int type,
                           unsigned long len,
-                          unsigned int trace_ctx)
+                          unsigned long flags, int pc)
 {
-        return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
+        return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
 }
 
 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
```
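The deleted `tracing_gen_ctx_irq_test()` documents the packed layout being removed: preempt count in bits 0-7, migrate-disable depth in bits 8-15, preempt-lazy count in bits 16-23, and the flag bits in 24-31. The restored `tracing_generic_entry_update()` instead writes `entry->preempt_count`, `entry->pid`, `entry->type` and `entry->flags` directly from the `flags`/`pc` pair. For contrast, a stand-alone, userspace-runnable sketch of the old packing (the struct and helper names are hypothetical, only the bit layout comes from the deleted code):

```c
/* Illustrative decode of the packed trace_ctx word that the deleted
 * tracing_gen_ctx_irq_test() produced; compiles in userspace. */
#include <stdio.h>

struct trace_ctx_fields {               /* hypothetical helper type */
        unsigned int preempt_count;     /* bits  0-7  */
        unsigned int migrate_disable;   /* bits  8-15 */
        unsigned int preempt_lazy;      /* bits 16-23 */
        unsigned int trace_flags;       /* bits 24-31 */
};

static struct trace_ctx_fields unpack_trace_ctx(unsigned int trace_ctx)
{
        struct trace_ctx_fields f = {
                .preempt_count   = trace_ctx & 0xff,
                .migrate_disable = (trace_ctx >> 8) & 0xff,
                .preempt_lazy    = (trace_ctx >> 16) & 0xff,
                .trace_flags     = trace_ctx >> 24,
        };
        return f;
}

int main(void)
{
        /* pc = 1, no migrate-disable, no lazy count, flags = 0x01 */
        struct trace_ctx_fields f = unpack_trace_ctx((0x01u << 24) | 0x01u);

        printf("pc=%u migrate=%u lazy=%u flags=%#x\n",
               f.preempt_count, f.migrate_disable, f.preempt_lazy,
               f.trace_flags);
        return 0;
}
```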
|---|
```diff
@@ -2773,7 +2763,7 @@
 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
                                 struct trace_event_file *trace_file,
                                 int type, unsigned long len,
-                                unsigned int trace_ctx)
+                                unsigned long flags, int pc)
 {
         struct ring_buffer_event *entry;
         int val;
@@ -2786,7 +2776,7 @@
         /* Try to use the per cpu buffer first */
         val = this_cpu_inc_return(trace_buffered_event_cnt);
         if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
-                trace_event_setup(entry, type, trace_ctx);
+                trace_event_setup(entry, type, flags, pc);
                 entry->array[0] = len;
                 return entry;
         }
@@ -2794,7 +2784,7 @@
         }
 
         entry = __trace_buffer_lock_reserve(*current_rb,
-                                            type, len, trace_ctx);
+                                            type, len, flags, pc);
         /*
          * If tracing is off, but we have triggers enabled
          * we still need to look at the event data. Use the temp_buffer
@@ -2803,8 +2793,8 @@
          */
         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
                 *current_rb = temp_buffer;
-                entry = __trace_buffer_lock_reserve(*current_rb, type, len,
-                                                    trace_ctx);
+                entry = __trace_buffer_lock_reserve(*current_rb,
+                                                    type, len, flags, pc);
         }
         return entry;
 }
@@ -2890,7 +2880,7 @@
         ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
         event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
                                          fbuffer->event, fbuffer->entry,
-                                         fbuffer->trace_ctx, fbuffer->regs);
+                                         fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
```
|---|
```diff
@@ -2906,7 +2896,7 @@
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      struct ring_buffer_event *event,
-                                     unsigned int trace_ctx,
+                                     unsigned long flags, int pc,
                                      struct pt_regs *regs)
 {
         __buffer_unlock_commit(buffer, event);
@@ -2917,8 +2907,8 @@
          * and mmiotrace, but that's ok if they lose a function or
          * two. They are not that meaningful.
          */
-        ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
-        ftrace_trace_userstack(tr, buffer, trace_ctx);
+        ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+        ftrace_trace_userstack(tr, buffer, flags, pc);
 }
 
 /*
@@ -2932,8 +2922,9 @@
 }
 
 void
-trace_function(struct trace_array *tr, unsigned long ip, unsigned long
-               parent_ip, unsigned int trace_ctx)
+trace_function(struct trace_array *tr,
+               unsigned long ip, unsigned long parent_ip, unsigned long flags,
+               int pc)
 {
         struct trace_event_call *call = &event_function;
         struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2941,7 +2932,7 @@
         struct ftrace_entry *entry;
 
         event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
```
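With the revert, `trace_function()` again takes the pair explicitly, so the function-tracer callback has to sample both before calling it. A simplified sketch of that callback shape (modelled on this era's kernel/trace/trace_functions.c; recursion guards and per-CPU disable checks omitted):

```c
/* Simplified function-tracer callback feeding trace_function() with the
 * restored flags/pc pair; illustrative only. */
static void example_function_trace_call(unsigned long ip,
                                        unsigned long parent_ip,
                                        struct ftrace_ops *op,
                                        struct pt_regs *regs)
{
        struct trace_array *tr = op->private;
        unsigned long flags;
        int pc;

        pc = preempt_count();
        local_save_flags(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
}
```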
|---|
```diff
@@ -2975,8 +2966,8 @@
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                 unsigned int trace_ctx,
-                                 int skip, struct pt_regs *regs)
+                                 unsigned long flags,
+                                 int skip, int pc, struct pt_regs *regs)
 {
         struct trace_event_call *call = &event_kernel_stack;
         struct ring_buffer_event *event;
@@ -3024,7 +3015,7 @@
         size = nr_entries * sizeof(unsigned long);
         event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                     (sizeof(*entry) - sizeof(entry->caller)) + size,
-                                    trace_ctx);
+                                    flags, pc);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
```
|---|
```diff
@@ -3045,22 +3036,22 @@
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                       struct trace_buffer *buffer,
-                                      unsigned int trace_ctx,
-                                      int skip, struct pt_regs *regs)
+                                      unsigned long flags,
+                                      int skip, int pc, struct pt_regs *regs)
 {
         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
                 return;
 
-        __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
+        __ftrace_trace_stack(buffer, flags, skip, pc, regs);
 }
 
-void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
-                   int skip)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+                   int pc)
 {
         struct trace_buffer *buffer = tr->array_buffer.buffer;
 
         if (rcu_is_watching()) {
-                __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+                __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
                 return;
         }
 
@@ -3074,7 +3065,7 @@
                 return;
 
         rcu_irq_enter_irqson();
-        __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
         rcu_irq_exit_irqson();
 }
 
@@ -3084,15 +3075,19 @@
  */
 void trace_dump_stack(int skip)
 {
+        unsigned long flags;
+
         if (tracing_disabled || tracing_selftest_running)
                 return;
+
+        local_save_flags(flags);
 
 #ifndef CONFIG_UNWINDER_ORC
         /* Skip 1 to skip this function. */
         skip++;
 #endif
         __ftrace_trace_stack(global_trace.array_buffer.buffer,
-                             tracing_gen_ctx(), skip, NULL);
+                             flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
```
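`trace_dump_stack()` now samples the flags and `preempt_count()` itself, so callers stay one-liners. A hedged sketch (the calling function is hypothetical):

```c
#include <linux/kernel.h>       /* trace_dump_stack() */

static void example_debug_checkpoint(void)     /* hypothetical caller */
{
        /* skip == 0: start the dump at this caller; the function
         * compensates for itself on non-ORC unwinders, as the hunk
         * above shows */
        trace_dump_stack(0);
}
```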
|---|
```diff
@@ -3101,7 +3096,7 @@
 
 static void
 ftrace_trace_userstack(struct trace_array *tr,
-                       struct trace_buffer *buffer, unsigned int trace_ctx)
+                       struct trace_buffer *buffer, unsigned long flags, int pc)
 {
         struct trace_event_call *call = &event_user_stack;
         struct ring_buffer_event *event;
@@ -3128,7 +3123,7 @@
         __this_cpu_inc(user_stack_count);
 
         event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-                                            sizeof(*entry), trace_ctx);
+                                            sizeof(*entry), flags, pc);
         if (!event)
                 goto out_drop_count;
         entry = ring_buffer_event_data(event);
@@ -3148,7 +3143,7 @@
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
 static void ftrace_trace_userstack(struct trace_array *tr,
                                    struct trace_buffer *buffer,
-                                   unsigned int trace_ctx)
+                                   unsigned long flags, int pc)
 {
 }
 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
@@ -3278,9 +3273,9 @@
         struct trace_buffer *buffer;
         struct trace_array *tr = &global_trace;
         struct bprint_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long flags;
         char *tbuffer;
-        int len = 0, size;
+        int len = 0, size, pc;
 
         if (unlikely(tracing_selftest_running || tracing_disabled))
                 return 0;
@@ -3288,7 +3283,7 @@
         /* Don't pollute graph traces with trace_vprintk internals */
         pause_graph_tracing();
 
-        trace_ctx = tracing_gen_ctx();
+        pc = preempt_count();
         preempt_disable_notrace();
 
         tbuffer = get_trace_buf();
@@ -3302,11 +3297,12 @@
         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
                 goto out_put;
 
+        local_save_flags(flags);
         size = sizeof(*entry) + sizeof(u32) * len;
         buffer = tr->array_buffer.buffer;
         ring_buffer_nest_start(buffer);
         event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
@@ -3316,7 +3312,7 @@
         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
         if (!call_filter_check_discard(call, entry, buffer, event)) {
                 __buffer_unlock_commit(buffer, event);
-                ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+                ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
         }
 
 out:
@@ -3339,9 +3335,9 @@
 {
         struct trace_event_call *call = &event_print;
         struct ring_buffer_event *event;
-        int len = 0, size;
+        int len = 0, size, pc;
         struct print_entry *entry;
-        unsigned int trace_ctx;
+        unsigned long flags;
         char *tbuffer;
 
         if (tracing_disabled || tracing_selftest_running)
@@ -3350,7 +3346,7 @@
         /* Don't pollute graph traces with trace_vprintk internals */
         pause_graph_tracing();
 
-        trace_ctx = tracing_gen_ctx();
+        pc = preempt_count();
         preempt_disable_notrace();
 
 
@@ -3362,10 +3358,11 @@
 
         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
+        local_save_flags(flags);
         size = sizeof(*entry) + len + 1;
         ring_buffer_nest_start(buffer);
         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                            trace_ctx);
+                                            flags, pc);
         if (!event)
                 goto out;
         entry = ring_buffer_event_data(event);
@@ -3374,7 +3371,7 @@
         memcpy(&entry->buf, tbuffer, len + 1);
         if (!call_filter_check_discard(call, entry, buffer, event)) {
                 __buffer_unlock_commit(buffer, event);
-                ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+                ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
         }
 
 out:
```
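`trace_vbprintk()` and `trace_vprintk()` above are the engines behind `trace_printk()`; the revert changes only the context stamping, not the printf-style interface. A usage sketch (the handler and its parameters are hypothetical):

```c
#include <linux/kernel.h>       /* trace_printk() */

static void example_handler(int cpu, u64 latency_ns)   /* hypothetical */
{
        /* constant format strings take the fast bprintk path
         * (TRACE_BPRINT), serviced by trace_vbprintk() above */
        trace_printk("cpu=%d latency=%llu ns\n", cpu, latency_ns);
}
```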
|---|
```diff
@@ -3840,17 +3837,14 @@
 
 static void print_lat_help_header(struct seq_file *m)
 {
-        seq_puts(m, "#                  _--------=> CPU#            \n"
-                    "#                 / _-------=> irqs-off        \n"
-                    "#                | / _------=> need-resched    \n"
-                    "#                || / _-----=> need-resched-lazy\n"
-                    "#                ||| / _----=> hardirq/softirq \n"
-                    "#                |||| / _---=> preempt-depth   \n"
-                    "#                ||||| / _--=> preempt-lazy-depth\n"
-                    "#                |||||| / _-=> migrate-disable \n"
-                    "#                ||||||| /     delay           \n"
-                    "#  cmd     pid   |||||||| time  |   caller     \n"
-                    "#     \\   /      ||||||||  \\    |   /        \n");
+        seq_puts(m, "#                    _------=> CPU#            \n"
+                    "#                   / _-----=> irqs-off        \n"
+                    "#                  | / _----=> need-resched    \n"
+                    "#                  || / _---=> hardirq/softirq \n"
+                    "#                  ||| / _--=> preempt-depth   \n"
+                    "#                  |||| /     delay            \n"
+                    "#  cmd     pid     ||||| time  |   caller      \n"
+                    "#     \\   /        |||||  \\    |   /         \n");
 }
 
 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -3884,16 +3878,13 @@
 
         print_event_info(buf, m);
 
-        seq_printf(m, "#                            %.*s  _-------=> irqs-off\n", prec, space);
-        seq_printf(m, "#                            %.*s / _------=> need-resched\n", prec, space);
-        seq_printf(m, "#                            %.*s| / _-----=> need-resched-lazy\n", prec, space);
-        seq_printf(m, "#                            %.*s|| / _----=> hardirq/softirq\n", prec, space);
-        seq_printf(m, "#                            %.*s||| / _---=> preempt-depth\n", prec, space);
-        seq_printf(m, "#                            %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
-        seq_printf(m, "#                            %.*s||||| / _-=> migrate-disable\n", prec, space);
-        seq_printf(m, "#                            %.*s|||||| /    delay\n", prec, space);
-        seq_printf(m, "#           TASK-PID         %.*s CPU#  |||||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
-        seq_printf(m, "#              | |           %.*s   |   |||||||      |         |\n", prec, "       |    ");
+        seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
+        seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
+        seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
+        seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
+        seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
+        seq_printf(m, "#           TASK-PID         %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
+        seq_printf(m, "#              | |           %.*s   |   ||||      |         |\n", prec, "       |    ");
 }
 
 void
```
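The two header helpers above print the column legend that matches what the restored `tracing_generic_entry_update()` records: with the lazy and migrate-disable columns gone, the flag block shrinks from seven characters back to four (irqs-off, need-resched, hardirq/softirq, preempt-depth). A simplified sketch of how those columns decode from a `trace_entry`, in the spirit of `trace_print_lat_fmt()` in kernel/trace/trace_output.c (the real code distinguishes more states, e.g. NMI and preempt-resched):

```c
/* Illustrative decode of the four per-event flag columns; simplified. */
static void example_lat_columns(struct trace_entry *entry,
                                struct trace_seq *s)
{
        char irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
        char resched  = (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
        char hardsoft = (entry->flags & TRACE_FLAG_HARDIRQ) ? 'h' :
                        (entry->flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.';

        trace_seq_printf(s, "%c%c%c", irqs_off, resched, hardsoft);
        if (entry->preempt_count)
                trace_seq_printf(s, "%x", entry->preempt_count);
        else
                trace_seq_putc(s, '.');
}
```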
|---|
```diff
@@ -6698,6 +6689,7 @@
         enum event_trigger_type tt = ETT_NONE;
         struct trace_buffer *buffer;
         struct print_entry *entry;
+        unsigned long irq_flags;
         ssize_t written;
         int size;
         int len;
@@ -6717,6 +6709,7 @@
 
         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+        local_save_flags(irq_flags);
         size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
 
         /* If less than "<faulted>", then make sure we can still add that */
@@ -6725,7 +6718,7 @@
 
         buffer = tr->array_buffer.buffer;
         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                            tracing_gen_ctx());
+                                            irq_flags, preempt_count());
         if (unlikely(!event))
                 /* Ring buffer disabled, return as if not open for write */
                 return -EBADF;
@@ -6777,6 +6770,7 @@
         struct ring_buffer_event *event;
         struct trace_buffer *buffer;
         struct raw_data_entry *entry;
+        unsigned long irq_flags;
         ssize_t written;
         int size;
         int len;
@@ -6798,13 +6792,14 @@
 
         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+        local_save_flags(irq_flags);
         size = sizeof(*entry) + cnt;
         if (cnt < FAULT_SIZE_ID)
                 size += FAULT_SIZE_ID - cnt;
 
         buffer = tr->array_buffer.buffer;
         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
-                                            tracing_gen_ctx());
+                                            irq_flags, preempt_count());
         if (!event)
                 /* Ring buffer disabled, return as if not open for write */
                 return -EBADF;
```
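The two functions above, `tracing_mark_write()` and `tracing_mark_raw_write()`, service the tracefs `trace_marker` and `trace_marker_raw` files. A userspace sketch that exercises the first one (assumes tracefs is mounted at /sys/kernel/tracing):

```c
/* Writing trace_marker from userspace lands in tracing_mark_write()
 * above as a TRACE_PRINT event. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from userspace\n";
        int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, msg, strlen(msg)) < 0) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
```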
|---|
```diff
@@ -9391,6 +9386,7 @@
         tracing_off();
 
         local_irq_save(flags);
+        printk_nmi_direct_enter();
 
         /* Simulate the iterator */
         trace_init_global_iter(&iter);
@@ -9478,6 +9474,7 @@
                 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
         }
         atomic_dec(&dump_running);
+        printk_nmi_direct_exit();
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ftrace_dump);
```
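The final two hunks restore the `printk_nmi_direct_enter()`/`printk_nmi_direct_exit()` bracket around the dump loop, so `ftrace_dump()` can print from NMI-like contexts without deadlocking on the printk locks; the pair wraps the entire irqs-off region. A hedged sketch of the typical call (the caller is hypothetical):

```c
#include <linux/kernel.h>       /* ftrace_dump(), DUMP_ALL */

static void example_emergency_dump(void)       /* hypothetical caller */
{
        /* flush every CPU's ring buffer to the console; the restored
         * printk_nmi_direct_*() calls make this safe even when invoked
         * from an NMI-ish context */
        ftrace_dump(DUMP_ALL);
}
```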