@@ -9,81 +9,149 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include "trace.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/preemptirq.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
 static DEFINE_PER_CPU(int, tracing_irq_cpu);
 
+/*
+ * Like trace_hardirqs_on() but without the lockdep invocation. This is
+ * used in the low level entry code where the ordering vs. RCU is important
+ * and lockdep uses a staged approach which splits the lockdep hardirq
+ * tracking into a RCU on and a RCU off section.
+ */
+void trace_hardirqs_on_prepare(void)
+{
+	if (this_cpu_read(tracing_irq_cpu)) {
+		if (!in_nmi()) {
+			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      CALLER_ADDR1);
+		}
+		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
+		this_cpu_write(tracing_irq_cpu, 0);
+	}
+}
+EXPORT_SYMBOL(trace_hardirqs_on_prepare);
+NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
+
 void trace_hardirqs_on(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      CALLER_ADDR1);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
+NOKPROBE_SYMBOL(trace_hardirqs_on);
 
-void trace_hardirqs_off(void)
+/*
+ * Like trace_hardirqs_off() but without the lockdep invocation. This is
+ * used in the low level entry code where the ordering vs. RCU is important
+ * and lockdep uses a staged approach which splits the lockdep hardirq
+ * tracking into a RCU on and a RCU off section.
+ */
+void trace_hardirqs_off_finish(void)
 {
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
-			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi()) {
+			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       CALLER_ADDR1);
+		}
 	}
 
-	lockdep_hardirqs_off(CALLER_ADDR0);
 }
-EXPORT_SYMBOL(trace_hardirqs_off);
+EXPORT_SYMBOL(trace_hardirqs_off_finish);
+NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
 
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
-			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
-		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
-		this_cpu_write(tracing_irq_cpu, 0);
-	}
-
-	lockdep_hardirqs_on(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+void trace_hardirqs_off(void)
 {
 	lockdep_hardirqs_off(CALLER_ADDR0);
 
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
+		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi()) {
+			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       CALLER_ADDR1);
+		}
+	}
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+NOKPROBE_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+	if (this_cpu_read(tracing_irq_cpu)) {
+		if (!in_nmi()) {
+			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      caller_addr);
+		}
+		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
+		this_cpu_write(tracing_irq_cpu, 0);
+	}
+
+	lockdep_hardirqs_on_prepare(caller_addr);
+	lockdep_hardirqs_on(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+	lockdep_hardirqs_off(caller_addr);
+
+	if (!this_cpu_read(tracing_irq_cpu)) {
+		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       caller_addr);
+		}
 	}
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (!in_nmi())
+	if (!in_nmi()) {
 		trace_preempt_enable_rcuidle(a0, a1);
+		trace_android_rvh_preempt_enable(a0, a1);
+	}
 	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (!in_nmi())
+	if (!in_nmi()) {
 		trace_preempt_disable_rcuidle(a0, a1);
+		trace_android_rvh_preempt_disable(a0, a1);
+	}
 	tracer_preempt_off(a0, a1);
 }
 #endif
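
The added `trace_android_rvh_*` calls are Android restricted vendor hooks: tracepoint-style attachment points that let vendor code observe IRQ and preemption state transitions without further changes to this file. The sketch below shows how a handler could be attached to one of them. It is a minimal illustration, assuming the hooks are declared in `<trace/hooks/preemptirq.h>` with `TP_PROTO(unsigned long ip, unsigned long parent_ip)` so that the generated registration helper is `register_trace_android_rvh_irqs_disable()`; the module, function, and variable names here are hypothetical.

```c
/*
 * Hypothetical sketch: count IRQ-disable events per CPU via the
 * android_rvh_irqs_disable restricted vendor hook wired up in the diff
 * above. Assumes the hook prototype is
 * TP_PROTO(unsigned long ip, unsigned long parent_ip).
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <trace/hooks/preemptirq.h>

static DEFINE_PER_CPU(u64, irqsoff_events);

/* Tracepoint-style probe: the first argument is the opaque data pointer. */
static void probe_irqs_disable(void *data, unsigned long ip,
			       unsigned long parent_ip)
{
	/*
	 * Runs on the irqs-off path (not from NMI, per the in_nmi() guards
	 * above); keep it fast, lock-free, and non-sleeping.
	 */
	this_cpu_inc(irqsoff_events);
}

static int __init irqsoff_counter_init(void)
{
	/*
	 * Restricted vendor hooks accept a single handler and are never
	 * unregistered, so registration normally lives in code that stays
	 * resident for the lifetime of the system.
	 */
	return register_trace_android_rvh_irqs_disable(probe_irqs_disable,
						       NULL);
}
module_init(irqsoff_counter_init);

MODULE_LICENSE("GPL");
```

The `NOKPROBE_SYMBOL()` annotations (and the new `<linux/kprobes.h>` include) keep kprobes from being planted on these helpers, since they run on the very paths a probe handler would re-enter.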