2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/trace/trace.h
@@ -136,6 +136,25 @@
 	unsigned long		ret_ip;
 };
 
+/*
+ * trace_flag_type is an enumeration that holds different
+ * states when a trace occurs. These are:
+ *  IRQS_OFF		- interrupts were disabled
+ *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
+ *  NEED_RESCHED	- reschedule is requested
+ *  HARDIRQ		- inside an interrupt handler
+ *  SOFTIRQ		- inside a softirq handler
+ */
+enum trace_flag_type {
+	TRACE_FLAG_IRQS_OFF		= 0x01,
+	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
+	TRACE_FLAG_NEED_RESCHED		= 0x04,
+	TRACE_FLAG_HARDIRQ		= 0x08,
+	TRACE_FLAG_SOFTIRQ		= 0x10,
+	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
+	TRACE_FLAG_NMI			= 0x40,
+};
+
 #define TRACE_BUF_SIZE		1024
 
 struct trace_array;
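
Note: these flag bits are tested against the flags byte recorded in each trace entry. As a minimal sketch (not part of the patch; the helper below is hypothetical), this is roughly how the irq-state character of the latency output format maps onto them:

	/* Hypothetical decoder, shown only to illustrate the bit layout. */
	static char trace_irq_state_char(unsigned char flags)
	{
		if (flags & TRACE_FLAG_IRQS_OFF)
			return 'd';	/* interrupts were disabled */
		if (flags & TRACE_FLAG_IRQS_NOSUPPORT)
			return 'X';	/* arch cannot report irq state */
		return '.';		/* interrupts enabled */
	}
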
@@ -726,7 +745,8 @@
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
-			  unsigned int trace_ctx);
+			  unsigned long flags,
+			  int pc);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -751,11 +771,11 @@
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned int trace_ctx);
+		    unsigned long flags, int pc);
 void trace_graph_function(struct trace_array *tr,
 			  unsigned long ip,
 			  unsigned long parent_ip,
-			  unsigned int trace_ctx);
+			  unsigned long flags, int pc);
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
@@ -823,10 +843,11 @@
 #endif
 
 #ifdef CONFIG_STACKTRACE
-void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+		   int pc);
 #else
-static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
-				 int skip)
+static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
+				 int skip, int pc)
 {
 }
 #endif /* CONFIG_STACKTRACE */
@@ -966,10 +987,10 @@
 extern void graph_trace_close(struct trace_iterator *iter);
 extern int __trace_graph_entry(struct trace_array *tr,
 			       struct ftrace_graph_ent *trace,
-			       unsigned int trace_ctx);
+			       unsigned long flags, int pc);
 extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
-				 unsigned int trace_ctx);
+				 unsigned long flags, int pc);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
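
Note: the prototypes above move from the single packed trace_ctx word back to the older (flags, pc) pair. A sketch of what a typical call site looks like under that convention, assuming the usual sources of the two values (local_save_flags() and preempt_count()):

	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();
	trace_function(tr, ip, parent_ip, flags, pc);
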
@@ -1432,15 +1453,15 @@
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,
-				     unsigned int trcace_ctx,
+				     unsigned long flags, int pc,
 				     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
 					      struct trace_buffer *buffer,
 					      struct ring_buffer_event *event,
-					      unsigned int trace_ctx)
+					      unsigned long flags, int pc)
 {
-	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
+	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
 }
 
 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -1513,7 +1534,8 @@
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself
- * @trace_ctx: The tracing context flags.
+ * @irq_flags: The state of the interrupts at the start of the event
+ * @pc: The state of the preempt count at the start of the event.
  *
  * This is a helper function to handle triggers that require data
  * from the event itself. It also tests the event against filters and
@@ -1523,12 +1545,12 @@
 event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct trace_buffer *buffer,
 			    struct ring_buffer_event *event,
-			    void *entry, unsigned int trace_ctx)
+			    void *entry, unsigned long irq_flags, int pc)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
+		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
 
 	if (tt)
 		event_triggers_post_call(file, tt);
@@ -1540,7 +1562,8 @@
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself
- * @trace_ctx: The tracing context flags.
+ * @irq_flags: The state of the interrupts at the start of the event
+ * @pc: The state of the preempt count at the start of the event.
  *
  * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
@@ -1553,14 +1576,14 @@
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
 				 struct trace_buffer *buffer,
 				 struct ring_buffer_event *event,
-				 void *entry, unsigned int trace_ctx,
+				 void *entry, unsigned long irq_flags, int pc,
 				 struct pt_regs *regs)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-						trace_ctx, regs);
+						irq_flags, pc, regs);
 
 	if (tt)
 		event_triggers_post_call(file, tt);
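
Note: a sketch of how an event probe commits its entry through the restored helper; the variable names (trace_file, irq_flags, pc) are illustrative, following the usual conventions in kernel/trace:

	entry = ring_buffer_event_data(event);
	/* ... fill in the event's fields ... */
	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);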