From a5969cabbb4660eab42b6ef0412cbbd1200cf14d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 12 Oct 2024 07:10:09 +0000
Subject: [PATCH] Change led to gpio
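
Revert kernel/trace/trace.c from the packed "unsigned int trace_ctx"
argument back to the older "unsigned long flags, int pc" calling
convention: restore the exported tracing_generic_entry_update(), drop
tracing_gen_ctx_irq_test() and migration_disable_value(), and remove
the need-resched-lazy, preempt-lazy-depth and migrate-disable columns
from the trace output headers.

The patch also carries several tracing fixes: a caller-locked
tracing_reset_all_online_cpus_unlocked() variant, ref-counted
open/release handlers for event files and tracer option files,
close/reopen of iter->trace when the current tracer changes,
max_buffer record enable/disable on tracing_cpumask updates,
consumption of over-long lines in tracing_read_pipe() ("[LINE TOO
BIG]"), a per-CPU snapshot swap via smp_call_function_single(),
allowing buffer_percent to be set to 0, clearing the error log when
an instance is removed, and printk NMI direct mode around
ftrace_dump().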
---
kernel/kernel/trace/trace.c | 356 +++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 224 insertions(+), 132 deletions(-)
diff --git a/kernel/kernel/trace/trace.c b/kernel/kernel/trace/trace.c
index c4cce26..3992a50 100644
--- a/kernel/kernel/trace/trace.c
+++ b/kernel/kernel/trace/trace.c
@@ -177,7 +177,7 @@
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned int trace_ctx);
+ unsigned long flags, int pc);
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -910,23 +910,23 @@
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs);
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs);
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long trace_ctx,
- int skip, struct pt_regs *regs)
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
{
}
@@ -934,24 +934,24 @@
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
- int type, unsigned int trace_ctx)
+ int type, unsigned long flags, int pc)
{
struct trace_entry *ent = ring_buffer_event_data(event);
- tracing_generic_entry_update(ent, type, trace_ctx);
+ tracing_generic_entry_update(ent, type, flags, pc);
}
static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
struct ring_buffer_event *event;
event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL)
- trace_event_setup(event, type, trace_ctx);
+ trace_event_setup(event, type, flags, pc);
return event;
}
@@ -1012,22 +1012,25 @@
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct print_entry *entry;
- unsigned int trace_ctx;
+ unsigned long irq_flags;
int alloc;
+ int pc;
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
alloc = sizeof(*entry) + size + 2; /* possible \n added */
- trace_ctx = tracing_gen_ctx();
+ local_save_flags(irq_flags);
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- trace_ctx);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ irq_flags, pc);
if (!event) {
size = 0;
goto out;
@@ -1046,7 +1049,7 @@
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+ ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
out:
ring_buffer_nest_end(buffer);
return size;
@@ -1063,22 +1066,25 @@
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
- unsigned int trace_ctx;
+ unsigned long irq_flags;
int size = sizeof(struct bputs_entry);
int ret = 0;
+ int pc;
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
+ pc = preempt_count();
+
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
- trace_ctx = tracing_gen_ctx();
+ local_save_flags(irq_flags);
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- trace_ctx);
+ irq_flags, pc);
if (!event)
goto out;
@@ -1087,7 +1093,7 @@
entry->str = str;
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+ ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
ret = 1;
out:
@@ -1877,9 +1883,10 @@
* place on this CPU. We fail to record, but we reset
* the max trace buffer (no one writes directly to it)
* and flag that it failed.
+ * Another reason is that a resize is in progress.
*/
trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
- "Failed to swap buffers due to commit in progress\n");
+ "Failed to swap buffers due to commit or resize in progress\n");
}
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
@@ -2173,9 +2180,11 @@
}
/* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
{
struct trace_array *tr;
+
+ lockdep_assert_held(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->clear_trace)
@@ -2186,6 +2195,13 @@
tracing_reset_online_cpus(&tr->max_buffer);
#endif
}
+}
+
+void tracing_reset_all_online_cpus(void)
+{
+ mutex_lock(&trace_types_lock);
+ tracing_reset_all_online_cpus_unlocked();
+ mutex_unlock(&trace_types_lock);
}
/*
@@ -2608,52 +2624,36 @@
}
EXPORT_SYMBOL_GPL(trace_handle_return);
-static unsigned short migration_disable_value(void)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+ unsigned long flags, int pc)
{
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
- return current->migration_disabled;
+ struct task_struct *tsk = current;
+
+ entry->preempt_count = pc & 0xff;
+ entry->pid = (tsk) ? tsk->pid : 0;
+ entry->type = type;
+ entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+ (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
- return 0;
+ TRACE_FLAG_IRQS_NOSUPPORT |
#endif
+ ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
-
-unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
-{
- unsigned int trace_flags = irqs_status;
- unsigned int pc;
-
- pc = preempt_count();
-
- if (pc & NMI_MASK)
- trace_flags |= TRACE_FLAG_NMI;
- if (pc & HARDIRQ_MASK)
- trace_flags |= TRACE_FLAG_HARDIRQ;
- if (in_serving_softirq())
- trace_flags |= TRACE_FLAG_SOFTIRQ;
-
- if (tif_need_resched())
- trace_flags |= TRACE_FLAG_NEED_RESCHED;
- if (test_preempt_need_resched())
- trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-
-#ifdef CONFIG_PREEMPT_LAZY
- if (need_resched_lazy())
- trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
-#endif
-
- return (pc & 0xff) |
- (migration_disable_value() & 0xff) << 8 |
- (preempt_lazy_count() & 0xff) << 16 |
- (trace_flags << 24);
-}
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
- return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
+ return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -2773,7 +2773,7 @@
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
struct ring_buffer_event *entry;
int val;
@@ -2786,7 +2786,7 @@
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
- trace_event_setup(entry, type, trace_ctx);
+ trace_event_setup(entry, type, flags, pc);
entry->array[0] = len;
return entry;
}
@@ -2794,7 +2794,7 @@
}
entry = __trace_buffer_lock_reserve(*current_rb,
- type, len, trace_ctx);
+ type, len, flags, pc);
/*
* If tracing is off, but we have triggers enabled
* we still need to look at the event data. Use the temp_buffer
@@ -2803,8 +2803,8 @@
*/
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
- entry = __trace_buffer_lock_reserve(*current_rb, type, len,
- trace_ctx);
+ entry = __trace_buffer_lock_reserve(*current_rb,
+ type, len, flags, pc);
}
return entry;
}
@@ -2890,7 +2890,7 @@
ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
- fbuffer->trace_ctx, fbuffer->regs);
+ fbuffer->flags, fbuffer->pc, fbuffer->regs);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
@@ -2906,7 +2906,7 @@
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned int trace_ctx,
+ unsigned long flags, int pc,
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
@@ -2917,8 +2917,8 @@
* and mmiotrace, but that's ok if they lose a function or
* two. They are not that meaningful.
*/
- ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
- ftrace_trace_userstack(tr, buffer, trace_ctx);
+ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+ ftrace_trace_userstack(tr, buffer, flags, pc);
}
/*
@@ -2932,8 +2932,9 @@
}
void
-trace_function(struct trace_array *tr, unsigned long ip, unsigned long
- parent_ip, unsigned int trace_ctx)
+trace_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip, unsigned long flags,
+ int pc)
{
struct trace_event_call *call = &event_function;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2941,7 +2942,7 @@
struct ftrace_entry *entry;
event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
- trace_ctx);
+ flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -2975,8 +2976,8 @@
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
@@ -3024,7 +3025,7 @@
size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
(sizeof(*entry) - sizeof(entry->caller)) + size,
- trace_ctx);
+ flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3045,22 +3046,22 @@
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
{
if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
return;
- __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
+ __ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
-void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
- int skip)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+ int pc)
{
struct trace_buffer *buffer = tr->array_buffer.buffer;
if (rcu_is_watching()) {
- __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+ __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
return;
}
@@ -3074,7 +3075,7 @@
return;
rcu_irq_enter_irqson();
- __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+ __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
rcu_irq_exit_irqson();
}
@@ -3084,15 +3085,19 @@
*/
void trace_dump_stack(int skip)
{
+ unsigned long flags;
+
if (tracing_disabled || tracing_selftest_running)
return;
+
+ local_save_flags(flags);
#ifndef CONFIG_UNWINDER_ORC
/* Skip 1 to skip this function. */
skip++;
#endif
__ftrace_trace_stack(global_trace.array_buffer.buffer,
- tracing_gen_ctx(), skip, NULL);
+ flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3101,7 +3106,7 @@
static void
ftrace_trace_userstack(struct trace_array *tr,
- struct trace_buffer *buffer, unsigned int trace_ctx)
+ struct trace_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
@@ -3128,7 +3133,7 @@
__this_cpu_inc(user_stack_count);
event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
- sizeof(*entry), trace_ctx);
+ sizeof(*entry), flags, pc);
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
@@ -3148,7 +3153,7 @@
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned int trace_ctx)
+ unsigned long flags, int pc)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
@@ -3278,9 +3283,9 @@
struct trace_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
- unsigned int trace_ctx;
+ unsigned long flags;
char *tbuffer;
- int len = 0, size;
+ int len = 0, size, pc;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -3288,7 +3293,7 @@
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
- trace_ctx = tracing_gen_ctx();
+ pc = preempt_count();
preempt_disable_notrace();
tbuffer = get_trace_buf();
@@ -3302,11 +3307,12 @@
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out_put;
+ local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
- trace_ctx);
+ flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3316,7 +3322,7 @@
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
}
out:
@@ -3339,9 +3345,9 @@
{
struct trace_event_call *call = &event_print;
struct ring_buffer_event *event;
- int len = 0, size;
+ int len = 0, size, pc;
struct print_entry *entry;
- unsigned int trace_ctx;
+ unsigned long flags;
char *tbuffer;
if (tracing_disabled || tracing_selftest_running)
@@ -3350,7 +3356,7 @@
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
- trace_ctx = tracing_gen_ctx();
+ pc = preempt_count();
preempt_disable_notrace();
@@ -3362,10 +3368,11 @@
len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+ local_save_flags(flags);
size = sizeof(*entry) + len + 1;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- trace_ctx);
+ flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3374,7 +3381,7 @@
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+ ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
}
out:
@@ -3720,8 +3727,15 @@
* will point to the same string as current_trace->name.
*/
mutex_lock(&trace_types_lock);
- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+ /* Close iter->trace before switching to the new current tracer */
+ if (iter->trace->close)
+ iter->trace->close(iter);
*iter->trace = *tr->current_trace;
+ /* Reopen the new current tracer */
+ if (iter->trace->open)
+ iter->trace->open(iter);
+ }
mutex_unlock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -3840,17 +3854,14 @@
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _--------=> CPU# \n"
- "# / _-------=> irqs-off \n"
- "# | / _------=> need-resched \n"
- "# || / _-----=> need-resched-lazy\n"
- "# ||| / _----=> hardirq/softirq \n"
- "# |||| / _---=> preempt-depth \n"
- "# ||||| / _--=> preempt-lazy-depth\n"
- "# |||||| / _-=> migrate-disable \n"
- "# ||||||| / delay \n"
- "# cmd pid |||||||| time | caller \n"
- "# \\ / |||||||| \\ | / \n");
+ seq_puts(m, "# _------=> CPU# \n"
+ "# / _-----=> irqs-off \n"
+ "# | / _----=> need-resched \n"
+ "# || / _---=> hardirq/softirq \n"
+ "# ||| / _--=> preempt-depth \n"
+ "# |||| / delay \n"
+ "# cmd pid ||||| time | caller \n"
+ "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -3884,16 +3895,13 @@
print_event_info(buf, m);
- seq_printf(m, "# %.*s _-------=> irqs-off\n", prec, space);
- seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
- seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
- seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
- seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space);
- seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
- seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space);
- seq_printf(m, "# %.*s|||||| / delay\n", prec, space);
- seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
- seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | ");
+ seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
+ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+ seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+ seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+ seq_printf(m, "# %.*s||| / delay\n", prec, space);
+ seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+ seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
}
void
@@ -4486,6 +4494,33 @@
return 0;
}
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated with it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(file->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
+
+ return 0;
+}
+
static int tracing_release(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -4715,6 +4750,8 @@
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
+ .read_iter = seq_read_iter,
+ .splice_read = generic_file_splice_read,
.write = tracing_write_stub,
.llseek = tracing_lseek,
.release = tracing_release,
@@ -4774,11 +4811,17 @@
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#endif
}
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#endif
}
}
arch_spin_unlock(&tr->max_lock);
@@ -6249,6 +6292,7 @@
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
+ kfree(iter->temp);
mutex_destroy(&iter->mutex);
kfree(iter);
@@ -6381,7 +6425,20 @@
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
- /* don't print partial lines */
+ /*
+ * If one print_trace_line() fills the entire trace_seq in one shot,
+ * trace_seq_to_user() will return -EBUSY because save_len == 0.
+ * In this case, we need to consume it; otherwise, the loop will peek
+ * at this event next time, resulting in an infinite loop.
+ */
+ if (save_len == 0) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ trace_consume(iter);
+ break;
+ }
+
+ /* In other cases, don't print partial lines */
iter->seq.seq.len = save_len;
break;
}
@@ -6698,6 +6755,7 @@
enum event_trigger_type tt = ETT_NONE;
struct trace_buffer *buffer;
struct print_entry *entry;
+ unsigned long irq_flags;
ssize_t written;
int size;
int len;
@@ -6717,6 +6775,7 @@
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+ local_save_flags(irq_flags);
size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
/* If less than "<faulted>", then make sure we can still add that */
@@ -6725,7 +6784,7 @@
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- tracing_gen_ctx());
+ irq_flags, preempt_count());
if (unlikely(!event))
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
@@ -6777,6 +6836,7 @@
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct raw_data_entry *entry;
+ unsigned long irq_flags;
ssize_t written;
int size;
int len;
@@ -6798,13 +6858,14 @@
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+ local_save_flags(irq_flags);
size = sizeof(*entry) + cnt;
if (cnt < FAULT_SIZE_ID)
size += FAULT_SIZE_ID - cnt;
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
- tracing_gen_ctx());
+ irq_flags, preempt_count());
if (!event)
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
@@ -7032,6 +7093,11 @@
return ret;
}
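+/* Runs on the target CPU via smp_call_function_single() to swap that CPU's snapshot buffer */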
+static void tracing_swap_cpu_buffer(void *tr)
+{
+ update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
+}
+
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -7090,13 +7156,15 @@
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
break;
- local_irq_disable();
/* Now, we're going to swap */
- if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ local_irq_disable();
update_max_tr(tr, current, smp_processor_id(), NULL);
- else
- update_max_tr_single(tr, current, iter->cpu_file);
- local_irq_enable();
+ local_irq_enable();
+ } else {
+ smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
+ (void *)tr, 1);
+ }
break;
default:
if (tr->allocated_snapshot) {
@@ -7185,10 +7253,11 @@
#endif
static const struct file_operations set_tracer_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_pipe_fops = {
@@ -7511,7 +7580,7 @@
.open = tracing_err_log_open,
.write = tracing_err_log_write,
.read = seq_read,
- .llseek = seq_lseek,
+ .llseek = tracing_lseek,
.release = tracing_err_log_release,
};
@@ -8227,12 +8296,33 @@
return cnt;
}
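+/* Take a reference on the trace_array behind this option file; dropped in tracing_release_options() */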
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+ struct trace_option_dentry *topt = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(topt->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+ struct trace_option_dentry *topt = file->private_data;
+
+ trace_array_put(topt->tr);
+ return 0;
+}
static const struct file_operations trace_options_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_options,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_options,
};
/*
@@ -8562,9 +8652,6 @@
if (val > 100)
return -EINVAL;
- if (!val)
- val = 1;
-
tr->buffer_percent = val;
(*ppos)++;
@@ -8889,6 +8976,7 @@
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
free_trace_buffers(tr);
+ clear_tracing_err_log(tr);
for (i = 0; i < tr->nr_topts; i++) {
kfree(tr->topts[i].topts);
@@ -9391,6 +9479,7 @@
tracing_off();
local_irq_save(flags);
+ printk_nmi_direct_enter();
/* Simulate the iterator */
trace_init_global_iter(&iter);
@@ -9478,6 +9567,7 @@
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
+ printk_nmi_direct_exit();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
@@ -9709,6 +9799,8 @@
static_key_enable(&tracepoint_printk_key.key);
}
tracer_alloc_buffers();
+
+ init_events();
}
void __init trace_init(void)
--
Gitblit v1.6.2