From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] add wifi6 8852be driver

---
 kernel/kernel/trace/trace_events.c | 42 +++++++++++++++++++++---------------------
 1 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/kernel/kernel/trace/trace_events.c b/kernel/kernel/trace/trace_events.c
index ca64598..321cfda 100644
--- a/kernel/kernel/trace/trace_events.c
+++ b/kernel/kernel/trace/trace_events.c
@@ -184,8 +184,6 @@
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
-	__common_field(unsigned char, migrate_disable);
-	__common_field(unsigned char, preempt_lazy_count);
 
 	return ret;
 }
@@ -261,19 +259,22 @@
 	    trace_event_ignore_this_pid(trace_file))
 		return NULL;
 
+	local_save_flags(fbuffer->flags);
+	fbuffer->pc = preempt_count();
 	/*
 	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
 	 * preemption (adding one to the preempt_count). Since we are
 	 * interested in the preempt_count at the time the tracepoint was
 	 * hit, we need to subtract one to offset the increment.
 	 */
-	fbuffer->trace_ctx = tracing_gen_ctx_dec();
+	if (IS_ENABLED(CONFIG_PREEMPTION))
+		fbuffer->pc--;
 	fbuffer->trace_file = trace_file;
 
 	fbuffer->event =
 		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
 						event_call->event.type, len,
-						fbuffer->trace_ctx);
+						fbuffer->flags, fbuffer->pc);
 	if (!fbuffer->event)
 		return NULL;
 
@@ -370,7 +371,6 @@
 {
 	struct trace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
-	unsigned long file_flags = file->flags;
 	int ret = 0;
 	int disable;
 
@@ -394,6 +394,8 @@
 				break;
 			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
 			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+			/* Disable use of trace_buffered_event */
+			trace_buffered_event_disable();
 		} else
 			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
 
@@ -432,6 +434,8 @@
 			if (atomic_inc_return(&file->sm_ref) > 1)
 				break;
 			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+			/* Enable use of trace_buffered_event */
+			trace_buffered_event_enable();
 		}
 
 		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
@@ -469,15 +473,6 @@
 			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
 		}
 		break;
-	}
-
-	/* Enable or disable use of trace_buffered_event */
-	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
-	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
-		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
-			trace_buffered_event_enable();
-		else
-			trace_buffered_event_disable();
 	}
 
 	return ret;
@@ -1860,9 +1855,10 @@
 };
 
 static const struct file_operations ftrace_enable_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_file_tr,
 	.read = event_enable_read,
 	.write = event_enable_write,
+	.release = tracing_release_file_tr,
 	.llseek = default_llseek,
 };
 
@@ -1879,9 +1875,10 @@
 };
 
 static const struct file_operations ftrace_event_filter_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_file_tr,
 	.read = event_filter_read,
 	.write = event_filter_write,
+	.release = tracing_release_file_tr,
 	.llseek = default_llseek,
 };
 
@@ -2660,7 +2657,7 @@
 	 * over from this module may be passed to the new module events and
 	 * unexpected results may occur.
 	 */
-	tracing_reset_all_online_cpus();
+	tracing_reset_all_online_cpus_unlocked();
 }
 
 static int trace_module_notify(struct notifier_block *self,
@@ -3698,11 +3695,12 @@
 	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
@@ -3710,9 +3708,11 @@
 	if (disabled != 1)
 		goto out;
 
+	local_save_flags(flags);
+
 	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
 						TRACE_FN, sizeof(*entry),
-						trace_ctx);
+						flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -3720,7 +3720,7 @@
 	entry->parent_ip = parent_ip;
 
 	event_trigger_unlock_commit(&event_trace_file, buffer, event,
-				    entry, trace_ctx);
+				    entry, flags, pc);
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	preempt_enable_notrace();
--
Gitblit v1.6.2