.. | .. |
1883 | 1883 | * place on this CPU. We fail to record, but we reset
1884 | 1884 | * the max trace buffer (no one writes directly to it)
1885 | 1885 | * and flag that it failed.
     | 1886 | + * Another reason is that a resize is in progress.
1886 | 1887 | */
1887 | 1888 | trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1888 |      | - "Failed to swap buffers due to commit in progress\n");
     | 1889 | + "Failed to swap buffers due to commit or resize in progress\n");
1889 | 1890 | }
1890 | 1891 |
1891 | 1892 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
.. | .. |
2179 | 2180 | }
2180 | 2181 |
2181 | 2182 | /* Must have trace_types_lock held */
2182 |      | -void tracing_reset_all_online_cpus(void)
     | 2183 | +void tracing_reset_all_online_cpus_unlocked(void)
2183 | 2184 | {
2184 | 2185 | struct trace_array *tr;
     | 2186 | +
     | 2187 | + lockdep_assert_held(&trace_types_lock);
2185 | 2188 |
2186 | 2189 | list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2187 | 2190 | if (!tr->clear_trace)
.. | .. |
2192 | 2195 | tracing_reset_online_cpus(&tr->max_buffer);
2193 | 2196 | #endif
2194 | 2197 | }
     | 2198 | +}
     | 2199 | +
     | 2200 | +void tracing_reset_all_online_cpus(void)
     | 2201 | +{
     | 2202 | + mutex_lock(&trace_types_lock);
     | 2203 | + tracing_reset_all_online_cpus_unlocked();
     | 2204 | + mutex_unlock(&trace_types_lock);
2195 | 2205 | }
2196 | 2206 |
2197 | 2207 | /*
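
The two hunks above split the reset path in two: tracing_reset_all_online_cpus_unlocked() documents its locking requirement with lockdep_assert_held(&trace_types_lock), and a new tracing_reset_all_online_cpus() wrapper takes the lock before calling it, so callers that already hold trace_types_lock can reuse the loop without deadlocking. A minimal userspace sketch of that locked-wrapper pattern, using pthreads and a made-up reset_all() workload in place of the per-trace-array reset (an analogy, not the kernel code):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_held;	/* poor man's stand-in for lockdep tracking */
static int counters[4];

/* Caller must already hold state_lock (mirrors the _unlocked variant). */
static void reset_all_unlocked(void)
{
	assert(lock_held);	/* plays the role of lockdep_assert_held() */
	for (int i = 0; i < 4; i++)
		counters[i] = 0;
}

/* Convenience wrapper that takes the lock itself. */
static void reset_all(void)
{
	pthread_mutex_lock(&state_lock);
	lock_held = 1;
	reset_all_unlocked();
	lock_held = 0;
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	counters[2] = 42;
	reset_all();		/* callers without the lock use the wrapper */
	printf("counters[2] = %d\n", counters[2]);
	return 0;
}

The assert turns misuse of the _unlocked variant into an immediate, visible failure rather than silent corruption, which is what the lockdep annotation buys in the kernel version.
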
.. | .. |
3717 | 3727 | * will point to the same string as current_trace->name.
3718 | 3728 | */
3719 | 3729 | mutex_lock(&trace_types_lock);
3720 |      | - if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
     | 3730 | + if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
     | 3731 | + /* Close iter->trace before switching to the new current tracer */
     | 3732 | + if (iter->trace->close)
     | 3733 | + iter->trace->close(iter);
3721 | 3734 | *iter->trace = *tr->current_trace;
     | 3735 | + /* Reopen the new current tracer */
     | 3736 | + if (iter->trace->open)
     | 3737 | + iter->trace->open(iter);
     | 3738 | + }
3722 | 3739 | mutex_unlock(&trace_types_lock);
3723 | 3740 |
3724 | 3741 | #ifdef CONFIG_TRACER_MAX_TRACE
.. | .. |
4477 | 4494 | return 0;
4478 | 4495 | }
4479 | 4496 |
     | 4497 | +/*
     | 4498 | + * The private pointer of the inode is the trace_event_file.
     | 4499 | + * Update the tr ref count associated with it.
     | 4500 | + */
     | 4501 | +int tracing_open_file_tr(struct inode *inode, struct file *filp)
     | 4502 | +{
     | 4503 | + struct trace_event_file *file = inode->i_private;
     | 4504 | + int ret;
     | 4505 | +
     | 4506 | + ret = tracing_check_open_get_tr(file->tr);
     | 4507 | + if (ret)
     | 4508 | + return ret;
     | 4509 | +
     | 4510 | + filp->private_data = inode->i_private;
     | 4511 | +
     | 4512 | + return 0;
     | 4513 | +}
     | 4514 | +
     | 4515 | +int tracing_release_file_tr(struct inode *inode, struct file *filp)
     | 4516 | +{
     | 4517 | + struct trace_event_file *file = inode->i_private;
     | 4518 | +
     | 4519 | + trace_array_put(file->tr);
     | 4520 | +
     | 4521 | + return 0;
     | 4522 | +}
     | 4523 | +
4480 | 4524 | static int tracing_release(struct inode *inode, struct file *file)
4481 | 4525 | {
4482 | 4526 | struct trace_array *tr = inode->i_private;
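
The new tracing_open_file_tr()/tracing_release_file_tr() pair pins the trace_array for as long as the file stays open: ->open takes a reference through tracing_check_open_get_tr() and ->release drops it with trace_array_put(), so the instance cannot disappear underneath a reader. A rough userspace sketch of that open/release reference pairing, with hypothetical obj_get()/obj_put() helpers standing in for the kernel calls (an illustration, not the tracing code):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	const char *name;
};

/* Fails if the object is already being torn down, like tracing_check_open_get_tr(). */
static int obj_get(struct obj *o)
{
	if (o->refcount <= 0)
		return -1;
	o->refcount++;
	return 0;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("freeing %s\n", o->name);
		free(o);
	}
}

/* "open": pin the object for the lifetime of the handle. */
static struct obj *handle_open(struct obj *o)
{
	return obj_get(o) ? NULL : o;
}

/* "release": unpin; the final put frees the object. */
static void handle_release(struct obj *o)
{
	obj_put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;		/* creator's reference */
	o->name = "instance";

	struct obj *h = handle_open(o);	/* reader opens the file */
	obj_put(o);			/* creator drops its ref: object survives */
	handle_release(h);		/* last reference gone: freed here */
	return 0;
}
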
.. | .. |
4706 | 4750 | static const struct file_operations tracing_fops = {
4707 | 4751 | .open = tracing_open,
4708 | 4752 | .read = seq_read,
     | 4753 | + .read_iter = seq_read_iter,
     | 4754 | + .splice_read = generic_file_splice_read,
4709 | 4755 | .write = tracing_write_stub,
4710 | 4756 | .llseek = tracing_lseek,
4711 | 4757 | .release = tracing_release,
.. | .. |
4765 | 4811 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4766 | 4812 | atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4767 | 4813 | ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
     | 4814 | +#ifdef CONFIG_TRACER_MAX_TRACE
     | 4815 | + ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
     | 4816 | +#endif
4768 | 4817 | }
4769 | 4818 | if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4770 | 4819 | cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4771 | 4820 | atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4772 | 4821 | ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
     | 4822 | +#ifdef CONFIG_TRACER_MAX_TRACE
     | 4823 | + ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
     | 4824 | +#endif
4773 | 4825 | }
4774 | 4826 | }
4775 | 4827 | arch_spin_unlock(&tr->max_lock);
.. | .. |
6240 | 6292 | mutex_unlock(&trace_types_lock);
6241 | 6293 |
6242 | 6294 | free_cpumask_var(iter->started);
     | 6295 | + kfree(iter->temp);
6243 | 6296 | mutex_destroy(&iter->mutex);
6244 | 6297 | kfree(iter);
6245 | 6298 |
.. | .. |
6372 | 6425 |
6373 | 6426 | ret = print_trace_line(iter);
6374 | 6427 | if (ret == TRACE_TYPE_PARTIAL_LINE) {
6375 |      | - /* don't print partial lines */
     | 6428 | + /*
     | 6429 | + * If one print_trace_line() fills the entire trace_seq in one shot,
     | 6430 | + * trace_seq_to_user() will return -EBUSY because save_len == 0.
     | 6431 | + * In this case, we need to consume it; otherwise, the loop will peek
     | 6432 | + * this event next time, resulting in an infinite loop.
     | 6433 | + */
     | 6434 | + if (save_len == 0) {
     | 6435 | + iter->seq.full = 0;
     | 6436 | + trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
     | 6437 | + trace_consume(iter);
     | 6438 | + break;
     | 6439 | + }
     | 6440 | +
     | 6441 | + /* In other cases, don't print partial lines */
6376 | 6442 | iter->seq.seq.len = save_len;
6377 | 6443 | break;
6378 | 6444 | }
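
The added branch handles the case where a single formatted event is bigger than the entire trace_seq: the partial output is dropped, a "[LINE TOO BIG]" marker is emitted, and the event is consumed so the read loop cannot peek the same oversized event forever. A rough userspace analogy of that "consume or loop forever" decision, using snprintf() into a small fixed buffer and an invented record list (not the kernel code):

#include <stdio.h>

#define OUT_SIZE 32

static const char *records[] = {
	"short event",
	"an event whose rendered text is far longer than the whole output buffer",
	"another short event",
	NULL,
};

int main(void)
{
	char out[OUT_SIZE];
	size_t used = 0;

	for (int i = 0; records[i]; ) {
		int n = snprintf(out + used, OUT_SIZE - used, "%s\n", records[i]);

		if (n >= (int)(OUT_SIZE - used)) {
			/*
			 * Truncated. If the buffer was empty, retrying can never
			 * succeed: emit a marker and consume the record instead
			 * of spinning on it (the save_len == 0 case above).
			 */
			if (used == 0) {
				fputs("[LINE TOO BIG]\n", stdout);
				i++;
				continue;
			}
			/* Otherwise flush what fits and retry this record. */
			out[used] = '\0';
			fputs(out, stdout);
			used = 0;
			continue;
		}
		used += n;
		i++;
	}
	out[used] = '\0';
	fputs(out, stdout);
	return 0;
}
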
.. | .. |
7027 | 7093 | return ret;
7028 | 7094 | }
7029 | 7095 |
     | 7096 | +static void tracing_swap_cpu_buffer(void *tr)
     | 7097 | +{
     | 7098 | + update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
     | 7099 | +}
     | 7100 | +
7030 | 7101 | static ssize_t
7031 | 7102 | tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7032 | 7103 | loff_t *ppos)
.. | .. |
7085 | 7156 | ret = tracing_alloc_snapshot_instance(tr);
7086 | 7157 | if (ret < 0)
7087 | 7158 | break;
7088 |      | - local_irq_disable();
7089 | 7159 | /* Now, we're going to swap */
7090 |      | - if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
     | 7160 | + if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
     | 7161 | + local_irq_disable();
7091 | 7162 | update_max_tr(tr, current, smp_processor_id(), NULL);
7092 |      | - else
7093 |      | - update_max_tr_single(tr, current, iter->cpu_file);
7094 |      | - local_irq_enable();
     | 7163 | + local_irq_enable();
     | 7164 | + } else {
     | 7165 | + smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
     | 7166 | + (void *)tr, 1);
     | 7167 | + }
7095 | 7168 | break;
7096 | 7169 | default:
7097 | 7170 | if (tr->allocated_snapshot) {
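
With the change above, swapping a single CPU's snapshot buffer is no longer done from whichever CPU the writer happens to be on with interrupts disabled; smp_call_function_single() runs tracing_swap_cpu_buffer(), and therefore update_max_tr_single(), on the CPU that owns the buffer. A loose userspace analogy is moving the calling thread onto the target CPU before doing per-CPU work; a sketch using the Linux-specific pthread_setaffinity_np(), where do_per_cpu_work() is a made-up stand-in for the swap:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Hypothetical per-CPU operation; in the patch this is update_max_tr_single(). */
static void do_per_cpu_work(int cpu)
{
	printf("working on CPU %d (currently on CPU %d)\n", cpu, sched_getcpu());
}

/* Userspace stand-in for "run this function on the given CPU". */
static int run_on_cpu(int cpu)
{
	cpu_set_t set;
	int err;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
	if (err)
		return err;

	do_per_cpu_work(cpu);
	return 0;
}

int main(void)
{
	int err = run_on_cpu(0);

	if (err)
		fprintf(stderr, "could not move to CPU 0: %d\n", err);
	return err;
}
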
.. | .. |
7180 | 7253 | #endif
7181 | 7254 |
7182 | 7255 | static const struct file_operations set_tracer_fops = {
7183 |      | - .open = tracing_open_generic,
     | 7256 | + .open = tracing_open_generic_tr,
7184 | 7257 | .read = tracing_set_trace_read,
7185 | 7258 | .write = tracing_set_trace_write,
7186 | 7259 | .llseek = generic_file_llseek,
     | 7260 | + .release = tracing_release_generic_tr,
7187 | 7261 | };
7188 | 7262 |
7189 | 7263 | static const struct file_operations tracing_pipe_fops = {
.. | .. |
7506 | 7580 | .open = tracing_err_log_open,
7507 | 7581 | .write = tracing_err_log_write,
7508 | 7582 | .read = seq_read,
7509 |      | - .llseek = seq_lseek,
     | 7583 | + .llseek = tracing_lseek,
7510 | 7584 | .release = tracing_err_log_release,
7511 | 7585 | };
7512 | 7586 |
.. | .. |
8222 | 8296 | return cnt;
8223 | 8297 | }
8224 | 8298 |
     | 8299 | +static int tracing_open_options(struct inode *inode, struct file *filp)
     | 8300 | +{
     | 8301 | + struct trace_option_dentry *topt = inode->i_private;
     | 8302 | + int ret;
     | 8303 | +
     | 8304 | + ret = tracing_check_open_get_tr(topt->tr);
     | 8305 | + if (ret)
     | 8306 | + return ret;
     | 8307 | +
     | 8308 | + filp->private_data = inode->i_private;
     | 8309 | + return 0;
     | 8310 | +}
     | 8311 | +
     | 8312 | +static int tracing_release_options(struct inode *inode, struct file *file)
     | 8313 | +{
     | 8314 | + struct trace_option_dentry *topt = file->private_data;
     | 8315 | +
     | 8316 | + trace_array_put(topt->tr);
     | 8317 | + return 0;
     | 8318 | +}
8225 | 8319 |
8226 | 8320 | static const struct file_operations trace_options_fops = {
8227 |      | - .open = tracing_open_generic,
     | 8321 | + .open = tracing_open_options,
8228 | 8322 | .read = trace_options_read,
8229 | 8323 | .write = trace_options_write,
8230 | 8324 | .llseek = generic_file_llseek,
     | 8325 | + .release = tracing_release_options,
8231 | 8326 | };
8232 | 8327 |
8233 | 8328 | /*
.. | .. |
8557 | 8652 | if (val > 100)
8558 | 8653 | return -EINVAL;
8559 | 8654 |
8560 |      | - if (!val)
8561 |      | - val = 1;
8562 |      | -
8563 | 8655 | tr->buffer_percent = val;
8564 | 8656 |
8565 | 8657 | (*ppos)++;
.. | .. |
8884 | 8976 | ftrace_destroy_function_files(tr);
8885 | 8977 | tracefs_remove(tr->dir);
8886 | 8978 | free_trace_buffers(tr);
     | 8979 | + clear_tracing_err_log(tr);
8887 | 8980 |
8888 | 8981 | for (i = 0; i < tr->nr_topts; i++) {
8889 | 8982 | kfree(tr->topts[i].topts);
.. | .. |
9706 | 9799 | static_key_enable(&tracepoint_printk_key.key);
9707 | 9800 | }
9708 | 9801 | tracer_alloc_buffers();
     | 9802 | +
     | 9803 | + init_events();
9709 | 9804 | }
9710 | 9805 |
9711 | 9806 | void __init trace_init(void)
---|