@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * latencytop.c: Latency display infrastructure
  *
  * (C) Copyright 2008 Intel Corporation
  * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
  */
 
 /*
@@ -67,12 +63,9 @@
 
 int latencytop_enabled;
 
-void clear_all_latency_tracing(struct task_struct *p)
+void clear_tsk_latency_tracing(struct task_struct *p)
 {
 	unsigned long flags;
-
-	if (!latencytop_enabled)
-		return;
 
 	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&p->latency_record, 0, sizeof(p->latency_record));
@@ -96,9 +89,6 @@
 	int firstnonnull = MAXLR + 1;
 	int i;
 
-	if (!latencytop_enabled)
-		return;
-
 	/* skip kernel threads for now */
 	if (!tsk->mm)
 		return;
@@ -120,8 +110,8 @@
 				break;
 			}
 
-			/* 0 and ULONG_MAX entries mean end of backtrace: */
-			if (record == 0 || record == ULONG_MAX)
+			/* 0 entry marks end of backtrace: */
+			if (!record)
 				break;
 		}
 		if (same) {
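Note on this hunk and the matching one further down: the reworked stacktrace code no longer pads saved traces with ULONG_MAX, so a 0 entry is the only end-of-backtrace marker left to check. A minimal sketch of walking such a zero-terminated array; the helper name and depth parameter are illustrative, not part of the patch:

/* Count the valid entries in a zero-terminated backtrace array. */
static unsigned int demo_backtrace_depth(const unsigned long *bt,
					 unsigned int max_depth)
{
	unsigned int i;

	for (i = 0; i < max_depth; i++) {
		/* A 0 entry marks the end of a short trace. */
		if (!bt[i])
			break;
	}

	return i;
}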
@@ -139,20 +129,6 @@
 
 	/* Allocted a new one: */
 	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
-}
-
-/*
- * Iterator to store a backtrace into a latency record entry
- */
-static inline void store_stacktrace(struct task_struct *tsk,
-					struct latency_record *lat)
-{
-	struct stack_trace trace;
-
-	memset(&trace, 0, sizeof(trace));
-	trace.max_entries = LT_BACKTRACEDEPTH;
-	trace.entries = &lat->backtrace[0];
-	save_stack_trace_tsk(tsk, &trace);
 }
 
 /**
@@ -191,7 +167,8 @@
 	lat.count = 1;
 	lat.time = usecs;
 	lat.max = usecs;
-	store_stacktrace(tsk, &lat);
+
+	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);
 
 	raw_spin_lock_irqsave(&latency_lock, flags);
 
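For reference on the two hunks above: the open-coded struct stack_trace plus save_stack_trace_tsk() pair is replaced by stack_trace_save_tsk() from <linux/stacktrace.h>, which writes directly into a plain unsigned long array and returns the number of entries it stored. A minimal sketch of the same pattern outside latencytop; the struct, field, and DEMO_* names are stand-ins, only stack_trace_save_tsk() itself is a real kernel API:

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/string.h>

#define DEMO_DEPTH 12			/* stand-in for LT_BACKTRACEDEPTH */

struct demo_record {
	unsigned long backtrace[DEMO_DEPTH];	/* illustrative record */
};

static void demo_fill_backtrace(struct task_struct *tsk,
				struct demo_record *rec)
{
	unsigned int nr;

	/*
	 * stack_trace_save_tsk(task, store, size, skipnr) stores at most
	 * 'size' entries and returns how many it wrote; slots beyond that
	 * are left untouched, so the array is zeroed first to keep the
	 * 0 end-of-backtrace convention used in the loops above.
	 */
	memset(rec->backtrace, 0, sizeof(rec->backtrace));
	nr = stack_trace_save_tsk(tsk, rec->backtrace, DEMO_DEPTH, 0);
	(void)nr;
}

In the patched function itself the same effect comes from the earlier memset() of the local latency_record, so the new call can drop into place of store_stacktrace() directly.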
@@ -210,8 +187,8 @@
 				break;
 			}
 
-			/* 0 and ULONG_MAX entries mean end of backtrace: */
-			if (record == 0 || record == ULONG_MAX)
+			/* 0 entry is end of backtrace */
+			if (!record)
 				break;
 		}
 		if (same) {
@@ -252,10 +229,10 @@
 				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 				unsigned long bt = lr->backtrace[q];
+
 				if (!bt)
 					break;
-				if (bt == ULONG_MAX)
-					break;
+
 				seq_printf(m, " %ps", (void *)bt);
 			}
 			seq_puts(m, "\n");
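Aside on the %ps format kept in this hunk: it resolves a kernel text address to its bare symbol name, whereas %pS would also append offset/size. A tiny seq_file sketch with an illustrative helper name:

#include <linux/seq_file.h>

/* Print one saved backtrace entry as a bare symbol name. */
static void demo_print_entry(struct seq_file *m, unsigned long addr)
{
	/* %ps prints the symbol only; %pS prints symbol+offset/size. */
	seq_printf(m, " %ps", (void *)addr);
}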
@@ -278,22 +255,22 @@
 	return single_open(filp, lstats_show, NULL);
 }
 
-static const struct file_operations lstats_fops = {
-	.open		= lstats_open,
-	.read		= seq_read,
-	.write		= lstats_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops lstats_proc_ops = {
+	.proc_open	= lstats_open,
+	.proc_read	= seq_read,
+	.proc_write	= lstats_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static int __init init_lstats_procfs(void)
 {
-	proc_create("latency_stats", 0644, NULL, &lstats_fops);
+	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
 	return 0;
 }
 
-int sysctl_latencytop(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp, loff_t *ppos)
+int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
+		size_t *lenp, loff_t *ppos)
 {
 	int err;
 
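On the final hunk: procfs entries now register a struct proc_ops instead of struct file_operations, and sysctl ->proc_handler callbacks receive a kernel-space buffer, so the __user annotation is gone and the proc_do* helpers operate on kernel memory directly. A minimal sketch of a handler and table using the new signature; all demo_* names are made up for illustration, only proc_dointvec() and the struct layouts are real:

#include <linux/sysctl.h>

static int demo_enabled;	/* stand-in for latencytop_enabled */

/* 'buffer' is a kernel pointer; the sysctl core copies to/from user space. */
static int demo_sysctl_handler(struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_dointvec(table, write, buffer, lenp, ppos);
}

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_enabled",
		.data		= &demo_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= demo_sysctl_handler,
	},
	{ }
};

A table like this would typically be registered with register_sysctl(); the handler body above mirrors the shape of sysctl_latencytop after the signature change, with no user-space copying left to do in the handler itself.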