2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/kernel/latencytop.c
+++ b/kernel/kernel/latencytop.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * latencytop.c: Latency display infrastructure
  *
  * (C) Copyright 2008 Intel Corporation
  * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
  */
 
 /*
@@ -67,12 +63,9 @@
 
 int latencytop_enabled;
 
-void clear_all_latency_tracing(struct task_struct *p)
+void clear_tsk_latency_tracing(struct task_struct *p)
 {
 	unsigned long flags;
-
-	if (!latencytop_enabled)
-		return;
 
 	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&p->latency_record, 0, sizeof(p->latency_record));
@@ -96,9 +89,6 @@
 	int firstnonnull = MAXLR + 1;
 	int i;
 
-	if (!latencytop_enabled)
-		return;
-
 	/* skip kernel threads for now */
 	if (!tsk->mm)
 		return;
@@ -120,8 +110,8 @@
 				break;
 			}
 
-			/* 0 and ULONG_MAX entries mean end of backtrace: */
-			if (record == 0 || record == ULONG_MAX)
+			/* 0 entry marks end of backtrace: */
+			if (!record)
 				break;
 		}
 		if (same) {
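
With the reworked stacktrace code, a saved backtrace is a plain array of instruction pointers and unused slots stay zero, so a 0 entry is the only end-of-trace marker; the old ULONG_MAX sentinel is no longer emitted. A minimal sketch of walking such a zero-terminated record, mirroring the loop above (print_backtrace() is a hypothetical helper, not part of this file):

#include <linux/printk.h>

/* Hypothetical helper: dump a fixed-size, zero-terminated backtrace array. */
static void print_backtrace(const unsigned long *bt, unsigned int depth)
{
	unsigned int i;

	for (i = 0; i < depth; i++) {
		/* 0 entry marks end of backtrace */
		if (!bt[i])
			break;
		pr_info(" %ps\n", (void *)bt[i]);
	}
}
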
@@ -139,20 +129,6 @@
 
 	/* Allocted a new one: */
 	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
-}
-
-/*
- * Iterator to store a backtrace into a latency record entry
- */
-static inline void store_stacktrace(struct task_struct *tsk,
-					struct latency_record *lat)
-{
-	struct stack_trace trace;
-
-	memset(&trace, 0, sizeof(trace));
-	trace.max_entries = LT_BACKTRACEDEPTH;
-	trace.entries = &lat->backtrace[0];
-	save_stack_trace_tsk(tsk, &trace);
 }
 
 /**
@@ -191,7 +167,8 @@
 	lat.count = 1;
 	lat.time = usecs;
 	lat.max = usecs;
-	store_stacktrace(tsk, &lat);
+
+	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);
 
 	raw_spin_lock_irqsave(&latency_lock, flags);
 
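
The open-coded store_stacktrace() wrapper around struct stack_trace and save_stack_trace_tsk() is replaced by a single stack_trace_save_tsk() call, which takes the destination array, the maximum depth, and a skip count directly and returns the number of entries stored. A minimal before/after sketch; BT_DEPTH is a local illustration constant standing in for LT_BACKTRACEDEPTH, and the two functions belong to the old and new stacktrace interfaces respectively, so they would not normally appear side by side in one build:

#include <linux/sched.h>
#include <linux/stacktrace.h>

#define BT_DEPTH 12	/* stand-in for LT_BACKTRACEDEPTH */

/* Old interface: fill out a struct stack_trace descriptor by hand. */
static void save_old(struct task_struct *tsk, unsigned long *bt)
{
	struct stack_trace trace = {
		.max_entries	= BT_DEPTH,
		.entries	= bt,
	};

	save_stack_trace_tsk(tsk, &trace);
}

/* New interface: one call, no descriptor, returns the number of entries saved. */
static unsigned int save_new(struct task_struct *tsk, unsigned long *bt)
{
	return stack_trace_save_tsk(tsk, bt, BT_DEPTH, 0);
}
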
@@ -210,8 +187,8 @@
 				break;
 			}
 
-			/* 0 and ULONG_MAX entries mean end of backtrace: */
-			if (record == 0 || record == ULONG_MAX)
+			/* 0 entry is end of backtrace */
+			if (!record)
 				break;
 		}
 		if (same) {
@@ -252,10 +229,10 @@
 				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 				unsigned long bt = lr->backtrace[q];
+
 				if (!bt)
 					break;
-				if (bt == ULONG_MAX)
-					break;
+
 				seq_printf(m, " %ps", (void *)bt);
 			}
 			seq_puts(m, "\n");
@@ -278,22 +255,22 @@
 	return single_open(filp, lstats_show, NULL);
 }
 
-static const struct file_operations lstats_fops = {
-	.open		= lstats_open,
-	.read		= seq_read,
-	.write		= lstats_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
+static const struct proc_ops lstats_proc_ops = {
+	.proc_open	= lstats_open,
+	.proc_read	= seq_read,
+	.proc_write	= lstats_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
 };
 
 static int __init init_lstats_procfs(void)
 {
-	proc_create("latency_stats", 0644, NULL, &lstats_fops);
+	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
 	return 0;
 }
 
-int sysctl_latencytop(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
+int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
+		size_t *lenp, loff_t *ppos)
 {
 	int err;
 
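
Two API migrations land in this last hunk: procfs entries created with proc_create() now take a struct proc_ops instead of a struct file_operations, and sysctl handlers receive a kernel-space buffer (plain void *) rather than a void __user * pointer. A minimal sketch of the proc_ops pattern for a seq_file-backed, read-only entry; all names here are hypothetical and for illustration only, not part of this patch:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical seq_file show callback for a read-only procfs entry. */
static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from /proc/example_stats\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, example_show, NULL);
}

/* proc_ops replaces file_operations for proc_create() users. */
static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init example_init(void)
{
	proc_create("example_stats", 0444, NULL, &example_proc_ops);
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example_stats", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
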