@@ -33,26 +33,10 @@
 	struct list_head	works;		/* list of pending works */
 
 	struct cpu_stop_work	stop_work;	/* for stop_cpus */
-	unsigned long		caller;
-	cpu_stop_fn_t		fn;
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 static bool stop_machine_initialized = false;
-
-void print_stop_info(const char *log_lvl, struct task_struct *task)
-{
-	/*
-	 * If @task is a stopper task, it cannot migrate and task_cpu() is
-	 * stable.
-	 */
-	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
-
-	if (task != stopper->thread)
-		return;
-
-	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
-}
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
@@ -130,7 +114,7 @@
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 {
 	struct cpu_stop_done done;
-	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
+	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
 	cpu_stop_init_done(&done, 1);
 	if (!cpu_stop_queue_work(cpu, &work))
@@ -338,8 +322,7 @@
 	work1 = work2 = (struct cpu_stop_work){
 		.fn = multi_cpu_stop,
 		.arg = &msdata,
-		.done = &done,
-		.caller = _RET_IP_,
+		.done = &done
 	};
 
 	cpu_stop_init_done(&done, 2);
@@ -375,7 +358,7 @@
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
 {
-	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
+	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
 	return cpu_stop_queue_work(cpu, work_buf);
 }
 EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
@@ -544,8 +527,6 @@
 	int ret;
 
 	/* cpu stop callbacks must not sleep, make in_atomic() == T */
-	stopper->caller = work->caller;
-	stopper->fn = fn;
 	preempt_count_inc();
 	ret = fn(arg);
 	if (done) {
@@ -554,8 +535,6 @@
 		cpu_stop_signal_done(done);
 	}
 	preempt_count_dec();
-	stopper->fn = NULL;
-	stopper->caller = 0;
 	WARN_ONCE(preempt_count(),
		  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
 	goto repeat;