forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/x86/kernel/hw_breakpoint.c
@@ -1,17 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  * Copyright (C) 2007 Alan Stern
  * Copyright (C) 2009 IBM Corporation
@@ -44,6 +32,8 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <asm/user.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
 
 /* Per cpu debug control register value */
 DEFINE_PER_CPU(unsigned long, cpu_dr7);
@@ -109,6 +99,8 @@
 	unsigned long *dr7;
 	int i;
 
+	lockdep_assert_irqs_disabled();
+
 	for (i = 0; i < HBP_NUM; i++) {
 		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
@@ -126,6 +118,12 @@
 
 	dr7 = this_cpu_ptr(&cpu_dr7);
 	*dr7 |= encode_dr7(i, info->len, info->type);
+
+	/*
+	 * Ensure we first write cpu_dr7 before we set the DR7 register.
+	 * This ensures an NMI never see cpu_dr7 0 when DR7 is not.
+	 */
+	barrier();
 
 	set_debugreg(*dr7, 7);
 	if (info->mask)
@@ -146,8 +144,10 @@
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	unsigned long *dr7;
+	unsigned long dr7;
 	int i;
+
+	lockdep_assert_irqs_disabled();
 
 	for (i = 0; i < HBP_NUM; i++) {
 		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
@@ -161,12 +161,20 @@
 	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
 		return;
 
-	dr7 = this_cpu_ptr(&cpu_dr7);
-	*dr7 &= ~__encode_dr7(i, info->len, info->type);
+	dr7 = this_cpu_read(cpu_dr7);
+	dr7 &= ~__encode_dr7(i, info->len, info->type);
 
-	set_debugreg(*dr7, 7);
+	set_debugreg(dr7, 7);
 	if (info->mask)
 		set_dr_addr_mask(0, i);
+
+	/*
+	 * Ensure the write to cpu_dr7 is after we've set the DR7 register.
+	 * This ensures an NMI never see cpu_dr7 0 when DR7 is not.
+	 */
+	barrier();
+
+	this_cpu_write(cpu_dr7, dr7);
 }
 
 static int arch_bp_generic_len(int x86_len)
@@ -239,10 +247,98 @@
 	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
 }
 
+/*
+ * Checks whether the range [addr, end], overlaps the area [base, base + size).
+ */
+static inline bool within_area(unsigned long addr, unsigned long end,
+			       unsigned long base, unsigned long size)
+{
+	return end >= base && addr < (base + size);
+}
+
+/*
+ * Checks whether the range from addr to end, inclusive, overlaps the fixed
+ * mapped CPU entry area range or other ranges used for CPU entry.
+ */
+static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+{
+	int cpu;
+
+	/* CPU entry erea is always used for CPU entry */
+	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
+			CPU_ENTRY_AREA_TOTAL_SIZE))
+		return true;
+
+	/*
+	 * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
+	 * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
+	 */
+#ifdef CONFIG_SMP
+	if (within_area(addr, end, (unsigned long)__per_cpu_offset,
+			sizeof(unsigned long) * nr_cpu_ids))
+		return true;
+#else
+	if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
+			sizeof(pcpu_unit_offsets)))
+		return true;
+#endif
+
+	for_each_possible_cpu(cpu) {
+		/* The original rw GDT is being used after load_direct_gdt() */
+		if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
+				GDT_SIZE))
+			return true;
+
+		/*
+		 * cpu_tss_rw is not directly referenced by hardware, but
+		 * cpu_tss_rw is also used in CPU entry code,
+		 */
+		if (within_area(addr, end,
+				(unsigned long)&per_cpu(cpu_tss_rw, cpu),
+				sizeof(struct tss_struct)))
+			return true;
+
+		/*
+		 * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry.
+		 * If a data breakpoint on it, it will cause an unwanted #DB.
+		 * Protect the full cpu_tlbstate structure to be sure.
+		 */
+		if (within_area(addr, end,
+				(unsigned long)&per_cpu(cpu_tlbstate, cpu),
+				sizeof(struct tlb_state)))
+			return true;
+
+		/*
+		 * When in guest (X86_FEATURE_HYPERVISOR), local_db_save()
+		 * will read per-cpu cpu_dr7 before clear dr7 register.
+		 */
+		if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
+				sizeof(cpu_dr7)))
+			return true;
+	}
+
+	return false;
+}
+
 static int arch_build_bp_info(struct perf_event *bp,
 			      const struct perf_event_attr *attr,
 			      struct arch_hw_breakpoint *hw)
 {
+	unsigned long bp_end;
+
+	bp_end = attr->bp_addr + attr->bp_len - 1;
+	if (bp_end < attr->bp_addr)
+		return -EINVAL;
+
+	/*
+	 * Prevent any breakpoint of any type that overlaps the CPU
+	 * entry area and data. This protects the IST stacks and also
+	 * reduces the chance that we ever find out what happens if
+	 * there's a data breakpoint on the GDT, IDT, or TSS.
+	 */
+	if (within_cpu_entry(attr->bp_addr, bp_end))
+		return -EINVAL;
+
 	hw->address = attr->bp_addr;
 	hw->mask = 0;
 
@@ -261,12 +357,8 @@
 	 * allow kernel breakpoints at all.
 	 */
 	if (attr->bp_addr >= TASK_SIZE_MAX) {
-#ifdef CONFIG_KPROBES
 		if (within_kprobe_blacklist(attr->bp_addr))
 			return -EINVAL;
-#else
-		return -EINVAL;
-#endif
 	}
 
 	hw->type = X86_BREAKPOINT_EXECUTE;
@@ -279,6 +371,7 @@
 			hw->len = X86_BREAKPOINT_LEN_X;
 			return 0;
 		}
+		fallthrough;
 	default:
 		return -EINVAL;
 	}
@@ -371,42 +464,6 @@
 }
 
 /*
- * Dump the debug register contents to the user.
- * We can't dump our per cpu values because it
- * may contain cpu wide breakpoint, something that
- * doesn't belong to the current task.
- *
- * TODO: include non-ptrace user breakpoints (perf)
- */
-void aout_dump_debugregs(struct user *dump)
-{
-	int i;
-	int dr7 = 0;
-	struct perf_event *bp;
-	struct arch_hw_breakpoint *info;
-	struct thread_struct *thread = &current->thread;
-
-	for (i = 0; i < HBP_NUM; i++) {
-		bp = thread->ptrace_bps[i];
-
-		if (bp && !bp->attr.disabled) {
-			dump->u_debugreg[i] = bp->attr.bp_addr;
-			info = counter_arch_bp(bp);
-			dr7 |= encode_dr7(i, info->len, info->type);
-		} else {
-			dump->u_debugreg[i] = 0;
-		}
-	}
-
-	dump->u_debugreg[4] = 0;
-	dump->u_debugreg[5] = 0;
-	dump->u_debugreg[6] = current->thread.debugreg6;
-
-	dump->u_debugreg[7] = dr7;
-}
-EXPORT_SYMBOL_GPL(aout_dump_debugregs);
-
-/*
  * Release the user breakpoints used by ptrace
  */
 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
@@ -419,7 +476,7 @@
 		t->ptrace_bps[i] = NULL;
 	}
 
-	t->debugreg6 = 0;
+	t->virtual_dr6 = 0;
 	t->ptrace_dr7 = 0;
 }
 
@@ -429,7 +486,7 @@
 	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
 	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
 	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
-	set_debugreg(current->thread.debugreg6, 6);
+	set_debugreg(DR6_RESERVED, 6);
 	set_debugreg(__this_cpu_read(cpu_dr7), 7);
 }
 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
@@ -452,61 +509,48 @@
  */
 static int hw_breakpoint_handler(struct die_args *args)
 {
-	int i, cpu, rc = NOTIFY_STOP;
+	int i, rc = NOTIFY_STOP;
 	struct perf_event *bp;
-	unsigned long dr7, dr6;
 	unsigned long *dr6_p;
+	unsigned long dr6;
+	bool bpx;
 
 	/* The DR6 value is pointed by args->err */
 	dr6_p = (unsigned long *)ERR_PTR(args->err);
 	dr6 = *dr6_p;
 
-	/* If it's a single step, TRAP bits are random */
-	if (dr6 & DR_STEP)
-		return NOTIFY_DONE;
-
 	/* Do an early return if no trap bits are set in DR6 */
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
-
-	get_debugreg(dr7, 7);
-	/* Disable breakpoints during exception handling */
-	set_debugreg(0UL, 7);
-	/*
-	 * Assert that local interrupts are disabled
-	 * Reset the DRn bits in the virtualized register value.
-	 * The ptrace trigger routine will add in whatever is needed.
-	 */
-	current->thread.debugreg6 &= ~DR_TRAP_BITS;
-	cpu = get_cpu();
 
 	/* Handle all the breakpoints that were triggered */
 	for (i = 0; i < HBP_NUM; ++i) {
 		if (likely(!(dr6 & (DR_TRAP0 << i))))
 			continue;
 
-		/*
-		 * The counter may be concurrently released but that can only
-		 * occur from a call_rcu() path. We can then safely fetch
-		 * the breakpoint, use its callback, touch its counter
-		 * while we are in an rcu_read_lock() path.
-		 */
-		rcu_read_lock();
+		bp = this_cpu_read(bp_per_reg[i]);
+		if (!bp)
+			continue;
 
-		bp = per_cpu(bp_per_reg[i], cpu);
+		bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;
+
+		/*
+		 * TF and data breakpoints are traps and can be merged, however
+		 * instruction breakpoints are faults and will be raised
+		 * separately.
+		 *
+		 * However DR6 can indicate both TF and instruction
+		 * breakpoints. In that case take TF as that has precedence and
+		 * delay the instruction breakpoint for the next exception.
+		 */
+		if (bpx && (dr6 & DR_STEP))
+			continue;
+
 		/*
 		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
 		 * exception handling
 		 */
 		(*dr6_p) &= ~(DR_TRAP0 << i);
-		/*
-		 * bp can be NULL due to lazy debug register switching
-		 * or due to concurrent perf counter removing.
-		 */
-		if (!bp) {
-			rcu_read_unlock();
-			break;
-		}
 
 		perf_bp_event(bp, args->regs);
 
@@ -514,22 +558,18 @@
 		 * Set up resume flag to avoid breakpoint recursion when
 		 * returning back to origin.
 		 */
-		if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
+		if (bpx)
 			args->regs->flags |= X86_EFLAGS_RF;
-
-		rcu_read_unlock();
 	}
+
 	/*
 	 * Further processing in do_debug() is needed for a) user-space
 	 * breakpoints (to generate signals) and b) when the system has
 	 * taken exception due to multiple causes
 	 */
-	if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
+	if ((current->thread.virtual_dr6 & DR_TRAP_BITS) ||
 	    (dr6 & (~DR_TRAP_BITS)))
 		rc = NOTIFY_DONE;
-
-	set_debugreg(dr7, 7);
-	put_cpu();
 
 	return rc;
 }