forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/arch/powerpc/kernel/hw_breakpoint.c
@@ -1,25 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
  * using the CPU's debug registers. Derived from
  * "arch/x86/kernel/hw_breakpoint.c"
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright 2010 IBM Corporation
  * Author: K.Prasad <prasad@linux.vnet.ibm.com>
- *
  */
 
 #include <linux/hw_breakpoint.h>
@@ -29,18 +15,23 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
 
 #include <asm/hw_breakpoint.h>
 #include <asm/processor.h>
 #include <asm/sstep.h>
 #include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/hvcall.h>
+#include <asm/inst.h>
 #include <linux/uaccess.h>
 
 /*
  * Stores the breakpoints currently in use on each breakpoint address
  * register for every cpu
  */
-static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
 
 /*
  * Returns total number of data or instruction breakpoints available.
@@ -48,8 +39,19 @@
 int hw_breakpoint_slots(int type)
 {
         if (type == TYPE_DATA)
-                return HBP_NUM;
+                return nr_wp_slots();
         return 0;   /* no instruction breakpoints available */
+}
+
+static bool single_step_pending(void)
+{
+        int i;
+
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (current->thread.last_hit_ubp[i])
+                        return true;
+        }
+        return false;
 }
 
 /*
@@ -64,16 +66,26 @@
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-        struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+        struct perf_event **slot;
+        int i;
 
-        *slot = bp;
+        for (i = 0; i < nr_wp_slots(); i++) {
+                slot = this_cpu_ptr(&bp_per_reg[i]);
+                if (!*slot) {
+                        *slot = bp;
+                        break;
+                }
+        }
+
+        if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
+                return -EBUSY;
 
         /*
          * Do not install DABR values if the instruction must be single-stepped.
          * If so, DABR will be populated in single_step_dabr_instruction().
          */
-        if (current->thread.last_hit_ubp != bp)
-                __set_breakpoint(info);
+        if (!single_step_pending())
+                __set_breakpoint(i, info);
 
         return 0;
 }
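
For context, the event that ends up in one of these per-CPU slots is an ordinary PERF_TYPE_BREAKPOINT perf event. Below is a minimal user-space sketch of such a request (illustration only, not part of this diff; the helper name watch_write is made up and error handling is elided):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int watch_write(void *addr)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_BREAKPOINT;
        attr.bp_type = HW_BREAKPOINT_W;         /* break on write */
        attr.bp_addr = (unsigned long)addr;
        attr.bp_len = HW_BREAKPOINT_LEN_8;      /* one doubleword */

        /* pid = 0 (this task), cpu = -1 (any CPU): a "task" breakpoint */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
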
@@ -89,15 +101,248 @@
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-        struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+        struct arch_hw_breakpoint null_brk = {0};
+        struct perf_event **slot;
+        int i;
 
-        if (*slot != bp) {
-                WARN_ONCE(1, "Can't find the breakpoint");
-                return;
+        for (i = 0; i < nr_wp_slots(); i++) {
+                slot = this_cpu_ptr(&bp_per_reg[i]);
+                if (*slot == bp) {
+                        *slot = NULL;
+                        break;
+                }
         }
 
-        *slot = NULL;
-        hw_breakpoint_disable();
+        if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
+                return;
+
+        __set_breakpoint(i, &null_brk);
+}
+
+static bool is_ptrace_bp(struct perf_event *bp)
+{
+        return bp->overflow_handler == ptrace_triggered;
+}
+
+struct breakpoint {
+        struct list_head list;
+        struct perf_event *bp;
+        bool ptrace_bp;
+};
+
+static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static LIST_HEAD(task_bps);
+
+static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
+{
+        struct breakpoint *tmp;
+
+        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+        if (!tmp)
+                return ERR_PTR(-ENOMEM);
+        tmp->bp = bp;
+        tmp->ptrace_bp = is_ptrace_bp(bp);
+        return tmp;
+}
+
+static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
+{
+        __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
+
+        bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
+        bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
+        bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
+        bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);
+
+        return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
+}
+
+static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
+{
+        return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
+}
+
+static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
+{
+        return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
+}
+
+static int task_bps_add(struct perf_event *bp)
+{
+        struct breakpoint *tmp;
+
+        tmp = alloc_breakpoint(bp);
+        if (IS_ERR(tmp))
+                return PTR_ERR(tmp);
+
+        list_add(&tmp->list, &task_bps);
+        return 0;
+}
+
+static void task_bps_remove(struct perf_event *bp)
+{
+        struct list_head *pos, *q;
+
+        list_for_each_safe(pos, q, &task_bps) {
+                struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
+
+                if (tmp->bp == bp) {
+                        list_del(&tmp->list);
+                        kfree(tmp);
+                        break;
+                }
+        }
+}
+
+/*
+ * If any task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool all_task_bps_check(struct perf_event *bp)
+{
+        struct breakpoint *tmp;
+
+        list_for_each_entry(tmp, &task_bps, list) {
+                if (!can_co_exist(tmp, bp))
+                        return true;
+        }
+        return false;
+}
+
+/*
+ * If the same task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool same_task_bps_check(struct perf_event *bp)
+{
+        struct breakpoint *tmp;
+
+        list_for_each_entry(tmp, &task_bps, list) {
+                if (tmp->bp->hw.target == bp->hw.target &&
+                    !can_co_exist(tmp, bp))
+                        return true;
+        }
+        return false;
+}
+
+static int cpu_bps_add(struct perf_event *bp)
+{
+        struct breakpoint **cpu_bp;
+        struct breakpoint *tmp;
+        int i = 0;
+
+        tmp = alloc_breakpoint(bp);
+        if (IS_ERR(tmp))
+                return PTR_ERR(tmp);
+
+        cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (!cpu_bp[i]) {
+                        cpu_bp[i] = tmp;
+                        break;
+                }
+        }
+        return 0;
+}
+
+static void cpu_bps_remove(struct perf_event *bp)
+{
+        struct breakpoint **cpu_bp;
+        int i = 0;
+
+        cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (!cpu_bp[i])
+                        continue;
+
+                if (cpu_bp[i]->bp == bp) {
+                        kfree(cpu_bp[i]);
+                        cpu_bp[i] = NULL;
+                        break;
+                }
+        }
+}
+
+static bool cpu_bps_check(int cpu, struct perf_event *bp)
+{
+        struct breakpoint **cpu_bp;
+        int i;
+
+        cpu_bp = per_cpu_ptr(cpu_bps, cpu);
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
+                        return true;
+        }
+        return false;
+}
+
+static bool all_cpu_bps_check(struct perf_event *bp)
+{
+        int cpu;
+
+        for_each_online_cpu(cpu) {
+                if (cpu_bps_check(cpu, bp))
+                        return true;
+        }
+        return false;
+}
+
+/*
+ * We don't use any locks to serialize accesses to cpu_bps or task_bps
+ * because we are already inside nr_bp_mutex.
+ */
+int arch_reserve_bp_slot(struct perf_event *bp)
+{
+        int ret;
+
+        /* ptrace breakpoint */
+        if (is_ptrace_bp(bp)) {
+                if (all_cpu_bps_check(bp))
+                        return -ENOSPC;
+
+                if (same_task_bps_check(bp))
+                        return -ENOSPC;
+
+                return task_bps_add(bp);
+        }
+
+        /* perf breakpoint */
+        if (is_kernel_addr(bp->attr.bp_addr))
+                return 0;
+
+        if (bp->hw.target && bp->cpu == -1) {
+                if (same_task_bps_check(bp))
+                        return -ENOSPC;
+
+                return task_bps_add(bp);
+        } else if (!bp->hw.target && bp->cpu != -1) {
+                if (all_task_bps_check(bp))
+                        return -ENOSPC;
+
+                return cpu_bps_add(bp);
+        }
+
+        if (same_task_bps_check(bp))
+                return -ENOSPC;
+
+        ret = cpu_bps_add(bp);
+        if (ret)
+                return ret;
+        ret = task_bps_add(bp);
+        if (ret)
+                cpu_bps_remove(bp);
+
+        return ret;
+}
+
+void arch_release_bp_slot(struct perf_event *bp)
+{
+        if (!is_kernel_addr(bp->attr.bp_addr)) {
+                if (bp->hw.target)
+                        task_bps_remove(bp);
+                if (bp->cpu != -1)
+                        cpu_bps_remove(bp);
+        }
 }
 
 /*
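
The co-existence check above rounds both requests out to HW_BREAKPOINT_SIZE windows before comparing, so two byte-sized watchpoints that share a doubleword still conflict even though their byte ranges are disjoint. A standalone sketch with the ALIGN()/ALIGN_DOWN() macros expanded (illustration only, not part of this diff; HBP_SIZE stands in for HW_BREAKPOINT_SIZE):

#include <stdbool.h>
#include <stdint.h>

#define HBP_SIZE 8UL    /* stands in for HW_BREAKPOINT_SIZE */

static bool ranges_overlap(uint64_t a_addr, uint64_t a_len,
                           uint64_t b_addr, uint64_t b_len)
{
        uint64_t a_s = a_addr & ~(HBP_SIZE - 1);                          /* ALIGN_DOWN */
        uint64_t a_e = (a_addr + a_len + HBP_SIZE - 1) & ~(HBP_SIZE - 1); /* ALIGN */
        uint64_t b_s = b_addr & ~(HBP_SIZE - 1);
        uint64_t b_e = (b_addr + b_len + HBP_SIZE - 1) & ~(HBP_SIZE - 1);

        return a_s < b_e && a_e > b_s;
}

For example, ranges_overlap(0x1000, 1, 0x1007, 1) is true: both requests round out to the window [0x1000, 0x1008).
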
@@ -112,8 +357,14 @@
  * restoration variables to prevent dangling pointers.
  * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
  */
-        if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
-                bp->ctx->task->thread.last_hit_ubp = NULL;
+        if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
+                int i;
+
+                for (i = 0; i < nr_wp_slots(); i++) {
+                        if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
+                                bp->ctx->task->thread.last_hit_ubp[i] = NULL;
+                }
+        }
 }
 
 /*
@@ -137,15 +388,62 @@
 }
 
 /*
+ * Watchpoint match range is always doubleword (8 bytes) aligned on
+ * powerpc. If the given range crosses a doubleword boundary, we
+ * need to increase the length so that the next doubleword also gets
+ * covered. Example:
+ *
+ *          address        len = 6 bytes
+ *                |=========.
+ *   |------------v--|------v--------|
+ *   | | | | | | | | | | | | | | | | |
+ *   |---------------|---------------|
+ *    <---8 bytes--->
+ *
+ * In this case, we should configure hw as:
+ *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
+ *   len = 16 bytes
+ *
+ * @start_addr is inclusive but @end_addr is exclusive.
+ */
+static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
+{
+        u16 max_len = DABR_MAX_LEN;
+        u16 hw_len;
+        unsigned long start_addr, end_addr;
+
+        start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
+        end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
+        hw_len = end_addr - start_addr;
+
+        if (dawr_enabled()) {
+                max_len = DAWR_MAX_LEN;
+                /* DAWR region can't cross 512 bytes boundary on p10 predecessors */
+                if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
+                    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
+                        return -EINVAL;
+        } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+                /* 8xx can setup a range without limitation */
+                max_len = U16_MAX;
+        }
+
+        if (hw_len > max_len)
+                return -EINVAL;
+
+        hw->hw_len = hw_len;
+        return 0;
+}
+
+/*
  * Validate the arch-specific HW Breakpoint register settings
  */
 int hw_breakpoint_arch_parse(struct perf_event *bp,
                              const struct perf_event_attr *attr,
                              struct arch_hw_breakpoint *hw)
 {
-        int ret = -EINVAL, length_max;
+        int ret = -EINVAL;
 
-        if (!bp)
+        if (!bp || !attr->bp_len)
                 return ret;
 
         hw->type = HW_BRK_TYPE_TRANSLATE;
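
A worked instance of hw_breakpoint_validate_len() for the unaligned case pictured above, assuming address = 0x100e and len = 6 (with HW_BREAKPOINT_SIZE = 8):

        start_addr = ALIGN_DOWN(0x100e, 8) = 0x1008
        end_addr   = ALIGN(0x100e + 6, 8)  = ALIGN(0x1014, 8) = 0x1018
        hw_len     = 0x1018 - 0x1008       = 16

So a 6-byte request straddling a doubleword boundary is widened to two doublewords, and the widened length is then checked against DABR_MAX_LEN (or DAWR_MAX_LEN when dawr_enabled()).
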
@@ -165,26 +463,10 @@
         hw->address = attr->bp_addr;
         hw->len = attr->bp_len;
 
-        /*
-         * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
-         * and breakpoint addresses are aligned to nearest double-word
-         * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
-         * 'symbolsize' should satisfy the check below.
-         */
         if (!ppc_breakpoint_available())
                 return -ENODEV;
-        length_max = 8; /* DABR */
-        if (cpu_has_feature(CPU_FTR_DAWR)) {
-                length_max = 512 ; /* 64 doublewords */
-                /* DAWR region can't cross 512 boundary */
-                if ((attr->bp_addr >> 9) !=
-                    ((attr->bp_addr + attr->bp_len - 1) >> 9))
-                        return -EINVAL;
-        }
-        if (hw->len >
-            (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
-                return -EINVAL;
-        return 0;
+
+        return hw_breakpoint_validate_len(hw);
 }
 
 /*
@@ -195,30 +477,98 @@
 void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 {
         struct arch_hw_breakpoint *info;
+        int i;
 
-        if (likely(!tsk->thread.last_hit_ubp))
-                return;
+        preempt_disable();
 
-        info = counter_arch_bp(tsk->thread.last_hit_ubp);
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (unlikely(tsk->thread.last_hit_ubp[i]))
+                        goto reset;
+        }
+        goto out;
+
+reset:
         regs->msr &= ~MSR_SE;
-        __set_breakpoint(info);
-        tsk->thread.last_hit_ubp = NULL;
+        for (i = 0; i < nr_wp_slots(); i++) {
+                info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
+                __set_breakpoint(i, info);
+                tsk->thread.last_hit_ubp[i] = NULL;
+        }
+
+out:
+        preempt_enable();
+}
+
+static bool is_larx_stcx_instr(int type)
+{
+        return type == LARX || type == STCX;
 }
 
 /*
- * Handle debug exception notifications.
+ * We've failed in reliably handling the hw-breakpoint. Unregister
+ * it and throw a warning message to let the user know about it.
  */
+static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
+{
+        WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
+             info->address);
+        perf_event_disable_inatomic(bp);
+}
+
+static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
+{
+        printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
+                           info->address);
+        perf_event_disable_inatomic(bp);
+}
+
+static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
+                             struct arch_hw_breakpoint **info, int *hit,
+                             struct ppc_inst instr)
+{
+        int i;
+        int stepped;
+
+        /* Do not emulate user-space instructions, instead single-step them */
+        if (user_mode(regs)) {
+                for (i = 0; i < nr_wp_slots(); i++) {
+                        if (!hit[i])
+                                continue;
+                        current->thread.last_hit_ubp[i] = bp[i];
+                        info[i] = NULL;
+                }
+                regs->msr |= MSR_SE;
+                return false;
+        }
+
+        stepped = emulate_step(regs, instr);
+        if (!stepped) {
+                for (i = 0; i < nr_wp_slots(); i++) {
+                        if (!hit[i])
+                                continue;
+                        handler_error(bp[i], info[i]);
+                        info[i] = NULL;
+                }
+                return false;
+        }
+        return true;
+}
+
 int hw_breakpoint_handler(struct die_args *args)
 {
+        bool err = false;
         int rc = NOTIFY_STOP;
-        struct perf_event *bp;
+        struct perf_event *bp[HBP_NUM_MAX] = { NULL };
         struct pt_regs *regs = args->regs;
-#ifndef CONFIG_PPC_8xx
-        int stepped = 1;
-        unsigned int instr;
-#endif
-        struct arch_hw_breakpoint *info;
-        unsigned long dar = regs->dar;
+        struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
+        int i;
+        int hit[HBP_NUM_MAX] = {0};
+        int nr_hit = 0;
+        bool ptrace_bp = false;
+        struct ppc_inst instr = ppc_inst(0);
+        int type = 0;
+        int size = 0;
+        unsigned long ea;
 
         /* Disable breakpoints during exception handling */
         hw_breakpoint_disable();
@@ -231,12 +581,40 @@
          */
         rcu_read_lock();
 
-        bp = __this_cpu_read(bp_per_reg);
-        if (!bp) {
+        if (!IS_ENABLED(CONFIG_PPC_8xx))
+                wp_get_instr_detail(regs, &instr, &type, &size, &ea);
+
+        for (i = 0; i < nr_wp_slots(); i++) {
+                bp[i] = __this_cpu_read(bp_per_reg[i]);
+                if (!bp[i])
+                        continue;
+
+                info[i] = counter_arch_bp(bp[i]);
+                info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
+                if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
+                        if (!IS_ENABLED(CONFIG_PPC_8xx) &&
+                            ppc_inst_equal(instr, ppc_inst(0))) {
+                                handler_error(bp[i], info[i]);
+                                info[i] = NULL;
+                                err = 1;
+                                continue;
+                        }
+
+                        if (is_ptrace_bp(bp[i]))
+                                ptrace_bp = true;
+                        hit[i] = 1;
+                        nr_hit++;
+                }
+        }
+
+        if (err)
+                goto reset;
+
+        if (!nr_hit) {
                 rc = NOTIFY_DONE;
                 goto out;
         }
-        info = counter_arch_bp(bp);
 
         /*
          * Return early after invoking user-callback function without restoring
@@ -244,56 +622,50 @@
          * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
          * generated in do_dabr().
          */
-        if (bp->overflow_handler == ptrace_triggered) {
-                perf_bp_event(bp, regs);
+        if (ptrace_bp) {
+                for (i = 0; i < nr_wp_slots(); i++) {
+                        if (!hit[i])
+                                continue;
+                        perf_bp_event(bp[i], regs);
+                        info[i] = NULL;
+                }
                 rc = NOTIFY_DONE;
-                goto out;
+                goto reset;
         }
 
-        /*
-         * Verify if dar lies within the address range occupied by the symbol
-         * being watched to filter extraneous exceptions. If it doesn't,
-         * we still need to single-step the instruction, but we don't
-         * generate an event.
-         */
-        info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
-        if (!((bp->attr.bp_addr <= dar) &&
-              (dar - bp->attr.bp_addr < bp->attr.bp_len)))
-                info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+        if (!IS_ENABLED(CONFIG_PPC_8xx)) {
+                if (is_larx_stcx_instr(type)) {
+                        for (i = 0; i < nr_wp_slots(); i++) {
+                                if (!hit[i])
+                                        continue;
+                                larx_stcx_err(bp[i], info[i]);
+                                info[i] = NULL;
+                        }
+                        goto reset;
+                }
 
-#ifndef CONFIG_PPC_8xx
-        /* Do not emulate user-space instructions, instead single-step them */
-        if (user_mode(regs)) {
-                current->thread.last_hit_ubp = bp;
-                regs->msr |= MSR_SE;
-                goto out;
+                if (!stepping_handler(regs, bp, info, hit, instr))
+                        goto reset;
         }
 
-        stepped = 0;
-        instr = 0;
-        if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
-                stepped = emulate_step(regs, instr);
-
-        /*
-         * emulate_step() could not execute it. We've failed in reliably
-         * handling the hw-breakpoint. Unregister it and throw a warning
-         * message to let the user know about it.
-         */
-        if (!stepped) {
-                WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
-                        "0x%lx will be disabled.", info->address);
-                perf_event_disable_inatomic(bp);
-                goto out;
-        }
-#endif
         /*
          * As a policy, the callback is invoked in a 'trigger-after-execute'
          * fashion
          */
-        if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
-                perf_bp_event(bp, regs);
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (!hit[i])
+                        continue;
+                if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
+                        perf_bp_event(bp[i], regs);
+        }
 
-        __set_breakpoint(info);
+reset:
+        for (i = 0; i < nr_wp_slots(); i++) {
+                if (!info[i])
+                        continue;
+                __set_breakpoint(i, info[i]);
+        }
+
 out:
         rcu_read_unlock();
         return rc;
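
The final loop implements the 'trigger-after-execute' policy: perf_bp_event() is raised only after the watched access has been emulated or queued for single-stepping, so a counter read issued right after the access already reflects the hit. A user-space sketch (illustration only, not part of this diff; fd is a PERF_TYPE_BREAKPOINT event as in the earlier sketch, and hits_after_store is a made-up helper):

#include <unistd.h>

static long long hits_after_store(int fd, volatile long *watched)
{
        long long count = 0;

        *watched = 1;                       /* fires the watchpoint */
        read(fd, &count, sizeof(count));    /* trigger-after-execute: includes this hit */
        return count;
}
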
@@ -308,26 +680,43 @@
         struct pt_regs *regs = args->regs;
         struct perf_event *bp = NULL;
         struct arch_hw_breakpoint *info;
+        int i;
+        bool found = false;
 
-        bp = current->thread.last_hit_ubp;
         /*
          * Check if we are single-stepping as a result of a
          * previous HW Breakpoint exception
          */
-        if (!bp)
+        for (i = 0; i < nr_wp_slots(); i++) {
+                bp = current->thread.last_hit_ubp[i];
+
+                if (!bp)
+                        continue;
+
+                found = true;
+                info = counter_arch_bp(bp);
+
+                /*
+                 * We shall invoke the user-defined callback function in the
+                 * single stepping handler to conform to 'trigger-after-execute'
+                 * semantics
+                 */
+                if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
+                        perf_bp_event(bp, regs);
+                current->thread.last_hit_ubp[i] = NULL;
+        }
+
+        if (!found)
                 return NOTIFY_DONE;
 
-        info = counter_arch_bp(bp);
+        for (i = 0; i < nr_wp_slots(); i++) {
+                bp = __this_cpu_read(bp_per_reg[i]);
+                if (!bp)
+                        continue;
 
-        /*
-         * We shall invoke the user-defined callback function in the single
-         * stepping handler to confirm to 'trigger-after-execute' semantics
-         */
-        if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
-                perf_bp_event(bp, regs);
-
-        __set_breakpoint(info);
-        current->thread.last_hit_ubp = NULL;
+                info = counter_arch_bp(bp);
+                __set_breakpoint(i, info);
+        }
 
         /*
          * If the process was being single-stepped by ptrace, let the
@@ -366,13 +755,32 @@
  */
 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 {
+        int i;
         struct thread_struct *t = &tsk->thread;
 
-        unregister_hw_breakpoint(t->ptrace_bps[0]);
-        t->ptrace_bps[0] = NULL;
+        for (i = 0; i < nr_wp_slots(); i++) {
+                unregister_hw_breakpoint(t->ptrace_bps[i]);
+                t->ptrace_bps[i] = NULL;
+        }
 }
 
 void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
         /* TODO */
 }
+
+void ptrace_triggered(struct perf_event *bp,
+                      struct perf_sample_data *data, struct pt_regs *regs)
+{
+        struct perf_event_attr attr;
+
+        /*
+         * Disable the breakpoint request here since ptrace has defined a
+         * one-shot behaviour for breakpoint exceptions in PPC64.
+         * The SIGTRAP signal is generated automatically for us in do_dabr().
+         * We don't have to do anything about that here
+         */
+        attr = bp->attr;
+        attr.disabled = true;
+        modify_user_hw_breakpoint(bp, &attr);
+}
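
Note that ptrace_triggered() disarms the breakpoint by flipping attr.disabled through modify_user_hw_breakpoint(); re-arming is the same call with the flag cleared. A hypothetical kernel-side helper, sketched only to illustrate the inverse operation (not part of this diff):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Hypothetical: re-arm a breakpoint disarmed by ptrace_triggered(). */
static int reenable_ptrace_bp(struct perf_event *bp)
{
        struct perf_event_attr attr = bp->attr;

        attr.disabled = false;
        return modify_user_hw_breakpoint(bp, &attr);
}
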