```diff
@@ -59,8 +59,17 @@
 
 u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
+	struct x86_perf_regs *perf_regs;
+
+	if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
+		perf_regs = container_of(regs, struct x86_perf_regs, regs);
+		if (!perf_regs->xmm_regs)
+			return 0;
+		return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
+	}
+
 	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
 		return 0;
 
 	return regs_get_register(regs, pt_regs_offset[idx]);
 }
```
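The XMM lookup relies on `struct pt_regs` being embedded as a member of `struct x86_perf_regs`: `container_of()` recovers a pointer to the outer structure from the pointer to the member. A minimal userspace sketch of the same pointer arithmetic; the `container_of` definition mirrors the kernel macro, and the struct layouts here are illustrative stand-ins, not the kernel's:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace re-implementation of the kernel's container_of(): subtract the
 * member's offset from the member pointer to recover the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-ins for the kernel structures. */
struct pt_regs { uint64_t ip; };

struct x86_perf_regs {
	struct pt_regs regs;   /* embedded member */
	uint64_t *xmm_regs;    /* NULL when XMM state was not captured */
};

int main(void)
{
	uint64_t xmm[32] = { [0] = 0xdeadbeef };
	struct x86_perf_regs outer = { .regs = { .ip = 0x1234 }, .xmm_regs = xmm };
	struct pt_regs *inner = &outer.regs;  /* what perf_reg_value() receives */

	/* Recover the outer struct the same way perf_reg_value() does. */
	struct x86_perf_regs *perf_regs =
		container_of(inner, struct x86_perf_regs, regs);
	printf("xmm0 = 0x%llx\n", (unsigned long long)perf_regs->xmm_regs[0]);
	return 0;
}
```

The `!perf_regs->xmm_regs` check in the patch then falls back to 0 for samples where XMM state was never collected.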
```diff
@@ -67,10 +76,20 @@
 
-#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))
+#define PERF_REG_X86_RESERVED	(((1ULL << PERF_REG_X86_XMM0) - 1) & \
+				 ~((1ULL << PERF_REG_X86_MAX) - 1))
 
 #ifdef CONFIG_X86_32
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
+		       (1ULL << PERF_REG_X86_R9) | \
+		       (1ULL << PERF_REG_X86_R10) | \
+		       (1ULL << PERF_REG_X86_R11) | \
+		       (1ULL << PERF_REG_X86_R12) | \
+		       (1ULL << PERF_REG_X86_R13) | \
+		       (1ULL << PERF_REG_X86_R14) | \
+		       (1ULL << PERF_REG_X86_R15))
+
 int perf_reg_validate(u64 mask)
 {
-	if (!mask || mask & REG_RESERVED)
+	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
 		return -EINVAL;
 
 	return 0;
```
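The old `REG_RESERVED` rejected every bit at or above `PERF_REG_X86_MAX`, which would also reject the new XMM bits (index 32 and up). The replacement keeps only the hole between `PERF_REG_X86_MAX` and `PERF_REG_X86_XMM0`, leaving the XMM range usable. A worked check, assuming the UAPI values `PERF_REG_X86_XMM0 == 32` and the 64-bit `PERF_REG_X86_MAX == 24` (i.e. `PERF_REG_X86_R15 + 1`); both values are quoted from memory of `uapi/asm/perf_regs.h`, so treat them as assumptions to verify against your tree:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed enum values from uapi/asm/perf_regs.h (verify before relying on). */
#define PERF_REG_X86_MAX   24   /* PERF_REG_X86_R15 + 1 on a 64-bit build */
#define PERF_REG_X86_XMM0  32

/* Bits [0, XMM0) with bits [0, MAX) cleared == the reserved hole [MAX, XMM0). */
#define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \
			       ~((1ULL << PERF_REG_X86_MAX) - 1))

int main(void)
{
	/* Prints 0xff000000: bits 24..31 sit reserved between the GPRs
	 * and XMM0, while bits 32..63 stay free for the XMM registers. */
	printf("reserved mask = 0x%llx\n",
	       (unsigned long long)PERF_REG_X86_RESERVED);
	return 0;
}
```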
```diff
@@ -82,8 +101,7 @@
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
```
```diff
@@ -96,10 +114,7 @@
 
 int perf_reg_validate(u64 mask)
 {
-	if (!mask || mask & REG_RESERVED)
-		return -EINVAL;
-
-	if (mask & REG_NOSUPPORT)
+	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
 		return -EINVAL;
 
 	return 0;
```
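Both `perf_reg_validate()` variants now reject a bad mask in a single test. In practice this function gates `perf_event_open()`: a `sample_regs_user` mask that touches the reserved hole should come back as `-EINVAL`. A sketch of that round trip, reusing the assumed bit positions from above (requires a kernel with these changes and sufficient perf privileges; expect a permission error instead on locked-down systems):

```c
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper: glibc exposes no perf_event_open() symbol. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.exclude_kernel = 1;
	attr.sample_type = PERF_SAMPLE_REGS_USER;
	/* Bit 24 falls in the reserved hole between the GPRs and XMM0
	 * (assuming the enum values sketched earlier), so the kernel
	 * should reject this via perf_reg_validate(). */
	attr.sample_regs_user = 1ULL << 24;

	int fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");  /* expected: Invalid argument */
	else
		close(fd);
	return 0;
}
```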
```diff
@@ -113,12 +128,20 @@
 	return PERF_SAMPLE_REGS_ABI_64;
 }
 
+static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs);
+
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
+	struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs);
 	struct pt_regs *user_regs = task_pt_regs(current);
 
+	if (!in_nmi()) {
+		regs_user->regs = user_regs;
+		regs_user->abi = perf_reg_abi(current);
+		return;
+	}
+
 	/*
 	 * If we're in an NMI that interrupted task_pt_regs setup, then
 	 * we can't sample user regs at all. This check isn't really
```
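On the 64-bit side the caller-supplied `regs_user_copy` parameter is replaced with a per-CPU scratch `pt_regs` fetched via `this_cpu_ptr()`, and non-NMI callers take an early exit that needs no copy at all. There is no userspace `DEFINE_PER_CPU`, but a thread-local scratch slot illustrates the shape of the idea: one preallocated buffer per execution context, so callers stop threading a copy through every signature (an analogy, not the kernel mechanism):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel structure. */
struct pt_regs { uint64_t ip, sp; };

/* Analogue of DEFINE_PER_CPU(struct pt_regs, nmi_user_regs): one scratch
 * slot per thread instead of per CPU, owned by the sampling code itself. */
static _Thread_local struct pt_regs scratch_regs;

static struct pt_regs *get_scratch_regs(void)
{
	/* Analogue of this_cpu_ptr(&nmi_user_regs). */
	return &scratch_regs;
}

int main(void)
{
	/* The "caller" no longer allocates or passes a copy down the
	 * call chain; the callee fetches its own context-local buffer. */
	struct pt_regs *copy = get_scratch_regs();
	copy->ip = 0x1234;
	printf("scratch ip = 0x%llx\n", (unsigned long long)copy->ip);
	return 0;
}
```

In the kernel the per-CPU slot is safe to hand out here because the copy is only used from NMI context, where the handler cannot be preempted off the CPU mid-use.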