@@ -1,11 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright 2002 Andi Kleen */
 
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
-#include <asm/mcsafe_test.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
+
+.pushsection .noinstr.text, "ax"
 
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -13,8 +15,6 @@
  * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
  */
-
-.weak memcpy
 
 /*
  * memcpy - Copy a memory block.
@@ -27,8 +27,8 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_WEAK(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
@@ -39,9 +39,9 @@
 	rep movsq
 	movl %edx, %ecx
 	rep movsb
-	ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+	RET
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
@@ -49,14 +49,14 @@
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
-	ret
-ENDPROC(memcpy_erms)
+	RET
+SYM_FUNC_END(memcpy_erms)
 
-ENTRY(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -137,7 +137,7 @@
 	movq %r9, 1*8(%rdi)
 	movq %r10, -2*8(%rdi, %rdx)
 	movq %r11, -1*8(%rdi, %rdx)
-	retq
+	RET
 	.p2align 4
 .Lless_16bytes:
 	cmpl $8, %edx
@@ -149,7 +149,7 @@
 	movq -1*8(%rsi, %rdx), %r9
 	movq %r8, 0*8(%rdi)
 	movq %r9, -1*8(%rdi, %rdx)
-	retq
+	RET
 	.p2align 4
 .Lless_8bytes:
 	cmpl $4, %edx
@@ -162,7 +162,7 @@
 	movl -4(%rsi, %rdx), %r8d
 	movl %ecx, (%rdi)
 	movl %r8d, -4(%rdi, %rdx)
-	retq
+	RET
 	.p2align 4
 .Lless_3bytes:
 	subl $1, %edx
@@ -180,118 +180,7 @@
 	movb %cl, (%rdi)
 
 .Lend:
-	retq
-ENDPROC(memcpy_orig)
+	RET
+SYM_FUNC_END(memcpy_orig)
 
-#ifndef CONFIG_UML
-
-MCSAFE_TEST_CTL
-
-/*
- * __memcpy_mcsafe - memory copy with machine check exception handling
- * Note that we only catch machine checks when reading the source addresses.
- * Writes to target are posted and don't generate machine checks.
- */
-ENTRY(__memcpy_mcsafe)
-	cmpl $8, %edx
-	/* Less than 8 bytes? Go to byte copy loop */
-	jb .L_no_whole_words
-
-	/* Check for bad alignment of source */
-	testl $7, %esi
-	/* Already aligned */
-	jz .L_8byte_aligned
-
-	/* Copy one byte at a time until source is 8-byte aligned */
-	movl %esi, %ecx
-	andl $7, %ecx
-	subl $8, %ecx
-	negl %ecx
-	subl %ecx, %edx
-.L_read_leading_bytes:
-	movb (%rsi), %al
-	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
-	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
-.L_write_leading_bytes:
-	movb %al, (%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz .L_read_leading_bytes
-
-.L_8byte_aligned:
-	movl %edx, %ecx
-	andl $7, %edx
-	shrl $3, %ecx
-	jz .L_no_whole_words
-
-.L_read_words:
-	movq (%rsi), %r8
-	MCSAFE_TEST_SRC %rsi 8 .E_read_words
-	MCSAFE_TEST_DST %rdi 8 .E_write_words
-.L_write_words:
-	movq %r8, (%rdi)
-	addq $8, %rsi
-	addq $8, %rdi
-	decl %ecx
-	jnz .L_read_words
-
-	/* Any trailing bytes? */
-.L_no_whole_words:
-	andl %edx, %edx
-	jz .L_done_memcpy_trap
-
-	/* Copy trailing bytes */
-	movl %edx, %ecx
-.L_read_trailing_bytes:
-	movb (%rsi), %al
-	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
-	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
-.L_write_trailing_bytes:
-	movb %al, (%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz .L_read_trailing_bytes
-
-	/* Copy successful. Return zero */
-.L_done_memcpy_trap:
-	xorl %eax, %eax
-	ret
-ENDPROC(__memcpy_mcsafe)
-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
-
-	.section .fixup, "ax"
-	/*
-	 * Return number of bytes not copied for any failure. Note that
-	 * there is no "tail" handling since the source buffer is 8-byte
-	 * aligned and poison is cacheline aligned.
-	 */
-.E_read_words:
-	shll $3, %ecx
-.E_leading_bytes:
-	addl %edx, %ecx
-.E_trailing_bytes:
-	mov %ecx, %eax
-	ret
-
-	/*
-	 * For write fault handling, given the destination is unaligned,
-	 * we handle faults on multi-byte writes with a byte-by-byte
-	 * copy up to the write-protected page.
-	 */
-.E_write_words:
-	shll $3, %ecx
-	addl %edx, %ecx
-	movl %ecx, %edx
-	jmp mcsafe_handle_tail
-
-	.previous
-
-	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
-	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
-	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
-	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
-	_ASM_EXTABLE(.L_write_words, .E_write_words)
-	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
-#endif
+.popsection
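
For orientation, a minimal sketch (not part of the diff above) of the annotation pattern the file is converted to: SYM_FUNC_START*/SYM_FUNC_END from <linux/linkage.h> bracket each assembly function in place of ENTRY/ENDPROC (with _WEAK, _LOCAL and _ALIAS variants for weak, file-local and aliased symbols as used above), and the RET macro stands in for a bare ret/retq so return sites can be patched uniformly. The function name and body below are made up for illustration only.

#include <linux/linkage.h>

.pushsection .noinstr.text, "ax"

/* Hypothetical example: return the first byte of the buffer passed in %rdi. */
SYM_FUNC_START(example_first_byte)
	movzbl	(%rdi), %eax	/* %rdi = pointer argument, %eax = return value */
	RET			/* RET macro rather than a bare ret/retq */
SYM_FUNC_END(example_first_byte)

.popsection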