2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/tools/arch/x86/lib/memcpy_64.S
@@ -1,11 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright 2002 Andi Kleen */
 
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
-#include <asm/mcsafe_test.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
+
+.pushsection .noinstr.text, "ax"
 
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -13,8 +15,6 @@
  * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
  */
-
-.weak memcpy
 
 /*
  * memcpy - Copy a memory block.
@@ -27,8 +27,8 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_WEAK(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
@@ -39,9 +39,9 @@
 	rep movsq
 	movl %edx, %ecx
 	rep movsb
-	ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+	RET
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
@@ -49,14 +49,14 @@
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
-	ret
-ENDPROC(memcpy_erms)
+	RET
+SYM_FUNC_END(memcpy_erms)
 
-ENTRY(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -137,7 +137,7 @@
 	movq %r9, 1*8(%rdi)
 	movq %r10, -2*8(%rdi, %rdx)
 	movq %r11, -1*8(%rdi, %rdx)
-	retq
+	RET
 	.p2align 4
 .Lless_16bytes:
 	cmpl $8, %edx
@@ -149,7 +149,7 @@
 	movq -1*8(%rsi, %rdx), %r9
 	movq %r8, 0*8(%rdi)
 	movq %r9, -1*8(%rdi, %rdx)
-	retq
+	RET
 	.p2align 4
 .Lless_8bytes:
 	cmpl $4, %edx
@@ -162,7 +162,7 @@
 	movl -4(%rsi, %rdx), %r8d
 	movl %ecx, (%rdi)
 	movl %r8d, -4(%rdi, %rdx)
-	retq
+	RET
 	.p2align 4
 .Lless_3bytes:
 	subl $1, %edx
@@ -180,118 +180,7 @@
 	movb %cl, (%rdi)
 
 .Lend:
-	retq
-ENDPROC(memcpy_orig)
+	RET
+SYM_FUNC_END(memcpy_orig)
 
-#ifndef CONFIG_UML
-
-MCSAFE_TEST_CTL
-
-/*
- * __memcpy_mcsafe - memory copy with machine check exception handling
- * Note that we only catch machine checks when reading the source addresses.
- * Writes to target are posted and don't generate machine checks.
- */
-ENTRY(__memcpy_mcsafe)
-	cmpl $8, %edx
-	/* Less than 8 bytes? Go to byte copy loop */
-	jb .L_no_whole_words
-
-	/* Check for bad alignment of source */
-	testl $7, %esi
-	/* Already aligned */
-	jz .L_8byte_aligned
-
-	/* Copy one byte at a time until source is 8-byte aligned */
-	movl %esi, %ecx
-	andl $7, %ecx
-	subl $8, %ecx
-	negl %ecx
-	subl %ecx, %edx
-.L_read_leading_bytes:
-	movb (%rsi), %al
-	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
-	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
-.L_write_leading_bytes:
-	movb %al, (%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz .L_read_leading_bytes
-
-.L_8byte_aligned:
-	movl %edx, %ecx
-	andl $7, %edx
-	shrl $3, %ecx
-	jz .L_no_whole_words
-
-.L_read_words:
-	movq (%rsi), %r8
-	MCSAFE_TEST_SRC %rsi 8 .E_read_words
-	MCSAFE_TEST_DST %rdi 8 .E_write_words
-.L_write_words:
-	movq %r8, (%rdi)
-	addq $8, %rsi
-	addq $8, %rdi
-	decl %ecx
-	jnz .L_read_words
-
-	/* Any trailing bytes? */
-.L_no_whole_words:
-	andl %edx, %edx
-	jz .L_done_memcpy_trap
-
-	/* Copy trailing bytes */
-	movl %edx, %ecx
-.L_read_trailing_bytes:
-	movb (%rsi), %al
-	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
-	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
-.L_write_trailing_bytes:
-	movb %al, (%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz .L_read_trailing_bytes
-
-	/* Copy successful. Return zero */
-.L_done_memcpy_trap:
-	xorl %eax, %eax
-	ret
-ENDPROC(__memcpy_mcsafe)
-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
-
-	.section .fixup, "ax"
-	/*
-	 * Return number of bytes not copied for any failure. Note that
-	 * there is no "tail" handling since the source buffer is 8-byte
-	 * aligned and poison is cacheline aligned.
-	 */
-.E_read_words:
-	shll $3, %ecx
-.E_leading_bytes:
-	addl %edx, %ecx
-.E_trailing_bytes:
-	mov %ecx, %eax
-	ret
-
-	/*
-	 * For write fault handling, given the destination is unaligned,
-	 * we handle faults on multi-byte writes with a byte-by-byte
-	 * copy up to the write-protected page.
-	 */
-.E_write_words:
-	shll $3, %ecx
-	addl %edx, %ecx
-	movl %ecx, %edx
-	jmp mcsafe_handle_tail
-
-	.previous
-
-	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
-	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
-	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
-	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
-	_ASM_EXTABLE(.L_write_words, .E_write_words)
-	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
-#endif
+.popsection
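
Note (not part of the diff): the block comment in the -27,8 +27,8 hunk documents the calling contract of this routine under the SysV x86-64 ABI: rdi = destination, rsi = source, rdx = count, with the original destination pointer returned in rax. A minimal user-space C sketch of that same contract, using the standard C library memcpy rather than the kernel assembly above:

/*
 * Sketch only: illustrates that memcpy(dst, src, n) returns its original
 * destination pointer, matching the "rax original destination" comment.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char src[16] = "hello, memcpy";
	char dst[16];

	/* dst -> rdi, src -> rsi, count -> rdx; return value (rax) is dst. */
	char *ret = memcpy(dst, src, sizeof(src));

	printf("copied: %s, returned dst: %s\n", dst, ret == dst ? "yes" : "no");
	return 0;
}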