
Author: hc
Date:   2023-12-06
Commit: 08f87f769b595151be1afeff53e144f543faa614
File:   kernel/arch/x86/crypto/crc32-pclmul_asm.S
@@ -38,7 +38,6 @@
  */

 #include <linux/linkage.h>
-#include <asm/inst.h>


 .section .rodata
@@ -103,7 +102,7 @@
  *                        size_t len, uint crc32)
  */

-ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
+SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
        movdqa  (BUF), %xmm1
        movdqa  0x10(BUF), %xmm2
        movdqa  0x20(BUF), %xmm3
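
The patch makes three mechanical substitutions throughout the file: the PCLMULQDQ/PEXTRD byte-encoding macros from the dropped <asm/inst.h> become plain pclmulqdq/pextrd mnemonics (assemblers new enough for current kernels emit these directly), ENTRY/ENDPROC become the newer SYM_FUNC_START/SYM_FUNC_END linkage annotations, and the bare ret becomes the RET macro. The comment above gives the routine's whole C-visible surface: uint crc32_pclmul_le_16(unsigned char const *buffer, size_t len, uint crc32). Below is a hedged sketch of a caller in the spirit of the kernel's crc32-pclmul glue, retyped with stdint types; the helper names, the 64-byte cut-off and the scalar fallback are illustrative assumptions, not code from this SDK.

    /* Illustrative caller for the asm routine above. Only the
     * crc32_pclmul_le_16 prototype comes from this file. */
    #include <stddef.h>
    #include <stdint.h>

    uint32_t crc32_pclmul_le_16(const unsigned char *buffer, size_t len,
                                uint32_t crc32);   /* asm entry point above */

    /* Assumed scalar fallback: bitwise CRC32 over the reflected
     * IEEE 802.3 polynomial, no pre/post inversion (callers do that). */
    static uint32_t crc32_le_scalar(uint32_t crc, const unsigned char *p,
                                    size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
    }

    /* The asm body wants a 16-byte-aligned buffer, a 16-byte-multiple
     * length and more than 63 bytes, so peel head and tail in C. */
    static uint32_t crc32_update(uint32_t crc, const unsigned char *p,
                                 size_t len)
    {
        size_t head = (size_t)(-(uintptr_t)p & 15); /* bytes to alignment */

        if (len < head + 64)
            return crc32_le_scalar(crc, p, len);

        crc  = crc32_le_scalar(crc, p, head);
        p   += head;
        len -= head;

        size_t body = len & ~(size_t)15;            /* aligned middle part */
        crc = crc32_pclmul_le_16(p, body, crc);

        return crc32_le_scalar(crc, p + body, len - body);
    }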
@@ -129,17 +128,17 @@
 #ifdef __x86_64__
        movdqa  %xmm4, %xmm8
 #endif
-       PCLMULQDQ 00, CONSTANT, %xmm1
-       PCLMULQDQ 00, CONSTANT, %xmm2
-       PCLMULQDQ 00, CONSTANT, %xmm3
+       pclmulqdq $0x00, CONSTANT, %xmm1
+       pclmulqdq $0x00, CONSTANT, %xmm2
+       pclmulqdq $0x00, CONSTANT, %xmm3
 #ifdef __x86_64__
-       PCLMULQDQ 00, CONSTANT, %xmm4
+       pclmulqdq $0x00, CONSTANT, %xmm4
 #endif
-       PCLMULQDQ 0x11, CONSTANT, %xmm5
-       PCLMULQDQ 0x11, CONSTANT, %xmm6
-       PCLMULQDQ 0x11, CONSTANT, %xmm7
+       pclmulqdq $0x11, CONSTANT, %xmm5
+       pclmulqdq $0x11, CONSTANT, %xmm6
+       pclmulqdq $0x11, CONSTANT, %xmm7
 #ifdef __x86_64__
-       PCLMULQDQ 0x11, CONSTANT, %xmm8
+       pclmulqdq $0x11, CONSTANT, %xmm8
 #endif
        pxor    %xmm5, %xmm1
        pxor    %xmm6, %xmm2
@@ -149,8 +148,8 @@
 #else
        /* xmm8 unsupported for x32 */
        movdqa  %xmm4, %xmm5
-       PCLMULQDQ 00, CONSTANT, %xmm4
-       PCLMULQDQ 0x11, CONSTANT, %xmm5
+       pclmulqdq $0x00, CONSTANT, %xmm4
+       pclmulqdq $0x11, CONSTANT, %xmm5
        pxor    %xmm5, %xmm4
 #endif

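
Every converted pair above is one carry-less fold step: the $0x00 form multiplies the low 64-bit halves of the accumulator and the constant register, the $0x11 form the high halves, and the two products are xored into the next block of input. A minimal sketch with _mm_clmulepi64_si128, the intrinsic form of pclmulqdq; the fold constant here is a placeholder, not the values in this file's .rodata section.

    /* One 128-bit fold step, mirroring the movdqa/pclmulqdq/pxor
     * pattern in the hunks above. Compile with -mpclmul. */
    #include <emmintrin.h>   /* SSE2 */
    #include <wmmintrin.h>   /* PCLMUL: _mm_clmulepi64_si128 */

    static __m128i fold_128(__m128i acc, __m128i next, __m128i consts)
    {
        /* imm8 bit 0 picks the half of the first operand, bit 4 the
         * half of the second: 0x00 = acc.low  * consts.low,
         *                     0x11 = acc.high * consts.high. */
        __m128i lo = _mm_clmulepi64_si128(acc, consts, 0x00);
        __m128i hi = _mm_clmulepi64_si128(acc, consts, 0x11);

        /* pxor %xmm5, %xmm1; pxor (BUF), %xmm1 */
        return _mm_xor_si128(_mm_xor_si128(lo, hi), next);
    }

The same step runs four registers wide in the 64-byte main loop below, then one register at a time in loop_16.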
@@ -172,20 +171,20 @@
        prefetchnta (BUF)

        movdqa  %xmm1, %xmm5
-       PCLMULQDQ 0x00, CONSTANT, %xmm1
-       PCLMULQDQ 0x11, CONSTANT, %xmm5
+       pclmulqdq $0x00, CONSTANT, %xmm1
+       pclmulqdq $0x11, CONSTANT, %xmm5
        pxor    %xmm5, %xmm1
        pxor    %xmm2, %xmm1

        movdqa  %xmm1, %xmm5
-       PCLMULQDQ 0x00, CONSTANT, %xmm1
-       PCLMULQDQ 0x11, CONSTANT, %xmm5
+       pclmulqdq $0x00, CONSTANT, %xmm1
+       pclmulqdq $0x11, CONSTANT, %xmm5
        pxor    %xmm5, %xmm1
        pxor    %xmm3, %xmm1

        movdqa  %xmm1, %xmm5
-       PCLMULQDQ 0x00, CONSTANT, %xmm1
-       PCLMULQDQ 0x11, CONSTANT, %xmm5
+       pclmulqdq $0x00, CONSTANT, %xmm1
+       pclmulqdq $0x11, CONSTANT, %xmm5
        pxor    %xmm5, %xmm1
        pxor    %xmm4, %xmm1

@@ -193,8 +192,8 @@
        jb      fold_64
 loop_16:/* Folding rest buffer into 128bit */
        movdqa  %xmm1, %xmm5
-       PCLMULQDQ 0x00, CONSTANT, %xmm1
-       PCLMULQDQ 0x11, CONSTANT, %xmm5
+       pclmulqdq $0x00, CONSTANT, %xmm1
+       pclmulqdq $0x11, CONSTANT, %xmm5
        pxor    %xmm5, %xmm1
        pxor    (BUF), %xmm1
        sub     $0x10, LEN
@@ -205,7 +204,7 @@
 fold_64:
        /* perform the last 64 bit fold, also adds 32 zeroes
         * to the input stream */
-       PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
+       pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
        psrldq  $0x08, %xmm1
        pxor    CONSTANT, %xmm1

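
fold_64 is the one spot where the immediate mixes halves. In AT&T operand order, pclmulqdq $0x01, %xmm1, CONSTANT multiplies the high qword of CONSTANT (R4) by the low qword of %xmm1, matching the retained comment. A sketch of the step, again with a placeholder constant:

    /* The last 64-bit fold: multiply acc.low by R4 (kept in the high
     * half of the constant register), shift acc right 8 bytes, xor. */
    #include <emmintrin.h>
    #include <wmmintrin.h>

    static __m128i fold_64_step(__m128i acc, __m128i consts)
    {
        /* pclmulqdq $0x01, %xmm1, CONSTANT: consts.high * acc.low */
        __m128i prod = _mm_clmulepi64_si128(consts, acc, 0x01);

        acc = _mm_srli_si128(acc, 8);    /* psrldq $0x08, %xmm1 */
        return _mm_xor_si128(acc, prod); /* pxor CONSTANT, %xmm1 */
    }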
@@ -220,7 +219,7 @@
 #endif
        psrldq  $0x04, %xmm2
        pand    %xmm3, %xmm1
-       PCLMULQDQ 0x00, CONSTANT, %xmm1
+       pclmulqdq $0x00, CONSTANT, %xmm1
        pxor    %xmm2, %xmm1

        /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
@@ -231,11 +230,11 @@
 #endif
        movdqa  %xmm1, %xmm2
        pand    %xmm3, %xmm1
-       PCLMULQDQ 0x10, CONSTANT, %xmm1
+       pclmulqdq $0x10, CONSTANT, %xmm1
        pand    %xmm3, %xmm1
-       PCLMULQDQ 0x00, CONSTANT, %xmm1
+       pclmulqdq $0x00, CONSTANT, %xmm1
        pxor    %xmm2, %xmm1
-       PEXTRD  0x01, %xmm1, %eax
+       pextrd  $0x01, %xmm1, %eax

-       ret
-ENDPROC(crc32_pclmul_le_16)
+       RET
+SYM_FUNC_END(crc32_pclmul_le_16)
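
After the Barrett reduction the 32-bit result sits in dword 1 of %xmm1; the converted pextrd $0x01 moves it into %eax and RET returns it. The routine computes an uninverted, bit-reflected CRC32 over the same polynomial as the kernel's crc32_le. A table-less C reference with the standard check value, assuming the reflected polynomial 0xEDB88320; pre/post inversion is left to the caller here too, as main() shows.

    /* Slow reference for the value the PCLMUL routine accumulates. */
    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *p,
                                 size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
    }

    int main(void)
    {
        const unsigned char msg[] = "123456789";

        /* Standard CRC-32 check value: crc32("123456789") == 0xCBF43926,
         * with ~0 as the initial value and a final bitwise inversion. */
        assert(~crc32_le_ref(~0u, msg, strlen((const char *)msg))
               == 0xCBF43926u);
        return 0;
    }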