1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
| /* SPDX-License-Identifier: GPL-2.0-or-later */
| /*
| * Floating-point, VMX/Altivec and VSX loads and stores
| * for use in instruction emulation.
| *
| * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
| */
|
| #include <asm/processor.h>
| #include <asm/ppc_asm.h>
| #include <asm/ppc-opcode.h>
| #include <asm/reg.h>
| #include <asm/asm-offsets.h>
| #include <asm/asm-compat.h>
| #include <linux/errno.h>
|
| #define STKFRM (PPC_MIN_STKFRM + 16)
|
| /* Get the contents of frN into *p; N is in r3 and p is in r4. */
| /* Dispatches into a table of 32 two-instruction (8-byte) stubs, one per FPR. */
| _GLOBAL(get_fpr)
| mflr r0 /* save caller's LR; the bcl below clobbers it */
| mfmsr r6 /* save current MSR so it can be restored on exit */
| ori r7, r6, MSR_FP
| MTMSRD(r7) /* enable FP so the FPRs are accessible */
| isync
| rlwinm r3,r3,3,0xf8 /* r3 = N * 8 = byte offset of entry N (N = 0..31) */
| bcl 20,31,1f /* branch-always that also sets LR = address of the table below */
| reg = 0
| .rept 32 /* each entry MUST stay exactly 8 bytes (2 instructions) */
| stfd reg, 0(r4) /* *p = frN */
| b 2f
| reg = reg + 1
| .endr
| 1: mflr r5 /* r5 = base address of the stub table */
| add r5,r3,r5 /* r5 = address of entry N */
| mtctr r5
| mtlr r0 /* restore caller's LR before dispatching */
| bctr /* jump into entry N; it stores then branches to 2: */
| 2: MTMSRD(r6) /* restore original MSR (FP enable state) */
| isync
| blr
|
| /* Put the contents of *p into frN; N is in r3 and p is in r4. */
| /* Mirror of get_fpr: table of 32 two-instruction (8-byte) stubs, one per FPR. */
| _GLOBAL(put_fpr)
| mflr r0 /* save caller's LR; the bcl below clobbers it */
| mfmsr r6 /* save current MSR so it can be restored on exit */
| ori r7, r6, MSR_FP
| MTMSRD(r7) /* enable FP so the FPRs are accessible */
| isync
| rlwinm r3,r3,3,0xf8 /* r3 = N * 8 = byte offset of entry N (N = 0..31) */
| bcl 20,31,1f /* branch-always that also sets LR = address of the table below */
| reg = 0
| .rept 32 /* each entry MUST stay exactly 8 bytes (2 instructions) */
| lfd reg, 0(r4) /* frN = *p */
| b 2f
| reg = reg + 1
| .endr
| 1: mflr r5 /* r5 = base address of the stub table */
| add r5,r3,r5 /* r5 = address of entry N */
| mtctr r5
| mtlr r0 /* restore caller's LR before dispatching */
| bctr /* jump into entry N; it loads then branches to 2: */
| 2: MTMSRD(r6) /* restore original MSR (FP enable state) */
| isync
| blr
|
| #ifdef CONFIG_ALTIVEC
| /* Get the contents of vrN into *p; N is in r3 and p is in r4. */
| /* Same dispatch-table scheme as get_fpr, but for the Altivec VRs. */
| _GLOBAL(get_vr)
| mflr r0 /* save caller's LR; the bcl below clobbers it */
| mfmsr r6 /* save current MSR so it can be restored on exit */
| oris r7, r6, MSR_VEC@h /* MSR_VEC is in the upper halfword, hence oris */
| MTMSRD(r7) /* enable Altivec so the VRs are accessible */
| isync
| rlwinm r3,r3,3,0xf8 /* r3 = N * 8 = byte offset of entry N (N = 0..31) */
| bcl 20,31,1f /* branch-always that also sets LR = address of the table below */
| reg = 0
| .rept 32 /* each entry MUST stay exactly 8 bytes (2 instructions) */
| stvx reg, 0, r4 /* *p = vrN (stvx aligns the EA to 16 bytes itself) */
| b 2f
| reg = reg + 1
| .endr
| 1: mflr r5 /* r5 = base address of the stub table */
| add r5,r3,r5 /* r5 = address of entry N */
| mtctr r5
| mtlr r0 /* restore caller's LR before dispatching */
| bctr /* jump into entry N; it stores then branches to 2: */
| 2: MTMSRD(r6) /* restore original MSR (VEC enable state) */
| isync
| blr
|
| /* Put the contents of *p into vrN; N is in r3 and p is in r4. */
| /* Mirror of get_vr: table of 32 two-instruction (8-byte) stubs, one per VR. */
| _GLOBAL(put_vr)
| mflr r0 /* save caller's LR; the bcl below clobbers it */
| mfmsr r6 /* save current MSR so it can be restored on exit */
| oris r7, r6, MSR_VEC@h /* MSR_VEC is in the upper halfword, hence oris */
| MTMSRD(r7) /* enable Altivec so the VRs are accessible */
| isync
| rlwinm r3,r3,3,0xf8 /* r3 = N * 8 = byte offset of entry N (N = 0..31) */
| bcl 20,31,1f /* branch-always that also sets LR = address of the table below */
| reg = 0
| .rept 32 /* each entry MUST stay exactly 8 bytes (2 instructions) */
| lvx reg, 0, r4 /* vrN = *p (lvx aligns the EA to 16 bytes itself) */
| b 2f
| reg = reg + 1
| .endr
| 1: mflr r5 /* r5 = base address of the stub table */
| add r5,r3,r5 /* r5 = address of entry N */
| mtctr r5
| mtlr r0 /* restore caller's LR before dispatching */
| bctr /* jump into entry N; it loads then branches to 2: */
| 2: MTMSRD(r6) /* restore original MSR (VEC enable state) */
| isync
| blr
| #endif /* CONFIG_ALTIVEC */
|
| #ifdef CONFIG_VSX
| /* Get the contents of vsN into vs0; N is in r3. */
| /* Internal helper: the caller (store_vsrn) has already set MSR_VSX. */
| /* Table of 64 two-instruction (8-byte) stubs; each returns directly to */
| /* the caller because LR is restored before the bctr dispatch. */
| _GLOBAL(get_vsr)
| mflr r0 /* save caller's LR; the bcl below clobbers it */
| rlwinm r3,r3,3,0x1f8 /* r3 = N * 8; wider mask allows N = 0..63 */
| bcl 20,31,1f /* branch-always that also sets LR = address of the table below */
| blr /* entry 0: vs0 is already in vs0, nothing to copy */
| nop /* pad entry 0 to the fixed 8-byte entry size */
| reg = 1
| .rept 63 /* each entry MUST stay exactly 8 bytes (2 instructions) */
| XXLOR(0,reg,reg) /* vs0 = vsN (register-to-register move via xxlor) */
| blr /* return to the original caller */
| reg = reg + 1
| .endr
| 1: mflr r5 /* r5 = base address of the stub table */
| add r5,r3,r5 /* r5 = address of entry N */
| mtctr r5
| mtlr r0 /* restore caller's LR so the stub's blr returns correctly */
| bctr /* jump into entry N */
|
| /* Put the contents of vs0 into vsN; N is in r3. */
| /* Internal helper: the caller (load_vsrn) has already set MSR_VSX. */
| /* Mirror of get_vsr: 64 two-instruction (8-byte) stubs that return */
| /* directly to the caller because LR is restored before the bctr. */
| _GLOBAL(put_vsr)
| mflr r0 /* save caller's LR; the bcl below clobbers it */
| rlwinm r3,r3,3,0x1f8 /* r3 = N * 8; wider mask allows N = 0..63 */
| bcl 20,31,1f /* branch-always that also sets LR = address of the table below */
| blr /* entry 0: vs0 is already in vs0, nothing to copy */
| nop /* pad entry 0 to the fixed 8-byte entry size */
| reg = 1
| .rept 63 /* each entry MUST stay exactly 8 bytes (2 instructions) */
| XXLOR(reg,0,0) /* vsN = vs0 (register-to-register move via xxlor) */
| blr /* return to the original caller */
| reg = reg + 1
| .endr
| 1: mflr r5 /* r5 = base address of the stub table */
| add r5,r3,r5 /* r5 = address of entry N */
| mtctr r5
| mtlr r0 /* restore caller's LR so the stub's blr returns correctly */
| bctr /* jump into entry N */
|
| /* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
| /* Goes through vs0: preserves the caller-visible vs0 in a stack slot */
| /* around the transfer unless vs0 itself is the destination. */
| _GLOBAL(load_vsrn)
| PPC_STLU r1,-STKFRM(r1) /* allocate our stack frame */
| mflr r0
| PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) /* save LR: we may bl put_vsr below */
| mfmsr r6 /* save current MSR so it can be restored on exit */
| oris r7,r6,MSR_VSX@h /* MSR_VSX is in the upper halfword */
| cmpwi cr7,r3,0 /* cr7 = (N == 0)? destination is vs0 itself */
| li r8,STKFRM-16 /* offset of the 16-byte scratch slot in our frame */
| MTMSRD(r7) /* enable VSX so the VSRs are accessible */
| isync
| beq cr7,1f /* N == 0: vs0 is the target, no need to preserve it */
| STXVD2X(0,R1,R8) /* save the caller's vs0 into the scratch slot */
| 1: LXVD2X(0,R0,R4) /* vs0 = *p (R0 as RA means "no base register") */
| #ifdef __LITTLE_ENDIAN__
| XXSWAPD(0,0) /* lxvd2x is doubleword-BE-ordered; swap halves on LE */
| #endif
| beq cr7,4f /* N == 0: value is already in its final register */
| bl put_vsr /* copy vs0 into vsN */
| LXVD2X(0,R1,R8) /* restore the caller's original vs0 */
| 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
| mtlr r0
| MTMSRD(r6) /* restore original MSR (VSX enable state) */
| isync
| addi r1,r1,STKFRM /* pop our stack frame */
| blr
|
| /* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
| /* Goes through vs0: saves the caller-visible vs0 in a stack slot, */
| /* copies vsN into vs0 via get_vsr, stores it, then restores vs0. */
| _GLOBAL(store_vsrn)
| PPC_STLU r1,-STKFRM(r1) /* allocate our stack frame */
| mflr r0
| PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) /* save LR: we bl get_vsr below */
| mfmsr r6 /* save current MSR so it can be restored on exit */
| oris r7,r6,MSR_VSX@h /* MSR_VSX is in the upper halfword */
| li r8,STKFRM-16 /* offset of the 16-byte scratch slot in our frame */
| MTMSRD(r7) /* enable VSX so the VSRs are accessible */
| isync
| STXVD2X(0,R1,R8) /* save the caller's vs0 into the scratch slot */
| bl get_vsr /* vs0 = vsN */
| #ifdef __LITTLE_ENDIAN__
| XXSWAPD(0,0) /* stxvd2x is doubleword-BE-ordered; swap halves on LE */
| #endif
| STXVD2X(0,R0,R4) /* *p = vs0 (R0 as RA means "no base register") */
| LXVD2X(0,R1,R8) /* restore the caller's original vs0 */
| PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
| mtlr r0
| MTMSRD(r6) /* restore original MSR (VSX enable state) */
| isync
| mr r3,r9 /* NOTE(review): r9 is never set in this function, so r3 gets an arbitrary value here — looks vestigial; confirm callers ignore the return value */
| addi r1,r1,STKFRM /* pop our stack frame */
| blr
| #endif /* CONFIG_VSX */
|
| /* Convert single-precision to double, without disturbing FPRs. */
| /* conv_sp_to_dp(float *sp, double *dp) */
| /* Uses the fact that lfs converts single to double format on load; */
| /* fr0 is saved and restored so no caller-visible FPR changes. */
| _GLOBAL(conv_sp_to_dp)
| mfmsr r6 /* save current MSR so it can be restored on exit */
| ori r7, r6, MSR_FP
| MTMSRD(r7) /* enable FP so fr0 can be used */
| isync
| stfd fr0, -16(r1) /* save fr0 below the stack pointer (NOTE(review): assumes this scratch area under r1 is safe in this context — confirm) */
| lfs fr0, 0(r3) /* fr0 = *sp, widened to double by lfs */
| stfd fr0, 0(r4) /* *dp = (double)*sp */
| lfd fr0, -16(r1) /* restore the caller's fr0 */
| MTMSRD(r6) /* restore original MSR (FP enable state) */
| isync
| blr
|
| /* Convert double-precision to single, without disturbing FPRs. */
| /* conv_dp_to_sp(double *dp, float *sp) */
| /* (Header comment previously copy-pasted from conv_sp_to_dp; fixed.) */
| /* Uses the fact that stfs rounds double to single format on store; */
| /* fr0 is saved and restored so no caller-visible FPR changes. */
| _GLOBAL(conv_dp_to_sp)
| mfmsr r6 /* save current MSR so it can be restored on exit */
| ori r7, r6, MSR_FP
| MTMSRD(r7) /* enable FP so fr0 can be used */
| isync
| stfd fr0, -16(r1) /* save fr0 below the stack pointer (NOTE(review): assumes this scratch area under r1 is safe in this context — confirm) */
| lfd fr0, 0(r3) /* fr0 = *dp */
| stfs fr0, 0(r4) /* *sp = (float)*dp, rounded on store by stfs */
| lfd fr0, -16(r1) /* restore the caller's fr0 */
| MTMSRD(r6) /* restore original MSR (FP enable state) */
| isync
| blr
|
|