/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail   billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
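
/*
 * Illustrative C-side sketch.  The Xsig layout is assumed to be the
 * three-word struct from fpu_emu.h, with lsw at offset 0, midw at
 * offset 4 and msw at offset 8, matching the (%esi), 4(%esi) and
 * 8(%esi) accesses below:
 *
 *	typedef struct {
 *		unsigned long lsw;
 *		unsigned long midw;
 *		unsigned long msw;
 *	} Xsig;
 *
 *	Xsig x = { 0, 0, 1 };
 *	int s = norm_Xsig(&x);
 *
 * After the call, x.msw == 0x80000000 and s == -31: the shift count is
 * accumulated as a negative number in -4(%ebp), so the count of a left
 * shift is returned negated.
 */
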
.file "round_Xsig.S"

#include "fpu_emu.h"


.text
SYM_FUNC_START(round_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

        movl    8(%esi),%edx
        movl    4(%esi),%ebx
        movl    (%esi),%eax

        movl    $0,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_round         /* Already normalized */
        jnz     L_shift_1       /* Shift left 1 - 31 bits */

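/* The ms word is zero: shift everything up by one whole word (%edx
   gets the middle word, %ebx the low word) and record a 32 bit shift. */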
        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

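/* Fall through to the bit-level shift: bsrl gives the index of the
   most significant set bit, so 31 minus that index is the left shift
   needed.  bsrl on a zero %edx leaves %ecx undefined, so a non-zero
   argument is assumed to reach this point. */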
/* We need to shift left by 1 - 31 bits */
L_shift_1:
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax

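/* Rounding is a simple round-half-up on the low word: if bit 31 of
   %eax is set, increment the midw:msw pair.  If %edx wraps to zero,
   the 64-bit significand was all ones: reload 0x80000000 and add one
   to the shift count. */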
L_round:
        testl   $0x80000000,%eax
        jz      L_exit

        addl    $1,%ebx
        adcl    $0,%edx
        jnz     L_exit

        movl    $0x80000000,%edx
        incl    -4(%ebp)

L_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax

        popl    %esi
        popl    %ebx
        leave
        RET
SYM_FUNC_END(round_Xsig)


SYM_FUNC_START(norm_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

        movl    8(%esi),%edx
        movl    4(%esi),%ebx
        movl    (%esi),%eax

        movl    $0,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Already normalized */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

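/* As in round_Xsig: the ms word is zero, so shift up by one whole
   word and check again. */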
        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Normalized now */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

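/* Sixty-four bits of zeros so far: shift up one more word.  A zero
   argument ends here with a count of -64 instead of reaching bsrl. */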
        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        addl    $-32,-4(%ebp)
        jmp     L_n_exit        /* Might not be normalized,
                                   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax

L_n_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax

        popl    %esi
        popl    %ebx
        leave
        RET
SYM_FUNC_END(norm_Xsig)