/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 |  Multiply a 12 byte fixed point number by another fixed point number.     |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                         |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, Xsig *b)                                    |
 |                                                                           |
 | The result is neither rounded nor normalized, and the ls bit or so may    |
 |  be wrong.                                                                |
 |                                                                           |
 +---------------------------------------------------------------------------*/
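
/*
 * Summary of the three routines below, assuming the Xsig layout from
 * fpu_emu.h: a 96 bit significand held as three 32 bit words, called
 * lsl, midl and msl in the comments:
 *
 *   mul32_Xsig:     *x <- (*x *  b) >> 32
 *   mul64_Xsig:     *x <- (*x * *b) >> 64    (approximately)
 *   mul_Xsig_Xsig:  *x <- (*x * *b) >> 96    (approximately)
 *
 * Only the most significant 96 bits of each full product are kept and the
 * rest is truncated.  mul64_Xsig and mul_Xsig_Xsig also skip the partial
 * products that lie entirely below the retained window, so their results
 * may fall short of the exactly truncated product in the last bit or so,
 * as the header above warns.
 */
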
	.file	"mul_Xsig.S"


#include "fpu_emu.h"

.text
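
/*
 * A rough C level sketch of mul32_Xsig (illustrative only; the field names
 * lsw/midw/msw and the helper name are assumptions, with the fields taken
 * from the Xsig declaration in fpu_emu.h).  All three partial products are
 * kept, so this is the exact truncated result (*x * b) >> 32:
 *
 *   void mul32_Xsig_sketch(Xsig *x, unsigned int b)
 *   {
 *           unsigned long long p, a0 = 0, a1 = 0, a2 = 0;
 *
 *           p = (unsigned long long)x->lsw  * b;  a0 += p >> 32;
 *           p = (unsigned long long)x->midw * b;  a0 += (unsigned int)p;  a1 += p >> 32;
 *           p = (unsigned long long)x->msw  * b;  a1 += (unsigned int)p;  a2 += p >> 32;
 *
 *           a1 += a0 >> 32;           // propagate carries, as the adcl
 *           a2 += a1 >> 32;           // instructions below do
 *
 *           x->lsw  = (unsigned int)a0;
 *           x->midw = (unsigned int)a1;
 *           x->msw  = (unsigned int)a2;
 *   }
 */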
ENTRY(mul32_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax	/* lsl of Xsig */
	mull %ecx		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull %ecx		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull %ecx		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	ret

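
/*
 * A rough C level sketch of mul64_Xsig (illustrative only, same
 * assumptions as the sketch above).  The lsl*b_lo product and the low
 * halves of lsl*b_hi and midl*b_lo lie entirely below the retained
 * 96 bit window and are never formed, so the result may be smaller than
 * the exactly truncated (*x * *b) >> 64 by a little in the last bit:
 *
 *   void mul64_Xsig_sketch(Xsig *x, const unsigned long long *b)
 *   {
 *           unsigned int b_lo = (unsigned int)*b;
 *           unsigned int b_hi = (unsigned int)(*b >> 32);
 *           unsigned long long p, a0 = 0, a1 = 0, a2 = 0;
 *
 *           p = (unsigned long long)x->lsw  * b_hi;  a0 += p >> 32;
 *           p = (unsigned long long)x->midw * b_lo;  a0 += p >> 32;
 *           p = (unsigned long long)x->midw * b_hi;  a0 += (unsigned int)p;  a1 += p >> 32;
 *           p = (unsigned long long)x->msw  * b_lo;  a0 += (unsigned int)p;  a1 += p >> 32;
 *           p = (unsigned long long)x->msw  * b_hi;  a1 += (unsigned int)p;  a2 += p >> 32;
 *
 *           a1 += a0 >> 32;
 *           a2 += a1 >> 32;
 *
 *           x->lsw  = (unsigned int)a0;
 *           x->midw = (unsigned int)a1;
 *           x->msw  = (unsigned int)a2;
 *   }
 */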
ENTRY(mul64_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax	/* lsl of Xsig */
	mull 4(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	ret

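
/*
 * A rough C level sketch of mul_Xsig_Xsig (illustrative only, same
 * assumptions as the sketches above).  Only the six most significant
 * partial products of the 96x96 bit multiply are formed; the terms below
 * the retained window (lsl*lsl, lsl*midl, midl*lsl and the low halves of
 * the three middle products) are dropped, so the result may fall a bit or
 * two short of the exactly truncated (*x * *b) >> 96:
 *
 *   void mul_Xsig_Xsig_sketch(Xsig *x, const Xsig *b)
 *   {
 *           unsigned long long p, a0 = 0, a1 = 0, a2 = 0;
 *
 *           p = (unsigned long long)x->lsw  * b->msw;   a0 += p >> 32;
 *           p = (unsigned long long)x->midw * b->midw;  a0 += p >> 32;
 *           p = (unsigned long long)x->msw  * b->lsw;   a0 += p >> 32;
 *           p = (unsigned long long)x->midw * b->msw;   a0 += (unsigned int)p;  a1 += p >> 32;
 *           p = (unsigned long long)x->msw  * b->midw;  a0 += (unsigned int)p;  a1 += p >> 32;
 *           p = (unsigned long long)x->msw  * b->msw;   a1 += (unsigned int)p;  a2 += p >> 32;
 *
 *           a1 += a0 >> 32;
 *           a2 += a1 >> 32;
 *
 *           x->lsw  = (unsigned int)a0;
 *           x->midw = (unsigned int)a1;
 *           x->msw  = (unsigned int)a2;
 *   }
 */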
ENTRY(mul_Xsig_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax	/* lsl of Xsig */
	mull 8(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%edx
	movl %edx,(%esi)
	movl -8(%ebp),%edx
	movl %edx,4(%esi)
	movl -4(%ebp),%edx
	movl %edx,8(%esi)

	popl %esi
	leave
	ret