root / target-arm / nwfpe / softfloat-macros @ a8d3431a
History | View | Annotate | Download (23.5 kB)
1 | 00406dff | bellard | |
---|---|---|---|
2 | 00406dff | bellard | /* |
3 | 00406dff | bellard | =============================================================================== |
4 | 00406dff | bellard | |
5 | 00406dff | bellard | This C source fragment is part of the SoftFloat IEC/IEEE Floating-point |
6 | 00406dff | bellard | Arithmetic Package, Release 2. |
7 | 00406dff | bellard | |
8 | 00406dff | bellard | Written by John R. Hauser. This work was made possible in part by the |
9 | 00406dff | bellard | International Computer Science Institute, located at Suite 600, 1947 Center |
10 | 00406dff | bellard | Street, Berkeley, California 94704. Funding was partially provided by the |
11 | 00406dff | bellard | National Science Foundation under grant MIP-9311980. The original version |
12 | 00406dff | bellard | of this code was written as part of a project to build a fixed-point vector |
13 | 00406dff | bellard | processor in collaboration with the University of California at Berkeley, |
14 | 00406dff | bellard | overseen by Profs. Nelson Morgan and John Wawrzynek. More information |
15 | 00406dff | bellard | is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ |
16 | 00406dff | bellard | arithmetic/softfloat.html'. |
17 | 00406dff | bellard | |
18 | 00406dff | bellard | THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort |
19 | 00406dff | bellard | has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT |
20 | 00406dff | bellard | TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO |
21 | 00406dff | bellard | PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY |
22 | 00406dff | bellard | AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. |
23 | 00406dff | bellard | |
24 | 00406dff | bellard | Derivative works are acceptable, even for commercial purposes, so long as |
25 | 00406dff | bellard | (1) they include prominent notice that the work is derivative, and (2) they |
26 | 00406dff | bellard | include prominent notice akin to these three paragraphs for those parts of |
27 | 00406dff | bellard | this code that are retained. |
28 | 00406dff | bellard | |
29 | 00406dff | bellard | =============================================================================== |
30 | 00406dff | bellard | */ |
31 | 00406dff | bellard | |
32 | 00406dff | bellard | /* |
33 | 00406dff | bellard | ------------------------------------------------------------------------------- |
34 | 00406dff | bellard | Shifts `a' right by the number of bits given in `count'. If any nonzero |
35 | 00406dff | bellard | bits are shifted off, they are ``jammed'' into the least significant bit of |
36 | 00406dff | bellard | the result by setting the least significant bit to 1. The value of `count' |
37 | 00406dff | bellard | can be arbitrarily large; in particular, if `count' is greater than 32, the |
38 | 00406dff | bellard | result will be either 0 or 1, depending on whether `a' is zero or nonzero. |
39 | 00406dff | bellard | The result is stored in the location pointed to by `zPtr'. |
40 | 00406dff | bellard | ------------------------------------------------------------------------------- |
41 | 00406dff | bellard | */ |
42 | 00406dff | bellard | INLINE void shift32RightJamming( bits32 a, int16 count, bits32 *zPtr ) |
43 | 00406dff | bellard | { |
44 | 00406dff | bellard | bits32 z; |
45 | 00406dff | bellard | if ( count == 0 ) { |
46 | 00406dff | bellard | z = a; |
47 | 00406dff | bellard | } |
48 | 00406dff | bellard | else if ( count < 32 ) { |
49 | 00406dff | bellard | z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 ); |
50 | 00406dff | bellard | } |
51 | 00406dff | bellard | else { |
52 | 00406dff | bellard | z = ( a != 0 ); |
53 | 00406dff | bellard | } |
54 | 00406dff | bellard | *zPtr = z; |
55 | 00406dff | bellard | } |
56 | 00406dff | bellard | |
57 | 00406dff | bellard | /* |
58 | 00406dff | bellard | ------------------------------------------------------------------------------- |
59 | 00406dff | bellard | Shifts `a' right by the number of bits given in `count'. If any nonzero |
60 | 00406dff | bellard | bits are shifted off, they are ``jammed'' into the least significant bit of |
61 | 00406dff | bellard | the result by setting the least significant bit to 1. The value of `count' |
62 | 00406dff | bellard | can be arbitrarily large; in particular, if `count' is greater than 64, the |
63 | 00406dff | bellard | result will be either 0 or 1, depending on whether `a' is zero or nonzero. |
64 | 00406dff | bellard | The result is stored in the location pointed to by `zPtr'. |
65 | 00406dff | bellard | ------------------------------------------------------------------------------- |
66 | 00406dff | bellard | */ |
67 | 00406dff | bellard | INLINE void shift64RightJamming( bits64 a, int16 count, bits64 *zPtr ) |
68 | 00406dff | bellard | { |
69 | 00406dff | bellard | bits64 z; |
70 | 00406dff | bellard | |
71 | 00406dff | bellard | // __asm__("@shift64RightJamming -- start"); |
72 | 00406dff | bellard | if ( count == 0 ) { |
73 | 00406dff | bellard | z = a; |
74 | 00406dff | bellard | } |
75 | 00406dff | bellard | else if ( count < 64 ) { |
76 | 00406dff | bellard | z = ( a>>count ) | ( ( a<<( ( - count ) & 63 ) ) != 0 ); |
77 | 00406dff | bellard | } |
78 | 00406dff | bellard | else { |
79 | 00406dff | bellard | z = ( a != 0 ); |
80 | 00406dff | bellard | } |
81 | 00406dff | bellard | // __asm__("@shift64RightJamming -- end"); |
82 | 00406dff | bellard | *zPtr = z; |
83 | 00406dff | bellard | } |
84 | 00406dff | bellard | |
85 | 00406dff | bellard | /* |
86 | 00406dff | bellard | ------------------------------------------------------------------------------- |
87 | 00406dff | bellard | Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64 |
88 | 00406dff | bellard | _plus_ the number of bits given in `count'. The shifted result is at most |
89 | 00406dff | bellard | 64 nonzero bits; this is stored at the location pointed to by `z0Ptr'. The |
90 | 00406dff | bellard | bits shifted off form a second 64-bit result as follows: The _last_ bit |
91 | 00406dff | bellard | shifted off is the most-significant bit of the extra result, and the other |
92 | 00406dff | bellard | 63 bits of the extra result are all zero if and only if _all_but_the_last_ |
93 | 00406dff | bellard | bits shifted off were all zero. This extra result is stored in the location |
94 | 00406dff | bellard | pointed to by `z1Ptr'. The value of `count' can be arbitrarily large. |
95 | 00406dff | bellard | (This routine makes more sense if `a0' and `a1' are considered to form a |
96 | 00406dff | bellard | fixed-point value with binary point between `a0' and `a1'. This fixed-point |
97 | 00406dff | bellard | value is shifted right by the number of bits given in `count', and the |
98 | 00406dff | bellard | integer part of the result is returned at the location pointed to by |
99 | 00406dff | bellard | `z0Ptr'. The fractional part of the result may be slightly corrupted as |
100 | 00406dff | bellard | described above, and is returned at the location pointed to by `z1Ptr'.) |
101 | 00406dff | bellard | ------------------------------------------------------------------------------- |
102 | 00406dff | bellard | */ |
103 | 00406dff | bellard | INLINE void |
104 | 00406dff | bellard | shift64ExtraRightJamming( |
105 | 00406dff | bellard | bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) |
106 | 00406dff | bellard | { |
107 | 00406dff | bellard | bits64 z0, z1; |
108 | 00406dff | bellard | int8 negCount = ( - count ) & 63; |
109 | 00406dff | bellard | |
110 | 00406dff | bellard | if ( count == 0 ) { |
111 | 00406dff | bellard | z1 = a1; |
112 | 00406dff | bellard | z0 = a0; |
113 | 00406dff | bellard | } |
114 | 00406dff | bellard | else if ( count < 64 ) { |
115 | 00406dff | bellard | z1 = ( a0<<negCount ) | ( a1 != 0 ); |
116 | 00406dff | bellard | z0 = a0>>count; |
117 | 00406dff | bellard | } |
118 | 00406dff | bellard | else { |
119 | 00406dff | bellard | if ( count == 64 ) { |
120 | 00406dff | bellard | z1 = a0 | ( a1 != 0 ); |
121 | 00406dff | bellard | } |
122 | 00406dff | bellard | else { |
123 | 00406dff | bellard | z1 = ( ( a0 | a1 ) != 0 ); |
124 | 00406dff | bellard | } |
125 | 00406dff | bellard | z0 = 0; |
126 | 00406dff | bellard | } |
127 | 00406dff | bellard | *z1Ptr = z1; |
128 | 00406dff | bellard | *z0Ptr = z0; |
129 | 00406dff | bellard | |
130 | 00406dff | bellard | } |
131 | 00406dff | bellard | |
132 | 00406dff | bellard | /* |
133 | 00406dff | bellard | ------------------------------------------------------------------------------- |
134 | 00406dff | bellard | Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the |
135 | 00406dff | bellard | number of bits given in `count'. Any bits shifted off are lost. The value |
136 | 00406dff | bellard | of `count' can be arbitrarily large; in particular, if `count' is greater |
137 | 00406dff | bellard | than 128, the result will be 0. The result is broken into two 64-bit pieces |
138 | 00406dff | bellard | which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. |
139 | 00406dff | bellard | ------------------------------------------------------------------------------- |
140 | 00406dff | bellard | */ |
141 | 00406dff | bellard | INLINE void |
142 | 00406dff | bellard | shift128Right( |
143 | 00406dff | bellard | bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) |
144 | 00406dff | bellard | { |
145 | 00406dff | bellard | bits64 z0, z1; |
146 | 00406dff | bellard | int8 negCount = ( - count ) & 63; |
147 | 00406dff | bellard | |
148 | 00406dff | bellard | if ( count == 0 ) { |
149 | 00406dff | bellard | z1 = a1; |
150 | 00406dff | bellard | z0 = a0; |
151 | 00406dff | bellard | } |
152 | 00406dff | bellard | else if ( count < 64 ) { |
153 | 00406dff | bellard | z1 = ( a0<<negCount ) | ( a1>>count ); |
154 | 00406dff | bellard | z0 = a0>>count; |
155 | 00406dff | bellard | } |
156 | 00406dff | bellard | else { |
157 | 00406dff | bellard | z1 = ( count < 64 ) ? ( a0>>( count & 63 ) ) : 0; |
158 | 00406dff | bellard | z0 = 0; |
159 | 00406dff | bellard | } |
160 | 00406dff | bellard | *z1Ptr = z1; |
161 | 00406dff | bellard | *z0Ptr = z0; |
162 | 00406dff | bellard | |
163 | 00406dff | bellard | } |
164 | 00406dff | bellard | |
165 | 00406dff | bellard | /* |
166 | 00406dff | bellard | ------------------------------------------------------------------------------- |
167 | 00406dff | bellard | Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the |
168 | 00406dff | bellard | number of bits given in `count'. If any nonzero bits are shifted off, they |
169 | 00406dff | bellard | are ``jammed'' into the least significant bit of the result by setting the |
170 | 00406dff | bellard | least significant bit to 1. The value of `count' can be arbitrarily large; |
171 | 00406dff | bellard | in particular, if `count' is greater than 128, the result will be either 0 |
172 | 00406dff | bellard | or 1, depending on whether the concatenation of `a0' and `a1' is zero or |
173 | 00406dff | bellard | nonzero. The result is broken into two 64-bit pieces which are stored at |
174 | 00406dff | bellard | the locations pointed to by `z0Ptr' and `z1Ptr'. |
175 | 00406dff | bellard | ------------------------------------------------------------------------------- |
176 | 00406dff | bellard | */ |
177 | 00406dff | bellard | INLINE void |
178 | 00406dff | bellard | shift128RightJamming( |
179 | 00406dff | bellard | bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) |
180 | 00406dff | bellard | { |
181 | 00406dff | bellard | bits64 z0, z1; |
182 | 00406dff | bellard | int8 negCount = ( - count ) & 63; |
183 | 00406dff | bellard | |
184 | 00406dff | bellard | if ( count == 0 ) { |
185 | 00406dff | bellard | z1 = a1; |
186 | 00406dff | bellard | z0 = a0; |
187 | 00406dff | bellard | } |
188 | 00406dff | bellard | else if ( count < 64 ) { |
189 | 00406dff | bellard | z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 ); |
190 | 00406dff | bellard | z0 = a0>>count; |
191 | 00406dff | bellard | } |
192 | 00406dff | bellard | else { |
193 | 00406dff | bellard | if ( count == 64 ) { |
194 | 00406dff | bellard | z1 = a0 | ( a1 != 0 ); |
195 | 00406dff | bellard | } |
196 | 00406dff | bellard | else if ( count < 128 ) { |
197 | 00406dff | bellard | z1 = ( a0>>( count & 63 ) ) | ( ( ( a0<<negCount ) | a1 ) != 0 ); |
198 | 00406dff | bellard | } |
199 | 00406dff | bellard | else { |
200 | 00406dff | bellard | z1 = ( ( a0 | a1 ) != 0 ); |
201 | 00406dff | bellard | } |
202 | 00406dff | bellard | z0 = 0; |
203 | 00406dff | bellard | } |
204 | 00406dff | bellard | *z1Ptr = z1; |
205 | 00406dff | bellard | *z0Ptr = z0; |
206 | 00406dff | bellard | |
207 | 00406dff | bellard | } |
208 | 00406dff | bellard | |
/*
-------------------------------------------------------------------------------
Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right
by 64 _plus_ the number of bits given in `count'.  The shifted result is
at most 128 nonzero bits; these are broken into two 64-bit pieces which are
stored at the locations pointed to by `z0Ptr' and `z1Ptr'.  The bits shifted
off form a third 64-bit result as follows:  The _last_ bit shifted off is
the most-significant bit of the extra result, and the other 63 bits of the
extra result are all zero if and only if _all_but_the_last_ bits shifted off
were all zero.  This extra result is stored in the location pointed to by
`z2Ptr'.  The value of `count' can be arbitrarily large.
    (This routine makes more sense if `a0', `a1', and `a2' are considered
to form a fixed-point value with binary point between `a1' and `a2'.  This
fixed-point value is shifted right by the number of bits given in `count',
and the integer part of the result is returned at the locations pointed to
by `z0Ptr' and `z1Ptr'.  The fractional part of the result may be slightly
corrupted as described above, and is returned at the location pointed to by
`z2Ptr'.)
-------------------------------------------------------------------------------
*/
INLINE void
 shift128ExtraRightJamming(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     int16 count,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 negCount = ( - count ) & 63;   /* (64 - count) mod 64 */

    if ( count == 0 ) {
        z2 = a2;
        z1 = a1;
        z0 = a0;
    }
    else {
        if ( count < 64 ) {
            /* Extra word receives the bits shifted out of a1. */
            z2 = a1<<negCount;
            z1 = ( a0<<negCount ) | ( a1>>count );
            z0 = a0>>count;
        }
        else {
            if ( count == 64 ) {
                z2 = a1;
                z1 = a0;
            }
            else {
                /* All of a1 is shifted past the extra word; collapse it
                   into a2 so it contributes only to the sticky bit below. */
                a2 |= a1;
                if ( count < 128 ) {
                    z2 = a0<<negCount;
                    z1 = a0>>( count & 63 );
                }
                else {
                    /* count == 128 keeps a0 as the extra word exactly;
                       beyond that a0 only survives as a sticky 0/1. */
                    z2 = ( count == 128 ) ? a0 : ( a0 != 0 );
                    z1 = 0;
                }
            }
            z0 = 0;
        }
        /* Jam every fully discarded bit into bit 0 of the extra word. */
        z2 |= ( a2 != 0 );
    }
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}
279 | 00406dff | bellard | |
280 | 00406dff | bellard | /* |
281 | 00406dff | bellard | ------------------------------------------------------------------------------- |
282 | 00406dff | bellard | Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the |
283 | 00406dff | bellard | number of bits given in `count'. Any bits shifted off are lost. The value |
284 | 00406dff | bellard | of `count' must be less than 64. The result is broken into two 64-bit |
285 | 00406dff | bellard | pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. |
286 | 00406dff | bellard | ------------------------------------------------------------------------------- |
287 | 00406dff | bellard | */ |
288 | 00406dff | bellard | INLINE void |
289 | 00406dff | bellard | shortShift128Left( |
290 | 00406dff | bellard | bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr ) |
291 | 00406dff | bellard | { |
292 | 00406dff | bellard | |
293 | 00406dff | bellard | *z1Ptr = a1<<count; |
294 | 00406dff | bellard | *z0Ptr = |
295 | 00406dff | bellard | ( count == 0 ) ? a0 : ( a0<<count ) | ( a1>>( ( - count ) & 63 ) ); |
296 | 00406dff | bellard | |
297 | 00406dff | bellard | } |
298 | 00406dff | bellard | |
299 | 00406dff | bellard | /* |
300 | 00406dff | bellard | ------------------------------------------------------------------------------- |
301 | 00406dff | bellard | Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left |
302 | 00406dff | bellard | by the number of bits given in `count'. Any bits shifted off are lost. |
303 | 00406dff | bellard | The value of `count' must be less than 64. The result is broken into three |
304 | 00406dff | bellard | 64-bit pieces which are stored at the locations pointed to by `z0Ptr', |
305 | 00406dff | bellard | `z1Ptr', and `z2Ptr'. |
306 | 00406dff | bellard | ------------------------------------------------------------------------------- |
307 | 00406dff | bellard | */ |
308 | 00406dff | bellard | INLINE void |
309 | 00406dff | bellard | shortShift192Left( |
310 | 00406dff | bellard | bits64 a0, |
311 | 00406dff | bellard | bits64 a1, |
312 | 00406dff | bellard | bits64 a2, |
313 | 00406dff | bellard | int16 count, |
314 | 00406dff | bellard | bits64 *z0Ptr, |
315 | 00406dff | bellard | bits64 *z1Ptr, |
316 | 00406dff | bellard | bits64 *z2Ptr |
317 | 00406dff | bellard | ) |
318 | 00406dff | bellard | { |
319 | 00406dff | bellard | bits64 z0, z1, z2; |
320 | 00406dff | bellard | int8 negCount; |
321 | 00406dff | bellard | |
322 | 00406dff | bellard | z2 = a2<<count; |
323 | 00406dff | bellard | z1 = a1<<count; |
324 | 00406dff | bellard | z0 = a0<<count; |
325 | 00406dff | bellard | if ( 0 < count ) { |
326 | 00406dff | bellard | negCount = ( ( - count ) & 63 ); |
327 | 00406dff | bellard | z1 |= a2>>negCount; |
328 | 00406dff | bellard | z0 |= a1>>negCount; |
329 | 00406dff | bellard | } |
330 | 00406dff | bellard | *z2Ptr = z2; |
331 | 00406dff | bellard | *z1Ptr = z1; |
332 | 00406dff | bellard | *z0Ptr = z0; |
333 | 00406dff | bellard | |
334 | 00406dff | bellard | } |
335 | 00406dff | bellard | |
336 | 00406dff | bellard | /* |
337 | 00406dff | bellard | ------------------------------------------------------------------------------- |
338 | 00406dff | bellard | Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit |
339 | 00406dff | bellard | value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so |
340 | 00406dff | bellard | any carry out is lost. The result is broken into two 64-bit pieces which |
341 | 00406dff | bellard | are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. |
342 | 00406dff | bellard | ------------------------------------------------------------------------------- |
343 | 00406dff | bellard | */ |
344 | 00406dff | bellard | INLINE void |
345 | 00406dff | bellard | add128( |
346 | 00406dff | bellard | bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr ) |
347 | 00406dff | bellard | { |
348 | 00406dff | bellard | bits64 z1; |
349 | 00406dff | bellard | |
350 | 00406dff | bellard | z1 = a1 + b1; |
351 | 00406dff | bellard | *z1Ptr = z1; |
352 | 00406dff | bellard | *z0Ptr = a0 + b0 + ( z1 < a1 ); |
353 | 00406dff | bellard | |
354 | 00406dff | bellard | } |
355 | 00406dff | bellard | |
/*
-------------------------------------------------------------------------------
Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the
192-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
modulo 2^192, so any carry out is lost.  The result is broken into three
64-bit pieces which are stored at the locations pointed to by `z0Ptr',
`z1Ptr', and `z2Ptr'.
-------------------------------------------------------------------------------
*/
INLINE void
 add192(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     bits64 b0,
     bits64 b1,
     bits64 b2,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 carry0, carry1;

    z2 = a2 + b2;
    carry1 = ( z2 < a2 );   /* carry out of the lowest limb */
    z1 = a1 + b1;
    carry0 = ( z1 < a1 );   /* carry out of the middle limb (pre carry1) */
    z0 = a0 + b0;
    z1 += carry1;
    /* Adding carry1 into z1 can itself wrap; fold that secondary carry
       into z0 before the primary middle-limb carry. */
    z0 += ( z1 < carry1 );
    z0 += carry0;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}
394 | 00406dff | bellard | |
395 | 00406dff | bellard | /* |
396 | 00406dff | bellard | ------------------------------------------------------------------------------- |
397 | 00406dff | bellard | Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the |
398 | 00406dff | bellard | 128-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo |
399 | 00406dff | bellard | 2^128, so any borrow out (carry out) is lost. The result is broken into two |
400 | 00406dff | bellard | 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and |
401 | 00406dff | bellard | `z1Ptr'. |
402 | 00406dff | bellard | ------------------------------------------------------------------------------- |
403 | 00406dff | bellard | */ |
404 | 00406dff | bellard | INLINE void |
405 | 00406dff | bellard | sub128( |
406 | 00406dff | bellard | bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr ) |
407 | 00406dff | bellard | { |
408 | 00406dff | bellard | |
409 | 00406dff | bellard | *z1Ptr = a1 - b1; |
410 | 00406dff | bellard | *z0Ptr = a0 - b0 - ( a1 < b1 ); |
411 | 00406dff | bellard | |
412 | 00406dff | bellard | } |
413 | 00406dff | bellard | |
/*
-------------------------------------------------------------------------------
Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
from the 192-bit value formed by concatenating `a0', `a1', and `a2'.
Subtraction is modulo 2^192, so any borrow out (carry out) is lost.  The
result is broken into three 64-bit pieces which are stored at the locations
pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'.
-------------------------------------------------------------------------------
*/
INLINE void
 sub192(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     bits64 b0,
     bits64 b1,
     bits64 b2,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 borrow0, borrow1;

    z2 = a2 - b2;
    borrow1 = ( a2 < b2 );   /* borrow out of the lowest limb */
    z1 = a1 - b1;
    borrow0 = ( a1 < b1 );   /* borrow out of the middle limb (pre borrow1) */
    z0 = a0 - b0;
    /* Order matters: `z1 < borrow1' must be tested before borrow1 is
       subtracted from z1, so the secondary borrow is detected on the
       pre-decrement value. */
    z0 -= ( z1 < borrow1 );
    z1 -= borrow1;
    z0 -= borrow0;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}
452 | 00406dff | bellard | |
453 | 00406dff | bellard | /* |
454 | 00406dff | bellard | ------------------------------------------------------------------------------- |
455 | 00406dff | bellard | Multiplies `a' by `b' to obtain a 128-bit product. The product is broken |
456 | 00406dff | bellard | into two 64-bit pieces which are stored at the locations pointed to by |
457 | 00406dff | bellard | `z0Ptr' and `z1Ptr'. |
458 | 00406dff | bellard | ------------------------------------------------------------------------------- |
459 | 00406dff | bellard | */ |
460 | 00406dff | bellard | INLINE void mul64To128( bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr ) |
461 | 00406dff | bellard | { |
462 | 00406dff | bellard | bits32 aHigh, aLow, bHigh, bLow; |
463 | 00406dff | bellard | bits64 z0, zMiddleA, zMiddleB, z1; |
464 | 00406dff | bellard | |
465 | 00406dff | bellard | aLow = a; |
466 | 00406dff | bellard | aHigh = a>>32; |
467 | 00406dff | bellard | bLow = b; |
468 | 00406dff | bellard | bHigh = b>>32; |
469 | 00406dff | bellard | z1 = ( (bits64) aLow ) * bLow; |
470 | 00406dff | bellard | zMiddleA = ( (bits64) aLow ) * bHigh; |
471 | 00406dff | bellard | zMiddleB = ( (bits64) aHigh ) * bLow; |
472 | 00406dff | bellard | z0 = ( (bits64) aHigh ) * bHigh; |
473 | 00406dff | bellard | zMiddleA += zMiddleB; |
474 | 00406dff | bellard | z0 += ( ( (bits64) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 ); |
475 | 00406dff | bellard | zMiddleA <<= 32; |
476 | 00406dff | bellard | z1 += zMiddleA; |
477 | 00406dff | bellard | z0 += ( z1 < zMiddleA ); |
478 | 00406dff | bellard | *z1Ptr = z1; |
479 | 00406dff | bellard | *z0Ptr = z0; |
480 | 00406dff | bellard | |
481 | 00406dff | bellard | } |
482 | 00406dff | bellard | |
483 | 00406dff | bellard | /* |
484 | 00406dff | bellard | ------------------------------------------------------------------------------- |
485 | 00406dff | bellard | Multiplies the 128-bit value formed by concatenating `a0' and `a1' by `b' to |
486 | 00406dff | bellard | obtain a 192-bit product. The product is broken into three 64-bit pieces |
487 | 00406dff | bellard | which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and |
488 | 00406dff | bellard | `z2Ptr'. |
489 | 00406dff | bellard | ------------------------------------------------------------------------------- |
490 | 00406dff | bellard | */ |
491 | 00406dff | bellard | INLINE void |
492 | 00406dff | bellard | mul128By64To192( |
493 | 00406dff | bellard | bits64 a0, |
494 | 00406dff | bellard | bits64 a1, |
495 | 00406dff | bellard | bits64 b, |
496 | 00406dff | bellard | bits64 *z0Ptr, |
497 | 00406dff | bellard | bits64 *z1Ptr, |
498 | 00406dff | bellard | bits64 *z2Ptr |
499 | 00406dff | bellard | ) |
500 | 00406dff | bellard | { |
501 | 00406dff | bellard | bits64 z0, z1, z2, more1; |
502 | 00406dff | bellard | |
503 | 00406dff | bellard | mul64To128( a1, b, &z1, &z2 ); |
504 | 00406dff | bellard | mul64To128( a0, b, &z0, &more1 ); |
505 | 00406dff | bellard | add128( z0, more1, 0, z1, &z0, &z1 ); |
506 | 00406dff | bellard | *z2Ptr = z2; |
507 | 00406dff | bellard | *z1Ptr = z1; |
508 | 00406dff | bellard | *z0Ptr = z0; |
509 | 00406dff | bellard | |
510 | 00406dff | bellard | } |
511 | 00406dff | bellard | |
/*
-------------------------------------------------------------------------------
Multiplies the 128-bit value formed by concatenating `a0' and `a1' by the
128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit
product.  The product is broken into four 64-bit pieces which are stored at
the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
-------------------------------------------------------------------------------
*/
INLINE void
 mul128To256(
     bits64 a0,
     bits64 a1,
     bits64 b0,
     bits64 b1,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr,
     bits64 *z3Ptr
 )
{
    bits64 z0, z1, z2, z3;
    bits64 more1, more2;

    /* Schoolbook multiply on 64-bit limbs: a1*b1 fills the low two limbs,
       a1*b0 and a0*b1 are the cross terms, a0*b0 the top. */
    mul64To128( a1, b1, &z2, &z3 );
    mul64To128( a1, b0, &z1, &more2 );
    /* Fold the low half of a1*b0 into z2; the carry propagates into z1. */
    add128( z1, more2, 0, z2, &z1, &z2 );
    mul64To128( a0, b0, &z0, &more1 );
    /* Fold the low half of a0*b0 into z1; the carry propagates into z0. */
    add128( z0, more1, 0, z1, &z0, &z1 );
    mul64To128( a0, b1, &more1, &more2 );
    /* Add the a0*b1 cross term into z2, keeping its carry in more1... */
    add128( more1, more2, 0, z2, &more1, &z2 );
    /* ...then propagate that carry into the upper 128 bits z0:z1. */
    add128( z0, z1, 0, more1, &z0, &z1 );
    *z3Ptr = z3;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}
549 | 00406dff | bellard | |
/*
-------------------------------------------------------------------------------
Returns an approximation to the 64-bit integer quotient obtained by dividing
`b' into the 128-bit value formed by concatenating `a0' and `a1'.  The
divisor `b' must be at least 2^63.  If q is the exact quotient truncated
toward zero, the approximation returned lies between q and q + 2 inclusive.
If the exact quotient q is larger than 64 bits, the maximum positive 64-bit
unsigned integer is returned.
-------------------------------------------------------------------------------
*/
static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
{
    bits64 b0, b1;
    bits64 rem0, rem1, term0, term1;
    bits64 z;
    /* Quotient would not fit in 64 bits: saturate. */
    if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF );
    b0 = b>>32;
    /* First guess at the high 32 quotient bits, using only the top 32 bits
       of the divisor (saturated when even that division would overflow). */
    z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32;
    mul64To128( b, z, &term0, &term1 );
    sub128( a0, a1, term0, term1, &rem0, &rem1 );
    /* The guess may overshoot slightly; step it back by 2^32 until the
       partial remainder is nonnegative.  (b0:b1 here is b<<32 viewed as a
       128-bit value.) */
    while ( ( (sbits64) rem0 ) < 0 ) {
        z -= LIT64( 0x100000000 );
        b1 = b<<32;
        add128( rem0, rem1, b0, b1, &rem0, &rem1 );
    }
    rem0 = ( rem0<<32 ) | ( rem1>>32 );
    /* Estimate the low 32 quotient bits from the updated remainder. */
    z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0;
    return z;

}
580 | 00406dff | bellard | |
581 | 00406dff | bellard | /* |
582 | 00406dff | bellard | ------------------------------------------------------------------------------- |
583 | 00406dff | bellard | Returns an approximation to the square root of the 32-bit significand given |
584 | 00406dff | bellard | by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of |
585 | 00406dff | bellard | `aExp' (the least significant bit) is 1, the integer returned approximates |
586 | 00406dff | bellard | 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp' |
587 | 00406dff | bellard | is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either |
588 | 00406dff | bellard | case, the approximation returned lies strictly within +/-2 of the exact |
589 | 00406dff | bellard | value. |
590 | 00406dff | bellard | ------------------------------------------------------------------------------- |
591 | 00406dff | bellard | */ |
static bits32 estimateSqrt32( int16 aExp, bits32 a )
{
    /* Per-interval correction terms for the initial estimate, indexed
       by the 4 bits of `a' just below its leading 1 bit.  One table is
       used when `aExp' is odd, the other when it is even (the two cases
       use different scalings of `a'; see the comment block above). */
    static const bits16 sqrtOddAdjustments[] = {
        0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
        0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67
    };
    static const bits16 sqrtEvenAdjustments[] = {
        0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
        0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
    };
    int8 index;
    bits32 z;

    /* `a' is at least 2^31 (bit 31 set), so bits 30..27 select one of
       16 subintervals for the table lookup. */
    index = ( a>>27 ) & 15;
    if ( aExp & 1 ) {
        /* Odd exponent: ~16-bit initial estimate, then one refinement
           step combining z and a/z with the appropriate scaling. */
        z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];
        z = ( ( a / z )<<14 ) + ( z<<15 );
        /* Halve `a' so the final step below uses the odd-case scaling. */
        a >>= 1;
    }
    else {
        z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];
        z = a / z + z;
        /* If the refined estimate reached 2^17, saturate; otherwise
           scale it up by 2^15. */
        z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
        /* If the estimate underflows `a', fall back to a/2 (arithmetic
           shift preserves the original's exact result here). */
        if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 );
    }
    /* Final refinement with a 64-bit intermediate: combine (a<<31)/z
       with z/2 to tighten the estimate to within +/-2 of the true value. */
    return ( (bits32) ( ( ( (bits64) a )<<31 ) / z ) ) + ( z>>1 );

}
620 | 00406dff | bellard | |
621 | 00406dff | bellard | /* |
622 | 00406dff | bellard | ------------------------------------------------------------------------------- |
623 | 00406dff | bellard | Returns the number of leading 0 bits before the most-significant 1 bit |
624 | 00406dff | bellard | of `a'. If `a' is zero, 32 is returned. |
625 | 00406dff | bellard | ------------------------------------------------------------------------------- |
626 | 00406dff | bellard | */ |
627 | 00406dff | bellard | static int8 countLeadingZeros32( bits32 a ) |
628 | 00406dff | bellard | { |
629 | 00406dff | bellard | static const int8 countLeadingZerosHigh[] = { |
630 | 00406dff | bellard | 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, |
631 | 00406dff | bellard | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, |
632 | 00406dff | bellard | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
633 | 00406dff | bellard | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
634 | 00406dff | bellard | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
635 | 00406dff | bellard | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
636 | 00406dff | bellard | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
637 | 00406dff | bellard | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
638 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
639 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
640 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
641 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
642 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
643 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
644 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
645 | 00406dff | bellard | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 |
646 | 00406dff | bellard | }; |
647 | 00406dff | bellard | int8 shiftCount; |
648 | 00406dff | bellard | |
649 | 00406dff | bellard | shiftCount = 0; |
650 | 00406dff | bellard | if ( a < 0x10000 ) { |
651 | 00406dff | bellard | shiftCount += 16; |
652 | 00406dff | bellard | a <<= 16; |
653 | 00406dff | bellard | } |
654 | 00406dff | bellard | if ( a < 0x1000000 ) { |
655 | 00406dff | bellard | shiftCount += 8; |
656 | 00406dff | bellard | a <<= 8; |
657 | 00406dff | bellard | } |
658 | 00406dff | bellard | shiftCount += countLeadingZerosHigh[ a>>24 ]; |
659 | 00406dff | bellard | return shiftCount; |
660 | 00406dff | bellard | |
661 | 00406dff | bellard | } |
662 | 00406dff | bellard | |
663 | 00406dff | bellard | /* |
664 | 00406dff | bellard | ------------------------------------------------------------------------------- |
665 | 00406dff | bellard | Returns the number of leading 0 bits before the most-significant 1 bit |
666 | 00406dff | bellard | of `a'. If `a' is zero, 64 is returned. |
667 | 00406dff | bellard | ------------------------------------------------------------------------------- |
668 | 00406dff | bellard | */ |
669 | 00406dff | bellard | static int8 countLeadingZeros64( bits64 a ) |
670 | 00406dff | bellard | { |
671 | 00406dff | bellard | int8 shiftCount; |
672 | 00406dff | bellard | |
673 | 00406dff | bellard | shiftCount = 0; |
674 | 00406dff | bellard | if ( a < ( (bits64) 1 )<<32 ) { |
675 | 00406dff | bellard | shiftCount += 32; |
676 | 00406dff | bellard | } |
677 | 00406dff | bellard | else { |
678 | 00406dff | bellard | a >>= 32; |
679 | 00406dff | bellard | } |
680 | 00406dff | bellard | shiftCount += countLeadingZeros32( a ); |
681 | 00406dff | bellard | return shiftCount; |
682 | 00406dff | bellard | |
683 | 00406dff | bellard | } |
684 | 00406dff | bellard | |
685 | 00406dff | bellard | /* |
686 | 00406dff | bellard | ------------------------------------------------------------------------------- |
687 | 00406dff | bellard | Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' |
688 | 00406dff | bellard | is equal to the 128-bit value formed by concatenating `b0' and `b1'. |
689 | 00406dff | bellard | Otherwise, returns 0. |
690 | 00406dff | bellard | ------------------------------------------------------------------------------- |
691 | 00406dff | bellard | */ |
692 | 00406dff | bellard | INLINE flag eq128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) |
693 | 00406dff | bellard | { |
694 | 00406dff | bellard | |
695 | 00406dff | bellard | return ( a0 == b0 ) && ( a1 == b1 ); |
696 | 00406dff | bellard | |
697 | 00406dff | bellard | } |
698 | 00406dff | bellard | |
699 | 00406dff | bellard | /* |
700 | 00406dff | bellard | ------------------------------------------------------------------------------- |
701 | 00406dff | bellard | Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less |
702 | 00406dff | bellard | than or equal to the 128-bit value formed by concatenating `b0' and `b1'. |
703 | 00406dff | bellard | Otherwise, returns 0. |
704 | 00406dff | bellard | ------------------------------------------------------------------------------- |
705 | 00406dff | bellard | */ |
706 | 00406dff | bellard | INLINE flag le128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) |
707 | 00406dff | bellard | { |
708 | 00406dff | bellard | |
709 | 00406dff | bellard | return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) ); |
710 | 00406dff | bellard | |
711 | 00406dff | bellard | } |
712 | 00406dff | bellard | |
713 | 00406dff | bellard | /* |
714 | 00406dff | bellard | ------------------------------------------------------------------------------- |
715 | 00406dff | bellard | Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less |
716 | 00406dff | bellard | than the 128-bit value formed by concatenating `b0' and `b1'. Otherwise, |
717 | 00406dff | bellard | returns 0. |
718 | 00406dff | bellard | ------------------------------------------------------------------------------- |
719 | 00406dff | bellard | */ |
720 | 00406dff | bellard | INLINE flag lt128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) |
721 | 00406dff | bellard | { |
722 | 00406dff | bellard | |
723 | 00406dff | bellard | return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) ); |
724 | 00406dff | bellard | |
725 | 00406dff | bellard | } |
726 | 00406dff | bellard | |
727 | 00406dff | bellard | /* |
728 | 00406dff | bellard | ------------------------------------------------------------------------------- |
729 | 00406dff | bellard | Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is |
730 | 00406dff | bellard | not equal to the 128-bit value formed by concatenating `b0' and `b1'. |
731 | 00406dff | bellard | Otherwise, returns 0. |
732 | 00406dff | bellard | ------------------------------------------------------------------------------- |
733 | 00406dff | bellard | */ |
734 | 00406dff | bellard | INLINE flag ne128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 ) |
735 | 00406dff | bellard | { |
736 | 00406dff | bellard | |
737 | 00406dff | bellard | return ( a0 != b0 ) || ( a1 != b1 ); |
738 | 00406dff | bellard | |
739 | 00406dff | bellard | } |