target-alpha/op_helper.c @ 5dba48a8
/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"
#include "qemu-timer.h"

/*****************************************************************************/
/* Exceptions processing helpers */
void QEMU_NORETURN helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info. */
    return (uint32_t)cpu_get_real_ticks();
}

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

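/* Arithmetic insns with the /V qualifier trap on signed overflow.  For
   addition, two's-complement overflow occurs exactly when both operands
   have the same sign and the sum's sign differs; (a ^ b ^ -1) & (a ^ sum)
   has its top bit set in precisely that case.  E.g. 0x7fffffffffffffff + 1
   wraps to 0x8000000000000000 and traps with EXC_M_IOV.  The subtraction
   variants below use the complementary identity (a ^ b) & (res ^ a).  */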
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return (int64_t)((int32_t)res);
}

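/* The signed 128-bit product of op1 and op2 arrives in th:tl.  It fits in
   64 bits iff th is the sign extension of tl; the test below approximates
   this by trapping whenever th is neither 0 nor -1, exploiting unsigned
   wrap-around so that th + 1 <= 1 covers both legal values at once.  */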
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

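/* Expand an 8-bit byte-select mask into a 64-bit mask, one byte lane per
   bit.  Multiplying each 0xFF.. constant by a 0/1 bit builds the mask
   without branches; the selected bytes are then cleared from op, so e.g.
   byte_zap(x, 0x01) zeroes the least significant byte of x.  */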
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

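/* CMPBGE compares all eight byte lanes at once (unsigned >=) and collects
   the results into a bit mask.  Its classic use is finding a zero byte in
   string code: cmpbge(0, x) sets bit i exactly when byte i of x is zero,
   since 0 >= b holds only for b == 0.  */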
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

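/* PERR is a sum of absolute differences over the eight byte lanes (the
   "pixel error" primitive of the motion-video extension); the result is
   at most 8 * 255 and therefore always fits in the low 11 bits.  */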
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}

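/* PKLB/PKWB truncate the 32-bit (resp. 16-bit) lanes of the source to
   bytes and pack them into the low end of the result; UNPKBL/UNPKBW are
   the inverse spread, zero-filling the upper bytes of each lane.  */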
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        env->ipr[IPR_EXC_MASK] |= 1ull << regno;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }
        helper_excp(EXCP_ARITH, hw_exc);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}

/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        helper_excp(EXCP_ARITH, EXC_M_INV);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
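/* In the register file an F-float is kept widened into the 64-bit G/T
   layout.  VAX F uses an excess-128 exponent on a 0.1m significand where
   IEEE single uses excess-127 on 1.m, so the same value carries an
   exponent two larger in VAX terms -- hence the +2/-2 exponent
   adjustments and the 29-bit mantissa shift in the converters below.  */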
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

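/* An S-float is kept in the register file in T-float form: the 8-bit
   single-precision exponent is rebased into the 11-bit field (0x380 below
   is the bias difference 1023 - 127), exp == 0xff widens to 0x7ff so that
   Inf/NaN remain Inf/NaN, and the fraction moves up by 29 bits.  */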
/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
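/* A true comparison result is written as 0x4000000000000000, the T-float
   encoding of 2.0, which is the value the Alpha CMPTxx/CMPGxx insns
   define for "true"; false is 0.0.  */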
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

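/* Worked example for the round-to-nearest path above: a = 2.5
   (0x4004000000000000) has exp = 1024, so shift ends up 51; ret = 2 and
   round is exactly 1 << 63 (the discarded fraction is exactly 0.5), so
   ties-to-even keeps ret at 2 instead of bumping it to 3.  */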
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
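/* HW_REI/HW_RET return from PALcode: bit 0 of the saved EXC_ADDR selects
   PALmode for the resumed context, so it is stripped from the new PC and
   preserved in the IPR.  Clearing intr_flag and the lock address also
   discards any outstanding load-locked reservation across the switch.  */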
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    env->intr_flag = 0;
    env->lock_addr = -1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    env->intr_flag = 0;
    env->lock_addr = -1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, we emulate the PALcode, then we should never see
 *      HW_LD / HW_ST instructions.
 */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

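/* Raw load/store helpers for HW_LD/HW_ST, using the (t0, t1) =
   (data, address) convention of the callers above.  The _l variants
   record the locked address for a later store-conditional; the _c
   variants store only while the lock still matches, then invalidate it
   with an address (1) that no aligned locked load can ever produce.  */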
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

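/* Instantiate the softmmu accessors: each inclusion of softmmu_template.h
   generates the load/store code for one access size, 1 << SHIFT bytes.  */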
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif