root / target-mips / op_helper.c @ c8c2227e
History | View | Annotate | Download (74.5 kB)
1 |
/*
|
---|---|
2 |
* MIPS emulation helpers for qemu.
|
3 |
*
|
4 |
* Copyright (c) 2004-2005 Jocelyn Mayer
|
5 |
*
|
6 |
* This library is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU Lesser General Public
|
8 |
* License as published by the Free Software Foundation; either
|
9 |
* version 2 of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This library is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
* Lesser General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU Lesser General Public
|
17 |
* License along with this library; if not, write to the Free Software
|
18 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
19 |
*/
|
20 |
#include <stdlib.h> |
21 |
#include "exec.h" |
22 |
|
23 |
#include "host-utils.h" |
24 |
|
25 |
/*****************************************************************************/
|
26 |
/* Exceptions processing helpers */
|
27 |
|
28 |
/* Raise a CPU exception with an attached error code: record it in the
   global CPU state and long-jump back to the main execution loop via
   cpu_loop_exit().  Never returns.
   NOTE(review): 'env', 'T0' and 'logfile' are globals provided by the
   dyngen environment (exec.h). */
void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    /* Trace architectural exceptions only; QEMU-internal exception
       numbers (>= 0x100) are not logged here. */
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    T0 = 0;              /* clear the temporary before leaving translated code */
    cpu_loop_exit();     /* longjmp back to cpu_exec(); does not return */
}

/* Convenience wrapper: raise an exception with error code 0. */
void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}
44 |
|
45 |
/* Re-check for a pending, deliverable interrupt and redispatch it.
   An interrupt is deliverable only when the CPU is not at exception
   level (EXL), not at error level (ERL), not in debug mode, interrupts
   are globally enabled (IE), and at least one interrupt is both pending
   (Cause.IP) and unmasked (Status.IM). */
void do_interrupt_restart (void)
{
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        (env->CP0_Status & (1 << CP0St_IE)) &&
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
        /* Clear the Cause.ExcCode field before raising the interrupt. */
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
        do_raise_exception(EXCP_EXT_INTERRUPT);
    }
}
56 |
|
57 |
/* Given a host PC inside generated code, locate the owning translation
   block and restore the guest CPU state to the matching guest
   instruction.  If the PC maps to no TB, the state is left untouched. */
void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}
67 |
|
68 |
/* CLO: count leading ones in the 32-bit value in T0. */
void do_clo (void)
{
    T0 = clo32(T0);
}

/* CLZ: count leading zeros in the 32-bit value in T0. */
void do_clz (void)
{
    T0 = clz32(T0);
}
77 |
|
78 |
#if defined(TARGET_MIPS64)
|
79 |
#if TARGET_LONG_BITS > HOST_LONG_BITS
|
80 |
/* Those might call libgcc functions. */
|
81 |
void do_dsll (void) |
82 |
{ |
83 |
T0 = T0 << T1; |
84 |
} |
85 |
|
86 |
void do_dsll32 (void) |
87 |
{ |
88 |
T0 = T0 << (T1 + 32);
|
89 |
} |
90 |
|
91 |
void do_dsra (void) |
92 |
{ |
93 |
T0 = (int64_t)T0 >> T1; |
94 |
} |
95 |
|
96 |
void do_dsra32 (void) |
97 |
{ |
98 |
T0 = (int64_t)T0 >> (T1 + 32);
|
99 |
} |
100 |
|
101 |
void do_dsrl (void) |
102 |
{ |
103 |
T0 = T0 >> T1; |
104 |
} |
105 |
|
106 |
void do_dsrl32 (void) |
107 |
{ |
108 |
T0 = T0 >> (T1 + 32);
|
109 |
} |
110 |
|
111 |
void do_drotr (void) |
112 |
{ |
113 |
target_ulong tmp; |
114 |
|
115 |
if (T1) {
|
116 |
tmp = T0 << (0x40 - T1);
|
117 |
T0 = (T0 >> T1) | tmp; |
118 |
} |
119 |
} |
120 |
|
121 |
void do_drotr32 (void) |
122 |
{ |
123 |
target_ulong tmp; |
124 |
|
125 |
tmp = T0 << (0x40 - (32 + T1)); |
126 |
T0 = (T0 >> (32 + T1)) | tmp;
|
127 |
} |
128 |
|
129 |
void do_dsllv (void) |
130 |
{ |
131 |
T0 = T1 << (T0 & 0x3F);
|
132 |
} |
133 |
|
134 |
void do_dsrav (void) |
135 |
{ |
136 |
T0 = (int64_t)T1 >> (T0 & 0x3F);
|
137 |
} |
138 |
|
139 |
void do_dsrlv (void) |
140 |
{ |
141 |
T0 = T1 >> (T0 & 0x3F);
|
142 |
} |
143 |
|
144 |
void do_drotrv (void) |
145 |
{ |
146 |
target_ulong tmp; |
147 |
|
148 |
T0 &= 0x3F;
|
149 |
if (T0) {
|
150 |
tmp = T1 << (0x40 - T0);
|
151 |
T0 = (T1 >> T0) | tmp; |
152 |
} else
|
153 |
T0 = T1; |
154 |
} |
155 |
|
156 |
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */ |
157 |
|
158 |
void do_dclo (void) |
159 |
{ |
160 |
T0 = clo64(T0); |
161 |
} |
162 |
|
163 |
void do_dclz (void) |
164 |
{ |
165 |
T0 = clz64(T0); |
166 |
} |
167 |
|
168 |
#endif /* TARGET_MIPS64 */ |
169 |
|
170 |
/* 64 bits arithmetic for 32 bits hosts */
|
171 |
/* Read HI:LO of the current thread context as a single 64-bit value
   (only the low 32 bits of each register are meaningful here). */
static always_inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->HI[env->current_tc][0]) << 32) | (uint32_t)env->LO[env->current_tc][0];
}

/* Split a 64-bit value into HI (upper half) and LO (lower half),
   each sign-extended to target_ulong. */
static always_inline void set_HILO (uint64_t HILO)
{
    env->LO[env->current_tc][0] = (int32_t)HILO;
    env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}

/* As set_HILO, but additionally copy the HI half into T0 (for the
   vr54xx "hi" multiply variants that return HI in the destination). */
static always_inline void set_HIT0_LO (uint64_t HILO)
{
    env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
    T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}

/* As set_HILO, but additionally copy the LO half into T0. */
static always_inline void set_HI_LOT0 (uint64_t HILO)
{
    T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}
193 |
|
194 |
#if TARGET_LONG_BITS > HOST_LONG_BITS
|
195 |
void do_madd (void) |
196 |
{ |
197 |
int64_t tmp; |
198 |
|
199 |
tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1); |
200 |
set_HILO((int64_t)get_HILO() + tmp); |
201 |
} |
202 |
|
203 |
void do_maddu (void) |
204 |
{ |
205 |
uint64_t tmp; |
206 |
|
207 |
tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1); |
208 |
set_HILO(get_HILO() + tmp); |
209 |
} |
210 |
|
211 |
void do_msub (void) |
212 |
{ |
213 |
int64_t tmp; |
214 |
|
215 |
tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1); |
216 |
set_HILO((int64_t)get_HILO() - tmp); |
217 |
} |
218 |
|
219 |
void do_msubu (void) |
220 |
{ |
221 |
uint64_t tmp; |
222 |
|
223 |
tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1); |
224 |
set_HILO(get_HILO() - tmp); |
225 |
} |
226 |
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */ |
227 |
|
228 |
/* Multiplication variants of the vr54xx.  All write HI:LO; the
   set_HI_LOT0 forms additionally return LO in T0, the set_HIT0_LO
   forms return HI in T0. */

/* MULS: negated signed product. */
void do_muls (void)
{
    set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

/* MULSU: negated unsigned product. */
void do_mulsu (void)
{
    set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

/* MACC: signed multiply-accumulate, result LO in T0. */
void do_macc (void)
{
    set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

/* MACCHI: signed multiply-accumulate, result HI in T0. */
void do_macchi (void)
{
    set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

/* MACCU: unsigned multiply-accumulate, result LO in T0. */
void do_maccu (void)
{
    set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

/* MACCHIU: unsigned multiply-accumulate, result HI in T0. */
void do_macchiu (void)
{
    set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

/* MSAC: signed multiply-subtract, result LO in T0. */
void do_msac (void)
{
    set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

/* MSACHI: signed multiply-subtract, result HI in T0. */
void do_msachi (void)
{
    set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

/* MSACU: unsigned multiply-subtract, result LO in T0. */
void do_msacu (void)
{
    set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

/* MSACHIU: unsigned multiply-subtract, result HI in T0. */
void do_msachiu (void)
{
    set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

/* MULHI: signed product, HI in T0. */
void do_mulhi (void)
{
    set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

/* MULHIU: unsigned product, HI in T0. */
void do_mulhiu (void)
{
    set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

/* MULSHI: negated signed product, HI in T0. */
void do_mulshi (void)
{
    set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

/* MULSHIU: negated unsigned product, HI in T0. */
void do_mulshiu (void)
{
    set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}
298 |
|
299 |
#ifdef TARGET_MIPS64
|
300 |
void do_dmult (void) |
301 |
{ |
302 |
muls64(&(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), T0, T1); |
303 |
} |
304 |
|
305 |
void do_dmultu (void) |
306 |
{ |
307 |
mulu64(&(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), T0, T1); |
308 |
} |
309 |
#endif
|
310 |
|
311 |
/* Byte-lane helpers for the unaligned load/store helpers below.
   GET_LMASK(v) is the byte position of address v within its word,
   counted from the most-significant end; GET_OFFSET steps the address
   towards less-significant bytes.  Both depend on guest endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
|
318 |
|
319 |
/* LWL: load word left.  Merge the bytes of the word containing address
   T0, from that address down to the word boundary, into the
   most-significant end of T1, byte by byte.  mem_idx selects the
   privilege level of the access: 0 = kernel, 1 = supervisor,
   2/default = user. */
void do_lwl(int mem_idx)
{
    target_ulong tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* The first byte always participates; each further byte only if the
       address is far enough from the end of the word. */
    tmp = ldfun(T0);
    T1 = (T1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(T0) <= 2) {
        tmp = ldfun(GET_OFFSET(T0, 1));
        T1 = (T1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(T0) <= 1) {
        tmp = ldfun(GET_OFFSET(T0, 2));
        T1 = (T1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(T0) == 0) {
        tmp = ldfun(GET_OFFSET(T0, 3));
        T1 = (T1 & 0xFFFFFF00) | tmp;
    }
    T1 = (int32_t)T1;   /* sign-extend the 32-bit result */
}
355 |
|
356 |
/* LWR: load word right.  Mirror image of do_lwl: merge the bytes of
   the word containing address T0, from that address up to the word
   boundary, into the least-significant end of T1.  mem_idx: 0 = kernel,
   1 = supervisor, 2/default = user. */
void do_lwr(int mem_idx)
{
    target_ulong tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    tmp = ldfun(T0);
    T1 = (T1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(T0) >= 1) {
        tmp = ldfun(GET_OFFSET(T0, -1));
        T1 = (T1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(T0) >= 2) {
        tmp = ldfun(GET_OFFSET(T0, -2));
        T1 = (T1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(T0) == 3) {
        tmp = ldfun(GET_OFFSET(T0, -3));
        T1 = (T1 & 0x00FFFFFF) | (tmp << 24);
    }
    T1 = (int32_t)T1;   /* sign-extend the 32-bit result */
}
392 |
|
393 |
/* SWL: store word left.  Store the most-significant bytes of T1 to the
   word containing address T0, from that address down to the word
   boundary.  mem_idx: 0 = kernel, 1 = supervisor, 2/default = user. */
void do_swl(int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    stfun(T0, (uint8_t)(T1 >> 24));

    if (GET_LMASK(T0) <= 2)
        stfun(GET_OFFSET(T0, 1), (uint8_t)(T1 >> 16));

    if (GET_LMASK(T0) <= 1)
        stfun(GET_OFFSET(T0, 2), (uint8_t)(T1 >> 8));

    if (GET_LMASK(T0) == 0)
        stfun(GET_OFFSET(T0, 3), (uint8_t)T1);
}
419 |
|
420 |
/* SWR: store word right.  Mirror image of do_swl: store the
   least-significant bytes of T1 to the word containing address T0,
   from that address up to the word boundary.  mem_idx: 0 = kernel,
   1 = supervisor, 2/default = user. */
void do_swr(int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    stfun(T0, (uint8_t)T1);

    if (GET_LMASK(T0) >= 1)
        stfun(GET_OFFSET(T0, -1), (uint8_t)(T1 >> 8));

    if (GET_LMASK(T0) >= 2)
        stfun(GET_OFFSET(T0, -2), (uint8_t)(T1 >> 16));

    if (GET_LMASK(T0) == 3)
        stfun(GET_OFFSET(T0, -3), (uint8_t)(T1 >> 24));
}
446 |
|
447 |
#if defined(TARGET_MIPS64)
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work. */

/* Byte position of an address within its doubleword, counted from the
   most-significant end (endianness-dependent, like GET_LMASK above). */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
|
456 |
|
457 |
/* LDL: load doubleword left.  64-bit analogue of do_lwl: merge bytes
   from address T0 down to the doubleword boundary into the
   most-significant end of T1.  mem_idx: 0 = kernel, 1 = supervisor,
   2/default = user. */
void do_ldl(int mem_idx)
{
    uint64_t tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    target_ulong (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    tmp = ldfun(T0);
    T1 = (T1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(T0) <= 6) {
        tmp = ldfun(GET_OFFSET(T0, 1));
        T1 = (T1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(T0) <= 5) {
        tmp = ldfun(GET_OFFSET(T0, 2));
        T1 = (T1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(T0) <= 4) {
        tmp = ldfun(GET_OFFSET(T0, 3));
        T1 = (T1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(T0) <= 3) {
        tmp = ldfun(GET_OFFSET(T0, 4));
        T1 = (T1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(T0) <= 2) {
        tmp = ldfun(GET_OFFSET(T0, 5));
        T1 = (T1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(T0) <= 1) {
        tmp = ldfun(GET_OFFSET(T0, 6));
        T1 = (T1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(T0) == 0) {
        tmp = ldfun(GET_OFFSET(T0, 7));
        T1 = (T1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }
}
512 |
|
513 |
/* LDR: load doubleword right.  64-bit analogue of do_lwr: merge bytes
   from address T0 up to the doubleword boundary into the
   least-significant end of T1.  mem_idx: 0 = kernel, 1 = supervisor,
   2/default = user. */
void do_ldr(int mem_idx)
{
    uint64_t tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    target_ulong (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    tmp = ldfun(T0);
    T1 = (T1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(T0) >= 1) {
        tmp = ldfun(GET_OFFSET(T0, -1));
        T1 = (T1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(T0) >= 2) {
        tmp = ldfun(GET_OFFSET(T0, -2));
        T1 = (T1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(T0) >= 3) {
        tmp = ldfun(GET_OFFSET(T0, -3));
        T1 = (T1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(T0) >= 4) {
        tmp = ldfun(GET_OFFSET(T0, -4));
        T1 = (T1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(T0) >= 5) {
        tmp = ldfun(GET_OFFSET(T0, -5));
        T1 = (T1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(T0) >= 6) {
        tmp = ldfun(GET_OFFSET(T0, -6));
        T1 = (T1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(T0) == 7) {
        tmp = ldfun(GET_OFFSET(T0, -7));
        T1 = (T1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }
}
568 |
|
569 |
/* SDL: store doubleword left.  64-bit analogue of do_swl: store the
   most-significant bytes of T1 to the doubleword containing address T0,
   from that address down to the boundary.  mem_idx: 0 = kernel,
   1 = supervisor, 2/default = user. */
void do_sdl(int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    stfun(T0, (uint8_t)(T1 >> 56));

    if (GET_LMASK64(T0) <= 6)
        stfun(GET_OFFSET(T0, 1), (uint8_t)(T1 >> 48));

    if (GET_LMASK64(T0) <= 5)
        stfun(GET_OFFSET(T0, 2), (uint8_t)(T1 >> 40));

    if (GET_LMASK64(T0) <= 4)
        stfun(GET_OFFSET(T0, 3), (uint8_t)(T1 >> 32));

    if (GET_LMASK64(T0) <= 3)
        stfun(GET_OFFSET(T0, 4), (uint8_t)(T1 >> 24));

    if (GET_LMASK64(T0) <= 2)
        stfun(GET_OFFSET(T0, 5), (uint8_t)(T1 >> 16));

    if (GET_LMASK64(T0) <= 1)
        stfun(GET_OFFSET(T0, 6), (uint8_t)(T1 >> 8));

    if (GET_LMASK64(T0) <= 0)
        stfun(GET_OFFSET(T0, 7), (uint8_t)T1);
}
607 |
|
608 |
/* SDR: store doubleword right.  64-bit analogue of do_swr: store the
   least-significant bytes of T1 to the doubleword containing address
   T0, from that address up to the boundary.  mem_idx: 0 = kernel,
   1 = supervisor, 2/default = user. */
void do_sdr(int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    stfun(T0, (uint8_t)T1);

    if (GET_LMASK64(T0) >= 1)
        stfun(GET_OFFSET(T0, -1), (uint8_t)(T1 >> 8));

    if (GET_LMASK64(T0) >= 2)
        stfun(GET_OFFSET(T0, -2), (uint8_t)(T1 >> 16));

    if (GET_LMASK64(T0) >= 3)
        stfun(GET_OFFSET(T0, -3), (uint8_t)(T1 >> 24));

    if (GET_LMASK64(T0) >= 4)
        stfun(GET_OFFSET(T0, -4), (uint8_t)(T1 >> 32));

    if (GET_LMASK64(T0) >= 5)
        stfun(GET_OFFSET(T0, -5), (uint8_t)(T1 >> 40));

    if (GET_LMASK64(T0) >= 6)
        stfun(GET_OFFSET(T0, -6), (uint8_t)(T1 >> 48));

    if (GET_LMASK64(T0) == 7)
        stfun(GET_OFFSET(T0, -7), (uint8_t)(T1 >> 56));
}
#endif /* TARGET_MIPS64 */
647 |
|
648 |
#ifdef CONFIG_USER_ONLY
/* User-mode emulation has no CP0 timer, interrupt controller or TLB;
   reaching any of these helpers indicates an emulator bug, so abort. */

void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}

void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}

void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}

void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}

void cpu_mips_start_count(CPUState *env)
{
    cpu_abort(env, "start count\n");
}

void cpu_mips_stop_count(CPUState *env)
{
    cpu_abort(env, "stop count\n");
}

void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}

void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}

void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}

#else
|
700 |
|
701 |
/* CP0 helpers */

/* Read the per-processor (MVP) MVPControl register into T0. */
void do_mfc0_mvpcontrol (void)
{
    T0 = env->mvp->CP0_MVPControl;
}

/* Read MVPConf0 into T0. */
void do_mfc0_mvpconf0 (void)
{
    T0 = env->mvp->CP0_MVPConf0;
}

/* Read MVPConf1 into T0. */
void do_mfc0_mvpconf1 (void)
{
    T0 = env->mvp->CP0_MVPConf1;
}

/* Read the Random register (randomized TLB replacement index). */
void do_mfc0_random (void)
{
    T0 = (int32_t)cpu_mips_get_random(env);
}
721 |
|
722 |
/* Per-thread-context (MT ASE) register reads.  The plain do_mfc0_*
   forms read the current TC; the do_mftc0_* forms read the TC selected
   by VPEControl.TargTC.
   NOTE(review): 'other_tc' is masked with (0xff << CP0VPECo_TargTC) but
   used directly as an array index — correct only if CP0VPECo_TargTC is
   0; verify against the CP0 field definitions. */

void do_mfc0_tcstatus (void)
{
    T0 = env->CP0_TCStatus[env->current_tc];
}

void do_mftc0_tcstatus(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->CP0_TCStatus[other_tc];
}

void do_mfc0_tcbind (void)
{
    T0 = env->CP0_TCBind[env->current_tc];
}

void do_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->CP0_TCBind[other_tc];
}

/* TCRestart is the restart PC of the thread context. */
void do_mfc0_tcrestart (void)
{
    T0 = env->PC[env->current_tc];
}

void do_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->PC[other_tc];
}

void do_mfc0_tchalt (void)
{
    T0 = env->CP0_TCHalt[env->current_tc];
}

void do_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->CP0_TCHalt[other_tc];
}

void do_mfc0_tccontext (void)
{
    T0 = env->CP0_TCContext[env->current_tc];
}

void do_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->CP0_TCContext[other_tc];
}

void do_mfc0_tcschedule (void)
{
    T0 = env->CP0_TCSchedule[env->current_tc];
}

void do_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->CP0_TCSchedule[other_tc];
}

void do_mfc0_tcschefback (void)
{
    T0 = env->CP0_TCScheFBack[env->current_tc];
}

void do_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->CP0_TCScheFBack[other_tc];
}

/* Read the Count register (free-running timer). */
void do_mfc0_count (void)
{
    T0 = (int32_t)cpu_mips_get_count(env);
}
810 |
|
811 |
/* Read EntryHi as seen by the target TC: VPN from the shared EntryHi,
   ASID (low 8 bits) from that TC's TCStatus. */
void do_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = (env->CP0_EntryHi & ~0xff) | (env->CP0_TCStatus[other_tc] & 0xff);
}

/* Read Status as seen by the target TC: shared Status with the per-TC
   CU0-3, MX and KSU fields substituted from that TC's TCStatus. */
void do_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t tcstatus = env->CP0_TCStatus[other_tc];

    T0 = env->CP0_Status & ~0xf1000018;
    T0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    T0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    T0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
}

/* LLAddr is reported shifted right by 4 (architectural format). */
void do_mfc0_lladdr (void)
{
    T0 = (int32_t)env->CP0_LLAddr >> 4;
}

/* Read WatchLo[sel], sign-extended to target_ulong. */
void do_mfc0_watchlo (uint32_t sel)
{
    T0 = (int32_t)env->CP0_WatchLo[sel];
}

/* Read WatchHi[sel]. */
void do_mfc0_watchhi (uint32_t sel)
{
    T0 = env->CP0_WatchHi[sel];
}

/* Read Debug; the DM bit is derived from the hflags rather than being
   stored in CP0_Debug itself. */
void do_mfc0_debug (void)
{
    T0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        T0 |= 1 << CP0DB_DM;
}

/* Read Debug as seen by the target TC: per-TC SSt and Halt bits come
   from that TC's debug state. */
void do_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    /* XXX: Might be wrong, check with EJTAG spec. */
    T0 = (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
         (env->CP0_Debug_tcstatus[other_tc] &
          ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
860 |
|
861 |
#if defined(TARGET_MIPS64)
|
862 |
void do_dmfc0_tcrestart (void) |
863 |
{ |
864 |
T0 = env->PC[env->current_tc]; |
865 |
} |
866 |
|
867 |
void do_dmfc0_tchalt (void) |
868 |
{ |
869 |
T0 = env->CP0_TCHalt[env->current_tc]; |
870 |
} |
871 |
|
872 |
void do_dmfc0_tccontext (void) |
873 |
{ |
874 |
T0 = env->CP0_TCContext[env->current_tc]; |
875 |
} |
876 |
|
877 |
void do_dmfc0_tcschedule (void) |
878 |
{ |
879 |
T0 = env->CP0_TCSchedule[env->current_tc]; |
880 |
} |
881 |
|
882 |
void do_dmfc0_tcschefback (void) |
883 |
{ |
884 |
T0 = env->CP0_TCScheFBack[env->current_tc]; |
885 |
} |
886 |
|
887 |
void do_dmfc0_lladdr (void) |
888 |
{ |
889 |
T0 = env->CP0_LLAddr >> 4;
|
890 |
} |
891 |
|
892 |
void do_dmfc0_watchlo (uint32_t sel)
|
893 |
{ |
894 |
T0 = env->CP0_WatchLo[sel]; |
895 |
} |
896 |
#endif /* TARGET_MIPS64 */ |
897 |
|
898 |
/* Write Index: keep the probe-failure bit (bit 31) and mask the index
   to a power-of-two range large enough to cover all TLB entries. */
void do_mtc0_index (void)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    /* Compute num = smallest power of two with num > nb_tlb/2,
       i.e. a mask (num - 1) wide enough for any valid TLB index. */
    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (T0 & (num - 1));
}
909 |
|
910 |
/* Write MVPControl: only bits writable at the current privilege are
   updated (CPA/VPC/EVP need master-VPE rights, STLB needs VPC set). */
void do_mtc0_mvpcontrol (void)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (T0 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

/* Write VPEControl: YSI, GSI, TE and TargTC are writable. */
void do_mtc0_vpecontrol (void)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (T0 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

/* Write VPEConf0: writable only with master-VPE rights; XTC becomes
   writable once the VPE is activated. */
void do_mtc0_vpeconf0 (void)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (T0 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}

/* Write VPEConf1: coprocessor-count fields writable only while the
   configuration (VPC) bit is set. */
void do_mtc0_vpeconf1 (void)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (T0 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

/* Write YQMask: yield qualifier inputs not implemented, so force 0. */
void do_mtc0_yqmask (void)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* Write VPEOpt: only the low 16 bits are implemented. */
void do_mtc0_vpeopt (void)
{
    env->CP0_VPEOpt = T0 & 0x0000ffff;
}
989 |
|
990 |
/* Write EntryLo0, masking off bits for unimplemented features. */
void do_mtc0_entrylo0 (void)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = T0 & 0x3FFFFFFF;
}
996 |
|
997 |
/* Write TCStatus of the current TC, honoring the CPU's per-model
   read/write bitmask. */
void do_mtc0_tcstatus (void)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->CP0_TCStatus[env->current_tc] & ~mask) | (T0 & mask);

    // TODO: Sync with CP0_Status.

    env->CP0_TCStatus[env->current_tc] = newval;
}

/* Write TCStatus of the TC selected by VPEControl.TargTC.
   NOTE(review): unlike do_mtc0_tcstatus, no rw bitmask is applied here
   — presumably intentional for the cross-TC move, but verify. */
void do_mttc0_tcstatus (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    env->CP0_TCStatus[other_tc] = T0;
}
1017 |
|
1018 |
/* Write TCBind of the current TC: TBE always writable, CurVPE only
   while the configuration (VPC) bit is set. */
void do_mtc0_tcbind (void)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->CP0_TCBind[env->current_tc] & ~mask) | (T0 & mask);
    env->CP0_TCBind[env->current_tc] = newval;
}

/* Write TCBind of the TC selected by VPEControl.TargTC (same masking
   rules as above). */
void do_mttc0_tcbind (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->CP0_TCBind[other_tc] & ~mask) | (T0 & mask);
    env->CP0_TCBind[other_tc] = newval;
}
1040 |
|
1041 |
/* Write TCRestart of the current TC: set its PC, clear the
   delay-slot flag, and break any pending LL/SC link. */
void do_mtc0_tcrestart (void)
{
    env->PC[env->current_tc] = T0;
    env->CP0_TCStatus[env->current_tc] &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0ULL;
    /* MIPS16 not implemented. */
}

/* Same, for the TC selected by VPEControl.TargTC. */
void do_mttc0_tcrestart (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    env->PC[other_tc] = T0;
    env->CP0_TCStatus[other_tc] &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0ULL;
    /* MIPS16 not implemented. */
}
1058 |
|
1059 |
/* Write TCHalt of the current TC (only the H bit is kept). */
void do_mtc0_tchalt (void)
{
    env->CP0_TCHalt[env->current_tc] = T0 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}

/* Write TCHalt of the TC selected by VPEControl.TargTC.
   NOTE(review): the cross-TC form stores T0 unmasked, unlike the
   current-TC form above — verify whether that asymmetry is intended. */
void do_mttc0_tchalt (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    env->CP0_TCHalt[other_tc] = T0;
}
1074 |
|
1075 |
/* Write TCContext of the current TC. */
void do_mtc0_tccontext (void)
{
    env->CP0_TCContext[env->current_tc] = T0;
}

/* Write TCContext of the TC selected by VPEControl.TargTC. */
void do_mttc0_tccontext (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    env->CP0_TCContext[other_tc] = T0;
}
1086 |
|
1087 |
/* Write TCSchedule of the current TC. */
void do_mtc0_tcschedule (void)
{
    env->CP0_TCSchedule[env->current_tc] = T0;
}

/* Write TCSchedule of the TC selected by VPEControl.TargTC. */
void do_mttc0_tcschedule (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    env->CP0_TCSchedule[other_tc] = T0;
}
1098 |
|
1099 |
/* Write TCScheFBack of the current TC. */
void do_mtc0_tcschefback (void)
{
    env->CP0_TCScheFBack[env->current_tc] = T0;
}

/* Write TCScheFBack of the TC selected by VPEControl.TargTC. */
void do_mttc0_tcschefback (void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    env->CP0_TCScheFBack[other_tc] = T0;
}
1110 |
|
1111 |
/* Write EntryLo1, masking off bits for unimplemented features. */
void do_mtc0_entrylo1 (void)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = T0 & 0x3FFFFFFF;
}

/* Write Context: only the PTEBase part (above bit 22) is writable;
   the BadVPN2 part is hardware-maintained. */
void do_mtc0_context (void)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (T0 & ~0x007FFFFF);
}

/* Write PageMask, restricted to page sizes QEMU can represent. */
void do_mtc0_pagemask (void)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = T0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

/* Write PageGrain: all controlled features are unimplemented, so the
   register is forced to 0. */
void do_mtc0_pagegrain (void)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Write Wired, wrapped into the valid TLB index range. */
void do_mtc0_wired (void)
{
    env->CP0_Wired = T0 % env->tlb->nb_tlb;
}
1141 |
|
1142 |
/* SRSConf0-4 writes: bits may only be set (OR), never cleared, and
   only within the per-model writable bitmask. */

void do_mtc0_srsconf0 (void)
{
    env->CP0_SRSConf0 |= T0 & env->CP0_SRSConf0_rw_bitmask;
}

void do_mtc0_srsconf1 (void)
{
    env->CP0_SRSConf1 |= T0 & env->CP0_SRSConf1_rw_bitmask;
}

void do_mtc0_srsconf2 (void)
{
    env->CP0_SRSConf2 |= T0 & env->CP0_SRSConf2_rw_bitmask;
}

void do_mtc0_srsconf3 (void)
{
    env->CP0_SRSConf3 |= T0 & env->CP0_SRSConf3_rw_bitmask;
}

void do_mtc0_srsconf4 (void)
{
    env->CP0_SRSConf4 |= T0 & env->CP0_SRSConf4_rw_bitmask;
}

/* Write HWREna: only the four standard rdhwr-enable bits exist. */
void do_mtc0_hwrena (void)
{
    env->CP0_HWREna = T0 & 0x0000000F;
}

/* Write Count via the timer subsystem. */
void do_mtc0_count (void)
{
    cpu_mips_store_count(env, T0);
}
1176 |
|
1177 |
/* Write CP0 EntryHi (VPN2 + ASID).  Mirrors the new ASID into the
   current TC's TCStatus when the MT ASE is present, and flushes
   qemu's soft TLB when the effective ASID changes. */
void do_mtc0_entryhi (void)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = T0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* TCStatus keeps a copy of the ASID in its low 8 bits. */
        uint32_t tcst = env->CP0_TCStatus[env->current_tc] & ~0xff;
        env->CP0_TCStatus[env->current_tc] = tcst | (val & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1196 |
|
1197 |
/* MTTR to EntryHi: keep the current ASID bits, take the upper bits
   from T0, and store T0's ASID into the target TC's TCStatus. */
void do_mttc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (T0 & ~0xff);
    env->CP0_TCStatus[other_tc] = (env->CP0_TCStatus[other_tc] & ~0xff) | (T0 & 0xff);
}

/* Write CP0 Compare through the timer code (may clear timer IRQ). */
void do_mtc0_compare (void)
{
    cpu_mips_store_compare(env, T0);
}
1209 |
|
1210 |
/* Write CP0 Status through the per-CPU writable-bit mask, then
   recompute hflags and re-evaluate pending interrupts. */
void do_mtc0_status (void)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = T0 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    compute_hflags(env);
    if (loglevel & CPU_LOG_EXEC)
        do_mtc0_status_debug(old, val);
    cpu_mips_update_irq(env);
}

/* MTTR to Status: also mirror the CU, MX and KSU fields into the
   target TC's TCStatus (TCU0/TMX/TKSU). */
void do_mttc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t tcstatus = env->CP0_TCStatus[other_tc];

    env->CP0_Status = T0 & ~0xf1000018;
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (T0 & (0xf << CP0St_CU0));
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((T0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((T0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
    env->CP0_TCStatus[other_tc] = tcstatus;
}
1235 |
|
1236 |
/* Write CP0 IntCtl; only the VS-related bits 0x2e0 are writable. */
void do_mtc0_intctl (void)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (T0 & 0x000002e0);
}

/* Write CP0 SRSCtl; only the ESS and PSS shadow-set fields take T0. */
void do_mtc0_srsctl (void)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (T0 & mask);
}
1247 |
|
1248 |
/* Write CP0 Cause.  Toggling the DC bit (R2 only) stops/starts the
   Count timer; writing the soft-interrupt bits re-evaluates IRQs. */
void do_mtc0_cause (void)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;

    /* The DC (disable count) bit is writable only on MIPS32/64 R2. */
    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (T0 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Handle the software interrupt as an hardware one, as they
       are very similar */
    if (T0 & CP0Ca_IP_mask) {
        cpu_mips_update_irq(env);
    }
}
1271 |
|
1272 |
/* Write CP0 EBase; the exception base is forced into kseg0. */
void do_mtc0_ebase (void)
{
    /* vectored interrupts not implemented */
    /* Multi-CPU not implemented */
    env->CP0_EBase = 0x80000000 | (T0 & 0x3FFFF000);
}

/* Write CP0 Config0; only the K0 cacheability field (low 3 bits)
   is writable. */
void do_mtc0_config0 (void)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (T0 & 0x00000007);
}

/* Write CP0 Config2; the written value is ignored entirely. */
void do_mtc0_config2 (void)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1289 |
|
1290 |
/* Write CP0 WatchLo[SEL]; the low 3 enable bits are kept clear. */
void do_mtc0_watchlo (uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (T0 & ~0x7);
}

/* Write CP0 WatchHi[SEL]. */
void do_mtc0_watchhi (uint32_t sel)
{
    env->CP0_WatchHi[sel] = (T0 & 0x40FF0FF8);
    /* NOTE(review): the mask above already clears bits 2:0, so this
       write-one-to-clear step is a no-op as written — confirm intent. */
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & T0 & 0x7);
}

/* Write CP0 XContext; only the bits above the SEGBITS-derived mask
   (PTEBase) take T0. */
void do_mtc0_xcontext (void)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (T0 & ~mask);
}

/* Write CP0 Framemask verbatim. */
void do_mtc0_framemask (void)
{
    env->CP0_Framemask = T0; /* XXX */
}
1313 |
|
1314 |
/* Write CP0 Debug; entering/leaving debug mode tracks the DM hflag. */
void do_mtc0_debug (void)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (T0 & 0x13300120);
    if (T0 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

/* MTTR to Debug: SSt/Halt go to the target TC's per-TC debug state,
   everything else to the shared Debug register. */
void do_mttc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    /* XXX: Might be wrong, check with EJTAG spec. */
    env->CP0_Debug_tcstatus[other_tc] = T0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (T0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1332 |
|
1333 |
/* Write CP0 PerfCnt control 0; only the low 11 control bits stick
   (the counters themselves are not implemented). */
void do_mtc0_performance0 (void)
{
    env->CP0_Performance0 = T0 & 0x000007ff;
}

/* Cache tag/data staging registers; mostly stored verbatim. */
void do_mtc0_taglo (void)
{
    env->CP0_TagLo = T0 & 0xFFFFFCF6;
}

void do_mtc0_datalo (void)
{
    env->CP0_DataLo = T0; /* XXX */
}

void do_mtc0_taghi (void)
{
    env->CP0_TagHi = T0; /* XXX */
}

void do_mtc0_datahi (void)
{
    env->CP0_DataHi = T0; /* XXX */
}
1357 |
|
1358 |
/* Log a Status write: old/new value, the IP bits that would fire
   against Cause, and the resulting MMU mode. */
void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}

/* Log that a Status write raised pending IRQs. */
void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}
1376 |
#endif /* !CONFIG_USER_ONLY */ |
1377 |
|
1378 |
/* MIPS MT functions */
|
1379 |
/* MFTR helpers: read a register of the TC selected by
   VPEControl.TargTC into T0. */
void do_mftgpr(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->gpr[other_tc][sel];
}

void do_mftlo(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->LO[other_tc][sel];
}

void do_mfthi(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->HI[other_tc][sel];
}

void do_mftacx(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->ACX[other_tc][sel];
}

void do_mftdsp(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    T0 = env->DSPControl[other_tc];
}
1413 |
|
1414 |
void do_mttgpr(uint32_t sel)
|
1415 |
{ |
1416 |
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); |
1417 |
|
1418 |
T0 = env->gpr[other_tc][sel]; |
1419 |
} |
1420 |
|
1421 |
void do_mttlo(uint32_t sel)
|
1422 |
{ |
1423 |
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); |
1424 |
|
1425 |
T0 = env->LO[other_tc][sel]; |
1426 |
} |
1427 |
|
1428 |
void do_mtthi(uint32_t sel)
|
1429 |
{ |
1430 |
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); |
1431 |
|
1432 |
T0 = env->HI[other_tc][sel]; |
1433 |
} |
1434 |
|
1435 |
void do_mttacx(uint32_t sel)
|
1436 |
{ |
1437 |
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); |
1438 |
|
1439 |
T0 = env->ACX[other_tc][sel]; |
1440 |
} |
1441 |
|
1442 |
void do_mttdsp(void) |
1443 |
{ |
1444 |
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); |
1445 |
|
1446 |
T0 = env->DSPControl[other_tc]; |
1447 |
} |
1448 |
|
1449 |
/* MIPS MT functions */
/* DMT/EMT (disable/enable multithreading within a VPE) and
   DVPE/EVPE (disable/enable the other VPEs) are stubs: they return
   0 in T0 and do not alter any MT state yet. */
void do_dmt(void)
{
    // TODO
    T0 = 0;
    // rt = T0
}

void do_emt(void)
{
    // TODO
    T0 = 0;
    // rt = T0
}

void do_dvpe(void)
{
    // TODO
    T0 = 0;
    // rt = T0
}

void do_evpe(void)
{
    // TODO
    T0 = 0;
    // rt = T0
}

/* FORK: allocate a new TC; currently a stub that returns 0. */
void do_fork(void)
{
    // T0 = rt, T1 = rs
    T0 = 0;
    // TODO: store to TC register
}
1484 |
|
1485 |
void do_yield(void) |
1486 |
{ |
1487 |
if (T0 < 0) { |
1488 |
/* No scheduling policy implemented. */
|
1489 |
if (T0 != -2) { |
1490 |
if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && |
1491 |
env->CP0_TCStatus[env->current_tc] & (1 << CP0TCSt_DT)) {
|
1492 |
env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
|
1493 |
env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
|
1494 |
do_raise_exception(EXCP_THREAD); |
1495 |
} |
1496 |
} |
1497 |
} else if (T0 == 0) { |
1498 |
if (0 /* TODO: TC underflow */) { |
1499 |
env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
|
1500 |
do_raise_exception(EXCP_THREAD); |
1501 |
} else {
|
1502 |
// TODO: Deallocate TC
|
1503 |
} |
1504 |
} else if (T0 > 0) { |
1505 |
/* Yield qualifier inputs not implemented. */
|
1506 |
env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
|
1507 |
env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
|
1508 |
do_raise_exception(EXCP_THREAD); |
1509 |
} |
1510 |
T0 = env->CP0_YQMask; |
1511 |
} |
1512 |
|
1513 |
/* CP1 functions */
|
1514 |
/* Fold the softfloat accumulated exception flags into FCR31:
   Flags gets every raised condition, Cause only the enabled ones. */
void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    /* Host FP: no softfloat flags to propagate. */
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}
1550 |
|
1551 |
#ifndef CONFIG_USER_ONLY
|
1552 |
/* TLB management */
|
1553 |
/* Flush qemu's soft TLB for ENV and discard all shadowed entries;
   after a full flush no shadow entry can be live, so the in-use
   count is reset to the architectural TLB size. */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    tlb_flush(env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
1559 |
|
1560 |
/* Invalidate shadow TLB entries [first, tlb_in_use), working down
   from the highest index. */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    while (env->tlb->tlb_in_use > first) {
        env->tlb->tlb_in_use--;
        r4k_invalidate_tlb(env, env->tlb->tlb_in_use, 0);
    }
}
1567 |
|
1568 |
/* Load TLB entry IDX from the CP0 EntryHi/PageMask/EntryLo0/1
   registers (the guts of TLBWI/TLBWR). */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* The entry is global only if both halves have G set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
1590 |
|
1591 |
/* TLBWI: write the indexed TLB entry (Index is taken modulo the
   TLB size). */
void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries. We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win. */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}

/* TLBWR: write a (pseudo-)random TLB entry. */
void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
1609 |
|
1610 |
/* TLBP: probe the TLB for an entry matching EntryHi.  On a hit,
   Index gets the entry number; on a miss, the P bit (bit 31) is set
   and matching shadow entries are discarded. */
void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match. Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
1651 |
|
1652 |
/* TLBR: read the indexed TLB entry back into EntryHi/PageMask/
   EntryLo0/1.  Flushes qemu's soft TLB if the read changes the
   current ASID. */
void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB. */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
1673 |
|
1674 |
#endif /* !CONFIG_USER_ONLY */ |
1675 |
|
1676 |
void dump_ldst (const unsigned char *func) |
1677 |
{ |
1678 |
if (loglevel)
|
1679 |
fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1); |
1680 |
} |
1681 |
|
1682 |
void dump_sc (void) |
1683 |
{ |
1684 |
if (loglevel) {
|
1685 |
fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__, |
1686 |
T1, T0, env->CP0_LLAddr); |
1687 |
} |
1688 |
} |
1689 |
|
1690 |
/* Specials */
|
1691 |
void do_di (void) |
1692 |
{ |
1693 |
T0 = env->CP0_Status; |
1694 |
env->CP0_Status = T0 & ~(1 << CP0St_IE);
|
1695 |
cpu_mips_update_irq(env); |
1696 |
} |
1697 |
|
1698 |
void do_ei (void) |
1699 |
{ |
1700 |
T0 = env->CP0_Status; |
1701 |
env->CP0_Status = T0 | (1 << CP0St_IE);
|
1702 |
cpu_mips_update_irq(env); |
1703 |
} |
1704 |
|
1705 |
/* Log CPU state (PC, EPC, ErrorEPC/DEPC when relevant) before ERET. */
void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}

/* Log CPU state after ERET, including the resulting MMU mode. */
void debug_post_eret (void)
{
    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}
1731 |
|
1732 |
/* ERET: return from exception.  ERL takes priority over EXL; the
   matching EPC is restored and the matching Status bit cleared.
   Setting LLAddr to 1 breaks any pending LL/SC pair. */
void do_eret (void)
{
    if (loglevel & CPU_LOG_EXEC)
        debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->PC[env->current_tc] = env->CP0_ErrorEPC;
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        env->PC[env->current_tc] = env->CP0_EPC;
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    if (loglevel & CPU_LOG_EXEC)
        debug_post_eret();
    env->CP0_LLAddr = 1;
}
1748 |
|
1749 |
void do_deret (void) |
1750 |
{ |
1751 |
if (loglevel & CPU_LOG_EXEC)
|
1752 |
debug_pre_eret(); |
1753 |
env->PC[env->current_tc] = env->CP0_DEPC; |
1754 |
env->hflags &= MIPS_HFLAG_DM; |
1755 |
compute_hflags(env); |
1756 |
if (loglevel & CPU_LOG_EXEC)
|
1757 |
debug_post_eret(); |
1758 |
env->CP0_LLAddr = 1;
|
1759 |
} |
1760 |
|
1761 |
/* RDHWR helpers: each hardware register is readable in kernel mode,
   or in user mode when the matching HWREna bit is set; otherwise a
   Reserved Instruction exception is raised. */
void do_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        T0 = env->CP0_EBase & 0x3ff;
    else
        do_raise_exception(EXCP_RI);
}

void do_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        T0 = env->SYNCI_Step;
    else
        do_raise_exception(EXCP_RI);
}

void do_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        T0 = env->CP0_Count;
    else
        do_raise_exception(EXCP_RI);
}

void do_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        T0 = env->CCRes;
    else
        do_raise_exception(EXCP_RI);
}
1796 |
|
1797 |
/* Bitfield operations. */
|
1798 |
void do_ext(uint32_t pos, uint32_t size)
|
1799 |
{ |
1800 |
T0 = (int32_t)((T1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0)); |
1801 |
} |
1802 |
|
1803 |
void do_ins(uint32_t pos, uint32_t size)
|
1804 |
{ |
1805 |
target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos; |
1806 |
|
1807 |
T0 = (int32_t)((T0 & ~mask) | ((T1 << pos) & mask)); |
1808 |
} |
1809 |
|
1810 |
void do_wsbh(void) |
1811 |
{ |
1812 |
T0 = (int32_t)(((T1 << 8) & ~0x00FF00FF) | ((T1 >> 8) & 0x00FF00FF)); |
1813 |
} |
1814 |
|
1815 |
#if defined(TARGET_MIPS64)
|
1816 |
void do_dext(uint32_t pos, uint32_t size)
|
1817 |
{ |
1818 |
T0 = (T1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL); |
1819 |
} |
1820 |
|
1821 |
void do_dins(uint32_t pos, uint32_t size)
|
1822 |
{ |
1823 |
target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos; |
1824 |
|
1825 |
T0 = (T0 & ~mask) | ((T1 << pos) & mask); |
1826 |
} |
1827 |
|
1828 |
void do_dsbh(void) |
1829 |
{ |
1830 |
T0 = ((T1 << 8) & ~0x00FF00FF00FF00FFULL) | ((T1 >> 8) & 0x00FF00FF00FF00FFULL); |
1831 |
} |
1832 |
|
1833 |
void do_dshd(void) |
1834 |
{ |
1835 |
T1 = ((T1 << 16) & ~0x0000FFFF0000FFFFULL) | ((T1 >> 16) & 0x0000FFFF0000FFFFULL); |
1836 |
T0 = (T1 << 32) | (T1 >> 32); |
1837 |
} |
1838 |
#endif
|
1839 |
|
1840 |
/* Minimal PMON monitor call emulation, dispatched on FUNCTION.
   Arguments arrive in $a0 (gpr[4]); results go to $v0 (gpr[2]). */
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->gpr[env->current_tc][4] == 0)
            env->gpr[env->current_tc][2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->gpr[env->current_tc][2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the low byte of $a0. */
        printf("%c", (char)(env->gpr[env->current_tc][4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /* print a string; $a0 is a guest pointer to the text. */
            unsigned char *fmt = (void *)(unsigned long)env->gpr[env->current_tc][4];
            printf("%s", fmt);
        }
        break;
    }
}
1865 |
|
1866 |
/* WAIT: halt the virtual CPU until the next interrupt. */
void do_wait (void)
{
    env->halted = 1;
    do_raise_exception(EXCP_HLT);
}
1871 |
|
1872 |
#if !defined(CONFIG_USER_ONLY)
|
1873 |
|
1874 |
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr); |
1875 |
|
1876 |
#define MMUSUFFIX _mmu
|
1877 |
#define ALIGNED_ONLY
|
1878 |
|
1879 |
#define SHIFT 0 |
1880 |
#include "softmmu_template.h" |
1881 |
|
1882 |
#define SHIFT 1 |
1883 |
#include "softmmu_template.h" |
1884 |
|
1885 |
#define SHIFT 2 |
1886 |
#include "softmmu_template.h" |
1887 |
|
1888 |
#define SHIFT 3 |
1889 |
#include "softmmu_template.h" |
1890 |
|
1891 |
/* Raise an address error for a misaligned access: record the bad
   address, restore CPU state from the host return address, then
   raise AdES for stores or AdEL for loads. */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
1897 |
|
1898 |
/* Soft-MMU slow path: try to resolve ADDR via the guest TLB; on
   failure, restore CPU state from the translated-code return address
   (if any) and raise the MMU exception recorded by the handler. */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
1925 |
|
1926 |
/* Bus error on an unassigned physical address: instruction fetches
   raise IBE, data accesses raise DBE. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused)
{
    do_raise_exception(is_exec ? EXCP_IBE : EXCP_DBE);
}
1934 |
#endif /* !CONFIG_USER_ONLY */ |
1935 |
|
1936 |
/* Complex FPU operations which may need stack space. */
|
1937 |
|
1938 |
/* Canonical IEEE-754 bit patterns used as helper results. */
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

/* Apply the rounding mode selected by the low two bits of FCR31. */
#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
|
1957 |
|
1958 |
/* CFC1: read FPU control register REG into T0.  25/26/28 are the
   architected views of FCR31 (condition codes, cause/flags, enables);
   anything else reads FCR31 itself, 0 reads FIR. */
void do_cfc1 (uint32_t reg)
{
    switch (reg) {
    case 0:
        T0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        /* Condition codes CC7..CC1 and CC0. */
        T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* Cause and flags fields. */
        T0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        /* Enables, rounding mode and FS bit. */
        T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        T0 = (int32_t)env->fpu->fcr31;
        break;
    }
}
1978 |
|
1979 |
/* CTC1: write T0 to FPU control register REG.  Writes with reserved
   bits set are silently ignored; after a valid write the rounding
   mode is re-applied and a pending enabled cause raises FPE. */
void do_ctc1 (uint32_t reg)
{
    switch(reg) {
    case 25:
        if (T0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
                     ((T0 & 0x1) << 23);
        break;
    case 26:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
        break;
    case 28:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
                     ((T0 & 0x4) << 22);
        break;
    case 31:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = T0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    /* Unimplemented-op (0x20) is always "enabled". */
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}
2013 |
|
2014 |
/* Map softfloat exception flag bits to the MIPS FP_* bit layout. */
static always_inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

/* Inverse mapping: MIPS FP_* bits back to softfloat flag bits. */
static always_inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}
2031 |
|
2032 |
/* After an FP operation: copy the softfloat flags into the FCR31
   Cause field; raise FPE if any raised condition is enabled,
   otherwise accumulate into the sticky Flags field. */
static always_inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}
2042 |
|
2043 |
/* Float support.
|
2044 |
Single precition routines have a "s" suffix, double precision a
|
2045 |
"d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
|
2046 |
paired single lower "pl", paired single upper "pu". */
|
2047 |
|
2048 |
/* Expands to the helper prototype do_float_<name>_<precision>(). */
#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

/* unary operations, modifying fp status */
#define FLOAT_UNOP(name) \
FLOAT_OP(name, d)        \
{                        \
    FDT2 = float64_ ## name(FDT0, &env->fpu->fp_status);    \
}                        \
FLOAT_OP(name, s)        \
{                        \
    FST2 = float32_ ## name(FST0, &env->fpu->fp_status);    \
}
FLOAT_UNOP(sqrt)
#undef FLOAT_UNOP
|
2062 |
|
2063 |
FLOAT_OP(cvtd, s) |
2064 |
{ |
2065 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2066 |
FDT2 = float32_to_float64(FST0, &env->fpu->fp_status); |
2067 |
update_fcr31(); |
2068 |
} |
2069 |
FLOAT_OP(cvtd, w) |
2070 |
{ |
2071 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2072 |
FDT2 = int32_to_float64(WT0, &env->fpu->fp_status); |
2073 |
update_fcr31(); |
2074 |
} |
2075 |
FLOAT_OP(cvtd, l) |
2076 |
{ |
2077 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2078 |
FDT2 = int64_to_float64(DT0, &env->fpu->fp_status); |
2079 |
update_fcr31(); |
2080 |
} |
2081 |
FLOAT_OP(cvtl, d) |
2082 |
{ |
2083 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2084 |
DT2 = float64_to_int64(FDT0, &env->fpu->fp_status); |
2085 |
update_fcr31(); |
2086 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2087 |
DT2 = FLOAT_SNAN64; |
2088 |
} |
2089 |
FLOAT_OP(cvtl, s) |
2090 |
{ |
2091 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2092 |
DT2 = float32_to_int64(FST0, &env->fpu->fp_status); |
2093 |
update_fcr31(); |
2094 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2095 |
DT2 = FLOAT_SNAN64; |
2096 |
} |
2097 |
|
2098 |
FLOAT_OP(cvtps, pw) |
2099 |
{ |
2100 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2101 |
FST2 = int32_to_float32(WT0, &env->fpu->fp_status); |
2102 |
FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status); |
2103 |
update_fcr31(); |
2104 |
} |
2105 |
FLOAT_OP(cvtpw, ps) |
2106 |
{ |
2107 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2108 |
WT2 = float32_to_int32(FST0, &env->fpu->fp_status); |
2109 |
WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status); |
2110 |
update_fcr31(); |
2111 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2112 |
WT2 = FLOAT_SNAN32; |
2113 |
} |
2114 |
FLOAT_OP(cvts, d) |
2115 |
{ |
2116 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2117 |
FST2 = float64_to_float32(FDT0, &env->fpu->fp_status); |
2118 |
update_fcr31(); |
2119 |
} |
2120 |
FLOAT_OP(cvts, w) |
2121 |
{ |
2122 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2123 |
FST2 = int32_to_float32(WT0, &env->fpu->fp_status); |
2124 |
update_fcr31(); |
2125 |
} |
2126 |
FLOAT_OP(cvts, l) |
2127 |
{ |
2128 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2129 |
FST2 = int64_to_float32(DT0, &env->fpu->fp_status); |
2130 |
update_fcr31(); |
2131 |
} |
2132 |
FLOAT_OP(cvts, pl) |
2133 |
{ |
2134 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2135 |
WT2 = WT0; |
2136 |
update_fcr31(); |
2137 |
} |
2138 |
FLOAT_OP(cvts, pu) |
2139 |
{ |
2140 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2141 |
WT2 = WTH0; |
2142 |
update_fcr31(); |
2143 |
} |
2144 |
FLOAT_OP(cvtw, s) |
2145 |
{ |
2146 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2147 |
WT2 = float32_to_int32(FST0, &env->fpu->fp_status); |
2148 |
update_fcr31(); |
2149 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2150 |
WT2 = FLOAT_SNAN32; |
2151 |
} |
2152 |
FLOAT_OP(cvtw, d) |
2153 |
{ |
2154 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2155 |
WT2 = float64_to_int32(FDT0, &env->fpu->fp_status); |
2156 |
update_fcr31(); |
2157 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2158 |
WT2 = FLOAT_SNAN32; |
2159 |
} |
2160 |
|
2161 |
FLOAT_OP(roundl, d) |
2162 |
{ |
2163 |
set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status); |
2164 |
DT2 = float64_to_int64(FDT0, &env->fpu->fp_status); |
2165 |
RESTORE_ROUNDING_MODE; |
2166 |
update_fcr31(); |
2167 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2168 |
DT2 = FLOAT_SNAN64; |
2169 |
} |
2170 |
FLOAT_OP(roundl, s) |
2171 |
{ |
2172 |
set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status); |
2173 |
DT2 = float32_to_int64(FST0, &env->fpu->fp_status); |
2174 |
RESTORE_ROUNDING_MODE; |
2175 |
update_fcr31(); |
2176 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2177 |
DT2 = FLOAT_SNAN64; |
2178 |
} |
2179 |
FLOAT_OP(roundw, d) |
2180 |
{ |
2181 |
set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status); |
2182 |
WT2 = float64_to_int32(FDT0, &env->fpu->fp_status); |
2183 |
RESTORE_ROUNDING_MODE; |
2184 |
update_fcr31(); |
2185 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2186 |
WT2 = FLOAT_SNAN32; |
2187 |
} |
2188 |
FLOAT_OP(roundw, s) |
2189 |
{ |
2190 |
set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status); |
2191 |
WT2 = float32_to_int32(FST0, &env->fpu->fp_status); |
2192 |
RESTORE_ROUNDING_MODE; |
2193 |
update_fcr31(); |
2194 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2195 |
WT2 = FLOAT_SNAN32; |
2196 |
} |
2197 |
|
2198 |
FLOAT_OP(truncl, d) |
2199 |
{ |
2200 |
DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status); |
2201 |
update_fcr31(); |
2202 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2203 |
DT2 = FLOAT_SNAN64; |
2204 |
} |
2205 |
FLOAT_OP(truncl, s) |
2206 |
{ |
2207 |
DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status); |
2208 |
update_fcr31(); |
2209 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2210 |
DT2 = FLOAT_SNAN64; |
2211 |
} |
2212 |
FLOAT_OP(truncw, d) |
2213 |
{ |
2214 |
WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status); |
2215 |
update_fcr31(); |
2216 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2217 |
WT2 = FLOAT_SNAN32; |
2218 |
} |
2219 |
FLOAT_OP(truncw, s) |
2220 |
{ |
2221 |
WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status); |
2222 |
update_fcr31(); |
2223 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2224 |
WT2 = FLOAT_SNAN32; |
2225 |
} |
2226 |
|
2227 |
FLOAT_OP(ceill, d) |
2228 |
{ |
2229 |
set_float_rounding_mode(float_round_up, &env->fpu->fp_status); |
2230 |
DT2 = float64_to_int64(FDT0, &env->fpu->fp_status); |
2231 |
RESTORE_ROUNDING_MODE; |
2232 |
update_fcr31(); |
2233 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2234 |
DT2 = FLOAT_SNAN64; |
2235 |
} |
2236 |
FLOAT_OP(ceill, s) |
2237 |
{ |
2238 |
set_float_rounding_mode(float_round_up, &env->fpu->fp_status); |
2239 |
DT2 = float32_to_int64(FST0, &env->fpu->fp_status); |
2240 |
RESTORE_ROUNDING_MODE; |
2241 |
update_fcr31(); |
2242 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2243 |
DT2 = FLOAT_SNAN64; |
2244 |
} |
2245 |
FLOAT_OP(ceilw, d) |
2246 |
{ |
2247 |
set_float_rounding_mode(float_round_up, &env->fpu->fp_status); |
2248 |
WT2 = float64_to_int32(FDT0, &env->fpu->fp_status); |
2249 |
RESTORE_ROUNDING_MODE; |
2250 |
update_fcr31(); |
2251 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2252 |
WT2 = FLOAT_SNAN32; |
2253 |
} |
2254 |
FLOAT_OP(ceilw, s) |
2255 |
{ |
2256 |
set_float_rounding_mode(float_round_up, &env->fpu->fp_status); |
2257 |
WT2 = float32_to_int32(FST0, &env->fpu->fp_status); |
2258 |
RESTORE_ROUNDING_MODE; |
2259 |
update_fcr31(); |
2260 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2261 |
WT2 = FLOAT_SNAN32; |
2262 |
} |
2263 |
|
2264 |
FLOAT_OP(floorl, d) |
2265 |
{ |
2266 |
set_float_rounding_mode(float_round_down, &env->fpu->fp_status); |
2267 |
DT2 = float64_to_int64(FDT0, &env->fpu->fp_status); |
2268 |
RESTORE_ROUNDING_MODE; |
2269 |
update_fcr31(); |
2270 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2271 |
DT2 = FLOAT_SNAN64; |
2272 |
} |
2273 |
FLOAT_OP(floorl, s) |
2274 |
{ |
2275 |
set_float_rounding_mode(float_round_down, &env->fpu->fp_status); |
2276 |
DT2 = float32_to_int64(FST0, &env->fpu->fp_status); |
2277 |
RESTORE_ROUNDING_MODE; |
2278 |
update_fcr31(); |
2279 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2280 |
DT2 = FLOAT_SNAN64; |
2281 |
} |
2282 |
FLOAT_OP(floorw, d) |
2283 |
{ |
2284 |
set_float_rounding_mode(float_round_down, &env->fpu->fp_status); |
2285 |
WT2 = float64_to_int32(FDT0, &env->fpu->fp_status); |
2286 |
RESTORE_ROUNDING_MODE; |
2287 |
update_fcr31(); |
2288 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2289 |
WT2 = FLOAT_SNAN32; |
2290 |
} |
2291 |
FLOAT_OP(floorw, s) |
2292 |
{ |
2293 |
set_float_rounding_mode(float_round_down, &env->fpu->fp_status); |
2294 |
WT2 = float32_to_int32(FST0, &env->fpu->fp_status); |
2295 |
RESTORE_ROUNDING_MODE; |
2296 |
update_fcr31(); |
2297 |
if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
|
2298 |
WT2 = FLOAT_SNAN32; |
2299 |
} |
2300 |
|
2301 |
/* unary operations, not modifying fp status */
|
2302 |
#define FLOAT_UNOP(name) \
|
2303 |
FLOAT_OP(name, d) \ |
2304 |
{ \ |
2305 |
FDT2 = float64_ ## name(FDT0); \ |
2306 |
} \ |
2307 |
FLOAT_OP(name, s) \ |
2308 |
{ \ |
2309 |
FST2 = float32_ ## name(FST0); \ |
2310 |
} \ |
2311 |
FLOAT_OP(name, ps) \ |
2312 |
{ \ |
2313 |
FST2 = float32_ ## name(FST0); \ |
2314 |
FSTH2 = float32_ ## name(FSTH0); \ |
2315 |
} |
2316 |
FLOAT_UNOP(abs) |
2317 |
FLOAT_UNOP(chs) |
2318 |
#undef FLOAT_UNOP
|
2319 |
|
2320 |
/* MIPS specific unary operations */
|
2321 |
FLOAT_OP(recip, d) |
2322 |
{ |
2323 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2324 |
FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status); |
2325 |
update_fcr31(); |
2326 |
} |
2327 |
FLOAT_OP(recip, s) |
2328 |
{ |
2329 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2330 |
FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status); |
2331 |
update_fcr31(); |
2332 |
} |
2333 |
|
2334 |
FLOAT_OP(rsqrt, d) |
2335 |
{ |
2336 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2337 |
FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status); |
2338 |
FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status); |
2339 |
update_fcr31(); |
2340 |
} |
2341 |
FLOAT_OP(rsqrt, s) |
2342 |
{ |
2343 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2344 |
FST2 = float32_sqrt(FST0, &env->fpu->fp_status); |
2345 |
FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status); |
2346 |
update_fcr31(); |
2347 |
} |
2348 |
|
2349 |
FLOAT_OP(recip1, d) |
2350 |
{ |
2351 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2352 |
FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status); |
2353 |
update_fcr31(); |
2354 |
} |
2355 |
FLOAT_OP(recip1, s) |
2356 |
{ |
2357 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2358 |
FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status); |
2359 |
update_fcr31(); |
2360 |
} |
2361 |
FLOAT_OP(recip1, ps) |
2362 |
{ |
2363 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2364 |
FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status); |
2365 |
FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status); |
2366 |
update_fcr31(); |
2367 |
} |
2368 |
|
2369 |
FLOAT_OP(rsqrt1, d) |
2370 |
{ |
2371 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2372 |
FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status); |
2373 |
FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status); |
2374 |
update_fcr31(); |
2375 |
} |
2376 |
FLOAT_OP(rsqrt1, s) |
2377 |
{ |
2378 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2379 |
FST2 = float32_sqrt(FST0, &env->fpu->fp_status); |
2380 |
FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status); |
2381 |
update_fcr31(); |
2382 |
} |
2383 |
FLOAT_OP(rsqrt1, ps) |
2384 |
{ |
2385 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2386 |
FST2 = float32_sqrt(FST0, &env->fpu->fp_status); |
2387 |
FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status); |
2388 |
FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status); |
2389 |
FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status); |
2390 |
update_fcr31(); |
2391 |
} |
2392 |
|
2393 |
/* binary operations */
|
2394 |
#define FLOAT_BINOP(name) \
|
2395 |
FLOAT_OP(name, d) \ |
2396 |
{ \ |
2397 |
set_float_exception_flags(0, &env->fpu->fp_status); \
|
2398 |
FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \ |
2399 |
update_fcr31(); \ |
2400 |
if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
|
2401 |
DT2 = FLOAT_QNAN64; \ |
2402 |
} \ |
2403 |
FLOAT_OP(name, s) \ |
2404 |
{ \ |
2405 |
set_float_exception_flags(0, &env->fpu->fp_status); \
|
2406 |
FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \ |
2407 |
update_fcr31(); \ |
2408 |
if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
|
2409 |
WT2 = FLOAT_QNAN32; \ |
2410 |
} \ |
2411 |
FLOAT_OP(name, ps) \ |
2412 |
{ \ |
2413 |
set_float_exception_flags(0, &env->fpu->fp_status); \
|
2414 |
FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \ |
2415 |
FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \ |
2416 |
update_fcr31(); \ |
2417 |
if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
|
2418 |
WT2 = FLOAT_QNAN32; \ |
2419 |
WTH2 = FLOAT_QNAN32; \ |
2420 |
} \ |
2421 |
} |
2422 |
FLOAT_BINOP(add) |
2423 |
FLOAT_BINOP(sub) |
2424 |
FLOAT_BINOP(mul) |
2425 |
FLOAT_BINOP(div) |
2426 |
#undef FLOAT_BINOP
|
2427 |
|
2428 |
/* ternary operations */
|
2429 |
#define FLOAT_TERNOP(name1, name2) \
|
2430 |
FLOAT_OP(name1 ## name2, d) \ |
2431 |
{ \ |
2432 |
FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \ |
2433 |
FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \ |
2434 |
} \ |
2435 |
FLOAT_OP(name1 ## name2, s) \ |
2436 |
{ \ |
2437 |
FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \ |
2438 |
FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \ |
2439 |
} \ |
2440 |
FLOAT_OP(name1 ## name2, ps) \ |
2441 |
{ \ |
2442 |
FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \ |
2443 |
FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \ |
2444 |
FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \ |
2445 |
FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \ |
2446 |
} |
2447 |
FLOAT_TERNOP(mul, add) |
2448 |
FLOAT_TERNOP(mul, sub) |
2449 |
#undef FLOAT_TERNOP
|
2450 |
|
2451 |
/* negated ternary operations */
|
2452 |
#define FLOAT_NTERNOP(name1, name2) \
|
2453 |
FLOAT_OP(n ## name1 ## name2, d) \ |
2454 |
{ \ |
2455 |
FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \ |
2456 |
FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \ |
2457 |
FDT2 = float64_chs(FDT2); \ |
2458 |
} \ |
2459 |
FLOAT_OP(n ## name1 ## name2, s) \ |
2460 |
{ \ |
2461 |
FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \ |
2462 |
FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \ |
2463 |
FST2 = float32_chs(FST2); \ |
2464 |
} \ |
2465 |
FLOAT_OP(n ## name1 ## name2, ps) \ |
2466 |
{ \ |
2467 |
FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \ |
2468 |
FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \ |
2469 |
FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \ |
2470 |
FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \ |
2471 |
FST2 = float32_chs(FST2); \ |
2472 |
FSTH2 = float32_chs(FSTH2); \ |
2473 |
} |
2474 |
FLOAT_NTERNOP(mul, add) |
2475 |
FLOAT_NTERNOP(mul, sub) |
2476 |
#undef FLOAT_NTERNOP
|
2477 |
|
2478 |
/* MIPS specific binary operations */
|
2479 |
FLOAT_OP(recip2, d) |
2480 |
{ |
2481 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2482 |
FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status); |
2483 |
FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status)); |
2484 |
update_fcr31(); |
2485 |
} |
2486 |
FLOAT_OP(recip2, s) |
2487 |
{ |
2488 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2489 |
FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status); |
2490 |
FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status)); |
2491 |
update_fcr31(); |
2492 |
} |
2493 |
FLOAT_OP(recip2, ps) |
2494 |
{ |
2495 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2496 |
FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status); |
2497 |
FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status); |
2498 |
FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status)); |
2499 |
FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status)); |
2500 |
update_fcr31(); |
2501 |
} |
2502 |
|
2503 |
FLOAT_OP(rsqrt2, d) |
2504 |
{ |
2505 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2506 |
FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status); |
2507 |
FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status); |
2508 |
FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status)); |
2509 |
update_fcr31(); |
2510 |
} |
2511 |
FLOAT_OP(rsqrt2, s) |
2512 |
{ |
2513 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2514 |
FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status); |
2515 |
FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status); |
2516 |
FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status)); |
2517 |
update_fcr31(); |
2518 |
} |
2519 |
FLOAT_OP(rsqrt2, ps) |
2520 |
{ |
2521 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2522 |
FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status); |
2523 |
FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status); |
2524 |
FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status); |
2525 |
FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status); |
2526 |
FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status)); |
2527 |
FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status)); |
2528 |
update_fcr31(); |
2529 |
} |
2530 |
|
2531 |
FLOAT_OP(addr, ps) |
2532 |
{ |
2533 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2534 |
FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status); |
2535 |
FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status); |
2536 |
update_fcr31(); |
2537 |
} |
2538 |
|
2539 |
FLOAT_OP(mulr, ps) |
2540 |
{ |
2541 |
set_float_exception_flags(0, &env->fpu->fp_status);
|
2542 |
FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status); |
2543 |
FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status); |
2544 |
update_fcr31(); |
2545 |
} |
2546 |
|
2547 |
/* compare operations */
|
2548 |
#define FOP_COND_D(op, cond) \
|
2549 |
void do_cmp_d_ ## op (long cc) \ |
2550 |
{ \ |
2551 |
int c = cond; \
|
2552 |
update_fcr31(); \ |
2553 |
if (c) \
|
2554 |
SET_FP_COND(cc, env->fpu); \ |
2555 |
else \
|
2556 |
CLEAR_FP_COND(cc, env->fpu); \ |
2557 |
} \ |
2558 |
void do_cmpabs_d_ ## op (long cc) \ |
2559 |
{ \ |
2560 |
int c; \
|
2561 |
FDT0 = float64_abs(FDT0); \ |
2562 |
FDT1 = float64_abs(FDT1); \ |
2563 |
c = cond; \ |
2564 |
update_fcr31(); \ |
2565 |
if (c) \
|
2566 |
SET_FP_COND(cc, env->fpu); \ |
2567 |
else \
|
2568 |
CLEAR_FP_COND(cc, env->fpu); \ |
2569 |
} |
2570 |
|
2571 |
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM) |
2572 |
{ |
2573 |
if (float64_is_signaling_nan(a) ||
|
2574 |
float64_is_signaling_nan(b) || |
2575 |
(sig && (float64_is_nan(a) || float64_is_nan(b)))) { |
2576 |
float_raise(float_flag_invalid, status); |
2577 |
return 1; |
2578 |
} else if (float64_is_nan(a) || float64_is_nan(b)) { |
2579 |
return 1; |
2580 |
} else {
|
2581 |
return 0; |
2582 |
} |
2583 |
} |
2584 |
|
2585 |
/* NOTE: the comma operator will make "cond" to eval to false,
|
2586 |
* but float*_is_unordered() is still called. */
|
2587 |
FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0)) |
2588 |
FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
|
2589 |
FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
|
2590 |
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
|
2591 |
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
|
2592 |
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
|
2593 |
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
|
2594 |
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
|
2595 |
/* NOTE: the comma operator will make "cond" to eval to false,
|
2596 |
* but float*_is_unordered() is still called. */
|
2597 |
FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0)) |
2598 |
FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
|
2599 |
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
|
2600 |
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
|
2601 |
FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
|
2602 |
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
|
2603 |
FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
|
2604 |
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
|
2605 |
|
2606 |
#define FOP_COND_S(op, cond) \
|
2607 |
void do_cmp_s_ ## op (long cc) \ |
2608 |
{ \ |
2609 |
int c = cond; \
|
2610 |
update_fcr31(); \ |
2611 |
if (c) \
|
2612 |
SET_FP_COND(cc, env->fpu); \ |
2613 |
else \
|
2614 |
CLEAR_FP_COND(cc, env->fpu); \ |
2615 |
} \ |
2616 |
void do_cmpabs_s_ ## op (long cc) \ |
2617 |
{ \ |
2618 |
int c; \
|
2619 |
FST0 = float32_abs(FST0); \ |
2620 |
FST1 = float32_abs(FST1); \ |
2621 |
c = cond; \ |
2622 |
update_fcr31(); \ |
2623 |
if (c) \
|
2624 |
SET_FP_COND(cc, env->fpu); \ |
2625 |
else \
|
2626 |
CLEAR_FP_COND(cc, env->fpu); \ |
2627 |
} |
2628 |
|
2629 |
flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
|
2630 |
{ |
2631 |
if (float32_is_signaling_nan(a) ||
|
2632 |
float32_is_signaling_nan(b) || |
2633 |
(sig && (float32_is_nan(a) || float32_is_nan(b)))) { |
2634 |
float_raise(float_flag_invalid, status); |
2635 |
return 1; |
2636 |
} else if (float32_is_nan(a) || float32_is_nan(b)) { |
2637 |
return 1; |
2638 |
} else {
|
2639 |
return 0; |
2640 |
} |
2641 |
} |
2642 |
|
2643 |
/* NOTE: the comma operator will make "cond" to eval to false,
|
2644 |
* but float*_is_unordered() is still called. */
|
2645 |
FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0)) |
2646 |
FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
|
2647 |
FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
|
2648 |
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
|
2649 |
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
|
2650 |
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
|
2651 |
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
|
2652 |
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
|
2653 |
/* NOTE: the comma operator will make "cond" to eval to false,
|
2654 |
* but float*_is_unordered() is still called. */
|
2655 |
FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0)) |
2656 |
FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
|
2657 |
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
|
2658 |
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
|
2659 |
FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
|
2660 |
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
|
2661 |
FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
|
2662 |
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
|
2663 |
|
2664 |
/* Paired-single compare helpers: the lower half drives condition code
   cc, the upper half drives cc + 1.  */
#define FOP_COND_PS(op, condl, condh)          \
void do_cmp_ps_ ## op (long cc)                \
{                                              \
    int cl = condl;                            \
    int ch = condh;                            \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}                                              \
void do_cmpabs_ps_ ## op (long cc)             \
{                                              \
    int cl, ch;                                \
    FST0 = float32_abs(FST0);                  \
    FSTH0 = float32_abs(FSTH0);                \
    FST1 = float32_abs(FST1);                  \
    FSTH1 = float32_abs(FSTH1);                \
    cl = condl;                                \
    ch = condh;                                \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}
2698 |
|
2699 |
/* NOTE: the comma operator will make "cond" to eval to false,
|
2700 |
* but float*_is_unordered() is still called. */
|
2701 |
FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0), |
2702 |
(float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0)) |
2703 |
FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
|
2704 |
float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
|
2705 |
FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
|
2706 |
!float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
|
2707 |
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
|
2708 |
float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
|
2709 |
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
|
2710 |
!float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
|
2711 |
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
|
2712 |
float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
|
2713 |
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
|
2714 |
!float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
|
2715 |
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
|
2716 |
float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
|
2717 |
/* NOTE: the comma operator will make "cond" to eval to false,
|
2718 |
* but float*_is_unordered() is still called. */
|
2719 |
FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0), |
2720 |
(float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0)) |
2721 |
FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
|
2722 |
float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
|
2723 |
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
|
2724 |
!float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
|
2725 |
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
|
2726 |
float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
|
2727 |
FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
|
2728 |
!float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
|
2729 |
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
|
2730 |
float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
|
2731 |
FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
|
2732 |
!float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
|
2733 |
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
|
2734 |
float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
|