root / target-arm / op.c @ 1497c961
History | View | Annotate | Download (26.9 kB)
1 |
/*
|
---|---|
2 |
* ARM micro operations
|
3 |
*
|
4 |
* Copyright (c) 2003 Fabrice Bellard
|
5 |
* Copyright (c) 2005-2007 CodeSourcery, LLC
|
6 |
*
|
7 |
* This library is free software; you can redistribute it and/or
|
8 |
* modify it under the terms of the GNU Lesser General Public
|
9 |
* License as published by the Free Software Foundation; either
|
10 |
* version 2 of the License, or (at your option) any later version.
|
11 |
*
|
12 |
* This library is distributed in the hope that it will be useful,
|
13 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
15 |
* Lesser General Public License for more details.
|
16 |
*
|
17 |
* You should have received a copy of the GNU Lesser General Public
|
18 |
* License along with this library; if not, write to the Free Software
|
19 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
20 |
*/
|
21 |
#include "exec.h" |
22 |
|
23 |
/* T0 += T1, updating the NZCV flags.
   NZF holds N and Z combined (N = bit 31, Z = whole value == 0);
   CF/VF are stored as separate fields. */
void OPPROTO op_addl_T0_T1_cc(void)
{
    unsigned int src1;
    src1 = T0;
    T0 += T1;
    env->NZF = T0;
    /* Unsigned wrap-around <=> carry out. */
    env->CF = T0 < src1;
    /* Signed overflow: operands had the same sign, result differs (bit 31). */
    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
}

/* T0 += T1 + carry-in, updating the NZCV flags (ADC). */
void OPPROTO op_adcl_T0_T1_cc(void)
{
    unsigned int src1;
    src1 = T0;
    if (!env->CF) {
        T0 += T1;
        env->CF = T0 < src1;
    } else {
        T0 += T1 + 1;
        /* With carry-in, equality also means wrap-around. */
        env->CF = T0 <= src1;
    }
    env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
    env->NZF = T0;
    FORCE_RET();
}
48 |
|
49 |
/* Generate the subtract family: op_<sub>l_T0_T1_cc (flag-setting SUB),
   op_<sbc>l_T0_T1 (subtract with carry, no flags) and
   op_<sbc>l_T0_T1_cc.  The T0/T1 macro parameters let the same body
   produce both SUB (T0 - T1) and the reversed RSB/RSC (T1 - T0).
   ARM carry for subtraction is "no borrow" (CF = 1 when no borrow). */
#define OPSUB(sub, sbc, res, T0, T1) \
 \
void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
{ \
    unsigned int src1; \
    src1 = T0; \
    T0 -= T1; \
    env->NZF = T0; \
    env->CF = src1 >= T1; \
    env->VF = (src1 ^ T1) & (src1 ^ T0); \
    res = T0; \
} \
 \
void OPPROTO op_ ## sbc ## l_T0_T1(void) \
{ \
    /* SBC: subtract with borrow = T0 - T1 - (1 - CF). */ \
    res = T0 - T1 + env->CF - 1; \
} \
 \
void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
{ \
    unsigned int src1; \
    src1 = T0; \
    if (!env->CF) { \
        T0 = T0 - T1 - 1; \
        env->CF = src1 > T1; \
    } else { \
        T0 = T0 - T1; \
        env->CF = src1 >= T1; \
    } \
    env->VF = (src1 ^ T1) & (src1 ^ T0); \
    env->NZF = T0; \
    res = T0; \
    FORCE_RET(); \
}

OPSUB(sub, sbc, T0, T0, T1)

OPSUB(rsb, rsc, T0, T1, T0)
87 |
|
88 |
#define EIP (env->regs[15])

/* Conditional branch micro-ops: each one tests a single ARM condition
   code and jumps to translator label 1 when the condition holds.
   Flag encoding: NZF == 0 means Z set, NZF bit 31 is N;
   CF is 0/1; VF bit 31 is V. */

/* EQ: Z set. */
void OPPROTO op_test_eq(void)
{
    if (env->NZF == 0)
        GOTO_LABEL_PARAM(1);;
    FORCE_RET();
}

/* NE: Z clear. */
void OPPROTO op_test_ne(void)
{
    if (env->NZF != 0)
        GOTO_LABEL_PARAM(1);;
    FORCE_RET();
}

/* CS/HS: C set. */
void OPPROTO op_test_cs(void)
{
    if (env->CF != 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* CC/LO: C clear. */
void OPPROTO op_test_cc(void)
{
    if (env->CF == 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* MI: N set. */
void OPPROTO op_test_mi(void)
{
    if ((env->NZF & 0x80000000) != 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* PL: N clear. */
void OPPROTO op_test_pl(void)
{
    if ((env->NZF & 0x80000000) == 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* VS: V set. */
void OPPROTO op_test_vs(void)
{
    if ((env->VF & 0x80000000) != 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* VC: V clear. */
void OPPROTO op_test_vc(void)
{
    if ((env->VF & 0x80000000) == 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* HI: C set and Z clear. */
void OPPROTO op_test_hi(void)
{
    if (env->CF != 0 && env->NZF != 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* LS: C clear or Z set. */
void OPPROTO op_test_ls(void)
{
    if (env->CF == 0 || env->NZF == 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* GE: N == V. */
void OPPROTO op_test_ge(void)
{
    if (((env->VF ^ env->NZF) & 0x80000000) == 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* LT: N != V. */
void OPPROTO op_test_lt(void)
{
    if (((env->VF ^ env->NZF) & 0x80000000) != 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* GT: Z clear and N == V. */
void OPPROTO op_test_gt(void)
{
    if (env->NZF != 0 && ((env->VF ^ env->NZF) & 0x80000000) == 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* LE: Z set or N != V. */
void OPPROTO op_test_le(void)
{
    if (env->NZF == 0 || ((env->VF ^ env->NZF) & 0x80000000) != 0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* Branch if T0 is non-zero. */
void OPPROTO op_test_T0(void)
{
    if (T0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* Branch if T0 is zero. */
void OPPROTO op_testn_T0(void)
{
    if (!T0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}
200 |
|
201 |
/* Read the CPSR into T0. */
void OPPROTO op_movl_T0_cpsr(void)
{
    /* Execution state bits always read as zero. */
    T0 = cpsr_read(env) & ~CPSR_EXEC;
    FORCE_RET();
}

/* Read the current mode's SPSR into T0. */
void OPPROTO op_movl_T0_spsr(void)
{
    T0 = env->spsr;
}

/* Write T0 to the SPSR under the field mask in PARAM1; unmasked bits
   are preserved. */
void OPPROTO op_movl_spsr_T0(void)
{
    uint32_t mask = PARAM1;
    env->spsr = (env->spsr & ~mask) | (T0 & mask);
}

/* Write T0 to the CPSR under the field mask in PARAM1 (may switch
   mode/banked registers inside cpsr_write). */
void OPPROTO op_movl_cpsr_T0(void)
{
    cpsr_write(env, T0, PARAM1);
    FORCE_RET();
}
224 |
|
225 |
/* 32x32 -> 32 multiply. */
void OPPROTO op_mul_T0_T1(void)
{
    T0 = T0 * T1;
}

/* 64 bit unsigned mul: T1:T0 = T0 * T1 (high half in T1). */
void OPPROTO op_mull_T0_T1(void)
{
    uint64_t res;
    res = (uint64_t)T0 * (uint64_t)T1;
    T1 = res >> 32;
    T0 = res;
}

/* 64 bit signed mul: T1:T0 = (int32)T0 * (int32)T1. */
void OPPROTO op_imull_T0_T1(void)
{
    uint64_t res;
    res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
    T1 = res >> 32;
    T0 = res;
}

/* 48 bit signed mul, top 32 bits (SMULW*): drop the low 16 bits of the
   64-bit product. */
void OPPROTO op_imulw_T0_T1(void)
{
    uint64_t res;
    res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
    T0 = res >> 16;
}
255 |
|
256 |
/* 64-bit accumulate (UMLAL/SMLAL-style): T1:T0 += regs[PARAM2]:regs[PARAM1]. */
void OPPROTO op_addq_T0_T1(void)
{
    uint64_t res;
    res = ((uint64_t)T1 << 32) | T0;
    res += ((uint64_t)(env->regs[PARAM2]) << 32) | (env->regs[PARAM1]);
    T1 = res >> 32;
    T0 = res;
}

/* 64-bit accumulate of a 32-bit value: T1:T0 += regs[PARAM1]. */
void OPPROTO op_addq_lo_T0_T1(void)
{
    uint64_t res;
    res = ((uint64_t)T1 << 32) | T0;
    res += (uint64_t)(env->regs[PARAM1]);
    T1 = res >> 32;
    T0 = res;
}

/* Dual 16-bit accumulate: regs[PARAM2]:regs[PARAM1] += (int32)T0 + (int32)T1
   (both products are sign-extended before the 64-bit add). */
void OPPROTO op_addq_T0_T1_dual(void)
{
    uint64_t res;
    res = ((uint64_t)(env->regs[PARAM2]) << 32) | (env->regs[PARAM1]);
    res += (int32_t)T0;
    res += (int32_t)T1;
    env->regs[PARAM1] = (uint32_t)res;
    env->regs[PARAM2] = res >> 32;
}

/* Dual 16-bit subtract accumulate: acc += (int32)T0 - (int32)T1. */
void OPPROTO op_subq_T0_T1_dual(void)
{
    uint64_t res;
    res = ((uint64_t)(env->regs[PARAM2]) << 32) | (env->regs[PARAM1]);
    res += (int32_t)T0;
    res -= (int32_t)T1;
    env->regs[PARAM1] = (uint32_t)res;
    env->regs[PARAM2] = res >> 32;
}

/* Set NZ for a 64-bit value held in T1:T0: N from bit 63, Z when the
   whole 64-bit value is zero. */
void OPPROTO op_logicq_cc(void)
{
    env->NZF = (T1 & 0x80000000) | ((T0 | T1) != 0);
}
300 |
|
301 |
/* memory access */
|
302 |
|
303 |
#define MEMSUFFIX _raw
|
304 |
#include "op_mem.h" |
305 |
|
306 |
#if !defined(CONFIG_USER_ONLY)
|
307 |
#define MEMSUFFIX _user
|
308 |
#include "op_mem.h" |
309 |
#define MEMSUFFIX _kernel
|
310 |
#include "op_mem.h" |
311 |
#endif
|
312 |
|
313 |
/* Clear the exclusive monitor (CLREX), serialized under the global lock. */
void OPPROTO op_clrex(void)
{
    cpu_lock();
    helper_clrex(env);
    cpu_unlock();
}
319 |
|
320 |
/* T1 based, use T0 as shift count */
|
321 |
|
322 |
/* T1 based, use T0 as shift count (low 8 bits, matching ARM
   register-specified shifts).  These variants do not touch flags;
   the explicit >= 32 checks avoid C undefined behavior for
   shifts of the full operand width or more. */

void OPPROTO op_shll_T1_T0(void)
{
    int shift;
    shift = T0 & 0xff;
    if (shift >= 32)
        T1 = 0;
    else
        T1 = T1 << shift;
    FORCE_RET();
}

void OPPROTO op_shrl_T1_T0(void)
{
    int shift;
    shift = T0 & 0xff;
    if (shift >= 32)
        T1 = 0;
    else
        T1 = (uint32_t)T1 >> shift;
    FORCE_RET();
}

void OPPROTO op_sarl_T1_T0(void)
{
    int shift;
    shift = T0 & 0xff;
    /* ASR by 32 or more replicates the sign bit. */
    if (shift >= 32)
        shift = 31;
    T1 = (int32_t)T1 >> shift;
}

void OPPROTO op_rorl_T1_T0(void)
{
    int shift;
    shift = T0 & 0x1f;
    if (shift) {
        T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
    }
    FORCE_RET();
}

/* T1 based, use T0 as shift count and compute CF (flag-setting
   register shifts; CF is the last bit shifted out, unchanged when
   the count is zero). */

void OPPROTO op_shll_T1_T0_cc(void)
{
    int shift;
    shift = T0 & 0xff;
    if (shift >= 32) {
        /* LSL #32: carry is the old bit 0; beyond 32, carry is 0. */
        if (shift == 32)
            env->CF = T1 & 1;
        else
            env->CF = 0;
        T1 = 0;
    } else if (shift != 0) {
        env->CF = (T1 >> (32 - shift)) & 1;
        T1 = T1 << shift;
    }
    FORCE_RET();
}

void OPPROTO op_shrl_T1_T0_cc(void)
{
    int shift;
    shift = T0 & 0xff;
    if (shift >= 32) {
        /* LSR #32: carry is the old bit 31; beyond 32, carry is 0. */
        if (shift == 32)
            env->CF = (T1 >> 31) & 1;
        else
            env->CF = 0;
        T1 = 0;
    } else if (shift != 0) {
        env->CF = (T1 >> (shift - 1)) & 1;
        T1 = (uint32_t)T1 >> shift;
    }
    FORCE_RET();
}

void OPPROTO op_sarl_T1_T0_cc(void)
{
    int shift;
    shift = T0 & 0xff;
    if (shift >= 32) {
        /* ASR >= 32: result and carry are copies of the sign bit. */
        env->CF = (T1 >> 31) & 1;
        T1 = (int32_t)T1 >> 31;
    } else if (shift != 0) {
        env->CF = (T1 >> (shift - 1)) & 1;
        T1 = (int32_t)T1 >> shift;
    }
    FORCE_RET();
}

void OPPROTO op_rorl_T1_T0_cc(void)
{
    int shift1, shift;
    shift1 = T0 & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        /* Rotation by a non-zero multiple of 32: value unchanged,
           carry is bit 31. */
        if (shift1 != 0)
            env->CF = (T1 >> 31) & 1;
    } else {
        env->CF = (T1 >> (shift - 1)) & 1;
        T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
    }
    FORCE_RET();
}
427 |
|
428 |
/* exceptions */
|
429 |
|
430 |
/* Exception-raising ops: each sets exception_index and exits the CPU
   execution loop; the exception is delivered by the main loop. */

void OPPROTO op_swi(void)
{
    env->exception_index = EXCP_SWI;
    cpu_loop_exit();
}

void OPPROTO op_undef_insn(void)
{
    env->exception_index = EXCP_UDEF;
    cpu_loop_exit();
}

void OPPROTO op_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

/* Wait for interrupt: halt the CPU until something wakes it. */
void OPPROTO op_wfi(void)
{
    env->exception_index = EXCP_HLT;
    env->halted = 1;
    cpu_loop_exit();
}

void OPPROTO op_bkpt(void)
{
    env->exception_index = EXCP_BKPT;
    cpu_loop_exit();
}

/* M-profile exception return (magic EXC_RETURN address). */
void OPPROTO op_exception_exit(void)
{
    env->exception_index = EXCP_EXCEPTION_EXIT;
    cpu_loop_exit();
}
466 |
|
467 |
/* VFP support. We follow the convention used for VFP instrunctions:
|
468 |
Single precition routines have a "s" suffix, double precision a
|
469 |
"d" suffix. */
|
470 |
|
471 |
/* VFP op prototype generator: "s" suffix = single precision (FT0s/FT1s),
   "d" suffix = double precision (FT0d/FT1d). */
#define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void)

/* Binary ops computed through softfloat, FT0 = FT0 <op> FT1. */
#define VFP_BINOP(name) \
VFP_OP(name, s)             \
{                           \
    FT0s = float32_ ## name (FT0s, FT1s, &env->vfp.fp_status);    \
}                           \
VFP_OP(name, d)             \
{                           \
    FT0d = float64_ ## name (FT0d, FT1d, &env->vfp.fp_status);    \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP

/* Ops that defer to a do_vfp_* helper (defined in op_helper.c). */
#define VFP_HELPER(name)  \
VFP_OP(name, s)           \
{                         \
    do_vfp_##name##s();   \
}                         \
VFP_OP(name, d)           \
{                         \
    do_vfp_##name##d();   \
}
VFP_HELPER(abs)
VFP_HELPER(sqrt)
VFP_HELPER(cmp)
VFP_HELPER(cmpe)
#undef VFP_HELPER
|
502 |
|
503 |
/* XXX: Will this do the right thing for NANs. Should invert the signbit
|
504 |
without looking at the rest of the value. */
|
505 |
/* Negation via sign-bit flip (float*_chs). */
VFP_OP(neg, s)
{
    FT0s = float32_chs(FT0s);
}

VFP_OP(neg, d)
{
    FT0d = float64_chs(FT0d);
}

/* Load +0.0 into FT1 (single); the union forces a bit-pattern store
   rather than a numeric conversion. */
VFP_OP(F1_ld0, s)
{
    union {
        uint32_t i;
        float32 s;
    } v;
    v.i = 0;
    FT1s = v.s;
}

/* Load +0.0 into FT1 (double). */
VFP_OP(F1_ld0, d)
{
    union {
        uint64_t i;
        float64 d;
    } v;
    v.i = 0;
    FT1d = v.d;
}
534 |
|
535 |
/* Helper routines to perform bitwise copies between float and int. */
|
536 |
/* Helper routines to perform bitwise copies between float and int.
   The union makes the copy a bit-pattern reinterpretation (no numeric
   conversion) without violating strict aliasing. */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 vfp_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t vfp_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}
579 |
|
580 |
/* Integer to float conversion. */
|
581 |
/* Integer to float conversion.  Note: the 32-bit integer operand is
   always held (as a bit pattern) in the single-precision register
   FT0s, even for the conversions producing a double. */
VFP_OP(uito, s)
{
    FT0s = uint32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status);
}

VFP_OP(uito, d)
{
    FT0d = uint32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status);
}

VFP_OP(sito, s)
{
    FT0s = int32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status);
}

VFP_OP(sito, d)
{
    FT0d = int32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status);
}

/* Float to integer conversion.  The integer result is likewise left
   in FT0s as a bit pattern.  These use the current rounding mode. */
VFP_OP(toui, s)
{
    FT0s = vfp_itos(float32_to_uint32(FT0s, &env->vfp.fp_status));
}

VFP_OP(toui, d)
{
    FT0s = vfp_itos(float64_to_uint32(FT0d, &env->vfp.fp_status));
}

VFP_OP(tosi, s)
{
    FT0s = vfp_itos(float32_to_int32(FT0s, &env->vfp.fp_status));
}

VFP_OP(tosi, d)
{
    FT0s = vfp_itos(float64_to_int32(FT0d, &env->vfp.fp_status));
}

/* TODO: Set rounding mode properly.  The *z variants always round
   toward zero, as required by the default VCVT behavior. */
VFP_OP(touiz, s)
{
    FT0s = vfp_itos(float32_to_uint32_round_to_zero(FT0s, &env->vfp.fp_status));
}

VFP_OP(touiz, d)
{
    FT0s = vfp_itos(float64_to_uint32_round_to_zero(FT0d, &env->vfp.fp_status));
}

VFP_OP(tosiz, s)
{
    FT0s = vfp_itos(float32_to_int32_round_to_zero(FT0s, &env->vfp.fp_status));
}

VFP_OP(tosiz, d)
{
    FT0s = vfp_itos(float64_to_int32_round_to_zero(FT0d, &env->vfp.fp_status));
}

/* floating point conversion between single and double precision. */
VFP_OP(fcvtd, s)
{
    FT0d = float32_to_float64(FT0s, &env->vfp.fp_status);
}

VFP_OP(fcvts, d)
{
    FT0s = float64_to_float32(FT0d, &env->vfp.fp_status);
}
653 |
|
654 |
/* VFP3 fixed point conversion. */
|
655 |
/* VFP3 fixed point conversion.  Generates op_vfp_<name>to<p>
   (fixed -> float: reinterpret the register bits as an integer,
   convert, then scale by 2^PARAM1) and op_vfp_to<name><p>
   (float -> fixed: scale by 2^PARAM1, then convert with
   round-to-zero).  NOTE(review): the translator is presumed to pass
   the appropriately signed exponent in PARAM1 for each direction —
   confirm against translate.c. */
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
VFP_OP(name##to, p) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(FT0##p), \
                                  &env->vfp.fp_status); \
    FT0##p = ftype##_scalbn(tmp, PARAM1, &env->vfp.fp_status); \
} \
VFP_OP(to##name, p) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(FT0##p, PARAM1, &env->vfp.fp_status); \
    FT0##p = vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
        &env->vfp.fp_status)); \
}

/* sh/sl/uh/ul = signed/unsigned halfword/word fixed-point formats. */
VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
679 |
|
680 |
/* Get and Put values from registers. */
|
681 |
/* Get and Put values from registers.  PARAM1 is a byte offset into
   the CPU state structure (computed by the translator). */
VFP_OP(getreg_F0, d)
{
    FT0d = *(float64 *)((char *) env + PARAM1);
}

VFP_OP(getreg_F0, s)
{
    FT0s = *(float32 *)((char *) env + PARAM1);
}

VFP_OP(getreg_F1, d)
{
    FT1d = *(float64 *)((char *) env + PARAM1);
}

VFP_OP(getreg_F1, s)
{
    FT1s = *(float32 *)((char *) env + PARAM1);
}

VFP_OP(setreg_F0, d)
{
    *(float64 *)((char *) env + PARAM1) = FT0d;
}

VFP_OP(setreg_F0, s)
{
    *(float32 *)((char *) env + PARAM1) = FT0s;
}
710 |
|
711 |
/* Read the FPSCR; the helper (op_helper.c) presumably leaves the
   value in T0 — NOTE(review): confirm against do_vfp_get_fpscr. */
void OPPROTO op_vfp_movl_T0_fpscr(void)
{
    do_vfp_get_fpscr ();
}

/* Read only the NZCV condition flags (FPSCR bits 31:28) into T0. */
void OPPROTO op_vfp_movl_T0_fpscr_flags(void)
{
    T0 = env->vfp.xregs[ARM_VFP_FPSCR] & (0xf << 28);
}

/* Write T0 to the FPSCR via the helper. */
void OPPROTO op_vfp_movl_fpscr_T0(void)
{
    do_vfp_set_fpscr();
}

/* Read/write a VFP system register selected by PARAM1. */
void OPPROTO op_vfp_movl_T0_xreg(void)
{
    T0 = env->vfp.xregs[PARAM1];
}

void OPPROTO op_vfp_movl_xreg_T0(void)
{
    env->vfp.xregs[PARAM1] = T0;
}

/* Move between FT0s and T0 (bit-pattern copy, VMRS/VMSR-style). */
void OPPROTO op_vfp_mrs(void)
{
    T0 = vfp_stoi(FT0s);
}

void OPPROTO op_vfp_msr(void)
{
    FT0s = vfp_itos(T0);
}
746 |
|
747 |
/* Move between FT0d and {T0,T1} */
|
748 |
/* Move between FT0d and {T0,T1}: T0 gets the low word, T1 the high. */
void OPPROTO op_vfp_mrrd(void)
{
    CPU_DoubleU u;

    u.d = FT0d;
    T0 = u.l.lower;
    T1 = u.l.upper;
}

void OPPROTO op_vfp_mdrr(void)
{
    CPU_DoubleU u;

    u.l.lower = T0;
    u.l.upper = T1;
    FT0d = u.d;
}

/* Load immediate (VFP3 VMOV #imm).  PARAM1 is the 32 most significant
   bits of the double value; the low 32 bits are always zero. */
void OPPROTO op_vfp_fconstd(void)
{
    CPU_DoubleU u;
    u.l.upper = PARAM1;
    u.l.lower = 0;
    FT0d = u.d;
}

void OPPROTO op_vfp_fconsts(void)
{
    FT0s = vfp_itos(PARAM1);
}
779 |
|
780 |
/* Copy the most significant bit of T0 to all bits of T1. */
|
781 |
/* Copy the most significant bit of T0 to all bits of T1
   (arithmetic shift right by 31). */
void OPPROTO op_signbit_T1_T0(void)
{
    T1 = (int32_t)T0 >> 31;
}

/* Generic coprocessor access; PARAM1 encodes the coprocessor/register
   selector, decoded by the helper. */
void OPPROTO op_movl_cp_T0(void)
{
    helper_set_cp(env, PARAM1, T0);
    FORCE_RET();
}

void OPPROTO op_movl_T0_cp(void)
{
    T0 = helper_get_cp(env, PARAM1);
    FORCE_RET();
}

/* System control coprocessor (cp15) access. */
void OPPROTO op_movl_cp15_T0(void)
{
    helper_set_cp15(env, PARAM1, T0);
    FORCE_RET();
}

void OPPROTO op_movl_T0_cp15(void)
{
    T0 = helper_get_cp15(env, PARAM1);
    FORCE_RET();
}
809 |
|
810 |
/* Access to user mode registers from privileged modes. */
|
811 |
/* Access to user mode registers from privileged modes (LDM/STM with
   user-bank transfer).  regno is the user-visible register number.
   banked_r13[0]/banked_r14[0] hold the USR-mode SP/LR; in FIQ mode
   r8-r12 are banked so the USR copies live in usr_regs.
   NOTE(review): the FIQ branch assumes 8 <= regno <= 12 — presumably
   guaranteed by the translator; confirm against translate.c. */
void OPPROTO op_movl_T0_user(void)
{
    int regno = PARAM1;
    if (regno == 13) {
        T0 = env->banked_r13[0];
    } else if (regno == 14) {
        T0 = env->banked_r14[0];
    } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        T0 = env->usr_regs[regno - 8];
    } else {
        T0 = env->regs[regno];
    }
    FORCE_RET();
}


/* Store T0 into a user mode register (same banking rules as above). */
void OPPROTO op_movl_user_T0(void)
{
    int regno = PARAM1;
    if (regno == 13) {
        env->banked_r13[0] = T0;
    } else if (regno == 14) {
        env->banked_r14[0] = T0;
    } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = T0;
    } else {
        env->regs[regno] = T0;
    }
    FORCE_RET();
}
841 |
|
842 |
/* ARMv6 Media instructions. */
|
843 |
|
844 |
/* Note that signed overflow is undefined in C. The following routines are
|
845 |
careful to use unsigned types where modulo arithmetic is required.
|
846 |
Failure to do so _will_ break on newer gcc. */
|
847 |
|
848 |
/* Signed saturating arithmetic. */
|
849 |
|
850 |
/* Perform 16-bit signed satruating addition. */
|
851 |
/* 16-bit signed saturating addition: on two's-complement overflow the
   result is clamped to 0x8000 (most negative) or 0x7fff (most positive). */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Overflow iff the operands share a sign bit that the sum lost. */
    if (!((a ^ b) & 0x8000) && ((sum ^ a) & 0x8000)) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
864 |
|
865 |
/* Perform 8-bit signed satruating addition. */
|
866 |
/* 8-bit signed saturating addition: overflow clamps to 0x80 / 0x7f. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Overflow iff the operands share a sign bit that the sum lost. */
    if (!((a ^ b) & 0x80) && ((sum ^ a) & 0x80)) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
879 |
|
880 |
/* Perform 16-bit signed satruating subtraction. */
|
881 |
/* 16-bit signed saturating subtraction: overflow clamps to 0x8000 / 0x7fff. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    /* Overflow iff the operands' signs differ and the result's sign
       differs from the minuend's. */
    if (((a ^ b) & 0x8000) && ((diff ^ a) & 0x8000)) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
894 |
|
895 |
/* Perform 8-bit signed satruating subtraction. */
|
896 |
/* 8-bit signed saturating subtraction: overflow clamps to 0x80 / 0x7f. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    /* Overflow iff the operands' signs differ and the result's sign
       differs from the minuend's. */
    if (((a ^ b) & 0x80) && ((diff ^ a) & 0x80)) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
909 |
|
910 |
/* Instantiate the parallel add/subtract ops (QADD16/QSUB8 etc.) from
   op_addsub.h using the signed saturating helpers; PFX selects the
   generated op-name prefix. */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q
|
915 |
|
916 |
#include "op_addsub.h" |
917 |
|
918 |
/* Unsigned saturating arithmetic. */
|
919 |
/* Perform 16-bit unsigned saturating addition: results above 0xffff
   clamp to 0xffff.  Fix: the second operand was declared uint8_t,
   silently truncating 16-bit operands before the add. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    /* Wrap-around (res < a) means the true sum exceeded 0xffff. */
    if (res < a)
        res = 0xffff;
    return res;
}
927 |
|
928 |
/* Perform 16-bit unsigned saturating subtraction: results below zero
   clamp to 0.  Fixes: the condition was inverted (it returned the
   wrapped difference when a < b and 0 otherwise), and the second
   operand was declared uint8_t, truncating 16-bit operands. */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
935 |
|
936 |
/* 8-bit unsigned saturating addition: sums above 0xff clamp to 0xff. */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t total = a + b;

    /* Wrap-around (total < a) means the true sum exceeded 0xff. */
    if (total < a) {
        total = 0xff;
    }
    return total;
}
944 |
|
945 |
/* Perform 8-bit unsigned saturating subtraction: results below zero
   clamp to 0.  Fix: the condition was inverted — it returned the
   wrapped difference when a < b and 0 otherwise. */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
952 |
|
953 |
/* Instantiate the unsigned saturating parallel ops (UQADD16 etc.)
   from op_addsub.h. */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq
|
958 |
|
959 |
#include "op_addsub.h" |
960 |
|
961 |
/* Signed modulo arithmetic. */
|
962 |
/* Signed modulo arithmetic (SADD16/SSUB8 etc.).  The intermediate is
   computed at full width so the GE flags can be derived from the
   untruncated sign: GE bits for lane n are set when the result is
   non-negative.  `ge` and RESULT() are provided by op_addsub.h
   (ARITH_GE enables the GE accumulation). */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE
|
985 |
|
986 |
#include "op_addsub.h" |
987 |
|
988 |
/* Unsigned modulo arithmetic. */
|
989 |
/* Unsigned modulo arithmetic (UADD16/USUB8 etc.), setting the CPSR GE
   bits.  Per the ARM ARM: for unsigned additions the GE bits are set
   on carry-out (sum >= 2^width); for unsigned subtractions they are
   set when there is no borrow (a >= b, i.e. no wrap in the 32-bit
   difference).  Fixes: the ADD16/ADD8 conditions were inverted
   (tested "== 0", i.e. no carry), and the 8-bit macros set the
   two-bit halfword pattern (3 << (n * 2)) instead of one GE bit per
   byte (1 << n), inconsistent with SARITH8 and with op_sel's
   per-byte mask. */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE
|
1023 |
|
1024 |
#include "op_addsub.h" |
1025 |
|
1026 |
/* Halved signed arithmetic. */
|
1027 |
/* Halved signed arithmetic (SHADD16/SHSUB8 etc.): the full-width
   signed result is arithmetically shifted right by one, so no
   overflow is possible and no flags are set. */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh
|
1036 |
|
1037 |
#include "op_addsub.h" |
1038 |
|
1039 |
/* Halved unsigned arithmetic. */
|
1040 |
/* Halved unsigned arithmetic (UHADD16/UHSUB8 etc.): full-width
   unsigned result logically shifted right by one. */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh
|
1049 |
|
1050 |
#include "op_addsub.h" |
1051 |
|
1052 |
/* PKHTB: pack top halfword of T0 with bottom halfword of T1. */
void OPPROTO op_pkhtb_T0_T1(void)
{
    T0 = (T0 & 0xffff0000) | (T1 & 0xffff);
}

/* PKHBT: pack bottom halfword of T0 with top halfword of T1. */
void OPPROTO op_pkhbt_T0_T1(void)
{
    T0 = (T0 & 0xffff) | (T1 & 0xffff0000);
}
1061 |
/* REV: byte-reverse the whole word. */
void OPPROTO op_rev_T0(void)
{
    T0 =  ((T0 & 0xff000000) >> 24)
        | ((T0 & 0x00ff0000) >> 8)
        | ((T0 & 0x0000ff00) << 8)
        | ((T0 & 0x000000ff) << 24);
}

/* Swap the two halfwords (rotate by 16). */
void OPPROTO op_revh_T0(void)
{
    T0 = (T0 >> 16) | (T0 << 16);
}

/* REV16: byte-reverse each halfword independently. */
void OPPROTO op_rev16_T0(void)
{
    T0 =  ((T0 & 0xff000000) >> 8)
        | ((T0 & 0x00ff0000) << 8)
        | ((T0 & 0x0000ff00) >> 8)
        | ((T0 & 0x000000ff) << 8);
}

/* REVSH: byte-reverse the low halfword, then sign-extend to 32 bits. */
void OPPROTO op_revsh_T0(void)
{
    T0 = (int16_t)(  ((T0 & 0x0000ff00) >> 8)
                   | ((T0 & 0x000000ff) << 8));
}

/* RBIT: reverse the bit order of the word, done as a byte swap
   followed by nibble, then intra-nibble bit swaps. */
void OPPROTO op_rbit_T0(void)
{
    T0 =  ((T0 & 0xff000000) >> 24)
        | ((T0 & 0x00ff0000) >> 8)
        | ((T0 & 0x0000ff00) << 8)
        | ((T0 & 0x000000ff) << 24);
    T0 =  ((T0 & 0xf0f0f0f0) >> 4)
        | ((T0 & 0x0f0f0f0f) << 4);
    T0 =  ((T0 & 0x88888888) >> 3)
        | ((T0 & 0x44444444) >> 1)
        | ((T0 & 0x22222222) << 1)
        | ((T0 & 0x11111111) << 3);
}
1101 |
|
1102 |
/* Swap low and high halfwords. */
|
1103 |
/* Swap low and high halfwords of T1. */
void OPPROTO op_swap_half_T1(void)
{
    T1 = (T1 >> 16) | (T1 << 16);
    FORCE_RET();
}

/* Dual 16-bit signed multiply: T0 = bottom halves product,
   T1 = top halves product (both sign-extended). */
void OPPROTO op_mul_dual_T0_T1(void)
{
    int32_t low;
    int32_t high;
    low = (int32_t)(int16_t)T0 * (int32_t)(int16_t)T1;
    high = (((int32_t)T0) >> 16) * (((int32_t)T1) >> 16);
    T0 = low;
    T1 = high;
}
1119 |
|
1120 |
/* SEL: select each byte of the result from T0 or T1 according to the
   corresponding GE flag bit (GE[n] set -> byte n from T0). */
void OPPROTO op_sel_T0_T1(void)
{
    uint32_t mask;
    uint32_t flags;

    flags = env->GE;
    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    T0 = (T0 & mask) | (T1 & ~mask);
    FORCE_RET();
}
1138 |
|
1139 |
/* Rounding step for the SMMLA/SMMUL "R" variants: add bit 31 of the
   low product word (T0) as a rounding carry into the high word (T1). */
void OPPROTO op_roundqd_T0_T1(void)
{
    T0 = T1 + ((uint32_t)T0 >> 31);
}
1143 |
|
1144 |
/* Signed saturation. */
|
1145 |
static inline uint32_t do_ssat(int32_t val, int shift) |
1146 |
{ |
1147 |
int32_t top; |
1148 |
uint32_t mask; |
1149 |
|
1150 |
shift = PARAM1; |
1151 |
top = val >> shift; |
1152 |
mask = (1u << shift) - 1; |
1153 |
if (top > 0) { |
1154 |
env->QF = 1;
|
1155 |
return mask;
|
1156 |
} else if (top < -1) { |
1157 |
env->QF = 1;
|
1158 |
return ~mask;
|
1159 |
} |
1160 |
return val;
|
1161 |
} |
1162 |
|
1163 |
/* Unsigned saturation. */
|
1164 |
static inline uint32_t do_usat(int32_t val, int shift) |
1165 |
{ |
1166 |
uint32_t max; |
1167 |
|
1168 |
shift = PARAM1; |
1169 |
max = (1u << shift) - 1; |
1170 |
if (val < 0) { |
1171 |
env->QF = 1;
|
1172 |
return 0; |
1173 |
} else if (val > max) { |
1174 |
env->QF = 1;
|
1175 |
return max;
|
1176 |
} |
1177 |
return val;
|
1178 |
} |
1179 |
|
1180 |
/* Signed saturate. */
|
1181 |
/* Signed saturate (SSAT).  NOTE(review): despite the _T1 suffix these
   ops read and write T0 — presumably a historical naming slip. */
void OPPROTO op_ssat_T1(void)
{
    T0 = do_ssat(T0, PARAM1);
    FORCE_RET();
}

/* Dual halfword signed saturate (SSAT16): saturate each halfword of
   T0 independently. */
void OPPROTO op_ssat16_T1(void)
{
    uint32_t res;

    res = (uint16_t)do_ssat((int16_t)T0, PARAM1);
    res |= do_ssat(((int32_t)T0) >> 16, PARAM1) << 16;
    T0 = res;
    FORCE_RET();
}

/* Unsigned saturate (USAT). */
void OPPROTO op_usat_T1(void)
{
    T0 = do_usat(T0, PARAM1);
    FORCE_RET();
}

/* Dual halfword unsigned saturate (USAT16). */
void OPPROTO op_usat16_T1(void)
{
    uint32_t res;

    res = (uint16_t)do_usat((int16_t)T0, PARAM1);
    res |= do_usat(((int32_t)T0) >> 16, PARAM1) << 16;
    T0 = res;
    FORCE_RET();
}
1215 |
|
1216 |
/* Dual 16-bit add. */
|
1217 |
/* Absolute difference of two unsigned bytes. */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return (a > b) ? a - b : b - a;
}
1224 |
|
1225 |
/* Unsigned sum of absolute byte differences. */
|
1226 |
/* USAD8: unsigned sum of absolute differences of the four byte lanes
   of T0 and T1. */
void OPPROTO op_usad8_T0_T1(void)
{
    uint32_t sum;
    sum = do_usad(T0, T1);
    sum += do_usad(T0 >> 8, T1 >> 8);
    sum += do_usad(T0 >> 16, T1 >>16);
    sum += do_usad(T0 >> 24, T1 >> 24);
    T0 = sum;
}
1235 |
|
1236 |
/* Thumb-2 instructions. */
|
1237 |
|
1238 |
/* Insert T1 into T0. Result goes in T1. */
|
1239 |
/* BFI: insert T1 into T0 under MASK after shifting left by SHIFT.
   Result goes in T1. */
void OPPROTO op_bfi_T1_T0(void)
{
    int shift = PARAM1;
    uint32_t mask = PARAM2;
    uint32_t bits;

    bits = (T1 << shift) & mask;
    T1 = (T0 & ~mask) | bits;
}

/* UBFX: unsigned bitfield extract — shift down, then mask to the
   field width. */
void OPPROTO op_ubfx_T1(void)
{
    uint32_t shift = PARAM1;
    uint32_t mask = PARAM2;

    T1 >>= shift;
    T1 &= mask;
}

/* SBFX: signed bitfield extract — align the field at bit 31, then
   arithmetic-shift back down to sign-extend it. */
void OPPROTO op_sbfx_T1(void)
{
    uint32_t shift = PARAM1;
    uint32_t width = PARAM2;
    int32_t val;

    val = T1 << (32 - (shift + width));
    T1 = val >> (32 - width);
}
1269 |
|
1270 |
/* MOVT: replace the top halfword of T0 with the immediate in PARAM1
   (the translator supplies it pre-shifted into bits 31:16). */
void OPPROTO op_movtop_T0_im(void)
{
    T0 = (T0 & 0xffff) | PARAM1;
}

/* Used by table branch (TBB/TBH) instructions: PC = base + 2 * entry. */
void OPPROTO op_jmp_T0_im(void)
{
    env->regs[15] = PARAM1 + (T0 << 1);
}

/* Update the Thumb-2 IT-block condexec state. */
void OPPROTO op_set_condexec(void)
{
    env->condexec_bits = PARAM1;
}
1285 |
|
1286 |
/* T0 = T0 / T1, signed (SDIV).  ARM defines x/0 = 0 and
   INT_MIN / -1 = INT_MIN.  Fix: the INT_MIN / -1 case is undefined
   behavior in C and traps (SIGFPE) on common hosts such as x86, so it
   must be special-cased rather than handed to the host divide. */
void OPPROTO op_sdivl_T0_T1(void)
{
    int32_t num;
    int32_t den;
    num = T0;
    den = T1;
    if (den == 0)
        T0 = 0;
    else if (num == (int32_t)0x80000000 && den == -1)
        T0 = num; /* overflow case: result defined as INT_MIN */
    else
        T0 = num / den;
    FORCE_RET();
}
1298 |
|
1299 |
/* T0 = T0 / T1, unsigned (UDIV); ARM defines x/0 = 0. */
void OPPROTO op_udivl_T0_T1(void)
{
    uint32_t num;
    uint32_t den;
    num = T0;
    den = T1;
    if (den == 0)
        T0 = 0;
    else
        T0 = num / den;
    FORCE_RET();
}
1311 |
|
1312 |
/* Access a banked r13 (SP) for the mode selected by PARAM1. */
void OPPROTO op_movl_T1_r13_banked(void)
{
    T1 = helper_get_r13_banked(env, PARAM1);
}

void OPPROTO op_movl_r13_T1_banked(void)
{
    helper_set_r13_banked(env, PARAM1, T1);
}

/* v7M system register access (MRS/MSR); PARAM1 selects the register. */
void OPPROTO op_v7m_mrs_T0(void)
{
    T0 = helper_v7m_mrs(env, PARAM1);
}

void OPPROTO op_v7m_msr_T0(void)
{
    helper_v7m_msr(env, PARAM1, T0);
}

/* Read the v7M MSP/PSP selected by PARAM1: the currently active stack
   pointer lives in regs[13], the inactive one in v7m.other_sp. */
void OPPROTO op_movl_T0_sp(void)
{
    if (PARAM1 == env->v7m.current_sp)
        T0 = env->regs[13];
    else
        T0 = env->v7m.other_sp;
    FORCE_RET();
}
1340 |
|
1341 |
#include "op_neon.h" |
1342 |
|
1343 |
/* iwMMXt support */
|
1344 |
#include "op_iwmmxt.c" |