Revision 8f8e3aa4 target-arm/op_neon.h
b/target-arm/op_neon.h | ||
---|---|---|
47 | 47 |
T1 = *(uint32_t *)((char *) env + PARAM1); |
48 | 48 |
} |
49 | 49 |
|
50 |
NEON_OP(getreg_T2) |
|
51 |
{ |
|
52 |
T2 = *(uint32_t *)((char *) env + PARAM1); |
|
53 |
} |
|
54 |
|
|
55 | 50 |
NEON_OP(setreg_T0) |
56 | 51 |
{ |
57 | 52 |
*(uint32_t *)((char *) env + PARAM1) = T0; |
... | ... | |
62 | 57 |
*(uint32_t *)((char *) env + PARAM1) = T1; |
63 | 58 |
} |
64 | 59 |
|
65 |
NEON_OP(setreg_T2) |
|
66 |
{ |
|
67 |
*(uint32_t *)((char *) env + PARAM1) = T2; |
|
68 |
} |
|
69 |
|
|
70 | 60 |
#define NEON_TYPE1(name, type) \ |
71 | 61 |
typedef struct \ |
72 | 62 |
{ \ |
... | ... | |
293 | 283 |
FORCE_RET(); |
294 | 284 |
} |
295 | 285 |
|
296 |
/* ??? bsl, bif and bit are all the same op, just with the operands in a
   different order.  It's currently easier to have 3 different ops than
   rearrange the operands. */
|
299 |
|
|
300 |
/* Bitwise Select. */ |
|
301 |
NEON_OP(bsl) |
|
302 |
{ |
|
303 |
T0 = (T0 & T2) | (T1 & ~T2); |
|
304 |
} |
|
305 |
|
|
306 |
/* Bitwise Insert If True. */ |
|
307 |
NEON_OP(bit) |
|
308 |
{ |
|
309 |
T0 = (T0 & T1) | (T2 & ~T1); |
|
310 |
} |
|
311 |
|
|
312 |
/* Bitwise Insert If False. */ |
|
313 |
NEON_OP(bif) |
|
314 |
{ |
|
315 |
T0 = (T2 & T1) | (T0 & ~T1); |
|
316 |
} |
|
317 |
|
|
318 | 286 |
#define NEON_USAT(dest, src1, src2, type) do { \ |
319 | 287 |
uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ |
320 | 288 |
if (tmp != (type)tmp) { \ |
... | ... | |
423 | 391 |
|
424 | 392 |
NEON_OP(shl_u64) |
425 | 393 |
{ |
426 |
int8_t shift = T2;
|
|
394 |
int8_t shift = env->vfp.scratch[0];
|
|
427 | 395 |
uint64_t val = T0 | ((uint64_t)T1 << 32); |
428 | 396 |
if (shift < 0) { |
429 | 397 |
val >>= -shift; |
... | ... | |
437 | 405 |
|
438 | 406 |
NEON_OP(shl_s64) |
439 | 407 |
{ |
440 |
int8_t shift = T2;
|
|
408 |
int8_t shift = env->vfp.scratch[0];
|
|
441 | 409 |
int64_t val = T0 | ((uint64_t)T1 << 32); |
442 | 410 |
if (shift < 0) { |
443 | 411 |
val >>= -shift; |
... | ... | |
468 | 436 |
|
469 | 437 |
NEON_OP(rshl_u64) |
470 | 438 |
{ |
471 |
int8_t shift = T2;
|
|
439 |
int8_t shift = env->vfp.scratch[0];
|
|
472 | 440 |
uint64_t val = T0 | ((uint64_t)T1 << 32); |
473 | 441 |
if (shift < 0) { |
474 | 442 |
val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift; |
... | ... | |
483 | 451 |
|
484 | 452 |
NEON_OP(rshl_s64) |
485 | 453 |
{ |
486 |
int8_t shift = T2;
|
|
454 |
int8_t shift = env->vfp.scratch[0];
|
|
487 | 455 |
int64_t val = T0 | ((uint64_t)T1 << 32); |
488 | 456 |
if (shift < 0) { |
489 | 457 |
val = (val + ((int64_t)1 << (-1 - shift))) >> -shift; |
... | ... | |
514 | 482 |
|
515 | 483 |
NEON_OP(qshl_s64) |
516 | 484 |
{ |
517 |
int8_t shift = T2;
|
|
485 |
int8_t shift = env->vfp.scratch[0];
|
|
518 | 486 |
int64_t val = T0 | ((uint64_t)T1 << 32); |
519 | 487 |
if (shift < 0) { |
520 | 488 |
val >>= -shift; |
... | ... | |
550 | 518 |
|
551 | 519 |
NEON_OP(qshl_u64) |
552 | 520 |
{ |
553 |
int8_t shift = T2;
|
|
521 |
int8_t shift = env->vfp.scratch[0];
|
|
554 | 522 |
uint64_t val = T0 | ((uint64_t)T1 << 32); |
555 | 523 |
if (shift < 0) { |
556 | 524 |
val >>= -shift; |
... | ... | |
1713 | 1681 |
FORCE_RET(); |
1714 | 1682 |
} |
1715 | 1683 |
|
1716 |
/* Table lookup. This accessed the register file directly. */ |
|
1717 |
NEON_OP(tbl) |
|
1718 |
{ |
|
1719 |
helper_neon_tbl(PARAM1, PARAM2); |
|
1720 |
} |
|
1721 |
|
|
1722 | 1684 |
NEON_OP(dup_u8) |
1723 | 1685 |
{ |
1724 | 1686 |
T0 = (T0 >> PARAM1) & 0xff; |
... | ... | |
1726 | 1688 |
T0 |= T0 << 16; |
1727 | 1689 |
FORCE_RET(); |
1728 | 1690 |
} |
1729 |
|
|
1730 |
/* Helpers for element load/store. */ |
|
1731 |
NEON_OP(insert_elt) |
|
1732 |
{ |
|
1733 |
int shift = PARAM1; |
|
1734 |
uint32_t mask = PARAM2; |
|
1735 |
T2 = (T2 & mask) | (T0 << shift); |
|
1736 |
FORCE_RET(); |
|
1737 |
} |
|
1738 |
|
|
1739 |
NEON_OP(extract_elt) |
|
1740 |
{ |
|
1741 |
int shift = PARAM1; |
|
1742 |
uint32_t mask = PARAM2; |
|
1743 |
T0 = (T2 & mask) >> shift; |
|
1744 |
FORCE_RET(); |
|
1745 |
} |
Also available in: Unified diff