Revision 0487d6a8 target-ppc/op_helper.c

--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -19,12 +19,17 @@
  */
 #include "exec.h"
 
+#include "op_helper.h"
+
 #define MEMSUFFIX _raw
+#include "op_helper.h"
 #include "op_helper_mem.h"
 #if !defined(CONFIG_USER_ONLY)
 #define MEMSUFFIX _user
+#include "op_helper.h"
 #include "op_helper_mem.h"
 #define MEMSUFFIX _kernel
+#include "op_helper.h"
 #include "op_helper_mem.h"
 #endif
 
@@ -229,7 +234,7 @@
     mul64(plow, phigh, T0, T1);
 }
 
-static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
+static void imul64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
 {
     int sa, sb;
     sa = (a < 0);
@@ -1119,6 +1124,868 @@
     T0 = i;
 }
 
+#if defined(TARGET_PPCSPE)
+/* SPE extension helpers */
+/* Use a table to make this quicker */
+static uint8_t hbrev[16] = {
+    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
+    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
+};
+
+static inline uint8_t byte_reverse (uint8_t val)
+{
+    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
+}
+
+static inline uint32_t word_reverse (uint32_t val)
+{
+    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
+        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
+}
+
+#define MASKBITS 16 // Random value - to be fixed
+void do_brinc (void)
+{
+    uint32_t a, b, d, mask;
+
+    mask = (uint32_t)(-1UL) >> MASKBITS;
+    b = T1_64 & mask;
+    a = T0_64 & mask;
+    d = word_reverse(1 + word_reverse(a | ~mask));
+    T0_64 = (T0_64 & ~mask) | (d & mask);
+}
+
+#define DO_SPE_OP2(name)                                                      \
+void do_ev##name (void)                                                       \
+{                                                                             \
+    T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) |         \
+        (uint64_t)_do_e##name(T0_64, T1_64);                                  \
+}
+
+#define DO_SPE_OP1(name)                                                      \
+void do_ev##name (void)                                                       \
+{                                                                             \
+    T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) |                      \
+        (uint64_t)_do_e##name(T0_64);                                         \
+}
+
+/* Fixed-point vector arithmetic */
+static inline uint32_t _do_eabs (uint32_t val)
+{
+    if (val != 0x80000000)
+        val &= ~0x80000000;
+
+    return val;
+}
+
+static inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
+{
+    return op1 + op2;
+}
+
+static inline int _do_ecntlsw (uint32_t val)
+{
+    if (val & 0x80000000)
+        return _do_cntlzw(~val);
+    else
+        return _do_cntlzw(val);
+}
+
+static inline int _do_ecntlzw (uint32_t val)
+{
+    return _do_cntlzw(val);
+}
+
+static inline uint32_t _do_eneg (uint32_t val)
+{
+    if (val != 0x80000000)
+        val ^= 0x80000000;
+
+    return val;
+}
+
+static inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
+{
+    return rotl32(op1, op2);
+}
+
+static inline uint32_t _do_erndw (uint32_t val)
+{
+    return (val + 0x000080000000) & 0xFFFF0000;
+}
+
+static inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
+{
+    /* No error here: 6 bits are used */
+    return op1 << (op2 & 0x3F);
+}
+
+static inline int32_t _do_esrws (int32_t op1, uint32_t op2)
+{
+    /* No error here: 6 bits are used */
+    return op1 >> (op2 & 0x3F);
+}
+
+static inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
+{
+    /* No error here: 6 bits are used */
+    return op1 >> (op2 & 0x3F);
+}
+
+static inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
+{
+    return op2 - op1;
+}
+
+/* evabs */
+DO_SPE_OP1(abs);
+/* evaddw */
+DO_SPE_OP2(addw);
+/* evcntlsw */
+DO_SPE_OP1(cntlsw);
+/* evcntlzw */
+DO_SPE_OP1(cntlzw);
+/* evneg */
+DO_SPE_OP1(neg);
+/* evrlw */
+DO_SPE_OP2(rlw);
+/* evrnd */
+DO_SPE_OP1(rndw);
+/* evslw */
+DO_SPE_OP2(slw);
+/* evsrws */
+DO_SPE_OP2(srws);
+/* evsrwu */
+DO_SPE_OP2(srwu);
+/* evsubfw */
+DO_SPE_OP2(subfw);
+
+/* evsel is a little bit more complicated... */
+static inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
+{
+    if (n)
+        return op1;
+    else
+        return op2;
+}
+
+void do_evsel (void)
+{
+    T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
+        (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
+}
+
+/* Fixed-point vector comparisons */
+#define DO_SPE_CMP(name)                                                      \
+void do_ev##name (void)                                                       \
+{                                                                             \
+    T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32,                   \
+                                               T1_64 >> 32) << 32,            \
+                         _do_e##name(T0_64, T1_64));                          \
+}
+
+static inline uint32_t _do_evcmp_merge (int t0, int t1)
+{
+    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
+}
+static inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
+{
+    return op1 == op2 ? 1 : 0;
+}
+
+static inline int _do_ecmpgts (int32_t op1, int32_t op2)
+{
+    return op1 > op2 ? 1 : 0;
+}
+
+static inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
+{
+    return op1 > op2 ? 1 : 0;
+}
+
+static inline int _do_ecmplts (int32_t op1, int32_t op2)
+{
+    return op1 < op2 ? 1 : 0;
+}
+
+static inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
+{
+    return op1 < op2 ? 1 : 0;
+}
+
+/* evcmpeq */
+DO_SPE_CMP(cmpeq);
+/* evcmpgts */
+DO_SPE_CMP(cmpgts);
+/* evcmpgtu */
+DO_SPE_CMP(cmpgtu);
+/* evcmplts */
+DO_SPE_CMP(cmplts);
+/* evcmpltu */
+DO_SPE_CMP(cmpltu);
+
+/* Single precision floating-point conversions from/to integer */
+static inline uint32_t _do_efscfsi (int32_t val)
+{
+    union {
+        uint32_t u;
+        float32 f;
+    } u;
+
+    u.f = int32_to_float32(val, &env->spe_status);
+
+    return u.u;
+}
+
+static inline uint32_t _do_efscfui (uint32_t val)
+{
+    union {
+        uint32_t u;
+        float32 f;
+    } u;
+
+    u.f = uint32_to_float32(val, &env->spe_status);
+
+    return u.u;
+}
+
+static inline int32_t _do_efsctsi (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float32_to_int32(u.f, &env->spe_status);
+}
+
+static inline uint32_t _do_efsctui (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float32_to_uint32(u.f, &env->spe_status);
+}
+
+static inline int32_t _do_efsctsiz (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
+}
+
+static inline uint32_t _do_efsctuiz (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
+}
+
+void do_efscfsi (void)
+{
+    T0_64 = _do_efscfsi(T0_64);
+}
+
+void do_efscfui (void)
+{
+    T0_64 = _do_efscfui(T0_64);
+}
+
+void do_efsctsi (void)
+{
+    T0_64 = _do_efsctsi(T0_64);
+}
+
+void do_efsctui (void)
+{
+    T0_64 = _do_efsctui(T0_64);
+}
+
+void do_efsctsiz (void)
+{
+    T0_64 = _do_efsctsiz(T0_64);
+}
+
+void do_efsctuiz (void)
+{
+    T0_64 = _do_efsctuiz(T0_64);
+}
+
+/* Single precision floating-point conversion to/from fractional */
+static inline uint32_t _do_efscfsf (uint32_t val)
+{
+    union {
+        uint32_t u;
+        float32 f;
+    } u;
+    float32 tmp;
+
+    u.f = int32_to_float32(val, &env->spe_status);
+    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
+    u.f = float32_div(u.f, tmp, &env->spe_status);
+
+    return u.u;
+}
+
+static inline uint32_t _do_efscfuf (uint32_t val)
+{
+    union {
+        uint32_t u;
+        float32 f;
+    } u;
+    float32 tmp;
+
+    u.f = uint32_to_float32(val, &env->spe_status);
+    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
+    u.f = float32_div(u.f, tmp, &env->spe_status);
+
+    return u.u;
+}
+
+static inline int32_t _do_efsctsf (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+    float32 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
+    u.f = float32_mul(u.f, tmp, &env->spe_status);
+
+    return float32_to_int32(u.f, &env->spe_status);
+}
+
+static inline uint32_t _do_efsctuf (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+    float32 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
+    u.f = float32_mul(u.f, tmp, &env->spe_status);
+
+    return float32_to_uint32(u.f, &env->spe_status);
+}
+
+static inline int32_t _do_efsctsfz (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+    float32 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
+    u.f = float32_mul(u.f, tmp, &env->spe_status);
+
+    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
+}
+
+static inline uint32_t _do_efsctufz (uint32_t val)
+{
+    union {
+        int32_t u;
+        float32 f;
+    } u;
+    float32 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
+    u.f = float32_mul(u.f, tmp, &env->spe_status);
+
+    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
+}
+
+void do_efscfsf (void)
+{
+    T0_64 = _do_efscfsf(T0_64);
+}
+
+void do_efscfuf (void)
+{
+    T0_64 = _do_efscfuf(T0_64);
+}
+
+void do_efsctsf (void)
+{
+    T0_64 = _do_efsctsf(T0_64);
+}
+
+void do_efsctuf (void)
+{
+    T0_64 = _do_efsctuf(T0_64);
+}
+
+void do_efsctsfz (void)
+{
+    T0_64 = _do_efsctsfz(T0_64);
+}
+
+void do_efsctufz (void)
+{
+    T0_64 = _do_efsctufz(T0_64);
+}
+
+/* Double precision floating point helpers */
+static inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinites, ...) */
+    return _do_efdtstlt(op1, op2);
+}
+
+static inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinites, ...) */
+    return _do_efdtstgt(op1, op2);
+}
+
+static inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinites, ...) */
+    return _do_efdtsteq(op1, op2);
+}
+
+void do_efdcmplt (void)
+{
+    T0 = _do_efdcmplt(T0_64, T1_64);
+}
+
+void do_efdcmpgt (void)
+{
+    T0 = _do_efdcmpgt(T0_64, T1_64);
+}
+
+void do_efdcmpeq (void)
+{
+    T0 = _do_efdcmpeq(T0_64, T1_64);
+}
+
+/* Double precision floating-point conversion to/from integer */
+static inline uint64_t _do_efdcfsi (int64_t val)
+{
+    union {
+        uint64_t u;
+        float64 f;
+    } u;
+
+    u.f = int64_to_float64(val, &env->spe_status);
+
+    return u.u;
+}
+
+static inline uint64_t _do_efdcfui (uint64_t val)
+{
+    union {
+        uint64_t u;
+        float64 f;
+    } u;
+
+    u.f = uint64_to_float64(val, &env->spe_status);
+
+    return u.u;
+}
+
+static inline int64_t _do_efdctsi (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float64_to_int64(u.f, &env->spe_status);
+}
+
+static inline uint64_t _do_efdctui (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float64_to_uint64(u.f, &env->spe_status);
+}
+
+static inline int64_t _do_efdctsiz (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float64_to_int64_round_to_zero(u.f, &env->spe_status);
+}
+
+static inline uint64_t _do_efdctuiz (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+
+    return float64_to_uint64_round_to_zero(u.f, &env->spe_status);
+}
+
+void do_efdcfsi (void)
+{
+    T0_64 = _do_efdcfsi(T0_64);
+}
+
+void do_efdcfui (void)
+{
+    T0_64 = _do_efdcfui(T0_64);
+}
+
+void do_efdctsi (void)
+{
+    T0_64 = _do_efdctsi(T0_64);
+}
+
+void do_efdctui (void)
+{
+    T0_64 = _do_efdctui(T0_64);
+}
+
+void do_efdctsiz (void)
+{
+    T0_64 = _do_efdctsiz(T0_64);
+}
+
+void do_efdctuiz (void)
+{
+    T0_64 = _do_efdctuiz(T0_64);
+}
+
+/* Double precision floating-point conversion to/from fractional */
+static inline uint64_t _do_efdcfsf (int64_t val)
+{
+    union {
+        uint64_t u;
+        float64 f;
+    } u;
+    float64 tmp;
+
+    u.f = int32_to_float64(val, &env->spe_status);
+    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
+    u.f = float64_div(u.f, tmp, &env->spe_status);
+
+    return u.u;
+}
+
+static inline uint64_t _do_efdcfuf (uint64_t val)
+{
+    union {
+        uint64_t u;
+        float64 f;
+    } u;
+    float64 tmp;
+
+    u.f = uint32_to_float64(val, &env->spe_status);
+    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
+    u.f = float64_div(u.f, tmp, &env->spe_status);
+
+    return u.u;
+}
+
+static inline int64_t _do_efdctsf (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+    float64 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
+    u.f = float64_mul(u.f, tmp, &env->spe_status);
+
+    return float64_to_int32(u.f, &env->spe_status);
+}
+
+static inline uint64_t _do_efdctuf (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+    float64 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
+    u.f = float64_mul(u.f, tmp, &env->spe_status);
+
+    return float64_to_uint32(u.f, &env->spe_status);
+}
+
+static inline int64_t _do_efdctsfz (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+    float64 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
+    u.f = float64_mul(u.f, tmp, &env->spe_status);
+
+    return float64_to_int32_round_to_zero(u.f, &env->spe_status);
+}
+
+static inline uint64_t _do_efdctufz (uint64_t val)
+{
+    union {
+        int64_t u;
+        float64 f;
+    } u;
+    float64 tmp;
+
+    u.u = val;
+    /* NaN are not treated the same way IEEE 754 does */
+    if (unlikely(isnan(u.f)))
+        return 0;
+    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
+    u.f = float64_mul(u.f, tmp, &env->spe_status);
+
+    return float64_to_uint32_round_to_zero(u.f, &env->spe_status);
+}
+
+void do_efdcfsf (void)
+{
+    T0_64 = _do_efdcfsf(T0_64);
+}
+
+void do_efdcfuf (void)
+{
+    T0_64 = _do_efdcfuf(T0_64);
+}
+
+void do_efdctsf (void)
+{
+    T0_64 = _do_efdctsf(T0_64);
+}
+
+void do_efdctuf (void)
+{
+    T0_64 = _do_efdctuf(T0_64);
+}
+
+void do_efdctsfz (void)
+{
+    T0_64 = _do_efdctsfz(T0_64);
+}
+
+void do_efdctufz (void)
+{
+    T0_64 = _do_efdctufz(T0_64);
+}
+
+/* Floating point conversion between single and double precision */
+static inline uint32_t _do_efscfd (uint64_t val)
+{
+    union {
+        uint64_t u;
+        float64 f;
+    } u1;
+    union {
+        uint32_t u;
+        float32 f;
+    } u2;
+
+    u1.u = val;
+    u2.f = float64_to_float32(u1.f, &env->spe_status);
+
+    return u2.u;
+}
+
+static inline uint64_t _do_efdcfs (uint32_t val)
+{
+    union {
+        uint64_t u;
+        float64 f;
+    } u2;
+    union {
+        uint32_t u;
+        float32 f;
+    } u1;
+
+    u1.u = val;
+    u2.f = float32_to_float64(u1.f, &env->spe_status);
+
+    return u2.u;
+}
+
+void do_efscfd (void)
+{
+    T0_64 = _do_efscfd(T0_64);
+}
+
+void do_efdcfs (void)
+{
+    T0_64 = _do_efdcfs(T0_64);
+}
+
+/* Single precision fixed-point vector arithmetic */
+/* evfsabs */
+DO_SPE_OP1(fsabs);
+/* evfsnabs */
+DO_SPE_OP1(fsnabs);
+/* evfsneg */
+DO_SPE_OP1(fsneg);
+/* evfsadd */
+DO_SPE_OP2(fsadd);
+/* evfssub */
+DO_SPE_OP2(fssub);
+/* evfsmul */
+DO_SPE_OP2(fsmul);
+/* evfsdiv */
+DO_SPE_OP2(fsdiv);
+
+/* Single-precision floating-point comparisons */
+static inline int _do_efscmplt (uint32_t op1, uint32_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinites, ...) */
+    return _do_efststlt(op1, op2);
+}
+
+static inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinites, ...) */
+    return _do_efststgt(op1, op2);
+}
+
+static inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
+{
+    /* XXX: TODO: test special values (NaN, infinites, ...) */
+    return _do_efststeq(op1, op2);
+}
+
+void do_efscmplt (void)
+{
+    T0 = _do_efscmplt(T0_64, T1_64);
+}
+
+void do_efscmpgt (void)
+{
+    T0 = _do_efscmpgt(T0_64, T1_64);
+}
+
+void do_efscmpeq (void)
+{
+    T0 = _do_efscmpeq(T0_64, T1_64);
+}
+
+/* Single-precision floating-point vector comparisons */
+/* evfscmplt */
+DO_SPE_CMP(fscmplt);
+/* evfscmpgt */
+DO_SPE_CMP(fscmpgt);
+/* evfscmpeq */
+DO_SPE_CMP(fscmpeq);
+/* evfststlt */
+DO_SPE_CMP(fststlt);
+/* evfststgt */
+DO_SPE_CMP(fststgt);
+/* evfststeq */
+DO_SPE_CMP(fststeq);
+
+/* Single-precision floating-point vector conversions */
+/* evfscfsi */
+DO_SPE_OP1(fscfsi);
+/* evfscfui */
+DO_SPE_OP1(fscfui);
+/* evfscfuf */
+DO_SPE_OP1(fscfuf);
+/* evfscfsf */
+DO_SPE_OP1(fscfsf);
+/* evfsctsi */
+DO_SPE_OP1(fsctsi);
+/* evfsctui */
+DO_SPE_OP1(fsctui);
+/* evfsctsiz */
+DO_SPE_OP1(fsctsiz);
+/* evfsctuiz */
+DO_SPE_OP1(fsctuiz);
+/* evfsctsf */
+DO_SPE_OP1(fsctsf);
+/* evfsctuf */
+DO_SPE_OP1(fsctuf);
+#endif /* defined(TARGET_PPCSPE) */
+
 /*****************************************************************************/
 /* Softmmu support */
 #if !defined (CONFIG_USER_ONLY)
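
Note on the DO_SPE_OP1/DO_SPE_OP2 macros added by this revision: each generated SPE vector helper applies a 32-bit scalar helper to the upper and lower halves of the 64-bit register image and packs the two results back into a single 64-bit value. The standalone sketch below shows what DO_SPE_OP2(addw) effectively expands to; it is illustrative only and not part of the revision, with a local ev_addw() function and plain variables standing in for QEMU's generated do_evaddw() and the T0_64/T1_64 globals.

/* Illustrative sketch (assumed names, not QEMU code): the lane-split
 * pattern used by DO_SPE_OP2, demonstrated for the addw case. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
{
    return op1 + op2;                       /* per-lane 32-bit add */
}

static uint64_t ev_addw (uint64_t t0, uint64_t t1)   /* hypothetical name */
{
    /* High lane lives in the upper 32 bits, low lane in the lower 32 bits;
     * passing the full 64-bit values to the low-lane call truncates them,
     * exactly as the macro's _do_e##name(T0_64, T1_64) call does. */
    return ((uint64_t)_do_eaddw(t0 >> 32, t1 >> 32) << 32) |
        (uint64_t)_do_eaddw(t0, t1);
}

int main (void)
{
    uint64_t a = 0x0000000100000002ULL;     /* lanes {1, 2}   */
    uint64_t b = 0x0000001000000020ULL;     /* lanes {16, 32} */
    /* Prints 0000001100000022, i.e. lanes {17, 34} */
    printf("%016llx\n", (unsigned long long)ev_addw(a, b));
    return 0;
}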
