Statistics
| Branch: | Revision:

root / target-arm / neon_helper.c @ 960e623b

History | View | Annotate | Download (51.3 kB)

1
/*
2
 * ARM NEON vector operations.
3
 *
4
 * Copyright (c) 2007, 2008 CodeSourcery.
5
 * Written by Paul Brook
6
 *
7
 * This code is licensed under the GNU GPL v2.
8
 */
9
#include <stdlib.h>
10
#include <stdio.h>
11

    
12
#include "cpu.h"
13
#include "exec-all.h"
14
#include "helpers.h"
15

    
16
#define SIGNBIT (uint32_t)0x80000000
17
#define SIGNBIT64 ((uint64_t)1 << 63)
18

    
19
#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
20

    
21
static float_status neon_float_status;
22
#define NFS &neon_float_status
23

    
24
/* Helper routines to perform bitwise copies between float and int.  */
/* Reinterpret the bits of a 32-bit integer as a float32.  Type-punning
 * through a union; well-defined under GCC's aliasing rules, which QEMU
 * relies on throughout.  */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

/* Reinterpret the bits of a float32 as a 32-bit integer (inverse of
 * vfp_itos).  */
static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}
46

    
47
/* Containers for 1, 2 or 4 vector lanes packed into one 32-bit word.
 * Lane v1 is the least significant part of the word, so the field
 * declaration order depends on host byte order.  */
#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

/* Instantiate the lane containers for each element size/signedness used
 * by the helpers below, then drop the generator macros.  */
NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1
93

    
94
/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)

/* Apply the current NEON_FN to every lane (1, 2 or 4 lanes).  */
#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);

/* Shared body for binary lanewise helpers: unpack both 32-bit operands
 * into lane structs, apply NEON_FN per lane, repack and return.  */
#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Emit a binary lanewise helper function named helper_neon_<name>.  */
#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

/* As NEON_VOP, but the helper also receives the CPU state so the
 * per-lane operation can use SET_QC() for saturation.  */
#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
145

    
146
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \

/* Emit a pairwise helper: adjacent lanes within each operand are
 * combined; the low half of the result comes from arg1, the high half
 * from arg2.  */
#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Unary operators.  */
/* Emit a unary lanewise helper.  NEON_DO##n nominally passes a second
 * source, but the unary NEON_FN definitions ignore that argument, so
 * only vsrc1 is needed.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}
183

    
184

    
185
#define NEON_USAT(dest, src1, src2, type) do { \
186
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
187
    if (tmp != (type)tmp) { \
188
        SET_QC(); \
189
        dest = ~0; \
190
    } else { \
191
        dest = tmp; \
192
    }} while(0)
193
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
194
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
195
#undef NEON_FN
196
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
197
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
198
#undef NEON_FN
199
#undef NEON_USAT
200

    
201
/* Unsigned saturating add (VQADD.U32): on overflow, saturate to
 * 0xffffffff and set the sticky QC flag.  */
uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;

    /* Unsigned addition wrapped iff the sum is smaller than an operand. */
    if (sum >= a) {
        return sum;
    }
    SET_QC();
    return ~0;
}
210

    
211
uint64_t HELPER(neon_qadd_u64)(CPUState *env, uint64_t src1, uint64_t src2)
212
{
213
    uint64_t res;
214

    
215
    res = src1 + src2;
216
    if (res < src1) {
217
        SET_QC();
218
        res = ~(uint64_t)0;
219
    }
220
    return res;
221
}
222

    
223
#define NEON_SSAT(dest, src1, src2, type) do { \
224
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
225
    if (tmp != (type)tmp) { \
226
        SET_QC(); \
227
        if (src2 > 0) { \
228
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
229
        } else { \
230
            tmp = 1 << (sizeof(type) * 8 - 1); \
231
        } \
232
    } \
233
    dest = tmp; \
234
    } while(0)
235
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
236
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
237
#undef NEON_FN
238
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
239
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
240
#undef NEON_FN
241
#undef NEON_SSAT
242

    
243
/* Signed saturating add (VQADD.S32): on overflow, saturate to
 * INT32_MAX/INT32_MIN and set the sticky QC flag.  */
uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;
    /* Signed overflow: operands share a sign but the sum's sign differs. */
    int overflow = !((a ^ b) & SIGNBIT) && ((sum ^ a) & SIGNBIT);

    if (overflow) {
        SET_QC();
        /* Saturate toward the operands' sign: 0x7fffffff or 0x80000000. */
        sum = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return sum;
}
252

    
253
/* Signed saturating 64-bit add: saturate to INT64_MAX/INT64_MIN on
 * overflow and set the sticky QC flag.  */
uint64_t HELPER(neon_qadd_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t sum = src1 + src2;

    /* Overflow iff inputs share a sign and the sum's sign flipped. */
    if (!((src1 ^ src2) & SIGNBIT64) && ((sum ^ src1) & SIGNBIT64)) {
        SET_QC();
        /* Pick the saturation value matching the operands' sign. */
        sum = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return sum;
}
264

    
265
#define NEON_USAT(dest, src1, src2, type) do { \
266
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
267
    if (tmp != (type)tmp) { \
268
        SET_QC(); \
269
        dest = 0; \
270
    } else { \
271
        dest = tmp; \
272
    }} while(0)
273
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
274
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
275
#undef NEON_FN
276
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
277
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
278
#undef NEON_FN
279
#undef NEON_USAT
280

    
281
uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
282
{
283
    uint32_t res = a - b;
284
    if (res > a) {
285
        SET_QC();
286
        res = 0;
287
    }
288
    return res;
289
}
290

    
291
/* Unsigned saturating 64-bit subtract: clamp to 0 on underflow and set
 * the sticky QC flag.  */
uint64_t HELPER(neon_qsub_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    /* Underflow happens exactly when src2 exceeds src1. */
    if (src1 >= src2) {
        return src1 - src2;
    }
    SET_QC();
    return 0;
}
303

    
304
#define NEON_SSAT(dest, src1, src2, type) do { \
305
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
306
    if (tmp != (type)tmp) { \
307
        SET_QC(); \
308
        if (src2 < 0) { \
309
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
310
        } else { \
311
            tmp = 1 << (sizeof(type) * 8 - 1); \
312
        } \
313
    } \
314
    dest = tmp; \
315
    } while(0)
316
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
317
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
318
#undef NEON_FN
319
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
320
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
321
#undef NEON_FN
322
#undef NEON_SSAT
323

    
324
uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
325
{
326
    uint32_t res = a - b;
327
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
328
        SET_QC();
329
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
330
    }
331
    return res;
332
}
333

    
334
uint64_t HELPER(neon_qsub_s64)(CPUState *env, uint64_t src1, uint64_t src2)
335
{
336
    uint64_t res;
337

    
338
    res = src1 - src2;
339
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
340
        SET_QC();
341
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
342
    }
343
    return res;
344
}
345

    
346
#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
347
NEON_VOP(hadd_s8, neon_s8, 4)
348
NEON_VOP(hadd_u8, neon_u8, 4)
349
NEON_VOP(hadd_s16, neon_s16, 2)
350
NEON_VOP(hadd_u16, neon_u16, 2)
351
#undef NEON_FN
352

    
353
int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
354
{
355
    int32_t dest;
356

    
357
    dest = (src1 >> 1) + (src2 >> 1);
358
    if (src1 & src2 & 1)
359
        dest++;
360
    return dest;
361
}
362

    
363
uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
364
{
365
    uint32_t dest;
366

    
367
    dest = (src1 >> 1) + (src2 >> 1);
368
    if (src1 & src2 & 1)
369
        dest++;
370
    return dest;
371
}
372

    
373
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
374
NEON_VOP(rhadd_s8, neon_s8, 4)
375
NEON_VOP(rhadd_u8, neon_u8, 4)
376
NEON_VOP(rhadd_s16, neon_s16, 2)
377
NEON_VOP(rhadd_u16, neon_u16, 2)
378
#undef NEON_FN
379

    
380
int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
381
{
382
    int32_t dest;
383

    
384
    dest = (src1 >> 1) + (src2 >> 1);
385
    if ((src1 | src2) & 1)
386
        dest++;
387
    return dest;
388
}
389

    
390
uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
391
{
392
    uint32_t dest;
393

    
394
    dest = (src1 >> 1) + (src2 >> 1);
395
    if ((src1 | src2) & 1)
396
        dest++;
397
    return dest;
398
}
399

    
400
#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
401
NEON_VOP(hsub_s8, neon_s8, 4)
402
NEON_VOP(hsub_u8, neon_u8, 4)
403
NEON_VOP(hsub_s16, neon_s16, 2)
404
NEON_VOP(hsub_u16, neon_u16, 2)
405
#undef NEON_FN
406

    
407
int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
408
{
409
    int32_t dest;
410

    
411
    dest = (src1 >> 1) - (src2 >> 1);
412
    if ((~src1) & src2 & 1)
413
        dest--;
414
    return dest;
415
}
416

    
417
uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
418
{
419
    uint32_t dest;
420

    
421
    dest = (src1 >> 1) - (src2 >> 1);
422
    if ((~src1) & src2 & 1)
423
        dest--;
424
    return dest;
425
}
426

    
427
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
428
NEON_VOP(cgt_s8, neon_s8, 4)
429
NEON_VOP(cgt_u8, neon_u8, 4)
430
NEON_VOP(cgt_s16, neon_s16, 2)
431
NEON_VOP(cgt_u16, neon_u16, 2)
432
NEON_VOP(cgt_s32, neon_s32, 1)
433
NEON_VOP(cgt_u32, neon_u32, 1)
434
#undef NEON_FN
435

    
436
#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
437
NEON_VOP(cge_s8, neon_s8, 4)
438
NEON_VOP(cge_u8, neon_u8, 4)
439
NEON_VOP(cge_s16, neon_s16, 2)
440
NEON_VOP(cge_u16, neon_u16, 2)
441
NEON_VOP(cge_s32, neon_s32, 1)
442
NEON_VOP(cge_u32, neon_u32, 1)
443
#undef NEON_FN
444

    
445
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
446
NEON_VOP(min_s8, neon_s8, 4)
447
NEON_VOP(min_u8, neon_u8, 4)
448
NEON_VOP(min_s16, neon_s16, 2)
449
NEON_VOP(min_u16, neon_u16, 2)
450
NEON_VOP(min_s32, neon_s32, 1)
451
NEON_VOP(min_u32, neon_u32, 1)
452
NEON_POP(pmin_s8, neon_s8, 4)
453
NEON_POP(pmin_u8, neon_u8, 4)
454
NEON_POP(pmin_s16, neon_s16, 2)
455
NEON_POP(pmin_u16, neon_u16, 2)
456
#undef NEON_FN
457

    
458
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
459
NEON_VOP(max_s8, neon_s8, 4)
460
NEON_VOP(max_u8, neon_u8, 4)
461
NEON_VOP(max_s16, neon_s16, 2)
462
NEON_VOP(max_u16, neon_u16, 2)
463
NEON_VOP(max_s32, neon_s32, 1)
464
NEON_VOP(max_u32, neon_u32, 1)
465
NEON_POP(pmax_s8, neon_s8, 4)
466
NEON_POP(pmax_u8, neon_u8, 4)
467
NEON_POP(pmax_s16, neon_s16, 2)
468
NEON_POP(pmax_u16, neon_u16, 2)
469
#undef NEON_FN
470

    
471
#define NEON_FN(dest, src1, src2) \
472
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
473
NEON_VOP(abd_s8, neon_s8, 4)
474
NEON_VOP(abd_u8, neon_u8, 4)
475
NEON_VOP(abd_s16, neon_s16, 2)
476
NEON_VOP(abd_u16, neon_u16, 2)
477
NEON_VOP(abd_s32, neon_s32, 1)
478
NEON_VOP(abd_u32, neon_u32, 1)
479
#undef NEON_FN
480

    
481
#define NEON_FN(dest, src1, src2) do { \
482
    int8_t tmp; \
483
    tmp = (int8_t)src2; \
484
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
485
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
486
        dest = 0; \
487
    } else if (tmp < 0) { \
488
        dest = src1 >> -tmp; \
489
    } else { \
490
        dest = src1 << tmp; \
491
    }} while (0)
492
NEON_VOP(shl_u8, neon_u8, 4)
493
NEON_VOP(shl_u16, neon_u16, 2)
494
NEON_VOP(shl_u32, neon_u32, 1)
495
#undef NEON_FN
496

    
497
uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
498
{
499
    int8_t shift = (int8_t)shiftop;
500
    if (shift >= 64 || shift <= -64) {
501
        val = 0;
502
    } else if (shift < 0) {
503
        val >>= -shift;
504
    } else {
505
        val <<= shift;
506
    }
507
    return val;
508
}
509

    
510
#define NEON_FN(dest, src1, src2) do { \
511
    int8_t tmp; \
512
    tmp = (int8_t)src2; \
513
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
514
        dest = 0; \
515
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
516
        dest = src1 >> (sizeof(src1) * 8 - 1); \
517
    } else if (tmp < 0) { \
518
        dest = src1 >> -tmp; \
519
    } else { \
520
        dest = src1 << tmp; \
521
    }} while (0)
522
NEON_VOP(shl_s8, neon_s8, 4)
523
NEON_VOP(shl_s16, neon_s16, 2)
524
NEON_VOP(shl_s32, neon_s32, 1)
525
#undef NEON_FN
526

    
527
uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
528
{
529
    int8_t shift = (int8_t)shiftop;
530
    int64_t val = valop;
531
    if (shift >= 64) {
532
        val = 0;
533
    } else if (shift <= -64) {
534
        val >>= 63;
535
    } else if (shift < 0) {
536
        val >>= -shift;
537
    } else {
538
        val <<= shift;
539
    }
540
    return val;
541
}
542

    
543
#define NEON_FN(dest, src1, src2) do { \
544
    int8_t tmp; \
545
    tmp = (int8_t)src2; \
546
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
547
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
548
        dest = 0; \
549
    } else if (tmp < 0) { \
550
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
551
    } else { \
552
        dest = src1 << tmp; \
553
    }} while (0)
554
NEON_VOP(rshl_s8, neon_s8, 4)
555
NEON_VOP(rshl_s16, neon_s16, 2)
556
#undef NEON_FN
557

    
558
/* The addition of the rounding constant may overflow, so we use an
559
 * intermediate 64 bits accumulator.  */
560
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
561
{
562
    int32_t dest;
563
    int32_t val = (int32_t)valop;
564
    int8_t shift = (int8_t)shiftop;
565
    if ((shift >= 32) || (shift <= -32)) {
566
        dest = 0;
567
    } else if (shift < 0) {
568
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
569
        dest = big_dest >> -shift;
570
    } else {
571
        dest = val << shift;
572
    }
573
    return dest;
574
}
575

    
576
/* Handling addition overflow with 64 bits inputs values is more
577
 * tricky than with 32 bits values.  */
578
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
579
{
580
    int8_t shift = (int8_t)shiftop;
581
    int64_t val = valop;
582
    if ((shift >= 64) || (shift <= -64)) {
583
        val = 0;
584
    } else if (shift < 0) {
585
        val >>= (-shift - 1);
586
        if (val == INT64_MAX) {
587
            /* In this case, it means that the rounding constant is 1,
588
             * and the addition would overflow. Return the actual
589
             * result directly.  */
590
            val = 0x4000000000000000LL;
591
        } else {
592
            val++;
593
            val >>= 1;
594
        }
595
    } else {
596
        val <<= shift;
597
    }
598
    return val;
599
}
600

    
601
#define NEON_FN(dest, src1, src2) do { \
602
    int8_t tmp; \
603
    tmp = (int8_t)src2; \
604
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
605
        tmp < -(ssize_t)sizeof(src1) * 8) { \
606
        dest = 0; \
607
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
608
        dest = src1 >> (-tmp - 1); \
609
    } else if (tmp < 0) { \
610
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
611
    } else { \
612
        dest = src1 << tmp; \
613
    }} while (0)
614
NEON_VOP(rshl_u8, neon_u8, 4)
615
NEON_VOP(rshl_u16, neon_u16, 2)
616
#undef NEON_FN
617

    
618
/* The addition of the rounding constant may overflow, so we use an
619
 * intermediate 64 bits accumulator.  */
620
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
621
{
622
    uint32_t dest;
623
    int8_t shift = (int8_t)shiftop;
624
    if (shift >= 32 || shift < -32) {
625
        dest = 0;
626
    } else if (shift == -32) {
627
        dest = val >> 31;
628
    } else if (shift < 0) {
629
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
630
        dest = big_dest >> -shift;
631
    } else {
632
        dest = val << shift;
633
    }
634
    return dest;
635
}
636

    
637
/* Handling addition overflow with 64 bits inputs values is more
638
 * tricky than with 32 bits values.  */
639
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
640
{
641
    int8_t shift = (uint8_t)shiftop;
642
    if (shift >= 64 || shift < -64) {
643
        val = 0;
644
    } else if (shift == -64) {
645
        /* Rounding a 1-bit result just preserves that bit.  */
646
        val >>= 63;
647
    } else if (shift < 0) {
648
        val >>= (-shift - 1);
649
        if (val == UINT64_MAX) {
650
            /* In this case, it means that the rounding constant is 1,
651
             * and the addition would overflow. Return the actual
652
             * result directly.  */
653
            val = 0x8000000000000000ULL;
654
        } else {
655
            val++;
656
            val >>= 1;
657
        }
658
    } else {
659
        val <<= shift;
660
    }
661
    return val;
662
}
663

    
664
#define NEON_FN(dest, src1, src2) do { \
665
    int8_t tmp; \
666
    tmp = (int8_t)src2; \
667
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
668
        if (src1) { \
669
            SET_QC(); \
670
            dest = ~0; \
671
        } else { \
672
            dest = 0; \
673
        } \
674
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
675
        dest = 0; \
676
    } else if (tmp < 0) { \
677
        dest = src1 >> -tmp; \
678
    } else { \
679
        dest = src1 << tmp; \
680
        if ((dest >> tmp) != src1) { \
681
            SET_QC(); \
682
            dest = ~0; \
683
        } \
684
    }} while (0)
685
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
686
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
687
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
688
#undef NEON_FN
689

    
690
uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
691
{
692
    int8_t shift = (int8_t)shiftop;
693
    if (shift >= 64) {
694
        if (val) {
695
            val = ~(uint64_t)0;
696
            SET_QC();
697
        }
698
    } else if (shift <= -64) {
699
        val = 0;
700
    } else if (shift < 0) {
701
        val >>= -shift;
702
    } else {
703
        uint64_t tmp = val;
704
        val <<= shift;
705
        if ((val >> shift) != tmp) {
706
            SET_QC();
707
            val = ~(uint64_t)0;
708
        }
709
    }
710
    return val;
711
}
712

    
713
#define NEON_FN(dest, src1, src2) do { \
714
    int8_t tmp; \
715
    tmp = (int8_t)src2; \
716
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
717
        if (src1) { \
718
            SET_QC(); \
719
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
720
            if (src1 > 0) { \
721
                dest--; \
722
            } \
723
        } else { \
724
            dest = src1; \
725
        } \
726
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
727
        dest = src1 >> 31; \
728
    } else if (tmp < 0) { \
729
        dest = src1 >> -tmp; \
730
    } else { \
731
        dest = src1 << tmp; \
732
        if ((dest >> tmp) != src1) { \
733
            SET_QC(); \
734
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
735
            if (src1 > 0) { \
736
                dest--; \
737
            } \
738
        } \
739
    }} while (0)
740
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
741
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
742
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
743
#undef NEON_FN
744

    
745
/* Signed saturating 64-bit shift by signed amount (VQSHL.S64): left
 * shifts that lose bits saturate to INT64_MAX/INT64_MIN and set QC.
 * Fixed: cast the shift amount with (int8_t), matching the other shift
 * helpers, rather than (uint8_t) followed by an implementation-defined
 * narrowing into the int8_t variable.  */
uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        /* Any nonzero value loses all its bits: saturate by sign. */
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        /* Arithmetic shift-out leaves only the sign. */
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        /* If shifting back doesn't recover the input, bits were lost. */
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}
768

    
769
#define NEON_FN(dest, src1, src2) do { \
770
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
771
        SET_QC(); \
772
        dest = 0; \
773
    } else { \
774
        int8_t tmp; \
775
        tmp = (int8_t)src2; \
776
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
777
            if (src1) { \
778
                SET_QC(); \
779
                dest = ~0; \
780
            } else { \
781
                dest = 0; \
782
            } \
783
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
784
            dest = 0; \
785
        } else if (tmp < 0) { \
786
            dest = src1 >> -tmp; \
787
        } else { \
788
            dest = src1 << tmp; \
789
            if ((dest >> tmp) != src1) { \
790
                SET_QC(); \
791
                dest = ~0; \
792
            } \
793
        } \
794
    }} while (0)
795
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
796
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
797
#undef NEON_FN
798

    
799
uint32_t HELPER(neon_qshlu_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
800
{
801
    if ((int32_t)valop < 0) {
802
        SET_QC();
803
        return 0;
804
    }
805
    return helper_neon_qshl_u32(env, valop, shiftop);
806
}
807

    
808
uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
809
{
810
    if ((int64_t)valop < 0) {
811
        SET_QC();
812
        return 0;
813
    }
814
    return helper_neon_qshl_u64(env, valop, shiftop);
815
}
816

    
817
/* FIXME: This is wrong.  */
818
#define NEON_FN(dest, src1, src2) do { \
819
    int8_t tmp; \
820
    tmp = (int8_t)src2; \
821
    if (tmp < 0) { \
822
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
823
    } else { \
824
        dest = src1 << tmp; \
825
        if ((dest >> tmp) != src1) { \
826
            SET_QC(); \
827
            dest = ~0; \
828
        } \
829
    }} while (0)
830
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
831
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
832
#undef NEON_FN
833

    
834
/* The addition of the rounding constant may overflow, so we use an
835
 * intermediate 64 bits accumulator.  */
836
uint32_t HELPER(neon_qrshl_u32)(CPUState *env, uint32_t val, uint32_t shiftop)
837
{
838
    uint32_t dest;
839
    int8_t shift = (int8_t)shiftop;
840
    if (shift < 0) {
841
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
842
        dest = big_dest >> -shift;
843
    } else {
844
        dest = val << shift;
845
        if ((dest >> shift) != val) {
846
            SET_QC();
847
            dest = ~0;
848
        }
849
    }
850
    return dest;
851
}
852

    
853
/* Handling addition overflow with 64 bits inputs values is more
854
 * tricky than with 32 bits values.  */
855
uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
856
{
857
    int8_t shift = (int8_t)shiftop;
858
    if (shift < 0) {
859
        val >>= (-shift - 1);
860
        if (val == UINT64_MAX) {
861
            /* In this case, it means that the rounding constant is 1,
862
             * and the addition would overflow. Return the actual
863
             * result directly.  */
864
            val = 0x8000000000000000ULL;
865
        } else {
866
            val++;
867
            val >>= 1;
868
        }
869
    } else { \
870
        uint64_t tmp = val;
871
        val <<= shift;
872
        if ((val >> shift) != tmp) {
873
            SET_QC();
874
            val = ~0;
875
        }
876
    }
877
    return val;
878
}
879

    
880
#define NEON_FN(dest, src1, src2) do { \
881
    int8_t tmp; \
882
    tmp = (int8_t)src2; \
883
    if (tmp < 0) { \
884
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
885
    } else { \
886
        dest = src1 << tmp; \
887
        if ((dest >> tmp) != src1) { \
888
            SET_QC(); \
889
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
890
            if (src1 > 0) { \
891
                dest--; \
892
            } \
893
        } \
894
    }} while (0)
895
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
896
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
897
#undef NEON_FN
898

    
899
/* The addition of the rounding constant may overflow, so we use an
900
 * intermediate 64 bits accumulator.  */
901
uint32_t HELPER(neon_qrshl_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
902
{
903
    int32_t dest;
904
    int32_t val = (int32_t)valop;
905
    int8_t shift = (int8_t)shiftop;
906
    if (shift < 0) {
907
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
908
        dest = big_dest >> -shift;
909
    } else {
910
        dest = val << shift;
911
        if ((dest >> shift) != val) {
912
            SET_QC();
913
            dest = (val >> 31) ^ ~SIGNBIT;
914
        }
915
    }
916
    return dest;
917
}
918

    
919
/* Handling addition overflow with 64 bits inputs values is more
920
 * tricky than with 32 bits values.  */
921
uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
922
{
923
    int8_t shift = (uint8_t)shiftop;
924
    int64_t val = valop;
925

    
926
    if (shift < 0) {
927
        val >>= (-shift - 1);
928
        if (val == INT64_MAX) {
929
            /* In this case, it means that the rounding constant is 1,
930
             * and the addition would overflow. Return the actual
931
             * result directly.  */
932
            val = 0x4000000000000000ULL;
933
        } else {
934
            val++;
935
            val >>= 1;
936
        }
937
    } else {
938
        int64_t tmp = val;
939
        val <<= shift;
940
        if ((val >> shift) != tmp) {
941
            SET_QC();
942
            val = (tmp >> 63) ^ ~SIGNBIT64;
943
        }
944
    }
945
    return val;
946
}
947

    
948
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
949
{
950
    uint32_t mask;
951
    mask = (a ^ b) & 0x80808080u;
952
    a &= ~0x80808080u;
953
    b &= ~0x80808080u;
954
    return (a + b) ^ mask;
955
}
956

    
957
uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
958
{
959
    uint32_t mask;
960
    mask = (a ^ b) & 0x80008000u;
961
    a &= ~0x80008000u;
962
    b &= ~0x80008000u;
963
    return (a + b) ^ mask;
964
}
965

    
966
#define NEON_FN(dest, src1, src2) dest = src1 + src2
967
NEON_POP(padd_u8, neon_u8, 4)
968
NEON_POP(padd_u16, neon_u16, 2)
969
#undef NEON_FN
970

    
971
#define NEON_FN(dest, src1, src2) dest = src1 - src2
972
NEON_VOP(sub_u8, neon_u8, 4)
973
NEON_VOP(sub_u16, neon_u16, 2)
974
#undef NEON_FN
975

    
976
#define NEON_FN(dest, src1, src2) dest = src1 * src2
977
NEON_VOP(mul_u8, neon_u8, 4)
978
NEON_VOP(mul_u16, neon_u16, 2)
979
#undef NEON_FN
980

    
981
/* Polynomial multiplication is like integer multiplication except the
982
   partial products are XORed, not added.  */
983
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
984
{
985
    uint32_t mask;
986
    uint32_t result;
987
    result = 0;
988
    while (op1) {
989
        mask = 0;
990
        if (op1 & 1)
991
            mask |= 0xff;
992
        if (op1 & (1 << 8))
993
            mask |= (0xff << 8);
994
        if (op1 & (1 << 16))
995
            mask |= (0xff << 16);
996
        if (op1 & (1 << 24))
997
            mask |= (0xff << 24);
998
        result ^= op2 & mask;
999
        op1 = (op1 >> 1) & 0x7f7f7f7f;
1000
        op2 = (op2 << 1) & 0xfefefefe;
1001
    }
1002
    return result;
1003
}
1004

    
1005
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
1006
{
1007
    uint64_t result = 0;
1008
    uint64_t mask;
1009
    uint64_t op2ex = op2;
1010
    op2ex = (op2ex & 0xff) |
1011
        ((op2ex & 0xff00) << 8) |
1012
        ((op2ex & 0xff0000) << 16) |
1013
        ((op2ex & 0xff000000) << 24);
1014
    while (op1) {
1015
        mask = 0;
1016
        if (op1 & 1) {
1017
            mask |= 0xffff;
1018
        }
1019
        if (op1 & (1 << 8)) {
1020
            mask |= (0xffffU << 16);
1021
        }
1022
        if (op1 & (1 << 16)) {
1023
            mask |= (0xffffULL << 32);
1024
        }
1025
        if (op1 & (1 << 24)) {
1026
            mask |= (0xffffULL << 48);
1027
        }
1028
        result ^= op2ex & mask;
1029
        op1 = (op1 >> 1) & 0x7f7f7f7f;
1030
        op2ex <<= 1;
1031
    }
1032
    return result;
1033
}
1034

    
1035
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
1036
NEON_VOP(tst_u8, neon_u8, 4)
1037
NEON_VOP(tst_u16, neon_u16, 2)
1038
NEON_VOP(tst_u32, neon_u32, 1)
1039
#undef NEON_FN
1040

    
1041
#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
1042
NEON_VOP(ceq_u8, neon_u8, 4)
1043
NEON_VOP(ceq_u16, neon_u16, 2)
1044
NEON_VOP(ceq_u32, neon_u32, 1)
1045
#undef NEON_FN
1046

    
1047
#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
1048
NEON_VOP1(abs_s8, neon_s8, 4)
1049
NEON_VOP1(abs_s16, neon_s16, 2)
1050
#undef NEON_FN
1051

    
1052
/* Count Leading Sign/Zero Bits.  */
/* Leading-zero count of an 8-bit value; returns 8 when x == 0.  */
static inline int do_clz8(uint8_t x)
{
    int count = 8;
    while (x) {
        x >>= 1;
        count--;
    }
    return count;
}
1060

    
1061
/* Leading-zero count of a 16-bit value; returns 16 when x == 0.  */
static inline int do_clz16(uint16_t x)
{
    int count = 16;
    while (x) {
        x >>= 1;
        count--;
    }
    return count;
}
1068

    
1069
#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
1070
NEON_VOP1(clz_u8, neon_u8, 4)
1071
#undef NEON_FN
1072

    
1073
#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
1074
NEON_VOP1(clz_u16, neon_u16, 2)
1075
#undef NEON_FN
1076

    
1077
#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
1078
NEON_VOP1(cls_s8, neon_s8, 4)
1079
#undef NEON_FN
1080

    
1081
#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
1082
NEON_VOP1(cls_s16, neon_s16, 2)
1083
#undef NEON_FN
1084

    
1085
uint32_t HELPER(neon_cls_s32)(uint32_t x)
1086
{
1087
    int count;
1088
    if ((int32_t)x < 0)
1089
        x = ~x;
1090
    for (count = 32; x; count--)
1091
        x = x >> 1;
1092
    return count - 1;
1093
}
1094

    
1095
/* Bit count.  */
1096
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
1097
{
1098
    x = (x & 0x55555555) + ((x >>  1) & 0x55555555);
1099
    x = (x & 0x33333333) + ((x >>  2) & 0x33333333);
1100
    x = (x & 0x0f0f0f0f) + ((x >>  4) & 0x0f0f0f0f);
1101
    return x;
1102
}
1103

    
1104
#define NEON_QDMULH16(dest, src1, src2, round) do { \
1105
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
1106
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
1107
        SET_QC(); \
1108
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
1109
    } else { \
1110
        tmp <<= 1; \
1111
    } \
1112
    if (round) { \
1113
        int32_t old = tmp; \
1114
        tmp += 1 << 15; \
1115
        if ((int32_t)tmp < old) { \
1116
            SET_QC(); \
1117
            tmp = SIGNBIT - 1; \
1118
        } \
1119
    } \
1120
    dest = tmp >> 16; \
1121
    } while(0)
1122
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
1123
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
1124
#undef NEON_FN
1125
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
1126
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
1127
#undef NEON_FN
1128
#undef NEON_QDMULH16
1129

    
1130
#define NEON_QDMULH32(dest, src1, src2, round) do { \
1131
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
1132
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
1133
        SET_QC(); \
1134
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
1135
    } else { \
1136
        tmp <<= 1; \
1137
    } \
1138
    if (round) { \
1139
        int64_t old = tmp; \
1140
        tmp += (int64_t)1 << 31; \
1141
        if ((int64_t)tmp < old) { \
1142
            SET_QC(); \
1143
            tmp = SIGNBIT64 - 1; \
1144
        } \
1145
    } \
1146
    dest = tmp >> 32; \
1147
    } while(0)
1148
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
1149
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
1150
#undef NEON_FN
1151
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
1152
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
1153
#undef NEON_FN
1154
#undef NEON_QDMULH32
1155

    
1156
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
1157
{
1158
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
1159
           | ((x >> 24) & 0xff000000u);
1160
}
1161

    
1162
uint32_t HELPER(neon_narrow_u16)(uint64_t x)
1163
{
1164
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
1165
}
1166

    
1167
uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
1168
{
1169
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
1170
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
1171
}
1172

    
1173
uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
1174
{
1175
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
1176
}
1177

    
1178
uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
1179
{
1180
    x &= 0xff80ff80ff80ff80ull;
1181
    x += 0x0080008000800080ull;
1182
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
1183
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
1184
}
1185

    
1186
uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
1187
{
1188
    x &= 0xffff8000ffff8000ull;
1189
    x += 0x0000800000008000ull;
1190
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
1191
}
1192

    
1193
/* Saturating narrow of signed 16-bit lanes to unsigned 8-bit lanes
 * (VQMOVUN): negative lanes clamp to 0, lanes above 0xff clamp to
 * 0xff, and QC is set whenever any lane saturates.
 */
uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        /* Negative input: result lane stays zero.  */ \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else  { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
1219

    
1220
/* Unsigned saturating narrow, 16-bit lanes to 8-bit lanes (VQMOVN.U16):
 * lanes above 0xff clamp to 0xff and set QC.
 */
uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
1242

    
1243
/* Signed saturating narrow, 16-bit lanes to 8-bit lanes (VQMOVN.S16):
 * out-of-range lanes clamp to 0x80/0x7f depending on sign and set QC.
 */
uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        /* (s >> 15) ^ 0x7f yields 0x80 for negative, 0x7f otherwise.  */ \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
1265

    
1266
/* Saturating narrow of signed 32-bit lanes to unsigned 16-bit lanes
 * (VQMOVUN): negative lanes clamp to 0, lanes above 0xffff clamp to
 * 0xffff; QC is set on any saturation.
 */
uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}
1288

    
1289
/* Unsigned saturating narrow, 32-bit lanes to 16-bit lanes (VQMOVN.U32).  */
uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
    uint32_t lo = x;
    uint32_t hi = x >> 32;

    if (lo > 0xffff) {
        lo = 0xffff;
        SET_QC();
    }
    if (hi > 0xffff) {
        hi = 0xffff;
        SET_QC();
    }
    return lo | (hi << 16);
}
1305

    
1306
/* Signed saturating narrow, 32-bit lanes to 16-bit lanes (VQMOVN.S32):
 * out-of-range lanes clamp to 0x8000/0x7fff and set QC.
 */
uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
{
    int32_t low = x;
    int32_t high = x >> 32;

    if (low != (int16_t)low) {
        /* (low >> 31) ^ 0x7fff: 0x8000 for negative, 0x7fff otherwise.  */
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}
1322

    
1323
/* Saturating narrow of a signed 64-bit value to unsigned 32 bits:
 * negative clamps to 0, overflow clamps to 0xffffffff; QC set on either.
 */
uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

/* Unsigned saturating narrow, 64 bits to 32 bits.  */
uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

/* Signed saturating narrow, 64 bits to 32 bits: out-of-range values
 * clamp to INT32_MIN/INT32_MAX and set QC.
 */
uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}
1353

    
1354
uint64_t HELPER(neon_widen_u8)(uint32_t x)
1355
{
1356
    uint64_t tmp;
1357
    uint64_t ret;
1358
    ret = (uint8_t)x;
1359
    tmp = (uint8_t)(x >> 8);
1360
    ret |= tmp << 16;
1361
    tmp = (uint8_t)(x >> 16);
1362
    ret |= tmp << 32;
1363
    tmp = (uint8_t)(x >> 24);
1364
    ret |= tmp << 48;
1365
    return ret;
1366
}
1367

    
1368
uint64_t HELPER(neon_widen_s8)(uint32_t x)
1369
{
1370
    uint64_t tmp;
1371
    uint64_t ret;
1372
    ret = (uint16_t)(int8_t)x;
1373
    tmp = (uint16_t)(int8_t)(x >> 8);
1374
    ret |= tmp << 16;
1375
    tmp = (uint16_t)(int8_t)(x >> 16);
1376
    ret |= tmp << 32;
1377
    tmp = (uint16_t)(int8_t)(x >> 24);
1378
    ret |= tmp << 48;
1379
    return ret;
1380
}
1381

    
1382
uint64_t HELPER(neon_widen_u16)(uint32_t x)
1383
{
1384
    uint64_t high = (uint16_t)(x >> 16);
1385
    return ((uint16_t)x) | (high << 32);
1386
}
1387

    
1388
uint64_t HELPER(neon_widen_s16)(uint32_t x)
1389
{
1390
    uint64_t high = (int16_t)(x >> 16);
1391
    return ((uint32_t)(int16_t)x) | (high << 32);
1392
}
1393

    
1394
uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
1395
{
1396
    uint64_t mask;
1397
    mask = (a ^ b) & 0x8000800080008000ull;
1398
    a &= ~0x8000800080008000ull;
1399
    b &= ~0x8000800080008000ull;
1400
    return (a + b) ^ mask;
1401
}
1402

    
1403
uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
1404
{
1405
    uint64_t mask;
1406
    mask = (a ^ b) & 0x8000000080000000ull;
1407
    a &= ~0x8000000080000000ull;
1408
    b &= ~0x8000000080000000ull;
1409
    return (a + b) ^ mask;
1410
}
1411

    
1412
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
1413
{
1414
    uint64_t tmp;
1415
    uint64_t tmp2;
1416

    
1417
    tmp = a & 0x0000ffff0000ffffull;
1418
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
1419
    tmp2 = b & 0xffff0000ffff0000ull;
1420
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
1421
    return    ( tmp         & 0xffff)
1422
            | ((tmp  >> 16) & 0xffff0000ull)
1423
            | ((tmp2 << 16) & 0xffff00000000ull)
1424
            | ( tmp2        & 0xffff000000000000ull);
1425
}
1426

    
1427
uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
1428
{
1429
    uint32_t low = a + (a >> 32);
1430
    uint32_t high = b + (b >> 32);
1431
    return low + ((uint64_t)high << 32);
1432
}
1433

    
1434
uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
1435
{
1436
    uint64_t mask;
1437
    mask = (a ^ ~b) & 0x8000800080008000ull;
1438
    a |= 0x8000800080008000ull;
1439
    b &= ~0x8000800080008000ull;
1440
    return (a - b) ^ mask;
1441
}
1442

    
1443
uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
1444
{
1445
    uint64_t mask;
1446
    mask = (a ^ ~b) & 0x8000000080000000ull;
1447
    a |= 0x8000000080000000ull;
1448
    b &= ~0x8000000080000000ull;
1449
    return (a - b) ^ mask;
1450
}
1451

    
1452
/* Saturating signed add of two 32-bit lanes packed in 64-bit values.
 * Overflow occurs when both operands share a sign that the sum does
 * not; the lane then saturates toward the operands' sign and QC is set.
 */
uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

/* Saturating signed 64-bit add; same overflow rule as above.  */
uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}
1485

    
1486
#define DO_ABD(dest, x, y, type) do { \
1487
    type tmp_x = x; \
1488
    type tmp_y = y; \
1489
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
1490
    } while(0)
1491

    
1492
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
1493
{
1494
    uint64_t tmp;
1495
    uint64_t result;
1496
    DO_ABD(result, a, b, uint8_t);
1497
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t);
1498
    result |= tmp << 16;
1499
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t);
1500
    result |= tmp << 32;
1501
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t);
1502
    result |= tmp << 48;
1503
    return result;
1504
}
1505

    
1506
uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
1507
{
1508
    uint64_t tmp;
1509
    uint64_t result;
1510
    DO_ABD(result, a, b, int8_t);
1511
    DO_ABD(tmp, a >> 8, b >> 8, int8_t);
1512
    result |= tmp << 16;
1513
    DO_ABD(tmp, a >> 16, b >> 16, int8_t);
1514
    result |= tmp << 32;
1515
    DO_ABD(tmp, a >> 24, b >> 24, int8_t);
1516
    result |= tmp << 48;
1517
    return result;
1518
}
1519

    
1520
uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
1521
{
1522
    uint64_t tmp;
1523
    uint64_t result;
1524
    DO_ABD(result, a, b, uint16_t);
1525
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t);
1526
    return result | (tmp << 32);
1527
}
1528

    
1529
uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
1530
{
1531
    uint64_t tmp;
1532
    uint64_t result;
1533
    DO_ABD(result, a, b, int16_t);
1534
    DO_ABD(tmp, a >> 16, b >> 16, int16_t);
1535
    return result | (tmp << 32);
1536
}
1537

    
1538
uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
1539
{
1540
    uint64_t result;
1541
    DO_ABD(result, a, b, uint32_t);
1542
    return result;
1543
}
1544

    
1545
uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
1546
{
1547
    uint64_t result;
1548
    DO_ABD(result, a, b, int32_t);
1549
    return result;
1550
}
1551
#undef DO_ABD
1552

    
1553
/* Widening multiply. Named type is the source type.  */
1554
#define DO_MULL(dest, x, y, type1, type2) do { \
1555
    type1 tmp_x = x; \
1556
    type1 tmp_y = y; \
1557
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
1558
    } while(0)
1559

    
1560
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
1561
{
1562
    uint64_t tmp;
1563
    uint64_t result;
1564

    
1565
    DO_MULL(result, a, b, uint8_t, uint16_t);
1566
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
1567
    result |= tmp << 16;
1568
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
1569
    result |= tmp << 32;
1570
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
1571
    result |= tmp << 48;
1572
    return result;
1573
}
1574

    
1575
uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
1576
{
1577
    uint64_t tmp;
1578
    uint64_t result;
1579

    
1580
    DO_MULL(result, a, b, int8_t, uint16_t);
1581
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
1582
    result |= tmp << 16;
1583
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
1584
    result |= tmp << 32;
1585
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
1586
    result |= tmp << 48;
1587
    return result;
1588
}
1589

    
1590
uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
1591
{
1592
    uint64_t tmp;
1593
    uint64_t result;
1594

    
1595
    DO_MULL(result, a, b, uint16_t, uint32_t);
1596
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
1597
    return result | (tmp << 32);
1598
}
1599

    
1600
uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
1601
{
1602
    uint64_t tmp;
1603
    uint64_t result;
1604

    
1605
    DO_MULL(result, a, b, int16_t, uint32_t);
1606
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
1607
    return result | (tmp << 32);
1608
}
1609

    
1610
uint64_t HELPER(neon_negl_u16)(uint64_t x)
1611
{
1612
    uint16_t tmp;
1613
    uint64_t result;
1614
    result = (uint16_t)-x;
1615
    tmp = -(x >> 16);
1616
    result |= (uint64_t)tmp << 16;
1617
    tmp = -(x >> 32);
1618
    result |= (uint64_t)tmp << 32;
1619
    tmp = -(x >> 48);
1620
    result |= (uint64_t)tmp << 48;
1621
    return result;
1622
}
1623

    
1624
uint64_t HELPER(neon_negl_u32)(uint64_t x)
1625
{
1626
    uint32_t low = -x;
1627
    uint32_t high = -(x >> 32);
1628
    return low | ((uint64_t)high << 32);
1629
}
1630

    
1631
/* FIXME:  There should be a native op for this.  */
1632
uint64_t HELPER(neon_negl_u64)(uint64_t x)
1633
{
1634
    return -x;
1635
}
1636

    
1637
/* Saturating sign manipulation.  */
1638
/* ??? Make these use NEON_VOP1 */
1639
#define DO_QABS8(x) do { \
1640
    if (x == (int8_t)0x80) { \
1641
        x = 0x7f; \
1642
        SET_QC(); \
1643
    } else if (x < 0) { \
1644
        x = -x; \
1645
    }} while (0)
1646
uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
1647
{
1648
    neon_s8 vec;
1649
    NEON_UNPACK(neon_s8, vec, x);
1650
    DO_QABS8(vec.v1);
1651
    DO_QABS8(vec.v2);
1652
    DO_QABS8(vec.v3);
1653
    DO_QABS8(vec.v4);
1654
    NEON_PACK(neon_s8, x, vec);
1655
    return x;
1656
}
1657
#undef DO_QABS8
1658

    
1659
#define DO_QNEG8(x) do { \
1660
    if (x == (int8_t)0x80) { \
1661
        x = 0x7f; \
1662
        SET_QC(); \
1663
    } else { \
1664
        x = -x; \
1665
    }} while (0)
1666
uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
1667
{
1668
    neon_s8 vec;
1669
    NEON_UNPACK(neon_s8, vec, x);
1670
    DO_QNEG8(vec.v1);
1671
    DO_QNEG8(vec.v2);
1672
    DO_QNEG8(vec.v3);
1673
    DO_QNEG8(vec.v4);
1674
    NEON_PACK(neon_s8, x, vec);
1675
    return x;
1676
}
1677
#undef DO_QNEG8
1678

    
1679
#define DO_QABS16(x) do { \
1680
    if (x == (int16_t)0x8000) { \
1681
        x = 0x7fff; \
1682
        SET_QC(); \
1683
    } else if (x < 0) { \
1684
        x = -x; \
1685
    }} while (0)
1686
uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
1687
{
1688
    neon_s16 vec;
1689
    NEON_UNPACK(neon_s16, vec, x);
1690
    DO_QABS16(vec.v1);
1691
    DO_QABS16(vec.v2);
1692
    NEON_PACK(neon_s16, x, vec);
1693
    return x;
1694
}
1695
#undef DO_QABS16
1696

    
1697
#define DO_QNEG16(x) do { \
1698
    if (x == (int16_t)0x8000) { \
1699
        x = 0x7fff; \
1700
        SET_QC(); \
1701
    } else { \
1702
        x = -x; \
1703
    }} while (0)
1704
uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
1705
{
1706
    neon_s16 vec;
1707
    NEON_UNPACK(neon_s16, vec, x);
1708
    DO_QNEG16(vec.v1);
1709
    DO_QNEG16(vec.v2);
1710
    NEON_PACK(neon_s16, x, vec);
1711
    return x;
1712
}
1713
#undef DO_QNEG16
1714

    
1715
/* Saturating 32-bit absolute value: abs(INT32_MIN) clamps to
 * INT32_MAX and sets QC.
 */
uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

/* Saturating 32-bit negation: -INT32_MIN clamps to INT32_MAX
 * and sets QC.
 */
uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}
1736

    
1737
/* NEON Float helpers.  */
1738
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
1739
{
1740
    float32 f0 = vfp_itos(a);
1741
    float32 f1 = vfp_itos(b);
1742
    return (float32_compare_quiet(f0, f1, NFS) == -1) ? a : b;
1743
}
1744

    
1745
uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
1746
{
1747
    float32 f0 = vfp_itos(a);
1748
    float32 f1 = vfp_itos(b);
1749
    return (float32_compare_quiet(f0, f1, NFS) == 1) ? a : b;
1750
}
1751

    
1752
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
1753
{
1754
    float32 f0 = vfp_itos(a);
1755
    float32 f1 = vfp_itos(b);
1756
    return vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
1757
                    ? float32_sub(f0, f1, NFS)
1758
                    : float32_sub(f1, f0, NFS));
1759
}
1760

    
1761
uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
1762
{
1763
    return vfp_stoi(float32_add(vfp_itos(a), vfp_itos(b), NFS));
1764
}
1765

    
1766
uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
1767
{
1768
    return vfp_stoi(float32_sub(vfp_itos(a), vfp_itos(b), NFS));
1769
}
1770

    
1771
uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
1772
{
1773
    return vfp_stoi(float32_mul(vfp_itos(a), vfp_itos(b), NFS));
1774
}
1775

    
1776
/* Floating point comparisons produce an integer result.  */
1777
#define NEON_VOP_FCMP(name, cmp) \
1778
uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
1779
{ \
1780
    if (float32_compare_quiet(vfp_itos(a), vfp_itos(b), NFS) cmp 0) \
1781
        return ~0; \
1782
    else \
1783
        return 0; \
1784
}
1785

    
1786
NEON_VOP_FCMP(ceq_f32, ==)
1787
NEON_VOP_FCMP(cge_f32, >=)
1788
NEON_VOP_FCMP(cgt_f32, >)
1789

    
1790
uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
1791
{
1792
    float32 f0 = float32_abs(vfp_itos(a));
1793
    float32 f1 = float32_abs(vfp_itos(b));
1794
    return (float32_compare_quiet(f0, f1,NFS) >= 0) ? ~0 : 0;
1795
}
1796

    
1797
uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
1798
{
1799
    float32 f0 = float32_abs(vfp_itos(a));
1800
    float32 f1 = float32_abs(vfp_itos(b));
1801
    return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
1802
}
1803

    
1804
/* Extract element N of width SIZE bits from packed 64-bit value V.  */
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
1805

    
1806
/* Quadword VUZP: de-interleave the register pairs (rd, rd+1) and
 * (rm, rm+1) in place — even-numbered elements to rd, odd to rm.
 */
void HELPER(neon_qunzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    /* Even elements of d then m ... */
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    /* ... and odd elements of d then m.  */
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

/* Quadword VUZP, 16-bit elements.  */
void HELPER(neon_qunzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

/* Quadword VUZP, 32-bit elements.  */
void HELPER(neon_qunzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
1869

    
1870
/* Doubleword VUZP: de-interleave registers rd and rm in place —
 * even-numbered elements to rd, odd-numbered elements to rm.
 */
void HELPER(neon_unzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

/* Doubleword VUZP, 16-bit elements.  */
void HELPER(neon_unzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
1897

    
1898
/* Quadword VZIP: interleave the register pairs (rd, rd+1) and
 * (rm, rm+1) in place — element i of d and element i of m become
 * adjacent in the result.
 */
void HELPER(neon_qzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

/* Quadword VZIP, 16-bit elements.  */
void HELPER(neon_qzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

/* Quadword VZIP, 32-bit elements.  */
void HELPER(neon_qzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
1961

    
1962
/* Doubleword VZIP: interleave registers rd and rm in place —
 * low halves zip into rd, high halves into rm.
 */
void HELPER(neon_zip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

/* Doubleword VZIP, 16-bit elements.  */
void HELPER(neon_zip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}