Statistics
| Branch: | Revision:

root / target-arm / neon_helper.c @ 51e3930f

History | View | Annotate | Download (51.2 kB)

1
/*
2
 * ARM NEON vector operations.
3
 *
4
 * Copyright (c) 2007, 2008 CodeSourcery.
5
 * Written by Paul Brook
6
 *
7
 * This code is licenced under the GNU GPL v2.
8
 */
9
#include <stdlib.h>
10
#include <stdio.h>
11

    
12
#include "cpu.h"
13
#include "exec-all.h"
14
#include "helpers.h"
15

    
16
#define SIGNBIT (uint32_t)0x80000000
17
#define SIGNBIT64 ((uint64_t)1 << 63)
18

    
19
#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
20

    
21
static float_status neon_float_status;
22
#define NFS &neon_float_status
23

    
24
/* Helper routines to perform bitwise copies between float and int.  */
25
static inline float32 vfp_itos(uint32_t i)
26
{
27
    union {
28
        uint32_t i;
29
        float32 s;
30
    } v;
31

    
32
    v.i = i;
33
    return v.s;
34
}
35

    
36
/* Reinterpret a float32 bit pattern as a 32-bit integer (no conversion). */
static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t as_int;
        float32 as_float;
    } u;

    u.as_float = s;
    return u.as_int;
}
46

    
47
/* NEON_TYPEn defines a struct viewing a 32-bit quantity as n lanes of the
 * given element type; the structs are punned against a uint32_t by
 * NEON_PACK/NEON_UNPACK below.  Lane v1 is always the least significant,
 * so on big-endian hosts the members are declared in reverse order to
 * keep the in-memory layout consistent with the uint32_t it aliases.
 */
#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

/* Instantiate the lane-view structs actually used by the helpers. */
NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1
93

    
94
/* Copy from a uint32_t to a vector structure type.  */
/* The union pun keeps the copy well-defined without pointer casts. */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)
113

    
114
/* Apply the NEON_FN currently in scope to each of the 1/2/4 lanes of
 * vsrc1/vsrc2, writing into vdest.  */
#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
124

    
125
/* Body of an element-wise helper: unpack both 32-bit operands into
 * n-lane vectors, apply NEON_FN per lane, repack the result.  */
#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Define helper_neon_<name>(arg1, arg2) over packed uint32_t operands. */
#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

/* As NEON_VOP, but the helper also receives the CPU state; used by
 * operations whose NEON_FN calls SET_QC().  */
#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
145

    
146
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
/* Destination lanes come from adjacent pairs: the low half of the result
 * from arg1's pairs, the high half from arg2's.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \

/* Define helper_neon_<name> performing the pairwise form of NEON_FN. */
#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}
171

    
172
/* Unary operators.  */
/* NEON_FN is invoked with a dummy third argument, which unary NEON_FN
 * definitions ignore textually, so no vsrc2 is needed here.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}
183

    
184

    
185
/* VQADD, unsigned lanes: the sum is formed in 32 bits; if truncating it
 * back to the lane type changes the value the addition overflowed, so
 * saturate to all-ones and set QC.  */
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
200

    
201
/* VQADD.U32: unsigned saturating add, setting QC on saturation. */
uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
{
    /* The sum wraps exactly when b exceeds the headroom above a. */
    if (b > ~a) {
        SET_QC();
        return ~0;
    }
    return a + b;
}
210

    
211
/* VQADD.U64: unsigned saturating add, setting QC on saturation. */
uint64_t HELPER(neon_qadd_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    /* The sum wraps exactly when src2 exceeds the headroom above src1. */
    if (src2 > ~src1) {
        SET_QC();
        return ~(uint64_t)0;
    }
    return src1 + src2;
}
222

    
223
/* VQADD, signed lanes: the sum is formed in unsigned arithmetic (defined
 * on wrap) and checked by truncating back to the lane type; on overflow
 * saturate towards the sign of src2 (max if positive, min otherwise)
 * and set QC.  */
#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
242

    
243
/* VQADD.S32: signed saturating add (operands passed as uint32_t so the
 * addition is well-defined on wrap).  */
uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    /* Signed overflow iff both operands have the same sign and the
     * result's sign differs from a's. */
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        /* Saturate towards a's sign: 0x7fffffff if a >= 0, else 0x80000000. */
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
252

    
253
/* VQADD.S64: signed saturating add; same overflow test as the 32-bit
 * version, widened to 64 bits.  */
uint64_t HELPER(neon_qadd_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    /* Overflow iff inputs agree in sign but the result does not. */
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        /* INT64_MAX if src1 >= 0, INT64_MIN otherwise. */
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
264

    
265
/* VQSUB, unsigned lanes: the difference is formed in 32 bits; an
 * underflow makes the truncation check fail, in which case saturate to
 * zero and set QC.  */
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
280

    
281
/* VQSUB.U32: unsigned saturating subtract; saturates at zero. */
uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
{
    if (a < b) {
        SET_QC();
        return 0;
    }
    return a - b;
}
290

    
291
/* VQSUB.U64: unsigned saturating subtract; saturates at zero. */
uint64_t HELPER(neon_qsub_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res = 0;

    if (src1 >= src2) {
        res = src1 - src2;
    } else {
        /* True difference would be negative: clamp to 0 and flag it. */
        SET_QC();
    }
    return res;
}
303

    
304
/* VQSUB, signed lanes: difference formed in unsigned arithmetic and
 * checked by truncation; on overflow saturate away from src2's sign
 * (src2 < 0 means the true result was too large, so clamp to max).  */
#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
323

    
324
/* VQSUB.S32: signed saturating subtract. */
uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    /* Signed overflow iff the operands differ in sign and the result's
     * sign differs from a's. */
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        /* Saturate towards a's sign: 0x7fffffff if a >= 0, else 0x80000000. */
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
333

    
334
/* VQSUB.S64: signed saturating subtract; 64-bit analogue of the above. */
uint64_t HELPER(neon_qsub_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    /* Overflow iff inputs differ in sign and the result flips src1's sign. */
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        /* INT64_MAX if src1 >= 0, INT64_MIN otherwise. */
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
345

    
346
/* VHADD, 8/16-bit lanes: halving add.  The lanes are at most 16 bits,
 * so the sum cannot overflow the int arithmetic before the shift.  */
#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN
352

    
353
int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
354
{
355
    int32_t dest;
356

    
357
    dest = (src1 >> 1) + (src2 >> 1);
358
    if (src1 & src2 & 1)
359
        dest++;
360
    return dest;
361
}
362

    
363
uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
364
{
365
    uint32_t dest;
366

    
367
    dest = (src1 >> 1) + (src2 >> 1);
368
    if (src1 & src2 & 1)
369
        dest++;
370
    return dest;
371
}
372

    
373
/* VRHADD, 8/16-bit lanes: rounding halving add (+1 before the shift).  */
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN
379

    
380
int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
381
{
382
    int32_t dest;
383

    
384
    dest = (src1 >> 1) + (src2 >> 1);
385
    if ((src1 | src2) & 1)
386
        dest++;
387
    return dest;
388
}
389

    
390
uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
391
{
392
    uint32_t dest;
393

    
394
    dest = (src1 >> 1) + (src2 >> 1);
395
    if ((src1 | src2) & 1)
396
        dest++;
397
    return dest;
398
}
399

    
400
/* VHSUB, 8/16-bit lanes: halving subtract; no overflow possible in int. */
#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN
406

    
407
int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
408
{
409
    int32_t dest;
410

    
411
    dest = (src1 >> 1) - (src2 >> 1);
412
    if ((~src1) & src2 & 1)
413
        dest--;
414
    return dest;
415
}
416

    
417
uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
418
{
419
    uint32_t dest;
420

    
421
    dest = (src1 >> 1) - (src2 >> 1);
422
    if ((~src1) & src2 & 1)
423
        dest--;
424
    return dest;
425
}
426

    
427
/* VCGT: lane-wise compare greater-than, producing an all-ones/all-zeros
 * mask per lane.  */
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN

/* VCGE: lane-wise compare greater-or-equal, mask result.  */
#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN
444

    
445
/* VMIN / VPMIN: lane-wise (and pairwise) minimum.  */
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN

/* VMAX / VPMAX: lane-wise (and pairwise) maximum.  */
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN
470

    
471
/* VABD: absolute difference, computed by subtracting in the direction
 * that cannot underflow.  */
#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN
480

    
481
/* VSHL, unsigned lanes: the shift count is the low byte of src2 taken as
 * signed; negative counts shift right.  Counts of magnitude >= lane
 * width give zero (avoiding C's undefined out-of-range shifts).  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN
496

    
497
uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
498
{
499
    int8_t shift = (int8_t)shiftop;
500
    if (shift >= 64 || shift <= -64) {
501
        val = 0;
502
    } else if (shift < 0) {
503
        val >>= -shift;
504
    } else {
505
        val <<= shift;
506
    }
507
    return val;
508
}
509

    
510
/* VSHL, signed lanes: as the unsigned variant, but right shifts are
 * arithmetic, and a right shift by >= lane width yields the sign fill
 * (shift by width-1).  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN
526

    
527
uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
528
{
529
    int8_t shift = (int8_t)shiftop;
530
    int64_t val = valop;
531
    if (shift >= 64) {
532
        val = 0;
533
    } else if (shift <= -64) {
534
        val >>= 63;
535
    } else if (shift < 0) {
536
        val >>= -shift;
537
    } else {
538
        val <<= shift;
539
    }
540
    return val;
541
}
542

    
543
/* VRSHL, signed 8/16-bit lanes: rounding variant — for right shifts add
 * the rounding constant 2^(-tmp-1) first.  The rounded result of any
 * shift of magnitude >= lane width is zero.  Lane values fit in int, so
 * the rounding addition cannot overflow here (32-bit lanes need the
 * wide-accumulator helper below).  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN
557

    
558
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bits accumulator.  */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
        /* Any rounded shift of magnitude >= 32 produces zero. */
        dest = 0;
    } else if (shift < 0) {
        /* Rounding right shift: add 2^(-shift-1) in 64 bits, then shift. */
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}
575

    
576
/* Handling addition overflow with 64 bits inputs values is more
 * tricky than with 32 bits values.  */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if ((shift >= 64) || (shift <= -64)) {
        /* Any rounded shift of magnitude >= 64 produces zero. */
        val = 0;
    } else if (shift < 0) {
        /* Shift by one less, then fold the rounding constant in as an
         * increment followed by a final 1-bit shift: this avoids forming
         * val + 2^(-shift-1), which could overflow 64 bits. */
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000LL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}
600

    
601
/* VRSHL, unsigned 8/16-bit lanes.  A shift of exactly -width keeps only
 * the rounding carry, i.e. the lane's top bit; anything further right
 * (or any left shift >= width) is zero.  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN
617

    
618
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bits accumulator.  */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        /* Only the rounding carry survives: the lane's top bit. */
        dest = val >> 31;
    } else if (shift < 0) {
        /* Rounding right shift, performed in 64 bits to keep the
         * rounding addition from wrapping. */
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}
636

    
637
/* Handling addition overflow with 64 bits inputs values is more
638
 * tricky than with 32 bits values.  */
639
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
640
{
641
    int8_t shift = (uint8_t)shiftop;
642
    if (shift >= 64 || shift < -64) {
643
        val = 0;
644
    } else if (shift == -64) {
645
        /* Rounding a 1-bit result just preserves that bit.  */
646
        val >>= 63;
647
    } else if (shift < 0) {
648
        val >>= (-shift - 1);
649
        if (val == UINT64_MAX) {
650
            /* In this case, it means that the rounding constant is 1,
651
             * and the addition would overflow. Return the actual
652
             * result directly.  */
653
            val = 0x8000000000000000ULL;
654
        } else {
655
            val++;
656
            val >>= 1;
657
        }
658
    } else {
659
        val <<= shift;
660
    }
661
    return val;
662
}
663

    
664
/* VQSHL, unsigned lanes: saturating shift.  Left shifts that lose bits
 * saturate to all-ones and set QC (as does any shift count >= width on a
 * non-zero input); right shifts cannot saturate.  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN
689

    
690
/* VQSHL.U64: unsigned saturating shift; overflow detected by shifting
 * back and comparing.  */
uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        /* Any non-zero value saturates when shifted out entirely. */
        if (val) {
            val = ~(uint64_t)0;
            SET_QC();
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        /* If shifting back doesn't recover the input, bits were lost. */
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}
712

    
713
/* VQSHL, signed lanes: saturating shift.  On overflow, saturate to the
 * lane's most-negative value, decremented to most-positive when the
 * input was positive, and set QC.
 * NOTE(review): for the s32 instantiation `1 << (sizeof(src1) * 8 - 1)`
 * is 1 << 31, which formally overflows signed int; this relies on the
 * usual two's-complement result.  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        /* Arithmetic shift of the promoted int: yields 0 or -1. */ \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN
744

    
745
/* VQSHL.S64: signed saturating shift.  On overflow, saturates to
 * INT64_MAX/INT64_MIN depending on the input's sign and sets QC.  */
uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    /* Take the low byte as signed, matching the other shift helpers
     * (was cast via uint8_t here). */
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            /* (sign >> 63) ^ ~SIGNBIT64: INT64_MAX if positive, else MIN. */
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        /* If shifting back doesn't recover the input, bits were lost. */
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}
768

    
769
/* VQSHLU, 8/16-bit lanes: signed input, unsigned saturating result.
 * Negative inputs (top bit set) saturate to zero with QC; otherwise this
 * behaves like the unsigned saturating shift above.  */
#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN
798

    
799
/* VQSHLU.S32: signed input, unsigned saturating shift.  Negative inputs
 * clamp to zero (QC set); otherwise defer to the unsigned helper.  */
uint32_t HELPER(neon_qshlu_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop >= 0) {
        return helper_neon_qshl_u32(env, valop, shiftop);
    }
    SET_QC();
    return 0;
}
807

    
808
/* VQSHLU.S64: 64-bit analogue of neon_qshlu_s32. */
uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop >= 0) {
        return helper_neon_qshl_u64(env, valop, shiftop);
    }
    SET_QC();
    return 0;
}
816

    
817
/* FIXME: This is wrong.  */
818
#define NEON_FN(dest, src1, src2) do { \
819
    int8_t tmp; \
820
    tmp = (int8_t)src2; \
821
    if (tmp < 0) { \
822
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
823
    } else { \
824
        dest = src1 << tmp; \
825
        if ((dest >> tmp) != src1) { \
826
            SET_QC(); \
827
            dest = ~0; \
828
        } \
829
    }} while (0)
830
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
831
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
832
#undef NEON_FN
833

    
834
/* The addition of the rounding constant may overflow, so we use an
835
 * intermediate 64 bits accumulator.  */
836
uint32_t HELPER(neon_qrshl_u32)(CPUState *env, uint32_t val, uint32_t shiftop)
837
{
838
    uint32_t dest;
839
    int8_t shift = (int8_t)shiftop;
840
    if (shift < 0) {
841
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
842
        dest = big_dest >> -shift;
843
    } else {
844
        dest = val << shift;
845
        if ((dest >> shift) != val) {
846
            SET_QC();
847
            dest = ~0;
848
        }
849
    }
850
    return dest;
851
}
852

    
853
/* Handling addition overflow with 64 bits inputs values is more
854
 * tricky than with 32 bits values.  */
855
uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
856
{
857
    int8_t shift = (int8_t)shiftop;
858
    if (shift < 0) {
859
        val >>= (-shift - 1);
860
        if (val == UINT64_MAX) {
861
            /* In this case, it means that the rounding constant is 1,
862
             * and the addition would overflow. Return the actual
863
             * result directly.  */
864
            val = 0x8000000000000000ULL;
865
        } else {
866
            val++;
867
            val >>= 1;
868
        }
869
    } else { \
870
        uint64_t tmp = val;
871
        val <<= shift;
872
        if ((val >> shift) != tmp) {
873
            SET_QC();
874
            val = ~0;
875
        }
876
    }
877
    return val;
878
}
879

    
880
#define NEON_FN(dest, src1, src2) do { \
881
    int8_t tmp; \
882
    tmp = (int8_t)src2; \
883
    if (tmp < 0) { \
884
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
885
    } else { \
886
        dest = src1 << tmp; \
887
        if ((dest >> tmp) != src1) { \
888
            SET_QC(); \
889
            dest = src1 >> 31; \
890
        } \
891
    }} while (0)
892
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
893
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
894
#undef NEON_FN
895

    
896
/* The addition of the rounding constant may overflow, so we use an
897
 * intermediate 64 bits accumulator.  */
898
uint32_t HELPER(neon_qrshl_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
899
{
900
    int32_t dest;
901
    int32_t val = (int32_t)valop;
902
    int8_t shift = (int8_t)shiftop;
903
    if (shift < 0) {
904
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
905
        dest = big_dest >> -shift;
906
    } else {
907
        dest = val << shift;
908
        if ((dest >> shift) != val) {
909
            SET_QC();
910
            dest = (val >> 31) ^ ~SIGNBIT;
911
        }
912
    }
913
    return dest;
914
}
915

    
916
/* Handling addition overflow with 64 bits inputs values is more
917
 * tricky than with 32 bits values.  */
918
uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
919
{
920
    int8_t shift = (uint8_t)shiftop;
921
    int64_t val = valop;
922

    
923
    if (shift < 0) {
924
        val >>= (-shift - 1);
925
        if (val == INT64_MAX) {
926
            /* In this case, it means that the rounding constant is 1,
927
             * and the addition would overflow. Return the actual
928
             * result directly.  */
929
            val = 0x4000000000000000ULL;
930
        } else {
931
            val++;
932
            val >>= 1;
933
        }
934
    } else {
935
        int64_t tmp = val;
936
        val <<= shift;
937
        if ((val >> shift) != tmp) {
938
            SET_QC();
939
            val = (tmp >> 63) ^ ~SIGNBIT64;
940
        }
941
    }
942
    return val;
943
}
944

    
945
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
946
{
947
    uint32_t mask;
948
    mask = (a ^ b) & 0x80808080u;
949
    a &= ~0x80808080u;
950
    b &= ~0x80808080u;
951
    return (a + b) ^ mask;
952
}
953

    
954
uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
955
{
956
    uint32_t mask;
957
    mask = (a ^ b) & 0x80008000u;
958
    a &= ~0x80008000u;
959
    b &= ~0x80008000u;
960
    return (a + b) ^ mask;
961
}
962

    
963
/* VPADD: pairwise add (modular, no saturation).  */
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

/* VSUB: lane-wise subtract (modular).  */
#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

/* VMUL: lane-wise multiply, low half kept.  */
#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN
977

    
978
/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
/* VMUL.P8: each loop iteration consumes one bit position of every byte
 * lane of op1; `mask` spreads those bits into full-byte masks so all
 * four lanes are processed together.  */
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
    uint32_t mask;
    uint32_t result;
    result = 0;
    while (op1) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        /* XOR-accumulate the partial product for this bit position. */
        result ^= op2 & mask;
        /* Advance to the next bit of each op1 lane; the 0x7f mask stops
         * bits crossing into the neighbouring lane. */
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        /* Shift each op2 lane left, dropping bits at lane boundaries. */
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}
1001

    
1002
/* VMULL.P8: widening polynomial multiply.  Each byte lane of op2 is
 * spread into a 16-bit field of op2ex so the per-lane products can grow
 * to 16 bits; partial products are XORed as in neon_mul_p8.  */
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
{
    uint64_t result = 0;
    uint64_t mask;
    uint64_t op2ex = op2;
    op2ex = (op2ex & 0xff) |
        ((op2ex & 0xff00) << 8) |
        ((op2ex & 0xff0000) << 16) |
        ((op2ex & 0xff000000) << 24);
    while (op1) {
        mask = 0;
        if (op1 & 1) {
            mask |= 0xffff;
        }
        if (op1 & (1 << 8)) {
            mask |= (0xffffU << 16);
        }
        if (op1 & (1 << 16)) {
            mask |= (0xffffULL << 32);
        }
        if (op1 & (1 << 24)) {
            mask |= (0xffffULL << 48);
        }
        result ^= op2ex & mask;
        /* Next bit of each op1 lane, without crossing lane boundaries. */
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        /* 16-bit fields give enough headroom: no masking needed here. */
        op2ex <<= 1;
    }
    return result;
}
1031

    
1032
/* VTST: lane mask is all-ones when src1 and src2 share any set bit.  */
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

/* VCEQ: lane-wise equality mask.  */
#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN
1043

    
1044
/* VABS, 8/16-bit lanes: absolute value (non-saturating; NOTE(review):
 * the lane's most-negative value appears to wrap to itself on store —
 * confirm against the VABS spec).  The dummy operand is ignored.  */
#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN
1048

    
1049
/* Count Leading Sign/Zero Bits.  */
/* Leading-zero count of an 8-bit value: 8 for zero, 0 for values with
 * the top bit set.  */
static inline int do_clz8(uint8_t x)
{
    int leading = 8;

    while (x) {
        x >>= 1;
        leading--;
    }
    return leading;
}
1057

    
1058
/* Leading-zero count of a 16-bit value: 16 for zero, 0 when bit 15 set. */
static inline int do_clz16(uint16_t x)
{
    int leading = 16;

    while (x) {
        x >>= 1;
        leading--;
    }
    return leading;
}
1065

    
1066
/* VCLZ: count leading zeros per lane.  */
#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN

/* VCLS: count leading sign bits = clz of (~src for negative inputs)
 * minus one, excluding the sign bit itself.  */
#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN
1081

    
1082
uint32_t HELPER(neon_cls_s32)(uint32_t x)
1083
{
1084
    int count;
1085
    if ((int32_t)x < 0)
1086
        x = ~x;
1087
    for (count = 32; x; count--)
1088
        x = x >> 1;
1089
    return count - 1;
1090
}
1091

    
1092
/* Bit count.  */
1093
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
1094
{
1095
    x = (x & 0x55555555) + ((x >>  1) & 0x55555555);
1096
    x = (x & 0x33333333) + ((x >>  2) & 0x33333333);
1097
    x = (x & 0x0f0f0f0f) + ((x >>  4) & 0x0f0f0f0f);
1098
    return x;
1099
}
1100

    
1101
#define NEON_QDMULH16(dest, src1, src2, round) do { \
1102
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
1103
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
1104
        SET_QC(); \
1105
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
1106
    } else { \
1107
        tmp <<= 1; \
1108
    } \
1109
    if (round) { \
1110
        int32_t old = tmp; \
1111
        tmp += 1 << 15; \
1112
        if ((int32_t)tmp < old) { \
1113
            SET_QC(); \
1114
            tmp = SIGNBIT - 1; \
1115
        } \
1116
    } \
1117
    dest = tmp >> 16; \
1118
    } while(0)
1119
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
1120
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
1121
#undef NEON_FN
1122
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
1123
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
1124
#undef NEON_FN
1125
#undef NEON_QDMULH16
1126

    
1127
#define NEON_QDMULH32(dest, src1, src2, round) do { \
1128
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
1129
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
1130
        SET_QC(); \
1131
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
1132
    } else { \
1133
        tmp <<= 1; \
1134
    } \
1135
    if (round) { \
1136
        int64_t old = tmp; \
1137
        tmp += (int64_t)1 << 31; \
1138
        if ((int64_t)tmp < old) { \
1139
            SET_QC(); \
1140
            tmp = SIGNBIT64 - 1; \
1141
        } \
1142
    } \
1143
    dest = tmp >> 32; \
1144
    } while(0)
1145
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
1146
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
1147
#undef NEON_FN
1148
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
1149
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
1150
#undef NEON_FN
1151
#undef NEON_QDMULH32
1152

    
1153
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
1154
{
1155
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
1156
           | ((x >> 24) & 0xff000000u);
1157
}
1158

    
1159
uint32_t HELPER(neon_narrow_u16)(uint64_t x)
1160
{
1161
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
1162
}
1163

    
1164
uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
1165
{
1166
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
1167
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
1168
}
1169

    
1170
uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
1171
{
1172
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
1173
}
1174

    
1175
uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
1176
{
1177
    x &= 0xff80ff80ff80ff80ull;
1178
    x += 0x0080008000800080ull;
1179
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
1180
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
1181
}
1182

    
1183
uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
1184
{
1185
    x &= 0xffff8000ffff8000ull;
1186
    x += 0x0000800000008000ull;
1187
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
1188
}
1189

    
1190
/* Signed-to-unsigned saturating narrow (VQMOVUN): each signed 16-bit
 * lane is clamped to [0, 0xff]; negative lanes produce 0 and set QC.  */
uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else  { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

/* Unsigned saturating narrow (VQMOVN.U16): lanes above 0xff clamp to
 * 0xff and set QC.  */
uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

/* Signed saturating narrow (VQMOVN.S16): out-of-range lanes clamp to
 * 0x7f/0x80 depending on sign ((s >> 15) ^ 0x7f) and set QC.  */
uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
1263
/* Signed-to-unsigned saturating narrow of two 32-bit lanes to 16 bits:
 * negative lanes give 0, lanes above 0xffff give 0xffff; QC on clamp.  */
uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

/* Unsigned saturating narrow of two 32-bit lanes to 16 bits.  */
uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

/* Signed saturating narrow of two 32-bit lanes to 16 bits: clamp to
 * 0x7fff/0x8000 by sign and set QC when the lane does not fit.  */
uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}
1320
/* Signed-to-unsigned saturating narrow of one 64-bit lane to 32 bits:
 * negative gives 0, values above 0xffffffff clamp; QC set on clamp.  */
uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

/* Unsigned saturating narrow of a 64-bit value to 32 bits.  */
uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

/* Signed saturating narrow of a 64-bit value to 32 bits: clamp to
 * 0x7fffffff/0x80000000 by sign and set QC when out of range.  */
uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}
1351
uint64_t HELPER(neon_widen_u8)(uint32_t x)
1352
{
1353
    uint64_t tmp;
1354
    uint64_t ret;
1355
    ret = (uint8_t)x;
1356
    tmp = (uint8_t)(x >> 8);
1357
    ret |= tmp << 16;
1358
    tmp = (uint8_t)(x >> 16);
1359
    ret |= tmp << 32;
1360
    tmp = (uint8_t)(x >> 24);
1361
    ret |= tmp << 48;
1362
    return ret;
1363
}
1364

    
1365
uint64_t HELPER(neon_widen_s8)(uint32_t x)
1366
{
1367
    uint64_t tmp;
1368
    uint64_t ret;
1369
    ret = (uint16_t)(int8_t)x;
1370
    tmp = (uint16_t)(int8_t)(x >> 8);
1371
    ret |= tmp << 16;
1372
    tmp = (uint16_t)(int8_t)(x >> 16);
1373
    ret |= tmp << 32;
1374
    tmp = (uint16_t)(int8_t)(x >> 24);
1375
    ret |= tmp << 48;
1376
    return ret;
1377
}
1378

    
1379
uint64_t HELPER(neon_widen_u16)(uint32_t x)
1380
{
1381
    uint64_t high = (uint16_t)(x >> 16);
1382
    return ((uint16_t)x) | (high << 32);
1383
}
1384

    
1385
uint64_t HELPER(neon_widen_s16)(uint32_t x)
1386
{
1387
    uint64_t high = (int16_t)(x >> 16);
1388
    return ((uint32_t)(int16_t)x) | (high << 32);
1389
}
1390

    
1391
uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
1392
{
1393
    uint64_t mask;
1394
    mask = (a ^ b) & 0x8000800080008000ull;
1395
    a &= ~0x8000800080008000ull;
1396
    b &= ~0x8000800080008000ull;
1397
    return (a + b) ^ mask;
1398
}
1399

    
1400
uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
1401
{
1402
    uint64_t mask;
1403
    mask = (a ^ b) & 0x8000000080000000ull;
1404
    a &= ~0x8000000080000000ull;
1405
    b &= ~0x8000000080000000ull;
1406
    return (a + b) ^ mask;
1407
}
1408

    
1409
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
1410
{
1411
    uint64_t tmp;
1412
    uint64_t tmp2;
1413

    
1414
    tmp = a & 0x0000ffff0000ffffull;
1415
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
1416
    tmp2 = b & 0xffff0000ffff0000ull;
1417
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
1418
    return    ( tmp         & 0xffff)
1419
            | ((tmp  >> 16) & 0xffff0000ull)
1420
            | ((tmp2 << 16) & 0xffff00000000ull)
1421
            | ( tmp2        & 0xffff000000000000ull);
1422
}
1423

    
1424
uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
1425
{
1426
    uint32_t low = a + (a >> 32);
1427
    uint32_t high = b + (b >> 32);
1428
    return low + ((uint64_t)high << 32);
1429
}
1430

    
1431
uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
1432
{
1433
    uint64_t mask;
1434
    mask = (a ^ ~b) & 0x8000800080008000ull;
1435
    a |= 0x8000800080008000ull;
1436
    b &= ~0x8000800080008000ull;
1437
    return (a - b) ^ mask;
1438
}
1439

    
1440
uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
1441
{
1442
    uint64_t mask;
1443
    mask = (a ^ ~b) & 0x8000000080000000ull;
1444
    a |= 0x8000000080000000ull;
1445
    b &= ~0x8000000080000000ull;
1446
    return (a - b) ^ mask;
1447
}
1448

    
1449
/* Signed saturating add of 2x32-bit lanes.  Overflow occurs when the
 * operands share a sign and the sum's sign differs; the lane then
 * saturates toward the operand's sign and QC is set.  */
uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

/* Signed saturating 64-bit add, same overflow rule as above.  */
uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}
1483
#define DO_ABD(dest, x, y, type) do { \
1484
    type tmp_x = x; \
1485
    type tmp_y = y; \
1486
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
1487
    } while(0)
1488

    
1489
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
1490
{
1491
    uint64_t tmp;
1492
    uint64_t result;
1493
    DO_ABD(result, a, b, uint8_t);
1494
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t);
1495
    result |= tmp << 16;
1496
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t);
1497
    result |= tmp << 32;
1498
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t);
1499
    result |= tmp << 48;
1500
    return result;
1501
}
1502

    
1503
uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
1504
{
1505
    uint64_t tmp;
1506
    uint64_t result;
1507
    DO_ABD(result, a, b, int8_t);
1508
    DO_ABD(tmp, a >> 8, b >> 8, int8_t);
1509
    result |= tmp << 16;
1510
    DO_ABD(tmp, a >> 16, b >> 16, int8_t);
1511
    result |= tmp << 32;
1512
    DO_ABD(tmp, a >> 24, b >> 24, int8_t);
1513
    result |= tmp << 48;
1514
    return result;
1515
}
1516

    
1517
uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
1518
{
1519
    uint64_t tmp;
1520
    uint64_t result;
1521
    DO_ABD(result, a, b, uint16_t);
1522
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t);
1523
    return result | (tmp << 32);
1524
}
1525

    
1526
uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
1527
{
1528
    uint64_t tmp;
1529
    uint64_t result;
1530
    DO_ABD(result, a, b, int16_t);
1531
    DO_ABD(tmp, a >> 16, b >> 16, int16_t);
1532
    return result | (tmp << 32);
1533
}
1534

    
1535
uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
1536
{
1537
    uint64_t result;
1538
    DO_ABD(result, a, b, uint32_t);
1539
    return result;
1540
}
1541

    
1542
uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
1543
{
1544
    uint64_t result;
1545
    DO_ABD(result, a, b, int32_t);
1546
    return result;
1547
}
1548
#undef DO_ABD
1549

    
1550
/* Widening multiply. Named type is the source type.  */
1551
#define DO_MULL(dest, x, y, type1, type2) do { \
1552
    type1 tmp_x = x; \
1553
    type1 tmp_y = y; \
1554
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
1555
    } while(0)
1556

    
1557
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
1558
{
1559
    uint64_t tmp;
1560
    uint64_t result;
1561

    
1562
    DO_MULL(result, a, b, uint8_t, uint16_t);
1563
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
1564
    result |= tmp << 16;
1565
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
1566
    result |= tmp << 32;
1567
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
1568
    result |= tmp << 48;
1569
    return result;
1570
}
1571

    
1572
uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
1573
{
1574
    uint64_t tmp;
1575
    uint64_t result;
1576

    
1577
    DO_MULL(result, a, b, int8_t, uint16_t);
1578
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
1579
    result |= tmp << 16;
1580
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
1581
    result |= tmp << 32;
1582
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
1583
    result |= tmp << 48;
1584
    return result;
1585
}
1586

    
1587
uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
1588
{
1589
    uint64_t tmp;
1590
    uint64_t result;
1591

    
1592
    DO_MULL(result, a, b, uint16_t, uint32_t);
1593
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
1594
    return result | (tmp << 32);
1595
}
1596

    
1597
uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
1598
{
1599
    uint64_t tmp;
1600
    uint64_t result;
1601

    
1602
    DO_MULL(result, a, b, int16_t, uint32_t);
1603
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
1604
    return result | (tmp << 32);
1605
}
1606

    
1607
uint64_t HELPER(neon_negl_u16)(uint64_t x)
1608
{
1609
    uint16_t tmp;
1610
    uint64_t result;
1611
    result = (uint16_t)-x;
1612
    tmp = -(x >> 16);
1613
    result |= (uint64_t)tmp << 16;
1614
    tmp = -(x >> 32);
1615
    result |= (uint64_t)tmp << 32;
1616
    tmp = -(x >> 48);
1617
    result |= (uint64_t)tmp << 48;
1618
    return result;
1619
}
1620

    
1621
uint64_t HELPER(neon_negl_u32)(uint64_t x)
1622
{
1623
    uint32_t low = -x;
1624
    uint32_t high = -(x >> 32);
1625
    return low | ((uint64_t)high << 32);
1626
}
1627

    
1628
/* FIXME:  There should be a native op for this.  */
1629
uint64_t HELPER(neon_negl_u64)(uint64_t x)
1630
{
1631
    return -x;
1632
}
1633

    
1634
/* Saturnating sign manuipulation.  */
1635
/* ??? Make these use NEON_VOP1 */
1636
#define DO_QABS8(x) do { \
1637
    if (x == (int8_t)0x80) { \
1638
        x = 0x7f; \
1639
        SET_QC(); \
1640
    } else if (x < 0) { \
1641
        x = -x; \
1642
    }} while (0)
1643
uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
1644
{
1645
    neon_s8 vec;
1646
    NEON_UNPACK(neon_s8, vec, x);
1647
    DO_QABS8(vec.v1);
1648
    DO_QABS8(vec.v2);
1649
    DO_QABS8(vec.v3);
1650
    DO_QABS8(vec.v4);
1651
    NEON_PACK(neon_s8, x, vec);
1652
    return x;
1653
}
1654
#undef DO_QABS8
1655

    
1656
#define DO_QNEG8(x) do { \
1657
    if (x == (int8_t)0x80) { \
1658
        x = 0x7f; \
1659
        SET_QC(); \
1660
    } else { \
1661
        x = -x; \
1662
    }} while (0)
1663
uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
1664
{
1665
    neon_s8 vec;
1666
    NEON_UNPACK(neon_s8, vec, x);
1667
    DO_QNEG8(vec.v1);
1668
    DO_QNEG8(vec.v2);
1669
    DO_QNEG8(vec.v3);
1670
    DO_QNEG8(vec.v4);
1671
    NEON_PACK(neon_s8, x, vec);
1672
    return x;
1673
}
1674
#undef DO_QNEG8
1675

    
1676
#define DO_QABS16(x) do { \
1677
    if (x == (int16_t)0x8000) { \
1678
        x = 0x7fff; \
1679
        SET_QC(); \
1680
    } else if (x < 0) { \
1681
        x = -x; \
1682
    }} while (0)
1683
uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
1684
{
1685
    neon_s16 vec;
1686
    NEON_UNPACK(neon_s16, vec, x);
1687
    DO_QABS16(vec.v1);
1688
    DO_QABS16(vec.v2);
1689
    NEON_PACK(neon_s16, x, vec);
1690
    return x;
1691
}
1692
#undef DO_QABS16
1693

    
1694
#define DO_QNEG16(x) do { \
1695
    if (x == (int16_t)0x8000) { \
1696
        x = 0x7fff; \
1697
        SET_QC(); \
1698
    } else { \
1699
        x = -x; \
1700
    }} while (0)
1701
uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
1702
{
1703
    neon_s16 vec;
1704
    NEON_UNPACK(neon_s16, vec, x);
1705
    DO_QNEG16(vec.v1);
1706
    DO_QNEG16(vec.v2);
1707
    NEON_PACK(neon_s16, x, vec);
1708
    return x;
1709
}
1710
#undef DO_QNEG16
1711

    
1712
uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
1713
{
1714
    if (x == SIGNBIT) {
1715
        SET_QC();
1716
        x = ~SIGNBIT;
1717
    } else if ((int32_t)x < 0) {
1718
        x = -x;
1719
    }
1720
    return x;
1721
}
1722

    
1723
uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
1724
{
1725
    if (x == SIGNBIT) {
1726
        SET_QC();
1727
        x = ~SIGNBIT;
1728
    } else {
1729
        x = -x;
1730
    }
1731
    return x;
1732
}
1733

    
1734
/* NEON Float helpers.  */
1735
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
1736
{
1737
    float32 f0 = vfp_itos(a);
1738
    float32 f1 = vfp_itos(b);
1739
    return (float32_compare_quiet(f0, f1, NFS) == -1) ? a : b;
1740
}
1741

    
1742
uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
1743
{
1744
    float32 f0 = vfp_itos(a);
1745
    float32 f1 = vfp_itos(b);
1746
    return (float32_compare_quiet(f0, f1, NFS) == 1) ? a : b;
1747
}
1748

    
1749
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
1750
{
1751
    float32 f0 = vfp_itos(a);
1752
    float32 f1 = vfp_itos(b);
1753
    return vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
1754
                    ? float32_sub(f0, f1, NFS)
1755
                    : float32_sub(f1, f0, NFS));
1756
}
1757

    
1758
uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
1759
{
1760
    return vfp_stoi(float32_add(vfp_itos(a), vfp_itos(b), NFS));
1761
}
1762

    
1763
uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
1764
{
1765
    return vfp_stoi(float32_sub(vfp_itos(a), vfp_itos(b), NFS));
1766
}
1767

    
1768
uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
1769
{
1770
    return vfp_stoi(float32_mul(vfp_itos(a), vfp_itos(b), NFS));
1771
}
1772

    
1773
/* Floating point comparisons produce an integer result.  */
1774
#define NEON_VOP_FCMP(name, cmp) \
1775
uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
1776
{ \
1777
    if (float32_compare_quiet(vfp_itos(a), vfp_itos(b), NFS) cmp 0) \
1778
        return ~0; \
1779
    else \
1780
        return 0; \
1781
}
1782

    
1783
NEON_VOP_FCMP(ceq_f32, ==)
1784
NEON_VOP_FCMP(cge_f32, >=)
1785
NEON_VOP_FCMP(cgt_f32, >)
1786

    
1787
uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
1788
{
1789
    float32 f0 = float32_abs(vfp_itos(a));
1790
    float32 f1 = float32_abs(vfp_itos(b));
1791
    return (float32_compare_quiet(f0, f1,NFS) >= 0) ? ~0 : 0;
1792
}
1793

    
1794
uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
1795
{
1796
    float32 f0 = float32_abs(vfp_itos(a));
1797
    float32 f1 = float32_abs(vfp_itos(b));
1798
    return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
1799
}
1800

    
1801
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
1802

    
1803
void HELPER(neon_qunzip8)(CPUState *env, uint32_t rd, uint32_t rm)
1804
{
1805
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
1806
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
1807
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
1808
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
1809
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
1810
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
1811
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
1812
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
1813
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
1814
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
1815
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
1816
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
1817
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
1818
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
1819
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
1820
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
1821
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
1822
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
1823
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
1824
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
1825
    env->vfp.regs[rm] = make_float64(m0);
1826
    env->vfp.regs[rm + 1] = make_float64(m1);
1827
    env->vfp.regs[rd] = make_float64(d0);
1828
    env->vfp.regs[rd + 1] = make_float64(d1);
1829
}
1830

    
1831
void HELPER(neon_qunzip16)(CPUState *env, uint32_t rd, uint32_t rm)
1832
{
1833
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
1834
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
1835
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
1836
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
1837
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
1838
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
1839
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
1840
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
1841
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
1842
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
1843
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
1844
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
1845
    env->vfp.regs[rm] = make_float64(m0);
1846
    env->vfp.regs[rm + 1] = make_float64(m1);
1847
    env->vfp.regs[rd] = make_float64(d0);
1848
    env->vfp.regs[rd + 1] = make_float64(d1);
1849
}
1850

    
1851
void HELPER(neon_qunzip32)(CPUState *env, uint32_t rd, uint32_t rm)
1852
{
1853
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
1854
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
1855
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
1856
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
1857
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
1858
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
1859
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
1860
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
1861
    env->vfp.regs[rm] = make_float64(m0);
1862
    env->vfp.regs[rm + 1] = make_float64(m1);
1863
    env->vfp.regs[rd] = make_float64(d0);
1864
    env->vfp.regs[rd + 1] = make_float64(d1);
1865
}
1866

    
1867
/* Double-register VUZP on 8-bit elements: even elements of d and m go
 * to d, odd elements to m.  */
void HELPER(neon_unzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

/* Double-register VUZP on 16-bit elements.  */
void HELPER(neon_unzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
1895
/* Quad-register VZIP on 8-bit elements: interleave the elements of the
 * d and m register pairs.  */
void HELPER(neon_qzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

/* Quad-register VZIP on 16-bit elements.  */
void HELPER(neon_qzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

/* Quad-register VZIP on 32-bit elements.  */
void HELPER(neon_qzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
1959
/* Double-register VZIP on 8-bit elements: interleave d and m; the low
 * interleaved half lands in d, the high half in m.  */
void HELPER(neon_zip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

/* Double-register VZIP on 16-bit elements.  */
void HELPER(neon_zip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}