Statistics
| Branch: | Revision:

root / target-arm / helper.c @ bbc5c5fa

History | View | Annotate | Download (90.6 kB)

1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "gdbstub.h"
7
#include "helper.h"
8
#include "qemu-common.h"
9
#include "host-utils.h"
10
#if !defined(CONFIG_USER_ONLY)
11
#include "hw/loader.h"
12
#endif
13

    
14
static uint32_t cortexa9_cp15_c0_c1[8] =
15
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };
16

    
17
static uint32_t cortexa9_cp15_c0_c2[8] =
18
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };
19

    
20
static uint32_t cortexa8_cp15_c0_c1[8] =
21
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
22

    
23
static uint32_t cortexa8_cp15_c0_c2[8] =
24
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
25

    
26
static uint32_t mpcore_cp15_c0_c1[8] =
27
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
28

    
29
static uint32_t mpcore_cp15_c0_c2[8] =
30
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
31

    
32
static uint32_t arm1136_cp15_c0_c1[8] =
33
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
34

    
35
static uint32_t arm1136_cp15_c0_c2[8] =
36
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
37

    
38
static uint32_t arm1176_cp15_c0_c1[8] =
39
{ 0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0 };
40

    
41
static uint32_t arm1176_cp15_c0_c2[8] =
42
{ 0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0 };
43

    
44
static uint32_t cpu_arm_find_by_name(const char *name);
45

    
46
static inline void set_feature(CPUARMState *env, int feature)
47
{
48
    env->features |= 1u << feature;
49
}
50

    
51
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
52
{
53
    env->cp15.c0_cpuid = id;
54
    switch (id) {
55
    case ARM_CPUID_ARM926:
56
        set_feature(env, ARM_FEATURE_V5);
57
        set_feature(env, ARM_FEATURE_VFP);
58
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
59
        env->cp15.c0_cachetype = 0x1dd20d2;
60
        env->cp15.c1_sys = 0x00090078;
61
        break;
62
    case ARM_CPUID_ARM946:
63
        set_feature(env, ARM_FEATURE_V5);
64
        set_feature(env, ARM_FEATURE_MPU);
65
        env->cp15.c0_cachetype = 0x0f004006;
66
        env->cp15.c1_sys = 0x00000078;
67
        break;
68
    case ARM_CPUID_ARM1026:
69
        set_feature(env, ARM_FEATURE_V5);
70
        set_feature(env, ARM_FEATURE_VFP);
71
        set_feature(env, ARM_FEATURE_AUXCR);
72
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
73
        env->cp15.c0_cachetype = 0x1dd20d2;
74
        env->cp15.c1_sys = 0x00090078;
75
        break;
76
    case ARM_CPUID_ARM1136:
77
        /* This is the 1136 r1, which is a v6K core */
78
        set_feature(env, ARM_FEATURE_V6K);
79
        /* Fall through */
80
    case ARM_CPUID_ARM1136_R2:
81
        /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
82
         * older core than plain "arm1136". In particular this does not
83
         * have the v6K features.
84
         */
85
        set_feature(env, ARM_FEATURE_V6);
86
        set_feature(env, ARM_FEATURE_VFP);
87
        set_feature(env, ARM_FEATURE_AUXCR);
88
        /* These ID register values are correct for 1136 but may be wrong
89
         * for 1136_r2 (in particular r0p2 does not actually implement most
90
         * of the ID registers).
91
         */
92
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
93
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
94
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
95
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
96
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
97
        env->cp15.c0_cachetype = 0x1dd20d2;
98
        env->cp15.c1_sys = 0x00050078;
99
        break;
100
    case ARM_CPUID_ARM1176:
101
        set_feature(env, ARM_FEATURE_V6K);
102
        set_feature(env, ARM_FEATURE_VFP);
103
        set_feature(env, ARM_FEATURE_AUXCR);
104
        set_feature(env, ARM_FEATURE_VAPA);
105
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5;
106
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
107
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
108
        memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t));
109
        memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t));
110
        env->cp15.c0_cachetype = 0x1dd20d2;
111
        env->cp15.c1_sys = 0x00050078;
112
        break;
113
    case ARM_CPUID_ARM11MPCORE:
114
        set_feature(env, ARM_FEATURE_V6K);
115
        set_feature(env, ARM_FEATURE_VFP);
116
        set_feature(env, ARM_FEATURE_AUXCR);
117
        set_feature(env, ARM_FEATURE_VAPA);
118
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
119
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
120
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
121
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
122
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
123
        env->cp15.c0_cachetype = 0x1dd20d2;
124
        break;
125
    case ARM_CPUID_CORTEXA8:
126
        set_feature(env, ARM_FEATURE_V7);
127
        set_feature(env, ARM_FEATURE_AUXCR);
128
        set_feature(env, ARM_FEATURE_THUMB2);
129
        set_feature(env, ARM_FEATURE_VFP);
130
        set_feature(env, ARM_FEATURE_VFP3);
131
        set_feature(env, ARM_FEATURE_NEON);
132
        set_feature(env, ARM_FEATURE_THUMB2EE);
133
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
134
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
135
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
136
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
137
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
138
        env->cp15.c0_cachetype = 0x82048004;
139
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
140
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
141
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
142
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
143
        env->cp15.c1_sys = 0x00c50078;
144
        break;
145
    case ARM_CPUID_CORTEXA9:
146
        set_feature(env, ARM_FEATURE_V7);
147
        set_feature(env, ARM_FEATURE_AUXCR);
148
        set_feature(env, ARM_FEATURE_THUMB2);
149
        set_feature(env, ARM_FEATURE_VFP);
150
        set_feature(env, ARM_FEATURE_VFP3);
151
        set_feature(env, ARM_FEATURE_VFP_FP16);
152
        set_feature(env, ARM_FEATURE_NEON);
153
        set_feature(env, ARM_FEATURE_THUMB2EE);
154
        /* Note that A9 supports the MP extensions even for
155
         * A9UP and single-core A9MP (which are both different
156
         * and valid configurations; we don't model A9UP).
157
         */
158
        set_feature(env, ARM_FEATURE_V7MP);
159
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
160
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
161
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
162
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
163
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
164
        env->cp15.c0_cachetype = 0x80038003;
165
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
166
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
167
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
168
        env->cp15.c1_sys = 0x00c50078;
169
        break;
170
    case ARM_CPUID_CORTEXM3:
171
        set_feature(env, ARM_FEATURE_THUMB2);
172
        set_feature(env, ARM_FEATURE_V7);
173
        set_feature(env, ARM_FEATURE_M);
174
        set_feature(env, ARM_FEATURE_THUMB_DIV);
175
        break;
176
    case ARM_CPUID_ANY: /* For userspace emulation.  */
177
        set_feature(env, ARM_FEATURE_V7);
178
        set_feature(env, ARM_FEATURE_THUMB2);
179
        set_feature(env, ARM_FEATURE_VFP);
180
        set_feature(env, ARM_FEATURE_VFP3);
181
        set_feature(env, ARM_FEATURE_VFP4);
182
        set_feature(env, ARM_FEATURE_VFP_FP16);
183
        set_feature(env, ARM_FEATURE_NEON);
184
        set_feature(env, ARM_FEATURE_THUMB2EE);
185
        set_feature(env, ARM_FEATURE_ARM_DIV);
186
        set_feature(env, ARM_FEATURE_V7MP);
187
        break;
188
    case ARM_CPUID_TI915T:
189
    case ARM_CPUID_TI925T:
190
        set_feature(env, ARM_FEATURE_V4T);
191
        set_feature(env, ARM_FEATURE_OMAPCP);
192
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
193
        env->cp15.c0_cachetype = 0x5109149;
194
        env->cp15.c1_sys = 0x00000070;
195
        env->cp15.c15_i_max = 0x000;
196
        env->cp15.c15_i_min = 0xff0;
197
        break;
198
    case ARM_CPUID_PXA250:
199
    case ARM_CPUID_PXA255:
200
    case ARM_CPUID_PXA260:
201
    case ARM_CPUID_PXA261:
202
    case ARM_CPUID_PXA262:
203
        set_feature(env, ARM_FEATURE_V5);
204
        set_feature(env, ARM_FEATURE_XSCALE);
205
        /* JTAG_ID is ((id << 28) | 0x09265013) */
206
        env->cp15.c0_cachetype = 0xd172172;
207
        env->cp15.c1_sys = 0x00000078;
208
        break;
209
    case ARM_CPUID_PXA270_A0:
210
    case ARM_CPUID_PXA270_A1:
211
    case ARM_CPUID_PXA270_B0:
212
    case ARM_CPUID_PXA270_B1:
213
    case ARM_CPUID_PXA270_C0:
214
    case ARM_CPUID_PXA270_C5:
215
        set_feature(env, ARM_FEATURE_V5);
216
        set_feature(env, ARM_FEATURE_XSCALE);
217
        /* JTAG_ID is ((id << 28) | 0x09265013) */
218
        set_feature(env, ARM_FEATURE_IWMMXT);
219
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
220
        env->cp15.c0_cachetype = 0xd172172;
221
        env->cp15.c1_sys = 0x00000078;
222
        break;
223
    case ARM_CPUID_SA1100:
224
    case ARM_CPUID_SA1110:
225
        set_feature(env, ARM_FEATURE_STRONGARM);
226
        env->cp15.c1_sys = 0x00000070;
227
        break;
228
    default:
229
        cpu_abort(env, "Bad CPU ID: %x\n", id);
230
        break;
231
    }
232

    
233
    /* Some features automatically imply others: */
234
    if (arm_feature(env, ARM_FEATURE_V7)) {
235
        set_feature(env, ARM_FEATURE_VAPA);
236
        if (!arm_feature(env, ARM_FEATURE_M)) {
237
            set_feature(env, ARM_FEATURE_V6K);
238
        } else {
239
            set_feature(env, ARM_FEATURE_V6);
240
        }
241
    }
242
    if (arm_feature(env, ARM_FEATURE_V6K)) {
243
        set_feature(env, ARM_FEATURE_V6);
244
    }
245
    if (arm_feature(env, ARM_FEATURE_V6)) {
246
        set_feature(env, ARM_FEATURE_V5);
247
    }
248
    if (arm_feature(env, ARM_FEATURE_V5)) {
249
        set_feature(env, ARM_FEATURE_V4T);
250
    }
251
    if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
252
        set_feature(env, ARM_FEATURE_THUMB_DIV);
253
    }
254
}
255

    
256
void cpu_reset(CPUARMState *env)
257
{
258
    uint32_t id;
259

    
260
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
261
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
262
        log_cpu_state(env, 0);
263
    }
264

    
265
    id = env->cp15.c0_cpuid;
266
    memset(env, 0, offsetof(CPUARMState, breakpoints));
267
    if (id)
268
        cpu_reset_model_id(env, id);
269
#if defined (CONFIG_USER_ONLY)
270
    env->uncached_cpsr = ARM_CPU_MODE_USR;
271
    /* For user mode we must enable access to coprocessors */
272
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
273
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
274
        env->cp15.c15_cpar = 3;
275
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
276
        env->cp15.c15_cpar = 1;
277
    }
278
#else
279
    /* SVC mode with interrupts disabled.  */
280
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
281
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
282
       clear at reset.  Initial SP and PC are loaded from ROM.  */
283
    if (IS_M(env)) {
284
        uint32_t pc;
285
        uint8_t *rom;
286
        env->uncached_cpsr &= ~CPSR_I;
287
        rom = rom_ptr(0);
288
        if (rom) {
289
            /* We should really use ldl_phys here, in case the guest
290
               modified flash and reset itself.  However images
291
               loaded via -kernel have not been copied yet, so load the
292
               values directly from there.  */
293
            env->regs[13] = ldl_p(rom);
294
            pc = ldl_p(rom + 4);
295
            env->thumb = pc & 1;
296
            env->regs[15] = pc & ~1;
297
        }
298
    }
299
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
300
    env->cp15.c2_base_mask = 0xffffc000u;
301
    /* v7 performance monitor control register: same implementor
302
     * field as main ID register, and we implement no event counters.
303
     */
304
    env->cp15.c9_pmcr = (id & 0xff000000);
305
#endif
306
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
307
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
308
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
309
    set_float_detect_tininess(float_tininess_before_rounding,
310
                              &env->vfp.fp_status);
311
    set_float_detect_tininess(float_tininess_before_rounding,
312
                              &env->vfp.standard_fp_status);
313
    tlb_flush(env, 1);
314
}
315

    
316
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
317
{
318
    int nregs;
319

    
320
    /* VFP data registers are always little-endian.  */
321
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
322
    if (reg < nregs) {
323
        stfq_le_p(buf, env->vfp.regs[reg]);
324
        return 8;
325
    }
326
    if (arm_feature(env, ARM_FEATURE_NEON)) {
327
        /* Aliases for Q regs.  */
328
        nregs += 16;
329
        if (reg < nregs) {
330
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
331
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
332
            return 16;
333
        }
334
    }
335
    switch (reg - nregs) {
336
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
337
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
338
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
339
    }
340
    return 0;
341
}
342

    
343
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
344
{
345
    int nregs;
346

    
347
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
348
    if (reg < nregs) {
349
        env->vfp.regs[reg] = ldfq_le_p(buf);
350
        return 8;
351
    }
352
    if (arm_feature(env, ARM_FEATURE_NEON)) {
353
        nregs += 16;
354
        if (reg < nregs) {
355
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
356
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
357
            return 16;
358
        }
359
    }
360
    switch (reg - nregs) {
361
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
362
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
363
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
364
    }
365
    return 0;
366
}
367

    
368
CPUARMState *cpu_arm_init(const char *cpu_model)
369
{
370
    CPUARMState *env;
371
    uint32_t id;
372
    static int inited = 0;
373

    
374
    id = cpu_arm_find_by_name(cpu_model);
375
    if (id == 0)
376
        return NULL;
377
    env = g_malloc0(sizeof(CPUARMState));
378
    cpu_exec_init(env);
379
    if (tcg_enabled() && !inited) {
380
        inited = 1;
381
        arm_translate_init();
382
    }
383

    
384
    env->cpu_model_str = cpu_model;
385
    env->cp15.c0_cpuid = id;
386
    cpu_reset(env);
387
    if (arm_feature(env, ARM_FEATURE_NEON)) {
388
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
389
                                 51, "arm-neon.xml", 0);
390
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
391
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
392
                                 35, "arm-vfp3.xml", 0);
393
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
394
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
395
                                 19, "arm-vfp.xml", 0);
396
    }
397
    qemu_init_vcpu(env);
398
    return env;
399
}
400

    
401
struct arm_cpu_t {
402
    uint32_t id;
403
    const char *name;
404
};
405

    
406
static const struct arm_cpu_t arm_cpu_names[] = {
407
    { ARM_CPUID_ARM926, "arm926"},
408
    { ARM_CPUID_ARM946, "arm946"},
409
    { ARM_CPUID_ARM1026, "arm1026"},
410
    { ARM_CPUID_ARM1136, "arm1136"},
411
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
412
    { ARM_CPUID_ARM1176, "arm1176"},
413
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
414
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
415
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
416
    { ARM_CPUID_CORTEXA9, "cortex-a9"},
417
    { ARM_CPUID_TI925T, "ti925t" },
418
    { ARM_CPUID_PXA250, "pxa250" },
419
    { ARM_CPUID_SA1100,    "sa1100" },
420
    { ARM_CPUID_SA1110,    "sa1110" },
421
    { ARM_CPUID_PXA255, "pxa255" },
422
    { ARM_CPUID_PXA260, "pxa260" },
423
    { ARM_CPUID_PXA261, "pxa261" },
424
    { ARM_CPUID_PXA262, "pxa262" },
425
    { ARM_CPUID_PXA270, "pxa270" },
426
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
427
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
428
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
429
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
430
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
431
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
432
    { ARM_CPUID_ANY, "any"},
433
    { 0, NULL}
434
};
435

    
436
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
437
{
438
    int i;
439

    
440
    (*cpu_fprintf)(f, "Available CPUs:\n");
441
    for (i = 0; arm_cpu_names[i].name; i++) {
442
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
443
    }
444
}
445

    
446
/* return 0 if not found */
447
static uint32_t cpu_arm_find_by_name(const char *name)
448
{
449
    int i;
450
    uint32_t id;
451

    
452
    id = 0;
453
    for (i = 0; arm_cpu_names[i].name; i++) {
454
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
455
            id = arm_cpu_names[i].id;
456
            break;
457
        }
458
    }
459
    return id;
460
}
461

    
462
void cpu_arm_close(CPUARMState *env)
463
{
464
    g_free(env);
465
}
466

    
467
uint32_t cpsr_read(CPUARMState *env)
468
{
469
    int ZF;
470
    ZF = (env->ZF == 0);
471
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
472
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
473
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
474
        | ((env->condexec_bits & 0xfc) << 8)
475
        | (env->GE << 16);
476
}
477

    
478
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
479
{
480
    if (mask & CPSR_NZCV) {
481
        env->ZF = (~val) & CPSR_Z;
482
        env->NF = val;
483
        env->CF = (val >> 29) & 1;
484
        env->VF = (val << 3) & 0x80000000;
485
    }
486
    if (mask & CPSR_Q)
487
        env->QF = ((val & CPSR_Q) != 0);
488
    if (mask & CPSR_T)
489
        env->thumb = ((val & CPSR_T) != 0);
490
    if (mask & CPSR_IT_0_1) {
491
        env->condexec_bits &= ~3;
492
        env->condexec_bits |= (val >> 25) & 3;
493
    }
494
    if (mask & CPSR_IT_2_7) {
495
        env->condexec_bits &= 3;
496
        env->condexec_bits |= (val >> 8) & 0xfc;
497
    }
498
    if (mask & CPSR_GE) {
499
        env->GE = (val >> 16) & 0xf;
500
    }
501

    
502
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
503
        switch_mode(env, val & CPSR_M);
504
    }
505
    mask &= ~CACHED_CPSR_BITS;
506
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
507
}
508

    
509
/* Sign/zero extend */
510
uint32_t HELPER(sxtb16)(uint32_t x)
511
{
512
    uint32_t res;
513
    res = (uint16_t)(int8_t)x;
514
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
515
    return res;
516
}
517

    
518
uint32_t HELPER(uxtb16)(uint32_t x)
519
{
520
    uint32_t res;
521
    res = (uint16_t)(uint8_t)x;
522
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
523
    return res;
524
}
525

    
526
uint32_t HELPER(clz)(uint32_t x)
527
{
528
    return clz32(x);
529
}
530

    
531
int32_t HELPER(sdiv)(int32_t num, int32_t den)
532
{
533
    if (den == 0)
534
      return 0;
535
    if (num == INT_MIN && den == -1)
536
      return INT_MIN;
537
    return num / den;
538
}
539

    
540
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
541
{
542
    if (den == 0)
543
      return 0;
544
    return num / den;
545
}
546

    
547
uint32_t HELPER(rbit)(uint32_t x)
548
{
549
    x =  ((x & 0xff000000) >> 24)
550
       | ((x & 0x00ff0000) >> 8)
551
       | ((x & 0x0000ff00) << 8)
552
       | ((x & 0x000000ff) << 24);
553
    x =  ((x & 0xf0f0f0f0) >> 4)
554
       | ((x & 0x0f0f0f0f) << 4);
555
    x =  ((x & 0x88888888) >> 3)
556
       | ((x & 0x44444444) >> 1)
557
       | ((x & 0x22222222) << 1)
558
       | ((x & 0x11111111) << 3);
559
    return x;
560
}
561

    
562
uint32_t HELPER(abs)(uint32_t x)
563
{
564
    return ((int32_t)x < 0) ? -x : x;
565
}
566

    
567
#if defined(CONFIG_USER_ONLY)
568

    
569
void do_interrupt (CPUState *env)
570
{
571
    env->exception_index = -1;
572
}
573

    
574
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
575
                              int mmu_idx)
576
{
577
    if (rw == 2) {
578
        env->exception_index = EXCP_PREFETCH_ABORT;
579
        env->cp15.c6_insn = address;
580
    } else {
581
        env->exception_index = EXCP_DATA_ABORT;
582
        env->cp15.c6_data = address;
583
    }
584
    return 1;
585
}
586

    
587
/* These should probably raise undefined insn exceptions.  */
588
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
589
{
590
    int op1 = (insn >> 8) & 0xf;
591
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
592
    return;
593
}
594

    
595
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
596
{
597
    int op1 = (insn >> 8) & 0xf;
598
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
599
    return 0;
600
}
601

    
602
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
603
{
604
    cpu_abort(env, "cp15 insn %08x\n", insn);
605
}
606

    
607
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
608
{
609
    cpu_abort(env, "cp15 insn %08x\n", insn);
610
}
611

    
612
/* These should probably raise undefined insn exceptions.  */
613
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
614
{
615
    cpu_abort(env, "v7m_mrs %d\n", reg);
616
}
617

    
618
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
619
{
620
    cpu_abort(env, "v7m_mrs %d\n", reg);
621
    return 0;
622
}
623

    
624
void switch_mode(CPUState *env, int mode)
625
{
626
    if (mode != ARM_CPU_MODE_USR)
627
        cpu_abort(env, "Tried to switch out of user mode\n");
628
}
629

    
630
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
631
{
632
    cpu_abort(env, "banked r13 write\n");
633
}
634

    
635
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
636
{
637
    cpu_abort(env, "banked r13 read\n");
638
    return 0;
639
}
640

    
641
#else
642

    
643
extern int semihosting_enabled;
644

    
645
/* Map CPU modes onto saved register banks.  */
646
static inline int bank_number (int mode)
647
{
648
    switch (mode) {
649
    case ARM_CPU_MODE_USR:
650
    case ARM_CPU_MODE_SYS:
651
        return 0;
652
    case ARM_CPU_MODE_SVC:
653
        return 1;
654
    case ARM_CPU_MODE_ABT:
655
        return 2;
656
    case ARM_CPU_MODE_UND:
657
        return 3;
658
    case ARM_CPU_MODE_IRQ:
659
        return 4;
660
    case ARM_CPU_MODE_FIQ:
661
        return 5;
662
    }
663
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
664
    return -1;
665
}
666

    
667
void switch_mode(CPUState *env, int mode)
668
{
669
    int old_mode;
670
    int i;
671

    
672
    old_mode = env->uncached_cpsr & CPSR_M;
673
    if (mode == old_mode)
674
        return;
675

    
676
    if (old_mode == ARM_CPU_MODE_FIQ) {
677
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
678
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
679
    } else if (mode == ARM_CPU_MODE_FIQ) {
680
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
681
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
682
    }
683

    
684
    i = bank_number(old_mode);
685
    env->banked_r13[i] = env->regs[13];
686
    env->banked_r14[i] = env->regs[14];
687
    env->banked_spsr[i] = env->spsr;
688

    
689
    i = bank_number(mode);
690
    env->regs[13] = env->banked_r13[i];
691
    env->regs[14] = env->banked_r14[i];
692
    env->spsr = env->banked_spsr[i];
693
}
694

    
695
static void v7m_push(CPUARMState *env, uint32_t val)
696
{
697
    env->regs[13] -= 4;
698
    stl_phys(env->regs[13], val);
699
}
700

    
701
static uint32_t v7m_pop(CPUARMState *env)
702
{
703
    uint32_t val;
704
    val = ldl_phys(env->regs[13]);
705
    env->regs[13] += 4;
706
    return val;
707
}
708

    
709
/* Switch to V7M main or process stack pointer.  */
710
static void switch_v7m_sp(CPUARMState *env, int process)
711
{
712
    uint32_t tmp;
713
    if (env->v7m.current_sp != process) {
714
        tmp = env->v7m.other_sp;
715
        env->v7m.other_sp = env->regs[13];
716
        env->regs[13] = tmp;
717
        env->v7m.current_sp = process;
718
    }
719
}
720

    
721
static void do_v7m_exception_exit(CPUARMState *env)
722
{
723
    uint32_t type;
724
    uint32_t xpsr;
725

    
726
    type = env->regs[15];
727
    if (env->v7m.exception != 0)
728
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
729

    
730
    /* Switch to the target stack.  */
731
    switch_v7m_sp(env, (type & 4) != 0);
732
    /* Pop registers.  */
733
    env->regs[0] = v7m_pop(env);
734
    env->regs[1] = v7m_pop(env);
735
    env->regs[2] = v7m_pop(env);
736
    env->regs[3] = v7m_pop(env);
737
    env->regs[12] = v7m_pop(env);
738
    env->regs[14] = v7m_pop(env);
739
    env->regs[15] = v7m_pop(env);
740
    xpsr = v7m_pop(env);
741
    xpsr_write(env, xpsr, 0xfffffdff);
742
    /* Undo stack alignment.  */
743
    if (xpsr & 0x200)
744
        env->regs[13] |= 4;
745
    /* ??? The exception return type specifies Thread/Handler mode.  However
746
       this is also implied by the xPSR value. Not sure what to do
747
       if there is a mismatch.  */
748
    /* ??? Likewise for mismatches between the CONTROL register and the stack
749
       pointer.  */
750
}
751

    
752
static void do_interrupt_v7m(CPUARMState *env)
753
{
754
    uint32_t xpsr = xpsr_read(env);
755
    uint32_t lr;
756
    uint32_t addr;
757

    
758
    lr = 0xfffffff1;
759
    if (env->v7m.current_sp)
760
        lr |= 4;
761
    if (env->v7m.exception == 0)
762
        lr |= 8;
763

    
764
    /* For exceptions we just mark as pending on the NVIC, and let that
765
       handle it.  */
766
    /* TODO: Need to escalate if the current priority is higher than the
767
       one we're raising.  */
768
    switch (env->exception_index) {
769
    case EXCP_UDEF:
770
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
771
        return;
772
    case EXCP_SWI:
773
        env->regs[15] += 2;
774
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
775
        return;
776
    case EXCP_PREFETCH_ABORT:
777
    case EXCP_DATA_ABORT:
778
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
779
        return;
780
    case EXCP_BKPT:
781
        if (semihosting_enabled) {
782
            int nr;
783
            nr = lduw_code(env->regs[15]) & 0xff;
784
            if (nr == 0xab) {
785
                env->regs[15] += 2;
786
                env->regs[0] = do_arm_semihosting(env);
787
                return;
788
            }
789
        }
790
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
791
        return;
792
    case EXCP_IRQ:
793
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
794
        break;
795
    case EXCP_EXCEPTION_EXIT:
796
        do_v7m_exception_exit(env);
797
        return;
798
    default:
799
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
800
        return; /* Never happens.  Keep compiler happy.  */
801
    }
802

    
803
    /* Align stack pointer.  */
804
    /* ??? Should only do this if Configuration Control Register
805
       STACKALIGN bit is set.  */
806
    if (env->regs[13] & 4) {
807
        env->regs[13] -= 4;
808
        xpsr |= 0x200;
809
    }
810
    /* Switch to the handler mode.  */
811
    v7m_push(env, xpsr);
812
    v7m_push(env, env->regs[15]);
813
    v7m_push(env, env->regs[14]);
814
    v7m_push(env, env->regs[12]);
815
    v7m_push(env, env->regs[3]);
816
    v7m_push(env, env->regs[2]);
817
    v7m_push(env, env->regs[1]);
818
    v7m_push(env, env->regs[0]);
819
    switch_v7m_sp(env, 0);
820
    env->uncached_cpsr &= ~CPSR_IT;
821
    env->regs[14] = lr;
822
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
823
    env->regs[15] = addr & 0xfffffffe;
824
    env->thumb = addr & 1;
825
}
826

    
827
/* Handle a CPU exception.  */
828
void do_interrupt(CPUARMState *env)
829
{
830
    uint32_t addr;
831
    uint32_t mask;
832
    int new_mode;
833
    uint32_t offset;
834

    
835
    if (IS_M(env)) {
836
        do_interrupt_v7m(env);
837
        return;
838
    }
839
    /* TODO: Vectored interrupt controller.  */
840
    switch (env->exception_index) {
841
    case EXCP_UDEF:
842
        new_mode = ARM_CPU_MODE_UND;
843
        addr = 0x04;
844
        mask = CPSR_I;
845
        if (env->thumb)
846
            offset = 2;
847
        else
848
            offset = 4;
849
        break;
850
    case EXCP_SWI:
851
        if (semihosting_enabled) {
852
            /* Check for semihosting interrupt.  */
853
            if (env->thumb) {
854
                mask = lduw_code(env->regs[15] - 2) & 0xff;
855
            } else {
856
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
857
            }
858
            /* Only intercept calls from privileged modes, to provide some
859
               semblance of security.  */
860
            if (((mask == 0x123456 && !env->thumb)
861
                    || (mask == 0xab && env->thumb))
862
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
863
                env->regs[0] = do_arm_semihosting(env);
864
                return;
865
            }
866
        }
867
        new_mode = ARM_CPU_MODE_SVC;
868
        addr = 0x08;
869
        mask = CPSR_I;
870
        /* The PC already points to the next instruction.  */
871
        offset = 0;
872
        break;
873
    case EXCP_BKPT:
874
        /* See if this is a semihosting syscall.  */
875
        if (env->thumb && semihosting_enabled) {
876
            mask = lduw_code(env->regs[15]) & 0xff;
877
            if (mask == 0xab
878
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
879
                env->regs[15] += 2;
880
                env->regs[0] = do_arm_semihosting(env);
881
                return;
882
            }
883
        }
884
        env->cp15.c5_insn = 2;
885
        /* Fall through to prefetch abort.  */
886
    case EXCP_PREFETCH_ABORT:
887
        new_mode = ARM_CPU_MODE_ABT;
888
        addr = 0x0c;
889
        mask = CPSR_A | CPSR_I;
890
        offset = 4;
891
        break;
892
    case EXCP_DATA_ABORT:
893
        new_mode = ARM_CPU_MODE_ABT;
894
        addr = 0x10;
895
        mask = CPSR_A | CPSR_I;
896
        offset = 8;
897
        break;
898
    case EXCP_IRQ:
899
        new_mode = ARM_CPU_MODE_IRQ;
900
        addr = 0x18;
901
        /* Disable IRQ and imprecise data aborts.  */
902
        mask = CPSR_A | CPSR_I;
903
        offset = 4;
904
        break;
905
    case EXCP_FIQ:
906
        new_mode = ARM_CPU_MODE_FIQ;
907
        addr = 0x1c;
908
        /* Disable FIQ, IRQ and imprecise data aborts.  */
909
        mask = CPSR_A | CPSR_I | CPSR_F;
910
        offset = 4;
911
        break;
912
    default:
913
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
914
        return; /* Never happens.  Keep compiler happy.  */
915
    }
916
    /* High vectors.  */
917
    if (env->cp15.c1_sys & (1 << 13)) {
918
        addr += 0xffff0000;
919
    }
920
    switch_mode (env, new_mode);
921
    env->spsr = cpsr_read(env);
922
    /* Clear IT bits.  */
923
    env->condexec_bits = 0;
924
    /* Switch to the new mode, and to the correct instruction set.  */
925
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
926
    env->uncached_cpsr |= mask;
927
    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
928
     * and we should just guard the thumb mode on V4 */
929
    if (arm_feature(env, ARM_FEATURE_V4T)) {
930
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
931
    }
932
    env->regs[14] = env->regs[15] + offset;
933
    env->regs[15] = addr;
934
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
935
}
936

    
937
/* Check section/page access permissions.
938
   Returns the page protection flags, or zero if the access is not
939
   permitted.  */
940
static inline int check_ap(CPUState *env, int ap, int domain_prot,
941
                           int access_type, int is_user)
942
{
943
  int prot_ro;
944

    
945
  if (domain_prot == 3) {
946
    return PAGE_READ | PAGE_WRITE;
947
  }
948

    
949
  if (access_type == 1)
950
      prot_ro = 0;
951
  else
952
      prot_ro = PAGE_READ;
953

    
954
  switch (ap) {
955
  case 0:
956
      if (access_type == 1)
957
          return 0;
958
      switch ((env->cp15.c1_sys >> 8) & 3) {
959
      case 1:
960
          return is_user ? 0 : PAGE_READ;
961
      case 2:
962
          return PAGE_READ;
963
      default:
964
          return 0;
965
      }
966
  case 1:
967
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
968
  case 2:
969
      if (is_user)
970
          return prot_ro;
971
      else
972
          return PAGE_READ | PAGE_WRITE;
973
  case 3:
974
      return PAGE_READ | PAGE_WRITE;
975
  case 4: /* Reserved.  */
976
      return 0;
977
  case 5:
978
      return is_user ? 0 : prot_ro;
979
  case 6:
980
      return prot_ro;
981
  case 7:
982
      if (!arm_feature (env, ARM_FEATURE_V6K))
983
          return 0;
984
      return prot_ro;
985
  default:
986
      abort();
987
  }
988
}
989

    
990
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
991
{
992
    uint32_t table;
993

    
994
    if (address & env->cp15.c2_mask)
995
        table = env->cp15.c2_base1 & 0xffffc000;
996
    else
997
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
998

    
999
    table |= (address >> 18) & 0x3ffc;
1000
    return table;
1001
}
1002

    
1003
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
1004
                            int is_user, uint32_t *phys_ptr, int *prot,
1005
                            target_ulong *page_size)
1006
{
1007
    int code;
1008
    uint32_t table;
1009
    uint32_t desc;
1010
    int type;
1011
    int ap;
1012
    int domain;
1013
    int domain_prot;
1014
    uint32_t phys_addr;
1015

    
1016
    /* Pagetable walk.  */
1017
    /* Lookup l1 descriptor.  */
1018
    table = get_level1_table_address(env, address);
1019
    desc = ldl_phys(table);
1020
    type = (desc & 3);
1021
    domain = (desc >> 5) & 0x0f;
1022
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
1023
    if (type == 0) {
1024
        /* Section translation fault.  */
1025
        code = 5;
1026
        goto do_fault;
1027
    }
1028
    if (domain_prot == 0 || domain_prot == 2) {
1029
        if (type == 2)
1030
            code = 9; /* Section domain fault.  */
1031
        else
1032
            code = 11; /* Page domain fault.  */
1033
        goto do_fault;
1034
    }
1035
    if (type == 2) {
1036
        /* 1Mb section.  */
1037
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1038
        ap = (desc >> 10) & 3;
1039
        code = 13;
1040
        *page_size = 1024 * 1024;
1041
    } else {
1042
        /* Lookup l2 entry.  */
1043
        if (type == 1) {
1044
            /* Coarse pagetable.  */
1045
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1046
        } else {
1047
            /* Fine pagetable.  */
1048
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1049
        }
1050
        desc = ldl_phys(table);
1051
        switch (desc & 3) {
1052
        case 0: /* Page translation fault.  */
1053
            code = 7;
1054
            goto do_fault;
1055
        case 1: /* 64k page.  */
1056
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1057
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1058
            *page_size = 0x10000;
1059
            break;
1060
        case 2: /* 4k page.  */
1061
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1062
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1063
            *page_size = 0x1000;
1064
            break;
1065
        case 3: /* 1k page.  */
1066
            if (type == 1) {
1067
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1068
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1069
                } else {
1070
                    /* Page translation fault.  */
1071
                    code = 7;
1072
                    goto do_fault;
1073
                }
1074
            } else {
1075
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1076
            }
1077
            ap = (desc >> 4) & 3;
1078
            *page_size = 0x400;
1079
            break;
1080
        default:
1081
            /* Never happens, but compiler isn't smart enough to tell.  */
1082
            abort();
1083
        }
1084
        code = 15;
1085
    }
1086
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
1087
    if (!*prot) {
1088
        /* Access permission fault.  */
1089
        goto do_fault;
1090
    }
1091
    *prot |= PAGE_EXEC;
1092
    *phys_ptr = phys_addr;
1093
    return 0;
1094
do_fault:
1095
    return code | (domain << 4);
1096
}
1097

    
1098
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
1099
                            int is_user, uint32_t *phys_ptr, int *prot,
1100
                            target_ulong *page_size)
1101
{
1102
    int code;
1103
    uint32_t table;
1104
    uint32_t desc;
1105
    uint32_t xn;
1106
    int type;
1107
    int ap;
1108
    int domain;
1109
    int domain_prot;
1110
    uint32_t phys_addr;
1111

    
1112
    /* Pagetable walk.  */
1113
    /* Lookup l1 descriptor.  */
1114
    table = get_level1_table_address(env, address);
1115
    desc = ldl_phys(table);
1116
    type = (desc & 3);
1117
    if (type == 0) {
1118
        /* Section translation fault.  */
1119
        code = 5;
1120
        domain = 0;
1121
        goto do_fault;
1122
    } else if (type == 2 && (desc & (1 << 18))) {
1123
        /* Supersection.  */
1124
        domain = 0;
1125
    } else {
1126
        /* Section or page.  */
1127
        domain = (desc >> 5) & 0x0f;
1128
    }
1129
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
1130
    if (domain_prot == 0 || domain_prot == 2) {
1131
        if (type == 2)
1132
            code = 9; /* Section domain fault.  */
1133
        else
1134
            code = 11; /* Page domain fault.  */
1135
        goto do_fault;
1136
    }
1137
    if (type == 2) {
1138
        if (desc & (1 << 18)) {
1139
            /* Supersection.  */
1140
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1141
            *page_size = 0x1000000;
1142
        } else {
1143
            /* Section.  */
1144
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1145
            *page_size = 0x100000;
1146
        }
1147
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1148
        xn = desc & (1 << 4);
1149
        code = 13;
1150
    } else {
1151
        /* Lookup l2 entry.  */
1152
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1153
        desc = ldl_phys(table);
1154
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1155
        switch (desc & 3) {
1156
        case 0: /* Page translation fault.  */
1157
            code = 7;
1158
            goto do_fault;
1159
        case 1: /* 64k page.  */
1160
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1161
            xn = desc & (1 << 15);
1162
            *page_size = 0x10000;
1163
            break;
1164
        case 2: case 3: /* 4k page.  */
1165
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1166
            xn = desc & 1;
1167
            *page_size = 0x1000;
1168
            break;
1169
        default:
1170
            /* Never happens, but compiler isn't smart enough to tell.  */
1171
            abort();
1172
        }
1173
        code = 15;
1174
    }
1175
    if (domain_prot == 3) {
1176
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1177
    } else {
1178
        if (xn && access_type == 2)
1179
            goto do_fault;
1180

    
1181
        /* The simplified model uses AP[0] as an access control bit.  */
1182
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
1183
            /* Access flag fault.  */
1184
            code = (code == 15) ? 6 : 3;
1185
            goto do_fault;
1186
        }
1187
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
1188
        if (!*prot) {
1189
            /* Access permission fault.  */
1190
            goto do_fault;
1191
        }
1192
        if (!xn) {
1193
            *prot |= PAGE_EXEC;
1194
        }
1195
    }
1196
    *phys_ptr = phys_addr;
1197
    return 0;
1198
do_fault:
1199
    return code | (domain << 4);
1200
}
1201

    
1202
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
1203
                             int is_user, uint32_t *phys_ptr, int *prot)
1204
{
1205
    int n;
1206
    uint32_t mask;
1207
    uint32_t base;
1208

    
1209
    *phys_ptr = address;
1210
    for (n = 7; n >= 0; n--) {
1211
        base = env->cp15.c6_region[n];
1212
        if ((base & 1) == 0)
1213
            continue;
1214
        mask = 1 << ((base >> 1) & 0x1f);
1215
        /* Keep this shift separate from the above to avoid an
1216
           (undefined) << 32.  */
1217
        mask = (mask << 1) - 1;
1218
        if (((base ^ address) & ~mask) == 0)
1219
            break;
1220
    }
1221
    if (n < 0)
1222
        return 2;
1223

    
1224
    if (access_type == 2) {
1225
        mask = env->cp15.c5_insn;
1226
    } else {
1227
        mask = env->cp15.c5_data;
1228
    }
1229
    mask = (mask >> (n * 4)) & 0xf;
1230
    switch (mask) {
1231
    case 0:
1232
        return 1;
1233
    case 1:
1234
        if (is_user)
1235
          return 1;
1236
        *prot = PAGE_READ | PAGE_WRITE;
1237
        break;
1238
    case 2:
1239
        *prot = PAGE_READ;
1240
        if (!is_user)
1241
            *prot |= PAGE_WRITE;
1242
        break;
1243
    case 3:
1244
        *prot = PAGE_READ | PAGE_WRITE;
1245
        break;
1246
    case 5:
1247
        if (is_user)
1248
            return 1;
1249
        *prot = PAGE_READ;
1250
        break;
1251
    case 6:
1252
        *prot = PAGE_READ;
1253
        break;
1254
    default:
1255
        /* Bad permission.  */
1256
        return 1;
1257
    }
1258
    *prot |= PAGE_EXEC;
1259
    return 0;
1260
}
1261

    
1262
static inline int get_phys_addr(CPUState *env, uint32_t address,
1263
                                int access_type, int is_user,
1264
                                uint32_t *phys_ptr, int *prot,
1265
                                target_ulong *page_size)
1266
{
1267
    /* Fast Context Switch Extension.  */
1268
    if (address < 0x02000000)
1269
        address += env->cp15.c13_fcse;
1270

    
1271
    if ((env->cp15.c1_sys & 1) == 0) {
1272
        /* MMU/MPU disabled.  */
1273
        *phys_ptr = address;
1274
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1275
        *page_size = TARGET_PAGE_SIZE;
1276
        return 0;
1277
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1278
        *page_size = TARGET_PAGE_SIZE;
1279
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1280
                                 prot);
1281
    } else if (env->cp15.c1_sys & (1 << 23)) {
1282
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1283
                                prot, page_size);
1284
    } else {
1285
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1286
                                prot, page_size);
1287
    }
1288
}
1289

    
1290
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
1291
                              int access_type, int mmu_idx)
1292
{
1293
    uint32_t phys_addr;
1294
    target_ulong page_size;
1295
    int prot;
1296
    int ret, is_user;
1297

    
1298
    is_user = mmu_idx == MMU_USER_IDX;
1299
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
1300
                        &page_size);
1301
    if (ret == 0) {
1302
        /* Map a single [sub]page.  */
1303
        phys_addr &= ~(uint32_t)0x3ff;
1304
        address &= ~(uint32_t)0x3ff;
1305
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
1306
        return 0;
1307
    }
1308

    
1309
    if (access_type == 2) {
1310
        env->cp15.c5_insn = ret;
1311
        env->cp15.c6_insn = address;
1312
        env->exception_index = EXCP_PREFETCH_ABORT;
1313
    } else {
1314
        env->cp15.c5_data = ret;
1315
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1316
            env->cp15.c5_data |= (1 << 11);
1317
        env->cp15.c6_data = address;
1318
        env->exception_index = EXCP_DATA_ABORT;
1319
    }
1320
    return 1;
1321
}
1322

    
1323
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1324
{
1325
    uint32_t phys_addr;
1326
    target_ulong page_size;
1327
    int prot;
1328
    int ret;
1329

    
1330
    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1331

    
1332
    if (ret != 0)
1333
        return -1;
1334

    
1335
    return phys_addr;
1336
}
1337

    
1338
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
1339
{
1340
    int cp_num = (insn >> 8) & 0xf;
1341
    int cp_info = (insn >> 5) & 7;
1342
    int src = (insn >> 16) & 0xf;
1343
    int operand = insn & 0xf;
1344

    
1345
    if (env->cp[cp_num].cp_write)
1346
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1347
                                 cp_info, src, operand, val);
1348
}
1349

    
1350
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
1351
{
1352
    int cp_num = (insn >> 8) & 0xf;
1353
    int cp_info = (insn >> 5) & 7;
1354
    int dest = (insn >> 16) & 0xf;
1355
    int operand = insn & 0xf;
1356

    
1357
    if (env->cp[cp_num].cp_read)
1358
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1359
                                       cp_info, dest, operand);
1360
    return 0;
1361
}
1362

    
1363
/* Return basic MPU access permission bits.  */
1364
static uint32_t simple_mpu_ap_bits(uint32_t val)
1365
{
1366
    uint32_t ret;
1367
    uint32_t mask;
1368
    int i;
1369
    ret = 0;
1370
    mask = 3;
1371
    for (i = 0; i < 16; i += 2) {
1372
        ret |= (val >> i) & mask;
1373
        mask <<= 2;
1374
    }
1375
    return ret;
1376
}
1377

    
1378
/* Pad basic MPU access permission bits to extended format.  */
1379
static uint32_t extended_mpu_ap_bits(uint32_t val)
1380
{
1381
    uint32_t ret;
1382
    uint32_t mask;
1383
    int i;
1384
    ret = 0;
1385
    mask = 3;
1386
    for (i = 0; i < 16; i += 2) {
1387
        ret |= (val & mask) << i;
1388
        mask <<= 2;
1389
    }
1390
    return ret;
1391
}
1392

    
1393
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
1394
{
1395
    int op1;
1396
    int op2;
1397
    int crm;
1398

    
1399
    op1 = (insn >> 21) & 7;
1400
    op2 = (insn >> 5) & 7;
1401
    crm = insn & 0xf;
1402
    switch ((insn >> 16) & 0xf) {
1403
    case 0:
1404
        /* ID codes.  */
1405
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1406
            break;
1407
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1408
            break;
1409
        if (arm_feature(env, ARM_FEATURE_V7)
1410
                && op1 == 2 && crm == 0 && op2 == 0) {
1411
            env->cp15.c0_cssel = val & 0xf;
1412
            break;
1413
        }
1414
        goto bad_reg;
1415
    case 1: /* System configuration.  */
1416
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1417
            op2 = 0;
1418
        switch (op2) {
1419
        case 0:
1420
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1421
                env->cp15.c1_sys = val;
1422
            /* ??? Lots of these bits are not implemented.  */
1423
            /* This may enable/disable the MMU, so do a TLB flush.  */
1424
            tlb_flush(env, 1);
1425
            break;
1426
        case 1: /* Auxiliary control register.  */
1427
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1428
                env->cp15.c1_xscaleauxcr = val;
1429
                break;
1430
            }
1431
            /* Not implemented.  */
1432
            break;
1433
        case 2:
1434
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1435
                goto bad_reg;
1436
            if (env->cp15.c1_coproc != val) {
1437
                env->cp15.c1_coproc = val;
1438
                /* ??? Is this safe when called from within a TB?  */
1439
                tb_flush(env);
1440
            }
1441
            break;
1442
        default:
1443
            goto bad_reg;
1444
        }
1445
        break;
1446
    case 2: /* MMU Page table control / MPU cache control.  */
1447
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1448
            switch (op2) {
1449
            case 0:
1450
                env->cp15.c2_data = val;
1451
                break;
1452
            case 1:
1453
                env->cp15.c2_insn = val;
1454
                break;
1455
            default:
1456
                goto bad_reg;
1457
            }
1458
        } else {
1459
            switch (op2) {
1460
            case 0:
1461
                env->cp15.c2_base0 = val;
1462
                break;
1463
            case 1:
1464
                env->cp15.c2_base1 = val;
1465
                break;
1466
            case 2:
1467
                val &= 7;
1468
                env->cp15.c2_control = val;
1469
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1470
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1471
                break;
1472
            default:
1473
                goto bad_reg;
1474
            }
1475
        }
1476
        break;
1477
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1478
        env->cp15.c3 = val;
1479
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1480
        break;
1481
    case 4: /* Reserved.  */
1482
        goto bad_reg;
1483
    case 5: /* MMU Fault status / MPU access permission.  */
1484
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1485
            op2 = 0;
1486
        switch (op2) {
1487
        case 0:
1488
            if (arm_feature(env, ARM_FEATURE_MPU))
1489
                val = extended_mpu_ap_bits(val);
1490
            env->cp15.c5_data = val;
1491
            break;
1492
        case 1:
1493
            if (arm_feature(env, ARM_FEATURE_MPU))
1494
                val = extended_mpu_ap_bits(val);
1495
            env->cp15.c5_insn = val;
1496
            break;
1497
        case 2:
1498
            if (!arm_feature(env, ARM_FEATURE_MPU))
1499
                goto bad_reg;
1500
            env->cp15.c5_data = val;
1501
            break;
1502
        case 3:
1503
            if (!arm_feature(env, ARM_FEATURE_MPU))
1504
                goto bad_reg;
1505
            env->cp15.c5_insn = val;
1506
            break;
1507
        default:
1508
            goto bad_reg;
1509
        }
1510
        break;
1511
    case 6: /* MMU Fault address / MPU base/size.  */
1512
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1513
            if (crm >= 8)
1514
                goto bad_reg;
1515
            env->cp15.c6_region[crm] = val;
1516
        } else {
1517
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1518
                op2 = 0;
1519
            switch (op2) {
1520
            case 0:
1521
                env->cp15.c6_data = val;
1522
                break;
1523
            case 1: /* ??? This is WFAR on armv6 */
1524
            case 2:
1525
                env->cp15.c6_insn = val;
1526
                break;
1527
            default:
1528
                goto bad_reg;
1529
            }
1530
        }
1531
        break;
1532
    case 7: /* Cache control.  */
1533
        env->cp15.c15_i_max = 0x000;
1534
        env->cp15.c15_i_min = 0xff0;
1535
        if (op1 != 0) {
1536
            goto bad_reg;
1537
        }
1538
        /* No cache, so nothing to do except VA->PA translations. */
1539
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
1540
            switch (crm) {
1541
            case 4:
1542
                if (arm_feature(env, ARM_FEATURE_V7)) {
1543
                    env->cp15.c7_par = val & 0xfffff6ff;
1544
                } else {
1545
                    env->cp15.c7_par = val & 0xfffff1ff;
1546
                }
1547
                break;
1548
            case 8: {
1549
                uint32_t phys_addr;
1550
                target_ulong page_size;
1551
                int prot;
1552
                int ret, is_user = op2 & 2;
1553
                int access_type = op2 & 1;
1554

    
1555
                if (op2 & 4) {
1556
                    /* Other states are only available with TrustZone */
1557
                    goto bad_reg;
1558
                }
1559
                ret = get_phys_addr(env, val, access_type, is_user,
1560
                                    &phys_addr, &prot, &page_size);
1561
                if (ret == 0) {
1562
                    /* We do not set any attribute bits in the PAR */
1563
                    if (page_size == (1 << 24)
1564
                        && arm_feature(env, ARM_FEATURE_V7)) {
1565
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1566
                    } else {
1567
                        env->cp15.c7_par = phys_addr & 0xfffff000;
1568
                    }
1569
                } else {
1570
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1571
                                       ((ret & (12 << 1)) >> 6) |
1572
                                       ((ret & 0xf) << 1) | 1;
1573
                }
1574
                break;
1575
            }
1576
            }
1577
        }
1578
        break;
1579
    case 8: /* MMU TLB control.  */
1580
        switch (op2) {
1581
        case 0: /* Invalidate all.  */
1582
            tlb_flush(env, 0);
1583
            break;
1584
        case 1: /* Invalidate single TLB entry.  */
1585
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1586
            break;
1587
        case 2: /* Invalidate on ASID.  */
1588
            tlb_flush(env, val == 0);
1589
            break;
1590
        case 3: /* Invalidate single entry on MVA.  */
1591
            /* ??? This is like case 1, but ignores ASID.  */
1592
            tlb_flush(env, 1);
1593
            break;
1594
        default:
1595
            goto bad_reg;
1596
        }
1597
        break;
1598
    case 9:
1599
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1600
            break;
1601
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1602
            break; /* Ignore ReadBuffer access */
1603
        switch (crm) {
1604
        case 0: /* Cache lockdown.  */
1605
            switch (op1) {
1606
            case 0: /* L1 cache.  */
1607
                switch (op2) {
1608
                case 0:
1609
                    env->cp15.c9_data = val;
1610
                    break;
1611
                case 1:
1612
                    env->cp15.c9_insn = val;
1613
                    break;
1614
                default:
1615
                    goto bad_reg;
1616
                }
1617
                break;
1618
            case 1: /* L2 cache.  */
1619
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1620
                break;
1621
            default:
1622
                goto bad_reg;
1623
            }
1624
            break;
1625
        case 1: /* TCM memory region registers.  */
1626
            /* Not implemented.  */
1627
            goto bad_reg;
1628
        case 12: /* Performance monitor control */
1629
            /* Performance monitors are implementation defined in v7,
1630
             * but with an ARM recommended set of registers, which we
1631
             * follow (although we don't actually implement any counters)
1632
             */
1633
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1634
                goto bad_reg;
1635
            }
1636
            switch (op2) {
1637
            case 0: /* performance monitor control register */
1638
                /* only the DP, X, D and E bits are writable */
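                /* 0x39 covers bit 0 (E), bit 3 (D), bit 4 (X) and bit 5 (DP);
                 * the write-only reset bits P and C are ignored since no
                 * counters are implemented.
                 */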
1639
                env->cp15.c9_pmcr &= ~0x39;
1640
                env->cp15.c9_pmcr |= (val & 0x39);
1641
                break;
1642
            case 1: /* Count enable set register */
1643
                val &= (1 << 31);
1644
                env->cp15.c9_pmcnten |= val;
1645
                break;
1646
            case 2: /* Count enable clear */
1647
                val &= (1 << 31);
1648
                env->cp15.c9_pmcnten &= ~val;
1649
                break;
1650
            case 3: /* Overflow flag status */
1651
                env->cp15.c9_pmovsr &= ~val;
1652
                break;
1653
            case 4: /* Software increment */
1654
                /* RAZ/WI since we don't implement the software-count event */
1655
                break;
1656
            case 5: /* Event counter selection register */
1657
                /* Since we don't implement any events, writing to this register
1658
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1659
                 */
1660
                break;
1661
            default:
1662
                goto bad_reg;
1663
            }
1664
            break;
1665
        case 13: /* Performance counters */
1666
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1667
                goto bad_reg;
1668
            }
1669
            switch (op2) {
1670
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
1671
                break;
1672
            case 1: /* Event type select */
1673
                env->cp15.c9_pmxevtyper = val & 0xff;
1674
                break;
1675
            case 2: /* Event count register */
1676
                /* Unimplemented (we have no events), RAZ/WI */
1677
                break;
1678
            default:
1679
                goto bad_reg;
1680
            }
1681
            break;
1682
        case 14: /* Performance monitor control */
1683
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1684
                goto bad_reg;
1685
            }
1686
            switch (op2) {
1687
            case 0: /* user enable */
1688
                env->cp15.c9_pmuserenr = val & 1;
1689
                /* changes access rights for cp registers, so flush tbs */
1690
                tb_flush(env);
1691
                break;
1692
            case 1: /* interrupt enable set */
1693
                /* We have no event counters so only the C bit can be changed */
1694
                val &= (1 << 31);
1695
                env->cp15.c9_pminten |= val;
1696
                break;
1697
            case 2: /* interrupt enable clear */
1698
                val &= (1 << 31);
1699
                env->cp15.c9_pminten &= ~val;
1700
                break;
1701
            }
1702
            break;
1703
        default:
1704
            goto bad_reg;
1705
        }
1706
        break;
1707
    case 10: /* MMU TLB lockdown.  */
1708
        /* ??? TLB lockdown not implemented.  */
1709
        break;
1710
    case 12: /* Reserved.  */
1711
        goto bad_reg;
1712
    case 13: /* Process ID.  */
1713
        switch (op2) {
1714
        case 0:
1715
            /* Unlike real hardware the qemu TLB uses virtual addresses,
1716
               not modified virtual addresses, so this causes a TLB flush.
1717
             */
1718
            if (env->cp15.c13_fcse != val)
1719
              tlb_flush(env, 1);
1720
            env->cp15.c13_fcse = val;
1721
            break;
1722
        case 1:
1723
            /* This changes the ASID, so do a TLB flush.  */
1724
            if (env->cp15.c13_context != val
1725
                && !arm_feature(env, ARM_FEATURE_MPU))
1726
              tlb_flush(env, 0);
1727
            env->cp15.c13_context = val;
1728
            break;
1729
        default:
1730
            goto bad_reg;
1731
        }
1732
        break;
1733
    case 14: /* Reserved.  */
1734
        goto bad_reg;
1735
    case 15: /* Implementation specific.  */
1736
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1737
            if (op2 == 0 && crm == 1) {
1738
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1739
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1740
                    tb_flush(env);
1741
                    env->cp15.c15_cpar = val & 0x3fff;
1742
                }
1743
                break;
1744
            }
1745
            goto bad_reg;
1746
        }
1747
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1748
            switch (crm) {
1749
            case 0:
1750
                break;
1751
            case 1: /* Set TI925T configuration.  */
1752
                env->cp15.c15_ticonfig = val & 0xe7;
1753
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1754
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1755
                break;
1756
            case 2: /* Set I_max.  */
1757
                env->cp15.c15_i_max = val;
1758
                break;
1759
            case 3: /* Set I_min.  */
1760
                env->cp15.c15_i_min = val;
1761
                break;
1762
            case 4: /* Set thread-ID.  */
1763
                env->cp15.c15_threadid = val & 0xffff;
1764
                break;
1765
            case 8: /* Wait-for-interrupt (deprecated).  */
1766
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1767
                break;
1768
            default:
1769
                goto bad_reg;
1770
            }
1771
        }
1772
        break;
1773
    }
1774
    return;
1775
bad_reg:
1776
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1777
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1778
              (insn >> 16) & 0xf, crm, op1, op2);
1779
}
1780

    
1781
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1782
{
1783
    int op1;
1784
    int op2;
1785
    int crm;
1786

    
1787
    op1 = (insn >> 21) & 7;
1788
    op2 = (insn >> 5) & 7;
1789
    crm = insn & 0xf;
1790
    switch ((insn >> 16) & 0xf) {
1791
    case 0: /* ID codes.  */
1792
        switch (op1) {
1793
        case 0:
1794
            switch (crm) {
1795
            case 0:
1796
                switch (op2) {
1797
                case 0: /* Device ID.  */
1798
                    return env->cp15.c0_cpuid;
1799
                case 1: /* Cache Type.  */
1800
                    return env->cp15.c0_cachetype;
1801
                case 2: /* TCM status.  */
1802
                    return 0;
1803
                case 3: /* TLB type register.  */
1804
                    return 0; /* No lockable TLB entries.  */
1805
                case 5: /* MPIDR */
1806
                    /* The MPIDR was standardised in v7; prior to
1807
                     * this it was implemented only in the 11MPCore.
1808
                     * For all other pre-v7 cores it does not exist.
1809
                     */
1810
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1811
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1812
                        int mpidr = env->cpu_index;
1813
                        /* We don't support setting cluster ID ([8..11])
1814
                         * so these bits always RAZ.
1815
                         */
1816
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1817
                            mpidr |= (1 << 31);
1818
                            /* Cores which are uniprocessor (non-coherent)
1819
                             * but still implement the MP extensions set
1820
                             * bit 30. (For instance, A9UP.) However we do
1821
                             * not currently model any of those cores.
1822
                             */
1823
                        }
1824
                        return mpidr;
1825
                    }
1826
                    /* otherwise fall through to the unimplemented-reg case */
1827
                default:
1828
                    goto bad_reg;
1829
                }
1830
            case 1:
1831
                if (!arm_feature(env, ARM_FEATURE_V6))
1832
                    goto bad_reg;
1833
                return env->cp15.c0_c1[op2];
1834
            case 2:
1835
                if (!arm_feature(env, ARM_FEATURE_V6))
1836
                    goto bad_reg;
1837
                return env->cp15.c0_c2[op2];
1838
            case 3: case 4: case 5: case 6: case 7:
1839
                return 0;
1840
            default:
1841
                goto bad_reg;
1842
            }
1843
        case 1:
1844
            /* These registers aren't documented on arm11 cores.  However
1845
               Linux looks at them anyway.  */
1846
            if (!arm_feature(env, ARM_FEATURE_V6))
1847
                goto bad_reg;
1848
            if (crm != 0)
1849
                goto bad_reg;
1850
            if (!arm_feature(env, ARM_FEATURE_V7))
1851
                return 0;
1852

    
1853
            switch (op2) {
1854
            case 0:
1855
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1856
            case 1:
1857
                return env->cp15.c0_clid;
1858
            case 7:
1859
                return 0;
1860
            }
1861
            goto bad_reg;
1862
        case 2:
1863
            if (op2 != 0 || crm != 0)
1864
                goto bad_reg;
1865
            return env->cp15.c0_cssel;
1866
        default:
1867
            goto bad_reg;
1868
        }
1869
    case 1: /* System configuration.  */
1870
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1871
            op2 = 0;
1872
        switch (op2) {
1873
        case 0: /* Control register.  */
1874
            return env->cp15.c1_sys;
1875
        case 1: /* Auxiliary control register.  */
1876
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1877
                return env->cp15.c1_xscaleauxcr;
1878
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1879
                goto bad_reg;
1880
            switch (ARM_CPUID(env)) {
1881
            case ARM_CPUID_ARM1026:
1882
                return 1;
1883
            case ARM_CPUID_ARM1136:
1884
            case ARM_CPUID_ARM1136_R2:
1885
            case ARM_CPUID_ARM1176:
1886
                return 7;
1887
            case ARM_CPUID_ARM11MPCORE:
1888
                return 1;
1889
            case ARM_CPUID_CORTEXA8:
1890
                return 2;
1891
            case ARM_CPUID_CORTEXA9:
1892
                return 0;
1893
            default:
1894
                goto bad_reg;
1895
            }
1896
        case 2: /* Coprocessor access register.  */
1897
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1898
                goto bad_reg;
1899
            return env->cp15.c1_coproc;
1900
        default:
1901
            goto bad_reg;
1902
        }
1903
    case 2: /* MMU Page table control / MPU cache control.  */
1904
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1905
            switch (op2) {
1906
            case 0:
1907
                return env->cp15.c2_data;
1908
                break;
1909
            case 1:
1910
                return env->cp15.c2_insn;
1911
                break;
1912
            default:
1913
                goto bad_reg;
1914
            }
1915
        } else {
1916
            switch (op2) {
1917
            case 0:
1918
                return env->cp15.c2_base0;
1919
            case 1:
1920
                return env->cp15.c2_base1;
1921
            case 2:
1922
                return env->cp15.c2_control;
1923
            default:
1924
                goto bad_reg;
1925
            }
1926
        }
1927
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1928
        return env->cp15.c3;
1929
    case 4: /* Reserved.  */
1930
        goto bad_reg;
1931
    case 5: /* MMU Fault status / MPU access permission.  */
1932
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1933
            op2 = 0;
1934
        switch (op2) {
1935
        case 0:
1936
            if (arm_feature(env, ARM_FEATURE_MPU))
1937
                return simple_mpu_ap_bits(env->cp15.c5_data);
1938
            return env->cp15.c5_data;
1939
        case 1:
1940
            if (arm_feature(env, ARM_FEATURE_MPU))
1941
                return simple_mpu_ap_bits(env->cp15.c5_insn);
1942
            return env->cp15.c5_insn;
1943
        case 2:
1944
            if (!arm_feature(env, ARM_FEATURE_MPU))
1945
                goto bad_reg;
1946
            return env->cp15.c5_data;
1947
        case 3:
1948
            if (!arm_feature(env, ARM_FEATURE_MPU))
1949
                goto bad_reg;
1950
            return env->cp15.c5_insn;
1951
        default:
1952
            goto bad_reg;
1953
        }
1954
    case 6: /* MMU Fault address.  */
1955
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1956
            if (crm >= 8)
1957
                goto bad_reg;
1958
            return env->cp15.c6_region[crm];
1959
        } else {
1960
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1961
                op2 = 0;
1962
            switch (op2) {
1963
            case 0:
1964
                return env->cp15.c6_data;
1965
            case 1:
1966
                if (arm_feature(env, ARM_FEATURE_V6)) {
1967
                    /* Watchpoint Fault Address.  */
1968
                    return 0; /* Not implemented.  */
1969
                } else {
1970
                    /* Instruction Fault Address.  */
1971
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1972
                       shouldn't do any harm.  */
1973
                    return env->cp15.c6_insn;
1974
                }
1975
            case 2:
1976
                if (arm_feature(env, ARM_FEATURE_V6)) {
1977
                    /* Instruction Fault Address.  */
1978
                    return env->cp15.c6_insn;
1979
                } else {
1980
                    goto bad_reg;
1981
                }
1982
            default:
1983
                goto bad_reg;
1984
            }
1985
        }
1986
    case 7: /* Cache control.  */
1987
        if (crm == 4 && op1 == 0 && op2 == 0) {
1988
            return env->cp15.c7_par;
1989
        }
1990
        /* FIXME: Should only clear Z flag if destination is r15.  */
1991
        env->ZF = 0;
1992
        return 0;
1993
    case 8: /* MMU TLB control.  */
1994
        goto bad_reg;
1995
    case 9:
1996
        switch (crm) {
1997
        case 0: /* Cache lockdown */
1998
            switch (op1) {
1999
            case 0: /* L1 cache.  */
2000
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2001
                    return 0;
2002
                }
2003
                switch (op2) {
2004
                case 0:
2005
                    return env->cp15.c9_data;
2006
                case 1:
2007
                    return env->cp15.c9_insn;
2008
                default:
2009
                    goto bad_reg;
2010
                }
2011
            case 1: /* L2 cache */
2012
                if (crm != 0) {
2013
                    goto bad_reg;
2014
                }
2015
                /* L2 Lockdown and Auxiliary control.  */
2016
                return 0;
2017
            default:
2018
                goto bad_reg;
2019
            }
2020
            break;
2021
        case 12: /* Performance monitor control */
2022
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2023
                goto bad_reg;
2024
            }
2025
            switch (op2) {
2026
            case 0: /* performance monitor control register */
2027
                return env->cp15.c9_pmcr;
2028
            case 1: /* count enable set */
2029
            case 2: /* count enable clear */
2030
                return env->cp15.c9_pmcnten;
2031
            case 3: /* overflow flag status */
2032
                return env->cp15.c9_pmovsr;
2033
            case 4: /* software increment */
2034
            case 5: /* event counter selection register */
2035
                return 0; /* Unimplemented, RAZ/WI */
2036
            default:
2037
                goto bad_reg;
2038
            }
2039
        case 13: /* Performance counters */
2040
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2041
                goto bad_reg;
2042
            }
2043
            switch (op2) {
2044
            case 1: /* Event type select */
2045
                return env->cp15.c9_pmxevtyper;
2046
            case 0: /* Cycle count register */
2047
            case 2: /* Event count register */
2048
                /* Unimplemented, so RAZ/WI */
2049
                return 0;
2050
            default:
2051
                goto bad_reg;
2052
            }
2053
        case 14: /* Performance monitor control */
2054
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2055
                goto bad_reg;
2056
            }
2057
            switch (op2) {
2058
            case 0: /* user enable */
2059
                return env->cp15.c9_pmuserenr;
2060
            case 1: /* interrupt enable set */
2061
            case 2: /* interrupt enable clear */
2062
                return env->cp15.c9_pminten;
2063
            default:
2064
                goto bad_reg;
2065
            }
2066
        default:
2067
            goto bad_reg;
2068
        }
2069
        break;
2070
    case 10: /* MMU TLB lockdown.  */
2071
        /* ??? TLB lockdown not implemented.  */
2072
        return 0;
2073
    case 11: /* TCM DMA control.  */
2074
    case 12: /* Reserved.  */
2075
        goto bad_reg;
2076
    case 13: /* Process ID.  */
2077
        switch (op2) {
2078
        case 0:
2079
            return env->cp15.c13_fcse;
2080
        case 1:
2081
            return env->cp15.c13_context;
2082
        default:
2083
            goto bad_reg;
2084
        }
2085
    case 14: /* Reserved.  */
2086
        goto bad_reg;
2087
    case 15: /* Implementation specific.  */
2088
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2089
            if (op2 == 0 && crm == 1)
2090
                return env->cp15.c15_cpar;
2091

    
2092
            goto bad_reg;
2093
        }
2094
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2095
            switch (crm) {
2096
            case 0:
2097
                return 0;
2098
            case 1: /* Read TI925T configuration.  */
2099
                return env->cp15.c15_ticonfig;
2100
            case 2: /* Read I_max.  */
2101
                return env->cp15.c15_i_max;
2102
            case 3: /* Read I_min.  */
2103
                return env->cp15.c15_i_min;
2104
            case 4: /* Read thread-ID.  */
2105
                return env->cp15.c15_threadid;
2106
            case 8: /* TI925T_status */
2107
                return 0;
2108
            }
2109
            /* TODO: Peripheral port remap register:
2110
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2111
             * controller base address at $rn & ~0xfff and map size of
2112
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2113
            goto bad_reg;
2114
        }
2115
        return 0;
2116
    }
2117
bad_reg:
2118
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2119
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2120
              (insn >> 16) & 0xf, crm, op1, op2);
2121
    return 0;
2122
}
2123

    
2124
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
2125
{
2126
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2127
        env->regs[13] = val;
2128
    } else {
2129
        env->banked_r13[bank_number(mode)] = val;
2130
    }
2131
}
2132

    
2133
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
2134
{
2135
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2136
        return env->regs[13];
2137
    } else {
2138
        return env->banked_r13[bank_number(mode)];
2139
    }
2140
}
2141

    
2142
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
2143
{
2144
    switch (reg) {
2145
    case 0: /* APSR */
2146
        return xpsr_read(env) & 0xf8000000;
2147
    case 1: /* IAPSR */
2148
        return xpsr_read(env) & 0xf80001ff;
2149
    case 2: /* EAPSR */
2150
        return xpsr_read(env) & 0xff00fc00;
2151
    case 3: /* xPSR */
2152
        return xpsr_read(env) & 0xff00fdff;
2153
    case 5: /* IPSR */
2154
        return xpsr_read(env) & 0x000001ff;
2155
    case 6: /* EPSR */
2156
        return xpsr_read(env) & 0x0700fc00;
2157
    case 7: /* IEPSR */
2158
        return xpsr_read(env) & 0x0700edff;
2159
    case 8: /* MSP */
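        /* v7m.current_sp is non-zero when the process stack is active,
         * i.e. regs[13] holds PSP and other_sp holds MSP (and vice versa
         * when it is zero), hence the selection below.
         */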
2160
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2161
    case 9: /* PSP */
2162
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2163
    case 16: /* PRIMASK */
2164
        return (env->uncached_cpsr & CPSR_I) != 0;
2165
    case 17: /* BASEPRI */
2166
    case 18: /* BASEPRI_MAX */
2167
        return env->v7m.basepri;
2168
    case 19: /* FAULTMASK */
2169
        return (env->uncached_cpsr & CPSR_F) != 0;
2170
    case 20: /* CONTROL */
2171
        return env->v7m.control;
2172
    default:
2173
        /* ??? For debugging only.  */
2174
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2175
        return 0;
2176
    }
2177
}
2178

    
2179
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
2180
{
2181
    switch (reg) {
2182
    case 0: /* APSR */
2183
        xpsr_write(env, val, 0xf8000000);
2184
        break;
2185
    case 1: /* IAPSR */
2186
        xpsr_write(env, val, 0xf8000000);
2187
        break;
2188
    case 2: /* EAPSR */
2189
        xpsr_write(env, val, 0xfe00fc00);
2190
        break;
2191
    case 3: /* xPSR */
2192
        xpsr_write(env, val, 0xfe00fc00);
2193
        break;
2194
    case 5: /* IPSR */
2195
        /* IPSR bits are readonly.  */
2196
        break;
2197
    case 6: /* EPSR */
2198
        xpsr_write(env, val, 0x0600fc00);
2199
        break;
2200
    case 7: /* IEPSR */
2201
        xpsr_write(env, val, 0x0600fc00);
2202
        break;
2203
    case 8: /* MSP */
2204
        if (env->v7m.current_sp)
2205
            env->v7m.other_sp = val;
2206
        else
2207
            env->regs[13] = val;
2208
        break;
2209
    case 9: /* PSP */
2210
        if (env->v7m.current_sp)
2211
            env->regs[13] = val;
2212
        else
2213
            env->v7m.other_sp = val;
2214
        break;
2215
    case 16: /* PRIMASK */
2216
        if (val & 1)
2217
            env->uncached_cpsr |= CPSR_I;
2218
        else
2219
            env->uncached_cpsr &= ~CPSR_I;
2220
        break;
2221
    case 17: /* BASEPRI */
2222
        env->v7m.basepri = val & 0xff;
2223
        break;
2224
    case 18: /* BASEPRI_MAX */
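        /* BASEPRI_MAX only tightens the mask: the write is ignored unless
         * the new value is non-zero and numerically lower than the current
         * BASEPRI (or BASEPRI is still zero).
         */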
2225
        val &= 0xff;
2226
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2227
            env->v7m.basepri = val;
2228
        break;
2229
    case 19: /* FAULTMASK */
2230
        if (val & 1)
2231
            env->uncached_cpsr |= CPSR_F;
2232
        else
2233
            env->uncached_cpsr &= ~CPSR_F;
2234
        break;
2235
    case 20: /* CONTROL */
2236
        env->v7m.control = val & 3;
2237
        switch_v7m_sp(env, (val & 2) != 0);
2238
        break;
2239
    default:
2240
        /* ??? For debugging only.  */
2241
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2242
        return;
2243
    }
2244
}
2245

    
2246
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2247
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2248
                void *opaque)
2249
{
2250
    if (cpnum < 0 || cpnum > 14) {
2251
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2252
        return;
2253
    }
2254

    
2255
    env->cp[cpnum].cp_read = cp_read;
2256
    env->cp[cpnum].cp_write = cp_write;
2257
    env->cp[cpnum].opaque = opaque;
2258
}
2259

    
2260
#endif
2261

    
2262
/* Note that signed overflow is undefined in C.  The following routines are
2263
   careful to use unsigned types where modulo arithmetic is required.
2264
   Failure to do so _will_ break on newer gcc.  */
2265

    
2266
/* Signed saturating arithmetic.  */
2267

    
2268
/* Perform 16-bit signed saturating addition.  */
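/* Overflow is detected when both operands have the same sign but the
   result's sign differs; e.g. 0x7000 + 0x2000 saturates to 0x7fff and
   0x8000 + 0xfff0 saturates to 0x8000 (illustrative values only).  */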
2269
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2270
{
2271
    uint16_t res;
2272

    
2273
    res = a + b;
2274
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2275
        if (a & 0x8000)
2276
            res = 0x8000;
2277
        else
2278
            res = 0x7fff;
2279
    }
2280
    return res;
2281
}
2282

    
2283
/* Perform 8-bit signed saturating addition.  */
2284
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2285
{
2286
    uint8_t res;
2287

    
2288
    res = a + b;
2289
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2290
        if (a & 0x80)
2291
            res = 0x80;
2292
        else
2293
            res = 0x7f;
2294
    }
2295
    return res;
2296
}
2297

    
2298
/* Perform 16-bit signed saturating subtraction.  */
2299
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2300
{
2301
    uint16_t res;
2302

    
2303
    res = a - b;
2304
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2305
        if (a & 0x8000)
2306
            res = 0x8000;
2307
        else
2308
            res = 0x7fff;
2309
    }
2310
    return res;
2311
}
2312

    
2313
/* Perform 8-bit signed saturating subtraction.  */
2314
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2315
{
2316
    uint8_t res;
2317

    
2318
    res = a - b;
2319
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2320
        if (a & 0x80)
2321
            res = 0x80;
2322
        else
2323
            res = 0x7f;
2324
    }
2325
    return res;
2326
}
2327

    
2328
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2329
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2330
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2331
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2332
#define PFX q
2333

    
2334
#include "op_addsub.h"
2335

    
2336
/* Unsigned saturating arithmetic.  */
2337
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2338
{
2339
    uint16_t res;
2340
    res = a + b;
2341
    if (res < a)
2342
        res = 0xffff;
2343
    return res;
2344
}
2345

    
2346
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2347
{
2348
    if (a > b)
2349
        return a - b;
2350
    else
2351
        return 0;
2352
}
2353

    
2354
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2355
{
2356
    uint8_t res;
2357
    res = a + b;
2358
    if (res < a)
2359
        res = 0xff;
2360
    return res;
2361
}
2362

    
2363
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2364
{
2365
    if (a > b)
2366
        return a - b;
2367
    else
2368
        return 0;
2369
}
2370

    
2371
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2372
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2373
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2374
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2375
#define PFX uq
2376

    
2377
#include "op_addsub.h"
2378

    
2379
/* Signed modulo arithmetic.  */
2380
#define SARITH16(a, b, n, op) do { \
2381
    int32_t sum; \
2382
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2383
    RESULT(sum, n, 16); \
2384
    if (sum >= 0) \
2385
        ge |= 3 << (n * 2); \
2386
    } while(0)
2387

    
2388
#define SARITH8(a, b, n, op) do { \
2389
    int32_t sum; \
2390
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2391
    RESULT(sum, n, 8); \
2392
    if (sum >= 0) \
2393
        ge |= 1 << n; \
2394
    } while(0)
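
/* For the signed modulo forms the GE flags record, per lane, whether the
   full-precision result was non-negative; a following SEL uses them.  */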
2395

    
2396

    
2397
#define ADD16(a, b, n) SARITH16(a, b, n, +)
2398
#define SUB16(a, b, n) SARITH16(a, b, n, -)
2399
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2400
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2401
#define PFX s
2402
#define ARITH_GE
2403

    
2404
#include "op_addsub.h"
2405

    
2406
/* Unsigned modulo arithmetic.  */
2407
#define ADD16(a, b, n) do { \
2408
    uint32_t sum; \
2409
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2410
    RESULT(sum, n, 16); \
2411
    if ((sum >> 16) == 1) \
2412
        ge |= 3 << (n * 2); \
2413
    } while(0)
2414

    
2415
#define ADD8(a, b, n) do { \
2416
    uint32_t sum; \
2417
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2418
    RESULT(sum, n, 8); \
2419
    if ((sum >> 8) == 1) \
2420
        ge |= 1 << n; \
2421
    } while(0)
2422

    
2423
#define SUB16(a, b, n) do { \
2424
    uint32_t sum; \
2425
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2426
    RESULT(sum, n, 16); \
2427
    if ((sum >> 16) == 0) \
2428
        ge |= 3 << (n * 2); \
2429
    } while(0)
2430

    
2431
#define SUB8(a, b, n) do { \
2432
    uint32_t sum; \
2433
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2434
    RESULT(sum, n, 8); \
2435
    if ((sum >> 8) == 0) \
2436
        ge |= 1 << n; \
2437
    } while(0)
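
/* For the unsigned modulo forms GE records a carry out of each lane on
   addition and the absence of a borrow on subtraction, again for SEL.  */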
2438

    
2439
#define PFX u
2440
#define ARITH_GE
2441

    
2442
#include "op_addsub.h"
2443

    
2444
/* Halved signed arithmetic.  */
2445
#define ADD16(a, b, n) \
2446
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2447
#define SUB16(a, b, n) \
2448
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2449
#define ADD8(a, b, n) \
2450
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2451
#define SUB8(a, b, n) \
2452
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2453
#define PFX sh
2454

    
2455
#include "op_addsub.h"
2456

    
2457
/* Halved unsigned arithmetic.  */
2458
#define ADD16(a, b, n) \
2459
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2460
#define SUB16(a, b, n) \
2461
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2462
#define ADD8(a, b, n) \
2463
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2464
#define SUB8(a, b, n) \
2465
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2466
#define PFX uh
2467

    
2468
#include "op_addsub.h"
2469

    
2470
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2471
{
2472
    if (a > b)
2473
        return a - b;
2474
    else
2475
        return b - a;
2476
}
2477

    
2478
/* Unsigned sum of absolute byte differences.  */
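/* Worked example (illustrative): usad8(0x01020304, 0x04030201) sums
   |4-1| + |3-2| + |2-3| + |1-4| = 8.  */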
2479
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2480
{
2481
    uint32_t sum;
2482
    sum = do_usad(a, b);
2483
    sum += do_usad(a >> 8, b >> 8);
2484
    sum += do_usad(a >> 16, b >> 16);
2485
    sum += do_usad(a >> 24, b >> 24);
2486
    return sum;
2487
}
2488

    
2489
/* For ARMv6 SEL instruction.  */
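/* Each of the four GE flag bits selects the corresponding byte from a
   (bit set) or from b (bit clear).  */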
2490
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2491
{
2492
    uint32_t mask;
2493

    
2494
    mask = 0;
2495
    if (flags & 1)
2496
        mask |= 0xff;
2497
    if (flags & 2)
2498
        mask |= 0xff00;
2499
    if (flags & 4)
2500
        mask |= 0xff0000;
2501
    if (flags & 8)
2502
        mask |= 0xff000000;
2503
    return (a & mask) | (b & ~mask);
2504
}
2505

    
2506
uint32_t HELPER(logicq_cc)(uint64_t val)
2507
{
2508
    return (val >> 32) | (val != 0);
2509
}
2510

    
2511
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have an "s" suffix, double precision a
   "d" suffix.  */
2514

    
2515
/* Convert host exception flags to vfp form.  */
2516
static inline int vfp_exceptbits_from_host(int host_bits)
2517
{
2518
    int target_bits = 0;
2519

    
2520
    if (host_bits & float_flag_invalid)
2521
        target_bits |= 1;
2522
    if (host_bits & float_flag_divbyzero)
2523
        target_bits |= 2;
2524
    if (host_bits & float_flag_overflow)
2525
        target_bits |= 4;
2526
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2527
        target_bits |= 8;
2528
    if (host_bits & float_flag_inexact)
2529
        target_bits |= 0x10;
2530
    if (host_bits & float_flag_input_denormal)
2531
        target_bits |= 0x80;
2532
    return target_bits;
2533
}
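
/* FPSCR bits 0-4 and 7 are the cumulative IOC, DZC, OFC, UFC, IXC and
   IDC exception flags, which is the layout produced above.  */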
2534

    
2535
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
2536
{
2537
    int i;
2538
    uint32_t fpscr;
2539

    
2540
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2541
            | (env->vfp.vec_len << 16)
2542
            | (env->vfp.vec_stride << 20);
2543
    i = get_float_exception_flags(&env->vfp.fp_status);
2544
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2545
    fpscr |= vfp_exceptbits_from_host(i);
2546
    return fpscr;
2547
}
2548

    
2549
uint32_t vfp_get_fpscr(CPUState *env)
2550
{
2551
    return HELPER(vfp_get_fpscr)(env);
2552
}
2553

    
2554
/* Convert vfp exception flags to target form.  */
2555
static inline int vfp_exceptbits_to_host(int target_bits)
2556
{
2557
    int host_bits = 0;
2558

    
2559
    if (target_bits & 1)
2560
        host_bits |= float_flag_invalid;
2561
    if (target_bits & 2)
2562
        host_bits |= float_flag_divbyzero;
2563
    if (target_bits & 4)
2564
        host_bits |= float_flag_overflow;
2565
    if (target_bits & 8)
2566
        host_bits |= float_flag_underflow;
2567
    if (target_bits & 0x10)
2568
        host_bits |= float_flag_inexact;
2569
    if (target_bits & 0x80)
2570
        host_bits |= float_flag_input_denormal;
2571
    return host_bits;
2572
}
2573

    
2574
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
2575
{
2576
    int i;
2577
    uint32_t changed;
2578

    
2579
    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2580
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2581
    env->vfp.vec_len = (val >> 16) & 7;
2582
    env->vfp.vec_stride = (val >> 20) & 3;
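    /* Len lives in FPSCR[18:16] and Stride in FPSCR[21:20] (short vector
     * support); the remaining fields are handled below.
     */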
2583

    
2584
    changed ^= val;
2585
    if (changed & (3 << 22)) {
2586
        i = (val >> 22) & 3;
2587
        switch (i) {
2588
        case 0:
2589
            i = float_round_nearest_even;
2590
            break;
2591
        case 1:
2592
            i = float_round_up;
2593
            break;
2594
        case 2:
2595
            i = float_round_down;
2596
            break;
2597
        case 3:
2598
            i = float_round_to_zero;
2599
            break;
2600
        }
2601
        set_float_rounding_mode(i, &env->vfp.fp_status);
2602
    }
2603
    if (changed & (1 << 24)) {
2604
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2605
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2606
    }
2607
    if (changed & (1 << 25))
2608
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2609

    
2610
    i = vfp_exceptbits_to_host(val);
2611
    set_float_exception_flags(i, &env->vfp.fp_status);
2612
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2613
}
2614

    
2615
void vfp_set_fpscr(CPUState *env, uint32_t val)
2616
{
2617
    HELPER(vfp_set_fpscr)(env, val);
2618
}
2619

    
2620
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2621

    
2622
#define VFP_BINOP(name) \
2623
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2624
{ \
2625
    float_status *fpst = fpstp; \
2626
    return float32_ ## name(a, b, fpst); \
2627
} \
2628
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2629
{ \
2630
    float_status *fpst = fpstp; \
2631
    return float64_ ## name(a, b, fpst); \
2632
}
2633
VFP_BINOP(add)
2634
VFP_BINOP(sub)
2635
VFP_BINOP(mul)
2636
VFP_BINOP(div)
2637
#undef VFP_BINOP
2638

    
2639
float32 VFP_HELPER(neg, s)(float32 a)
2640
{
2641
    return float32_chs(a);
2642
}
2643

    
2644
float64 VFP_HELPER(neg, d)(float64 a)
2645
{
2646
    return float64_chs(a);
2647
}
2648

    
2649
float32 VFP_HELPER(abs, s)(float32 a)
2650
{
2651
    return float32_abs(a);
2652
}
2653

    
2654
float64 VFP_HELPER(abs, d)(float64 a)
2655
{
2656
    return float64_abs(a);
2657
}
2658

    
2659
float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2660
{
2661
    return float32_sqrt(a, &env->vfp.fp_status);
2662
}
2663

    
2664
float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2665
{
2666
    return float64_sqrt(a, &env->vfp.fp_status);
2667
}
2668

    
2669
/* XXX: check quiet/signaling case */
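/* The NZCV encodings written to FPSCR[31:28] are: equal 0110, less
   than 1000, greater than 0010, unordered 0011.  */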
2670
#define DO_VFP_cmp(p, type) \
2671
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
2672
{ \
2673
    uint32_t flags; \
2674
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2675
    case 0: flags = 0x6; break; \
2676
    case -1: flags = 0x8; break; \
2677
    case 1: flags = 0x2; break; \
2678
    default: case 2: flags = 0x3; break; \
2679
    } \
2680
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2681
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2682
} \
2683
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
2684
{ \
2685
    uint32_t flags; \
2686
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2687
    case 0: flags = 0x6; break; \
2688
    case -1: flags = 0x8; break; \
2689
    case 1: flags = 0x2; break; \
2690
    default: case 2: flags = 0x3; break; \
2691
    } \
2692
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2693
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2694
}
2695
DO_VFP_cmp(s, float32)
2696
DO_VFP_cmp(d, float64)
2697
#undef DO_VFP_cmp
2698

    
2699
/* Integer to float and float to integer conversions */
2700

    
2701
#define CONV_ITOF(name, fsz, sign) \
2702
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2703
{ \
2704
    float_status *fpst = fpstp; \
2705
    return sign##int32_to_##float##fsz(x, fpst); \
2706
}
2707

    
2708
#define CONV_FTOI(name, fsz, sign, round) \
2709
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2710
{ \
2711
    float_status *fpst = fpstp; \
2712
    if (float##fsz##_is_any_nan(x)) { \
2713
        float_raise(float_flag_invalid, fpst); \
2714
        return 0; \
2715
    } \
2716
    return float##fsz##_to_##sign##int32##round(x, fpst); \
2717
}
2718

    
2719
#define FLOAT_CONVS(name, p, fsz, sign) \
2720
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2721
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2722
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2723

    
2724
FLOAT_CONVS(si, s, 32, )
2725
FLOAT_CONVS(si, d, 64, )
2726
FLOAT_CONVS(ui, s, 32, u)
2727
FLOAT_CONVS(ui, d, 64, u)
2728

    
2729
#undef CONV_ITOF
2730
#undef CONV_FTOI
2731
#undef FLOAT_CONVS
2732

    
2733
/* floating point conversion */
2734
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2735
{
2736
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2737
    /* ARM requires that S<->D conversion of any kind of NaN generates
2738
     * a quiet NaN by forcing the most significant frac bit to 1.
2739
     */
2740
    return float64_maybe_silence_nan(r);
2741
}
2742

    
2743
float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2744
{
2745
    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2746
    /* ARM requires that S<->D conversion of any kind of NaN generates
2747
     * a quiet NaN by forcing the most significant frac bit to 1.
2748
     */
2749
    return float32_maybe_silence_nan(r);
2750
}
2751

    
2752
/* VFP3 fixed point conversion.  */
2753
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2754
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2755
                                    void *fpstp) \
2756
{ \
2757
    float_status *fpst = fpstp; \
2758
    float##fsz tmp; \
2759
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2760
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2761
} \
2762
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2763
                                       void *fpstp) \
2764
{ \
2765
    float_status *fpst = fpstp; \
2766
    float##fsz tmp; \
2767
    if (float##fsz##_is_any_nan(x)) { \
2768
        float_raise(float_flag_invalid, fpst); \
2769
        return 0; \
2770
    } \
2771
    tmp = float##fsz##_scalbn(x, shift, fpst); \
2772
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2773
}
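
/* These conversions scale by 2^-shift on the way to floating point and
   by 2^shift (then truncate towards zero) on the way back, via scalbn.  */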
2774

    
2775
VFP_CONV_FIX(sh, d, 64, int16, )
2776
VFP_CONV_FIX(sl, d, 64, int32, )
2777
VFP_CONV_FIX(uh, d, 64, uint16, u)
2778
VFP_CONV_FIX(ul, d, 64, uint32, u)
2779
VFP_CONV_FIX(sh, s, 32, int16, )
2780
VFP_CONV_FIX(sl, s, 32, int32, )
2781
VFP_CONV_FIX(uh, s, 32, uint16, u)
2782
VFP_CONV_FIX(ul, s, 32, uint32, u)
2783
#undef VFP_CONV_FIX
2784

    
2785
/* Half precision conversions.  */
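/* FPSCR bit 26 is the AHP bit: when it is clear we use IEEE half-precision
   semantics (and must return quiet NaNs), when set the ARM alternative
   format with no infinities or NaNs.  */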
2786
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
2787
{
2788
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2789
    float32 r = float16_to_float32(make_float16(a), ieee, s);
2790
    if (ieee) {
2791
        return float32_maybe_silence_nan(r);
2792
    }
2793
    return r;
2794
}
2795

    
2796
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
2797
{
2798
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2799
    float16 r = float32_to_float16(a, ieee, s);
2800
    if (ieee) {
2801
        r = float16_maybe_silence_nan(r);
2802
    }
2803
    return float16_val(r);
2804
}
2805

    
2806
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2807
{
2808
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2809
}
2810

    
2811
uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
2812
{
2813
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2814
}
2815

    
2816
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2817
{
2818
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2819
}
2820

    
2821
uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
2822
{
2823
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2824
}
2825

    
2826
#define float32_two make_float32(0x40000000)
2827
#define float32_three make_float32(0x40400000)
2828
#define float32_one_point_five make_float32(0x3fc00000)
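
/* VRECPS and VRSQRTS below return the Newton-Raphson step values
   2 - a*b and (3 - a*b) / 2, with the infinity-times-zero special cases
   pinned to 2.0 and 1.5 respectively.  */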
2829

    
2830
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
2831
{
2832
    float_status *s = &env->vfp.standard_fp_status;
2833
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2834
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2835
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2836
            float_raise(float_flag_input_denormal, s);
2837
        }
2838
        return float32_two;
2839
    }
2840
    return float32_sub(float32_two, float32_mul(a, b, s), s);
2841
}
2842

    
2843
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
2844
{
2845
    float_status *s = &env->vfp.standard_fp_status;
2846
    float32 product;
2847
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2848
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2849
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2850
            float_raise(float_flag_input_denormal, s);
2851
        }
2852
        return float32_one_point_five;
2853
    }
2854
    product = float32_mul(a, b, s);
2855
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2856
}
2857

    
2858
/* NEON helpers.  */
2859

    
2860
/* Constants 256 and 512 are used in some helpers; we avoid relying on
2861
 * int->float conversions at run-time.  */
2862
#define float64_256 make_float64(0x4070000000000000LL)
2863
#define float64_512 make_float64(0x4080000000000000LL)
2864

    
2865
/* The algorithm that must be used to calculate the estimate
2866
 * is specified by the ARM ARM.
2867
 */
2868
static float64 recip_estimate(float64 a, CPUState *env)
2869
{
2870
    /* These calculations mustn't set any fp exception flags,
2871
     * so we use a local copy of the fp_status.
2872
     */
2873
    float_status dummy_status = env->vfp.standard_fp_status;
2874
    float_status *s = &dummy_status;
2875
    /* q = (int)(a * 512.0) */
2876
    float64 q = float64_mul(float64_512, a, s);
2877
    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2878

    
2879
    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2880
    q = int64_to_float64(q_int, s);
2881
    q = float64_add(q, float64_half, s);
2882
    q = float64_div(q, float64_512, s);
2883
    q = float64_div(float64_one, q, s);
2884

    
2885
    /* s = (int)(256.0 * r + 0.5) */
2886
    q = float64_mul(q, float64_256, s);
2887
    q = float64_add(q, float64_half, s);
2888
    q_int = float64_to_int64_round_to_zero(q, s);
2889

    
2890
    /* return (double)s / 256.0 */
2891
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2892
}
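
/* Worked example (illustrative, not from the ARM ARM tables): for
   a = 0.5 the code above computes q = 256, r = 512/256.5 ~= 1.9961,
   s = 511 and returns 511/256 = 1.99609375.  */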
2893

    
2894
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2895
{
2896
    float_status *s = &env->vfp.standard_fp_status;
2897
    float64 f64;
2898
    uint32_t val32 = float32_val(a);
2899

    
2900
    int result_exp;
2901
    int a_exp = (val32  & 0x7f800000) >> 23;
2902
    int sign = val32 & 0x80000000;
2903

    
2904
    if (float32_is_any_nan(a)) {
2905
        if (float32_is_signaling_nan(a)) {
2906
            float_raise(float_flag_invalid, s);
2907
        }
2908
        return float32_default_nan;
2909
    } else if (float32_is_infinity(a)) {
2910
        return float32_set_sign(float32_zero, float32_is_neg(a));
2911
    } else if (float32_is_zero_or_denormal(a)) {
2912
        if (!float32_is_zero(a)) {
2913
            float_raise(float_flag_input_denormal, s);
2914
        }
2915
        float_raise(float_flag_divbyzero, s);
2916
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2917
    } else if (a_exp >= 253) {
2918
        float_raise(float_flag_underflow, s);
2919
        return float32_set_sign(float32_zero, float32_is_neg(a));
2920
    }
2921

    
2922
    f64 = make_float64((0x3feULL << 52)
2923
                       | ((int64_t)(val32 & 0x7fffff) << 29));
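    /* The operand's fraction has been repacked as a double in [0.5, 1.0)
     * (biased exponent 0x3fe); the result exponent is reconstructed
     * separately below.
     */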
2924

    
2925
    result_exp = 253 - a_exp;
2926

    
2927
    f64 = recip_estimate(f64, env);
2928

    
2929
    val32 = sign
2930
        | ((result_exp & 0xff) << 23)
2931
        | ((float64_val(f64) >> 29) & 0x7fffff);
2932
    return make_float32(val32);
2933
}
2934

    
2935
/* The algorithm that must be used to calculate the estimate
2936
 * is specified by the ARM ARM.
2937
 */
2938
static float64 recip_sqrt_estimate(float64 a, CPUState *env)
2939
{
2940
    /* These calculations mustn't set any fp exception flags,
2941
     * so we use a local copy of the fp_status.
2942
     */
2943
    float_status dummy_status = env->vfp.standard_fp_status;
2944
    float_status *s = &dummy_status;
2945
    float64 q;
2946
    int64_t q_int;
2947

    
2948
    if (float64_lt(a, float64_half, s)) {
2949
        /* range 0.25 <= a < 0.5 */
2950

    
2951
        /* a in units of 1/512 rounded down */
2952
        /* q0 = (int)(a * 512.0);  */
2953
        q = float64_mul(float64_512, a, s);
2954
        q_int = float64_to_int64_round_to_zero(q, s);
2955

    
2956
        /* reciprocal root r */
2957
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2958
        q = int64_to_float64(q_int, s);
2959
        q = float64_add(q, float64_half, s);
2960
        q = float64_div(q, float64_512, s);
2961
        q = float64_sqrt(q, s);
2962
        q = float64_div(float64_one, q, s);
2963
    } else {
2964
        /* range 0.5 <= a < 1.0 */
2965

    
2966
        /* a in units of 1/256 rounded down */
2967
        /* q1 = (int)(a * 256.0); */
2968
        q = float64_mul(float64_256, a, s);
2969
        q_int = float64_to_int64_round_to_zero(q, s);
2970

    
2971
        /* reciprocal root r */
2972
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2973
        q = int64_to_float64(q_int, s);
2974
        q = float64_add(q, float64_half, s);
2975
        q = float64_div(q, float64_256, s);
2976
        q = float64_sqrt(q, s);
2977
        q = float64_div(float64_one, q, s);
2978
    }
2979
    /* r in units of 1/256 rounded to nearest */
2980
    /* s = (int)(256.0 * r + 0.5); */
2981

    
2982
    q = float64_mul(q, float64_256, s);
2983
    q = float64_add(q, float64_half, s);
2984
    q_int = float64_to_int64_round_to_zero(q, s);
2985

    
2986
    /* return (double)s / 256.0;*/
2987
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2988
}
2989

    
2990
float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
2991
{
2992
    float_status *s = &env->vfp.standard_fp_status;
2993
    int result_exp;
2994
    float64 f64;
2995
    uint32_t val;
2996
    uint64_t val64;
2997

    
2998
    val = float32_val(a);
2999

    
3000
    if (float32_is_any_nan(a)) {
3001
        if (float32_is_signaling_nan(a)) {
3002
            float_raise(float_flag_invalid, s);
3003
        }
3004
        return float32_default_nan;
3005
    } else if (float32_is_zero_or_denormal(a)) {
3006
        if (!float32_is_zero(a)) {
3007
            float_raise(float_flag_input_denormal, s);
3008
        }
3009
        float_raise(float_flag_divbyzero, s);
3010
        return float32_set_sign(float32_infinity, float32_is_neg(a));
3011
    } else if (float32_is_neg(a)) {
3012
        float_raise(float_flag_invalid, s);
3013
        return float32_default_nan;
3014
    } else if (float32_is_infinity(a)) {
3015
        return float32_zero;
3016
    }
3017

    
3018
    /* Normalize to a double-precision value between 0.25 and 1.0,
3019
     * preserving the parity of the exponent.  */
3020
    if ((val & 0x800000) == 0) {
3021
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3022
                           | (0x3feULL << 52)
3023
                           | ((uint64_t)(val & 0x7fffff) << 29));
3024
    } else {
3025
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3026
                           | (0x3fdULL << 52)
3027
                           | ((uint64_t)(val & 0x7fffff) << 29));
3028
    }
3029

    
3030
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
3031

    
3032
    f64 = recip_sqrt_estimate(f64, env);
3033

    
3034
    val64 = float64_val(f64);
3035

    
3036
    val = ((result_exp & 0xff) << 23)
3037
        | ((val64 >> 29)  & 0x7fffff);
3038
    return make_float32(val);
3039
}
3040

    
3041
uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
3042
{
3043
    float64 f64;
3044

    
3045
    if ((a & 0x80000000) == 0) {
3046
        return 0xffffffff;
3047
    }
3048

    
3049
    f64 = make_float64((0x3feULL << 52)
3050
                       | ((int64_t)(a & 0x7fffffff) << 21));
3051

    
3052
    f64 = recip_estimate (f64, env);
3053

    
3054
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3055
}
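
/* recpe_u32 treats its input as a 0.32 fixed-point fraction: values below
   0.5 (top bit clear) saturate to all ones, otherwise the fraction is
   scaled into [0.5, 1.0) and fed to recip_estimate (a loose description
   of the code above).  */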
3056

    
3057
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
3058
{
3059
    float64 f64;
3060

    
3061
    if ((a & 0xc0000000) == 0) {
3062
        return 0xffffffff;
3063
    }
3064

    
3065
    if (a & 0x80000000) {
3066
        f64 = make_float64((0x3feULL << 52)
3067
                           | ((uint64_t)(a & 0x7fffffff) << 21));
3068
    } else { /* bits 31-30 == '01' */
3069
        f64 = make_float64((0x3fdULL << 52)
3070
                           | ((uint64_t)(a & 0x3fffffff) << 22));
3071
    }
3072

    
3073
    f64 = recip_sqrt_estimate(f64, env);
3074

    
3075
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3076
}
3077

    
3078
/* VFPv4 fused multiply-accumulate */
3079
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
3080
{
3081
    float_status *fpst = fpstp;
3082
    return float32_muladd(a, b, c, 0, fpst);
3083
}
3084

    
3085
float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
3086
{
3087
    float_status *fpst = fpstp;
3088
    return float64_muladd(a, b, c, 0, fpst);
3089
}
3090

    
3091
void HELPER(set_teecr)(CPUState *env, uint32_t val)
3092
{
3093
    val &= 1;
3094
    if (env->teecr != val) {
3095
        env->teecr = val;
3096
        tb_flush(env);
3097
    }
3098
}
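
/* TEECR is a single bit (XED); the TB flush ensures that code already
   translated with the old TEECR value is not reused after a change.  */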