target-arm/helper.c @ 906879a9


1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "gdbstub.h"
7
#include "helper.h"
8
#include "qemu-common.h"
9
#include "host-utils.h"
10
#if !defined(CONFIG_USER_ONLY)
11
#include "hw/loader.h"
12
#endif
13

    
14
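/* Per-core reset values for the CP15 c0 ID register blocks: the *_c0_c1
 * arrays hold CRm=1 (ID_PFR0..ID_MMFR3) and the *_c0_c2 arrays hold
 * CRm=2 (ID_ISAR0..ID_ISAR5 plus two reserved slots), indexed by opc2.
 * They are copied into env->cp15.c0_c1/c0_c2 in cpu_reset_model_id().
 */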
static uint32_t cortexa9_cp15_c0_c1[8] =
15
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };
16

    
17
static uint32_t cortexa9_cp15_c0_c2[8] =
18
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };
19

    
20
static uint32_t cortexa8_cp15_c0_c1[8] =
21
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
22

    
23
static uint32_t cortexa8_cp15_c0_c2[8] =
24
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
25

    
26
static uint32_t mpcore_cp15_c0_c1[8] =
27
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
28

    
29
static uint32_t mpcore_cp15_c0_c2[8] =
30
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
31

    
32
static uint32_t arm1136_cp15_c0_c1[8] =
33
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
34

    
35
static uint32_t arm1136_cp15_c0_c2[8] =
36
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
37

    
38
static uint32_t arm1176_cp15_c0_c1[8] =
39
{ 0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0 };
40

    
41
static uint32_t arm1176_cp15_c0_c2[8] =
42
{ 0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0 };
43

    
44
static uint32_t cpu_arm_find_by_name(const char *name);
45

    
46
static inline void set_feature(CPUARMState *env, int feature)
47
{
48
    env->features |= 1u << feature;
49
}
50

    
51
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
52
{
53
    env->cp15.c0_cpuid = id;
54
    switch (id) {
55
    case ARM_CPUID_ARM926:
56
        set_feature(env, ARM_FEATURE_V4T);
57
        set_feature(env, ARM_FEATURE_V5);
58
        set_feature(env, ARM_FEATURE_VFP);
59
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
60
        env->cp15.c0_cachetype = 0x1dd20d2;
61
        env->cp15.c1_sys = 0x00090078;
62
        break;
63
    case ARM_CPUID_ARM946:
64
        set_feature(env, ARM_FEATURE_V4T);
65
        set_feature(env, ARM_FEATURE_V5);
66
        set_feature(env, ARM_FEATURE_MPU);
67
        env->cp15.c0_cachetype = 0x0f004006;
68
        env->cp15.c1_sys = 0x00000078;
69
        break;
70
    case ARM_CPUID_ARM1026:
71
        set_feature(env, ARM_FEATURE_V4T);
72
        set_feature(env, ARM_FEATURE_V5);
73
        set_feature(env, ARM_FEATURE_VFP);
74
        set_feature(env, ARM_FEATURE_AUXCR);
75
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
76
        env->cp15.c0_cachetype = 0x1dd20d2;
77
        env->cp15.c1_sys = 0x00090078;
78
        break;
79
    case ARM_CPUID_ARM1136:
80
        /* This is the 1136 r1, which is a v6K core */
81
        set_feature(env, ARM_FEATURE_V6K);
82
        /* Fall through */
83
    case ARM_CPUID_ARM1136_R2:
84
        /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, i.e. an
85
         * older core than plain "arm1136". In particular this does not
86
         * have the v6K features.
87
         */
88
        set_feature(env, ARM_FEATURE_V4T);
89
        set_feature(env, ARM_FEATURE_V5);
90
        set_feature(env, ARM_FEATURE_V6);
91
        set_feature(env, ARM_FEATURE_VFP);
92
        set_feature(env, ARM_FEATURE_AUXCR);
93
        /* These ID register values are correct for 1136 but may be wrong
94
         * for 1136_r2 (in particular r0p2 does not actually implement most
95
         * of the ID registers).
96
         */
97
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
98
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
99
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
100
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
101
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
102
        env->cp15.c0_cachetype = 0x1dd20d2;
103
        env->cp15.c1_sys = 0x00050078;
104
        break;
105
    case ARM_CPUID_ARM1176:
106
        set_feature(env, ARM_FEATURE_V4T);
107
        set_feature(env, ARM_FEATURE_V5);
108
        set_feature(env, ARM_FEATURE_V6);
109
        set_feature(env, ARM_FEATURE_V6K);
110
        set_feature(env, ARM_FEATURE_VFP);
111
        set_feature(env, ARM_FEATURE_AUXCR);
112
        set_feature(env, ARM_FEATURE_VAPA);
113
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5;
114
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
115
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
116
        memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t));
117
        memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t));
118
        env->cp15.c0_cachetype = 0x1dd20d2;
119
        env->cp15.c1_sys = 0x00050078;
120
        break;
121
    case ARM_CPUID_ARM11MPCORE:
122
        set_feature(env, ARM_FEATURE_V4T);
123
        set_feature(env, ARM_FEATURE_V5);
124
        set_feature(env, ARM_FEATURE_V6);
125
        set_feature(env, ARM_FEATURE_V6K);
126
        set_feature(env, ARM_FEATURE_VFP);
127
        set_feature(env, ARM_FEATURE_AUXCR);
128
        set_feature(env, ARM_FEATURE_VAPA);
129
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
130
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
131
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
132
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
133
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
134
        env->cp15.c0_cachetype = 0x1dd20d2;
135
        break;
136
    case ARM_CPUID_CORTEXA8:
137
        set_feature(env, ARM_FEATURE_V4T);
138
        set_feature(env, ARM_FEATURE_V5);
139
        set_feature(env, ARM_FEATURE_V6);
140
        set_feature(env, ARM_FEATURE_V6K);
141
        set_feature(env, ARM_FEATURE_V7);
142
        set_feature(env, ARM_FEATURE_AUXCR);
143
        set_feature(env, ARM_FEATURE_THUMB2);
144
        set_feature(env, ARM_FEATURE_VFP);
145
        set_feature(env, ARM_FEATURE_VFP3);
146
        set_feature(env, ARM_FEATURE_NEON);
147
        set_feature(env, ARM_FEATURE_THUMB2EE);
148
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
149
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
150
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
151
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
152
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
153
        env->cp15.c0_cachetype = 0x82048004;
154
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
155
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
156
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
157
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
158
        env->cp15.c1_sys = 0x00c50078;
159
        break;
160
    case ARM_CPUID_CORTEXA9:
161
        set_feature(env, ARM_FEATURE_V4T);
162
        set_feature(env, ARM_FEATURE_V5);
163
        set_feature(env, ARM_FEATURE_V6);
164
        set_feature(env, ARM_FEATURE_V6K);
165
        set_feature(env, ARM_FEATURE_V7);
166
        set_feature(env, ARM_FEATURE_AUXCR);
167
        set_feature(env, ARM_FEATURE_THUMB2);
168
        set_feature(env, ARM_FEATURE_VFP);
169
        set_feature(env, ARM_FEATURE_VFP3);
170
        set_feature(env, ARM_FEATURE_VFP_FP16);
171
        set_feature(env, ARM_FEATURE_NEON);
172
        set_feature(env, ARM_FEATURE_THUMB2EE);
173
        /* Note that A9 supports the MP extensions even for
174
         * A9UP and single-core A9MP (which are both different
175
         * and valid configurations; we don't model A9UP).
176
         */
177
        set_feature(env, ARM_FEATURE_V7MP);
178
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
179
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
180
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
181
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
182
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
183
        env->cp15.c0_cachetype = 0x80038003;
184
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
185
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
186
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
187
        env->cp15.c1_sys = 0x00c50078;
188
        break;
189
    case ARM_CPUID_CORTEXM3:
190
        set_feature(env, ARM_FEATURE_V4T);
191
        set_feature(env, ARM_FEATURE_V5);
192
        set_feature(env, ARM_FEATURE_V6);
193
        set_feature(env, ARM_FEATURE_THUMB2);
194
        set_feature(env, ARM_FEATURE_V7);
195
        set_feature(env, ARM_FEATURE_M);
196
        set_feature(env, ARM_FEATURE_DIV);
197
        break;
198
    case ARM_CPUID_ANY: /* For userspace emulation.  */
199
        set_feature(env, ARM_FEATURE_V4T);
200
        set_feature(env, ARM_FEATURE_V5);
201
        set_feature(env, ARM_FEATURE_V6);
202
        set_feature(env, ARM_FEATURE_V6K);
203
        set_feature(env, ARM_FEATURE_V7);
204
        set_feature(env, ARM_FEATURE_THUMB2);
205
        set_feature(env, ARM_FEATURE_VFP);
206
        set_feature(env, ARM_FEATURE_VFP3);
207
        set_feature(env, ARM_FEATURE_VFP_FP16);
208
        set_feature(env, ARM_FEATURE_NEON);
209
        set_feature(env, ARM_FEATURE_THUMB2EE);
210
        set_feature(env, ARM_FEATURE_DIV);
211
        set_feature(env, ARM_FEATURE_V7MP);
212
        break;
213
    case ARM_CPUID_TI915T:
214
    case ARM_CPUID_TI925T:
215
        set_feature(env, ARM_FEATURE_V4T);
216
        set_feature(env, ARM_FEATURE_OMAPCP);
217
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
218
        env->cp15.c0_cachetype = 0x5109149;
219
        env->cp15.c1_sys = 0x00000070;
220
        env->cp15.c15_i_max = 0x000;
221
        env->cp15.c15_i_min = 0xff0;
222
        break;
223
    case ARM_CPUID_PXA250:
224
    case ARM_CPUID_PXA255:
225
    case ARM_CPUID_PXA260:
226
    case ARM_CPUID_PXA261:
227
    case ARM_CPUID_PXA262:
228
        set_feature(env, ARM_FEATURE_V4T);
229
        set_feature(env, ARM_FEATURE_V5);
230
        set_feature(env, ARM_FEATURE_XSCALE);
231
        /* JTAG_ID is ((id << 28) | 0x09265013) */
232
        env->cp15.c0_cachetype = 0xd172172;
233
        env->cp15.c1_sys = 0x00000078;
234
        break;
235
    case ARM_CPUID_PXA270_A0:
236
    case ARM_CPUID_PXA270_A1:
237
    case ARM_CPUID_PXA270_B0:
238
    case ARM_CPUID_PXA270_B1:
239
    case ARM_CPUID_PXA270_C0:
240
    case ARM_CPUID_PXA270_C5:
241
        set_feature(env, ARM_FEATURE_V4T);
242
        set_feature(env, ARM_FEATURE_V5);
243
        set_feature(env, ARM_FEATURE_XSCALE);
244
        /* JTAG_ID is ((id << 28) | 0x09265013) */
245
        set_feature(env, ARM_FEATURE_IWMMXT);
246
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
247
        env->cp15.c0_cachetype = 0xd172172;
248
        env->cp15.c1_sys = 0x00000078;
249
        break;
250
    case ARM_CPUID_SA1100:
251
    case ARM_CPUID_SA1110:
252
        set_feature(env, ARM_FEATURE_STRONGARM);
253
        env->cp15.c1_sys = 0x00000070;
254
        break;
255
    default:
256
        cpu_abort(env, "Bad CPU ID: %x\n", id);
257
        break;
258
    }
259

    
260
    /* Some features automatically imply others: */
261
    if (arm_feature(env, ARM_FEATURE_V7)) {
262
        set_feature(env, ARM_FEATURE_VAPA);
263
    }
264
}
265

    
266
void cpu_reset(CPUARMState *env)
267
{
268
    uint32_t id;
269

    
270
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
271
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
272
        log_cpu_state(env, 0);
273
    }
274

    
275
    id = env->cp15.c0_cpuid;
276
    memset(env, 0, offsetof(CPUARMState, breakpoints));
277
    if (id)
278
        cpu_reset_model_id(env, id);
279
#if defined (CONFIG_USER_ONLY)
280
    env->uncached_cpsr = ARM_CPU_MODE_USR;
281
    /* For user mode we must enable access to coprocessors */
282
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
283
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
284
        env->cp15.c15_cpar = 3;
285
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
286
        env->cp15.c15_cpar = 1;
287
    }
288
#else
289
    /* SVC mode with interrupts disabled.  */
290
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
291
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
292
       clear at reset.  Initial SP and PC are loaded from ROM.  */
293
    if (IS_M(env)) {
294
        uint32_t pc;
295
        uint8_t *rom;
296
        env->uncached_cpsr &= ~CPSR_I;
297
        rom = rom_ptr(0);
298
        if (rom) {
299
            /* We should really use ldl_phys here, in case the guest
300
               modified flash and reset itself.  However images
301
               loaded via -kernel have not been copied yet, so load the
302
               values directly from there.  */
303
            env->regs[13] = ldl_p(rom);
304
            pc = ldl_p(rom + 4);
305
            env->thumb = pc & 1;
306
            env->regs[15] = pc & ~1;
307
        }
308
    }
309
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
310
    env->cp15.c2_base_mask = 0xffffc000u;
311
    /* v7 performance monitor control register: same implementor
312
     * field as main ID register, and we implement no event counters.
313
     */
314
    env->cp15.c9_pmcr = (id & 0xff000000);
315
#endif
316
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
317
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
318
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
319
    set_float_detect_tininess(float_tininess_before_rounding,
320
                              &env->vfp.fp_status);
321
    set_float_detect_tininess(float_tininess_before_rounding,
322
                              &env->vfp.standard_fp_status);
323
    tlb_flush(env, 1);
324
}
325

    
326
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
327
{
328
    int nregs;
329

    
330
    /* VFP data registers are always little-endian.  */
331
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
332
    if (reg < nregs) {
333
        stfq_le_p(buf, env->vfp.regs[reg]);
334
        return 8;
335
    }
336
    if (arm_feature(env, ARM_FEATURE_NEON)) {
337
        /* Aliases for Q regs.  */
338
        nregs += 16;
339
        if (reg < nregs) {
340
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
341
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
342
            return 16;
343
        }
344
    }
345
    switch (reg - nregs) {
346
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
347
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
348
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
349
    }
350
    return 0;
351
}
352

    
353
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
354
{
355
    int nregs;
356

    
357
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
358
    if (reg < nregs) {
359
        env->vfp.regs[reg] = ldfq_le_p(buf);
360
        return 8;
361
    }
362
    if (arm_feature(env, ARM_FEATURE_NEON)) {
363
        nregs += 16;
364
        if (reg < nregs) {
365
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
366
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
367
            return 16;
368
        }
369
    }
370
    switch (reg - nregs) {
371
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
372
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
373
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
374
    }
375
    return 0;
376
}
377

    
378
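/* Create and reset a CPU of the named model (see arm_cpu_names below,
 * e.g. "cortex-a8" or "any") and register the matching VFP/Neon GDB
 * coprocessor description.  Returns NULL if the model is not known.
 */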
CPUARMState *cpu_arm_init(const char *cpu_model)
379
{
380
    CPUARMState *env;
381
    uint32_t id;
382
    static int inited = 0;
383

    
384
    id = cpu_arm_find_by_name(cpu_model);
385
    if (id == 0)
386
        return NULL;
387
    env = qemu_mallocz(sizeof(CPUARMState));
388
    cpu_exec_init(env);
389
    if (!inited) {
390
        inited = 1;
391
        arm_translate_init();
392
    }
393

    
394
    env->cpu_model_str = cpu_model;
395
    env->cp15.c0_cpuid = id;
396
    cpu_reset(env);
397
    if (arm_feature(env, ARM_FEATURE_NEON)) {
398
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
399
                                 51, "arm-neon.xml", 0);
400
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
401
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
402
                                 35, "arm-vfp3.xml", 0);
403
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
404
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
405
                                 19, "arm-vfp.xml", 0);
406
    }
407
    qemu_init_vcpu(env);
408
    return env;
409
}
410

    
411
struct arm_cpu_t {
412
    uint32_t id;
413
    const char *name;
414
};
415

    
416
static const struct arm_cpu_t arm_cpu_names[] = {
417
    { ARM_CPUID_ARM926, "arm926"},
418
    { ARM_CPUID_ARM946, "arm946"},
419
    { ARM_CPUID_ARM1026, "arm1026"},
420
    { ARM_CPUID_ARM1136, "arm1136"},
421
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
422
    { ARM_CPUID_ARM1176, "arm1176"},
423
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
424
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
425
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
426
    { ARM_CPUID_CORTEXA9, "cortex-a9"},
427
    { ARM_CPUID_TI925T, "ti925t" },
428
    { ARM_CPUID_PXA250, "pxa250" },
429
    { ARM_CPUID_SA1100,    "sa1100" },
430
    { ARM_CPUID_SA1110,    "sa1110" },
431
    { ARM_CPUID_PXA255, "pxa255" },
432
    { ARM_CPUID_PXA260, "pxa260" },
433
    { ARM_CPUID_PXA261, "pxa261" },
434
    { ARM_CPUID_PXA262, "pxa262" },
435
    { ARM_CPUID_PXA270, "pxa270" },
436
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
437
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
438
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
439
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
440
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
441
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
442
    { ARM_CPUID_ANY, "any"},
443
    { 0, NULL}
444
};
445

    
446
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
447
{
448
    int i;
449

    
450
    (*cpu_fprintf)(f, "Available CPUs:\n");
451
    for (i = 0; arm_cpu_names[i].name; i++) {
452
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
453
    }
454
}
455

    
456
/* return 0 if not found */
457
static uint32_t cpu_arm_find_by_name(const char *name)
458
{
459
    int i;
460
    uint32_t id;
461

    
462
    id = 0;
463
    for (i = 0; arm_cpu_names[i].name; i++) {
464
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
465
            id = arm_cpu_names[i].id;
466
            break;
467
        }
468
    }
469
    return id;
470
}
471

    
472
void cpu_arm_close(CPUARMState *env)
473
{
474
    free(env);
475
}
476

    
477
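/* Recompose the CPSR from the cached NF/ZF/CF/VF/QF flags, the GE and
 * IT state and the Thumb bit, plus the uncached mode and mask bits.
 */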
uint32_t cpsr_read(CPUARMState *env)
478
{
479
    int ZF;
480
    ZF = (env->ZF == 0);
481
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
482
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
483
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
484
        | ((env->condexec_bits & 0xfc) << 8)
485
        | (env->GE << 16);
486
}
487

    
488
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
489
{
490
    if (mask & CPSR_NZCV) {
491
        env->ZF = (~val) & CPSR_Z;
492
        env->NF = val;
493
        env->CF = (val >> 29) & 1;
494
        env->VF = (val << 3) & 0x80000000;
495
    }
496
    if (mask & CPSR_Q)
497
        env->QF = ((val & CPSR_Q) != 0);
498
    if (mask & CPSR_T)
499
        env->thumb = ((val & CPSR_T) != 0);
500
    if (mask & CPSR_IT_0_1) {
501
        env->condexec_bits &= ~3;
502
        env->condexec_bits |= (val >> 25) & 3;
503
    }
504
    if (mask & CPSR_IT_2_7) {
505
        env->condexec_bits &= 3;
506
        env->condexec_bits |= (val >> 8) & 0xfc;
507
    }
508
    if (mask & CPSR_GE) {
509
        env->GE = (val >> 16) & 0xf;
510
    }
511

    
512
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
513
        switch_mode(env, val & CPSR_M);
514
    }
515
    mask &= ~CACHED_CPSR_BITS;
516
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
517
}
518

    
519
/* Sign/zero extend */
520
uint32_t HELPER(sxtb16)(uint32_t x)
521
{
522
    uint32_t res;
523
    res = (uint16_t)(int8_t)x;
524
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
525
    return res;
526
}
527

    
528
uint32_t HELPER(uxtb16)(uint32_t x)
529
{
530
    uint32_t res;
531
    res = (uint16_t)(uint8_t)x;
532
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
533
    return res;
534
}
535

    
536
uint32_t HELPER(clz)(uint32_t x)
537
{
538
    return clz32(x);
539
}
540

    
541
int32_t HELPER(sdiv)(int32_t num, int32_t den)
542
{
543
    if (den == 0)
544
      return 0;
545
    if (num == INT_MIN && den == -1)
546
      return INT_MIN;
547
    return num / den;
548
}
549

    
550
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
551
{
552
    if (den == 0)
553
      return 0;
554
    return num / den;
555
}
556

    
557
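/* Reverse the bit order of a 32-bit word: swap the bytes, then the
 * nibbles within each byte, then the bits within each nibble.
 */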
uint32_t HELPER(rbit)(uint32_t x)
558
{
559
    x =  ((x & 0xff000000) >> 24)
560
       | ((x & 0x00ff0000) >> 8)
561
       | ((x & 0x0000ff00) << 8)
562
       | ((x & 0x000000ff) << 24);
563
    x =  ((x & 0xf0f0f0f0) >> 4)
564
       | ((x & 0x0f0f0f0f) << 4);
565
    x =  ((x & 0x88888888) >> 3)
566
       | ((x & 0x44444444) >> 1)
567
       | ((x & 0x22222222) << 1)
568
       | ((x & 0x11111111) << 3);
569
    return x;
570
}
571

    
572
uint32_t HELPER(abs)(uint32_t x)
573
{
574
    return ((int32_t)x < 0) ? -x : x;
575
}
576

    
577
#if defined(CONFIG_USER_ONLY)
578

    
579
void do_interrupt (CPUState *env)
580
{
581
    env->exception_index = -1;
582
}
583

    
584
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
585
                              int mmu_idx, int is_softmmu)
586
{
587
    if (rw == 2) {
588
        env->exception_index = EXCP_PREFETCH_ABORT;
589
        env->cp15.c6_insn = address;
590
    } else {
591
        env->exception_index = EXCP_DATA_ABORT;
592
        env->cp15.c6_data = address;
593
    }
594
    return 1;
595
}
596

    
597
/* These should probably raise undefined insn exceptions.  */
598
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
599
{
600
    int op1 = (insn >> 8) & 0xf;
601
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
602
    return;
603
}
604

    
605
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
606
{
607
    int op1 = (insn >> 8) & 0xf;
608
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
609
    return 0;
610
}
611

    
612
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
613
{
614
    cpu_abort(env, "cp15 insn %08x\n", insn);
615
}
616

    
617
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
618
{
619
    cpu_abort(env, "cp15 insn %08x\n", insn);
620
}
621

    
622
/* These should probably raise undefined insn exceptions.  */
623
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
624
{
625
    cpu_abort(env, "v7m_mrs %d\n", reg);
626
}
627

    
628
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
629
{
630
    cpu_abort(env, "v7m_mrs %d\n", reg);
631
    return 0;
632
}
633

    
634
void switch_mode(CPUState *env, int mode)
635
{
636
    if (mode != ARM_CPU_MODE_USR)
637
        cpu_abort(env, "Tried to switch out of user mode\n");
638
}
639

    
640
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
641
{
642
    cpu_abort(env, "banked r13 write\n");
643
}
644

    
645
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
646
{
647
    cpu_abort(env, "banked r13 read\n");
648
    return 0;
649
}
650

    
651
#else
652

    
653
extern int semihosting_enabled;
654

    
655
/* Map CPU modes onto saved register banks.  */
656
static inline int bank_number (int mode)
657
{
658
    switch (mode) {
659
    case ARM_CPU_MODE_USR:
660
    case ARM_CPU_MODE_SYS:
661
        return 0;
662
    case ARM_CPU_MODE_SVC:
663
        return 1;
664
    case ARM_CPU_MODE_ABT:
665
        return 2;
666
    case ARM_CPU_MODE_UND:
667
        return 3;
668
    case ARM_CPU_MODE_IRQ:
669
        return 4;
670
    case ARM_CPU_MODE_FIQ:
671
        return 5;
672
    }
673
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
674
    return -1;
675
}
676

    
677
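/* Switch the CPU to a new processor mode, spilling and reloading the
 * banked r13/r14/SPSR (and the FIQ-only copies of r8-r12 if needed).
 */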
void switch_mode(CPUState *env, int mode)
678
{
679
    int old_mode;
680
    int i;
681

    
682
    old_mode = env->uncached_cpsr & CPSR_M;
683
    if (mode == old_mode)
684
        return;
685

    
686
    if (old_mode == ARM_CPU_MODE_FIQ) {
687
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
688
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
689
    } else if (mode == ARM_CPU_MODE_FIQ) {
690
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
691
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
692
    }
693

    
694
    i = bank_number(old_mode);
695
    env->banked_r13[i] = env->regs[13];
696
    env->banked_r14[i] = env->regs[14];
697
    env->banked_spsr[i] = env->spsr;
698

    
699
    i = bank_number(mode);
700
    env->regs[13] = env->banked_r13[i];
701
    env->regs[14] = env->banked_r14[i];
702
    env->spsr = env->banked_spsr[i];
703
}
704

    
705
static void v7m_push(CPUARMState *env, uint32_t val)
706
{
707
    env->regs[13] -= 4;
708
    stl_phys(env->regs[13], val);
709
}
710

    
711
static uint32_t v7m_pop(CPUARMState *env)
712
{
713
    uint32_t val;
714
    val = ldl_phys(env->regs[13]);
715
    env->regs[13] += 4;
716
    return val;
717
}
718

    
719
/* Switch to V7M main or process stack pointer.  */
720
static void switch_v7m_sp(CPUARMState *env, int process)
721
{
722
    uint32_t tmp;
723
    if (env->v7m.current_sp != process) {
724
        tmp = env->v7m.other_sp;
725
        env->v7m.other_sp = env->regs[13];
726
        env->regs[13] = tmp;
727
        env->v7m.current_sp = process;
728
    }
729
}
730

    
731
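/* Return from a v7-M exception: pop the r0-r3, r12, lr, pc, xPSR frame
 * pushed at exception entry, using the stack selected by the EXC_RETURN
 * value that was loaded into the PC.
 */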
static void do_v7m_exception_exit(CPUARMState *env)
732
{
733
    uint32_t type;
734
    uint32_t xpsr;
735

    
736
    type = env->regs[15];
737
    if (env->v7m.exception != 0)
738
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
739

    
740
    /* Switch to the target stack.  */
741
    switch_v7m_sp(env, (type & 4) != 0);
742
    /* Pop registers.  */
743
    env->regs[0] = v7m_pop(env);
744
    env->regs[1] = v7m_pop(env);
745
    env->regs[2] = v7m_pop(env);
746
    env->regs[3] = v7m_pop(env);
747
    env->regs[12] = v7m_pop(env);
748
    env->regs[14] = v7m_pop(env);
749
    env->regs[15] = v7m_pop(env);
750
    xpsr = v7m_pop(env);
751
    xpsr_write(env, xpsr, 0xfffffdff);
752
    /* Undo stack alignment.  */
753
    if (xpsr & 0x200)
754
        env->regs[13] |= 4;
755
    /* ??? The exception return type specifies Thread/Handler mode.  However
756
       this is also implied by the xPSR value. Not sure what to do
757
       if there is a mismatch.  */
758
    /* ??? Likewise for mismatches between the CONTROL register and the stack
759
       pointer.  */
760
}
761

    
762
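/* v7-M exception entry: push the r0-r3, r12, lr, pc, xPSR frame, switch
 * to the main stack, load EXC_RETURN into lr and vector through the
 * table at v7m.vecbase.
 */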
static void do_interrupt_v7m(CPUARMState *env)
763
{
764
    uint32_t xpsr = xpsr_read(env);
765
    uint32_t lr;
766
    uint32_t addr;
767

    
768
    lr = 0xfffffff1;
769
    if (env->v7m.current_sp)
770
        lr |= 4;
771
    if (env->v7m.exception == 0)
772
        lr |= 8;
773

    
774
    /* For exceptions we just mark as pending on the NVIC, and let that
775
       handle it.  */
776
    /* TODO: Need to escalate if the current priority is higher than the
777
       one we're raising.  */
778
    switch (env->exception_index) {
779
    case EXCP_UDEF:
780
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
781
        return;
782
    case EXCP_SWI:
783
        env->regs[15] += 2;
784
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
785
        return;
786
    case EXCP_PREFETCH_ABORT:
787
    case EXCP_DATA_ABORT:
788
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
789
        return;
790
    case EXCP_BKPT:
791
        if (semihosting_enabled) {
792
            int nr;
793
            nr = lduw_code(env->regs[15]) & 0xff;
794
            if (nr == 0xab) {
795
                env->regs[15] += 2;
796
                env->regs[0] = do_arm_semihosting(env);
797
                return;
798
            }
799
        }
800
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
801
        return;
802
    case EXCP_IRQ:
803
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
804
        break;
805
    case EXCP_EXCEPTION_EXIT:
806
        do_v7m_exception_exit(env);
807
        return;
808
    default:
809
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
810
        return; /* Never happens.  Keep compiler happy.  */
811
    }
812

    
813
    /* Align stack pointer.  */
814
    /* ??? Should only do this if Configuration Control Register
815
       STACKALIGN bit is set.  */
816
    if (env->regs[13] & 4) {
817
        env->regs[13] -= 4;
818
        xpsr |= 0x200;
819
    }
820
    /* Switch to the handler mode.  */
821
    v7m_push(env, xpsr);
822
    v7m_push(env, env->regs[15]);
823
    v7m_push(env, env->regs[14]);
824
    v7m_push(env, env->regs[12]);
825
    v7m_push(env, env->regs[3]);
826
    v7m_push(env, env->regs[2]);
827
    v7m_push(env, env->regs[1]);
828
    v7m_push(env, env->regs[0]);
829
    switch_v7m_sp(env, 0);
830
    env->uncached_cpsr &= ~CPSR_IT;
831
    env->regs[14] = lr;
832
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
833
    env->regs[15] = addr & 0xfffffffe;
834
    env->thumb = addr & 1;
835
}
836

    
837
/* Handle a CPU exception.  */
838
void do_interrupt(CPUARMState *env)
839
{
840
    uint32_t addr;
841
    uint32_t mask;
842
    int new_mode;
843
    uint32_t offset;
844

    
845
    if (IS_M(env)) {
846
        do_interrupt_v7m(env);
847
        return;
848
    }
849
    /* TODO: Vectored interrupt controller.  */
850
    switch (env->exception_index) {
851
    case EXCP_UDEF:
852
        new_mode = ARM_CPU_MODE_UND;
853
        addr = 0x04;
854
        mask = CPSR_I;
855
        if (env->thumb)
856
            offset = 2;
857
        else
858
            offset = 4;
859
        break;
860
    case EXCP_SWI:
861
        if (semihosting_enabled) {
862
            /* Check for semihosting interrupt.  */
863
            if (env->thumb) {
864
                mask = lduw_code(env->regs[15] - 2) & 0xff;
865
            } else {
866
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
867
            }
868
            /* Only intercept calls from privileged modes, to provide some
869
               semblance of security.  */
870
            if (((mask == 0x123456 && !env->thumb)
871
                    || (mask == 0xab && env->thumb))
872
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
873
                env->regs[0] = do_arm_semihosting(env);
874
                return;
875
            }
876
        }
877
        new_mode = ARM_CPU_MODE_SVC;
878
        addr = 0x08;
879
        mask = CPSR_I;
880
        /* The PC already points to the next instruction.  */
881
        offset = 0;
882
        break;
883
    case EXCP_BKPT:
884
        /* See if this is a semihosting syscall.  */
885
        if (env->thumb && semihosting_enabled) {
886
            mask = lduw_code(env->regs[15]) & 0xff;
887
            if (mask == 0xab
888
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
889
                env->regs[15] += 2;
890
                env->regs[0] = do_arm_semihosting(env);
891
                return;
892
            }
893
        }
894
        env->cp15.c5_insn = 2;
895
        /* Fall through to prefetch abort.  */
896
    case EXCP_PREFETCH_ABORT:
897
        new_mode = ARM_CPU_MODE_ABT;
898
        addr = 0x0c;
899
        mask = CPSR_A | CPSR_I;
900
        offset = 4;
901
        break;
902
    case EXCP_DATA_ABORT:
903
        new_mode = ARM_CPU_MODE_ABT;
904
        addr = 0x10;
905
        mask = CPSR_A | CPSR_I;
906
        offset = 8;
907
        break;
908
    case EXCP_IRQ:
909
        new_mode = ARM_CPU_MODE_IRQ;
910
        addr = 0x18;
911
        /* Disable IRQ and imprecise data aborts.  */
912
        mask = CPSR_A | CPSR_I;
913
        offset = 4;
914
        break;
915
    case EXCP_FIQ:
916
        new_mode = ARM_CPU_MODE_FIQ;
917
        addr = 0x1c;
918
        /* Disable FIQ, IRQ and imprecise data aborts.  */
919
        mask = CPSR_A | CPSR_I | CPSR_F;
920
        offset = 4;
921
        break;
922
    default:
923
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
924
        return; /* Never happens.  Keep compiler happy.  */
925
    }
926
    /* High vectors.  */
927
    if (env->cp15.c1_sys & (1 << 13)) {
928
        addr += 0xffff0000;
929
    }
930
    switch_mode (env, new_mode);
931
    env->spsr = cpsr_read(env);
932
    /* Clear IT bits.  */
933
    env->condexec_bits = 0;
934
    /* Switch to the new mode, and to the correct instruction set.  */
935
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
936
    env->uncached_cpsr |= mask;
937
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
938
     * and we should just guard the thumb mode on V4 */
939
    if (arm_feature(env, ARM_FEATURE_V4T)) {
940
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
941
    }
942
    env->regs[14] = env->regs[15] + offset;
943
    env->regs[15] = addr;
944
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
945
}
946

    
947
/* Check section/page access permissions.
948
   Returns the page protection flags, or zero if the access is not
949
   permitted.  */
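/* access_type is 0 for a read, 1 for a write and 2 for an instruction
   fetch; the same convention is used by the get_phys_addr_* functions.  */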
950
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
951
                           int is_user)
952
{
953
  int prot_ro;
954

    
955
  if (domain == 3)
956
    return PAGE_READ | PAGE_WRITE;
957

    
958
  if (access_type == 1)
959
      prot_ro = 0;
960
  else
961
      prot_ro = PAGE_READ;
962

    
963
  switch (ap) {
964
  case 0:
965
      if (access_type == 1)
966
          return 0;
967
      switch ((env->cp15.c1_sys >> 8) & 3) {
968
      case 1:
969
          return is_user ? 0 : PAGE_READ;
970
      case 2:
971
          return PAGE_READ;
972
      default:
973
          return 0;
974
      }
975
  case 1:
976
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
977
  case 2:
978
      if (is_user)
979
          return prot_ro;
980
      else
981
          return PAGE_READ | PAGE_WRITE;
982
  case 3:
983
      return PAGE_READ | PAGE_WRITE;
984
  case 4: /* Reserved.  */
985
      return 0;
986
  case 5:
987
      return is_user ? 0 : prot_ro;
988
  case 6:
989
      return prot_ro;
990
  case 7:
991
      if (!arm_feature (env, ARM_FEATURE_V6K))
992
          return 0;
993
      return prot_ro;
994
  default:
995
      abort();
996
  }
997
}
998

    
999
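/* Pick the level 1 page table base: addresses with any of the top
 * TTBCR.N bits set (c2_mask) are translated via TTBR1 (c2_base1),
 * everything else via TTBR0 (c2_base0).
 */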
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
1000
{
1001
    uint32_t table;
1002

    
1003
    if (address & env->cp15.c2_mask)
1004
        table = env->cp15.c2_base1 & 0xffffc000;
1005
    else
1006
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
1007

    
1008
    table |= (address >> 18) & 0x3ffc;
1009
    return table;
1010
}
1011

    
1012
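/* ARMv5-style table walk (no XN bit).  On failure the return value
 * packs the fault status code in the low bits with the domain shifted
 * left by 4, which the caller stores straight into the FSR.
 */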
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
1013
                            int is_user, uint32_t *phys_ptr, int *prot,
1014
                            target_ulong *page_size)
1015
{
1016
    int code;
1017
    uint32_t table;
1018
    uint32_t desc;
1019
    int type;
1020
    int ap;
1021
    int domain;
1022
    uint32_t phys_addr;
1023

    
1024
    /* Pagetable walk.  */
1025
    /* Lookup l1 descriptor.  */
1026
    table = get_level1_table_address(env, address);
1027
    desc = ldl_phys(table);
1028
    type = (desc & 3);
1029
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
1030
    if (type == 0) {
1031
        /* Section translation fault.  */
1032
        code = 5;
1033
        goto do_fault;
1034
    }
1035
    if (domain == 0 || domain == 2) {
1036
        if (type == 2)
1037
            code = 9; /* Section domain fault.  */
1038
        else
1039
            code = 11; /* Page domain fault.  */
1040
        goto do_fault;
1041
    }
1042
    if (type == 2) {
1043
        /* 1Mb section.  */
1044
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1045
        ap = (desc >> 10) & 3;
1046
        code = 13;
1047
        *page_size = 1024 * 1024;
1048
    } else {
1049
        /* Lookup l2 entry.  */
1050
        if (type == 1) {
1051
            /* Coarse pagetable.  */
1052
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1053
        } else {
1054
            /* Fine pagetable.  */
1055
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1056
        }
1057
        desc = ldl_phys(table);
1058
        switch (desc & 3) {
1059
        case 0: /* Page translation fault.  */
1060
            code = 7;
1061
            goto do_fault;
1062
        case 1: /* 64k page.  */
1063
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1064
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1065
            *page_size = 0x10000;
1066
            break;
1067
        case 2: /* 4k page.  */
1068
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1069
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1070
            *page_size = 0x1000;
1071
            break;
1072
        case 3: /* 1k page.  */
1073
            if (type == 1) {
1074
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1075
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1076
                } else {
1077
                    /* Page translation fault.  */
1078
                    code = 7;
1079
                    goto do_fault;
1080
                }
1081
            } else {
1082
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1083
            }
1084
            ap = (desc >> 4) & 3;
1085
            *page_size = 0x400;
1086
            break;
1087
        default:
1088
            /* Never happens, but compiler isn't smart enough to tell.  */
1089
            abort();
1090
        }
1091
        code = 15;
1092
    }
1093
    *prot = check_ap(env, ap, domain, access_type, is_user);
1094
    if (!*prot) {
1095
        /* Access permission fault.  */
1096
        goto do_fault;
1097
    }
1098
    *prot |= PAGE_EXEC;
1099
    *phys_ptr = phys_addr;
1100
    return 0;
1101
do_fault:
1102
    return code | (domain << 4);
1103
}
1104

    
1105
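/* ARMv6/v7 short-descriptor walk: adds supersections, the XN bit and
 * the simplified access model (AP[0] treated as an access flag when
 * SCTLR bit 29 is set) on top of the v5 format.
 */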
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
1106
                            int is_user, uint32_t *phys_ptr, int *prot,
1107
                            target_ulong *page_size)
1108
{
1109
    int code;
1110
    uint32_t table;
1111
    uint32_t desc;
1112
    uint32_t xn;
1113
    int type;
1114
    int ap;
1115
    int domain;
1116
    uint32_t phys_addr;
1117

    
1118
    /* Pagetable walk.  */
1119
    /* Lookup l1 descriptor.  */
1120
    table = get_level1_table_address(env, address);
1121
    desc = ldl_phys(table);
1122
    type = (desc & 3);
1123
    if (type == 0) {
1124
        /* Section translation fault.  */
1125
        code = 5;
1126
        domain = 0;
1127
        goto do_fault;
1128
    } else if (type == 2 && (desc & (1 << 18))) {
1129
        /* Supersection.  */
1130
        domain = 0;
1131
    } else {
1132
        /* Section or page.  */
1133
        domain = (desc >> 4) & 0x1e;
1134
    }
1135
    domain = (env->cp15.c3 >> domain) & 3;
1136
    if (domain == 0 || domain == 2) {
1137
        if (type == 2)
1138
            code = 9; /* Section domain fault.  */
1139
        else
1140
            code = 11; /* Page domain fault.  */
1141
        goto do_fault;
1142
    }
1143
    if (type == 2) {
1144
        if (desc & (1 << 18)) {
1145
            /* Supersection.  */
1146
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1147
            *page_size = 0x1000000;
1148
        } else {
1149
            /* Section.  */
1150
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1151
            *page_size = 0x100000;
1152
        }
1153
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1154
        xn = desc & (1 << 4);
1155
        code = 13;
1156
    } else {
1157
        /* Lookup l2 entry.  */
1158
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1159
        desc = ldl_phys(table);
1160
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1161
        switch (desc & 3) {
1162
        case 0: /* Page translation fault.  */
1163
            code = 7;
1164
            goto do_fault;
1165
        case 1: /* 64k page.  */
1166
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1167
            xn = desc & (1 << 15);
1168
            *page_size = 0x10000;
1169
            break;
1170
        case 2: case 3: /* 4k page.  */
1171
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1172
            xn = desc & 1;
1173
            *page_size = 0x1000;
1174
            break;
1175
        default:
1176
            /* Never happens, but compiler isn't smart enough to tell.  */
1177
            abort();
1178
        }
1179
        code = 15;
1180
    }
1181
    if (domain == 3) {
1182
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1183
    } else {
1184
        if (xn && access_type == 2)
1185
            goto do_fault;
1186

    
1187
        /* The simplified model uses AP[0] as an access control bit.  */
1188
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
1189
            /* Access flag fault.  */
1190
            code = (code == 15) ? 6 : 3;
1191
            goto do_fault;
1192
        }
1193
        *prot = check_ap(env, ap, domain, access_type, is_user);
1194
        if (!*prot) {
1195
            /* Access permission fault.  */
1196
            goto do_fault;
1197
        }
1198
        if (!xn) {
1199
            *prot |= PAGE_EXEC;
1200
        }
1201
    }
1202
    *phys_ptr = phys_addr;
1203
    return 0;
1204
do_fault:
1205
    return code | (domain << 4);
1206
}
1207

    
1208
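/* MPU lookup: find the highest-numbered enabled region covering the
 * address and apply its access permission field from c5_data/c5_insn.
 */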
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
1209
                             int is_user, uint32_t *phys_ptr, int *prot)
1210
{
1211
    int n;
1212
    uint32_t mask;
1213
    uint32_t base;
1214

    
1215
    *phys_ptr = address;
1216
    for (n = 7; n >= 0; n--) {
1217
        base = env->cp15.c6_region[n];
1218
        if ((base & 1) == 0)
1219
            continue;
1220
        mask = 1 << ((base >> 1) & 0x1f);
1221
        /* Keep this shift separate from the above to avoid an
1222
           (undefined) << 32.  */
1223
        mask = (mask << 1) - 1;
1224
        if (((base ^ address) & ~mask) == 0)
1225
            break;
1226
    }
1227
    if (n < 0)
1228
        return 2;
1229

    
1230
    if (access_type == 2) {
1231
        mask = env->cp15.c5_insn;
1232
    } else {
1233
        mask = env->cp15.c5_data;
1234
    }
1235
    mask = (mask >> (n * 4)) & 0xf;
1236
    switch (mask) {
1237
    case 0:
1238
        return 1;
1239
    case 1:
1240
        if (is_user)
1241
          return 1;
1242
        *prot = PAGE_READ | PAGE_WRITE;
1243
        break;
1244
    case 2:
1245
        *prot = PAGE_READ;
1246
        if (!is_user)
1247
            *prot |= PAGE_WRITE;
1248
        break;
1249
    case 3:
1250
        *prot = PAGE_READ | PAGE_WRITE;
1251
        break;
1252
    case 5:
1253
        if (is_user)
1254
            return 1;
1255
        *prot = PAGE_READ;
1256
        break;
1257
    case 6:
1258
        *prot = PAGE_READ;
1259
        break;
1260
    default:
1261
        /* Bad permission.  */
1262
        return 1;
1263
    }
1264
    *prot |= PAGE_EXEC;
1265
    return 0;
1266
}
1267

    
1268
static inline int get_phys_addr(CPUState *env, uint32_t address,
1269
                                int access_type, int is_user,
1270
                                uint32_t *phys_ptr, int *prot,
1271
                                target_ulong *page_size)
1272
{
1273
    /* Fast Context Switch Extension.  */
1274
    if (address < 0x02000000)
1275
        address += env->cp15.c13_fcse;
1276

    
1277
    if ((env->cp15.c1_sys & 1) == 0) {
1278
        /* MMU/MPU disabled.  */
1279
        *phys_ptr = address;
1280
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1281
        *page_size = TARGET_PAGE_SIZE;
1282
        return 0;
1283
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1284
        *page_size = TARGET_PAGE_SIZE;
1285
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1286
                                 prot);
1287
    } else if (env->cp15.c1_sys & (1 << 23)) {
1288
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1289
                                prot, page_size);
1290
    } else {
1291
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1292
                                prot, page_size);
1293
    }
1294
}
1295

    
1296
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
1297
                              int access_type, int mmu_idx, int is_softmmu)
1298
{
1299
    uint32_t phys_addr;
1300
    target_ulong page_size;
1301
    int prot;
1302
    int ret, is_user;
1303

    
1304
    is_user = mmu_idx == MMU_USER_IDX;
1305
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
1306
                        &page_size);
1307
    if (ret == 0) {
1308
        /* Map a single [sub]page.  */
1309
        phys_addr &= ~(uint32_t)0x3ff;
1310
        address &= ~(uint32_t)0x3ff;
1311
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
1312
        return 0;
1313
    }
1314

    
1315
    if (access_type == 2) {
1316
        env->cp15.c5_insn = ret;
1317
        env->cp15.c6_insn = address;
1318
        env->exception_index = EXCP_PREFETCH_ABORT;
1319
    } else {
1320
        env->cp15.c5_data = ret;
1321
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1322
            env->cp15.c5_data |= (1 << 11);
1323
        env->cp15.c6_data = address;
1324
        env->exception_index = EXCP_DATA_ABORT;
1325
    }
1326
    return 1;
1327
}
1328

    
1329
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1330
{
1331
    uint32_t phys_addr;
1332
    target_ulong page_size;
1333
    int prot;
1334
    int ret;
1335

    
1336
    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1337

    
1338
    if (ret != 0)
1339
        return -1;
1340

    
1341
    return phys_addr;
1342
}
1343

    
1344
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
1345
{
1346
    int cp_num = (insn >> 8) & 0xf;
1347
    int cp_info = (insn >> 5) & 7;
1348
    int src = (insn >> 16) & 0xf;
1349
    int operand = insn & 0xf;
1350

    
1351
    if (env->cp[cp_num].cp_write)
1352
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1353
                                 cp_info, src, operand, val);
1354
}
1355

    
1356
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
1357
{
1358
    int cp_num = (insn >> 8) & 0xf;
1359
    int cp_info = (insn >> 5) & 7;
1360
    int dest = (insn >> 16) & 0xf;
1361
    int operand = insn & 0xf;
1362

    
1363
    if (env->cp[cp_num].cp_read)
1364
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1365
                                       cp_info, dest, operand);
1366
    return 0;
1367
}
1368

    
1369
/* Return basic MPU access permission bits.  */
1370
static uint32_t simple_mpu_ap_bits(uint32_t val)
1371
{
1372
    uint32_t ret;
1373
    uint32_t mask;
1374
    int i;
1375
    ret = 0;
1376
    mask = 3;
1377
    for (i = 0; i < 16; i += 2) {
1378
        ret |= (val >> i) & mask;
1379
        mask <<= 2;
1380
    }
1381
    return ret;
1382
}
1383

    
1384
/* Pad basic MPU access permission bits to extended format.  */
1385
static uint32_t extended_mpu_ap_bits(uint32_t val)
1386
{
1387
    uint32_t ret;
1388
    uint32_t mask;
1389
    int i;
1390
    ret = 0;
1391
    mask = 3;
1392
    for (i = 0; i < 16; i += 2) {
1393
        ret |= (val & mask) << i;
1394
        mask <<= 2;
1395
    }
1396
    return ret;
1397
}
1398

    
1399
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
1400
{
1401
    int op1;
1402
    int op2;
1403
    int crm;
1404

    
1405
    op1 = (insn >> 21) & 7;
1406
    op2 = (insn >> 5) & 7;
1407
    crm = insn & 0xf;
1408
    switch ((insn >> 16) & 0xf) {
1409
    case 0:
1410
        /* ID codes.  */
1411
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1412
            break;
1413
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1414
            break;
1415
        if (arm_feature(env, ARM_FEATURE_V7)
1416
                && op1 == 2 && crm == 0 && op2 == 0) {
1417
            env->cp15.c0_cssel = val & 0xf;
1418
            break;
1419
        }
1420
        goto bad_reg;
1421
    case 1: /* System configuration.  */
1422
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1423
            op2 = 0;
1424
        switch (op2) {
1425
        case 0:
1426
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1427
                env->cp15.c1_sys = val;
1428
            /* ??? Lots of these bits are not implemented.  */
1429
            /* This may enable/disable the MMU, so do a TLB flush.  */
1430
            tlb_flush(env, 1);
1431
            break;
1432
        case 1: /* Auxiliary control register.  */
1433
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1434
                env->cp15.c1_xscaleauxcr = val;
1435
                break;
1436
            }
1437
            /* Not implemented.  */
1438
            break;
1439
        case 2:
1440
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1441
                goto bad_reg;
1442
            if (env->cp15.c1_coproc != val) {
1443
                env->cp15.c1_coproc = val;
1444
                /* ??? Is this safe when called from within a TB?  */
1445
                tb_flush(env);
1446
            }
1447
            break;
1448
        default:
1449
            goto bad_reg;
1450
        }
1451
        break;
1452
    case 2: /* MMU Page table control / MPU cache control.  */
1453
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1454
            switch (op2) {
1455
            case 0:
1456
                env->cp15.c2_data = val;
1457
                break;
1458
            case 1:
1459
                env->cp15.c2_insn = val;
1460
                break;
1461
            default:
1462
                goto bad_reg;
1463
            }
1464
        } else {
1465
            switch (op2) {
1466
            case 0:
1467
                env->cp15.c2_base0 = val;
1468
                break;
1469
            case 1:
1470
                env->cp15.c2_base1 = val;
1471
                break;
1472
            case 2:
1473
                val &= 7;
1474
                env->cp15.c2_control = val;
1475
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1476
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1477
                break;
1478
            default:
1479
                goto bad_reg;
1480
            }
1481
        }
1482
        break;
1483
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1484
        env->cp15.c3 = val;
1485
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1486
        break;
1487
    case 4: /* Reserved.  */
1488
        goto bad_reg;
1489
    case 5: /* MMU Fault status / MPU access permission.  */
1490
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1491
            op2 = 0;
1492
        switch (op2) {
1493
        case 0:
1494
            if (arm_feature(env, ARM_FEATURE_MPU))
1495
                val = extended_mpu_ap_bits(val);
1496
            env->cp15.c5_data = val;
1497
            break;
1498
        case 1:
1499
            if (arm_feature(env, ARM_FEATURE_MPU))
1500
                val = extended_mpu_ap_bits(val);
1501
            env->cp15.c5_insn = val;
1502
            break;
1503
        case 2:
1504
            if (!arm_feature(env, ARM_FEATURE_MPU))
1505
                goto bad_reg;
1506
            env->cp15.c5_data = val;
1507
            break;
1508
        case 3:
1509
            if (!arm_feature(env, ARM_FEATURE_MPU))
1510
                goto bad_reg;
1511
            env->cp15.c5_insn = val;
1512
            break;
1513
        default:
1514
            goto bad_reg;
1515
        }
1516
        break;
1517
    case 6: /* MMU Fault address / MPU base/size.  */
1518
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1519
            if (crm >= 8)
1520
                goto bad_reg;
1521
            env->cp15.c6_region[crm] = val;
1522
        } else {
1523
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1524
                op2 = 0;
1525
            switch (op2) {
1526
            case 0:
1527
                env->cp15.c6_data = val;
1528
                break;
1529
            case 1: /* ??? This is WFAR on armv6 */
1530
            case 2:
1531
                env->cp15.c6_insn = val;
1532
                break;
1533
            default:
1534
                goto bad_reg;
1535
            }
1536
        }
1537
        break;
1538
    case 7: /* Cache control.  */
1539
        env->cp15.c15_i_max = 0x000;
1540
        env->cp15.c15_i_min = 0xff0;
1541
        if (op1 != 0) {
1542
            goto bad_reg;
1543
        }
1544
        /* No cache, so nothing to do except VA->PA translations. */
1545
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
1546
            switch (crm) {
1547
            case 4:
1548
                if (arm_feature(env, ARM_FEATURE_V7)) {
1549
                    env->cp15.c7_par = val & 0xfffff6ff;
1550
                } else {
1551
                    env->cp15.c7_par = val & 0xfffff1ff;
1552
                }
1553
                break;
1554
            case 8: {
1555
                uint32_t phys_addr;
1556
                target_ulong page_size;
1557
                int prot;
1558
                int ret, is_user = op2 & 2;
1559
                int access_type = op2 & 1;
1560

    
1561
                if (op2 & 4) {
1562
                    /* Other states are only available with TrustZone */
1563
                    goto bad_reg;
1564
                }
1565
                ret = get_phys_addr(env, val, access_type, is_user,
1566
                                    &phys_addr, &prot, &page_size);
1567
                if (ret == 0) {
1568
                    /* We do not set any attribute bits in the PAR */
1569
                    if (page_size == (1 << 24)
1570
                        && arm_feature(env, ARM_FEATURE_V7)) {
1571
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1572
                    } else {
1573
                        env->cp15.c7_par = phys_addr & 0xfffff000;
1574
                    }
1575
                } else {
1576
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1577
                                       ((ret & (12 << 1)) >> 6) |
1578
                                       ((ret & 0xf) << 1) | 1;
1579
                }
1580
                break;
1581
            }
1582
            }
1583
        }
1584
        break;
1585
    case 8: /* MMU TLB control.  */
1586
        switch (op2) {
1587
        case 0: /* Invalidate all.  */
1588
            tlb_flush(env, 0);
1589
            break;
1590
        case 1: /* Invalidate single TLB entry.  */
1591
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1592
            break;
1593
        case 2: /* Invalidate on ASID.  */
1594
            tlb_flush(env, val == 0);
1595
            break;
1596
        case 3: /* Invalidate single entry on MVA.  */
1597
            /* ??? This is like case 1, but ignores ASID.  */
1598
            tlb_flush(env, 1);
1599
            break;
1600
        default:
1601
            goto bad_reg;
1602
        }
1603
        break;
1604
    case 9:
1605
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1606
            break;
1607
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1608
            break; /* Ignore ReadBuffer access */
1609
        switch (crm) {
1610
        case 0: /* Cache lockdown.  */
1611
            switch (op1) {
1612
            case 0: /* L1 cache.  */
1613
                switch (op2) {
1614
                case 0:
1615
                    env->cp15.c9_data = val;
1616
                    break;
1617
                case 1:
1618
                    env->cp15.c9_insn = val;
1619
                    break;
1620
                default:
1621
                    goto bad_reg;
1622
                }
1623
                break;
1624
            case 1: /* L2 cache.  */
1625
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1626
                break;
1627
            default:
1628
                goto bad_reg;
1629
            }
1630
            break;
1631
        case 1: /* TCM memory region registers.  */
1632
            /* Not implemented.  */
1633
            goto bad_reg;
1634
        case 12: /* Performance monitor control */
1635
            /* Performance monitors are implementation defined in v7,
1636
             * but with an ARM recommended set of registers, which we
1637
             * follow (although we don't actually implement any counters)
1638
             */
1639
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1640
                goto bad_reg;
1641
            }
1642
            switch (op2) {
1643
            case 0: /* performance monitor control register */
1644
                /* only the DP, X, D and E bits are writable */
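                /* (0x39 keeps bits 5..3 and bit 0, i.e. DP, X, D and E) */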
1645
                env->cp15.c9_pmcr &= ~0x39;
1646
                env->cp15.c9_pmcr |= (val & 0x39);
1647
                break;
1648
            case 1: /* Count enable set register */
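                /* Only the cycle counter enable (the C bit, bit 31) is
                 * implemented; the event counter bits are ignored because
                 * we provide no event counters.
                 */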
1649
                val &= (1 << 31);
1650
                env->cp15.c9_pmcnten |= val;
1651
                break;
1652
            case 2: /* Count enable clear */
1653
                val &= (1 << 31);
1654
                env->cp15.c9_pmcnten &= ~val;
1655
                break;
1656
            case 3: /* Overflow flag status */
1657
                env->cp15.c9_pmovsr &= ~val;
1658
                break;
1659
            case 4: /* Software increment */
1660
                /* RAZ/WI since we don't implement the software-count event */
1661
                break;
1662
            case 5: /* Event counter selection register */
1663
                /* Since we don't implement any events, writing to this register
1664
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1665
                 */
1666
                break;
1667
            default:
1668
                goto bad_reg;
1669
            }
1670
            break;
1671
        case 13: /* Performance counters */
1672
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1673
                goto bad_reg;
1674
            }
1675
            switch (op2) {
1676
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
1677
                break;
1678
            case 1: /* Event type select */
1679
                env->cp15.c9_pmxevtyper = val & 0xff;
1680
                break;
1681
            case 2: /* Event count register */
1682
                /* Unimplemented (we have no events), RAZ/WI */
1683
                break;
1684
            default:
1685
                goto bad_reg;
1686
            }
1687
            break;
1688
        case 14: /* Performance monitor control */
1689
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1690
                goto bad_reg;
1691
            }
1692
            switch (op2) {
1693
            case 0: /* user enable */
1694
                env->cp15.c9_pmuserenr = val & 1;
1695
                /* changes access rights for cp registers, so flush tbs */
1696
                tb_flush(env);
1697
                break;
1698
            case 1: /* interrupt enable set */
1699
                /* We have no event counters so only the C bit can be changed */
1700
                val &= (1 << 31);
1701
                env->cp15.c9_pminten |= val;
1702
                break;
1703
            case 2: /* interrupt enable clear */
1704
                val &= (1 << 31);
1705
                env->cp15.c9_pminten &= ~val;
1706
                break;
1707
            }
1708
            break;
1709
        default:
1710
            goto bad_reg;
1711
        }
1712
        break;
1713
    case 10: /* MMU TLB lockdown.  */
1714
        /* ??? TLB lockdown not implemented.  */
1715
        break;
1716
    case 12: /* Reserved.  */
1717
        goto bad_reg;
1718
    case 13: /* Process ID.  */
1719
        switch (op2) {
1720
        case 0:
1721
            /* Unlike real hardware the qemu TLB uses virtual addresses,
1722
               not modified virtual addresses, so this causes a TLB flush.
1723
             */
1724
            if (env->cp15.c13_fcse != val)
1725
              tlb_flush(env, 1);
1726
            env->cp15.c13_fcse = val;
1727
            break;
1728
        case 1:
1729
            /* This changes the ASID, so do a TLB flush.  */
1730
            if (env->cp15.c13_context != val
1731
                && !arm_feature(env, ARM_FEATURE_MPU))
1732
              tlb_flush(env, 0);
1733
            env->cp15.c13_context = val;
1734
            break;
1735
        default:
1736
            goto bad_reg;
1737
        }
1738
        break;
1739
    case 14: /* Reserved.  */
1740
        goto bad_reg;
1741
    case 15: /* Implementation specific.  */
1742
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1743
            if (op2 == 0 && crm == 1) {
1744
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1745
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1746
                    tb_flush(env);
1747
                    env->cp15.c15_cpar = val & 0x3fff;
1748
                }
1749
                break;
1750
            }
1751
            goto bad_reg;
1752
        }
1753
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1754
            switch (crm) {
1755
            case 0:
1756
                break;
1757
            case 1: /* Set TI925T configuration.  */
1758
                env->cp15.c15_ticonfig = val & 0xe7;
1759
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1760
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1761
                break;
1762
            case 2: /* Set I_max.  */
1763
                env->cp15.c15_i_max = val;
1764
                break;
1765
            case 3: /* Set I_min.  */
1766
                env->cp15.c15_i_min = val;
1767
                break;
1768
            case 4: /* Set thread-ID.  */
1769
                env->cp15.c15_threadid = val & 0xffff;
1770
                break;
1771
            case 8: /* Wait-for-interrupt (deprecated).  */
1772
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1773
                break;
1774
            default:
1775
                goto bad_reg;
1776
            }
1777
        }
1778
        break;
1779
    }
1780
    return;
1781
bad_reg:
1782
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1783
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1784
              (insn >> 16) & 0xf, crm, op1, op2);
1785
}
1786

    
1787
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1788
{
1789
    int op1;
1790
    int op2;
1791
    int crm;
1792

    
1793
    op1 = (insn >> 21) & 7;
1794
    op2 = (insn >> 5) & 7;
1795
    crm = insn & 0xf;
1796
    switch ((insn >> 16) & 0xf) {
1797
    case 0: /* ID codes.  */
1798
        switch (op1) {
1799
        case 0:
1800
            switch (crm) {
1801
            case 0:
1802
                switch (op2) {
1803
                case 0: /* Device ID.  */
1804
                    return env->cp15.c0_cpuid;
1805
                case 1: /* Cache Type.  */
1806
                    return env->cp15.c0_cachetype;
1807
                case 2: /* TCM status.  */
1808
                    return 0;
1809
                case 3: /* TLB type register.  */
1810
                    return 0; /* No lockable TLB entries.  */
1811
                case 5: /* MPIDR */
1812
                    /* The MPIDR was standardised in v7; prior to
1813
                     * this it was implemented only in the 11MPCore.
1814
                     * For all other pre-v7 cores it does not exist.
1815
                     */
1816
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1817
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1818
                        int mpidr = env->cpu_index;
1819
                        /* We don't support setting cluster ID ([8..11])
1820
                         * so these bits always RAZ.
1821
                         */
1822
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1823
                            mpidr |= (1 << 31);
1824
                            /* Cores which are uniprocessor (non-coherent)
1825
                             * but still implement the MP extensions set
1826
                             * bit 30. (For instance, A9UP.) However we do
1827
                             * not currently model any of those cores.
1828
                             */
1829
                        }
1830
                        return mpidr;
1831
                    }
1832
                    /* otherwise fall through to the unimplemented-reg case */
1833
                default:
1834
                    goto bad_reg;
1835
                }
1836
            case 1:
1837
                if (!arm_feature(env, ARM_FEATURE_V6))
1838
                    goto bad_reg;
1839
                return env->cp15.c0_c1[op2];
1840
            case 2:
1841
                if (!arm_feature(env, ARM_FEATURE_V6))
1842
                    goto bad_reg;
1843
                return env->cp15.c0_c2[op2];
1844
            case 3: case 4: case 5: case 6: case 7:
1845
                return 0;
1846
            default:
1847
                goto bad_reg;
1848
            }
1849
        case 1:
1850
            /* These registers aren't documented on arm11 cores.  However
1851
               Linux looks at them anyway.  */
1852
            if (!arm_feature(env, ARM_FEATURE_V6))
1853
                goto bad_reg;
1854
            if (crm != 0)
1855
                goto bad_reg;
1856
            if (!arm_feature(env, ARM_FEATURE_V7))
1857
                return 0;
1858

    
1859
            switch (op2) {
1860
            case 0:
1861
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1862
            case 1:
1863
                return env->cp15.c0_clid;
1864
            case 7:
1865
                return 0;
1866
            }
1867
            goto bad_reg;
1868
        case 2:
1869
            if (op2 != 0 || crm != 0)
1870
                goto bad_reg;
1871
            return env->cp15.c0_cssel;
1872
        default:
1873
            goto bad_reg;
1874
        }
1875
    case 1: /* System configuration.  */
1876
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1877
            op2 = 0;
1878
        switch (op2) {
1879
        case 0: /* Control register.  */
1880
            return env->cp15.c1_sys;
1881
        case 1: /* Auxiliary control register.  */
1882
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1883
                return env->cp15.c1_xscaleauxcr;
1884
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1885
                goto bad_reg;
1886
            switch (ARM_CPUID(env)) {
1887
            case ARM_CPUID_ARM1026:
1888
                return 1;
1889
            case ARM_CPUID_ARM1136:
1890
            case ARM_CPUID_ARM1136_R2:
1891
            case ARM_CPUID_ARM1176:
1892
                return 7;
1893
            case ARM_CPUID_ARM11MPCORE:
1894
                return 1;
1895
            case ARM_CPUID_CORTEXA8:
1896
                return 2;
1897
            case ARM_CPUID_CORTEXA9:
1898
                return 0;
1899
            default:
1900
                goto bad_reg;
1901
            }
1902
        case 2: /* Coprocessor access register.  */
1903
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1904
                goto bad_reg;
1905
            return env->cp15.c1_coproc;
1906
        default:
1907
            goto bad_reg;
1908
        }
1909
    case 2: /* MMU Page table control / MPU cache control.  */
1910
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1911
            switch (op2) {
1912
            case 0:
1913
                return env->cp15.c2_data;
1914
                break;
1915
            case 1:
1916
                return env->cp15.c2_insn;
1917
                break;
1918
            default:
1919
                goto bad_reg;
1920
            }
1921
        } else {
1922
            switch (op2) {
1923
            case 0:
1924
                return env->cp15.c2_base0;
1925
            case 1:
1926
                return env->cp15.c2_base1;
1927
            case 2:
1928
                return env->cp15.c2_control;
1929
            default:
1930
                goto bad_reg;
1931
            }
1932
        }
1933
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1934
        return env->cp15.c3;
1935
    case 4: /* Reserved.  */
1936
        goto bad_reg;
1937
    case 5: /* MMU Fault status / MPU access permission.  */
1938
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1939
            op2 = 0;
1940
        switch (op2) {
1941
        case 0:
1942
            if (arm_feature(env, ARM_FEATURE_MPU))
1943
                return simple_mpu_ap_bits(env->cp15.c5_data);
1944
            return env->cp15.c5_data;
1945
        case 1:
1946
            if (arm_feature(env, ARM_FEATURE_MPU))
1947
                return simple_mpu_ap_bits(env->cp15.c5_data);
1948
            return env->cp15.c5_insn;
1949
        case 2:
1950
            if (!arm_feature(env, ARM_FEATURE_MPU))
1951
                goto bad_reg;
1952
            return env->cp15.c5_data;
1953
        case 3:
1954
            if (!arm_feature(env, ARM_FEATURE_MPU))
1955
                goto bad_reg;
1956
            return env->cp15.c5_insn;
1957
        default:
1958
            goto bad_reg;
1959
        }
1960
    case 6: /* MMU Fault address.  */
1961
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1962
            if (crm >= 8)
1963
                goto bad_reg;
1964
            return env->cp15.c6_region[crm];
1965
        } else {
1966
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1967
                op2 = 0;
1968
            switch (op2) {
1969
            case 0:
1970
                return env->cp15.c6_data;
1971
            case 1:
1972
                if (arm_feature(env, ARM_FEATURE_V6)) {
1973
                    /* Watchpoint Fault Address.  */
1974
                    return 0; /* Not implemented.  */
1975
                } else {
1976
                    /* Instruction Fault Address.  */
1977
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1978
                       shouldn't do any harm.  */
1979
                    return env->cp15.c6_insn;
1980
                }
1981
            case 2:
1982
                if (arm_feature(env, ARM_FEATURE_V6)) {
1983
                    /* Instruction Fault Address.  */
1984
                    return env->cp15.c6_insn;
1985
                } else {
1986
                    goto bad_reg;
1987
                }
1988
            default:
1989
                goto bad_reg;
1990
            }
1991
        }
1992
    case 7: /* Cache control.  */
1993
        if (crm == 4 && op1 == 0 && op2 == 0) {
1994
            return env->cp15.c7_par;
1995
        }
1996
        /* FIXME: Should only clear Z flag if destination is r15.  */
1997
        env->ZF = 0;
1998
        return 0;
1999
    case 8: /* MMU TLB control.  */
2000
        goto bad_reg;
2001
    case 9:
2002
        switch (crm) {
2003
        case 0: /* Cache lockdown */
2004
            switch (op1) {
2005
            case 0: /* L1 cache.  */
2006
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2007
                    return 0;
2008
                }
2009
                switch (op2) {
2010
                case 0:
2011
                    return env->cp15.c9_data;
2012
                case 1:
2013
                    return env->cp15.c9_insn;
2014
                default:
2015
                    goto bad_reg;
2016
                }
2017
            case 1: /* L2 cache */
2018
                if (crm != 0) {
2019
                    goto bad_reg;
2020
                }
2021
                /* L2 Lockdown and Auxiliary control.  */
2022
                return 0;
2023
            default:
2024
                goto bad_reg;
2025
            }
2026
            break;
2027
        case 12: /* Performance monitor control */
2028
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2029
                goto bad_reg;
2030
            }
2031
            switch (op2) {
2032
            case 0: /* performance monitor control register */
2033
                return env->cp15.c9_pmcr;
2034
            case 1: /* count enable set */
2035
            case 2: /* count enable clear */
2036
                return env->cp15.c9_pmcnten;
2037
            case 3: /* overflow flag status */
2038
                return env->cp15.c9_pmovsr;
2039
            case 4: /* software increment */
2040
            case 5: /* event counter selection register */
2041
                return 0; /* Unimplemented, RAZ/WI */
2042
            default:
2043
                goto bad_reg;
2044
            }
2045
        case 13: /* Performance counters */
2046
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2047
                goto bad_reg;
2048
            }
2049
            switch (op2) {
2050
            case 1: /* Event type select */
2051
                return env->cp15.c9_pmxevtyper;
2052
            case 0: /* Cycle count register */
2053
            case 2: /* Event count register */
2054
                /* Unimplemented, so RAZ/WI */
2055
                return 0;
2056
            default:
2057
                goto bad_reg;
2058
            }
2059
        case 14: /* Performance monitor control */
2060
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2061
                goto bad_reg;
2062
            }
2063
            switch (op2) {
2064
            case 0: /* user enable */
2065
                return env->cp15.c9_pmuserenr;
2066
            case 1: /* interrupt enable set */
2067
            case 2: /* interrupt enable clear */
2068
                return env->cp15.c9_pminten;
2069
            default:
2070
                goto bad_reg;
2071
            }
2072
        default:
2073
            goto bad_reg;
2074
        }
2075
        break;
2076
    case 10: /* MMU TLB lockdown.  */
2077
        /* ??? TLB lockdown not implemented.  */
2078
        return 0;
2079
    case 11: /* TCM DMA control.  */
2080
    case 12: /* Reserved.  */
2081
        goto bad_reg;
2082
    case 13: /* Process ID.  */
2083
        switch (op2) {
2084
        case 0:
2085
            return env->cp15.c13_fcse;
2086
        case 1:
2087
            return env->cp15.c13_context;
2088
        default:
2089
            goto bad_reg;
2090
        }
2091
    case 14: /* Reserved.  */
2092
        goto bad_reg;
2093
    case 15: /* Implementation specific.  */
2094
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2095
            if (op2 == 0 && crm == 1)
2096
                return env->cp15.c15_cpar;
2097

    
2098
            goto bad_reg;
2099
        }
2100
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2101
            switch (crm) {
2102
            case 0:
2103
                return 0;
2104
            case 1: /* Read TI925T configuration.  */
2105
                return env->cp15.c15_ticonfig;
2106
            case 2: /* Read I_max.  */
2107
                return env->cp15.c15_i_max;
2108
            case 3: /* Read I_min.  */
2109
                return env->cp15.c15_i_min;
2110
            case 4: /* Read thread-ID.  */
2111
                return env->cp15.c15_threadid;
2112
            case 8: /* TI925T_status */
2113
                return 0;
2114
            }
2115
            /* TODO: Peripheral port remap register:
2116
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2117
             * controller base address at $rn & ~0xfff and map size of
2118
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2119
            goto bad_reg;
2120
        }
2121
        return 0;
2122
    }
2123
bad_reg:
2124
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2125
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2126
              (insn >> 16) & 0xf, crm, op1, op2);
2127
    return 0;
2128
}
2129

    
2130
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
2131
{
2132
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2133
        env->regs[13] = val;
2134
    } else {
2135
        env->banked_r13[bank_number(mode)] = val;
2136
    }
2137
}
2138

    
2139
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
2140
{
2141
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2142
        return env->regs[13];
2143
    } else {
2144
        return env->banked_r13[bank_number(mode)];
2145
    }
2146
}
2147

    
2148
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
2149
{
2150
    switch (reg) {
2151
    case 0: /* APSR */
2152
        return xpsr_read(env) & 0xf8000000;
2153
    case 1: /* IAPSR */
2154
        return xpsr_read(env) & 0xf80001ff;
2155
    case 2: /* EAPSR */
2156
        return xpsr_read(env) & 0xff00fc00;
2157
    case 3: /* xPSR */
2158
        return xpsr_read(env) & 0xff00fdff;
2159
    case 5: /* IPSR */
2160
        return xpsr_read(env) & 0x000001ff;
2161
    case 6: /* EPSR */
2162
        return xpsr_read(env) & 0x0700fc00;
2163
    case 7: /* IEPSR */
2164
        return xpsr_read(env) & 0x0700edff;
2165
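    /* regs[13] always holds the stack pointer currently in use; current_sp
     * is non-zero when that is the process stack, so the other pointer is
     * kept in v7m.other_sp.
     */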
    case 8: /* MSP */
2166
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2167
    case 9: /* PSP */
2168
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2169
    case 16: /* PRIMASK */
2170
        return (env->uncached_cpsr & CPSR_I) != 0;
2171
    case 17: /* BASEPRI */
2172
    case 18: /* BASEPRI_MAX */
2173
        return env->v7m.basepri;
2174
    case 19: /* FAULTMASK */
2175
        return (env->uncached_cpsr & CPSR_F) != 0;
2176
    case 20: /* CONTROL */
2177
        return env->v7m.control;
2178
    default:
2179
        /* ??? For debugging only.  */
2180
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2181
        return 0;
2182
    }
2183
}
2184

    
2185
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
2186
{
2187
    switch (reg) {
2188
    case 0: /* APSR */
2189
        xpsr_write(env, val, 0xf8000000);
2190
        break;
2191
    case 1: /* IAPSR */
2192
        xpsr_write(env, val, 0xf8000000);
2193
        break;
2194
    case 2: /* EAPSR */
2195
        xpsr_write(env, val, 0xfe00fc00);
2196
        break;
2197
    case 3: /* xPSR */
2198
        xpsr_write(env, val, 0xfe00fc00);
2199
        break;
2200
    case 5: /* IPSR */
2201
        /* IPSR bits are readonly.  */
2202
        break;
2203
    case 6: /* EPSR */
2204
        xpsr_write(env, val, 0x0600fc00);
2205
        break;
2206
    case 7: /* IEPSR */
2207
        xpsr_write(env, val, 0x0600fc00);
2208
        break;
2209
    case 8: /* MSP */
2210
        if (env->v7m.current_sp)
2211
            env->v7m.other_sp = val;
2212
        else
2213
            env->regs[13] = val;
2214
        break;
2215
    case 9: /* PSP */
2216
        if (env->v7m.current_sp)
2217
            env->regs[13] = val;
2218
        else
2219
            env->v7m.other_sp = val;
2220
        break;
2221
    case 16: /* PRIMASK */
2222
        if (val & 1)
2223
            env->uncached_cpsr |= CPSR_I;
2224
        else
2225
            env->uncached_cpsr &= ~CPSR_I;
2226
        break;
2227
    case 17: /* BASEPRI */
2228
        env->v7m.basepri = val & 0xff;
2229
        break;
2230
    case 18: /* BASEPRI_MAX */
2231
        val &= 0xff;
2232
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2233
            env->v7m.basepri = val;
2234
        break;
2235
    case 19: /* FAULTMASK */
2236
        if (val & 1)
2237
            env->uncached_cpsr |= CPSR_F;
2238
        else
2239
            env->uncached_cpsr &= ~CPSR_F;
2240
        break;
2241
    case 20: /* CONTROL */
2242
        env->v7m.control = val & 3;
2243
        switch_v7m_sp(env, (val & 2) != 0);
2244
        break;
2245
    default:
2246
        /* ??? For debugging only.  */
2247
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2248
        return;
2249
    }
2250
}
2251

    
2252
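/* Install externally supplied read/write hooks for coprocessors 0 to 14;
 * cp15 is always handled by the helpers above and cannot be overridden here.
 */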
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2253
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2254
                void *opaque)
2255
{
2256
    if (cpnum < 0 || cpnum > 14) {
2257
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2258
        return;
2259
    }
2260

    
2261
    env->cp[cpnum].cp_read = cp_read;
2262
    env->cp[cpnum].cp_write = cp_write;
2263
    env->cp[cpnum].opaque = opaque;
2264
}
2265

    
2266
#endif
2267

    
2268
/* Note that signed overflow is undefined in C.  The following routines are
2269
   careful to use unsigned types where modulo arithmetic is required.
2270
   Failure to do so _will_ break on newer gcc.  */
2271

    
2272
/* Signed saturating arithmetic.  */
2273

    
2274
/* Perform 16-bit signed saturating addition.  */
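/* Overflow is detected when the operands have the same sign but the result's
 * sign differs; for example add16_sat(0x7000, 0x2000) == 0x7fff and
 * add16_sat(0x9000, 0x9000) == 0x8000.
 */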
2275
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2276
{
2277
    uint16_t res;
2278

    
2279
    res = a + b;
2280
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2281
        if (a & 0x8000)
2282
            res = 0x8000;
2283
        else
2284
            res = 0x7fff;
2285
    }
2286
    return res;
2287
}
2288

    
2289
/* Perform 8-bit signed saturating addition.  */
2290
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2291
{
2292
    uint8_t res;
2293

    
2294
    res = a + b;
2295
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2296
        if (a & 0x80)
2297
            res = 0x80;
2298
        else
2299
            res = 0x7f;
2300
    }
2301
    return res;
2302
}
2303

    
2304
/* Perform 16-bit signed saturating subtraction.  */
2305
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2306
{
2307
    uint16_t res;
2308

    
2309
    res = a - b;
2310
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2311
        if (a & 0x8000)
2312
            res = 0x8000;
2313
        else
2314
            res = 0x7fff;
2315
    }
2316
    return res;
2317
}
2318

    
2319
/* Perform 8-bit signed saturating subtraction.  */
2320
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2321
{
2322
    uint8_t res;
2323

    
2324
    res = a - b;
2325
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2326
        if (a & 0x80)
2327
            res = 0x80;
2328
        else
2329
            res = 0x7f;
2330
    }
2331
    return res;
2332
}
2333

    
2334
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2335
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2336
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2337
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2338
#define PFX q
2339

    
2340
#include "op_addsub.h"
2341

    
2342
/* Unsigned saturating arithmetic.  */
2343
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2344
{
2345
    uint16_t res;
2346
    res = a + b;
2347
    if (res < a)
2348
        res = 0xffff;
2349
    return res;
2350
}
2351

    
2352
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2353
{
2354
    if (a > b)
2355
        return a - b;
2356
    else
2357
        return 0;
2358
}
2359

    
2360
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2361
{
2362
    uint8_t res;
2363
    res = a + b;
2364
    if (res < a)
2365
        res = 0xff;
2366
    return res;
2367
}
2368

    
2369
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2370
{
2371
    if (a > b)
2372
        return a - b;
2373
    else
2374
        return 0;
2375
}
2376

    
2377
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2378
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2379
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2380
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2381
#define PFX uq
2382

    
2383
#include "op_addsub.h"
2384

    
2385
/* Signed modulo arithmetic.  */
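/* The signed and unsigned modulo variants below also compute the GE flags
 * used by the SEL instruction (see HELPER(sel_flags)): signed ops set a
 * lane's GE bits when its result is non-negative, unsigned ops when the
 * addition carries out or the subtraction does not borrow.
 */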
2386
#define SARITH16(a, b, n, op) do { \
2387
    int32_t sum; \
2388
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2389
    RESULT(sum, n, 16); \
2390
    if (sum >= 0) \
2391
        ge |= 3 << (n * 2); \
2392
    } while(0)
2393

    
2394
#define SARITH8(a, b, n, op) do { \
2395
    int32_t sum; \
2396
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2397
    RESULT(sum, n, 8); \
2398
    if (sum >= 0) \
2399
        ge |= 1 << n; \
2400
    } while(0)
2401

    
2402

    
2403
#define ADD16(a, b, n) SARITH16(a, b, n, +)
2404
#define SUB16(a, b, n) SARITH16(a, b, n, -)
2405
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2406
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2407
#define PFX s
2408
#define ARITH_GE
2409

    
2410
#include "op_addsub.h"
2411

    
2412
/* Unsigned modulo arithmetic.  */
2413
#define ADD16(a, b, n) do { \
2414
    uint32_t sum; \
2415
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2416
    RESULT(sum, n, 16); \
2417
    if ((sum >> 16) == 1) \
2418
        ge |= 3 << (n * 2); \
2419
    } while(0)
2420

    
2421
#define ADD8(a, b, n) do { \
2422
    uint32_t sum; \
2423
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2424
    RESULT(sum, n, 8); \
2425
    if ((sum >> 8) == 1) \
2426
        ge |= 1 << n; \
2427
    } while(0)
2428

    
2429
#define SUB16(a, b, n) do { \
2430
    uint32_t sum; \
2431
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2432
    RESULT(sum, n, 16); \
2433
    if ((sum >> 16) == 0) \
2434
        ge |= 3 << (n * 2); \
2435
    } while(0)
2436

    
2437
#define SUB8(a, b, n) do { \
2438
    uint32_t sum; \
2439
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2440
    RESULT(sum, n, 8); \
2441
    if ((sum >> 8) == 0) \
2442
        ge |= 1 << n; \
2443
    } while(0)
2444

    
2445
#define PFX u
2446
#define ARITH_GE
2447

    
2448
#include "op_addsub.h"
2449

    
2450
/* Halved signed arithmetic.  */
2451
#define ADD16(a, b, n) \
2452
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2453
#define SUB16(a, b, n) \
2454
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2455
#define ADD8(a, b, n) \
2456
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2457
#define SUB8(a, b, n) \
2458
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2459
#define PFX sh
2460

    
2461
#include "op_addsub.h"
2462

    
2463
/* Halved unsigned arithmetic.  */
2464
#define ADD16(a, b, n) \
2465
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2466
#define SUB16(a, b, n) \
2467
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2468
#define ADD8(a, b, n) \
2469
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2470
#define SUB8(a, b, n) \
2471
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2472
#define PFX uh
2473

    
2474
#include "op_addsub.h"
2475

    
2476
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2477
{
2478
    if (a > b)
2479
        return a - b;
2480
    else
2481
        return b - a;
2482
}
2483

    
2484
/* Unsigned sum of absolute byte differences.  */
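/* For example usad8(0x01020304, 0x04030201) == 3 + 1 + 1 + 3 == 8.  */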
2485
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2486
{
2487
    uint32_t sum;
2488
    sum = do_usad(a, b);
2489
    sum += do_usad(a >> 8, b >> 8);
2490
    sum += do_usad(a >> 16, b >> 16);
2491
    sum += do_usad(a >> 24, b >> 24);
2492
    return sum;
2493
}
2494

    
2495
/* For ARMv6 SEL instruction.  */
2496
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2497
{
2498
    uint32_t mask;
2499

    
2500
    mask = 0;
2501
    if (flags & 1)
2502
        mask |= 0xff;
2503
    if (flags & 2)
2504
        mask |= 0xff00;
2505
    if (flags & 4)
2506
        mask |= 0xff0000;
2507
    if (flags & 8)
2508
        mask |= 0xff000000;
2509
    return (a & mask) | (b & ~mask);
2510
}
2511

    
2512
uint32_t HELPER(logicq_cc)(uint64_t val)
2513
{
2514
    return (val >> 32) | (val != 0);
2515
}
2516

    
2517
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have an "s" suffix, double precision a
   "d" suffix.  */
2520

    
2521
/* Convert host exception flags to vfp form.  */
2522
static inline int vfp_exceptbits_from_host(int host_bits)
2523
{
2524
    int target_bits = 0;
2525

    
2526
    if (host_bits & float_flag_invalid)
2527
        target_bits |= 1;
2528
    if (host_bits & float_flag_divbyzero)
2529
        target_bits |= 2;
2530
    if (host_bits & float_flag_overflow)
2531
        target_bits |= 4;
2532
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2533
        target_bits |= 8;
2534
    if (host_bits & float_flag_inexact)
2535
        target_bits |= 0x10;
2536
    if (host_bits & float_flag_input_denormal)
2537
        target_bits |= 0x80;
2538
    return target_bits;
2539
}
2540

    
2541
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
2542
{
2543
    int i;
2544
    uint32_t fpscr;
2545

    
2546
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2547
            | (env->vfp.vec_len << 16)
2548
            | (env->vfp.vec_stride << 20);
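    /* Exception flags accumulate in two status words: fp_status for VFP
     * operations and standard_fp_status for the Neon helpers, so merge both
     * before converting them to FPSCR bits.
     */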
2549
    i = get_float_exception_flags(&env->vfp.fp_status);
2550
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2551
    fpscr |= vfp_exceptbits_from_host(i);
2552
    return fpscr;
2553
}
2554

    
2555
uint32_t vfp_get_fpscr(CPUState *env)
2556
{
2557
    return HELPER(vfp_get_fpscr)(env);
2558
}
2559

    
2560
/* Convert vfp exception flags to target form.  */
2561
static inline int vfp_exceptbits_to_host(int target_bits)
2562
{
2563
    int host_bits = 0;
2564

    
2565
    if (target_bits & 1)
2566
        host_bits |= float_flag_invalid;
2567
    if (target_bits & 2)
2568
        host_bits |= float_flag_divbyzero;
2569
    if (target_bits & 4)
2570
        host_bits |= float_flag_overflow;
2571
    if (target_bits & 8)
2572
        host_bits |= float_flag_underflow;
2573
    if (target_bits & 0x10)
2574
        host_bits |= float_flag_inexact;
2575
    if (target_bits & 0x80)
2576
        host_bits |= float_flag_input_denormal;
2577
    return host_bits;
2578
}
2579

    
2580
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
2581
{
2582
    int i;
2583
    uint32_t changed;
2584

    
2585
    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2586
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2587
    env->vfp.vec_len = (val >> 16) & 7;
2588
    env->vfp.vec_stride = (val >> 20) & 3;
2589

    
2590
    changed ^= val;
2591
    if (changed & (3 << 22)) {
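        /* FPSCR.RMode, bits [23:22]: 0 round to nearest even, 1 towards
         * plus infinity, 2 towards minus infinity, 3 towards zero.
         */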
2592
        i = (val >> 22) & 3;
2593
        switch (i) {
2594
        case 0:
2595
            i = float_round_nearest_even;
2596
            break;
2597
        case 1:
2598
            i = float_round_up;
2599
            break;
2600
        case 2:
2601
            i = float_round_down;
2602
            break;
2603
        case 3:
2604
            i = float_round_to_zero;
2605
            break;
2606
        }
2607
        set_float_rounding_mode(i, &env->vfp.fp_status);
2608
    }
2609
    if (changed & (1 << 24)) {
2610
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2611
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2612
    }
2613
    if (changed & (1 << 25))
2614
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2615

    
2616
    i = vfp_exceptbits_to_host(val);
2617
    set_float_exception_flags(i, &env->vfp.fp_status);
2618
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2619
}
2620

    
2621
void vfp_set_fpscr(CPUState *env, uint32_t val)
2622
{
2623
    HELPER(vfp_set_fpscr)(env, val);
2624
}
2625

    
2626
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2627

    
2628
#define VFP_BINOP(name) \
2629
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2630
{ \
2631
    float_status *fpst = fpstp; \
2632
    return float32_ ## name(a, b, fpst); \
2633
} \
2634
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2635
{ \
2636
    float_status *fpst = fpstp; \
2637
    return float64_ ## name(a, b, fpst); \
2638
}
2639
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
2644

    
2645
float32 VFP_HELPER(neg, s)(float32 a)
2646
{
2647
    return float32_chs(a);
2648
}
2649

    
2650
float64 VFP_HELPER(neg, d)(float64 a)
2651
{
2652
    return float64_chs(a);
2653
}
2654

    
2655
float32 VFP_HELPER(abs, s)(float32 a)
2656
{
2657
    return float32_abs(a);
2658
}
2659

    
2660
float64 VFP_HELPER(abs, d)(float64 a)
2661
{
2662
    return float64_abs(a);
2663
}
2664

    
2665
float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2666
{
2667
    return float32_sqrt(a, &env->vfp.fp_status);
2668
}
2669

    
2670
float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2671
{
2672
    return float64_sqrt(a, &env->vfp.fp_status);
2673
}
2674

    
2675
/* XXX: check quiet/signaling case */
2676
#define DO_VFP_cmp(p, type) \
2677
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
2678
{ \
2679
    uint32_t flags; \
2680
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2681
    case 0: flags = 0x6; break; \
2682
    case -1: flags = 0x8; break; \
2683
    case 1: flags = 0x2; break; \
2684
    default: case 2: flags = 0x3; break; \
2685
    } \
2686
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2687
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2688
} \
2689
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
2690
{ \
2691
    uint32_t flags; \
2692
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2693
    case 0: flags = 0x6; break; \
2694
    case -1: flags = 0x8; break; \
2695
    case 1: flags = 0x2; break; \
2696
    default: case 2: flags = 0x3; break; \
2697
    } \
2698
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2699
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2700
}
2701
DO_VFP_cmp(s, float32)
2702
DO_VFP_cmp(d, float64)
2703
#undef DO_VFP_cmp
2704

    
2705
/* Integer to float and float to integer conversions */
2706

    
2707
#define CONV_ITOF(name, fsz, sign) \
2708
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2709
{ \
2710
    float_status *fpst = fpstp; \
2711
    return sign##int32_to_##float##fsz(x, fpst); \
2712
}
2713

    
2714
#define CONV_FTOI(name, fsz, sign, round) \
2715
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2716
{ \
2717
    float_status *fpst = fpstp; \
2718
    if (float##fsz##_is_any_nan(x)) { \
2719
        float_raise(float_flag_invalid, fpst); \
2720
        return 0; \
2721
    } \
2722
    return float##fsz##_to_##sign##int32##round(x, fpst); \
2723
}
2724

    
2725
#define FLOAT_CONVS(name, p, fsz, sign) \
2726
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2727
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2728
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2729

    
2730
FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
2738

    
2739
/* floating point conversion */
2740
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2741
{
2742
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2743
    /* ARM requires that S<->D conversion of any kind of NaN generates
2744
     * a quiet NaN by forcing the most significant frac bit to 1.
2745
     */
2746
    return float64_maybe_silence_nan(r);
2747
}
2748

    
2749
float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2750
{
2751
    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2752
    /* ARM requires that S<->D conversion of any kind of NaN generates
2753
     * a quiet NaN by forcing the most significant frac bit to 1.
2754
     */
2755
    return float32_maybe_silence_nan(r);
2756
}
2757

    
2758
/* VFP3 fixed point conversion.  */
2759
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2760
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2761
                                    void *fpstp) \
2762
{ \
2763
    float_status *fpst = fpstp; \
2764
    float##fsz tmp; \
2765
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2766
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2767
} \
2768
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2769
                                       void *fpstp) \
2770
{ \
2771
    float_status *fpst = fpstp; \
2772
    float##fsz tmp; \
2773
    if (float##fsz##_is_any_nan(x)) { \
2774
        float_raise(float_flag_invalid, fpst); \
2775
        return 0; \
2776
    } \
2777
    tmp = float##fsz##_scalbn(x, shift, fpst); \
2778
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2779
}
2780

    
2781
VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
2790

    
2791
/* Half precision conversions.  */
2792
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
2793
{
2794
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2795
    float32 r = float16_to_float32(make_float16(a), ieee, s);
2796
    if (ieee) {
2797
        return float32_maybe_silence_nan(r);
2798
    }
2799
    return r;
2800
}
2801

    
2802
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
2803
{
2804
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2805
    float16 r = float32_to_float16(a, ieee, s);
2806
    if (ieee) {
2807
        r = float16_maybe_silence_nan(r);
2808
    }
2809
    return float16_val(r);
2810
}
2811

    
2812
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2813
{
2814
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2815
}
2816

    
2817
uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
2818
{
2819
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2820
}
2821

    
2822
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2823
{
2824
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2825
}
2826

    
2827
uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
2828
{
2829
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2830
}
2831

    
2832
#define float32_two make_float32(0x40000000)
2833
#define float32_three make_float32(0x40400000)
2834
#define float32_one_point_five make_float32(0x3fc00000)
2835

    
2836
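/* VRECPS step: returns 2.0 - a * b, with the architecturally required
 * special case that infinity * zero (or a denormal treated as zero)
 * produces exactly 2.0.
 */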
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
2837
{
2838
    float_status *s = &env->vfp.standard_fp_status;
2839
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2840
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2841
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2842
            float_raise(float_flag_input_denormal, s);
2843
        }
2844
        return float32_two;
2845
    }
2846
    return float32_sub(float32_two, float32_mul(a, b, s), s);
2847
}
2848

    
2849
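/* VRSQRTS step: returns (3.0 - a * b) / 2.0; the infinity * zero special
 * case produces exactly 1.5.
 */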
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
2850
{
2851
    float_status *s = &env->vfp.standard_fp_status;
2852
    float32 product;
2853
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2854
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2855
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2856
            float_raise(float_flag_input_denormal, s);
2857
        }
2858
        return float32_one_point_five;
2859
    }
2860
    product = float32_mul(a, b, s);
2861
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2862
}
2863

    
2864
/* NEON helpers.  */
2865

    
2866
/* Constants 256 and 512 are used in some helpers; we avoid relying on
2867
 * int->float conversions at run-time.  */
2868
#define float64_256 make_float64(0x4070000000000000LL)
2869
#define float64_512 make_float64(0x4080000000000000LL)
2870

    
2871
/* The algorithm that must be used to calculate the estimate
2872
 * is specified by the ARM ARM.
2873
 */
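/* For example, for a == 0.5 (the callers normalize their operand into
 * [0.5, 1.0)): q = (int)(0.5 * 512.0) = 256, r = 1.0 / (256.5 / 512.0)
 * ~= 1.9961, s = (int)(256.0 * r + 0.5) = 511, and the returned estimate
 * is 511 / 256.0 ~= 1.9961.
 */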
2874
static float64 recip_estimate(float64 a, CPUState *env)
2875
{
2876
    /* These calculations mustn't set any fp exception flags,
2877
     * so we use a local copy of the fp_status.
2878
     */
2879
    float_status dummy_status = env->vfp.standard_fp_status;
2880
    float_status *s = &dummy_status;
2881
    /* q = (int)(a * 512.0) */
2882
    float64 q = float64_mul(float64_512, a, s);
2883
    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2884

    
2885
    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2886
    q = int64_to_float64(q_int, s);
2887
    q = float64_add(q, float64_half, s);
2888
    q = float64_div(q, float64_512, s);
2889
    q = float64_div(float64_one, q, s);
2890

    
2891
    /* s = (int)(256.0 * r + 0.5) */
2892
    q = float64_mul(q, float64_256, s);
2893
    q = float64_add(q, float64_half, s);
2894
    q_int = float64_to_int64_round_to_zero(q, s);
2895

    
2896
    /* return (double)s / 256.0 */
2897
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2898
}
2899

    
2900
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2901
{
2902
    float_status *s = &env->vfp.standard_fp_status;
2903
    float64 f64;
2904
    uint32_t val32 = float32_val(a);
2905

    
2906
    int result_exp;
2907
    int a_exp = (val32  & 0x7f800000) >> 23;
2908
    int sign = val32 & 0x80000000;
2909

    
2910
    if (float32_is_any_nan(a)) {
2911
        if (float32_is_signaling_nan(a)) {
2912
            float_raise(float_flag_invalid, s);
2913
        }
2914
        return float32_default_nan;
2915
    } else if (float32_is_infinity(a)) {
2916
        return float32_set_sign(float32_zero, float32_is_neg(a));
2917
    } else if (float32_is_zero_or_denormal(a)) {
2918
        if (!float32_is_zero(a)) {
2919
            float_raise(float_flag_input_denormal, s);
2920
        }
2921
        float_raise(float_flag_divbyzero, s);
2922
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2923
    } else if (a_exp >= 253) {
2924
        float_raise(float_flag_underflow, s);
2925
        return float32_set_sign(float32_zero, float32_is_neg(a));
2926
    }
2927

    
2928
    f64 = make_float64((0x3feULL << 52)
2929
                       | ((int64_t)(val32 & 0x7fffff) << 29));
2930

    
2931
    result_exp = 253 - a_exp;
2932

    
2933
    f64 = recip_estimate(f64, env);
2934

    
2935
    val32 = sign
2936
        | ((result_exp & 0xff) << 23)
2937
        | ((float64_val(f64) >> 29) & 0x7fffff);
2938
    return make_float32(val32);
2939
}
2940

    
2941
/* The algorithm that must be used to calculate the estimate
2942
 * is specified by the ARM ARM.
2943
 */
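/* The two branches keep the argument in [0.25, 0.5) or [0.5, 1.0), matching
 * the exponent-parity preserving normalization done by the callers; e.g. for
 * a == 0.25 the first branch gives q0 = 128 and an estimate of 511 / 256.0
 * ~= 2.0, as expected for 1 / sqrt(0.25).
 */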
2944
static float64 recip_sqrt_estimate(float64 a, CPUState *env)
2945
{
2946
    /* These calculations mustn't set any fp exception flags,
2947
     * so we use a local copy of the fp_status.
2948
     */
2949
    float_status dummy_status = env->vfp.standard_fp_status;
2950
    float_status *s = &dummy_status;
2951
    float64 q;
2952
    int64_t q_int;
2953

    
2954
    if (float64_lt(a, float64_half, s)) {
2955
        /* range 0.25 <= a < 0.5 */
2956

    
2957
        /* a in units of 1/512 rounded down */
2958
        /* q0 = (int)(a * 512.0);  */
2959
        q = float64_mul(float64_512, a, s);
2960
        q_int = float64_to_int64_round_to_zero(q, s);
2961

    
2962
        /* reciprocal root r */
2963
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2964
        q = int64_to_float64(q_int, s);
2965
        q = float64_add(q, float64_half, s);
2966
        q = float64_div(q, float64_512, s);
2967
        q = float64_sqrt(q, s);
2968
        q = float64_div(float64_one, q, s);
2969
    } else {
2970
        /* range 0.5 <= a < 1.0 */
2971

    
2972
        /* a in units of 1/256 rounded down */
2973
        /* q1 = (int)(a * 256.0); */
2974
        q = float64_mul(float64_256, a, s);
2975
        int64_t q_int = float64_to_int64_round_to_zero(q, s);
2976

    
2977
        /* reciprocal root r */
2978
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2979
        q = int64_to_float64(q_int, s);
2980
        q = float64_add(q, float64_half, s);
2981
        q = float64_div(q, float64_256, s);
2982
        q = float64_sqrt(q, s);
2983
        q = float64_div(float64_one, q, s);
2984
    }
2985
    /* r in units of 1/256 rounded to nearest */
2986
    /* s = (int)(256.0 * r + 0.5); */
2987

    
2988
    q = float64_mul(q, float64_256, s);
2989
    q = float64_add(q, float64_half, s);
2990
    q_int = float64_to_int64_round_to_zero(q, s);
2991

    
2992
    /* return (double)s / 256.0;*/
2993
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2994
}
2995

    
2996
float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
2997
{
2998
    float_status *s = &env->vfp.standard_fp_status;
2999
    int result_exp;
3000
    float64 f64;
3001
    uint32_t val;
3002
    uint64_t val64;
3003

    
3004
    val = float32_val(a);
3005

    
3006
    if (float32_is_any_nan(a)) {
3007
        if (float32_is_signaling_nan(a)) {
3008
            float_raise(float_flag_invalid, s);
3009
        }
3010
        return float32_default_nan;
3011
    } else if (float32_is_zero_or_denormal(a)) {
3012
        if (!float32_is_zero(a)) {
3013
            float_raise(float_flag_input_denormal, s);
3014
        }
3015
        float_raise(float_flag_divbyzero, s);
3016
        return float32_set_sign(float32_infinity, float32_is_neg(a));
3017
    } else if (float32_is_neg(a)) {
3018
        float_raise(float_flag_invalid, s);
3019
        return float32_default_nan;
3020
    } else if (float32_is_infinity(a)) {
3021
        return float32_zero;
3022
    }
3023

    
3024
    /* Normalize to a double-precision value between 0.25 and 1.0,
3025
     * preserving the parity of the exponent.  */
3026
    if ((val & 0x800000) == 0) {
3027
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3028
                           | (0x3feULL << 52)
3029
                           | ((uint64_t)(val & 0x7fffff) << 29));
3030
    } else {
3031
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3032
                           | (0x3fdULL << 52)
3033
                           | ((uint64_t)(val & 0x7fffff) << 29));
3034
    }
3035

    
3036
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
3037

    
3038
    f64 = recip_sqrt_estimate(f64, env);
3039

    
3040
    val64 = float64_val(f64);
3041

    
3042
    val = ((val64 >> 63)  & 0x80000000)
3043
        | ((result_exp & 0xff) << 23)
3044
        | ((val64 >> 29)  & 0x7fffff);
3045
    return make_float32(val);
3046
}
3047

    
3048
uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
3049
{
3050
    float64 f64;
3051

    
3052
    if ((a & 0x80000000) == 0) {
3053
        return 0xffffffff;
3054
    }
3055

    
3056
    f64 = make_float64((0x3feULL << 52)
3057
                       | ((int64_t)(a & 0x7fffffff) << 21));
3058

    
3059
    f64 = recip_estimate (f64, env);
3060

    
3061
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3062
}
3063

    
3064
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
3065
{
3066
    float64 f64;
3067

    
3068
    if ((a & 0xc0000000) == 0) {
3069
        return 0xffffffff;
3070
    }
3071

    
3072
    if (a & 0x80000000) {
3073
        f64 = make_float64((0x3feULL << 52)
3074
                           | ((uint64_t)(a & 0x7fffffff) << 21));
3075
    } else { /* bits 31-30 == '01' */
3076
        f64 = make_float64((0x3fdULL << 52)
3077
                           | ((uint64_t)(a & 0x3fffffff) << 22));
3078
    }
3079

    
3080
    f64 = recip_sqrt_estimate(f64, env);
3081

    
3082
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3083
}
3084

    
3085
void HELPER(set_teecr)(CPUState *env, uint32_t val)
3086
{
3087
    val &= 1;
3088
    if (env->teecr != val) {
3089
        env->teecr = val;
3090
        tb_flush(env);
3091
    }
3092
}