Statistics
| Branch: | Revision:

root / target-arm / helper.c @ 43fe9bdb

History | View | Annotate | Download (85.6 kB)

1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "exec-all.h"
7
#include "gdbstub.h"
8
#include "helper.h"
9
#include "qemu-common.h"
10
#include "host-utils.h"
11
#if !defined(CONFIG_USER_ONLY)
12
#include "hw/loader.h"
13
#endif
14

    
15
/* Per-model reset values for the CP15 c0 ID register blocks:
   the *_c0_c1 tables hold the CRm=1 space (processor/debug/memory-model
   feature registers) and the *_c0_c2 tables hold the CRm=2 space
   (instruction set attribute registers), eight 32-bit words each.
   They are memcpy'd into env->cp15 by cpu_reset_model_id().  */
static uint32_t cortexa9_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };

static uint32_t cortexa9_cp15_c0_c2[8] =
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
38

    
39
static uint32_t cpu_arm_find_by_name(const char *name);
40

    
41
/* Record that this CPU model implements the given ARM_FEATURE_* flag. */
static inline void set_feature(CPUARMState *env, int feature)
{
    uint32_t bit = 1u << feature;

    env->features |= bit;
}
45

    
46
/* Initialise the model-dependent, reset-invariant parts of ENV for the
   CPU identified by ID: the feature bit set, VFP ID registers, CP15 ID
   and cache-type registers, and the reset value of the system control
   register (cp15.c1_sys).  Called from cpu_reset() after the mutable
   state has been zeroed.  Aborts on an unknown ID.  */
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136_R2:
    case ARM_CPUID_ARM1136:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x82048004;
        /* CLIDR: one level of cache with separate I and D caches.  */
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXA9:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        /* Note that A9 supports the MP extensions even for
         * A9UP and single-core A9MP (which are both different
         * and valid configurations; we don't model A9UP).
         */
        set_feature(env, ARM_FEATURE_V7MP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x80038003;
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_DIV);
        set_feature(env, ARM_FEATURE_V7MP);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_SA1100:
    case ARM_CPUID_SA1110:
        set_feature(env, ARM_FEATURE_STRONGARM);
        env->cp15.c1_sys = 0x00000070;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}
227

    
228
/* Reset ENV to its architectural power-on state.  The mutable state up to
   the `breakpoints` field is zeroed, then re-populated from the CPU model
   (cpu_reset_model_id) and the configuration-dependent defaults below.  */
void cpu_reset(CPUARMState *env)
{
    uint32_t id;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* Preserve the CPUID across the wipe so the model can be re-applied.  */
    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  Initial SP and PC are loaded from ROM.  */
    if (IS_M(env)) {
        uint32_t pc;
        uint8_t *rom;
        env->uncached_cpsr &= ~CPSR_I;
        rom = rom_ptr(0);
        if (rom) {
            /* We should really use ldl_phys here, in case the guest
               modified flash and reset itself.  However images
               loaded via -kernel have not been copied yet, so load the
               values directly from there.  */
            env->regs[13] = ldl_p(rom);
            pc = ldl_p(rom + 4);
            /* Bit 0 of the reset vector selects Thumb state.  */
            env->thumb = pc & 1;
            env->regs[15] = pc & ~1;
        }
    }
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
#endif
    /* The "standard FPSCR" status used by Neon always runs flush-to-zero,
       default-NaN; the normal VFP status honours the guest's FPSCR.  */
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    tlb_flush(env, 1);
}
283

    
284
/* gdbstub read accessor for the VFP/Neon coprocessor registers.
   Stores register REG into BUF and returns the number of bytes written,
   or 0 if REG is out of range.  Layout: D0..D15 (D0..D31 with VFP3),
   then 16 Q-register aliases when Neon is present, then
   FPSID/FPSCR/FPEXC.  */
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            /* Each Q register is a pair of consecutive D registers.  */
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
310

    
311
/* gdbstub write accessor, mirror of vfp_gdb_get_reg: loads register REG
   from BUF and returns the number of bytes consumed, or 0 if REG is out
   of range.  */
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    /* Only the EN bit of FPEXC is writable from the debugger.  */
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
335

    
336
/* Allocate and initialise a CPU state for the model named CPU_MODEL.
   Returns NULL if the model name is unknown.  Also performs one-time
   translator initialisation and registers the appropriate VFP/Neon
   register set with the gdb stub.  */
CPUARMState *cpu_arm_init(const char *cpu_model)
{
    CPUARMState *env;
    uint32_t id;
    static int inited = 0;  /* arm_translate_init() must run exactly once */

    id = cpu_arm_find_by_name(cpu_model);
    if (id == 0)
        return NULL;
    env = qemu_mallocz(sizeof(CPUARMState));
    cpu_exec_init(env);
    if (!inited) {
        inited = 1;
        arm_translate_init();
    }

    env->cpu_model_str = cpu_model;
    env->cp15.c0_cpuid = id;
    cpu_reset(env);
    /* Register counts: 32 D regs + 16 Q aliases + 3 status = 51 (Neon),
       32 D regs + 3 status = 35 (VFP3), 16 D regs + 3 status = 19 (VFP).  */
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}
368

    
369
/* (CPUID, model name) pair used by the CPU model lookup table below.  */
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};
373

    
374
/* Table of supported CPU models, terminated by a NULL name.  Searched by
   cpu_arm_find_by_name() and listed by arm_cpu_list().  */
static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_CORTEXA9, "cortex-a9"},
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_SA1100,    "sa1100" },
    { ARM_CPUID_SA1110,    "sa1110" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { ARM_CPUID_ANY, "any"},
    { 0, NULL}
};
402

    
403
/* Print the names of all supported CPU models to F via CPU_FPRINTF. */
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    const struct arm_cpu_t *entry;

    cpu_fprintf(f, "Available CPUs:\n");
    for (entry = arm_cpu_names; entry->name != NULL; entry++) {
        cpu_fprintf(f, "  %s\n", entry->name);
    }
}
412

    
413
/* return 0 if not found */
414
static uint32_t cpu_arm_find_by_name(const char *name)
415
{
416
    int i;
417
    uint32_t id;
418

    
419
    id = 0;
420
    for (i = 0; arm_cpu_names[i].name; i++) {
421
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
422
            id = arm_cpu_names[i].id;
423
            break;
424
        }
425
    }
426
    return id;
427
}
428

    
429
/* Release a CPU state created by cpu_arm_init().  The state was
   allocated with qemu_mallocz(), so it must be returned through
   qemu_free() rather than plain free() to keep allocator usage
   consistent.  */
void cpu_arm_close(CPUARMState *env)
{
    qemu_free(env);
}
433

    
434
/* Reconstruct the architectural CPSR value from the decomposed fields in
   ENV.  The NZCVQ flags, GE bits, IT (if-then) state and Thumb bit are
   stored separately for fast access; everything else lives in
   uncached_cpsr.  */
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);  /* env->ZF is zero exactly when the Z flag is set */
    /* N is the sign bit of NF, V is the sign bit of VF (moved to bit 28),
       IT[1:0] go to bits 26-25 and IT[7:2] to bits 15-10.  */
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}
444

    
445
/* Write VAL to the CPSR, updating only the bits selected by MASK.
   Cached flag fields (NZCVQ, GE, IT, Thumb) are unpacked into their
   dedicated ENV fields; a mode-field change triggers a register bank
   switch via switch_mode().  */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;  /* stored inverted: 0 means Z set */
        env->NF = val;              /* N is read back from the sign bit */
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;  /* V kept in the sign bit */
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* Switch register banks if the mode field actually changes.  */
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        switch_mode(env, val & CPSR_M);
    }
    /* The cached bits above are not stored in uncached_cpsr.  */
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
475

    
476
/* Sign/zero extend */
477
uint32_t HELPER(sxtb16)(uint32_t x)
478
{
479
    uint32_t res;
480
    res = (uint16_t)(int8_t)x;
481
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
482
    return res;
483
}
484

    
485
uint32_t HELPER(uxtb16)(uint32_t x)
486
{
487
    uint32_t res;
488
    res = (uint16_t)(uint8_t)x;
489
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
490
    return res;
491
}
492

    
493
/* CLZ: count leading zero bits of X (clz32() from host-utils). */
uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}
497

    
498
/* SDIV with ARM semantics: division by zero yields 0, and the one
   overflowing case INT_MIN / -1 yields INT_MIN instead of trapping
   (plain C division would be undefined behaviour for both).  */
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
      return 0;
    if (num == INT_MIN && den == -1)
      return INT_MIN;
    return num / den;
}
506

    
507
/* UDIV with ARM semantics: division by zero yields 0. */
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}
513

    
514
uint32_t HELPER(rbit)(uint32_t x)
515
{
516
    x =  ((x & 0xff000000) >> 24)
517
       | ((x & 0x00ff0000) >> 8)
518
       | ((x & 0x0000ff00) << 8)
519
       | ((x & 0x000000ff) << 24);
520
    x =  ((x & 0xf0f0f0f0) >> 4)
521
       | ((x & 0x0f0f0f0f) << 4);
522
    x =  ((x & 0x88888888) >> 3)
523
       | ((x & 0x44444444) >> 1)
524
       | ((x & 0x22222222) << 1)
525
       | ((x & 0x11111111) << 3);
526
    return x;
527
}
528

    
529
uint32_t HELPER(abs)(uint32_t x)
530
{
531
    return ((int32_t)x < 0) ? -x : x;
532
}
533

    
534
#if defined(CONFIG_USER_ONLY)
535

    
536
/* User-mode emulation: exceptions are handled entirely by the caller
   (the linux-user main loop), so just clear the pending exception.  */
void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}
540

    
541
/* User-mode emulation MMU fault handler: there is no MMU, so every fault
   is converted into a prefetch abort (rw == 2 means instruction fetch)
   or data abort and reported back to the caller (always returns 1).  */
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx, int is_softmmu)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;   /* fault address register (IFAR) */
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;   /* fault address register (DFAR) */
    }
    return 1;
}
553

    
554
/* These should probably raise undefined insn exceptions.  */
555
/* User-mode emulation: generic coprocessor writes are not supported;
   abort with the coprocessor number and offending instruction.
   (These should probably raise undefined insn exceptions instead.)  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    /* Redundant trailing `return;` removed.  */
}
561

    
562
/* User-mode emulation: generic coprocessor reads are not supported;
   abort with the coprocessor number and offending instruction.  */
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;  /* not reached */
}
568

    
569
/* User-mode emulation: CP15 writes are privileged and unsupported.  */
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}
573

    
574
/* User-mode emulation: CP15 reads are privileged and unsupported.
   The `return 0` is unreachable but keeps the non-void function
   well-formed, matching the sibling HELPER(get_cp).  */
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
    return 0;
}
578

    
579
/* These should probably raise undefined insn exceptions.  */
580
/* User-mode emulation: v7-M special-register writes are unsupported.
   The abort message previously said "v7m_mrs" — a copy/paste slip from
   the MRS helper below; corrected to name this (MSR) helper.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}
584

    
585
/* User-mode emulation: v7-M special-register reads are unsupported.  */
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;  /* not reached */
}
590

    
591
/* User-mode emulation: the CPU never leaves USR mode, so any attempt to
   switch to another mode is a bug.  */
void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}
596

    
597
/* User-mode emulation: banked registers only exist in system mode.  */
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}
601

    
602
/* User-mode emulation: banked registers only exist in system mode.  */
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;  /* not reached */
}
607

    
608
#else
609

    
610
extern int semihosting_enabled;
611

    
612
/* Map CPU modes onto saved register banks.  */
613
static inline int bank_number (int mode)
614
{
615
    switch (mode) {
616
    case ARM_CPU_MODE_USR:
617
    case ARM_CPU_MODE_SYS:
618
        return 0;
619
    case ARM_CPU_MODE_SVC:
620
        return 1;
621
    case ARM_CPU_MODE_ABT:
622
        return 2;
623
    case ARM_CPU_MODE_UND:
624
        return 3;
625
    case ARM_CPU_MODE_IRQ:
626
        return 4;
627
    case ARM_CPU_MODE_FIQ:
628
        return 5;
629
    }
630
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
631
    return -1;
632
}
633

    
634
/* Switch ENV to the given CPU mode: save r13/r14/SPSR into the bank of
   the old mode and load them from the bank of the new mode.  FIQ mode
   additionally banks r8-r12, handled by the memcpy pairs below.  The
   CPSR mode field itself is updated by the caller (cpsr_write).  */
void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    /* Swap r8-r12 with the FIQ shadow copies when entering/leaving FIQ.  */
    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Save the outgoing mode's banked registers...  */
    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    /* ...and restore the incoming mode's.  */
    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
661

    
662
/* Push VAL onto the current v7-M stack (full-descending, via r13).  */
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}
667

    
668
/* Pop and return a word from the current v7-M stack (via r13).  */
static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}
675

    
676
/* Switch to V7M main or process stack pointer.  */
677
/* Switch to V7M main or process stack pointer: r13 always holds the
   active SP, the inactive one is parked in v7m.other_sp.  PROCESS is
   nonzero for the process stack (PSP), zero for the main stack (MSP).  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}
687

    
688
/* Perform a v7-M exception return.  The magic EXC_RETURN value that was
   loaded into the PC arrives in regs[15]; its bits select the return
   stack.  The exception frame (r0-r3, r12, lr, pc, xPSR) is popped and
   any stack realignment recorded in xPSR bit 9 is undone.  */
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];  /* EXC_RETURN value */
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
718

    
719
/* Handle an exception on a v7-M (Cortex-M) CPU.  Most exception kinds
   are simply marked pending on the NVIC; a taken IRQ pushes the
   exception frame, switches to the main stack/handler mode and jumps
   through the vector table.  */
static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    /* Build the EXC_RETURN value: bit 2 = return stack was the process
       stack, bit 3 = we were in thread mode (no active exception).  */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;  /* step past the SVC instruction */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            /* BKPT 0xab is the semihosting call convention.  */
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;  /* record the realignment in xPSR bit 9 */
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);  /* handlers always run on the main stack */
    env->uncached_cpsr &= ~CPSR_IT;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;  /* vector entries encode the Thumb bit */
}
793

    
794
/* Handle a CPU exception.  */
795
/* Handle a CPU exception (system emulation).  Selects the target mode,
   vector address, CPSR interrupt-mask bits and return-address offset for
   the pending exception_index, optionally intercepts semihosting calls,
   then performs the mode switch and vectors to the handler.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        /* LR must point after the undefined instruction (2 or 4 bytes).  */
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = lduw_code(env->regs[15]) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors (SCTLR.V relocates the vector table to 0xffff0000).  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;  /* SCTLR.TE */
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
902

    
903
/* Check section/page access permissions.
904
   Returns the page protection flags, or zero if the access is not
905
   permitted.  */
906
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
907
                           int is_user)
908
{
909
  int prot_ro;
910

    
911
  if (domain == 3)
912
    return PAGE_READ | PAGE_WRITE;
913

    
914
  if (access_type == 1)
915
      prot_ro = 0;
916
  else
917
      prot_ro = PAGE_READ;
918

    
919
  switch (ap) {
920
  case 0:
921
      if (access_type == 1)
922
          return 0;
923
      switch ((env->cp15.c1_sys >> 8) & 3) {
924
      case 1:
925
          return is_user ? 0 : PAGE_READ;
926
      case 2:
927
          return PAGE_READ;
928
      default:
929
          return 0;
930
      }
931
  case 1:
932
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
933
  case 2:
934
      if (is_user)
935
          return prot_ro;
936
      else
937
          return PAGE_READ | PAGE_WRITE;
938
  case 3:
939
      return PAGE_READ | PAGE_WRITE;
940
  case 4: /* Reserved.  */
941
      return 0;
942
  case 5:
943
      return is_user ? 0 : prot_ro;
944
  case 6:
945
      return prot_ro;
946
  case 7:
947
      if (!arm_feature (env, ARM_FEATURE_V7))
948
          return 0;
949
      return prot_ro;
950
  default:
951
      abort();
952
  }
953
}
954

    
955
/* Return the physical address of the level 1 descriptor for ADDRESS.
   Selects TTBR0 or TTBR1 according to the TTBCR-derived address mask.  */
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
{
    uint32_t base;

    if (address & env->cp15.c2_mask) {
        /* Upper address range: TTBR1, always 16k aligned.  */
        base = env->cp15.c2_base1 & 0xffffc000;
    } else {
        /* Lower address range: TTBR0, aligned per TTBCR.N.  */
        base = env->cp15.c2_base0 & env->cp15.c2_base_mask;
    }
    /* Index by VA[31:20]; each descriptor is 4 bytes.  */
    return base | ((address >> 18) & 0x3ffc);
}
967

    
968
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
969
                            int is_user, uint32_t *phys_ptr, int *prot,
970
                            target_ulong *page_size)
971
{
972
    int code;
973
    uint32_t table;
974
    uint32_t desc;
975
    int type;
976
    int ap;
977
    int domain;
978
    uint32_t phys_addr;
979

    
980
    /* Pagetable walk.  */
981
    /* Lookup l1 descriptor.  */
982
    table = get_level1_table_address(env, address);
983
    desc = ldl_phys(table);
984
    type = (desc & 3);
985
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
986
    if (type == 0) {
987
        /* Section translation fault.  */
988
        code = 5;
989
        goto do_fault;
990
    }
991
    if (domain == 0 || domain == 2) {
992
        if (type == 2)
993
            code = 9; /* Section domain fault.  */
994
        else
995
            code = 11; /* Page domain fault.  */
996
        goto do_fault;
997
    }
998
    if (type == 2) {
999
        /* 1Mb section.  */
1000
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1001
        ap = (desc >> 10) & 3;
1002
        code = 13;
1003
        *page_size = 1024 * 1024;
1004
    } else {
1005
        /* Lookup l2 entry.  */
1006
        if (type == 1) {
1007
            /* Coarse pagetable.  */
1008
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1009
        } else {
1010
            /* Fine pagetable.  */
1011
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1012
        }
1013
        desc = ldl_phys(table);
1014
        switch (desc & 3) {
1015
        case 0: /* Page translation fault.  */
1016
            code = 7;
1017
            goto do_fault;
1018
        case 1: /* 64k page.  */
1019
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1020
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1021
            *page_size = 0x10000;
1022
            break;
1023
        case 2: /* 4k page.  */
1024
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1025
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1026
            *page_size = 0x1000;
1027
            break;
1028
        case 3: /* 1k page.  */
1029
            if (type == 1) {
1030
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1031
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1032
                } else {
1033
                    /* Page translation fault.  */
1034
                    code = 7;
1035
                    goto do_fault;
1036
                }
1037
            } else {
1038
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1039
            }
1040
            ap = (desc >> 4) & 3;
1041
            *page_size = 0x400;
1042
            break;
1043
        default:
1044
            /* Never happens, but compiler isn't smart enough to tell.  */
1045
            abort();
1046
        }
1047
        code = 15;
1048
    }
1049
    *prot = check_ap(env, ap, domain, access_type, is_user);
1050
    if (!*prot) {
1051
        /* Access permission fault.  */
1052
        goto do_fault;
1053
    }
1054
    *prot |= PAGE_EXEC;
1055
    *phys_ptr = phys_addr;
1056
    return 0;
1057
do_fault:
1058
    return code | (domain << 4);
1059
}
1060

    
1061
/* Do a VMSAv6 short-descriptor page table walk.
 *
 *  address      virtual address (already FCSE-adjusted by the caller)
 *  access_type  0 == data read, 1 == data write, 2 == instruction fetch
 *  is_user      nonzero for an unprivileged access
 *  phys_ptr     out: translated physical address
 *  prot         out: PAGE_READ/PAGE_WRITE/PAGE_EXEC permissions
 *  page_size    out: size of the translated region
 *
 * Returns 0 on success, otherwise a fault status value with the fault
 * code in bits [3:0] and the domain in bits [7:4].
 */
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;          /* execute-never bit from the descriptor */
    int type;
    int ap;               /* 3-bit access permission field */
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 4) & 0x1e;
    }
    /* Extract this region's 2-bit field from the DACR (c3).  */
    domain = (env->cp15.c3 >> domain) & 3;
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        /* AP[1:0] from bits [11:10], AP[2] from bit 15.  */
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        /* AP[1:0] from bits [5:4], AP[2] from bit 9.  */
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1; /* XN is bit 0 of a small-page descriptor */
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain == 3) {
        /* Manager domain: all accesses permitted regardless of AP/XN.  */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
1163

    
1164
/* Look up ADDRESS in the MPU region registers and compute the access
 * permissions.  The MPU identity-maps addresses, so *phys_ptr is set
 * to ADDRESS itself.
 * access_type: 2 == instruction fetch, otherwise data access.
 * Returns 0 on success (filling in *phys_ptr and *prot), 2 if no
 * region matches, or 1 on a permission fault.
 */
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    /* Walk the regions from 7 down to 0: the highest-numbered enabled
       region containing the address wins.  */
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        /* Bit 0 of the region register is the enable bit.  */
        if ((base & 1) == 0)
            continue;
        /* Bits [5:1] encode the region size.  */
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    /* Pick the 4-bit AP field for region N from the instruction or
       data access-permission register.  */
    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        /* Privileged read/write; no user access.  */
        if (is_user)
          return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        /* Privileged read/write; user read-only.  */
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        /* Full access.  */
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        /* Privileged read-only.  */
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        /* Read-only for everyone.  */
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
1223

    
1224
static inline int get_phys_addr(CPUState *env, uint32_t address,
1225
                                int access_type, int is_user,
1226
                                uint32_t *phys_ptr, int *prot,
1227
                                target_ulong *page_size)
1228
{
1229
    /* Fast Context Switch Extension.  */
1230
    if (address < 0x02000000)
1231
        address += env->cp15.c13_fcse;
1232

    
1233
    if ((env->cp15.c1_sys & 1) == 0) {
1234
        /* MMU/MPU disabled.  */
1235
        *phys_ptr = address;
1236
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1237
        *page_size = TARGET_PAGE_SIZE;
1238
        return 0;
1239
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1240
        *page_size = TARGET_PAGE_SIZE;
1241
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1242
                                 prot);
1243
    } else if (env->cp15.c1_sys & (1 << 23)) {
1244
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1245
                                prot, page_size);
1246
    } else {
1247
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1248
                                prot, page_size);
1249
    }
1250
}
1251

    
1252
/* Handle a TLB miss: translate ADDRESS and install the mapping, or
   record fault state and raise the appropriate abort exception.
   Returns 0 when the page was mapped, 1 when a fault was raised.  */
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int mmu_idx, int is_softmmu)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int is_user = (mmu_idx == MMU_USER_IDX);
    int ret;

    ret = get_phys_addr(env, address, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  Align to 1k so ARMv5 tiny pages
           are handled correctly.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    /* Translation failed: latch the fault status and address.  */
    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
        return 1;
    }
    env->cp15.c5_data = ret;
    if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
        /* On v6, flag the data fault as a write (FSR bit 11).  */
        env->cp15.c5_data |= (1 << 11);
    }
    env->cp15.c6_data = address;
    env->exception_index = EXCP_DATA_ABORT;
    return 1;
}
1284

    
1285
/* Translate ADDR for a debugger access (privileged read); returns the
   physical address, or -1 if the translation faults.  */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;

    if (get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size) != 0) {
        return -1;
    }
    return phys_addr;
}
1299

    
1300
/* Forward a coprocessor register write to a user-registered
   coprocessor, if one has a write hook installed.  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    /* Decode the coprocessor access fields from the instruction.  */
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (!env->cp[cp_num].cp_write) {
        return;
    }
    env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                             cp_info, src, operand, val);
}
1311

    
1312
/* Forward a coprocessor register read to a user-registered
   coprocessor; returns 0 if no read hook is installed.  */
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    /* Decode the coprocessor access fields from the instruction.  */
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (!env->cp[cp_num].cp_read) {
        return 0;
    }
    return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                   cp_info, dest, operand);
}
1324

    
1325
/* Return basic MPU access permission bits: compress the extended
   format (4 bits per region) down to 2 bits per region by keeping the
   low 2 bits of each nibble.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t result = 0;
    int n;

    for (n = 0; n < 8; n++) {
        result |= ((val >> (n * 4)) & 3) << (n * 2);
    }
    return result;
}
1339

    
1340
/* Pad basic MPU access permission bits to extended format: expand
   2 bits per region into the low 2 bits of each 4-bit field.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t result = 0;
    int n;

    for (n = 0; n < 8; n++) {
        result |= ((val >> (n * 2)) & 3) << (n * 4);
    }
    return result;
}
1354

    
1355
/* Write VAL to a cp15 register.  The register is identified by fields
 * of the original instruction: CRn in insn[19:16], op1 in insn[23:21],
 * op2 in insn[7:5] and CRm in insn[3:0].  Writes to unimplemented
 * registers abort via bad_reg.
 */
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        /* ID codes.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 2 && crm == 0 && op2 == 0) {
            /* CSSELR: cache size selection.  */
            env->cp15.c0_cssel = val & 0xf;
            break;
        }
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            if (env->cp15.c1_coproc != val) {
                env->cp15.c1_coproc = val;
                /* ??? Is this safe when called from within a TB?  */
                tb_flush(env);
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                /* TTBCR: recompute the TTBR select and base masks
                   from the new value of N.  */
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            /* MPU stores permissions internally in extended format.  */
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            /* Extended-format access permissions: MPU only.  */
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        if (op1 != 0) {
            goto bad_reg;
        }
        /* No cache, so nothing to do except VA->PA translations. */
        if (arm_feature(env, ARM_FEATURE_V6K)) {
            switch (crm) {
            case 4:
                /* Direct write to the PAR; mask differs by version.  */
                if (arm_feature(env, ARM_FEATURE_V7)) {
                    env->cp15.c7_par = val & 0xfffff6ff;
                } else {
                    env->cp15.c7_par = val & 0xfffff1ff;
                }
                break;
            case 8: {
                /* VA->PA translation operations (ATS*): op2 selects
                   privilege (bit 1) and read/write (bit 0).  */
                uint32_t phys_addr;
                target_ulong page_size;
                int prot;
                int ret, is_user = op2 & 2;
                int access_type = op2 & 1;

                if (op2 & 4) {
                    /* Other states are only available with TrustZone */
                    goto bad_reg;
                }
                ret = get_phys_addr(env, val, access_type, is_user,
                                    &phys_addr, &prot, &page_size);
                if (ret == 0) {
                    /* We do not set any attribute bits in the PAR */
                    if (page_size == (1 << 24)
                        && arm_feature(env, ARM_FEATURE_V7)) {
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
                    } else {
                        env->cp15.c7_par = phys_addr & 0xfffff000;
                    }
                } else {
                    /* Fold the fault status into the PAR: low 4 bits
                     * into bits [4:1], and bits 10 and 12 down into
                     * bits 5 and 6, with bit 0 flagging the fault.
                     * (Bug fix: the masks were previously written as
                     * "(10 << 1)" / "(12 << 1)" instead of "(1 << 10)"
                     * / "(1 << 12)", so those two terms were always
                     * zero after the shifts.)
                     */
                    env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                                       ((ret & (1 << 12)) >> 6) |
                                       ((ret & 0xf) << 1) | 1;
                }
                break;
            }
            }
        }
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
            break;
        case 2: /* Invalidate on ASID.  */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry on MVA.  */
            /* ??? This is like case 1, but ignores ASID.  */
            tlb_flush(env, 1);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
            break; /* Ignore ReadBuffer access */
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
              tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
              tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration.  */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max.  */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min.  */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID.  */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated).  */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}
1667

    
1668
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1669
{
1670
    int op1;
1671
    int op2;
1672
    int crm;
1673

    
1674
    op1 = (insn >> 21) & 7;
1675
    op2 = (insn >> 5) & 7;
1676
    crm = insn & 0xf;
1677
    switch ((insn >> 16) & 0xf) {
1678
    case 0: /* ID codes.  */
1679
        switch (op1) {
1680
        case 0:
1681
            switch (crm) {
1682
            case 0:
1683
                switch (op2) {
1684
                case 0: /* Device ID.  */
1685
                    return env->cp15.c0_cpuid;
1686
                case 1: /* Cache Type.  */
1687
                    return env->cp15.c0_cachetype;
1688
                case 2: /* TCM status.  */
1689
                    return 0;
1690
                case 3: /* TLB type register.  */
1691
                    return 0; /* No lockable TLB entries.  */
1692
                case 5: /* MPIDR */
1693
                    /* The MPIDR was standardised in v7; prior to
1694
                     * this it was implemented only in the 11MPCore.
1695
                     * For all other pre-v7 cores it does not exist.
1696
                     */
1697
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1698
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1699
                        int mpidr = env->cpu_index;
1700
                        /* We don't support setting cluster ID ([8..11])
1701
                         * so these bits always RAZ.
1702
                         */
1703
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1704
                            mpidr |= (1 << 31);
1705
                            /* Cores which are uniprocessor (non-coherent)
1706
                             * but still implement the MP extensions set
1707
                             * bit 30. (For instance, A9UP.) However we do
1708
                             * not currently model any of those cores.
1709
                             */
1710
                        }
1711
                        return mpidr;
1712
                    }
1713
                    /* otherwise fall through to the unimplemented-reg case */
1714
                default:
1715
                    goto bad_reg;
1716
                }
1717
            case 1:
1718
                if (!arm_feature(env, ARM_FEATURE_V6))
1719
                    goto bad_reg;
1720
                return env->cp15.c0_c1[op2];
1721
            case 2:
1722
                if (!arm_feature(env, ARM_FEATURE_V6))
1723
                    goto bad_reg;
1724
                return env->cp15.c0_c2[op2];
1725
            case 3: case 4: case 5: case 6: case 7:
1726
                return 0;
1727
            default:
1728
                goto bad_reg;
1729
            }
1730
        case 1:
1731
            /* These registers aren't documented on arm11 cores.  However
1732
               Linux looks at them anyway.  */
1733
            if (!arm_feature(env, ARM_FEATURE_V6))
1734
                goto bad_reg;
1735
            if (crm != 0)
1736
                goto bad_reg;
1737
            if (!arm_feature(env, ARM_FEATURE_V7))
1738
                return 0;
1739

    
1740
            switch (op2) {
1741
            case 0:
1742
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1743
            case 1:
1744
                return env->cp15.c0_clid;
1745
            case 7:
1746
                return 0;
1747
            }
1748
            goto bad_reg;
1749
        case 2:
1750
            if (op2 != 0 || crm != 0)
1751
                goto bad_reg;
1752
            return env->cp15.c0_cssel;
1753
        default:
1754
            goto bad_reg;
1755
        }
1756
    case 1: /* System configuration.  */
1757
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1758
            op2 = 0;
1759
        switch (op2) {
1760
        case 0: /* Control register.  */
1761
            return env->cp15.c1_sys;
1762
        case 1: /* Auxiliary control register.  */
1763
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1764
                return env->cp15.c1_xscaleauxcr;
1765
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1766
                goto bad_reg;
1767
            switch (ARM_CPUID(env)) {
1768
            case ARM_CPUID_ARM1026:
1769
                return 1;
1770
            case ARM_CPUID_ARM1136:
1771
            case ARM_CPUID_ARM1136_R2:
1772
                return 7;
1773
            case ARM_CPUID_ARM11MPCORE:
1774
                return 1;
1775
            case ARM_CPUID_CORTEXA8:
1776
                return 2;
1777
            case ARM_CPUID_CORTEXA9:
1778
                return 0;
1779
            default:
1780
                goto bad_reg;
1781
            }
1782
        case 2: /* Coprocessor access register.  */
1783
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1784
                goto bad_reg;
1785
            return env->cp15.c1_coproc;
1786
        default:
1787
            goto bad_reg;
1788
        }
1789
    case 2: /* MMU Page table control / MPU cache control.  */
1790
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1791
            switch (op2) {
1792
            case 0:
1793
                return env->cp15.c2_data;
1794
                break;
1795
            case 1:
1796
                return env->cp15.c2_insn;
1797
                break;
1798
            default:
1799
                goto bad_reg;
1800
            }
1801
        } else {
1802
            switch (op2) {
1803
            case 0:
1804
                return env->cp15.c2_base0;
1805
            case 1:
1806
                return env->cp15.c2_base1;
1807
            case 2:
1808
                return env->cp15.c2_control;
1809
            default:
1810
                goto bad_reg;
1811
            }
1812
        }
1813
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1814
        return env->cp15.c3;
1815
    case 4: /* Reserved.  */
1816
        goto bad_reg;
1817
    case 5: /* MMU Fault status / MPU access permission.  */
1818
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1819
            op2 = 0;
1820
        switch (op2) {
1821
        case 0:
1822
            if (arm_feature(env, ARM_FEATURE_MPU))
1823
                return simple_mpu_ap_bits(env->cp15.c5_data);
1824
            return env->cp15.c5_data;
1825
        case 1:
1826
            if (arm_feature(env, ARM_FEATURE_MPU))
1827
                return simple_mpu_ap_bits(env->cp15.c5_data);
1828
            return env->cp15.c5_insn;
1829
        case 2:
1830
            if (!arm_feature(env, ARM_FEATURE_MPU))
1831
                goto bad_reg;
1832
            return env->cp15.c5_data;
1833
        case 3:
1834
            if (!arm_feature(env, ARM_FEATURE_MPU))
1835
                goto bad_reg;
1836
            return env->cp15.c5_insn;
1837
        default:
1838
            goto bad_reg;
1839
        }
1840
    case 6: /* MMU Fault address.  */
1841
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1842
            if (crm >= 8)
1843
                goto bad_reg;
1844
            return env->cp15.c6_region[crm];
1845
        } else {
1846
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1847
                op2 = 0;
1848
            switch (op2) {
1849
            case 0:
1850
                return env->cp15.c6_data;
1851
            case 1:
1852
                if (arm_feature(env, ARM_FEATURE_V6)) {
1853
                    /* Watchpoint Fault Address.  */
1854
                    return 0; /* Not implemented.  */
1855
                } else {
1856
                    /* Instruction Fault Address.  */
1857
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1858
                       shouldn't do any harm.  */
1859
                    return env->cp15.c6_insn;
1860
                }
1861
            case 2:
1862
                if (arm_feature(env, ARM_FEATURE_V6)) {
1863
                    /* Instruction Fault Address.  */
1864
                    return env->cp15.c6_insn;
1865
                } else {
1866
                    goto bad_reg;
1867
                }
1868
            default:
1869
                goto bad_reg;
1870
            }
1871
        }
1872
    case 7: /* Cache control.  */
1873
        if (crm == 4 && op1 == 0 && op2 == 0) {
1874
            return env->cp15.c7_par;
1875
        }
1876
        /* FIXME: Should only clear Z flag if destination is r15.  */
1877
        env->ZF = 0;
1878
        return 0;
1879
    case 8: /* MMU TLB control.  */
1880
        goto bad_reg;
1881
    case 9: /* Cache lockdown.  */
1882
        switch (op1) {
1883
        case 0: /* L1 cache.  */
1884
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1885
                return 0;
1886
            switch (op2) {
1887
            case 0:
1888
                return env->cp15.c9_data;
1889
            case 1:
1890
                return env->cp15.c9_insn;
1891
            default:
1892
                goto bad_reg;
1893
            }
1894
        case 1: /* L2 cache */
1895
            if (crm != 0)
1896
                goto bad_reg;
1897
            /* L2 Lockdown and Auxiliary control.  */
1898
            return 0;
1899
        default:
1900
            goto bad_reg;
1901
        }
1902
    case 10: /* MMU TLB lockdown.  */
1903
        /* ??? TLB lockdown not implemented.  */
1904
        return 0;
1905
    case 11: /* TCM DMA control.  */
1906
    case 12: /* Reserved.  */
1907
        goto bad_reg;
1908
    case 13: /* Process ID.  */
1909
        switch (op2) {
1910
        case 0:
1911
            return env->cp15.c13_fcse;
1912
        case 1:
1913
            return env->cp15.c13_context;
1914
        default:
1915
            goto bad_reg;
1916
        }
1917
    case 14: /* Reserved.  */
1918
        goto bad_reg;
1919
    case 15: /* Implementation specific.  */
1920
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1921
            if (op2 == 0 && crm == 1)
1922
                return env->cp15.c15_cpar;
1923

    
1924
            goto bad_reg;
1925
        }
1926
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1927
            switch (crm) {
1928
            case 0:
1929
                return 0;
1930
            case 1: /* Read TI925T configuration.  */
1931
                return env->cp15.c15_ticonfig;
1932
            case 2: /* Read I_max.  */
1933
                return env->cp15.c15_i_max;
1934
            case 3: /* Read I_min.  */
1935
                return env->cp15.c15_i_min;
1936
            case 4: /* Read thread-ID.  */
1937
                return env->cp15.c15_threadid;
1938
            case 8: /* TI925T_status */
1939
                return 0;
1940
            }
1941
            /* TODO: Peripheral port remap register:
1942
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
1943
             * controller base address at $rn & ~0xfff and map size of
1944
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
1945
            goto bad_reg;
1946
        }
1947
        return 0;
1948
    }
1949
bad_reg:
1950
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1951
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
1952
              (insn >> 16) & 0xf, crm, op1, op2);
1953
    return 0;
1954
}
1955

    
1956
/* Write VAL to the r13 (stack pointer) banked for CPU mode MODE:
 * directly to the live register when MODE is the current mode,
 * otherwise to the banked copy selected by bank_number().  */
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    uint32_t cur_mode = env->uncached_cpsr & CPSR_M;

    if (cur_mode != mode) {
        env->banked_r13[bank_number(mode)] = val;
    } else {
        env->regs[13] = val;
    }
}
1964

    
1965
/* Read the r13 (stack pointer) banked for CPU mode MODE; counterpart
 * of set_r13_banked().  */
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    uint32_t cur_mode = env->uncached_cpsr & CPSR_M;

    return (cur_mode == mode) ? env->regs[13]
                              : env->banked_r13[bank_number(mode)];
}
1973

    
1974
/* v7M MRS: read a system register.  Encodings 0-7 are masked views of
 * the xPSR; 8/9 are the two stack pointers, 16-20 the interrupt
 * masking and control registers.  */
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    /* xPSR bit masks for encodings 0-7 (4 is reserved).  */
    static const uint32_t xpsr_mask[8] = {
        0xf8000000, /* APSR */
        0xf80001ff, /* IAPSR */
        0xff00fc00, /* EAPSR */
        0xff00fdff, /* xPSR */
        0,
        0x000001ff, /* IPSR */
        0x0700fc00, /* EPSR */
        0x0700edff, /* IEPSR */
    };

    if (reg < 8 && reg != 4) {
        return xpsr_read(env) & xpsr_mask[reg];
    }

    switch (reg) {
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 18: /* BASEPRI */
    case 19: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  Should raise an exception instead.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
2010

    
2011
/* v7M MSR: write a system register.  Register pairs that share a
 * writable-bit mask are grouped via fallthrough.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR: all bits are read-only, write ignored.  */
        break;
    case 6: /* EPSR */
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->uncached_cpsr |= CPSR_I;
        } else {
            env->uncached_cpsr &= ~CPSR_I;
        }
        break;
    case 17: /* FAULTMASK */
        if (val & 1) {
            env->uncached_cpsr |= CPSR_F;
        } else {
            env->uncached_cpsr &= ~CPSR_F;
        }
        break;
    case 18: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 19: /* BASEPRI_MAX: only accept writes that raise the priority
              * (lower nonzero value), or set it when currently 0.  */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) {
            env->v7m.basepri = val;
        }
        break;
    case 20: /* CONTROL: bit 1 also selects the active stack pointer.  */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  Should raise an exception instead.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
2077

    
2078
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2079
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2080
                void *opaque)
2081
{
2082
    if (cpnum < 0 || cpnum > 14) {
2083
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2084
        return;
2085
    }
2086

    
2087
    env->cp[cpnum].cp_read = cp_read;
2088
    env->cp[cpnum].cp_write = cp_write;
2089
    env->cp[cpnum].opaque = opaque;
2090
}
2091

    
2092
#endif
2093

    
2094
/* Note that signed overflow is undefined in C.  The following routines are
2095
   careful to use unsigned types where modulo arithmetic is required.
2096
   Failure to do so _will_ break on newer gcc.  */
2097

    
2098
/* Signed saturating arithmetic.  */
2099

    
2100
/* 16-bit signed saturating addition.  Overflow happens only when both
 * operands have the same sign and the result's sign differs; saturate
 * toward the sign of the operands.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    if (!((a ^ b) & 0x8000) && ((sum ^ a) & 0x8000)) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
2114

    
2115
/* 8-bit signed saturating addition; same overflow rule as the 16-bit
 * variant, clamping to [0x80, 0x7f].  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    if (!((a ^ b) & 0x80) && ((sum ^ a) & 0x80)) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
2129

    
2130
/* 16-bit signed saturating subtraction.  Overflow happens only when
 * the operand signs differ and the result's sign differs from the
 * minuend; saturate toward the sign of the minuend.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    if (((a ^ b) & 0x8000) && ((diff ^ a) & 0x8000)) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
2144

    
2145
/* 8-bit signed saturating subtraction; same overflow rule as the
 * 16-bit variant, clamping to [0x80, 0x7f].  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    if (((a ^ b) & 0x80) && ((diff ^ a) & 0x80)) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
2159

    
2160
/* Instantiate the signed-saturating ("q"-prefixed) parallel add/sub
 * helpers: op_addsub.h expands the ADD16/SUB16/ADD8/SUB8 lane
 * operations and PFX into one helper family.  The same macros are
 * redefined below for each variant, so op_addsub.h presumably
 * #undefs them after use — confirm in op_addsub.h.  */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
2167

    
2168
/* Unsigned saturating arithmetic.  */
2169
/* 16-bit unsigned saturating addition: clamp to 0xffff when the sum
 * wraps around (detected by sum < a).  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;
    return (sum < a) ? 0xffff : sum;
}
2177

    
2178
/* 16-bit unsigned saturating subtraction: clamp at zero.  */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return (a > b) ? (uint16_t)(a - b) : 0;
}
2185

    
2186
/* 8-bit unsigned saturating addition: clamp to 0xff on wrap-around.  */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;
    return (sum < a) ? 0xff : sum;
}
2194

    
2195
/* 8-bit unsigned saturating subtraction: clamp at zero.  */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return (a > b) ? (uint8_t)(a - b) : 0;
}
2202

    
2203
/* Instantiate the unsigned-saturating ("uq"-prefixed) parallel
 * add/sub helpers via op_addsub.h, using the *_usat lane ops.  */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
2210

    
2211
/* Signed modulo arithmetic.  */
/* Lane operations for the signed ("s"-prefixed) parallel add/sub
 * helpers.  Each lane op also updates the local `ge` accumulator
 * (set when the signed result is >= 0); ARITH_GE enables that
 * plumbing in op_addsub.h.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
2237

    
2238
/* Unsigned modulo arithmetic.  */
/* Lane operations for the unsigned ("u"-prefixed) parallel add/sub
 * helpers.  The `ge` bits are derived from the carry out of each
 * lane on addition ((sum >> width) == 1) and from the absence of a
 * borrow on subtraction ((sum >> width) == 0).  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
2275

    
2276
/* Halved signed arithmetic.  */
/* Lane operations for the signed halving ("sh"-prefixed) helpers:
 * compute in 32 bits then arithmetic-shift right by one, so the lane
 * result cannot overflow.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
2288

    
2289
/* Halved unsigned arithmetic.  */
/* Lane operations for the unsigned halving ("uh"-prefixed) helpers:
 * widen to 32 bits, add/subtract, then shift right once.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
2301

    
2302
/* Absolute difference of two unsigned bytes.  */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return (a > b) ? (uint8_t)(a - b) : (uint8_t)(b - a);
}
2309

    
2310
/* Unsigned sum of absolute byte differences.  */
2311
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2312
{
2313
    uint32_t sum;
2314
    sum = do_usad(a, b);
2315
    sum += do_usad(a >> 8, b >> 8);
2316
    sum += do_usad(a >> 16, b >>16);
2317
    sum += do_usad(a >> 24, b >> 24);
2318
    return sum;
2319
}
2320

    
2321
/* For ARMv6 SEL instruction.  */
2322
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2323
{
2324
    uint32_t mask;
2325

    
2326
    mask = 0;
2327
    if (flags & 1)
2328
        mask |= 0xff;
2329
    if (flags & 2)
2330
        mask |= 0xff00;
2331
    if (flags & 4)
2332
        mask |= 0xff0000;
2333
    if (flags & 8)
2334
        mask |= 0xff000000;
2335
    return (a & mask) | (b & ~mask);
2336
}
2337

    
2338
uint32_t HELPER(logicq_cc)(uint64_t val)
2339
{
2340
    return (val >> 32) | (val != 0);
2341
}
2342

    
2343
/* VFP support.  We follow the convention used for VFP instructions:
   single-precision routines have a "s" suffix, double-precision a
   "d" suffix.  */
2346

    
2347
/* Convert host exception flags to vfp form.  */
2348
static inline int vfp_exceptbits_from_host(int host_bits)
2349
{
2350
    int target_bits = 0;
2351

    
2352
    if (host_bits & float_flag_invalid)
2353
        target_bits |= 1;
2354
    if (host_bits & float_flag_divbyzero)
2355
        target_bits |= 2;
2356
    if (host_bits & float_flag_overflow)
2357
        target_bits |= 4;
2358
    if (host_bits & float_flag_underflow)
2359
        target_bits |= 8;
2360
    if (host_bits & float_flag_inexact)
2361
        target_bits |= 0x10;
2362
    if (host_bits & float_flag_input_denormal)
2363
        target_bits |= 0x80;
2364
    return target_bits;
2365
}
2366

    
2367
/* Assemble the FPSCR value: stored static bits, the vector
 * length/stride fields, and the exception flags accumulated in both
 * softfloat status blocks.  */
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
{
    uint32_t fpscr;
    int exflags;

    fpscr = env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff;
    fpscr |= env->vfp.vec_len << 16;
    fpscr |= env->vfp.vec_stride << 20;

    exflags = get_float_exception_flags(&env->vfp.fp_status);
    exflags |= get_float_exception_flags(&env->vfp.standard_fp_status);
    return fpscr | vfp_exceptbits_from_host(exflags);
}
2380

    
2381
/* Plain-function wrapper around the FPSCR read helper, for C callers
   that cannot use the HELPER() entry point directly.  */
uint32_t vfp_get_fpscr(CPUState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
2385

    
2386
/* Convert vfp exception flags to target form.  */
2387
static inline int vfp_exceptbits_to_host(int target_bits)
2388
{
2389
    int host_bits = 0;
2390

    
2391
    if (target_bits & 1)
2392
        host_bits |= float_flag_invalid;
2393
    if (target_bits & 2)
2394
        host_bits |= float_flag_divbyzero;
2395
    if (target_bits & 4)
2396
        host_bits |= float_flag_overflow;
2397
    if (target_bits & 8)
2398
        host_bits |= float_flag_underflow;
2399
    if (target_bits & 0x10)
2400
        host_bits |= float_flag_inexact;
2401
    if (target_bits & 0x80)
2402
        host_bits |= float_flag_input_denormal;
2403
    return host_bits;
2404
}
2405

    
2406
/* Write the FPSCR: store the static bits and vector fields, then push
 * any changed control bits (rounding mode, flush-to-zero, default
 * NaN) down into the softfloat status, and finally replace the
 * accumulated exception flags.  */
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
{
    /* FPSCR RMode field -> softfloat rounding mode.  */
    static const int rm_table[4] = {
        float_round_nearest_even,
        float_round_up,
        float_round_down,
        float_round_to_zero,
    };
    uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR] ^ val;

    env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xffc8ffff;
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    if (changed & (3 << 22)) { /* RMode */
        set_float_rounding_mode(rm_table[(val >> 22) & 3],
                                &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) { /* FZ: affects both outputs and inputs.  */
        int fz = (val & (1 << 24)) != 0;
        set_flush_to_zero(fz, &env->vfp.fp_status);
        set_flush_inputs_to_zero(fz, &env->vfp.fp_status);
    }
    if (changed & (1 << 25)) { /* DN */
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
    }

    /* The written exception bits become the new accumulated flags;
     * the NEON standard_fp_status flags are simply cleared.  */
    set_float_exception_flags(vfp_exceptbits_to_host(val),
                              &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
2446

    
2447
/* Plain-function wrapper around the FPSCR write helper, for C callers
   that cannot use the HELPER() entry point directly.  */
void vfp_set_fpscr(CPUState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
2451

    
2452
/* Build a VFP helper function name from an op name and a precision
 * suffix (via the HELPER/glue macros from the QEMU headers).  */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate the single- and double-precision binary-op helpers; all
 * use the FPSCR-controlled fp_status for rounding and exception
 * accumulation.  */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
{ \
    return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
{ \
    return float64_ ## name (a, b, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
2468

    
2469
/* Negation: flips the sign bit only (no float_status argument, so no
   exception flags can be raised).  */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

/* Absolute value: clears the sign bit only; likewise exception-free.  */
float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

/* Square root under the current FPSCR rounding/exception state.  */
float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
2498

    
2499
/* XXX: check quiet/signaling case */
/* VFP compare helpers: store the result into the FPSCR NZCV bits
 * (31:28).  Flag encodings: equal -> 0x6 (Z,C), less -> 0x8 (N),
 * greater -> 0x2 (C), unordered -> 0x3 (C,V).  The "cmp" variant
 * uses the quiet compare, "cmpe" the signaling one.  */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
2528

    
2529
/* Integer to float conversion.  */
/* The operand always arrives as uint32_t; the "sito" variants
 * reinterpret it as signed via the softfloat int32 conversions.
 * Rounding follows the current fp_status.  */
float32 VFP_HELPER(uito, s)(uint32_t x, CPUState *env)
{
    return uint32_to_float32(x, &env->vfp.fp_status);
}

float64 VFP_HELPER(uito, d)(uint32_t x, CPUState *env)
{
    return uint32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(sito, s)(uint32_t x, CPUState *env)
{
    return int32_to_float32(x, &env->vfp.fp_status);
}

float64 VFP_HELPER(sito, d)(uint32_t x, CPUState *env)
{
    return int32_to_float64(x, &env->vfp.fp_status);
}
2549

    
2550
/* Float to integer conversion.  */
/* A NaN input is special-cased: raise the Invalid flag and return 0.
 * Otherwise conversion uses the current fp_status rounding mode.  */
uint32_t VFP_HELPER(toui, s)(float32 x, CPUState *env)
{
    if (float32_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float32_to_uint32(x, &env->vfp.fp_status);
}

uint32_t VFP_HELPER(toui, d)(float64 x, CPUState *env)
{
    if (float64_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float64_to_uint32(x, &env->vfp.fp_status);
}

uint32_t VFP_HELPER(tosi, s)(float32 x, CPUState *env)
{
    if (float32_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float32_to_int32(x, &env->vfp.fp_status);
}

uint32_t VFP_HELPER(tosi, d)(float64 x, CPUState *env)
{
    if (float64_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float64_to_int32(x, &env->vfp.fp_status);
}
2586

    
2587
/* Round-to-zero ("z"-suffixed) variants of the float-to-int
   conversions; NaN input raises Invalid and returns 0.  */
uint32_t VFP_HELPER(touiz, s)(float32 x, CPUState *env)
{
    if (float32_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float32_to_uint32_round_to_zero(x, &env->vfp.fp_status);
}

uint32_t VFP_HELPER(touiz, d)(float64 x, CPUState *env)
{
    if (float64_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float64_to_uint32_round_to_zero(x, &env->vfp.fp_status);
}

uint32_t VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
{
    if (float32_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float32_to_int32_round_to_zero(x, &env->vfp.fp_status);
}

uint32_t VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
{
    if (float64_is_any_nan(x)) {
        float_raise(float_flag_invalid, &env->vfp.fp_status);
        return 0;
    }
    return float64_to_int32_round_to_zero(x, &env->vfp.fp_status);
}
2622

    
2623
/* floating point conversion: single <-> double precision.  */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
{
    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
2641

    
2642
/* VFP3 fixed point conversion.  */
/* Generates a pair of helpers per fixed-point type: <name>to
 * (fixed -> float, scaling down by 2^shift) and to<name>
 * (float -> fixed, scaling up by 2^shift then truncating toward
 * zero).  "shift" is the number of fraction bits.  A NaN input to
 * the float -> fixed direction raises Invalid and returns 0.  */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz VFP_HELPER(name##to, p)(uint##fsz##_t  x, uint32_t shift, \
                                   CPUState *env) \
{ \
    float##fsz tmp; \
    tmp = sign##int32_to_##float##fsz ((itype##_t)x, &env->vfp.fp_status); \
    return float##fsz##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \
} \
uint##fsz##_t VFP_HELPER(to##name, p)(float##fsz x, uint32_t shift, \
                                      CPUState *env) \
{ \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, &env->vfp.fp_status); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, &env->vfp.fp_status); \
    return float##fsz##_to_##itype##_round_to_zero(tmp, &env->vfp.fp_status); \
}

/* sh/sl = signed 16/32-bit fixed, uh/ul = unsigned 16/32-bit fixed,
 * each instantiated for both double (d) and single (s) precision.  */
VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
2672

    
2673
/* Half precision conversions.  */
/* "ieee" is true when FPSCR bit 26 is clear (IEEE half-precision
 * rather than the alternative format — confirm against the ARM ARM
 * AHP bit definition); in IEEE mode NaN results are additionally
 * quietened.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
2693

    
2694
/* Half-precision conversion entry points: the NEON flavours use
   standard_fp_status, the VFP flavours the FPSCR-controlled
   fp_status.  */
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
2713

    
2714
/* IEEE single-precision bit patterns for 2.0, 3.0 and 1.5, used by
 * the NEON reciprocal-step helpers below.  */
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
2717

    
2718
/* Reciprocal step: 2.0 - a * b, computed with the NEON standard FP
 * status.  When the product would be infinity * zero-or-denormal the
 * result is exactly 2.0; input-denormal is still raised unless a
 * true zero was involved.  */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

/* Reciprocal square-root step: (3.0 - a * b) / 2.0, with the
 * analogous infinity * zero-or-denormal special case yielding 1.5.  */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
2745

    
2746
/* NEON helpers.  */
2747

    
2748
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)  /* 256.0 */
#define float64_512 make_float64(0x4080000000000000LL)  /* 512.0 */
2752

    
2753
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
/* Reciprocal estimate: quantize the input to a 9-bit value q, take
 * 1/((q + 0.5)/512) and round it to an 8-bit mantissa, all in double
 * precision.  */
static float64 recip_estimate(float64 a, CPUState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
2781

    
2782
/* VRECPE: NEON reciprocal estimate of a single-precision value.
 * NaN, infinity, zero/denormal and too-large-exponent inputs are handled
 * explicitly; otherwise the fraction is scaled into a double in [0.5, 1.0),
 * run through the ARM ARM estimate algorithm, and repacked.
 */
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    /* Keep the sign bit in an unsigned type: 0x80000000 does not fit in a
     * signed int, so storing it in an int relies on implementation-defined
     * conversion behavior. */
    uint32_t sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        /* 1/inf = (signed) zero */
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        /* 1/0 = (signed) infinity, with division-by-zero signalled */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        /* The result exponent would go below the smallest normal:
         * flush to (signed) zero and flag underflow. */
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    /* Scale the fraction into [0.5, 1.0) as a double (exponent 0x3fe). */
    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    /* Repack: sign | biased exponent | top 23 fraction bits. */
    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}
2822

    
2823
/* The algorithm that must be used to calculate the estimate
2824
 * is specified by the ARM ARM.
2825
 */
2826
static float64 recip_sqrt_estimate(float64 a, CPUState *env)
2827
{
2828
    /* These calculations mustn't set any fp exception flags,
2829
     * so we use a local copy of the fp_status.
2830
     */
2831
    float_status dummy_status = env->vfp.standard_fp_status;
2832
    float_status *s = &dummy_status;
2833
    float64 q;
2834
    int64_t q_int;
2835

    
2836
    if (float64_lt(a, float64_half, s)) {
2837
        /* range 0.25 <= a < 0.5 */
2838

    
2839
        /* a in units of 1/512 rounded down */
2840
        /* q0 = (int)(a * 512.0);  */
2841
        q = float64_mul(float64_512, a, s);
2842
        q_int = float64_to_int64_round_to_zero(q, s);
2843

    
2844
        /* reciprocal root r */
2845
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2846
        q = int64_to_float64(q_int, s);
2847
        q = float64_add(q, float64_half, s);
2848
        q = float64_div(q, float64_512, s);
2849
        q = float64_sqrt(q, s);
2850
        q = float64_div(float64_one, q, s);
2851
    } else {
2852
        /* range 0.5 <= a < 1.0 */
2853

    
2854
        /* a in units of 1/256 rounded down */
2855
        /* q1 = (int)(a * 256.0); */
2856
        q = float64_mul(float64_256, a, s);
2857
        int64_t q_int = float64_to_int64_round_to_zero(q, s);
2858

    
2859
        /* reciprocal root r */
2860
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2861
        q = int64_to_float64(q_int, s);
2862
        q = float64_add(q, float64_half, s);
2863
        q = float64_div(q, float64_256, s);
2864
        q = float64_sqrt(q, s);
2865
        q = float64_div(float64_one, q, s);
2866
    }
2867
    /* r in units of 1/256 rounded to nearest */
2868
    /* s = (int)(256.0 * r + 0.5); */
2869

    
2870
    q = float64_mul(q, float64_256,s );
2871
    q = float64_add(q, float64_half, s);
2872
    q_int = float64_to_int64_round_to_zero(q, s);
2873

    
2874
    /* return (double)s / 256.0;*/
2875
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2876
}
2877

    
2878
/* VRSQRTE: NEON reciprocal square root estimate of a single-precision
 * value.  NaN, zero/denormal, negative and infinity inputs are handled
 * explicitly; otherwise the fraction is normalized into a double in
 * [0.25, 1.0), run through the ARM ARM estimate algorithm, and repacked.
 */
float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        /* 1/sqrt(0) = (signed) infinity, with division-by-zero signalled */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        /* Square root of a negative number: invalid-operation NaN. */
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */
    if ((val & 0x800000) == 0) {
        /* Even input exponent: use double exponent 0x3fe -> [0.5, 1.0). */
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        /* Odd input exponent: use double exponent 0x3fd -> [0.25, 0.5). */
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    /* Result exponent: halve the (rebased) input exponent. */
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    /* Repack: sign | biased exponent | top 23 fraction bits. */
    val = ((val64 >> 63)  & 0x80000000)
        | ((result_exp & 0xff) << 23)
        | ((val64 >> 29)  & 0x7fffff);
    return make_float32(val);
}
2929

    
2930
/* VRECPE (unsigned): fixed-point reciprocal estimate.  The input is a
 * 0.32 fixed-point fraction; inputs below 0.5 saturate to all-ones.
 */
uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
{
    float64 est;

    /* Out of range (input < 0.5): saturate. */
    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    /* Map the fraction bits onto a double in [0.5, 1.0). */
    est = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    est = recip_estimate(est, env);

    /* Extract the estimate's fraction back into 0.32 fixed point. */
    return 0x80000000 | ((float64_val(est) >> 21) & 0x7fffffff);
}
2945

    
2946
/* VRSQRTE (unsigned): fixed-point reciprocal square root estimate.  The
 * input is a 0.32 fixed-point fraction; inputs below 0.25 saturate to
 * all-ones.
 */
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
{
    float64 est;

    /* Out of range (input < 0.25): saturate. */
    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        /* 0.5 <= input < 1.0: map onto a double in [0.5, 1.0). */
        est = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else {
        /* bits 31-30 == '01', 0.25 <= input < 0.5:
         * map onto a double in [0.25, 0.5). */
        est = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    est = recip_sqrt_estimate(est, env);

    /* Extract the estimate's fraction back into 0.32 fixed point. */
    return 0x80000000 | ((float64_val(est) >> 21) & 0x7fffffff);
}
2966

    
2967
/* Write the ThumbEE TEECR register (only bit 0 is implemented).
 * Translated code depends on this value, so any change must flush all
 * cached translation blocks.
 */
void HELPER(set_teecr)(CPUState *env, uint32_t val)
{
    uint32_t new_teecr = val & 1;

    if (env->teecr == new_teecr) {
        return;
    }
    env->teecr = new_teecr;
    tb_flush(env);
}