target-arm/helper.c @ 777dc784
#include "cpu.h"
#include "gdbstub.h"
#include "helper.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "sysemu.h"

static uint32_t cortexa15_cp15_c0_c1[8] = {
    0x00001131, 0x00011011, 0x02010555, 0x00000000,
    0x10201105, 0x20000000, 0x01240000, 0x02102211
};

static uint32_t cortexa15_cp15_c0_c2[8] = {
    0x02101110, 0x13112111, 0x21232041, 0x11112131, 0x10011142, 0, 0, 0
};

static uint32_t cortexa9_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };

static uint32_t cortexa9_cp15_c0_c2[8] =
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1176_cp15_c0_c1[8] =
{ 0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0 };

static uint32_t arm1176_cp15_c0_c2[8] =
{ 0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0 };
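
/* A note on the tables above: each per-CPU c0_c1/c0_c2 array holds the
 * values reported for the CP15 c0 ID registers with CRm==1 and CRm==2,
 * i.e. c0_c1 covers ID_PFR0/1, ID_DFR0, ID_AFR0 and ID_MMFR0-3, and
 * c0_c2 covers ID_ISAR0-5 (the trailing entries are reserved and read
 * as zero).  For example, a guest
 *     MRC p15, 0, r0, c0, c1, 0
 * on the Cortex-A9 model should return cortexa9_cp15_c0_c1[0] == 0x1031
 * (its ID_PFR0 value); this is how the ID-register read path elsewhere
 * in this file appears to consume the arrays.
 */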

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}

static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136:
        /* This is the 1136 r1, which is a v6K core */
        set_feature(env, ARM_FEATURE_V6K);
        /* Fall through */
    case ARM_CPUID_ARM1136_R2:
        /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
         * older core than plain "arm1136". In particular this does not
         * have the v6K features.
         */
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        /* These ID register values are correct for 1136 but may be wrong
         * for 1136_r2 (in particular r0p2 does not actually implement most
         * of the ID registers).
         */
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM1176:
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VAPA);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VAPA);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x82048004;
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXA9:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        /* Note that A9 supports the MP extensions even for
         * A9UP and single-core A9MP (which are both different
         * and valid configurations; we don't model A9UP).
         */
        set_feature(env, ARM_FEATURE_V7MP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41033090;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x80038003;
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXA15:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP4);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_ARM_DIV);
        set_feature(env, ARM_FEATURE_V7MP);
        set_feature(env, ARM_FEATURE_GENERIC_TIMER);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410430f0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x10110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x11111111;
        memcpy(env->cp15.c0_c1, cortexa15_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa15_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x8444c004;
        env->cp15.c0_clid = 0x0a200023;
        env->cp15.c0_ccsid[0] = 0x701fe00a; /* 32K L1 dcache */
        env->cp15.c0_ccsid[1] = 0x201fe00a; /* 32K L1 icache */
        env->cp15.c0_ccsid[2] = 0x711fe07a; /* 4096K L2 unified cache */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP4);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_ARM_DIV);
        set_feature(env, ARM_FEATURE_V7MP);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_SA1100:
    case ARM_CPUID_SA1110:
        set_feature(env, ARM_FEATURE_STRONGARM);
        env->cp15.c1_sys = 0x00000070;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }

    /* Some features automatically imply others: */
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            set_feature(env, ARM_FEATURE_V6);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DIV);
    }
    if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
        set_feature(env, ARM_FEATURE_THUMB_DIV);
    }
    if (arm_feature(env, ARM_FEATURE_VFP4)) {
        set_feature(env, ARM_FEATURE_VFP3);
    }
    if (arm_feature(env, ARM_FEATURE_VFP3)) {
        set_feature(env, ARM_FEATURE_VFP);
    }
}

/* TODO Move contents into arm_cpu_reset() in cpu.c,
 *      once cpu_reset_model_id() is eliminated,
 *      and then forward to cpu_reset() here.
 */
void cpu_state_reset(CPUARMState *env)
{
    uint32_t id;
    uint32_t tmp = 0;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    id = cpu->midr;
    tmp = env->cp15.c15_config_base_address;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
    env->cp15.c15_config_base_address = tmp;
    env->cp15.c0_cpuid = cpu->midr;
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  Initial SP and PC are loaded from ROM.  */
    if (IS_M(env)) {
        uint32_t pc;
        uint8_t *rom;
        env->uncached_cpsr &= ~CPSR_I;
        rom = rom_ptr(0);
        if (rom) {
            /* We should really use ldl_phys here, in case the guest
               modified flash and reset itself.  However images
               loaded via -kernel have not been copied yet, so load the
               values directly from there.  */
            env->regs[13] = ldl_p(rom);
            pc = ldl_p(rom + 4);
            env->thumb = pc & 1;
            env->regs[15] = pc & ~1;
        }
    }
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
    /* v7 performance monitor control register: same implementor
     * field as main ID register, and we implement no event counters.
     */
    env->cp15.c9_pmcr = (id & 0xff000000);
#endif
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    tlb_flush(env, 1);
    /* Reset is a state change for some CPUARMState fields which we
     * bake assumptions about into translated code, so we need to
     * tb_flush().
     */
    tb_flush(env);
}

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

CPUARMState *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    CPUARMState *env;
    static int inited = 0;

    if (!object_class_by_name(cpu_model)) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(cpu_model));
    env = &cpu->env;
    env->cpu_model_str = cpu_model;

    if (tcg_enabled() && !inited) {
        inited = 1;
        arm_translate_init();
    }

    cpu_state_reset(env);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}
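
/* The register counts passed to gdb_register_coprocessor() above follow
 * from vfp_gdb_get_reg()/vfp_gdb_set_reg(): 16 D registers plus
 * FPSID/FPSCR/FPEXC gives 19 for plain VFP, 32 D registers plus the three
 * control registers gives 35 for VFP3, and adding the 16 Q-register
 * aliases on top of that gives 51 for NEON.  These totals are assumed to
 * match the register lists in the corresponding arm-*.xml descriptions.
 */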

typedef struct ARMCPUListState {
    fprintf_function cpu_fprintf;
    FILE *file;
} ARMCPUListState;

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any") == 0) {
        return 1;
    } else if (strcmp(name_b, "any") == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    ARMCPUListState *s = user_data;

    (*s->cpu_fprintf)(s->file, "  %s\n",
                      object_class_get_name(oc));
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    ARMCPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}

static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
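
/* A quick sketch of the packing used by cpsr_read()/cpsr_write() above:
 * the flags live in separate CPUARMState fields and are folded into the
 * architectural layout N=31, Z=30, C=29, V=28, Q=27, IT[1:0]=26:25,
 * GE[3:0]=19:16, IT[7:2]=15:10, T=5, M[4:0]=4:0 on read.  For example,
 * assuming ZF==0 (last result was zero), CF==1, all other flag fields
 * clear and uncached_cpsr holding only ARM_CPU_MODE_SVC, cpsr_read()
 * returns (1 << 30) | (1 << 29) | ARM_CPU_MODE_SVC, i.e. 0x60000013.
 */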

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
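
/* For example, sxtb16 sign-extends bytes 0 and 2 into halfwords:
 * sxtb16(0x00800080) == 0xff80ff80, while uxtb16 zero-extends them:
 * uxtb16(0x12345678) == 0x00340078.
 */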

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
      return 0;
    if (num == INT_MIN && den == -1)
      return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
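
/* rbit mirrors the 32-bit value bit-for-bit by first swapping bytes,
 * then nibbles within each byte, then bits within each nibble:
 * rbit(0x00000001) == 0x80000000 and rbit(0x12345678) == 0x1e6a2c48.
 */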

uint32_t HELPER(abs)(uint32_t x)
{
    return ((int32_t)x < 0) ? -x : x;
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUARMState *env)
{
    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return;
}

uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;
}

void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(CPUARMState *env, int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(env, "Bad mode %x\n", mode);
    return -1;
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(env, old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(env, mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}

static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
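
/* The pushes above build the architectural v7-M exception frame, eight
 * words from the final SP upwards: R0, R1, R2, R3, R12, LR, return PC,
 * xPSR (with bit 9 recording any extra alignment adjustment).
 * do_v7m_exception_exit() pops them back in the same order.
 */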

/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env->regs[15] - 2, env->bswap_code) & 0xff;
            } else {
                mask = arm_ldl_code(env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
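
/* In summary, the legacy exception vectors used above sit at offsets
 * 0x04 (undef), 0x08 (SVC), 0x0c (prefetch abort), 0x10 (data abort),
 * 0x18 (IRQ) and 0x1c (FIQ) from the vector base, which is 0x00000000
 * or 0xffff0000 depending on SCTLR.V (bit 13 of c1_sys).
 */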

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
  int prot_ro;

  if (domain_prot == 3) {
    return PAGE_READ | PAGE_WRITE;
  }

  if (access_type == 1)
      prot_ro = 0;
  else
      prot_ro = PAGE_READ;

  switch (ap) {
  case 0:
      if (access_type == 1)
          return 0;
      switch ((env->cp15.c1_sys >> 8) & 3) {
      case 1:
          return is_user ? 0 : PAGE_READ;
      case 2:
          return PAGE_READ;
      default:
          return 0;
      }
  case 1:
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  case 2:
      if (is_user)
          return prot_ro;
      else
          return PAGE_READ | PAGE_WRITE;
  case 3:
      return PAGE_READ | PAGE_WRITE;
  case 4: /* Reserved.  */
      return 0;
  case 5:
      return is_user ? 0 : prot_ro;
  case 6:
      return prot_ro;
  case 7:
      if (!arm_feature (env, ARM_FEATURE_V6K))
          return 0;
      return prot_ro;
  default:
      abort();
  }
}
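
/* As a worked example of the AP decoding above: with a "client" domain
 * (domain_prot == 1) and AP == 2, a privileged access gets
 * PAGE_READ | PAGE_WRITE while a user access gets read-only (or nothing
 * for a store, since prot_ro is 0 when access_type == 1), which matches
 * the classic VMSA "privileged read/write, user read-only" encoding.
 */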

static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}
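
/* For instance, with TTBCR.N == 2 (c2_control == 2) the code in
 * HELPER(set_cp15) below computes c2_mask == 0xc0000000, so virtual
 * addresses below 0x40000000 are walked via TTBR0 (c2_base0) and the
 * rest via TTBR1 (c2_base1).  The level-1 descriptor offset is simply
 * VA[31:20] * 4; e.g. VA 0x00123456 selects the word at table + 0x004.
 */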

static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
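
/* To make the v5 walk above concrete, a 4k small page resolves as:
 * the L1 descriptor is read at get_level1_table_address(env, va); if it
 * names a coarse table, the L2 descriptor sits at (l1_desc & 0xfffffc00)
 * + VA[19:12] * 4, and the final physical address is
 * (l2_desc & 0xfffff000) | (va & 0xfff).  A fault instead returns the
 * status code in bits [3:0] with the domain number in bits [7:4].
 */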

static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    int domain_prot;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
          return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}

static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0)
        return -1;

    return phys_addr;
}

void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    return 0;
}

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
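
/* These two helpers convert between the 2-bits-per-region "simple" layout
 * and the 4-bits-per-region "extended" layout, region n moving between
 * bits [2n+1:2n] and [4n+1:4n].  For example,
 * extended_mpu_ap_bits(0x31) == 0x301 (region 0 AP=1, region 2 AP=3) and
 * simple_mpu_ap_bits(0x301) == 0x31.
 */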
1437

    
1438
void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
1439
{
1440
    int op1;
1441
    int op2;
1442
    int crm;
1443

    
1444
    op1 = (insn >> 21) & 7;
1445
    op2 = (insn >> 5) & 7;
1446
    crm = insn & 0xf;
1447
    switch ((insn >> 16) & 0xf) {
1448
    case 0:
1449
        /* ID codes.  */
1450
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1451
            break;
1452
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1453
            break;
1454
        if (arm_feature(env, ARM_FEATURE_V7)
1455
                && op1 == 2 && crm == 0 && op2 == 0) {
1456
            env->cp15.c0_cssel = val & 0xf;
1457
            break;
1458
        }
1459
        goto bad_reg;
1460
    case 1: /* System configuration.  */
1461
        if (arm_feature(env, ARM_FEATURE_V7)
1462
                && op1 == 0 && crm == 1 && op2 == 0) {
1463
            env->cp15.c1_scr = val;
1464
            break;
1465
        }
1466
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1467
            op2 = 0;
1468
        switch (op2) {
1469
        case 0:
1470
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1471
                env->cp15.c1_sys = val;
1472
            /* ??? Lots of these bits are not implemented.  */
1473
            /* This may enable/disable the MMU, so do a TLB flush.  */
1474
            tlb_flush(env, 1);
1475
            break;
1476
        case 1: /* Auxiliary control register.  */
1477
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1478
                env->cp15.c1_xscaleauxcr = val;
1479
                break;
1480
            }
1481
            /* Not implemented.  */
1482
            break;
1483
        case 2:
1484
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1485
                goto bad_reg;
1486
            if (env->cp15.c1_coproc != val) {
1487
                env->cp15.c1_coproc = val;
1488
                /* ??? Is this safe when called from within a TB?  */
1489
                tb_flush(env);
1490
            }
1491
            break;
1492
        default:
1493
            goto bad_reg;
1494
        }
1495
        break;
1496
    case 2: /* MMU Page table control / MPU cache control.  */
1497
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1498
            switch (op2) {
1499
            case 0:
1500
                env->cp15.c2_data = val;
1501
                break;
1502
            case 1:
1503
                env->cp15.c2_insn = val;
1504
                break;
1505
            default:
1506
                goto bad_reg;
1507
            }
1508
        } else {
1509
            switch (op2) {
1510
            case 0:
1511
                env->cp15.c2_base0 = val;
1512
                break;
1513
            case 1:
1514
                env->cp15.c2_base1 = val;
1515
                break;
1516
            case 2:
1517
                val &= 7;
1518
                env->cp15.c2_control = val;
1519
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1520
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1521
                break;
1522
            default:
1523
                goto bad_reg;
1524
            }
1525
        }
1526
        break;
1527
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1528
        env->cp15.c3 = val;
1529
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1530
        break;
1531
    case 4: /* Reserved.  */
1532
        goto bad_reg;
1533
    case 5: /* MMU Fault status / MPU access permission.  */
1534
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1535
            op2 = 0;
1536
        switch (op2) {
1537
        case 0:
1538
            if (arm_feature(env, ARM_FEATURE_MPU))
1539
                val = extended_mpu_ap_bits(val);
1540
            env->cp15.c5_data = val;
1541
            break;
1542
        case 1:
1543
            if (arm_feature(env, ARM_FEATURE_MPU))
1544
                val = extended_mpu_ap_bits(val);
1545
            env->cp15.c5_insn = val;
1546
            break;
1547
        case 2:
1548
            if (!arm_feature(env, ARM_FEATURE_MPU))
1549
                goto bad_reg;
1550
            env->cp15.c5_data = val;
1551
            break;
1552
        case 3:
1553
            if (!arm_feature(env, ARM_FEATURE_MPU))
1554
                goto bad_reg;
1555
            env->cp15.c5_insn = val;
1556
            break;
1557
        default:
1558
            goto bad_reg;
1559
        }
1560
        break;
1561
    case 6: /* MMU Fault address / MPU base/size.  */
1562
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1563
            if (crm >= 8)
1564
                goto bad_reg;
1565
            env->cp15.c6_region[crm] = val;
1566
        } else {
1567
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1568
                op2 = 0;
1569
            switch (op2) {
1570
            case 0:
1571
                env->cp15.c6_data = val;
1572
                break;
1573
            case 1: /* ??? This is WFAR on armv6 */
1574
            case 2:
1575
                env->cp15.c6_insn = val;
1576
                break;
1577
            default:
1578
                goto bad_reg;
1579
            }
1580
        }
1581
        break;
1582
    case 7: /* Cache control.  */
1583
        env->cp15.c15_i_max = 0x000;
1584
        env->cp15.c15_i_min = 0xff0;
1585
        if (op1 != 0) {
1586
            goto bad_reg;
1587
        }
1588
        /* No cache, so nothing to do except VA->PA translations. */
1589
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
1590
            switch (crm) {
1591
            case 4:
1592
                if (arm_feature(env, ARM_FEATURE_V7)) {
1593
                    env->cp15.c7_par = val & 0xfffff6ff;
1594
                } else {
1595
                    env->cp15.c7_par = val & 0xfffff1ff;
1596
                }
1597
                break;
1598
            case 8: {
1599
                uint32_t phys_addr;
1600
                target_ulong page_size;
1601
                int prot;
1602
                int ret, is_user = op2 & 2;
1603
                int access_type = op2 & 1;
1604

    
1605
                if (op2 & 4) {
1606
                    /* Other states are only available with TrustZone */
1607
                    goto bad_reg;
1608
                }
1609
                ret = get_phys_addr(env, val, access_type, is_user,
1610
                                    &phys_addr, &prot, &page_size);
1611
                if (ret == 0) {
1612
                    /* We do not set any attribute bits in the PAR */
1613
                    if (page_size == (1 << 24)
1614
                        && arm_feature(env, ARM_FEATURE_V7)) {
1615
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1616
                    } else {
1617
                        env->cp15.c7_par = phys_addr & 0xfffff000;
1618
                    }
1619
                } else {
1620
                    /* Map the FSR value into the PAR fault encoding:
                     * FSR bit 10 -> PAR bit 5, FSR bit 12 -> PAR bit 6,
                     * FS[3:0] -> PAR bits [4:1], with the fault bit set.
                     */
                    env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
1621
                                       ((ret & (1 << 12)) >> 6) |
1622
                                       ((ret & 0xf) << 1) | 1;
1623
                }
1624
                break;
1625
            }
1626
            }
1627
        }
1628
        break;
1629
    case 8: /* MMU TLB control.  */
1630
        switch (op2) {
1631
        case 0: /* Invalidate all (TLBIALL) */
1632
            tlb_flush(env, 1);
1633
            break;
1634
        case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
1635
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1636
            break;
1637
        case 2: /* Invalidate by ASID (TLBIASID) */
1638
            tlb_flush(env, val == 0);
1639
            break;
1640
        case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
1641
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1642
            break;
1643
        default:
1644
            goto bad_reg;
1645
        }
1646
        break;
1647
    case 9:
1648
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1649
            break;
1650
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1651
            break; /* Ignore ReadBuffer access */
1652
        switch (crm) {
1653
        case 0: /* Cache lockdown.  */
1654
            switch (op1) {
1655
            case 0: /* L1 cache.  */
1656
                switch (op2) {
1657
                case 0:
1658
                    env->cp15.c9_data = val;
1659
                    break;
1660
                case 1:
1661
                    env->cp15.c9_insn = val;
1662
                    break;
1663
                default:
1664
                    goto bad_reg;
1665
                }
1666
                break;
1667
            case 1: /* L2 cache.  */
1668
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1669
                break;
1670
            default:
1671
                goto bad_reg;
1672
            }
1673
            break;
1674
        case 1: /* TCM memory region registers.  */
1675
            /* Not implemented.  */
1676
            goto bad_reg;
1677
        case 12: /* Performance monitor control */
1678
            /* Performance monitors are implementation defined in v7,
1679
             * but with an ARM recommended set of registers, which we
1680
             * follow (although we don't actually implement any counters)
1681
             */
1682
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1683
                goto bad_reg;
1684
            }
1685
            switch (op2) {
1686
            case 0: /* performance monitor control register */
1687
                /* only the DP, X, D and E bits are writable */
1688
                env->cp15.c9_pmcr &= ~0x39;
1689
                env->cp15.c9_pmcr |= (val & 0x39);
1690
                break;
1691
            case 1: /* Count enable set register */
1692
                val &= (1 << 31);
1693
                env->cp15.c9_pmcnten |= val;
1694
                break;
1695
            case 2: /* Count enable clear */
1696
                val &= (1 << 31);
1697
                env->cp15.c9_pmcnten &= ~val;
1698
                break;
1699
            case 3: /* Overflow flag status */
1700
                env->cp15.c9_pmovsr &= ~val;
1701
                break;
1702
            case 4: /* Software increment */
1703
                /* RAZ/WI since we don't implement the software-count event */
1704
                break;
1705
            case 5: /* Event counter selection register */
1706
                /* Since we don't implement any events, writing to this register
1707
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1708
                 */
1709
                break;
1710
            default:
1711
                goto bad_reg;
1712
            }
1713
            break;
1714
        case 13: /* Performance counters */
1715
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1716
                goto bad_reg;
1717
            }
1718
            switch (op2) {
1719
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
1720
                break;
1721
            case 1: /* Event type select */
1722
                env->cp15.c9_pmxevtyper = val & 0xff;
1723
                break;
1724
            case 2: /* Event count register */
1725
                /* Unimplemented (we have no events), RAZ/WI */
1726
                break;
1727
            default:
1728
                goto bad_reg;
1729
            }
1730
            break;
1731
        case 14: /* Performance monitor control */
1732
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1733
                goto bad_reg;
1734
            }
1735
            switch (op2) {
1736
            case 0: /* user enable */
1737
                env->cp15.c9_pmuserenr = val & 1;
1738
                /* changes access rights for cp registers, so flush tbs */
1739
                tb_flush(env);
1740
                break;
1741
            case 1: /* interrupt enable set */
1742
                /* We have no event counters so only the C bit can be changed */
1743
                val &= (1 << 31);
1744
                env->cp15.c9_pminten |= val;
1745
                break;
1746
            case 2: /* interrupt enable clear */
1747
                val &= (1 << 31);
1748
                env->cp15.c9_pminten &= ~val;
1749
                break;
1750
            }
1751
            break;
1752
        default:
1753
            goto bad_reg;
1754
        }
1755
        break;
1756
    case 10: /* MMU TLB lockdown.  */
1757
        /* ??? TLB lockdown not implemented.  */
1758
        break;
1759
    case 12: /* Reserved.  */
1760
        goto bad_reg;
1761
    case 13: /* Process ID.  */
1762
        switch (op2) {
1763
        case 0:
1764
            /* Unlike real hardware the qemu TLB uses virtual addresses,
1765
               not modified virtual addresses, so this causes a TLB flush.
1766
             */
1767
            if (env->cp15.c13_fcse != val)
1768
              tlb_flush(env, 1);
1769
            env->cp15.c13_fcse = val;
1770
            break;
1771
        case 1:
1772
            /* This changes the ASID, so do a TLB flush.  */
1773
            if (env->cp15.c13_context != val
1774
                && !arm_feature(env, ARM_FEATURE_MPU))
1775
              tlb_flush(env, 0);
1776
            env->cp15.c13_context = val;
1777
            break;
1778
        default:
1779
            goto bad_reg;
1780
        }
1781
        break;
1782
    case 14: /* Generic timer */
1783
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1784
            /* Dummy implementation: RAZ/WI for all */
1785
            break;
1786
        }
1787
        goto bad_reg;
1788
    case 15: /* Implementation specific.  */
1789
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1790
            if (op2 == 0 && crm == 1) {
1791
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1792
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1793
                    tb_flush(env);
1794
                    env->cp15.c15_cpar = val & 0x3fff;
1795
                }
1796
                break;
1797
            }
1798
            goto bad_reg;
1799
        }
1800
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1801
            switch (crm) {
1802
            case 0:
1803
                break;
1804
            case 1: /* Set TI925T configuration.  */
1805
                env->cp15.c15_ticonfig = val & 0xe7;
1806
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1807
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1808
                break;
1809
            case 2: /* Set I_max.  */
1810
                env->cp15.c15_i_max = val;
1811
                break;
1812
            case 3: /* Set I_min.  */
1813
                env->cp15.c15_i_min = val;
1814
                break;
1815
            case 4: /* Set thread-ID.  */
1816
                env->cp15.c15_threadid = val & 0xffff;
1817
                break;
1818
            case 8: /* Wait-for-interrupt (deprecated).  */
1819
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1820
                break;
1821
            default:
1822
                goto bad_reg;
1823
            }
1824
        }
1825
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
1826
            switch (crm) {
1827
            case 0:
1828
                if ((op1 == 0) && (op2 == 0)) {
1829
                    env->cp15.c15_power_control = val;
1830
                } else if ((op1 == 0) && (op2 == 1)) {
1831
                    env->cp15.c15_diagnostic = val;
1832
                } else if ((op1 == 0) && (op2 == 2)) {
1833
                    env->cp15.c15_power_diagnostic = val;
1834
                }
1835
            default:
1836
                break;
1837
            }
1838
        }
1839
        break;
1840
    }
1841
    return;
1842
bad_reg:
1843
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1844
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1845
              (insn >> 16) & 0xf, crm, op1, op2);
1846
}
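
/* Illustrative sketch, not part of the original helper.c: the helpers
 * above and below decode the same MCR/MRC fields from the instruction
 * word.  The struct and function names here are hypothetical; the bit
 * positions are the ones used in HELPER(get_cp15) below.
 */
struct cp15_decode {
    int crn;  /* bits [19:16]: primary coprocessor register number */
    int opc1; /* bits [23:21] */
    int crm;  /* bits [3:0] */
    int opc2; /* bits [7:5] */
};

static inline struct cp15_decode cp15_decode_insn(uint32_t insn)
{
    struct cp15_decode d;
    d.crn  = (insn >> 16) & 0xf;
    d.opc1 = (insn >> 21) & 7;
    d.crm  = insn & 0xf;
    d.opc2 = (insn >> 5) & 7;
    return d;
}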
1847

    
1848
uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
1849
{
1850
    int op1;
1851
    int op2;
1852
    int crm;
1853

    
1854
    op1 = (insn >> 21) & 7;
1855
    op2 = (insn >> 5) & 7;
1856
    crm = insn & 0xf;
1857
    switch ((insn >> 16) & 0xf) {
1858
    case 0: /* ID codes.  */
1859
        switch (op1) {
1860
        case 0:
1861
            switch (crm) {
1862
            case 0:
1863
                switch (op2) {
1864
                case 0: /* Device ID.  */
1865
                    return env->cp15.c0_cpuid;
1866
                case 1: /* Cache Type.  */
1867
                    return env->cp15.c0_cachetype;
1868
                case 2: /* TCM status.  */
1869
                    return 0;
1870
                case 3: /* TLB type register.  */
1871
                    return 0; /* No lockable TLB entries.  */
1872
                case 5: /* MPIDR */
1873
                    /* The MPIDR was standardised in v7; prior to
1874
                     * this it was implemented only in the 11MPCore.
1875
                     * For all other pre-v7 cores it does not exist.
1876
                     */
1877
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1878
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1879
                        int mpidr = env->cpu_index;
1880
                        /* We don't support setting cluster ID ([8..11])
1881
                         * so these bits always RAZ.
1882
                         */
1883
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1884
                            mpidr |= (1 << 31);
1885
                            /* Cores which are uniprocessor (non-coherent)
1886
                             * but still implement the MP extensions set
1887
                             * bit 30. (For instance, A9UP.) However we do
1888
                             * not currently model any of those cores.
1889
                             */
1890
                        }
1891
                        return mpidr;
1892
                    }
1893
                    /* otherwise fall through to the unimplemented-reg case */
1894
                default:
1895
                    goto bad_reg;
1896
                }
1897
            case 1:
1898
                if (!arm_feature(env, ARM_FEATURE_V6))
1899
                    goto bad_reg;
1900
                return env->cp15.c0_c1[op2];
1901
            case 2:
1902
                if (!arm_feature(env, ARM_FEATURE_V6))
1903
                    goto bad_reg;
1904
                return env->cp15.c0_c2[op2];
1905
            case 3: case 4: case 5: case 6: case 7:
1906
                return 0;
1907
            default:
1908
                goto bad_reg;
1909
            }
1910
        case 1:
1911
            /* These registers aren't documented on arm11 cores.  However
1912
               Linux looks at them anyway.  */
1913
            if (!arm_feature(env, ARM_FEATURE_V6))
1914
                goto bad_reg;
1915
            if (crm != 0)
1916
                goto bad_reg;
1917
            if (!arm_feature(env, ARM_FEATURE_V7))
1918
                return 0;
1919

    
1920
            switch (op2) {
1921
            case 0:
1922
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1923
            case 1:
1924
                return env->cp15.c0_clid;
1925
            case 7:
1926
                return 0;
1927
            }
1928
            goto bad_reg;
1929
        case 2:
1930
            if (op2 != 0 || crm != 0)
1931
                goto bad_reg;
1932
            return env->cp15.c0_cssel;
1933
        default:
1934
            goto bad_reg;
1935
        }
1936
    case 1: /* System configuration.  */
1937
        if (arm_feature(env, ARM_FEATURE_V7)
1938
            && op1 == 0 && crm == 1 && op2 == 0) {
1939
            return env->cp15.c1_scr;
1940
        }
1941
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1942
            op2 = 0;
1943
        switch (op2) {
1944
        case 0: /* Control register.  */
1945
            return env->cp15.c1_sys;
1946
        case 1: /* Auxiliary control register.  */
1947
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1948
                return env->cp15.c1_xscaleauxcr;
1949
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1950
                goto bad_reg;
1951
            switch (ARM_CPUID(env)) {
1952
            case ARM_CPUID_ARM1026:
1953
                return 1;
1954
            case ARM_CPUID_ARM1136:
1955
            case ARM_CPUID_ARM1136_R2:
1956
            case ARM_CPUID_ARM1176:
1957
                return 7;
1958
            case ARM_CPUID_ARM11MPCORE:
1959
                return 1;
1960
            case ARM_CPUID_CORTEXA8:
1961
                return 2;
1962
            case ARM_CPUID_CORTEXA9:
1963
            case ARM_CPUID_CORTEXA15:
1964
                return 0;
1965
            default:
1966
                goto bad_reg;
1967
            }
1968
        case 2: /* Coprocessor access register.  */
1969
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1970
                goto bad_reg;
1971
            return env->cp15.c1_coproc;
1972
        default:
1973
            goto bad_reg;
1974
        }
1975
    case 2: /* MMU Page table control / MPU cache control.  */
1976
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1977
            switch (op2) {
1978
            case 0:
1979
                return env->cp15.c2_data;
1980
                break;
1981
            case 1:
1982
                return env->cp15.c2_insn;
1983
                break;
1984
            default:
1985
                goto bad_reg;
1986
            }
1987
        } else {
1988
            switch (op2) {
1989
            case 0:
1990
                return env->cp15.c2_base0;
1991
            case 1:
1992
                return env->cp15.c2_base1;
1993
            case 2:
1994
                return env->cp15.c2_control;
1995
            default:
1996
                goto bad_reg;
1997
            }
1998
        }
1999
    case 3: /* MMU Domain access control / MPU write buffer control.  */
2000
        return env->cp15.c3;
2001
    case 4: /* Reserved.  */
2002
        goto bad_reg;
2003
    case 5: /* MMU Fault status / MPU access permission.  */
2004
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
2005
            op2 = 0;
2006
        switch (op2) {
2007
        case 0:
2008
            if (arm_feature(env, ARM_FEATURE_MPU))
2009
                return simple_mpu_ap_bits(env->cp15.c5_data);
2010
            return env->cp15.c5_data;
2011
        case 1:
2012
            if (arm_feature(env, ARM_FEATURE_MPU))
2013
                return simple_mpu_ap_bits(env->cp15.c5_insn);
2014
            return env->cp15.c5_insn;
2015
        case 2:
2016
            if (!arm_feature(env, ARM_FEATURE_MPU))
2017
                goto bad_reg;
2018
            return env->cp15.c5_data;
2019
        case 3:
2020
            if (!arm_feature(env, ARM_FEATURE_MPU))
2021
                goto bad_reg;
2022
            return env->cp15.c5_insn;
2023
        default:
2024
            goto bad_reg;
2025
        }
2026
    case 6: /* MMU Fault address.  */
2027
        if (arm_feature(env, ARM_FEATURE_MPU)) {
2028
            if (crm >= 8)
2029
                goto bad_reg;
2030
            return env->cp15.c6_region[crm];
2031
        } else {
2032
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
2033
                op2 = 0;
2034
            switch (op2) {
2035
            case 0:
2036
                return env->cp15.c6_data;
2037
            case 1:
2038
                if (arm_feature(env, ARM_FEATURE_V6)) {
2039
                    /* Watchpoint Fault Address.  */
2040
                    return 0; /* Not implemented.  */
2041
                } else {
2042
                    /* Instruction Fault Address.  */
2043
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
2044
                       shouldn't do any harm.  */
2045
                    return env->cp15.c6_insn;
2046
                }
2047
            case 2:
2048
                if (arm_feature(env, ARM_FEATURE_V6)) {
2049
                    /* Instruction Fault Address.  */
2050
                    return env->cp15.c6_insn;
2051
                } else {
2052
                    goto bad_reg;
2053
                }
2054
            default:
2055
                goto bad_reg;
2056
            }
2057
        }
2058
    case 7: /* Cache control.  */
2059
        if (crm == 4 && op1 == 0 && op2 == 0) {
2060
            return env->cp15.c7_par;
2061
        }
2062
        /* FIXME: Should only clear Z flag if destination is r15.  */
2063
        env->ZF = 0;
2064
        return 0;
2065
    case 8: /* MMU TLB control.  */
2066
        goto bad_reg;
2067
    case 9:
2068
        switch (crm) {
2069
        case 0: /* Cache lockdown */
2070
            switch (op1) {
2071
            case 0: /* L1 cache.  */
2072
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2073
                    return 0;
2074
                }
2075
                switch (op2) {
2076
                case 0:
2077
                    return env->cp15.c9_data;
2078
                case 1:
2079
                    return env->cp15.c9_insn;
2080
                default:
2081
                    goto bad_reg;
2082
                }
2083
            case 1: /* L2 cache */
2084
                /* L2 Lockdown and Auxiliary control.  */
2085
                switch (op2) {
2086
                case 0:
2087
                    /* L2 cache lockdown (A8 only) */
2088
                    return 0;
2089
                case 2:
2090
                    /* L2 cache auxiliary control (A8) or control (A15) */
2091
                    if (ARM_CPUID(env) == ARM_CPUID_CORTEXA15) {
2092
                        /* Linux wants the number of processors from here.
2093
                         * Might as well set the interrupt-controller bit too.
2094
                         */
2095
                        return ((smp_cpus - 1) << 24) | (1 << 23);
2096
                    }
2097
                    return 0;
2098
                case 3:
2099
                    /* L2 cache extended control (A15) */
2100
                    return 0;
2101
                default:
2102
                    goto bad_reg;
2103
                }
2104
            default:
2105
                goto bad_reg;
2106
            }
2107
            break;
2108
        case 12: /* Performance monitor control */
2109
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2110
                goto bad_reg;
2111
            }
2112
            switch (op2) {
2113
            case 0: /* performance monitor control register */
2114
                return env->cp15.c9_pmcr;
2115
            case 1: /* count enable set */
2116
            case 2: /* count enable clear */
2117
                return env->cp15.c9_pmcnten;
2118
            case 3: /* overflow flag status */
2119
                return env->cp15.c9_pmovsr;
2120
            case 4: /* software increment */
2121
            case 5: /* event counter selection register */
2122
                return 0; /* Unimplemented, RAZ/WI */
2123
            default:
2124
                goto bad_reg;
2125
            }
2126
        case 13: /* Performance counters */
2127
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2128
                goto bad_reg;
2129
            }
2130
            switch (op2) {
2131
            case 1: /* Event type select */
2132
                return env->cp15.c9_pmxevtyper;
2133
            case 0: /* Cycle count register */
2134
            case 2: /* Event count register */
2135
                /* Unimplemented, so RAZ/WI */
2136
                return 0;
2137
            default:
2138
                goto bad_reg;
2139
            }
2140
        case 14: /* Performance monitor control */
2141
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2142
                goto bad_reg;
2143
            }
2144
            switch (op2) {
2145
            case 0: /* user enable */
2146
                return env->cp15.c9_pmuserenr;
2147
            case 1: /* interrupt enable set */
2148
            case 2: /* interrupt enable clear */
2149
                return env->cp15.c9_pminten;
2150
            default:
2151
                goto bad_reg;
2152
            }
2153
        default:
2154
            goto bad_reg;
2155
        }
2156
        break;
2157
    case 10: /* MMU TLB lockdown.  */
2158
        /* ??? TLB lockdown not implemented.  */
2159
        return 0;
2160
    case 11: /* TCM DMA control.  */
2161
    case 12: /* Reserved.  */
2162
        goto bad_reg;
2163
    case 13: /* Process ID.  */
2164
        switch (op2) {
2165
        case 0:
2166
            return env->cp15.c13_fcse;
2167
        case 1:
2168
            return env->cp15.c13_context;
2169
        default:
2170
            goto bad_reg;
2171
        }
2172
    case 14: /* Generic timer */
2173
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
2174
            /* Dummy implementation: RAZ/WI for all */
2175
            return 0;
2176
        }
2177
        goto bad_reg;
2178
    case 15: /* Implementation specific.  */
2179
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2180
            if (op2 == 0 && crm == 1)
2181
                return env->cp15.c15_cpar;
2182

    
2183
            goto bad_reg;
2184
        }
2185
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2186
            switch (crm) {
2187
            case 0:
2188
                return 0;
2189
            case 1: /* Read TI925T configuration.  */
2190
                return env->cp15.c15_ticonfig;
2191
            case 2: /* Read I_max.  */
2192
                return env->cp15.c15_i_max;
2193
            case 3: /* Read I_min.  */
2194
                return env->cp15.c15_i_min;
2195
            case 4: /* Read thread-ID.  */
2196
                return env->cp15.c15_threadid;
2197
            case 8: /* TI925T_status */
2198
                return 0;
2199
            }
2200
            /* TODO: Peripheral port remap register:
2201
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2202
             * controller base address at $rn & ~0xfff and map size of
2203
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2204
            goto bad_reg;
2205
        }
2206
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
2207
            switch (crm) {
2208
            case 0:
2209
                if ((op1 == 4) && (op2 == 0)) {
2210
                    /* The config_base_address should hold the value of
2211
                     * the peripheral base. ARM should get this from a CPU
2212
                     * object property, but that support isn't available in
2213
                     * December 2011. Default to 0 for now and board models
2214
                     * that care can set it by a private hook */
2215
                    return env->cp15.c15_config_base_address;
2216
                } else if ((op1 == 0) && (op2 == 0)) {
2217
                    /* power_control should be set to maximum latency. Again,
2218
                       default to 0 and set by private hook */
2219
                    return env->cp15.c15_power_control;
2220
                } else if ((op1 == 0) && (op2 == 1)) {
2221
                    return env->cp15.c15_diagnostic;
2222
                } else if ((op1 == 0) && (op2 == 2)) {
2223
                    return env->cp15.c15_power_diagnostic;
2224
                }
2225
                break;
2226
            case 1: /* NEON Busy */
2227
                return 0;
2228
            case 5: /* tlb lockdown */
2229
            case 6:
2230
            case 7:
2231
                if ((op1 == 5) && (op2 == 2)) {
2232
                    return 0;
2233
                }
2234
                break;
2235
            default:
2236
                break;
2237
            }
2238
            goto bad_reg;
2239
        }
2240
        return 0;
2241
    }
2242
bad_reg:
2243
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2244
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2245
              (insn >> 16) & 0xf, crm, op1, op2);
2246
    return 0;
2247
}
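
/* Illustrative example, not part of the original helper.c: the MPIDR
 * value composed above for a v7MP core.  The helper name is
 * hypothetical; the bit layout mirrors the code (bit 31 set, affinity
 * level 0 = CPU index, cluster ID bits [11:8] RAZ).
 */
static inline uint32_t mpidr_example(uint32_t cpu_index)
{
    /* e.g. core 1 of an SMP system reads back 0x80000001 */
    return (1u << 31) | cpu_index;
}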
2248

    
2249
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
2250
{
2251
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2252
        env->regs[13] = val;
2253
    } else {
2254
        env->banked_r13[bank_number(env, mode)] = val;
2255
    }
2256
}
2257

    
2258
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
2259
{
2260
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2261
        return env->regs[13];
2262
    } else {
2263
        return env->banked_r13[bank_number(env, mode)];
2264
    }
2265
}
2266

    
2267
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2268
{
2269
    switch (reg) {
2270
    case 0: /* APSR */
2271
        return xpsr_read(env) & 0xf8000000;
2272
    case 1: /* IAPSR */
2273
        return xpsr_read(env) & 0xf80001ff;
2274
    case 2: /* EAPSR */
2275
        return xpsr_read(env) & 0xff00fc00;
2276
    case 3: /* xPSR */
2277
        return xpsr_read(env) & 0xff00fdff;
2278
    case 5: /* IPSR */
2279
        return xpsr_read(env) & 0x000001ff;
2280
    case 6: /* EPSR */
2281
        return xpsr_read(env) & 0x0700fc00;
2282
    case 7: /* IEPSR */
2283
        return xpsr_read(env) & 0x0700edff;
2284
    case 8: /* MSP */
2285
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2286
    case 9: /* PSP */
2287
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2288
    case 16: /* PRIMASK */
2289
        return (env->uncached_cpsr & CPSR_I) != 0;
2290
    case 17: /* BASEPRI */
2291
    case 18: /* BASEPRI_MAX */
2292
        return env->v7m.basepri;
2293
    case 19: /* FAULTMASK */
2294
        return (env->uncached_cpsr & CPSR_F) != 0;
2295
    case 20: /* CONTROL */
2296
        return env->v7m.control;
2297
    default:
2298
        /* ??? For debugging only.  */
2299
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2300
        return 0;
2301
    }
2302
}
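
/* Illustrative sketch, not part of the original helper.c: the MSP/PSP
 * reads above select between the live SP in regs[13] and the banked
 * copy in v7m.other_sp.  The helper name is hypothetical; the selection
 * logic restates cases 8 and 9 of HELPER(v7m_mrs).
 */
static inline uint32_t v7m_read_sp_example(CPUARMState *env, int want_process_sp)
{
    int process_is_live = (env->v7m.current_sp != 0);
    if (want_process_sp == process_is_live) {
        return env->regs[13];      /* requested SP is the live one */
    }
    return env->v7m.other_sp;      /* requested SP is banked */
}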
2303

    
2304
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
2305
{
2306
    switch (reg) {
2307
    case 0: /* APSR */
2308
        xpsr_write(env, val, 0xf8000000);
2309
        break;
2310
    case 1: /* IAPSR */
2311
        xpsr_write(env, val, 0xf8000000);
2312
        break;
2313
    case 2: /* EAPSR */
2314
        xpsr_write(env, val, 0xfe00fc00);
2315
        break;
2316
    case 3: /* xPSR */
2317
        xpsr_write(env, val, 0xfe00fc00);
2318
        break;
2319
    case 5: /* IPSR */
2320
        /* IPSR bits are readonly.  */
2321
        break;
2322
    case 6: /* EPSR */
2323
        xpsr_write(env, val, 0x0600fc00);
2324
        break;
2325
    case 7: /* IEPSR */
2326
        xpsr_write(env, val, 0x0600fc00);
2327
        break;
2328
    case 8: /* MSP */
2329
        if (env->v7m.current_sp)
2330
            env->v7m.other_sp = val;
2331
        else
2332
            env->regs[13] = val;
2333
        break;
2334
    case 9: /* PSP */
2335
        if (env->v7m.current_sp)
2336
            env->regs[13] = val;
2337
        else
2338
            env->v7m.other_sp = val;
2339
        break;
2340
    case 16: /* PRIMASK */
2341
        if (val & 1)
2342
            env->uncached_cpsr |= CPSR_I;
2343
        else
2344
            env->uncached_cpsr &= ~CPSR_I;
2345
        break;
2346
    case 17: /* BASEPRI */
2347
        env->v7m.basepri = val & 0xff;
2348
        break;
2349
    case 18: /* BASEPRI_MAX */
2350
        val &= 0xff;
2351
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2352
            env->v7m.basepri = val;
2353
        break;
2354
    case 19: /* FAULTMASK */
2355
        if (val & 1)
2356
            env->uncached_cpsr |= CPSR_F;
2357
        else
2358
            env->uncached_cpsr &= ~CPSR_F;
2359
        break;
2360
    case 20: /* CONTROL */
2361
        env->v7m.control = val & 3;
2362
        switch_v7m_sp(env, (val & 2) != 0);
2363
        break;
2364
    default:
2365
        /* ??? For debugging only.  */
2366
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2367
        return;
2368
    }
2369
}
2370

    
2371
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2372
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2373
                void *opaque)
2374
{
2375
    if (cpnum < 0 || cpnum > 14) {
2376
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2377
        return;
2378
    }
2379

    
2380
    env->cp[cpnum].cp_read = cp_read;
2381
    env->cp[cpnum].cp_write = cp_write;
2382
    env->cp[cpnum].opaque = opaque;
2383
}
2384

    
2385
#endif
2386

    
2387
/* Note that signed overflow is undefined in C.  The following routines are
2388
   careful to use unsigned types where modulo arithmetic is required.
2389
   Failure to do so _will_ break on newer gcc.  */
2390

    
2391
/* Signed saturating arithmetic.  */
2392

    
2393
/* Perform 16-bit signed saturating addition.  */
2394
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2395
{
2396
    uint16_t res;
2397

    
2398
    res = a + b;
2399
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2400
        if (a & 0x8000)
2401
            res = 0x8000;
2402
        else
2403
            res = 0x7fff;
2404
    }
2405
    return res;
2406
}
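
/* Worked example, not part of the original helper.c: the check above
 * flags signed overflow only when the operands share a sign and the
 * result's sign differs.  0x7000 + 0x2000 = 0x9000 overflows and
 * saturates to 0x7fff, while 0x7000 + 0x9000 (mixed signs) cannot
 * overflow.  The helper name below is hypothetical.
 */
static inline int add16_sat_overflows(uint16_t a, uint16_t b)
{
    uint16_t res = a + b;
    return ((res ^ a) & 0x8000) && !((a ^ b) & 0x8000);
}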
2407

    
2408
/* Perform 8-bit signed saturating addition.  */
2409
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2410
{
2411
    uint8_t res;
2412

    
2413
    res = a + b;
2414
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2415
        if (a & 0x80)
2416
            res = 0x80;
2417
        else
2418
            res = 0x7f;
2419
    }
2420
    return res;
2421
}
2422

    
2423
/* Perform 16-bit signed saturating subtraction.  */
2424
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2425
{
2426
    uint16_t res;
2427

    
2428
    res = a - b;
2429
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2430
        if (a & 0x8000)
2431
            res = 0x8000;
2432
        else
2433
            res = 0x7fff;
2434
    }
2435
    return res;
2436
}
2437

    
2438
/* Perform 8-bit signed saturating subtraction.  */
2439
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2440
{
2441
    uint8_t res;
2442

    
2443
    res = a - b;
2444
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2445
        if (a & 0x80)
2446
            res = 0x80;
2447
        else
2448
            res = 0x7f;
2449
    }
2450
    return res;
2451
}
2452

    
2453
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2454
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2455
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2456
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2457
#define PFX q
2458

    
2459
#include "op_addsub.h"
2460

    
2461
/* Unsigned saturating arithmetic.  */
2462
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2463
{
2464
    uint16_t res;
2465
    res = a + b;
2466
    if (res < a)
2467
        res = 0xffff;
2468
    return res;
2469
}
2470

    
2471
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2472
{
2473
    if (a > b)
2474
        return a - b;
2475
    else
2476
        return 0;
2477
}
2478

    
2479
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2480
{
2481
    uint8_t res;
2482
    res = a + b;
2483
    if (res < a)
2484
        res = 0xff;
2485
    return res;
2486
}
2487

    
2488
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2489
{
2490
    if (a > b)
2491
        return a - b;
2492
    else
2493
        return 0;
2494
}
2495

    
2496
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2497
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2498
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2499
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2500
#define PFX uq
2501

    
2502
#include "op_addsub.h"
2503

    
2504
/* Signed modulo arithmetic.  */
2505
#define SARITH16(a, b, n, op) do { \
2506
    int32_t sum; \
2507
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2508
    RESULT(sum, n, 16); \
2509
    if (sum >= 0) \
2510
        ge |= 3 << (n * 2); \
2511
    } while(0)
2512

    
2513
#define SARITH8(a, b, n, op) do { \
2514
    int32_t sum; \
2515
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2516
    RESULT(sum, n, 8); \
2517
    if (sum >= 0) \
2518
        ge |= 1 << n; \
2519
    } while(0)
2520

    
2521

    
2522
#define ADD16(a, b, n) SARITH16(a, b, n, +)
2523
#define SUB16(a, b, n) SARITH16(a, b, n, -)
2524
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2525
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2526
#define PFX s
2527
#define ARITH_GE
2528

    
2529
#include "op_addsub.h"
2530

    
2531
/* Unsigned modulo arithmetic.  */
2532
#define ADD16(a, b, n) do { \
2533
    uint32_t sum; \
2534
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2535
    RESULT(sum, n, 16); \
2536
    if ((sum >> 16) == 1) \
2537
        ge |= 3 << (n * 2); \
2538
    } while(0)
2539

    
2540
#define ADD8(a, b, n) do { \
2541
    uint32_t sum; \
2542
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2543
    RESULT(sum, n, 8); \
2544
    if ((sum >> 8) == 1) \
2545
        ge |= 1 << n; \
2546
    } while(0)
2547

    
2548
#define SUB16(a, b, n) do { \
2549
    uint32_t sum; \
2550
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2551
    RESULT(sum, n, 16); \
2552
    if ((sum >> 16) == 0) \
2553
        ge |= 3 << (n * 2); \
2554
    } while(0)
2555

    
2556
#define SUB8(a, b, n) do { \
2557
    uint32_t sum; \
2558
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2559
    RESULT(sum, n, 8); \
2560
    if ((sum >> 8) == 0) \
2561
        ge |= 1 << n; \
2562
    } while(0)
2563

    
2564
#define PFX u
2565
#define ARITH_GE
2566

    
2567
#include "op_addsub.h"
2568

    
2569
/* Halved signed arithmetic.  */
2570
#define ADD16(a, b, n) \
2571
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2572
#define SUB16(a, b, n) \
2573
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2574
#define ADD8(a, b, n) \
2575
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2576
#define SUB8(a, b, n) \
2577
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2578
#define PFX sh
2579

    
2580
#include "op_addsub.h"
2581

    
2582
/* Halved unsigned arithmetic.  */
2583
#define ADD16(a, b, n) \
2584
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2585
#define SUB16(a, b, n) \
2586
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2587
#define ADD8(a, b, n) \
2588
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2589
#define SUB8(a, b, n) \
2590
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2591
#define PFX uh
2592

    
2593
#include "op_addsub.h"
2594

    
2595
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2596
{
2597
    if (a > b)
2598
        return a - b;
2599
    else
2600
        return b - a;
2601
}
2602

    
2603
/* Unsigned sum of absolute byte differences.  */
2604
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2605
{
2606
    uint32_t sum;
2607
    sum = do_usad(a, b);
2608
    sum += do_usad(a >> 8, b >> 8);
2609
    sum += do_usad(a >> 16, b >> 16);
2610
    sum += do_usad(a >> 24, b >> 24);
2611
    return sum;
2612
}
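
/* Illustrative example, not part of the original helper.c: USAD8 sums
 * the absolute differences of the four byte lanes.  The function name
 * and operand values are hypothetical.
 */
static inline uint32_t usad8_example(void)
{
    /* |0x01-0x04| + |0x02-0x03| + |0x03-0x02| + |0x04-0x01| = 8 */
    return HELPER(usad8)(0x01020304, 0x04030201);
}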
2613

    
2614
/* For ARMv6 SEL instruction.  */
2615
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2616
{
2617
    uint32_t mask;
2618

    
2619
    mask = 0;
2620
    if (flags & 1)
2621
        mask |= 0xff;
2622
    if (flags & 2)
2623
        mask |= 0xff00;
2624
    if (flags & 4)
2625
        mask |= 0xff0000;
2626
    if (flags & 8)
2627
        mask |= 0xff000000;
2628
    return (a & mask) | (b & ~mask);
2629
}
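
/* Illustrative example, not part of the original helper.c: SEL picks
 * each result byte from 'a' where the corresponding GE flag is set and
 * from 'b' otherwise.  The function name and values are hypothetical.
 */
static inline uint32_t sel_flags_example(void)
{
    /* GE[0] and GE[2] set: bytes 0 and 2 from a, bytes 1 and 3 from b,
     * giving 0x11BB33DD. */
    return HELPER(sel_flags)(0x5, 0xAABBCCDDu, 0x11223344u);
}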
2630

    
2631
uint32_t HELPER(logicq_cc)(uint64_t val)
2632
{
2633
    return (val >> 32) | (val != 0);
2634
}
2635

    
2636
/* VFP support.  We follow the convention used for VFP instructions:
2637
   Single precision routines have an "s" suffix, double precision a
2638
   "d" suffix.  */
2639

    
2640
/* Convert host exception flags to vfp form.  */
2641
static inline int vfp_exceptbits_from_host(int host_bits)
2642
{
2643
    int target_bits = 0;
2644

    
2645
    if (host_bits & float_flag_invalid)
2646
        target_bits |= 1;
2647
    if (host_bits & float_flag_divbyzero)
2648
        target_bits |= 2;
2649
    if (host_bits & float_flag_overflow)
2650
        target_bits |= 4;
2651
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2652
        target_bits |= 8;
2653
    if (host_bits & float_flag_inexact)
2654
        target_bits |= 0x10;
2655
    if (host_bits & float_flag_input_denormal)
2656
        target_bits |= 0x80;
2657
    return target_bits;
2658
}
2659

    
2660
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
2661
{
2662
    int i;
2663
    uint32_t fpscr;
2664

    
2665
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2666
            | (env->vfp.vec_len << 16)
2667
            | (env->vfp.vec_stride << 20);
2668
    i = get_float_exception_flags(&env->vfp.fp_status);
2669
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2670
    fpscr |= vfp_exceptbits_from_host(i);
2671
    return fpscr;
2672
}
2673

    
2674
uint32_t vfp_get_fpscr(CPUARMState *env)
2675
{
2676
    return HELPER(vfp_get_fpscr)(env);
2677
}
2678

    
2679
/* Convert vfp exception flags to target form.  */
2680
static inline int vfp_exceptbits_to_host(int target_bits)
2681
{
2682
    int host_bits = 0;
2683

    
2684
    if (target_bits & 1)
2685
        host_bits |= float_flag_invalid;
2686
    if (target_bits & 2)
2687
        host_bits |= float_flag_divbyzero;
2688
    if (target_bits & 4)
2689
        host_bits |= float_flag_overflow;
2690
    if (target_bits & 8)
2691
        host_bits |= float_flag_underflow;
2692
    if (target_bits & 0x10)
2693
        host_bits |= float_flag_inexact;
2694
    if (target_bits & 0x80)
2695
        host_bits |= float_flag_input_denormal;
2696
    return host_bits;
2697
}
2698

    
2699
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
2700
{
2701
    int i;
2702
    uint32_t changed;
2703

    
2704
    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2705
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2706
    env->vfp.vec_len = (val >> 16) & 7;
2707
    env->vfp.vec_stride = (val >> 20) & 3;
2708

    
2709
    changed ^= val;
2710
    if (changed & (3 << 22)) {
2711
        i = (val >> 22) & 3;
2712
        switch (i) {
2713
        case 0:
2714
            i = float_round_nearest_even;
2715
            break;
2716
        case 1:
2717
            i = float_round_up;
2718
            break;
2719
        case 2:
2720
            i = float_round_down;
2721
            break;
2722
        case 3:
2723
            i = float_round_to_zero;
2724
            break;
2725
        }
2726
        set_float_rounding_mode(i, &env->vfp.fp_status);
2727
    }
2728
    if (changed & (1 << 24)) {
2729
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2730
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2731
    }
2732
    if (changed & (1 << 25))
2733
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2734

    
2735
    i = vfp_exceptbits_to_host(val);
2736
    set_float_exception_flags(i, &env->vfp.fp_status);
2737
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2738
}
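
/* Illustrative summary, not part of the original helper.c: the FPSCR
 * fields handled by the get/set helpers above.  The names are
 * hypothetical; only the bit positions come from the code.
 */
enum {
    VFP_FPSCR_LEN_SHIFT    = 16,        /* vector length, 3 bits */
    VFP_FPSCR_STRIDE_SHIFT = 20,        /* vector stride, 2 bits */
    VFP_FPSCR_RMODE_SHIFT  = 22,        /* rounding mode, 2 bits */
    VFP_FPSCR_FZ           = 1 << 24,   /* flush-to-zero */
    VFP_FPSCR_DN           = 1 << 25    /* default NaN mode */
};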
2739

    
2740
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
2741
{
2742
    HELPER(vfp_set_fpscr)(env, val);
2743
}
2744

    
2745
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2746

    
2747
#define VFP_BINOP(name) \
2748
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2749
{ \
2750
    float_status *fpst = fpstp; \
2751
    return float32_ ## name(a, b, fpst); \
2752
} \
2753
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2754
{ \
2755
    float_status *fpst = fpstp; \
2756
    return float64_ ## name(a, b, fpst); \
2757
}
2758
VFP_BINOP(add)
2759
VFP_BINOP(sub)
2760
VFP_BINOP(mul)
2761
VFP_BINOP(div)
2762
#undef VFP_BINOP
2763

    
2764
float32 VFP_HELPER(neg, s)(float32 a)
2765
{
2766
    return float32_chs(a);
2767
}
2768

    
2769
float64 VFP_HELPER(neg, d)(float64 a)
2770
{
2771
    return float64_chs(a);
2772
}
2773

    
2774
float32 VFP_HELPER(abs, s)(float32 a)
2775
{
2776
    return float32_abs(a);
2777
}
2778

    
2779
float64 VFP_HELPER(abs, d)(float64 a)
2780
{
2781
    return float64_abs(a);
2782
}
2783

    
2784
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
2785
{
2786
    return float32_sqrt(a, &env->vfp.fp_status);
2787
}
2788

    
2789
float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
2790
{
2791
    return float64_sqrt(a, &env->vfp.fp_status);
2792
}
2793

    
2794
/* XXX: check quiet/signaling case */
2795
#define DO_VFP_cmp(p, type) \
2796
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
2797
{ \
2798
    uint32_t flags; \
2799
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2800
    case 0: flags = 0x6; break; \
2801
    case -1: flags = 0x8; break; \
2802
    case 1: flags = 0x2; break; \
2803
    default: case 2: flags = 0x3; break; \
2804
    } \
2805
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2806
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2807
} \
2808
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
2809
{ \
2810
    uint32_t flags; \
2811
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2812
    case 0: flags = 0x6; break; \
2813
    case -1: flags = 0x8; break; \
2814
    case 1: flags = 0x2; break; \
2815
    default: case 2: flags = 0x3; break; \
2816
    } \
2817
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2818
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2819
}
2820
DO_VFP_cmp(s, float32)
2821
DO_VFP_cmp(d, float64)
2822
#undef DO_VFP_cmp
2823

    
2824
/* Integer to float and float to integer conversions */
2825

    
2826
#define CONV_ITOF(name, fsz, sign) \
2827
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2828
{ \
2829
    float_status *fpst = fpstp; \
2830
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
2831
}
2832

    
2833
#define CONV_FTOI(name, fsz, sign, round) \
2834
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2835
{ \
2836
    float_status *fpst = fpstp; \
2837
    if (float##fsz##_is_any_nan(x)) { \
2838
        float_raise(float_flag_invalid, fpst); \
2839
        return 0; \
2840
    } \
2841
    return float##fsz##_to_##sign##int32##round(x, fpst); \
2842
}
2843

    
2844
#define FLOAT_CONVS(name, p, fsz, sign) \
2845
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2846
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2847
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2848

    
2849
FLOAT_CONVS(si, s, 32, )
2850
FLOAT_CONVS(si, d, 64, )
2851
FLOAT_CONVS(ui, s, 32, u)
2852
FLOAT_CONVS(ui, d, 64, u)
2853

    
2854
#undef CONV_ITOF
2855
#undef CONV_FTOI
2856
#undef FLOAT_CONVS
2857

    
2858
/* floating point conversion */
2859
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
2860
{
2861
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2862
    /* ARM requires that S<->D conversion of any kind of NaN generates
2863
     * a quiet NaN by forcing the most significant frac bit to 1.
2864
     */
2865
    return float64_maybe_silence_nan(r);
2866
}
2867

    
2868
float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
2869
{
2870
    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2871
    /* ARM requires that S<->D conversion of any kind of NaN generates
2872
     * a quiet NaN by forcing the most significant frac bit to 1.
2873
     */
2874
    return float32_maybe_silence_nan(r);
2875
}
2876

    
2877
/* VFP3 fixed point conversion.  */
2878
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2879
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2880
                                    void *fpstp) \
2881
{ \
2882
    float_status *fpst = fpstp; \
2883
    float##fsz tmp; \
2884
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2885
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2886
} \
2887
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2888
                                       void *fpstp) \
2889
{ \
2890
    float_status *fpst = fpstp; \
2891
    float##fsz tmp; \
2892
    if (float##fsz##_is_any_nan(x)) { \
2893
        float_raise(float_flag_invalid, fpst); \
2894
        return 0; \
2895
    } \
2896
    tmp = float##fsz##_scalbn(x, shift, fpst); \
2897
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2898
}
2899

    
2900
VFP_CONV_FIX(sh, d, 64, int16, )
2901
VFP_CONV_FIX(sl, d, 64, int32, )
2902
VFP_CONV_FIX(uh, d, 64, uint16, u)
2903
VFP_CONV_FIX(ul, d, 64, uint32, u)
2904
VFP_CONV_FIX(sh, s, 32, int16, )
2905
VFP_CONV_FIX(sl, s, 32, int32, )
2906
VFP_CONV_FIX(uh, s, 32, uint16, u)
2907
VFP_CONV_FIX(ul, s, 32, uint32, u)
2908
#undef VFP_CONV_FIX
2909

    
2910
/* Half precision conversions.  */
2911
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
2912
{
2913
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2914
    float32 r = float16_to_float32(make_float16(a), ieee, s);
2915
    if (ieee) {
2916
        return float32_maybe_silence_nan(r);
2917
    }
2918
    return r;
2919
}
2920

    
2921
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
2922
{
2923
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2924
    float16 r = float32_to_float16(a, ieee, s);
2925
    if (ieee) {
2926
        r = float16_maybe_silence_nan(r);
2927
    }
2928
    return float16_val(r);
2929
}
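
/* Illustrative sketch, not part of the original helper.c: bit 26 of the
 * FPSCR (the AHP bit) selects ARM's alternative half-precision format;
 * the conversions above use IEEE half precision only when it is clear.
 * The helper name is hypothetical.
 */
static inline int vfp_ahp_enabled(CPUARMState *env)
{
    return (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) != 0;
}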
2930

    
2931
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2932
{
2933
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2934
}
2935

    
2936
uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2937
{
2938
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2939
}
2940

    
2941
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2942
{
2943
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2944
}
2945

    
2946
uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2947
{
2948
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2949
}
2950

    
2951
#define float32_two make_float32(0x40000000)
2952
#define float32_three make_float32(0x40400000)
2953
#define float32_one_point_five make_float32(0x3fc00000)
2954

    
2955
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
2956
{
2957
    float_status *s = &env->vfp.standard_fp_status;
2958
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2959
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2960
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2961
            float_raise(float_flag_input_denormal, s);
2962
        }
2963
        return float32_two;
2964
    }
2965
    return float32_sub(float32_two, float32_mul(a, b, s), s);
2966
}
2967

    
2968
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
2969
{
2970
    float_status *s = &env->vfp.standard_fp_status;
2971
    float32 product;
2972
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2973
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2974
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2975
            float_raise(float_flag_input_denormal, s);
2976
        }
2977
        return float32_one_point_five;
2978
    }
2979
    product = float32_mul(a, b, s);
2980
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2981
}
2982

    
2983
/* NEON helpers.  */
2984

    
2985
/* Constants 256 and 512 are used in some helpers; we avoid relying on
2986
 * int->float conversions at run-time.  */
2987
#define float64_256 make_float64(0x4070000000000000LL)
2988
#define float64_512 make_float64(0x4080000000000000LL)
2989

    
2990
/* The algorithm that must be used to calculate the estimate
2991
 * is specified by the ARM ARM.
2992
 */
2993
static float64 recip_estimate(float64 a, CPUARMState *env)
2994
{
2995
    /* These calculations mustn't set any fp exception flags,
2996
     * so we use a local copy of the fp_status.
2997
     */
2998
    float_status dummy_status = env->vfp.standard_fp_status;
2999
    float_status *s = &dummy_status;
3000
    /* q = (int)(a * 512.0) */
3001
    float64 q = float64_mul(float64_512, a, s);
3002
    int64_t q_int = float64_to_int64_round_to_zero(q, s);
3003

    
3004
    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
3005
    q = int64_to_float64(q_int, s);
3006
    q = float64_add(q, float64_half, s);
3007
    q = float64_div(q, float64_512, s);
3008
    q = float64_div(float64_one, q, s);
3009

    
3010
    /* s = (int)(256.0 * r + 0.5) */
3011
    q = float64_mul(q, float64_256, s);
3012
    q = float64_add(q, float64_half, s);
3013
    q_int = float64_to_int64_round_to_zero(q, s);
3014

    
3015
    /* return (double)s / 256.0 */
3016
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
3017
}
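
/* Worked example, not part of the original helper.c: recip_estimate
 * applied to a = 0.5, restated with plain doubles.  The function name
 * is hypothetical and the values are approximate.
 */
static inline double recip_estimate_example(void)
{
    double a = 0.5;
    int q = (int)(a * 512.0);                     /* 256 */
    double r = 1.0 / (((double)q + 0.5) / 512.0); /* ~1.99610 */
    int s = (int)(256.0 * r + 0.5);               /* 511 */
    return (double)s / 256.0;                     /* 1.99609375 */
}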
3018

    
3019
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
3020
{
3021
    float_status *s = &env->vfp.standard_fp_status;
3022
    float64 f64;
3023
    uint32_t val32 = float32_val(a);
3024

    
3025
    int result_exp;
3026
    int a_exp = (val32  & 0x7f800000) >> 23;
3027
    int sign = val32 & 0x80000000;
3028

    
3029
    if (float32_is_any_nan(a)) {
3030
        if (float32_is_signaling_nan(a)) {
3031
            float_raise(float_flag_invalid, s);
3032
        }
3033
        return float32_default_nan;
3034
    } else if (float32_is_infinity(a)) {
3035
        return float32_set_sign(float32_zero, float32_is_neg(a));
3036
    } else if (float32_is_zero_or_denormal(a)) {
3037
        if (!float32_is_zero(a)) {
3038
            float_raise(float_flag_input_denormal, s);
3039
        }
3040
        float_raise(float_flag_divbyzero, s);
3041
        return float32_set_sign(float32_infinity, float32_is_neg(a));
3042
    } else if (a_exp >= 253) {
3043
        float_raise(float_flag_underflow, s);
3044
        return float32_set_sign(float32_zero, float32_is_neg(a));
3045
    }
3046

    
3047
    f64 = make_float64((0x3feULL << 52)
3048
                       | ((int64_t)(val32 & 0x7fffff) << 29));
3049

    
3050
    result_exp = 253 - a_exp;
3051

    
3052
    f64 = recip_estimate(f64, env);
3053

    
3054
    val32 = sign
3055
        | ((result_exp & 0xff) << 23)
3056
        | ((float64_val(f64) >> 29) & 0x7fffff);
3057
    return make_float32(val32);
3058
}
3059

    
3060
/* The algorithm that must be used to calculate the estimate
3061
 * is specified by the ARM ARM.
3062
 */
3063
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
3064
{
3065
    /* These calculations mustn't set any fp exception flags,
3066
     * so we use a local copy of the fp_status.
3067
     */
3068
    float_status dummy_status = env->vfp.standard_fp_status;
3069
    float_status *s = &dummy_status;
3070
    float64 q;
3071
    int64_t q_int;
3072

    
3073
    if (float64_lt(a, float64_half, s)) {
3074
        /* range 0.25 <= a < 0.5 */
3075

    
3076
        /* a in units of 1/512 rounded down */
3077
        /* q0 = (int)(a * 512.0);  */
3078
        q = float64_mul(float64_512, a, s);
3079
        q_int = float64_to_int64_round_to_zero(q, s);
3080

    
3081
        /* reciprocal root r */
3082
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
3083
        q = int64_to_float64(q_int, s);
3084
        q = float64_add(q, float64_half, s);
3085
        q = float64_div(q, float64_512, s);
3086
        q = float64_sqrt(q, s);
3087
        q = float64_div(float64_one, q, s);
3088
    } else {
3089
        /* range 0.5 <= a < 1.0 */
3090

    
3091
        /* a in units of 1/256 rounded down */
3092
        /* q1 = (int)(a * 256.0); */
3093
        q = float64_mul(float64_256, a, s);
3094
        q_int = float64_to_int64_round_to_zero(q, s);
3095

    
3096
        /* reciprocal root r */
3097
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
3098
        q = int64_to_float64(q_int, s);
3099
        q = float64_add(q, float64_half, s);
3100
        q = float64_div(q, float64_256, s);
3101
        q = float64_sqrt(q, s);
3102
        q = float64_div(float64_one, q, s);
3103
    }
3104
    /* r in units of 1/256 rounded to nearest */
3105
    /* s = (int)(256.0 * r + 0.5); */
3106

    
3107
    q = float64_mul(q, float64_256, s);
3108
    q = float64_add(q, float64_half, s);
3109
    q_int = float64_to_int64_round_to_zero(q, s);
3110

    
3111
    /* return (double)s / 256.0;*/
3112
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
3113
}
3114

    
3115
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
3116
{
3117
    float_status *s = &env->vfp.standard_fp_status;
3118
    int result_exp;
3119
    float64 f64;
3120
    uint32_t val;
3121
    uint64_t val64;
3122

    
3123
    val = float32_val(a);
3124

    
3125
    if (float32_is_any_nan(a)) {
3126
        if (float32_is_signaling_nan(a)) {
3127
            float_raise(float_flag_invalid, s);
3128
        }
3129
        return float32_default_nan;
3130
    } else if (float32_is_zero_or_denormal(a)) {
3131
        if (!float32_is_zero(a)) {
3132
            float_raise(float_flag_input_denormal, s);
3133
        }
3134
        float_raise(float_flag_divbyzero, s);
3135
        return float32_set_sign(float32_infinity, float32_is_neg(a));
3136
    } else if (float32_is_neg(a)) {
3137
        float_raise(float_flag_invalid, s);
3138
        return float32_default_nan;
3139
    } else if (float32_is_infinity(a)) {
3140
        return float32_zero;
3141
    }
3142

    
3143
    /* Normalize to a double-precision value between 0.25 and 1.0,
3144
     * preserving the parity of the exponent.  */
3145
    if ((val & 0x800000) == 0) {
3146
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3147
                           | (0x3feULL << 52)
3148
                           | ((uint64_t)(val & 0x7fffff) << 29));
3149
    } else {
3150
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3151
                           | (0x3fdULL << 52)
3152
                           | ((uint64_t)(val & 0x7fffff) << 29));
3153
    }
3154

    
3155
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
3156

    
3157
    f64 = recip_sqrt_estimate(f64, env);
3158

    
3159
    val64 = float64_val(f64);
3160

    
3161
    val = ((result_exp & 0xff) << 23)
3162
        | ((val64 >> 29)  & 0x7fffff);
3163
    return make_float32(val);
3164
}
3165

    
3166
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
3167
{
3168
    float64 f64;
3169

    
3170
    if ((a & 0x80000000) == 0) {
3171
        return 0xffffffff;
3172
    }
3173

    
3174
    f64 = make_float64((0x3feULL << 52)
3175
                       | ((int64_t)(a & 0x7fffffff) << 21));
3176

    
3177
    f64 = recip_estimate (f64, env);
3178

    
3179
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3180
}
3181

    
3182
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
3183
{
3184
    float64 f64;
3185

    
3186
    if ((a & 0xc0000000) == 0) {
3187
        return 0xffffffff;
3188
    }
3189

    
3190
    if (a & 0x80000000) {
3191
        f64 = make_float64((0x3feULL << 52)
3192
                           | ((uint64_t)(a & 0x7fffffff) << 21));
3193
    } else { /* bits 31-30 == '01' */
3194
        f64 = make_float64((0x3fdULL << 52)
3195
                           | ((uint64_t)(a & 0x3fffffff) << 22));
3196
    }
3197

    
3198
    f64 = recip_sqrt_estimate(f64, env);
3199

    
3200
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3201
}
3202

    
3203
/* VFPv4 fused multiply-accumulate */
3204
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
3205
{
3206
    float_status *fpst = fpstp;
3207
    return float32_muladd(a, b, c, 0, fpst);
3208
}
3209

    
3210
float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
3211
{
3212
    float_status *fpst = fpstp;
3213
    return float64_muladd(a, b, c, 0, fpst);
3214
}
3215

    
3216
void HELPER(set_teecr)(CPUARMState *env, uint32_t val)
3217
{
3218
    val &= 1;
3219
    if (env->teecr != val) {
3220
        env->teecr = val;
3221
        tb_flush(env);
3222
    }
3223
}