root / target-arm / helper.c @ 0ab06d83

1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "gdbstub.h"
7
#include "helper.h"
8
#include "qemu-common.h"
9
#include "host-utils.h"
10
#if !defined(CONFIG_USER_ONLY)
11
#include "hw/loader.h"
12
#endif
13

    
14
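/* Reset values for the cp15 c0, CRm=1 and CRm=2 ID register blocks
   (i.e. the ID_PFR0..ID_MMFR3 and ID_ISAR0..ID_ISAR5 values), copied
   into env->cp15.c0_c1 and c0_c2 by cpu_reset_model_id() below.  */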
static uint32_t cortexa9_cp15_c0_c1[8] =
15
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };
16

    
17
static uint32_t cortexa9_cp15_c0_c2[8] =
18
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };
19

    
20
static uint32_t cortexa8_cp15_c0_c1[8] =
21
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
22

    
23
static uint32_t cortexa8_cp15_c0_c2[8] =
24
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
25

    
26
static uint32_t mpcore_cp15_c0_c1[8] =
27
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
28

    
29
static uint32_t mpcore_cp15_c0_c2[8] =
30
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
31

    
32
static uint32_t arm1136_cp15_c0_c1[8] =
33
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
34

    
35
static uint32_t arm1136_cp15_c0_c2[8] =
36
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
37

    
38
static uint32_t cpu_arm_find_by_name(const char *name);
39

    
40
static inline void set_feature(CPUARMState *env, int feature)
41
{
42
    env->features |= 1u << feature;
43
}
44

    
45
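/* Set the feature flags and the implementation-defined cp15 reset values
   (cache type, c1_sys, ID registers, VFP ID registers) for the given
   main ID register value.  Called from cpu_reset() once the model is known.  */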
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
46
{
47
    env->cp15.c0_cpuid = id;
48
    switch (id) {
49
    case ARM_CPUID_ARM926:
50
        set_feature(env, ARM_FEATURE_V4T);
51
        set_feature(env, ARM_FEATURE_V5);
52
        set_feature(env, ARM_FEATURE_VFP);
53
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
54
        env->cp15.c0_cachetype = 0x1dd20d2;
55
        env->cp15.c1_sys = 0x00090078;
56
        break;
57
    case ARM_CPUID_ARM946:
58
        set_feature(env, ARM_FEATURE_V4T);
59
        set_feature(env, ARM_FEATURE_V5);
60
        set_feature(env, ARM_FEATURE_MPU);
61
        env->cp15.c0_cachetype = 0x0f004006;
62
        env->cp15.c1_sys = 0x00000078;
63
        break;
64
    case ARM_CPUID_ARM1026:
65
        set_feature(env, ARM_FEATURE_V4T);
66
        set_feature(env, ARM_FEATURE_V5);
67
        set_feature(env, ARM_FEATURE_VFP);
68
        set_feature(env, ARM_FEATURE_AUXCR);
69
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
70
        env->cp15.c0_cachetype = 0x1dd20d2;
71
        env->cp15.c1_sys = 0x00090078;
72
        break;
73
    case ARM_CPUID_ARM1136_R2:
74
    case ARM_CPUID_ARM1136:
75
        set_feature(env, ARM_FEATURE_V4T);
76
        set_feature(env, ARM_FEATURE_V5);
77
        set_feature(env, ARM_FEATURE_V6);
78
        set_feature(env, ARM_FEATURE_VFP);
79
        set_feature(env, ARM_FEATURE_AUXCR);
80
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
81
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
82
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
83
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
84
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
85
        env->cp15.c0_cachetype = 0x1dd20d2;
86
        env->cp15.c1_sys = 0x00050078;
87
        break;
88
    case ARM_CPUID_ARM11MPCORE:
89
        set_feature(env, ARM_FEATURE_V4T);
90
        set_feature(env, ARM_FEATURE_V5);
91
        set_feature(env, ARM_FEATURE_V6);
92
        set_feature(env, ARM_FEATURE_V6K);
93
        set_feature(env, ARM_FEATURE_VFP);
94
        set_feature(env, ARM_FEATURE_AUXCR);
95
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
96
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
97
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
98
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
99
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
100
        env->cp15.c0_cachetype = 0x1dd20d2;
101
        break;
102
    case ARM_CPUID_CORTEXA8:
103
        set_feature(env, ARM_FEATURE_V4T);
104
        set_feature(env, ARM_FEATURE_V5);
105
        set_feature(env, ARM_FEATURE_V6);
106
        set_feature(env, ARM_FEATURE_V6K);
107
        set_feature(env, ARM_FEATURE_V7);
108
        set_feature(env, ARM_FEATURE_AUXCR);
109
        set_feature(env, ARM_FEATURE_THUMB2);
110
        set_feature(env, ARM_FEATURE_VFP);
111
        set_feature(env, ARM_FEATURE_VFP3);
112
        set_feature(env, ARM_FEATURE_NEON);
113
        set_feature(env, ARM_FEATURE_THUMB2EE);
114
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
115
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
116
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
117
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
118
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
119
        env->cp15.c0_cachetype = 0x82048004;
120
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
121
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
122
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
123
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
124
        env->cp15.c1_sys = 0x00c50078;
125
        break;
126
    case ARM_CPUID_CORTEXA9:
127
        set_feature(env, ARM_FEATURE_V4T);
128
        set_feature(env, ARM_FEATURE_V5);
129
        set_feature(env, ARM_FEATURE_V6);
130
        set_feature(env, ARM_FEATURE_V6K);
131
        set_feature(env, ARM_FEATURE_V7);
132
        set_feature(env, ARM_FEATURE_AUXCR);
133
        set_feature(env, ARM_FEATURE_THUMB2);
134
        set_feature(env, ARM_FEATURE_VFP);
135
        set_feature(env, ARM_FEATURE_VFP3);
136
        set_feature(env, ARM_FEATURE_VFP_FP16);
137
        set_feature(env, ARM_FEATURE_NEON);
138
        set_feature(env, ARM_FEATURE_THUMB2EE);
139
        /* Note that A9 supports the MP extensions even for
140
         * A9UP and single-core A9MP (which are both different
141
         * and valid configurations; we don't model A9UP).
142
         */
143
        set_feature(env, ARM_FEATURE_V7MP);
144
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
145
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
146
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
147
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
148
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
149
        env->cp15.c0_cachetype = 0x80038003;
150
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
151
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
152
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
153
        env->cp15.c1_sys = 0x00c50078;
154
        break;
155
    case ARM_CPUID_CORTEXM3:
156
        set_feature(env, ARM_FEATURE_V4T);
157
        set_feature(env, ARM_FEATURE_V5);
158
        set_feature(env, ARM_FEATURE_V6);
159
        set_feature(env, ARM_FEATURE_THUMB2);
160
        set_feature(env, ARM_FEATURE_V7);
161
        set_feature(env, ARM_FEATURE_M);
162
        set_feature(env, ARM_FEATURE_DIV);
163
        break;
164
    case ARM_CPUID_ANY: /* For userspace emulation.  */
165
        set_feature(env, ARM_FEATURE_V4T);
166
        set_feature(env, ARM_FEATURE_V5);
167
        set_feature(env, ARM_FEATURE_V6);
168
        set_feature(env, ARM_FEATURE_V6K);
169
        set_feature(env, ARM_FEATURE_V7);
170
        set_feature(env, ARM_FEATURE_THUMB2);
171
        set_feature(env, ARM_FEATURE_VFP);
172
        set_feature(env, ARM_FEATURE_VFP3);
173
        set_feature(env, ARM_FEATURE_VFP_FP16);
174
        set_feature(env, ARM_FEATURE_NEON);
175
        set_feature(env, ARM_FEATURE_THUMB2EE);
176
        set_feature(env, ARM_FEATURE_DIV);
177
        set_feature(env, ARM_FEATURE_V7MP);
178
        break;
179
    case ARM_CPUID_TI915T:
180
    case ARM_CPUID_TI925T:
181
        set_feature(env, ARM_FEATURE_V4T);
182
        set_feature(env, ARM_FEATURE_OMAPCP);
183
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
184
        env->cp15.c0_cachetype = 0x5109149;
185
        env->cp15.c1_sys = 0x00000070;
186
        env->cp15.c15_i_max = 0x000;
187
        env->cp15.c15_i_min = 0xff0;
188
        break;
189
    case ARM_CPUID_PXA250:
190
    case ARM_CPUID_PXA255:
191
    case ARM_CPUID_PXA260:
192
    case ARM_CPUID_PXA261:
193
    case ARM_CPUID_PXA262:
194
        set_feature(env, ARM_FEATURE_V4T);
195
        set_feature(env, ARM_FEATURE_V5);
196
        set_feature(env, ARM_FEATURE_XSCALE);
197
        /* JTAG_ID is ((id << 28) | 0x09265013) */
198
        env->cp15.c0_cachetype = 0xd172172;
199
        env->cp15.c1_sys = 0x00000078;
200
        break;
201
    case ARM_CPUID_PXA270_A0:
202
    case ARM_CPUID_PXA270_A1:
203
    case ARM_CPUID_PXA270_B0:
204
    case ARM_CPUID_PXA270_B1:
205
    case ARM_CPUID_PXA270_C0:
206
    case ARM_CPUID_PXA270_C5:
207
        set_feature(env, ARM_FEATURE_V4T);
208
        set_feature(env, ARM_FEATURE_V5);
209
        set_feature(env, ARM_FEATURE_XSCALE);
210
        /* JTAG_ID is ((id << 28) | 0x09265013) */
211
        set_feature(env, ARM_FEATURE_IWMMXT);
212
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
213
        env->cp15.c0_cachetype = 0xd172172;
214
        env->cp15.c1_sys = 0x00000078;
215
        break;
216
    case ARM_CPUID_SA1100:
217
    case ARM_CPUID_SA1110:
218
        set_feature(env, ARM_FEATURE_STRONGARM);
219
        env->cp15.c1_sys = 0x00000070;
220
        break;
221
    default:
222
        cpu_abort(env, "Bad CPU ID: %x\n", id);
223
        break;
224
    }
225
}
226

    
227
void cpu_reset(CPUARMState *env)
228
{
229
    uint32_t id;
230

    
231
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
232
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
233
        log_cpu_state(env, 0);
234
    }
235

    
236
    id = env->cp15.c0_cpuid;
237
    memset(env, 0, offsetof(CPUARMState, breakpoints));
238
    if (id)
239
        cpu_reset_model_id(env, id);
240
#if defined (CONFIG_USER_ONLY)
241
    env->uncached_cpsr = ARM_CPU_MODE_USR;
242
    /* For user mode we must enable access to coprocessors */
243
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
244
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
245
        env->cp15.c15_cpar = 3;
246
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
247
        env->cp15.c15_cpar = 1;
248
    }
249
#else
250
    /* SVC mode with interrupts disabled.  */
251
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
252
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
253
       clear at reset.  Initial SP and PC are loaded from ROM.  */
254
    if (IS_M(env)) {
255
        uint32_t pc;
256
        uint8_t *rom;
257
        env->uncached_cpsr &= ~CPSR_I;
258
        rom = rom_ptr(0);
259
        if (rom) {
260
            /* We should really use ldl_phys here, in case the guest
261
               modified flash and reset itself.  However images
262
               loaded via -kernel have not been copied yet, so load the
263
               values directly from there.  */
264
            env->regs[13] = ldl_p(rom);
265
            pc = ldl_p(rom + 4);
266
            env->thumb = pc & 1;
267
            env->regs[15] = pc & ~1;
268
        }
269
    }
270
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
271
    env->cp15.c2_base_mask = 0xffffc000u;
272
    /* v7 performance monitor control register: same implementor
273
     * field as main ID register, and we implement no event counters.
274
     */
275
    env->cp15.c9_pmcr = (id & 0xff000000);
276
#endif
277
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
278
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
279
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
280
    set_float_detect_tininess(float_tininess_before_rounding,
281
                              &env->vfp.fp_status);
282
    set_float_detect_tininess(float_tininess_before_rounding,
283
                              &env->vfp.standard_fp_status);
284
    tlb_flush(env, 1);
285
}
286

    
287
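/* gdbstub accessors for the VFP/Neon register set: D0-D15 (D0-D31 with
   VFP3), the Neon quad register aliases when present, then
   FPSID/FPSCR/FPEXC.  The return value is the size in bytes of the
   register transferred, or zero if 'reg' is out of range.  */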
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
288
{
289
    int nregs;
290

    
291
    /* VFP data registers are always little-endian.  */
292
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
293
    if (reg < nregs) {
294
        stfq_le_p(buf, env->vfp.regs[reg]);
295
        return 8;
296
    }
297
    if (arm_feature(env, ARM_FEATURE_NEON)) {
298
        /* Aliases for Q regs.  */
299
        nregs += 16;
300
        if (reg < nregs) {
301
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
302
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
303
            return 16;
304
        }
305
    }
306
    switch (reg - nregs) {
307
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
308
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
309
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
310
    }
311
    return 0;
312
}
313

    
314
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
315
{
316
    int nregs;
317

    
318
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
319
    if (reg < nregs) {
320
        env->vfp.regs[reg] = ldfq_le_p(buf);
321
        return 8;
322
    }
323
    if (arm_feature(env, ARM_FEATURE_NEON)) {
324
        nregs += 16;
325
        if (reg < nregs) {
326
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
327
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
328
            return 16;
329
        }
330
    }
331
    switch (reg - nregs) {
332
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
333
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
334
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
335
    }
336
    return 0;
337
}
338

    
339
CPUARMState *cpu_arm_init(const char *cpu_model)
340
{
341
    CPUARMState *env;
342
    uint32_t id;
343
    static int inited = 0;
344

    
345
    id = cpu_arm_find_by_name(cpu_model);
346
    if (id == 0)
347
        return NULL;
348
    env = qemu_mallocz(sizeof(CPUARMState));
349
    cpu_exec_init(env);
350
    if (!inited) {
351
        inited = 1;
352
        arm_translate_init();
353
    }
354

    
355
    env->cpu_model_str = cpu_model;
356
    env->cp15.c0_cpuid = id;
357
    cpu_reset(env);
358
    if (arm_feature(env, ARM_FEATURE_NEON)) {
359
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
360
                                 51, "arm-neon.xml", 0);
361
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
362
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
363
                                 35, "arm-vfp3.xml", 0);
364
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
365
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
366
                                 19, "arm-vfp.xml", 0);
367
    }
368
    qemu_init_vcpu(env);
369
    return env;
370
}
371

    
372
struct arm_cpu_t {
373
    uint32_t id;
374
    const char *name;
375
};
376

    
377
static const struct arm_cpu_t arm_cpu_names[] = {
378
    { ARM_CPUID_ARM926, "arm926"},
379
    { ARM_CPUID_ARM946, "arm946"},
380
    { ARM_CPUID_ARM1026, "arm1026"},
381
    { ARM_CPUID_ARM1136, "arm1136"},
382
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
383
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
384
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
385
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
386
    { ARM_CPUID_CORTEXA9, "cortex-a9"},
387
    { ARM_CPUID_TI925T, "ti925t" },
388
    { ARM_CPUID_PXA250, "pxa250" },
389
    { ARM_CPUID_SA1100,    "sa1100" },
390
    { ARM_CPUID_SA1110,    "sa1110" },
391
    { ARM_CPUID_PXA255, "pxa255" },
392
    { ARM_CPUID_PXA260, "pxa260" },
393
    { ARM_CPUID_PXA261, "pxa261" },
394
    { ARM_CPUID_PXA262, "pxa262" },
395
    { ARM_CPUID_PXA270, "pxa270" },
396
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
397
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
398
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
399
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
400
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
401
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
402
    { ARM_CPUID_ANY, "any"},
403
    { 0, NULL}
404
};
405

    
406
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
407
{
408
    int i;
409

    
410
    (*cpu_fprintf)(f, "Available CPUs:\n");
411
    for (i = 0; arm_cpu_names[i].name; i++) {
412
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
413
    }
414
}
415

    
416
/* return 0 if not found */
417
static uint32_t cpu_arm_find_by_name(const char *name)
418
{
419
    int i;
420
    uint32_t id;
421

    
422
    id = 0;
423
    for (i = 0; arm_cpu_names[i].name; i++) {
424
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
425
            id = arm_cpu_names[i].id;
426
            break;
427
        }
428
    }
429
    return id;
430
}
431

    
432
void cpu_arm_close(CPUARMState *env)
433
{
434
    free(env);
435
}
436

    
437
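/* Recompose the CPSR from the lazily maintained NF/ZF/CF/VF/QF, GE,
   IT and Thumb state fields plus the uncached mode and mask bits.  */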
uint32_t cpsr_read(CPUARMState *env)
438
{
439
    int ZF;
440
    ZF = (env->ZF == 0);
441
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
442
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
443
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
444
        | ((env->condexec_bits & 0xfc) << 8)
445
        | (env->GE << 16);
446
}
447

    
448
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
449
{
450
    if (mask & CPSR_NZCV) {
451
        env->ZF = (~val) & CPSR_Z;
452
        env->NF = val;
453
        env->CF = (val >> 29) & 1;
454
        env->VF = (val << 3) & 0x80000000;
455
    }
456
    if (mask & CPSR_Q)
457
        env->QF = ((val & CPSR_Q) != 0);
458
    if (mask & CPSR_T)
459
        env->thumb = ((val & CPSR_T) != 0);
460
    if (mask & CPSR_IT_0_1) {
461
        env->condexec_bits &= ~3;
462
        env->condexec_bits |= (val >> 25) & 3;
463
    }
464
    if (mask & CPSR_IT_2_7) {
465
        env->condexec_bits &= 3;
466
        env->condexec_bits |= (val >> 8) & 0xfc;
467
    }
468
    if (mask & CPSR_GE) {
469
        env->GE = (val >> 16) & 0xf;
470
    }
471

    
472
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
473
        switch_mode(env, val & CPSR_M);
474
    }
475
    mask &= ~CACHED_CPSR_BITS;
476
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
477
}
478

    
479
/* Sign/zero extend */
480
uint32_t HELPER(sxtb16)(uint32_t x)
481
{
482
    uint32_t res;
483
    res = (uint16_t)(int8_t)x;
484
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
485
    return res;
486
}
487

    
488
uint32_t HELPER(uxtb16)(uint32_t x)
489
{
490
    uint32_t res;
491
    res = (uint16_t)(uint8_t)x;
492
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
493
    return res;
494
}
495

    
496
uint32_t HELPER(clz)(uint32_t x)
497
{
498
    return clz32(x);
499
}
500

    
501
int32_t HELPER(sdiv)(int32_t num, int32_t den)
502
{
503
    if (den == 0)
504
      return 0;
505
    if (num == INT_MIN && den == -1)
506
      return INT_MIN;
507
    return num / den;
508
}
509

    
510
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
511
{
512
    if (den == 0)
513
      return 0;
514
    return num / den;
515
}
516

    
517
uint32_t HELPER(rbit)(uint32_t x)
518
{
519
    x =  ((x & 0xff000000) >> 24)
520
       | ((x & 0x00ff0000) >> 8)
521
       | ((x & 0x0000ff00) << 8)
522
       | ((x & 0x000000ff) << 24);
523
    x =  ((x & 0xf0f0f0f0) >> 4)
524
       | ((x & 0x0f0f0f0f) << 4);
525
    x =  ((x & 0x88888888) >> 3)
526
       | ((x & 0x44444444) >> 1)
527
       | ((x & 0x22222222) << 1)
528
       | ((x & 0x11111111) << 3);
529
    return x;
530
}
531

    
532
uint32_t HELPER(abs)(uint32_t x)
533
{
534
    return ((int32_t)x < 0) ? -x : x;
535
}
536

    
537
#if defined(CONFIG_USER_ONLY)
538

    
539
void do_interrupt (CPUState *env)
540
{
541
    env->exception_index = -1;
542
}
543

    
544
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
545
                              int mmu_idx, int is_softmmu)
546
{
547
    if (rw == 2) {
548
        env->exception_index = EXCP_PREFETCH_ABORT;
549
        env->cp15.c6_insn = address;
550
    } else {
551
        env->exception_index = EXCP_DATA_ABORT;
552
        env->cp15.c6_data = address;
553
    }
554
    return 1;
555
}
556

    
557
/* These should probably raise undefined insn exceptions.  */
558
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
559
{
560
    int op1 = (insn >> 8) & 0xf;
561
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
562
    return;
563
}
564

    
565
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
566
{
567
    int op1 = (insn >> 8) & 0xf;
568
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
569
    return 0;
570
}
571

    
572
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
573
{
574
    cpu_abort(env, "cp15 insn %08x\n", insn);
575
}
576

    
577
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
578
{
579
    cpu_abort(env, "cp15 insn %08x\n", insn);
580
}
581

    
582
/* These should probably raise undefined insn exceptions.  */
583
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
584
{
585
    cpu_abort(env, "v7m_mrs %d\n", reg);
586
}
587

    
588
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
589
{
590
    cpu_abort(env, "v7m_mrs %d\n", reg);
591
    return 0;
592
}
593

    
594
void switch_mode(CPUState *env, int mode)
595
{
596
    if (mode != ARM_CPU_MODE_USR)
597
        cpu_abort(env, "Tried to switch out of user mode\n");
598
}
599

    
600
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
601
{
602
    cpu_abort(env, "banked r13 write\n");
603
}
604

    
605
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
606
{
607
    cpu_abort(env, "banked r13 read\n");
608
    return 0;
609
}
610

    
611
#else
612

    
613
extern int semihosting_enabled;
614

    
615
/* Map CPU modes onto saved register banks.  */
616
static inline int bank_number (int mode)
617
{
618
    switch (mode) {
619
    case ARM_CPU_MODE_USR:
620
    case ARM_CPU_MODE_SYS:
621
        return 0;
622
    case ARM_CPU_MODE_SVC:
623
        return 1;
624
    case ARM_CPU_MODE_ABT:
625
        return 2;
626
    case ARM_CPU_MODE_UND:
627
        return 3;
628
    case ARM_CPU_MODE_IRQ:
629
        return 4;
630
    case ARM_CPU_MODE_FIQ:
631
        return 5;
632
    }
633
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
634
    return -1;
635
}
636

    
637
void switch_mode(CPUState *env, int mode)
638
{
639
    int old_mode;
640
    int i;
641

    
642
    old_mode = env->uncached_cpsr & CPSR_M;
643
    if (mode == old_mode)
644
        return;
645

    
646
    if (old_mode == ARM_CPU_MODE_FIQ) {
647
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
648
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
649
    } else if (mode == ARM_CPU_MODE_FIQ) {
650
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
651
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
652
    }
653

    
654
    i = bank_number(old_mode);
655
    env->banked_r13[i] = env->regs[13];
656
    env->banked_r14[i] = env->regs[14];
657
    env->banked_spsr[i] = env->spsr;
658

    
659
    i = bank_number(mode);
660
    env->regs[13] = env->banked_r13[i];
661
    env->regs[14] = env->banked_r14[i];
662
    env->spsr = env->banked_spsr[i];
663
}
664

    
665
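/* v7-M exception stacking helpers; regs[13] is the active stack pointer.  */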
static void v7m_push(CPUARMState *env, uint32_t val)
666
{
667
    env->regs[13] -= 4;
668
    stl_phys(env->regs[13], val);
669
}
670

    
671
static uint32_t v7m_pop(CPUARMState *env)
672
{
673
    uint32_t val;
674
    val = ldl_phys(env->regs[13]);
675
    env->regs[13] += 4;
676
    return val;
677
}
678

    
679
/* Switch to V7M main or process stack pointer.  */
680
static void switch_v7m_sp(CPUARMState *env, int process)
681
{
682
    uint32_t tmp;
683
    if (env->v7m.current_sp != process) {
684
        tmp = env->v7m.other_sp;
685
        env->v7m.other_sp = env->regs[13];
686
        env->regs[13] = tmp;
687
        env->v7m.current_sp = process;
688
    }
689
}
690

    
691
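/* Handle v7-M exception return: at this point regs[15] holds the magic
   EXC_RETURN value, whose bit 2 selects the stack to unstack from.  */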
static void do_v7m_exception_exit(CPUARMState *env)
692
{
693
    uint32_t type;
694
    uint32_t xpsr;
695

    
696
    type = env->regs[15];
697
    if (env->v7m.exception != 0)
698
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
699

    
700
    /* Switch to the target stack.  */
701
    switch_v7m_sp(env, (type & 4) != 0);
702
    /* Pop registers.  */
703
    env->regs[0] = v7m_pop(env);
704
    env->regs[1] = v7m_pop(env);
705
    env->regs[2] = v7m_pop(env);
706
    env->regs[3] = v7m_pop(env);
707
    env->regs[12] = v7m_pop(env);
708
    env->regs[14] = v7m_pop(env);
709
    env->regs[15] = v7m_pop(env);
710
    xpsr = v7m_pop(env);
711
    xpsr_write(env, xpsr, 0xfffffdff);
712
    /* Undo stack alignment.  */
713
    if (xpsr & 0x200)
714
        env->regs[13] |= 4;
715
    /* ??? The exception return type specifies Thread/Handler mode.  However
716
       this is also implied by the xPSR value. Not sure what to do
717
       if there is a mismatch.  */
718
    /* ??? Likewise for mismatches between the CONTROL register and the stack
719
       pointer.  */
720
}
721

    
722
static void do_interrupt_v7m(CPUARMState *env)
723
{
724
    uint32_t xpsr = xpsr_read(env);
725
    uint32_t lr;
726
    uint32_t addr;
727

    
728
    lr = 0xfffffff1;
729
    if (env->v7m.current_sp)
730
        lr |= 4;
731
    if (env->v7m.exception == 0)
732
        lr |= 8;
733

    
734
    /* For exceptions we just mark as pending on the NVIC, and let that
735
       handle it.  */
736
    /* TODO: Need to escalate if the current priority is higher than the
737
       one we're raising.  */
738
    switch (env->exception_index) {
739
    case EXCP_UDEF:
740
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
741
        return;
742
    case EXCP_SWI:
743
        env->regs[15] += 2;
744
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
745
        return;
746
    case EXCP_PREFETCH_ABORT:
747
    case EXCP_DATA_ABORT:
748
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
749
        return;
750
    case EXCP_BKPT:
751
        if (semihosting_enabled) {
752
            int nr;
753
            nr = lduw_code(env->regs[15]) & 0xff;
754
            if (nr == 0xab) {
755
                env->regs[15] += 2;
756
                env->regs[0] = do_arm_semihosting(env);
757
                return;
758
            }
759
        }
760
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
761
        return;
762
    case EXCP_IRQ:
763
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
764
        break;
765
    case EXCP_EXCEPTION_EXIT:
766
        do_v7m_exception_exit(env);
767
        return;
768
    default:
769
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
770
        return; /* Never happens.  Keep compiler happy.  */
771
    }
772

    
773
    /* Align stack pointer.  */
774
    /* ??? Should only do this if Configuration Control Register
775
       STACKALIGN bit is set.  */
776
    if (env->regs[13] & 4) {
777
        env->regs[13] -= 4;
778
        xpsr |= 0x200;
779
    }
780
    /* Switch to the handler mode.  */
781
    v7m_push(env, xpsr);
782
    v7m_push(env, env->regs[15]);
783
    v7m_push(env, env->regs[14]);
784
    v7m_push(env, env->regs[12]);
785
    v7m_push(env, env->regs[3]);
786
    v7m_push(env, env->regs[2]);
787
    v7m_push(env, env->regs[1]);
788
    v7m_push(env, env->regs[0]);
789
    switch_v7m_sp(env, 0);
790
    env->uncached_cpsr &= ~CPSR_IT;
791
    env->regs[14] = lr;
792
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
793
    env->regs[15] = addr & 0xfffffffe;
794
    env->thumb = addr & 1;
795
}
796

    
797
/* Handle a CPU exception.  */
798
void do_interrupt(CPUARMState *env)
799
{
800
    uint32_t addr;
801
    uint32_t mask;
802
    int new_mode;
803
    uint32_t offset;
804

    
805
    if (IS_M(env)) {
806
        do_interrupt_v7m(env);
807
        return;
808
    }
809
    /* TODO: Vectored interrupt controller.  */
810
    switch (env->exception_index) {
811
    case EXCP_UDEF:
812
        new_mode = ARM_CPU_MODE_UND;
813
        addr = 0x04;
814
        mask = CPSR_I;
815
        if (env->thumb)
816
            offset = 2;
817
        else
818
            offset = 4;
819
        break;
820
    case EXCP_SWI:
821
        if (semihosting_enabled) {
822
            /* Check for semihosting interrupt.  */
823
            if (env->thumb) {
824
                mask = lduw_code(env->regs[15] - 2) & 0xff;
825
            } else {
826
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
827
            }
828
            /* Only intercept calls from privileged modes, to provide some
829
               semblance of security.  */
830
            if (((mask == 0x123456 && !env->thumb)
831
                    || (mask == 0xab && env->thumb))
832
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
833
                env->regs[0] = do_arm_semihosting(env);
834
                return;
835
            }
836
        }
837
        new_mode = ARM_CPU_MODE_SVC;
838
        addr = 0x08;
839
        mask = CPSR_I;
840
        /* The PC already points to the next instruction.  */
841
        offset = 0;
842
        break;
843
    case EXCP_BKPT:
844
        /* See if this is a semihosting syscall.  */
845
        if (env->thumb && semihosting_enabled) {
846
            mask = lduw_code(env->regs[15]) & 0xff;
847
            if (mask == 0xab
848
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
849
                env->regs[15] += 2;
850
                env->regs[0] = do_arm_semihosting(env);
851
                return;
852
            }
853
        }
854
        env->cp15.c5_insn = 2;
855
        /* Fall through to prefetch abort.  */
856
    case EXCP_PREFETCH_ABORT:
857
        new_mode = ARM_CPU_MODE_ABT;
858
        addr = 0x0c;
859
        mask = CPSR_A | CPSR_I;
860
        offset = 4;
861
        break;
862
    case EXCP_DATA_ABORT:
863
        new_mode = ARM_CPU_MODE_ABT;
864
        addr = 0x10;
865
        mask = CPSR_A | CPSR_I;
866
        offset = 8;
867
        break;
868
    case EXCP_IRQ:
869
        new_mode = ARM_CPU_MODE_IRQ;
870
        addr = 0x18;
871
        /* Disable IRQ and imprecise data aborts.  */
872
        mask = CPSR_A | CPSR_I;
873
        offset = 4;
874
        break;
875
    case EXCP_FIQ:
876
        new_mode = ARM_CPU_MODE_FIQ;
877
        addr = 0x1c;
878
        /* Disable FIQ, IRQ and imprecise data aborts.  */
879
        mask = CPSR_A | CPSR_I | CPSR_F;
880
        offset = 4;
881
        break;
882
    default:
883
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
884
        return; /* Never happens.  Keep compiler happy.  */
885
    }
886
    /* High vectors.  */
887
    if (env->cp15.c1_sys & (1 << 13)) {
888
        addr += 0xffff0000;
889
    }
890
    switch_mode (env, new_mode);
891
    env->spsr = cpsr_read(env);
892
    /* Clear IT bits.  */
893
    env->condexec_bits = 0;
894
    /* Switch to the new mode, and to the correct instruction set.  */
895
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
896
    env->uncached_cpsr |= mask;
897
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
898
     * and we should just guard the thumb mode on V4 */
899
    if (arm_feature(env, ARM_FEATURE_V4T)) {
900
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
901
    }
902
    env->regs[14] = env->regs[15] + offset;
903
    env->regs[15] = addr;
904
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
905
}
906

    
907
/* Check section/page access permissions.
908
   Returns the page protection flags, or zero if the access is not
909
   permitted.  */
910
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
911
                           int is_user)
912
{
913
  int prot_ro;
914

    
915
  if (domain == 3)
916
    return PAGE_READ | PAGE_WRITE;
917

    
918
  if (access_type == 1)
919
      prot_ro = 0;
920
  else
921
      prot_ro = PAGE_READ;
922

    
923
  switch (ap) {
924
  case 0:
925
      if (access_type == 1)
926
          return 0;
927
      switch ((env->cp15.c1_sys >> 8) & 3) {
928
      case 1:
929
          return is_user ? 0 : PAGE_READ;
930
      case 2:
931
          return PAGE_READ;
932
      default:
933
          return 0;
934
      }
935
  case 1:
936
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
937
  case 2:
938
      if (is_user)
939
          return prot_ro;
940
      else
941
          return PAGE_READ | PAGE_WRITE;
942
  case 3:
943
      return PAGE_READ | PAGE_WRITE;
944
  case 4: /* Reserved.  */
945
      return 0;
946
  case 5:
947
      return is_user ? 0 : prot_ro;
948
  case 6:
949
      return prot_ro;
950
  case 7:
951
      if (!arm_feature (env, ARM_FEATURE_V6K))
952
          return 0;
953
      return prot_ro;
954
  default:
955
      abort();
956
  }
957
}
958

    
959
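/* Return the physical address of the level 1 descriptor for a virtual
   address, using TTBR1 (c2_base1) or TTBR0 (c2_base0) according to the
   TTBCR-derived c2_mask.  */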
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
960
{
961
    uint32_t table;
962

    
963
    if (address & env->cp15.c2_mask)
964
        table = env->cp15.c2_base1 & 0xffffc000;
965
    else
966
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
967

    
968
    table |= (address >> 18) & 0x3ffc;
969
    return table;
970
}
971

    
972
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
973
                            int is_user, uint32_t *phys_ptr, int *prot,
974
                            target_ulong *page_size)
975
{
976
    int code;
977
    uint32_t table;
978
    uint32_t desc;
979
    int type;
980
    int ap;
981
    int domain;
982
    uint32_t phys_addr;
983

    
984
    /* Pagetable walk.  */
985
    /* Lookup l1 descriptor.  */
986
    table = get_level1_table_address(env, address);
987
    desc = ldl_phys(table);
988
    type = (desc & 3);
989
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
990
    if (type == 0) {
991
        /* Section translation fault.  */
992
        code = 5;
993
        goto do_fault;
994
    }
995
    if (domain == 0 || domain == 2) {
996
        if (type == 2)
997
            code = 9; /* Section domain fault.  */
998
        else
999
            code = 11; /* Page domain fault.  */
1000
        goto do_fault;
1001
    }
1002
    if (type == 2) {
1003
        /* 1Mb section.  */
1004
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1005
        ap = (desc >> 10) & 3;
1006
        code = 13;
1007
        *page_size = 1024 * 1024;
1008
    } else {
1009
        /* Lookup l2 entry.  */
1010
        if (type == 1) {
1011
            /* Coarse pagetable.  */
1012
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1013
        } else {
1014
            /* Fine pagetable.  */
1015
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1016
        }
1017
        desc = ldl_phys(table);
1018
        switch (desc & 3) {
1019
        case 0: /* Page translation fault.  */
1020
            code = 7;
1021
            goto do_fault;
1022
        case 1: /* 64k page.  */
1023
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1024
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1025
            *page_size = 0x10000;
1026
            break;
1027
        case 2: /* 4k page.  */
1028
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1029
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1030
            *page_size = 0x1000;
1031
            break;
1032
        case 3: /* 1k page.  */
1033
            if (type == 1) {
1034
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1035
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1036
                } else {
1037
                    /* Page translation fault.  */
1038
                    code = 7;
1039
                    goto do_fault;
1040
                }
1041
            } else {
1042
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1043
            }
1044
            ap = (desc >> 4) & 3;
1045
            *page_size = 0x400;
1046
            break;
1047
        default:
1048
            /* Never happens, but compiler isn't smart enough to tell.  */
1049
            abort();
1050
        }
1051
        code = 15;
1052
    }
1053
    *prot = check_ap(env, ap, domain, access_type, is_user);
1054
    if (!*prot) {
1055
        /* Access permission fault.  */
1056
        goto do_fault;
1057
    }
1058
    *prot |= PAGE_EXEC;
1059
    *phys_ptr = phys_addr;
1060
    return 0;
1061
do_fault:
1062
    return code | (domain << 4);
1063
}
1064

    
1065
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
1066
                            int is_user, uint32_t *phys_ptr, int *prot,
1067
                            target_ulong *page_size)
1068
{
1069
    int code;
1070
    uint32_t table;
1071
    uint32_t desc;
1072
    uint32_t xn;
1073
    int type;
1074
    int ap;
1075
    int domain;
1076
    uint32_t phys_addr;
1077

    
1078
    /* Pagetable walk.  */
1079
    /* Lookup l1 descriptor.  */
1080
    table = get_level1_table_address(env, address);
1081
    desc = ldl_phys(table);
1082
    type = (desc & 3);
1083
    if (type == 0) {
1084
        /* Section translation fault.  */
1085
        code = 5;
1086
        domain = 0;
1087
        goto do_fault;
1088
    } else if (type == 2 && (desc & (1 << 18))) {
1089
        /* Supersection.  */
1090
        domain = 0;
1091
    } else {
1092
        /* Section or page.  */
1093
        domain = (desc >> 4) & 0x1e;
1094
    }
1095
    domain = (env->cp15.c3 >> domain) & 3;
1096
    if (domain == 0 || domain == 2) {
1097
        if (type == 2)
1098
            code = 9; /* Section domain fault.  */
1099
        else
1100
            code = 11; /* Page domain fault.  */
1101
        goto do_fault;
1102
    }
1103
    if (type == 2) {
1104
        if (desc & (1 << 18)) {
1105
            /* Supersection.  */
1106
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1107
            *page_size = 0x1000000;
1108
        } else {
1109
            /* Section.  */
1110
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1111
            *page_size = 0x100000;
1112
        }
1113
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1114
        xn = desc & (1 << 4);
1115
        code = 13;
1116
    } else {
1117
        /* Lookup l2 entry.  */
1118
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1119
        desc = ldl_phys(table);
1120
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1121
        switch (desc & 3) {
1122
        case 0: /* Page translation fault.  */
1123
            code = 7;
1124
            goto do_fault;
1125
        case 1: /* 64k page.  */
1126
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1127
            xn = desc & (1 << 15);
1128
            *page_size = 0x10000;
1129
            break;
1130
        case 2: case 3: /* 4k page.  */
1131
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1132
            xn = desc & 1;
1133
            *page_size = 0x1000;
1134
            break;
1135
        default:
1136
            /* Never happens, but compiler isn't smart enough to tell.  */
1137
            abort();
1138
        }
1139
        code = 15;
1140
    }
1141
    if (domain == 3) {
1142
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1143
    } else {
1144
        if (xn && access_type == 2)
1145
            goto do_fault;
1146

    
1147
        /* The simplified model uses AP[0] as an access control bit.  */
1148
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
1149
            /* Access flag fault.  */
1150
            code = (code == 15) ? 6 : 3;
1151
            goto do_fault;
1152
        }
1153
        *prot = check_ap(env, ap, domain, access_type, is_user);
1154
        if (!*prot) {
1155
            /* Access permission fault.  */
1156
            goto do_fault;
1157
        }
1158
        if (!xn) {
1159
            *prot |= PAGE_EXEC;
1160
        }
1161
    }
1162
    *phys_ptr = phys_addr;
1163
    return 0;
1164
do_fault:
1165
    return code | (domain << 4);
1166
}
1167

    
1168
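/* v5 MPU translation: addresses pass through unchanged; scan the eight
   regions from 7 down to 0 so that higher-numbered regions take
   priority, then derive the permissions from c5_data/c5_insn.  */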
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
1169
                             int is_user, uint32_t *phys_ptr, int *prot)
1170
{
1171
    int n;
1172
    uint32_t mask;
1173
    uint32_t base;
1174

    
1175
    *phys_ptr = address;
1176
    for (n = 7; n >= 0; n--) {
1177
        base = env->cp15.c6_region[n];
1178
        if ((base & 1) == 0)
1179
            continue;
1180
        mask = 1 << ((base >> 1) & 0x1f);
1181
        /* Keep this shift separate from the above to avoid an
1182
           (undefined) << 32.  */
1183
        mask = (mask << 1) - 1;
1184
        if (((base ^ address) & ~mask) == 0)
1185
            break;
1186
    }
1187
    if (n < 0)
1188
        return 2;
1189

    
1190
    if (access_type == 2) {
1191
        mask = env->cp15.c5_insn;
1192
    } else {
1193
        mask = env->cp15.c5_data;
1194
    }
1195
    mask = (mask >> (n * 4)) & 0xf;
1196
    switch (mask) {
1197
    case 0:
1198
        return 1;
1199
    case 1:
1200
        if (is_user)
1201
          return 1;
1202
        *prot = PAGE_READ | PAGE_WRITE;
1203
        break;
1204
    case 2:
1205
        *prot = PAGE_READ;
1206
        if (!is_user)
1207
            *prot |= PAGE_WRITE;
1208
        break;
1209
    case 3:
1210
        *prot = PAGE_READ | PAGE_WRITE;
1211
        break;
1212
    case 5:
1213
        if (is_user)
1214
            return 1;
1215
        *prot = PAGE_READ;
1216
        break;
1217
    case 6:
1218
        *prot = PAGE_READ;
1219
        break;
1220
    default:
1221
        /* Bad permission.  */
1222
        return 1;
1223
    }
1224
    *prot |= PAGE_EXEC;
1225
    return 0;
1226
}
1227

    
1228
static inline int get_phys_addr(CPUState *env, uint32_t address,
1229
                                int access_type, int is_user,
1230
                                uint32_t *phys_ptr, int *prot,
1231
                                target_ulong *page_size)
1232
{
1233
    /* Fast Context Switch Extension.  */
1234
    if (address < 0x02000000)
1235
        address += env->cp15.c13_fcse;
1236

    
1237
    if ((env->cp15.c1_sys & 1) == 0) {
1238
        /* MMU/MPU disabled.  */
1239
        *phys_ptr = address;
1240
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1241
        *page_size = TARGET_PAGE_SIZE;
1242
        return 0;
1243
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1244
        *page_size = TARGET_PAGE_SIZE;
1245
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1246
                                 prot);
1247
    } else if (env->cp15.c1_sys & (1 << 23)) {
1248
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1249
                                prot, page_size);
1250
    } else {
1251
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1252
                                prot, page_size);
1253
    }
1254
}
1255

    
1256
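/* Softmmu TLB-miss handler: translate the address and install a TLB
   entry on success; on failure record the fault status and address in
   cp15 c5/c6 and raise a prefetch or data abort.  */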
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
1257
                              int access_type, int mmu_idx, int is_softmmu)
1258
{
1259
    uint32_t phys_addr;
1260
    target_ulong page_size;
1261
    int prot;
1262
    int ret, is_user;
1263

    
1264
    is_user = mmu_idx == MMU_USER_IDX;
1265
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
1266
                        &page_size);
1267
    if (ret == 0) {
1268
        /* Map a single [sub]page.  */
1269
        phys_addr &= ~(uint32_t)0x3ff;
1270
        address &= ~(uint32_t)0x3ff;
1271
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
1272
        return 0;
1273
    }
1274

    
1275
    if (access_type == 2) {
1276
        env->cp15.c5_insn = ret;
1277
        env->cp15.c6_insn = address;
1278
        env->exception_index = EXCP_PREFETCH_ABORT;
1279
    } else {
1280
        env->cp15.c5_data = ret;
1281
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1282
            env->cp15.c5_data |= (1 << 11);
1283
        env->cp15.c6_data = address;
1284
        env->exception_index = EXCP_DATA_ABORT;
1285
    }
1286
    return 1;
1287
}
1288

    
1289
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1290
{
1291
    uint32_t phys_addr;
1292
    target_ulong page_size;
1293
    int prot;
1294
    int ret;
1295

    
1296
    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1297

    
1298
    if (ret != 0)
1299
        return -1;
1300

    
1301
    return phys_addr;
1302
}
1303

    
1304
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
1305
{
1306
    int cp_num = (insn >> 8) & 0xf;
1307
    int cp_info = (insn >> 5) & 7;
1308
    int src = (insn >> 16) & 0xf;
1309
    int operand = insn & 0xf;
1310

    
1311
    if (env->cp[cp_num].cp_write)
1312
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1313
                                 cp_info, src, operand, val);
1314
}
1315

    
1316
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
1317
{
1318
    int cp_num = (insn >> 8) & 0xf;
1319
    int cp_info = (insn >> 5) & 7;
1320
    int dest = (insn >> 16) & 0xf;
1321
    int operand = insn & 0xf;
1322

    
1323
    if (env->cp[cp_num].cp_read)
1324
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1325
                                       cp_info, dest, operand);
1326
    return 0;
1327
}
1328

    
1329
/* Return basic MPU access permission bits.  */
1330
static uint32_t simple_mpu_ap_bits(uint32_t val)
1331
{
1332
    uint32_t ret;
1333
    uint32_t mask;
1334
    int i;
1335
    ret = 0;
1336
    mask = 3;
1337
    for (i = 0; i < 16; i += 2) {
1338
        ret |= (val >> i) & mask;
1339
        mask <<= 2;
1340
    }
1341
    return ret;
1342
}
1343

    
1344
/* Pad basic MPU access permission bits to extended format.  */
1345
static uint32_t extended_mpu_ap_bits(uint32_t val)
1346
{
1347
    uint32_t ret;
1348
    uint32_t mask;
1349
    int i;
1350
    ret = 0;
1351
    mask = 3;
1352
    for (i = 0; i < 16; i += 2) {
1353
        ret |= (val & mask) << i;
1354
        mask <<= 2;
1355
    }
1356
    return ret;
1357
}
1358

    
1359
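/* MCR to cp15: CRn is in bits [19:16] of 'insn', opc1 in [23:21],
   opc2 in [7:5] and CRm in [3:0].  */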
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
1360
{
1361
    int op1;
1362
    int op2;
1363
    int crm;
1364

    
1365
    op1 = (insn >> 21) & 7;
1366
    op2 = (insn >> 5) & 7;
1367
    crm = insn & 0xf;
1368
    switch ((insn >> 16) & 0xf) {
1369
    case 0:
1370
        /* ID codes.  */
1371
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1372
            break;
1373
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1374
            break;
1375
        if (arm_feature(env, ARM_FEATURE_V7)
1376
                && op1 == 2 && crm == 0 && op2 == 0) {
1377
            env->cp15.c0_cssel = val & 0xf;
1378
            break;
1379
        }
1380
        goto bad_reg;
1381
    case 1: /* System configuration.  */
1382
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1383
            op2 = 0;
1384
        switch (op2) {
1385
        case 0:
1386
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1387
                env->cp15.c1_sys = val;
1388
            /* ??? Lots of these bits are not implemented.  */
1389
            /* This may enable/disable the MMU, so do a TLB flush.  */
1390
            tlb_flush(env, 1);
1391
            break;
1392
        case 1: /* Auxiliary control register.  */
1393
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1394
                env->cp15.c1_xscaleauxcr = val;
1395
                break;
1396
            }
1397
            /* Not implemented.  */
1398
            break;
1399
        case 2:
1400
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1401
                goto bad_reg;
1402
            if (env->cp15.c1_coproc != val) {
1403
                env->cp15.c1_coproc = val;
1404
                /* ??? Is this safe when called from within a TB?  */
1405
                tb_flush(env);
1406
            }
1407
            break;
1408
        default:
1409
            goto bad_reg;
1410
        }
1411
        break;
1412
    case 2: /* MMU Page table control / MPU cache control.  */
1413
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1414
            switch (op2) {
1415
            case 0:
1416
                env->cp15.c2_data = val;
1417
                break;
1418
            case 1:
1419
                env->cp15.c2_insn = val;
1420
                break;
1421
            default:
1422
                goto bad_reg;
1423
            }
1424
        } else {
1425
            switch (op2) {
1426
            case 0:
1427
                env->cp15.c2_base0 = val;
1428
                break;
1429
            case 1:
1430
                env->cp15.c2_base1 = val;
1431
                break;
1432
            case 2:
1433
                val &= 7;
1434
                env->cp15.c2_control = val;
1435
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1436
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1437
                break;
1438
            default:
1439
                goto bad_reg;
1440
            }
1441
        }
1442
        break;
1443
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1444
        env->cp15.c3 = val;
1445
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1446
        break;
1447
    case 4: /* Reserved.  */
1448
        goto bad_reg;
1449
    case 5: /* MMU Fault status / MPU access permission.  */
1450
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1451
            op2 = 0;
1452
        switch (op2) {
1453
        case 0:
1454
            if (arm_feature(env, ARM_FEATURE_MPU))
1455
                val = extended_mpu_ap_bits(val);
1456
            env->cp15.c5_data = val;
1457
            break;
1458
        case 1:
1459
            if (arm_feature(env, ARM_FEATURE_MPU))
1460
                val = extended_mpu_ap_bits(val);
1461
            env->cp15.c5_insn = val;
1462
            break;
1463
        case 2:
1464
            if (!arm_feature(env, ARM_FEATURE_MPU))
1465
                goto bad_reg;
1466
            env->cp15.c5_data = val;
1467
            break;
1468
        case 3:
1469
            if (!arm_feature(env, ARM_FEATURE_MPU))
1470
                goto bad_reg;
1471
            env->cp15.c5_insn = val;
1472
            break;
1473
        default:
1474
            goto bad_reg;
1475
        }
1476
        break;
1477
    case 6: /* MMU Fault address / MPU base/size.  */
1478
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1479
            if (crm >= 8)
1480
                goto bad_reg;
1481
            env->cp15.c6_region[crm] = val;
1482
        } else {
1483
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1484
                op2 = 0;
1485
            switch (op2) {
1486
            case 0:
1487
                env->cp15.c6_data = val;
1488
                break;
1489
            case 1: /* ??? This is WFAR on armv6 */
1490
            case 2:
1491
                env->cp15.c6_insn = val;
1492
                break;
1493
            default:
1494
                goto bad_reg;
1495
            }
1496
        }
1497
        break;
1498
    case 7: /* Cache control.  */
1499
        env->cp15.c15_i_max = 0x000;
1500
        env->cp15.c15_i_min = 0xff0;
1501
        if (op1 != 0) {
1502
            goto bad_reg;
1503
        }
1504
        /* No cache, so nothing to do except VA->PA translations. */
1505
        if (arm_feature(env, ARM_FEATURE_V6K)) {
1506
            switch (crm) {
1507
            case 4:
1508
                if (arm_feature(env, ARM_FEATURE_V7)) {
1509
                    env->cp15.c7_par = val & 0xfffff6ff;
1510
                } else {
1511
                    env->cp15.c7_par = val & 0xfffff1ff;
1512
                }
1513
                break;
1514
            case 8: {
1515
                uint32_t phys_addr;
1516
                target_ulong page_size;
1517
                int prot;
1518
                int ret, is_user = op2 & 2;
1519
                int access_type = op2 & 1;
1520

    
1521
                if (op2 & 4) {
1522
                    /* Other states are only available with TrustZone */
1523
                    goto bad_reg;
1524
                }
1525
                ret = get_phys_addr(env, val, access_type, is_user,
1526
                                    &phys_addr, &prot, &page_size);
1527
                if (ret == 0) {
1528
                    /* We do not set any attribute bits in the PAR */
1529
                    if (page_size == (1 << 24)
1530
                        && arm_feature(env, ARM_FEATURE_V7)) {
1531
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1532
                    } else {
1533
                        env->cp15.c7_par = phys_addr & 0xfffff000;
1534
                    }
1535
                } else {
1536
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1537
                                       ((ret & (12 << 1)) >> 6) |
1538
                                       ((ret & 0xf) << 1) | 1;
1539
                }
1540
                break;
1541
            }
1542
            }
1543
        }
1544
        break;
1545
    case 8: /* MMU TLB control.  */
1546
        switch (op2) {
1547
        case 0: /* Invalidate all.  */
1548
            tlb_flush(env, 0);
1549
            break;
1550
        case 1: /* Invalidate single TLB entry.  */
1551
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1552
            break;
1553
        case 2: /* Invalidate on ASID.  */
1554
            tlb_flush(env, val == 0);
1555
            break;
1556
        case 3: /* Invalidate single entry on MVA.  */
1557
            /* ??? This is like case 1, but ignores ASID.  */
1558
            tlb_flush(env, 1);
1559
            break;
1560
        default:
1561
            goto bad_reg;
1562
        }
1563
        break;
1564
    case 9:
1565
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1566
            break;
1567
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1568
            break; /* Ignore ReadBuffer access */
1569
        switch (crm) {
1570
        case 0: /* Cache lockdown.  */
1571
            switch (op1) {
1572
            case 0: /* L1 cache.  */
1573
                switch (op2) {
1574
                case 0:
1575
                    env->cp15.c9_data = val;
1576
                    break;
1577
                case 1:
1578
                    env->cp15.c9_insn = val;
1579
                    break;
1580
                default:
1581
                    goto bad_reg;
1582
                }
1583
                break;
1584
            case 1: /* L2 cache.  */
1585
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1586
                break;
1587
            default:
1588
                goto bad_reg;
1589
            }
1590
            break;
1591
        case 1: /* TCM memory region registers.  */
1592
            /* Not implemented.  */
1593
            goto bad_reg;
1594
        case 12: /* Performance monitor control */
1595
            /* Performance monitors are implementation defined in v7,
1596
             * but with an ARM recommended set of registers, which we
1597
             * follow (although we don't actually implement any counters)
1598
             */
1599
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1600
                goto bad_reg;
1601
            }
1602
            switch (op2) {
1603
            case 0: /* performance monitor control register */
1604
                /* only the DP, X, D and E bits are writable */
1605
                env->cp15.c9_pmcr &= ~0x39;
1606
                env->cp15.c9_pmcr |= (val & 0x39);
1607
                break;
1608
            case 1: /* Count enable set register */
1609
                val &= (1 << 31);
1610
                env->cp15.c9_pmcnten |= val;
1611
                break;
1612
            case 2: /* Count enable clear */
1613
                val &= (1 << 31);
1614
                env->cp15.c9_pmcnten &= ~val;
1615
                break;
1616
            case 3: /* Overflow flag status */
1617
                env->cp15.c9_pmovsr &= ~val;
1618
                break;
1619
            case 4: /* Software increment */
1620
                /* RAZ/WI since we don't implement the software-count event */
1621
                break;
1622
            case 5: /* Event counter selection register */
1623
                /* Since we don't implement any events, writing to this register
1624
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1625
                 */
1626
                break;
1627
            default:
1628
                goto bad_reg;
1629
            }
1630
            break;
1631
        case 13: /* Performance counters */
1632
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1633
                goto bad_reg;
1634
            }
1635
            switch (op2) {
1636
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
1637
                break;
1638
            case 1: /* Event type select */
1639
                env->cp15.c9_pmxevtyper = val & 0xff;
1640
                break;
1641
            case 2: /* Event count register */
1642
                /* Unimplemented (we have no events), RAZ/WI */
1643
                break;
1644
            default:
1645
                goto bad_reg;
1646
            }
1647
            break;
1648
        case 14: /* Performance monitor user enable / interrupt enable */
1649
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1650
                goto bad_reg;
1651
            }
1652
            switch (op2) {
1653
            case 0: /* user enable */
1654
                env->cp15.c9_pmuserenr = val & 1;
1655
                /* changes access rights for cp registers, so flush tbs */
1656
                tb_flush(env);
1657
                break;
1658
            case 1: /* interrupt enable set */
1659
                /* We have no event counters so only the C bit can be changed */
1660
                val &= (1 << 31);
1661
                env->cp15.c9_pminten |= val;
1662
                break;
1663
            case 2: /* interrupt enable clear */
1664
                val &= (1 << 31);
1665
                env->cp15.c9_pminten &= ~val;
1666
                break;
1667
            }
1668
            break;
1669
        default:
1670
            goto bad_reg;
1671
        }
1672
        break;
1673
    case 10: /* MMU TLB lockdown.  */
1674
        /* ??? TLB lockdown not implemented.  */
1675
        break;
1676
    case 12: /* Reserved.  */
1677
        goto bad_reg;
1678
    case 13: /* Process ID.  */
1679
        switch (op2) {
1680
        case 0:
1681
            /* Unlike real hardware, the QEMU TLB uses virtual addresses,
1682
               not modified virtual addresses, so this causes a TLB flush.
1683
             */
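            /* (FCSE remaps VAs below 32MB by substituting the PID into
             * bits [31:25] of the address, so a PID change alters the
             * effective mapping of that whole region.)
             */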
1684
            if (env->cp15.c13_fcse != val)
1685
              tlb_flush(env, 1);
1686
            env->cp15.c13_fcse = val;
1687
            break;
1688
        case 1:
1689
            /* This changes the ASID, so do a TLB flush.  */
1690
            if (env->cp15.c13_context != val
1691
                && !arm_feature(env, ARM_FEATURE_MPU))
1692
              tlb_flush(env, 0);
1693
            env->cp15.c13_context = val;
1694
            break;
1695
        default:
1696
            goto bad_reg;
1697
        }
1698
        break;
1699
    case 14: /* Reserved.  */
1700
        goto bad_reg;
1701
    case 15: /* Implementation specific.  */
1702
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1703
            if (op2 == 0 && crm == 1) {
1704
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1705
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1706
                    tb_flush(env);
1707
                    env->cp15.c15_cpar = val & 0x3fff;
1708
                }
1709
                break;
1710
            }
1711
            goto bad_reg;
1712
        }
1713
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1714
            switch (crm) {
1715
            case 0:
1716
                break;
1717
            case 1: /* Set TI925T configuration.  */
1718
                env->cp15.c15_ticonfig = val & 0xe7;
1719
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1720
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1721
                break;
1722
            case 2: /* Set I_max.  */
1723
                env->cp15.c15_i_max = val;
1724
                break;
1725
            case 3: /* Set I_min.  */
1726
                env->cp15.c15_i_min = val;
1727
                break;
1728
            case 4: /* Set thread-ID.  */
1729
                env->cp15.c15_threadid = val & 0xffff;
1730
                break;
1731
            case 8: /* Wait-for-interrupt (deprecated).  */
1732
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1733
                break;
1734
            default:
1735
                goto bad_reg;
1736
            }
1737
        }
1738
        break;
1739
    }
1740
    return;
1741
bad_reg:
1742
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1743
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1744
              (insn >> 16) & 0xf, crm, op1, op2);
1745
}
1746

    
1747
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1748
{
1749
    int op1;
1750
    int op2;
1751
    int crm;
1752

    
1753
    op1 = (insn >> 21) & 7;
1754
    op2 = (insn >> 5) & 7;
1755
    crm = insn & 0xf;
1756
    switch ((insn >> 16) & 0xf) {
1757
    case 0: /* ID codes.  */
1758
        switch (op1) {
1759
        case 0:
1760
            switch (crm) {
1761
            case 0:
1762
                switch (op2) {
1763
                case 0: /* Device ID.  */
1764
                    return env->cp15.c0_cpuid;
1765
                case 1: /* Cache Type.  */
1766
                    return env->cp15.c0_cachetype;
1767
                case 2: /* TCM status.  */
1768
                    return 0;
1769
                case 3: /* TLB type register.  */
1770
                    return 0; /* No lockable TLB entries.  */
1771
                case 5: /* MPIDR */
1772
                    /* The MPIDR was standardised in v7; prior to
1773
                     * this it was implemented only in the 11MPCore.
1774
                     * For all other pre-v7 cores it does not exist.
1775
                     */
1776
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1777
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1778
                        int mpidr = env->cpu_index;
1779
                        /* We don't support setting cluster ID ([8..11])
1780
                         * so these bits always RAZ.
1781
                         */
1782
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1783
                            mpidr |= (1 << 31);
1784
                            /* Cores which are uniprocessor (non-coherent)
1785
                             * but still implement the MP extensions set
1786
                             * bit 30. (For instance, A9UP.) However we do
1787
                             * not currently model any of those cores.
1788
                             */
1789
                        }
1790
                        return mpidr;
1791
                    }
1792
                    /* otherwise fall through to the unimplemented-reg case */
1793
                default:
1794
                    goto bad_reg;
1795
                }
1796
            case 1:
1797
                if (!arm_feature(env, ARM_FEATURE_V6))
1798
                    goto bad_reg;
1799
                return env->cp15.c0_c1[op2];
1800
            case 2:
1801
                if (!arm_feature(env, ARM_FEATURE_V6))
1802
                    goto bad_reg;
1803
                return env->cp15.c0_c2[op2];
1804
            case 3: case 4: case 5: case 6: case 7:
1805
                return 0;
1806
            default:
1807
                goto bad_reg;
1808
            }
1809
        case 1:
1810
            /* These registers aren't documented on arm11 cores.  However
1811
               Linux looks at them anyway.  */
1812
            if (!arm_feature(env, ARM_FEATURE_V6))
1813
                goto bad_reg;
1814
            if (crm != 0)
1815
                goto bad_reg;
1816
            if (!arm_feature(env, ARM_FEATURE_V7))
1817
                return 0;
1818

    
1819
            switch (op2) {
1820
            case 0:
1821
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1822
            case 1:
1823
                return env->cp15.c0_clid;
1824
            case 7:
1825
                return 0;
1826
            }
1827
            goto bad_reg;
1828
        case 2:
1829
            if (op2 != 0 || crm != 0)
1830
                goto bad_reg;
1831
            return env->cp15.c0_cssel;
1832
        default:
1833
            goto bad_reg;
1834
        }
1835
    case 1: /* System configuration.  */
1836
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1837
            op2 = 0;
1838
        switch (op2) {
1839
        case 0: /* Control register.  */
1840
            return env->cp15.c1_sys;
1841
        case 1: /* Auxiliary control register.  */
1842
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1843
                return env->cp15.c1_xscaleauxcr;
1844
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1845
                goto bad_reg;
1846
            switch (ARM_CPUID(env)) {
1847
            case ARM_CPUID_ARM1026:
1848
                return 1;
1849
            case ARM_CPUID_ARM1136:
1850
            case ARM_CPUID_ARM1136_R2:
1851
                return 7;
1852
            case ARM_CPUID_ARM11MPCORE:
1853
                return 1;
1854
            case ARM_CPUID_CORTEXA8:
1855
                return 2;
1856
            case ARM_CPUID_CORTEXA9:
1857
                return 0;
1858
            default:
1859
                goto bad_reg;
1860
            }
1861
        case 2: /* Coprocessor access register.  */
1862
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1863
                goto bad_reg;
1864
            return env->cp15.c1_coproc;
1865
        default:
1866
            goto bad_reg;
1867
        }
1868
    case 2: /* MMU Page table control / MPU cache control.  */
1869
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1870
            switch (op2) {
1871
            case 0:
1872
                return env->cp15.c2_data;
1873
                break;
1874
            case 1:
1875
                return env->cp15.c2_insn;
1876
                break;
1877
            default:
1878
                goto bad_reg;
1879
            }
1880
        } else {
1881
            switch (op2) {
1882
            case 0:
1883
                return env->cp15.c2_base0;
1884
            case 1:
1885
                return env->cp15.c2_base1;
1886
            case 2:
1887
                return env->cp15.c2_control;
1888
            default:
1889
                goto bad_reg;
1890
            }
1891
        }
1892
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1893
        return env->cp15.c3;
1894
    case 4: /* Reserved.  */
1895
        goto bad_reg;
1896
    case 5: /* MMU Fault status / MPU access permission.  */
1897
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1898
            op2 = 0;
1899
        switch (op2) {
1900
        case 0:
1901
            if (arm_feature(env, ARM_FEATURE_MPU))
1902
                return simple_mpu_ap_bits(env->cp15.c5_data);
1903
            return env->cp15.c5_data;
1904
        case 1:
1905
            if (arm_feature(env, ARM_FEATURE_MPU))
1906
                return simple_mpu_ap_bits(env->cp15.c5_insn);
1907
            return env->cp15.c5_insn;
1908
        case 2:
1909
            if (!arm_feature(env, ARM_FEATURE_MPU))
1910
                goto bad_reg;
1911
            return env->cp15.c5_data;
1912
        case 3:
1913
            if (!arm_feature(env, ARM_FEATURE_MPU))
1914
                goto bad_reg;
1915
            return env->cp15.c5_insn;
1916
        default:
1917
            goto bad_reg;
1918
        }
1919
    case 6: /* MMU Fault address.  */
1920
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1921
            if (crm >= 8)
1922
                goto bad_reg;
1923
            return env->cp15.c6_region[crm];
1924
        } else {
1925
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1926
                op2 = 0;
1927
            switch (op2) {
1928
            case 0:
1929
                return env->cp15.c6_data;
1930
            case 1:
1931
                if (arm_feature(env, ARM_FEATURE_V6)) {
1932
                    /* Watchpoint Fault Address.  */
1933
                    return 0; /* Not implemented.  */
1934
                } else {
1935
                    /* Instruction Fault Address.  */
1936
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1937
                       shouldn't do any harm.  */
1938
                    return env->cp15.c6_insn;
1939
                }
1940
            case 2:
1941
                if (arm_feature(env, ARM_FEATURE_V6)) {
1942
                    /* Instruction Fault Address.  */
1943
                    return env->cp15.c6_insn;
1944
                } else {
1945
                    goto bad_reg;
1946
                }
1947
            default:
1948
                goto bad_reg;
1949
            }
1950
        }
1951
    case 7: /* Cache control.  */
1952
        if (crm == 4 && op1 == 0 && op2 == 0) {
1953
            return env->cp15.c7_par;
1954
        }
1955
        /* FIXME: Should only clear Z flag if destination is r15.  */
1956
        env->ZF = 0;
1957
        return 0;
1958
    case 8: /* MMU TLB control.  */
1959
        goto bad_reg;
1960
    case 9:
1961
        switch (crm) {
1962
        case 0: /* Cache lockdown */
1963
            switch (op1) {
1964
            case 0: /* L1 cache.  */
1965
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1966
                    return 0;
1967
                }
1968
                switch (op2) {
1969
                case 0:
1970
                    return env->cp15.c9_data;
1971
                case 1:
1972
                    return env->cp15.c9_insn;
1973
                default:
1974
                    goto bad_reg;
1975
                }
1976
            case 1: /* L2 cache */
1977
                if (crm != 0) {
1978
                    goto bad_reg;
1979
                }
1980
                /* L2 Lockdown and Auxiliary control.  */
1981
                return 0;
1982
            default:
1983
                goto bad_reg;
1984
            }
1985
            break;
1986
        case 12: /* Performance monitor control */
1987
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1988
                goto bad_reg;
1989
            }
1990
            switch (op2) {
1991
            case 0: /* performance monitor control register */
1992
                return env->cp15.c9_pmcr;
1993
            case 1: /* count enable set */
1994
            case 2: /* count enable clear */
1995
                return env->cp15.c9_pmcnten;
1996
            case 3: /* overflow flag status */
1997
                return env->cp15.c9_pmovsr;
1998
            case 4: /* software increment */
1999
            case 5: /* event counter selection register */
2000
                return 0; /* Unimplemented, RAZ/WI */
2001
            default:
2002
                goto bad_reg;
2003
            }
2004
        case 13: /* Performance counters */
2005
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2006
                goto bad_reg;
2007
            }
2008
            switch (op2) {
2009
            case 1: /* Event type select */
2010
                return env->cp15.c9_pmxevtyper;
2011
            case 0: /* Cycle count register */
2012
            case 2: /* Event count register */
2013
                /* Unimplemented, so RAZ/WI */
2014
                return 0;
2015
            default:
2016
                goto bad_reg;
2017
            }
2018
        case 14: /* Performance monitor user enable / interrupt enable */
2019
            if (!arm_feature(env, ARM_FEATURE_V7)) {
2020
                goto bad_reg;
2021
            }
2022
            switch (op2) {
2023
            case 0: /* user enable */
2024
                return env->cp15.c9_pmuserenr;
2025
            case 1: /* interrupt enable set */
2026
            case 2: /* interrupt enable clear */
2027
                return env->cp15.c9_pminten;
2028
            default:
2029
                goto bad_reg;
2030
            }
2031
        default:
2032
            goto bad_reg;
2033
        }
2034
        break;
2035
    case 10: /* MMU TLB lockdown.  */
2036
        /* ??? TLB lockdown not implemented.  */
2037
        return 0;
2038
    case 11: /* TCM DMA control.  */
2039
    case 12: /* Reserved.  */
2040
        goto bad_reg;
2041
    case 13: /* Process ID.  */
2042
        switch (op2) {
2043
        case 0:
2044
            return env->cp15.c13_fcse;
2045
        case 1:
2046
            return env->cp15.c13_context;
2047
        default:
2048
            goto bad_reg;
2049
        }
2050
    case 14: /* Reserved.  */
2051
        goto bad_reg;
2052
    case 15: /* Implementation specific.  */
2053
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2054
            if (op2 == 0 && crm == 1)
2055
                return env->cp15.c15_cpar;
2056

    
2057
            goto bad_reg;
2058
        }
2059
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2060
            switch (crm) {
2061
            case 0:
2062
                return 0;
2063
            case 1: /* Read TI925T configuration.  */
2064
                return env->cp15.c15_ticonfig;
2065
            case 2: /* Read I_max.  */
2066
                return env->cp15.c15_i_max;
2067
            case 3: /* Read I_min.  */
2068
                return env->cp15.c15_i_min;
2069
            case 4: /* Read thread-ID.  */
2070
                return env->cp15.c15_threadid;
2071
            case 8: /* TI925T_status */
2072
                return 0;
2073
            }
2074
            /* TODO: Peripheral port remap register:
2075
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2076
             * controller base address at $rn & ~0xfff and map size of
2077
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2078
            goto bad_reg;
2079
        }
2080
        return 0;
2081
    }
2082
bad_reg:
2083
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2084
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2085
              (insn >> 16) & 0xf, crm, op1, op2);
2086
    return 0;
2087
}
2088

    
2089
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
2090
{
2091
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2092
        env->regs[13] = val;
2093
    } else {
2094
        env->banked_r13[bank_number(mode)] = val;
2095
    }
2096
}
2097

    
2098
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
2099
{
2100
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2101
        return env->regs[13];
2102
    } else {
2103
        return env->banked_r13[bank_number(mode)];
2104
    }
2105
}
2106

    
2107
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
2108
{
2109
    switch (reg) {
2110
    case 0: /* APSR */
2111
        return xpsr_read(env) & 0xf8000000;
2112
    case 1: /* IAPSR */
2113
        return xpsr_read(env) & 0xf80001ff;
2114
    case 2: /* EAPSR */
2115
        return xpsr_read(env) & 0xff00fc00;
2116
    case 3: /* xPSR */
2117
        return xpsr_read(env) & 0xff00fdff;
2118
    case 5: /* IPSR */
2119
        return xpsr_read(env) & 0x000001ff;
2120
    case 6: /* EPSR */
2121
        return xpsr_read(env) & 0x0700fc00;
2122
    case 7: /* IEPSR */
2123
        return xpsr_read(env) & 0x0700edff;
2124
    case 8: /* MSP */
2125
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2126
    case 9: /* PSP */
2127
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2128
    case 16: /* PRIMASK */
2129
        return (env->uncached_cpsr & CPSR_I) != 0;
2130
    case 17: /* BASEPRI */
2131
    case 18: /* BASEPRI_MAX */
2132
        return env->v7m.basepri;
2133
    case 19: /* FAULTMASK */
2134
        return (env->uncached_cpsr & CPSR_F) != 0;
2135
    case 20: /* CONTROL */
2136
        return env->v7m.control;
2137
    default:
2138
        /* ??? For debugging only.  */
2139
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2140
        return 0;
2141
    }
2142
}
2143

    
2144
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
2145
{
2146
    switch (reg) {
2147
    case 0: /* APSR */
2148
        xpsr_write(env, val, 0xf8000000);
2149
        break;
2150
    case 1: /* IAPSR */
2151
        xpsr_write(env, val, 0xf8000000);
2152
        break;
2153
    case 2: /* EAPSR */
2154
        xpsr_write(env, val, 0xfe00fc00);
2155
        break;
2156
    case 3: /* xPSR */
2157
        xpsr_write(env, val, 0xfe00fc00);
2158
        break;
2159
    case 5: /* IPSR */
2160
        /* IPSR bits are readonly.  */
2161
        break;
2162
    case 6: /* EPSR */
2163
        xpsr_write(env, val, 0x0600fc00);
2164
        break;
2165
    case 7: /* IEPSR */
2166
        xpsr_write(env, val, 0x0600fc00);
2167
        break;
2168
    case 8: /* MSP */
2169
        if (env->v7m.current_sp)
2170
            env->v7m.other_sp = val;
2171
        else
2172
            env->regs[13] = val;
2173
        break;
2174
    case 9: /* PSP */
2175
        if (env->v7m.current_sp)
2176
            env->regs[13] = val;
2177
        else
2178
            env->v7m.other_sp = val;
2179
        break;
2180
    case 16: /* PRIMASK */
2181
        if (val & 1)
2182
            env->uncached_cpsr |= CPSR_I;
2183
        else
2184
            env->uncached_cpsr &= ~CPSR_I;
2185
        break;
2186
    case 17: /* BASEPRI */
2187
        env->v7m.basepri = val & 0xff;
2188
        break;
2189
    case 18: /* BASEPRI_MAX */
2190
        val &= 0xff;
2191
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2192
            env->v7m.basepri = val;
2193
        break;
2194
    case 19: /* FAULTMASK */
2195
        if (val & 1)
2196
            env->uncached_cpsr |= CPSR_F;
2197
        else
2198
            env->uncached_cpsr &= ~CPSR_F;
2199
        break;
2200
    case 20: /* CONTROL */
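        /* Only nPRIV (bit 0) and SPSEL (bit 1) are implemented here;
         * SPSEL selects which stack pointer is the active one.
         */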
2201
        env->v7m.control = val & 3;
2202
        switch_v7m_sp(env, (val & 2) != 0);
2203
        break;
2204
    default:
2205
        /* ??? For debugging only.  */
2206
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2207
        return;
2208
    }
2209
}
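
/* Board and device models may claim coprocessors 0..14 here; cp15 is
 * always handled by the helpers above and cannot be overridden.
 */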
2210

    
2211
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2212
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2213
                void *opaque)
2214
{
2215
    if (cpnum < 0 || cpnum > 14) {
2216
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2217
        return;
2218
    }
2219

    
2220
    env->cp[cpnum].cp_read = cp_read;
2221
    env->cp[cpnum].cp_write = cp_write;
2222
    env->cp[cpnum].opaque = opaque;
2223
}
2224

    
2225
#endif
2226

    
2227
/* Note that signed overflow is undefined in C.  The following routines are
2228
   careful to use unsigned types where modulo arithmetic is required.
2229
   Failure to do so _will_ break on newer gcc.  */
2230

    
2231
/* Signed saturating arithmetic.  */
2232

    
2233
/* Perform 16-bit signed saturating addition.  */
2234
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2235
{
2236
    uint16_t res;
2237

    
2238
    res = a + b;
2239
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2240
        if (a & 0x8000)
2241
            res = 0x8000;
2242
        else
2243
            res = 0x7fff;
2244
    }
2245
    return res;
2246
}
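
/* For example, add16_sat(0x7000, 0x2000) overflows positively and
 * saturates to 0x7fff, while add16_sat(0x9000, 0x9000) overflows
 * negatively and saturates to 0x8000.
 */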
2247

    
2248
/* Perform 8-bit signed saturating addition.  */
2249
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2250
{
2251
    uint8_t res;
2252

    
2253
    res = a + b;
2254
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2255
        if (a & 0x80)
2256
            res = 0x80;
2257
        else
2258
            res = 0x7f;
2259
    }
2260
    return res;
2261
}
2262

    
2263
/* Perform 16-bit signed saturating subtraction.  */
2264
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2265
{
2266
    uint16_t res;
2267

    
2268
    res = a - b;
2269
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2270
        if (a & 0x8000)
2271
            res = 0x8000;
2272
        else
2273
            res = 0x7fff;
2274
    }
2275
    return res;
2276
}
2277

    
2278
/* Perform 8-bit signed saturating subtraction.  */
2279
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2280
{
2281
    uint8_t res;
2282

    
2283
    res = a - b;
2284
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2285
        if (a & 0x80)
2286
            res = 0x80;
2287
        else
2288
            res = 0x7f;
2289
    }
2290
    return res;
2291
}
2292

    
2293
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2294
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2295
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2296
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2297
#define PFX q
2298

    
2299
#include "op_addsub.h"
2300

    
2301
/* Unsigned saturating arithmetic.  */
2302
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2303
{
2304
    uint16_t res;
2305
    res = a + b;
2306
    if (res < a)
2307
        res = 0xffff;
2308
    return res;
2309
}
2310

    
2311
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2312
{
2313
    if (a > b)
2314
        return a - b;
2315
    else
2316
        return 0;
2317
}
2318

    
2319
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2320
{
2321
    uint8_t res;
2322
    res = a + b;
2323
    if (res < a)
2324
        res = 0xff;
2325
    return res;
2326
}
2327

    
2328
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2329
{
2330
    if (a > b)
2331
        return a - b;
2332
    else
2333
        return 0;
2334
}
2335

    
2336
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2337
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2338
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2339
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2340
#define PFX uq
2341

    
2342
#include "op_addsub.h"
2343

    
2344
/* Signed modulo arithmetic.  */
2345
#define SARITH16(a, b, n, op) do { \
2346
    int32_t sum; \
2347
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2348
    RESULT(sum, n, 16); \
2349
    if (sum >= 0) \
2350
        ge |= 3 << (n * 2); \
2351
    } while(0)
2352

    
2353
#define SARITH8(a, b, n, op) do { \
2354
    int32_t sum; \
2355
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2356
    RESULT(sum, n, 8); \
2357
    if (sum >= 0) \
2358
        ge |= 1 << n; \
2359
    } while(0)
2360

    
2361

    
2362
#define ADD16(a, b, n) SARITH16(a, b, n, +)
2363
#define SUB16(a, b, n) SARITH16(a, b, n, -)
2364
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2365
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2366
#define PFX s
2367
#define ARITH_GE
2368

    
2369
#include "op_addsub.h"
2370

    
2371
/* Unsigned modulo arithmetic.  */
2372
#define ADD16(a, b, n) do { \
2373
    uint32_t sum; \
2374
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2375
    RESULT(sum, n, 16); \
2376
    if ((sum >> 16) == 1) \
2377
        ge |= 3 << (n * 2); \
2378
    } while(0)
2379

    
2380
#define ADD8(a, b, n) do { \
2381
    uint32_t sum; \
2382
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2383
    RESULT(sum, n, 8); \
2384
    if ((sum >> 8) == 1) \
2385
        ge |= 1 << n; \
2386
    } while(0)
2387

    
2388
#define SUB16(a, b, n) do { \
2389
    uint32_t sum; \
2390
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2391
    RESULT(sum, n, 16); \
2392
    if ((sum >> 16) == 0) \
2393
        ge |= 3 << (n * 2); \
2394
    } while(0)
2395

    
2396
#define SUB8(a, b, n) do { \
2397
    uint32_t sum; \
2398
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2399
    RESULT(sum, n, 8); \
2400
    if ((sum >> 8) == 0) \
2401
        ge |= 1 << n; \
2402
    } while(0)
2403

    
2404
#define PFX u
2405
#define ARITH_GE
2406

    
2407
#include "op_addsub.h"
2408

    
2409
/* Halved signed arithmetic.  */
2410
#define ADD16(a, b, n) \
2411
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2412
#define SUB16(a, b, n) \
2413
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2414
#define ADD8(a, b, n) \
2415
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2416
#define SUB8(a, b, n) \
2417
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2418
#define PFX sh
2419

    
2420
#include "op_addsub.h"
2421

    
2422
/* Halved unsigned arithmetic.  */
2423
#define ADD16(a, b, n) \
2424
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2425
#define SUB16(a, b, n) \
2426
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2427
#define ADD8(a, b, n) \
2428
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2429
#define SUB8(a, b, n) \
2430
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2431
#define PFX uh
2432

    
2433
#include "op_addsub.h"
2434

    
2435
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2436
{
2437
    if (a > b)
2438
        return a - b;
2439
    else
2440
        return b - a;
2441
}
2442

    
2443
/* Unsigned sum of absolute byte differences.  */
2444
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2445
{
2446
    uint32_t sum;
2447
    sum = do_usad(a, b);
2448
    sum += do_usad(a >> 8, b >> 8);
2449
    sum += do_usad(a >> 16, b >>16);
2450
    sum += do_usad(a >> 24, b >> 24);
2451
    return sum;
2452
}
2453

    
2454
/* For ARMv6 SEL instruction.  */
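/* 'flags' carries the CPSR GE[3:0] bits produced by the parallel
 * add/subtract helpers above; each set bit selects the corresponding
 * byte lane from 'a', and clear bits take it from 'b'.
 */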
2455
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2456
{
2457
    uint32_t mask;
2458

    
2459
    mask = 0;
2460
    if (flags & 1)
2461
        mask |= 0xff;
2462
    if (flags & 2)
2463
        mask |= 0xff00;
2464
    if (flags & 4)
2465
        mask |= 0xff0000;
2466
    if (flags & 8)
2467
        mask |= 0xff000000;
2468
    return (a & mask) | (b & ~mask);
2469
}
2470

    
2471
uint32_t HELPER(logicq_cc)(uint64_t val)
2472
{
2473
    return (val >> 32) | (val != 0);
2474
}
2475

    
2476
/* VFP support.  We follow the convention used for VFP instructions:
2477
   Single precision routines have an "s" suffix, double precision a
2478
   "d" suffix.  */
2479

    
2480
/* Convert host exception flags to vfp form.  */
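/* The target bits are the FPSCR cumulative exception flags:
 * IOC (bit 0), DZC (bit 1), OFC (bit 2), UFC (bit 3), IXC (bit 4)
 * and IDC (bit 7).
 */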
2481
static inline int vfp_exceptbits_from_host(int host_bits)
2482
{
2483
    int target_bits = 0;
2484

    
2485
    if (host_bits & float_flag_invalid)
2486
        target_bits |= 1;
2487
    if (host_bits & float_flag_divbyzero)
2488
        target_bits |= 2;
2489
    if (host_bits & float_flag_overflow)
2490
        target_bits |= 4;
2491
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2492
        target_bits |= 8;
2493
    if (host_bits & float_flag_inexact)
2494
        target_bits |= 0x10;
2495
    if (host_bits & float_flag_input_denormal)
2496
        target_bits |= 0x80;
2497
    return target_bits;
2498
}
2499

    
2500
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
2501
{
2502
    int i;
2503
    uint32_t fpscr;
2504

    
2505
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2506
            | (env->vfp.vec_len << 16)
2507
            | (env->vfp.vec_stride << 20);
2508
    i = get_float_exception_flags(&env->vfp.fp_status);
2509
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2510
    fpscr |= vfp_exceptbits_from_host(i);
2511
    return fpscr;
2512
}
2513

    
2514
uint32_t vfp_get_fpscr(CPUState *env)
2515
{
2516
    return HELPER(vfp_get_fpscr)(env);
2517
}
2518

    
2519
/* Convert vfp exception flags to target form.  */
2520
static inline int vfp_exceptbits_to_host(int target_bits)
2521
{
2522
    int host_bits = 0;
2523

    
2524
    if (target_bits & 1)
2525
        host_bits |= float_flag_invalid;
2526
    if (target_bits & 2)
2527
        host_bits |= float_flag_divbyzero;
2528
    if (target_bits & 4)
2529
        host_bits |= float_flag_overflow;
2530
    if (target_bits & 8)
2531
        host_bits |= float_flag_underflow;
2532
    if (target_bits & 0x10)
2533
        host_bits |= float_flag_inexact;
2534
    if (target_bits & 0x80)
2535
        host_bits |= float_flag_input_denormal;
2536
    return host_bits;
2537
}
2538

    
2539
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
2540
{
2541
    int i;
2542
    uint32_t changed;
2543

    
2544
    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2545
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2546
    env->vfp.vec_len = (val >> 16) & 7;
2547
    env->vfp.vec_stride = (val >> 20) & 3;
2548

    
2549
    changed ^= val;
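    /* FPSCR.RMode lives in bits [23:22]: 0 = nearest, 1 = towards +inf,
     * 2 = towards -inf, 3 = towards zero.
     */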
2550
    if (changed & (3 << 22)) {
2551
        i = (val >> 22) & 3;
2552
        switch (i) {
2553
        case 0:
2554
            i = float_round_nearest_even;
2555
            break;
2556
        case 1:
2557
            i = float_round_up;
2558
            break;
2559
        case 2:
2560
            i = float_round_down;
2561
            break;
2562
        case 3:
2563
            i = float_round_to_zero;
2564
            break;
2565
        }
2566
        set_float_rounding_mode(i, &env->vfp.fp_status);
2567
    }
2568
    if (changed & (1 << 24)) {
2569
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2570
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2571
    }
2572
    if (changed & (1 << 25))
2573
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2574

    
2575
    i = vfp_exceptbits_to_host(val);
2576
    set_float_exception_flags(i, &env->vfp.fp_status);
2577
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2578
}
2579

    
2580
void vfp_set_fpscr(CPUState *env, uint32_t val)
2581
{
2582
    HELPER(vfp_set_fpscr)(env, val);
2583
}
2584

    
2585
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2586

    
2587
#define VFP_BINOP(name) \
2588
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2589
{ \
2590
    float_status *fpst = fpstp; \
2591
    return float32_ ## name(a, b, fpst); \
2592
} \
2593
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2594
{ \
2595
    float_status *fpst = fpstp; \
2596
    return float64_ ## name(a, b, fpst); \
2597
}
2598
VFP_BINOP(add)
2599
VFP_BINOP(sub)
2600
VFP_BINOP(mul)
2601
VFP_BINOP(div)
2602
#undef VFP_BINOP
2603

    
2604
float32 VFP_HELPER(neg, s)(float32 a)
2605
{
2606
    return float32_chs(a);
2607
}
2608

    
2609
float64 VFP_HELPER(neg, d)(float64 a)
2610
{
2611
    return float64_chs(a);
2612
}
2613

    
2614
float32 VFP_HELPER(abs, s)(float32 a)
2615
{
2616
    return float32_abs(a);
2617
}
2618

    
2619
float64 VFP_HELPER(abs, d)(float64 a)
2620
{
2621
    return float64_abs(a);
2622
}
2623

    
2624
float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2625
{
2626
    return float32_sqrt(a, &env->vfp.fp_status);
2627
}
2628

    
2629
float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2630
{
2631
    return float64_sqrt(a, &env->vfp.fp_status);
2632
}
2633

    
2634
/* XXX: check quiet/signaling case */
2635
#define DO_VFP_cmp(p, type) \
2636
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
2637
{ \
2638
    uint32_t flags; \
2639
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2640
    case 0: flags = 0x6; break; \
2641
    case -1: flags = 0x8; break; \
2642
    case 1: flags = 0x2; break; \
2643
    default: case 2: flags = 0x3; break; \
2644
    } \
2645
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2646
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2647
} \
2648
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
2649
{ \
2650
    uint32_t flags; \
2651
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2652
    case 0: flags = 0x6; break; \
2653
    case -1: flags = 0x8; break; \
2654
    case 1: flags = 0x2; break; \
2655
    default: case 2: flags = 0x3; break; \
2656
    } \
2657
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2658
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2659
}
2660
DO_VFP_cmp(s, float32)
2661
DO_VFP_cmp(d, float64)
2662
#undef DO_VFP_cmp
2663

    
2664
/* Integer to float and float to integer conversions */
2665

    
2666
#define CONV_ITOF(name, fsz, sign) \
2667
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2668
{ \
2669
    float_status *fpst = fpstp; \
2670
    return sign##int32_to_##float##fsz(x, fpst); \
2671
}
2672

    
2673
#define CONV_FTOI(name, fsz, sign, round) \
2674
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2675
{ \
2676
    float_status *fpst = fpstp; \
2677
    if (float##fsz##_is_any_nan(x)) { \
2678
        float_raise(float_flag_invalid, fpst); \
2679
        return 0; \
2680
    } \
2681
    return float##fsz##_to_##sign##int32##round(x, fpst); \
2682
}
2683

    
2684
#define FLOAT_CONVS(name, p, fsz, sign) \
2685
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2686
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2687
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2688

    
2689
FLOAT_CONVS(si, s, 32, )
2690
FLOAT_CONVS(si, d, 64, )
2691
FLOAT_CONVS(ui, s, 32, u)
2692
FLOAT_CONVS(ui, d, 64, u)
2693

    
2694
#undef CONV_ITOF
2695
#undef CONV_FTOI
2696
#undef FLOAT_CONVS
2697

    
2698
/* floating point conversion */
2699
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2700
{
2701
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2702
    /* ARM requires that S<->D conversion of any kind of NaN generates
2703
     * a quiet NaN by forcing the most significant frac bit to 1.
2704
     */
2705
    return float64_maybe_silence_nan(r);
2706
}
2707

    
2708
float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2709
{
2710
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
2711
    /* ARM requires that S<->D conversion of any kind of NaN generates
2712
     * a quiet NaN by forcing the most significant frac bit to 1.
2713
     */
2714
    return float32_maybe_silence_nan(r);
2715
}
2716

    
2717
/* VFP3 fixed point conversion.  */
2718
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2719
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2720
                                    void *fpstp) \
2721
{ \
2722
    float_status *fpst = fpstp; \
2723
    float##fsz tmp; \
2724
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2725
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2726
} \
2727
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2728
                                       void *fpstp) \
2729
{ \
2730
    float_status *fpst = fpstp; \
2731
    float##fsz tmp; \
2732
    if (float##fsz##_is_any_nan(x)) { \
2733
        float_raise(float_flag_invalid, fpst); \
2734
        return 0; \
2735
    } \
2736
    tmp = float##fsz##_scalbn(x, shift, fpst); \
2737
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2738
}
2739

    
2740
VFP_CONV_FIX(sh, d, 64, int16, )
2741
VFP_CONV_FIX(sl, d, 64, int32, )
2742
VFP_CONV_FIX(uh, d, 64, uint16, u)
2743
VFP_CONV_FIX(ul, d, 64, uint32, u)
2744
VFP_CONV_FIX(sh, s, 32, int16, )
2745
VFP_CONV_FIX(sl, s, 32, int32, )
2746
VFP_CONV_FIX(uh, s, 32, uint16, u)
2747
VFP_CONV_FIX(ul, s, 32, uint32, u)
2748
#undef VFP_CONV_FIX
2749

    
2750
/* Half precision conversions.  */
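/* FPSCR bit 26 is the AHP (Alternative Half-Precision) control bit;
 * when it is clear the IEEE 754 half-precision format is in use, which
 * is what the 'ieee' flag below tests.
 */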
2751
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
2752
{
2753
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2754
    float32 r = float16_to_float32(make_float16(a), ieee, s);
2755
    if (ieee) {
2756
        return float32_maybe_silence_nan(r);
2757
    }
2758
    return r;
2759
}
2760

    
2761
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
2762
{
2763
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2764
    float16 r = float32_to_float16(a, ieee, s);
2765
    if (ieee) {
2766
        r = float16_maybe_silence_nan(r);
2767
    }
2768
    return float16_val(r);
2769
}
2770

    
2771
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2772
{
2773
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2774
}
2775

    
2776
uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
2777
{
2778
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2779
}
2780

    
2781
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2782
{
2783
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2784
}
2785

    
2786
uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
2787
{
2788
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2789
}
2790

    
2791
#define float32_two make_float32(0x40000000)
2792
#define float32_three make_float32(0x40400000)
2793
#define float32_one_point_five make_float32(0x3fc00000)
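
/* VRECPS and VRSQRTS below compute the Newton-Raphson refinement steps
 * (2 - a*b) and (3 - a*b)/2 that pair with the VRECPE/VRSQRTE estimates;
 * the infinity-times-zero special cases return the exact constants the
 * ARM ARM requires.
 */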
2794

    
2795
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
2796
{
2797
    float_status *s = &env->vfp.standard_fp_status;
2798
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2799
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2800
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2801
            float_raise(float_flag_input_denormal, s);
2802
        }
2803
        return float32_two;
2804
    }
2805
    return float32_sub(float32_two, float32_mul(a, b, s), s);
2806
}
2807

    
2808
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
2809
{
2810
    float_status *s = &env->vfp.standard_fp_status;
2811
    float32 product;
2812
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2813
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2814
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2815
            float_raise(float_flag_input_denormal, s);
2816
        }
2817
        return float32_one_point_five;
2818
    }
2819
    product = float32_mul(a, b, s);
2820
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2821
}
2822

    
2823
/* NEON helpers.  */
2824

    
2825
/* Constants 256 and 512 are used in some helpers; we avoid relying on
2826
 * int->float conversions at run-time.  */
2827
#define float64_256 make_float64(0x4070000000000000LL)
2828
#define float64_512 make_float64(0x4080000000000000LL)
2829

    
2830
/* The algorithm that must be used to calculate the estimate
2831
 * is specified by the ARM ARM.
2832
 */
2833
static float64 recip_estimate(float64 a, CPUState *env)
2834
{
2835
    /* These calculations mustn't set any fp exception flags,
2836
     * so we use a local copy of the fp_status.
2837
     */
2838
    float_status dummy_status = env->vfp.standard_fp_status;
2839
    float_status *s = &dummy_status;
2840
    /* q = (int)(a * 512.0) */
2841
    float64 q = float64_mul(float64_512, a, s);
2842
    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2843

    
2844
    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2845
    q = int64_to_float64(q_int, s);
2846
    q = float64_add(q, float64_half, s);
2847
    q = float64_div(q, float64_512, s);
2848
    q = float64_div(float64_one, q, s);
2849

    
2850
    /* s = (int)(256.0 * r + 0.5) */
2851
    q = float64_mul(q, float64_256, s);
2852
    q = float64_add(q, float64_half, s);
2853
    q_int = float64_to_int64_round_to_zero(q, s);
2854

    
2855
    /* return (double)s / 256.0 */
2856
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2857
}
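
/* As a sanity check, recip_estimate(0.5) gives q_int = 256,
 * r = 512/256.5 ~= 1.99610 and a final result of 511/256 = 1.99609375,
 * close to the exact reciprocal 2.0.
 */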
2858

    
2859
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2860
{
2861
    float_status *s = &env->vfp.standard_fp_status;
2862
    float64 f64;
2863
    uint32_t val32 = float32_val(a);
2864

    
2865
    int result_exp;
2866
    int a_exp = (val32  & 0x7f800000) >> 23;
2867
    int sign = val32 & 0x80000000;
2868

    
2869
    if (float32_is_any_nan(a)) {
2870
        if (float32_is_signaling_nan(a)) {
2871
            float_raise(float_flag_invalid, s);
2872
        }
2873
        return float32_default_nan;
2874
    } else if (float32_is_infinity(a)) {
2875
        return float32_set_sign(float32_zero, float32_is_neg(a));
2876
    } else if (float32_is_zero_or_denormal(a)) {
2877
        if (!float32_is_zero(a)) {
2878
            float_raise(float_flag_input_denormal, s);
2879
        }
2880
        float_raise(float_flag_divbyzero, s);
2881
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2882
    } else if (a_exp >= 253) {
2883
        float_raise(float_flag_underflow, s);
2884
        return float32_set_sign(float32_zero, float32_is_neg(a));
2885
    }
2886

    
2887
    f64 = make_float64((0x3feULL << 52)
2888
                       | ((int64_t)(val32 & 0x7fffff) << 29));
2889

    
2890
    result_exp = 253 - a_exp;
2891

    
2892
    f64 = recip_estimate(f64, env);
2893

    
2894
    val32 = sign
2895
        | ((result_exp & 0xff) << 23)
2896
        | ((float64_val(f64) >> 29) & 0x7fffff);
2897
    return make_float32(val32);
2898
}
2899

    
2900
/* The algorithm that must be used to calculate the estimate
2901
 * is specified by the ARM ARM.
2902
 */
2903
static float64 recip_sqrt_estimate(float64 a, CPUState *env)
2904
{
2905
    /* These calculations mustn't set any fp exception flags,
2906
     * so we use a local copy of the fp_status.
2907
     */
2908
    float_status dummy_status = env->vfp.standard_fp_status;
2909
    float_status *s = &dummy_status;
2910
    float64 q;
2911
    int64_t q_int;
2912

    
2913
    if (float64_lt(a, float64_half, s)) {
2914
        /* range 0.25 <= a < 0.5 */
2915

    
2916
        /* a in units of 1/512 rounded down */
2917
        /* q0 = (int)(a * 512.0);  */
2918
        q = float64_mul(float64_512, a, s);
2919
        q_int = float64_to_int64_round_to_zero(q, s);
2920

    
2921
        /* reciprocal root r */
2922
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2923
        q = int64_to_float64(q_int, s);
2924
        q = float64_add(q, float64_half, s);
2925
        q = float64_div(q, float64_512, s);
2926
        q = float64_sqrt(q, s);
2927
        q = float64_div(float64_one, q, s);
2928
    } else {
2929
        /* range 0.5 <= a < 1.0 */
2930

    
2931
        /* a in units of 1/256 rounded down */
2932
        /* q1 = (int)(a * 256.0); */
2933
        q = float64_mul(float64_256, a, s);
2934
        int64_t q_int = float64_to_int64_round_to_zero(q, s);
2935

    
2936
        /* reciprocal root r */
2937
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2938
        q = int64_to_float64(q_int, s);
2939
        q = float64_add(q, float64_half, s);
2940
        q = float64_div(q, float64_256, s);
2941
        q = float64_sqrt(q, s);
2942
        q = float64_div(float64_one, q, s);
2943
    }
2944
    /* r in units of 1/256 rounded to nearest */
2945
    /* s = (int)(256.0 * r + 0.5); */
2946

    
2947
    q = float64_mul(q, float64_256, s);
2948
    q = float64_add(q, float64_half, s);
2949
    q_int = float64_to_int64_round_to_zero(q, s);
2950

    
2951
    /* return (double)s / 256.0;*/
2952
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2953
}
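
/* Sanity check: recip_sqrt_estimate(0.25) takes the first branch with
 * q_int = 128, r = 1/sqrt(128.5/512) ~= 1.99611, and returns
 * 511/256 = 1.99609375, close to the exact value 2.0.
 */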
2954

    
2955
float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
2956
{
2957
    float_status *s = &env->vfp.standard_fp_status;
2958
    int result_exp;
2959
    float64 f64;
2960
    uint32_t val;
2961
    uint64_t val64;
2962

    
2963
    val = float32_val(a);
2964

    
2965
    if (float32_is_any_nan(a)) {
2966
        if (float32_is_signaling_nan(a)) {
2967
            float_raise(float_flag_invalid, s);
2968
        }
2969
        return float32_default_nan;
2970
    } else if (float32_is_zero_or_denormal(a)) {
2971
        if (!float32_is_zero(a)) {
2972
            float_raise(float_flag_input_denormal, s);
2973
        }
2974
        float_raise(float_flag_divbyzero, s);
2975
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2976
    } else if (float32_is_neg(a)) {
2977
        float_raise(float_flag_invalid, s);
2978
        return float32_default_nan;
2979
    } else if (float32_is_infinity(a)) {
2980
        return float32_zero;
2981
    }
2982

    
2983
    /* Normalize to a double-precision value between 0.25 and 1.0,
2984
     * preserving the parity of the exponent.  */
2985
    if ((val & 0x800000) == 0) {
2986
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
2987
                           | (0x3feULL << 52)
2988
                           | ((uint64_t)(val & 0x7fffff) << 29));
2989
    } else {
2990
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
2991
                           | (0x3fdULL << 52)
2992
                           | ((uint64_t)(val & 0x7fffff) << 29));
2993
    }
2994

    
2995
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
2996

    
2997
    f64 = recip_sqrt_estimate(f64, env);
2998

    
2999
    val64 = float64_val(f64);
3000

    
3001
    val = ((val64 >> 63)  & 0x80000000)
3002
        | ((result_exp & 0xff) << 23)
3003
        | ((val64 >> 29)  & 0x7fffff);
3004
    return make_float32(val);
3005
}
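
/* The unsigned estimate helpers below treat the operand as a 0.32
 * fixed-point fraction; inputs outside the valid range (top bit clear
 * for VRECPE, top two bits clear for VRSQRTE) return all ones, as the
 * ARM ARM specifies.
 */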
3006

    
3007
uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
3008
{
3009
    float64 f64;
3010

    
3011
    if ((a & 0x80000000) == 0) {
3012
        return 0xffffffff;
3013
    }
3014

    
3015
    f64 = make_float64((0x3feULL << 52)
3016
                       | ((int64_t)(a & 0x7fffffff) << 21));
3017

    
3018
    f64 = recip_estimate (f64, env);
3019

    
3020
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3021
}
3022

    
3023
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
3024
{
3025
    float64 f64;
3026

    
3027
    if ((a & 0xc0000000) == 0) {
3028
        return 0xffffffff;
3029
    }
3030

    
3031
    if (a & 0x80000000) {
3032
        f64 = make_float64((0x3feULL << 52)
3033
                           | ((uint64_t)(a & 0x7fffffff) << 21));
3034
    } else { /* bits 31-30 == '01' */
3035
        f64 = make_float64((0x3fdULL << 52)
3036
                           | ((uint64_t)(a & 0x3fffffff) << 22));
3037
    }
3038

    
3039
    f64 = recip_sqrt_estimate(f64, env);
3040

    
3041
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3042
}
3043

    
3044
void HELPER(set_teecr)(CPUState *env, uint32_t val)
3045
{
3046
    val &= 1;
3047
    if (env->teecr != val) {
3048
        env->teecr = val;
3049
        tb_flush(env);
3050
    }
3051
}