Statistics
| Branch: | Revision:

root / target-arm / helper.c @ eca1bdf4

History | View | Annotate | Download (71.4 kB)

1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "exec-all.h"
7
#include "gdbstub.h"
8
#include "helpers.h"
9
#include "qemu-common.h"
10

    
11
static uint32_t cortexa8_cp15_c0_c1[8] =
12
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
13

    
14
static uint32_t cortexa8_cp15_c0_c2[8] =
15
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
16

    
17
static uint32_t mpcore_cp15_c0_c1[8] =
18
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
19

    
20
static uint32_t mpcore_cp15_c0_c2[8] =
21
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
22

    
23
static uint32_t arm1136_cp15_c0_c1[8] =
24
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
25

    
26
static uint32_t arm1136_cp15_c0_c2[8] =
27
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
28

    
29
static uint32_t cpu_arm_find_by_name(const char *name);
30

    
31
static inline void set_feature(CPUARMState *env, int feature)
32
{
33
    env->features |= 1u << feature;
34
}
35

    
36
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
37
{
38
    env->cp15.c0_cpuid = id;
39
    switch (id) {
40
    case ARM_CPUID_ARM926:
41
        set_feature(env, ARM_FEATURE_VFP);
42
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
43
        env->cp15.c0_cachetype = 0x1dd20d2;
44
        env->cp15.c1_sys = 0x00090078;
45
        break;
46
    case ARM_CPUID_ARM946:
47
        set_feature(env, ARM_FEATURE_MPU);
48
        env->cp15.c0_cachetype = 0x0f004006;
49
        env->cp15.c1_sys = 0x00000078;
50
        break;
51
    case ARM_CPUID_ARM1026:
52
        set_feature(env, ARM_FEATURE_VFP);
53
        set_feature(env, ARM_FEATURE_AUXCR);
54
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
55
        env->cp15.c0_cachetype = 0x1dd20d2;
56
        env->cp15.c1_sys = 0x00090078;
57
        break;
58
    case ARM_CPUID_ARM1136_R2:
59
    case ARM_CPUID_ARM1136:
60
        set_feature(env, ARM_FEATURE_V6);
61
        set_feature(env, ARM_FEATURE_VFP);
62
        set_feature(env, ARM_FEATURE_AUXCR);
63
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
64
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
65
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
66
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
67
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
68
        env->cp15.c0_cachetype = 0x1dd20d2;
69
        break;
70
    case ARM_CPUID_ARM11MPCORE:
71
        set_feature(env, ARM_FEATURE_V6);
72
        set_feature(env, ARM_FEATURE_V6K);
73
        set_feature(env, ARM_FEATURE_VFP);
74
        set_feature(env, ARM_FEATURE_AUXCR);
75
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
76
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
77
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
78
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
79
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
80
        env->cp15.c0_cachetype = 0x1dd20d2;
81
        break;
82
    case ARM_CPUID_CORTEXA8:
83
        set_feature(env, ARM_FEATURE_V6);
84
        set_feature(env, ARM_FEATURE_V6K);
85
        set_feature(env, ARM_FEATURE_V7);
86
        set_feature(env, ARM_FEATURE_AUXCR);
87
        set_feature(env, ARM_FEATURE_THUMB2);
88
        set_feature(env, ARM_FEATURE_VFP);
89
        set_feature(env, ARM_FEATURE_VFP3);
90
        set_feature(env, ARM_FEATURE_NEON);
91
        set_feature(env, ARM_FEATURE_THUMB2EE);
92
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
93
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
94
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
95
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
96
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
97
        env->cp15.c0_cachetype = 0x82048004;
98
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
99
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
100
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
101
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
102
        break;
103
    case ARM_CPUID_CORTEXM3:
104
        set_feature(env, ARM_FEATURE_V6);
105
        set_feature(env, ARM_FEATURE_THUMB2);
106
        set_feature(env, ARM_FEATURE_V7);
107
        set_feature(env, ARM_FEATURE_M);
108
        set_feature(env, ARM_FEATURE_DIV);
109
        break;
110
    case ARM_CPUID_ANY: /* For userspace emulation.  */
111
        set_feature(env, ARM_FEATURE_V6);
112
        set_feature(env, ARM_FEATURE_V6K);
113
        set_feature(env, ARM_FEATURE_V7);
114
        set_feature(env, ARM_FEATURE_THUMB2);
115
        set_feature(env, ARM_FEATURE_VFP);
116
        set_feature(env, ARM_FEATURE_VFP3);
117
        set_feature(env, ARM_FEATURE_NEON);
118
        set_feature(env, ARM_FEATURE_THUMB2EE);
119
        set_feature(env, ARM_FEATURE_DIV);
120
        break;
121
    case ARM_CPUID_TI915T:
122
    case ARM_CPUID_TI925T:
123
        set_feature(env, ARM_FEATURE_OMAPCP);
124
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
125
        env->cp15.c0_cachetype = 0x5109149;
126
        env->cp15.c1_sys = 0x00000070;
127
        env->cp15.c15_i_max = 0x000;
128
        env->cp15.c15_i_min = 0xff0;
129
        break;
130
    case ARM_CPUID_PXA250:
131
    case ARM_CPUID_PXA255:
132
    case ARM_CPUID_PXA260:
133
    case ARM_CPUID_PXA261:
134
    case ARM_CPUID_PXA262:
135
        set_feature(env, ARM_FEATURE_XSCALE);
136
        /* JTAG_ID is ((id << 28) | 0x09265013) */
137
        env->cp15.c0_cachetype = 0xd172172;
138
        env->cp15.c1_sys = 0x00000078;
139
        break;
140
    case ARM_CPUID_PXA270_A0:
141
    case ARM_CPUID_PXA270_A1:
142
    case ARM_CPUID_PXA270_B0:
143
    case ARM_CPUID_PXA270_B1:
144
    case ARM_CPUID_PXA270_C0:
145
    case ARM_CPUID_PXA270_C5:
146
        set_feature(env, ARM_FEATURE_XSCALE);
147
        /* JTAG_ID is ((id << 28) | 0x09265013) */
148
        set_feature(env, ARM_FEATURE_IWMMXT);
149
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
150
        env->cp15.c0_cachetype = 0xd172172;
151
        env->cp15.c1_sys = 0x00000078;
152
        break;
153
    default:
154
        cpu_abort(env, "Bad CPU ID: %x\n", id);
155
        break;
156
    }
157
}
158

    
159
void cpu_reset(CPUARMState *env)
160
{
161
    uint32_t id;
162

    
163
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
164
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
165
        log_cpu_state(env, 0);
166
    }
167

    
168
    id = env->cp15.c0_cpuid;
169
    memset(env, 0, offsetof(CPUARMState, breakpoints));
170
    if (id)
171
        cpu_reset_model_id(env, id);
172
#if defined (CONFIG_USER_ONLY)
173
    env->uncached_cpsr = ARM_CPU_MODE_USR;
174
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
175
#else
176
    /* SVC mode with interrupts disabled.  */
177
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
178
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
179
       clear at reset.  */
180
    if (IS_M(env))
181
        env->uncached_cpsr &= ~CPSR_I;
182
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
183
    env->cp15.c2_base_mask = 0xffffc000u;
184
#endif
185
    env->regs[15] = 0;
186
    tlb_flush(env, 1);
187
}
188

    
189
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
190
{
191
    int nregs;
192

    
193
    /* VFP data registers are always little-endian.  */
194
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
195
    if (reg < nregs) {
196
        stfq_le_p(buf, env->vfp.regs[reg]);
197
        return 8;
198
    }
199
    if (arm_feature(env, ARM_FEATURE_NEON)) {
200
        /* Aliases for Q regs.  */
201
        nregs += 16;
202
        if (reg < nregs) {
203
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
204
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
205
            return 16;
206
        }
207
    }
208
    switch (reg - nregs) {
209
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
210
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
211
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
212
    }
213
    return 0;
214
}
215

    
216
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
217
{
218
    int nregs;
219

    
220
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
221
    if (reg < nregs) {
222
        env->vfp.regs[reg] = ldfq_le_p(buf);
223
        return 8;
224
    }
225
    if (arm_feature(env, ARM_FEATURE_NEON)) {
226
        nregs += 16;
227
        if (reg < nregs) {
228
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
229
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
230
            return 16;
231
        }
232
    }
233
    switch (reg - nregs) {
234
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
235
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
236
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf); return 4;
237
    }
238
    return 0;
239
}
240

    
241
CPUARMState *cpu_arm_init(const char *cpu_model)
242
{
243
    CPUARMState *env;
244
    uint32_t id;
245
    static int inited = 0;
246

    
247
    id = cpu_arm_find_by_name(cpu_model);
248
    if (id == 0)
249
        return NULL;
250
    env = qemu_mallocz(sizeof(CPUARMState));
251
    if (!env)
252
        return NULL;
253
    cpu_exec_init(env);
254
    if (!inited) {
255
        inited = 1;
256
        arm_translate_init();
257
    }
258

    
259
    env->cpu_model_str = cpu_model;
260
    env->cp15.c0_cpuid = id;
261
    cpu_reset(env);
262
    if (arm_feature(env, ARM_FEATURE_NEON)) {
263
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
264
                                 51, "arm-neon.xml", 0);
265
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
266
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
267
                                 35, "arm-vfp3.xml", 0);
268
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
269
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
270
                                 19, "arm-vfp.xml", 0);
271
    }
272
    return env;
273
}
274

    
275
struct arm_cpu_t {
276
    uint32_t id;
277
    const char *name;
278
};
279

    
280
static const struct arm_cpu_t arm_cpu_names[] = {
281
    { ARM_CPUID_ARM926, "arm926"},
282
    { ARM_CPUID_ARM946, "arm946"},
283
    { ARM_CPUID_ARM1026, "arm1026"},
284
    { ARM_CPUID_ARM1136, "arm1136"},
285
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
286
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
287
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
288
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
289
    { ARM_CPUID_TI925T, "ti925t" },
290
    { ARM_CPUID_PXA250, "pxa250" },
291
    { ARM_CPUID_PXA255, "pxa255" },
292
    { ARM_CPUID_PXA260, "pxa260" },
293
    { ARM_CPUID_PXA261, "pxa261" },
294
    { ARM_CPUID_PXA262, "pxa262" },
295
    { ARM_CPUID_PXA270, "pxa270" },
296
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
297
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
298
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
299
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
300
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
301
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
302
    { ARM_CPUID_ANY, "any"},
303
    { 0, NULL}
304
};
305

    
306
void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
307
{
308
    int i;
309

    
310
    (*cpu_fprintf)(f, "Available CPUs:\n");
311
    for (i = 0; arm_cpu_names[i].name; i++) {
312
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
313
    }
314
}
315

    
316
/* return 0 if not found */
317
static uint32_t cpu_arm_find_by_name(const char *name)
318
{
319
    int i;
320
    uint32_t id;
321

    
322
    id = 0;
323
    for (i = 0; arm_cpu_names[i].name; i++) {
324
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
325
            id = arm_cpu_names[i].id;
326
            break;
327
        }
328
    }
329
    return id;
330
}
331

    
332
void cpu_arm_close(CPUARMState *env)
333
{
334
    free(env);
335
}
336

    
337
uint32_t cpsr_read(CPUARMState *env)
338
{
339
    int ZF;
340
    ZF = (env->ZF == 0);
341
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
342
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
343
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
344
        | ((env->condexec_bits & 0xfc) << 8)
345
        | (env->GE << 16);
346
}
347

    
348
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
349
{
350
    if (mask & CPSR_NZCV) {
351
        env->ZF = (~val) & CPSR_Z;
352
        env->NF = val;
353
        env->CF = (val >> 29) & 1;
354
        env->VF = (val << 3) & 0x80000000;
355
    }
356
    if (mask & CPSR_Q)
357
        env->QF = ((val & CPSR_Q) != 0);
358
    if (mask & CPSR_T)
359
        env->thumb = ((val & CPSR_T) != 0);
360
    if (mask & CPSR_IT_0_1) {
361
        env->condexec_bits &= ~3;
362
        env->condexec_bits |= (val >> 25) & 3;
363
    }
364
    if (mask & CPSR_IT_2_7) {
365
        env->condexec_bits &= 3;
366
        env->condexec_bits |= (val >> 8) & 0xfc;
367
    }
368
    if (mask & CPSR_GE) {
369
        env->GE = (val >> 16) & 0xf;
370
    }
371

    
372
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
373
        switch_mode(env, val & CPSR_M);
374
    }
375
    mask &= ~CACHED_CPSR_BITS;
376
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
377
}
378

    
379
/* Sign/zero extend */
380
uint32_t HELPER(sxtb16)(uint32_t x)
381
{
382
    uint32_t res;
383
    res = (uint16_t)(int8_t)x;
384
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
385
    return res;
386
}
387

    
388
uint32_t HELPER(uxtb16)(uint32_t x)
389
{
390
    uint32_t res;
391
    res = (uint16_t)(uint8_t)x;
392
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
393
    return res;
394
}
395

    
396
uint32_t HELPER(clz)(uint32_t x)
397
{
398
    int count;
399
    for (count = 32; x; count--)
400
        x >>= 1;
401
    return count;
402
}
403

    
404
int32_t HELPER(sdiv)(int32_t num, int32_t den)
405
{
406
    if (den == 0)
407
      return 0;
408
    return num / den;
409
}
410

    
411
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
412
{
413
    if (den == 0)
414
      return 0;
415
    return num / den;
416
}
417

    
418
uint32_t HELPER(rbit)(uint32_t x)
419
{
420
    x =  ((x & 0xff000000) >> 24)
421
       | ((x & 0x00ff0000) >> 8)
422
       | ((x & 0x0000ff00) << 8)
423
       | ((x & 0x000000ff) << 24);
424
    x =  ((x & 0xf0f0f0f0) >> 4)
425
       | ((x & 0x0f0f0f0f) << 4);
426
    x =  ((x & 0x88888888) >> 3)
427
       | ((x & 0x44444444) >> 1)
428
       | ((x & 0x22222222) << 1)
429
       | ((x & 0x11111111) << 3);
430
    return x;
431
}
432

    
433
uint32_t HELPER(abs)(uint32_t x)
434
{
435
    return ((int32_t)x < 0) ? -x : x;
436
}
437

    
438
#if defined(CONFIG_USER_ONLY)
439

    
440
void do_interrupt (CPUState *env)
441
{
442
    env->exception_index = -1;
443
}
444

    
445
/* Structure used to record exclusive memory locations.  */
446
typedef struct mmon_state {
447
    struct mmon_state *next;
448
    CPUARMState *cpu_env;
449
    uint32_t addr;
450
} mmon_state;
451

    
452
/* Chain of current locks.  */
453
static mmon_state* mmon_head = NULL;
454

    
455
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
456
                              int mmu_idx, int is_softmmu)
457
{
458
    if (rw == 2) {
459
        env->exception_index = EXCP_PREFETCH_ABORT;
460
        env->cp15.c6_insn = address;
461
    } else {
462
        env->exception_index = EXCP_DATA_ABORT;
463
        env->cp15.c6_data = address;
464
    }
465
    return 1;
466
}
467

    
468
static void allocate_mmon_state(CPUState *env)
469
{
470
    env->mmon_entry = malloc(sizeof (mmon_state));
471
    if (!env->mmon_entry)
472
        abort();
473
    memset (env->mmon_entry, 0, sizeof (mmon_state));
474
    env->mmon_entry->cpu_env = env;
475
    mmon_head = env->mmon_entry;
476
}
477

    
478
/* Flush any monitor locks for the specified address.  */
479
static void flush_mmon(uint32_t addr)
480
{
481
    mmon_state *mon;
482

    
483
    for (mon = mmon_head; mon; mon = mon->next)
484
      {
485
        if (mon->addr != addr)
486
          continue;
487

    
488
        mon->addr = 0;
489
        break;
490
      }
491
}
492

    
493
/* Mark an address for exclusive access.  */
494
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
495
{
496
    if (!env->mmon_entry)
497
        allocate_mmon_state(env);
498
    /* Clear any previous locks.  */
499
    flush_mmon(addr);
500
    env->mmon_entry->addr = addr;
501
}
502

    
503
/* Test if an exclusive address is still exclusive.  Returns zero
504
   if the address is still exclusive.   */
505
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
506
{
507
    int res;
508

    
509
    if (!env->mmon_entry)
510
        return 1;
511
    if (env->mmon_entry->addr == addr)
512
        res = 0;
513
    else
514
        res = 1;
515
    flush_mmon(addr);
516
    return res;
517
}
518

    
519
void HELPER(clrex)(CPUState *env)
520
{
521
    if (!(env->mmon_entry && env->mmon_entry->addr))
522
        return;
523
    flush_mmon(env->mmon_entry->addr);
524
}
525

    
526
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
527
{
528
    return addr;
529
}
530

    
531
/* These should probably raise undefined insn exceptions.  */
532
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
533
{
534
    int op1 = (insn >> 8) & 0xf;
535
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
536
    return;
537
}
538

    
539
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
540
{
541
    int op1 = (insn >> 8) & 0xf;
542
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
543
    return 0;
544
}
545

    
546
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
547
{
548
    cpu_abort(env, "cp15 insn %08x\n", insn);
549
}
550

    
551
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
552
{
553
    cpu_abort(env, "cp15 insn %08x\n", insn);
554
    return 0;
555
}
556

    
557
/* These should probably raise undefined insn exceptions.  */
558
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
559
{
560
    cpu_abort(env, "v7m_mrs %d\n", reg);
561
}
562

    
563
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
564
{
565
    cpu_abort(env, "v7m_mrs %d\n", reg);
566
    return 0;
567
}
568

    
569
void switch_mode(CPUState *env, int mode)
570
{
571
    if (mode != ARM_CPU_MODE_USR)
572
        cpu_abort(env, "Tried to switch out of user mode\n");
573
}
574

    
575
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
576
{
577
    cpu_abort(env, "banked r13 write\n");
578
}
579

    
580
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
581
{
582
    cpu_abort(env, "banked r13 read\n");
583
    return 0;
584
}
585

    
586
#else
587

    
588
extern int semihosting_enabled;
589

    
590
/* Map CPU modes onto saved register banks.  */
591
static inline int bank_number (int mode)
592
{
593
    switch (mode) {
594
    case ARM_CPU_MODE_USR:
595
    case ARM_CPU_MODE_SYS:
596
        return 0;
597
    case ARM_CPU_MODE_SVC:
598
        return 1;
599
    case ARM_CPU_MODE_ABT:
600
        return 2;
601
    case ARM_CPU_MODE_UND:
602
        return 3;
603
    case ARM_CPU_MODE_IRQ:
604
        return 4;
605
    case ARM_CPU_MODE_FIQ:
606
        return 5;
607
    }
608
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
609
    return -1;
610
}
611

    
612
void switch_mode(CPUState *env, int mode)
613
{
614
    int old_mode;
615
    int i;
616

    
617
    old_mode = env->uncached_cpsr & CPSR_M;
618
    if (mode == old_mode)
619
        return;
620

    
621
    if (old_mode == ARM_CPU_MODE_FIQ) {
622
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
623
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
624
    } else if (mode == ARM_CPU_MODE_FIQ) {
625
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
626
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
627
    }
628

    
629
    i = bank_number(old_mode);
630
    env->banked_r13[i] = env->regs[13];
631
    env->banked_r14[i] = env->regs[14];
632
    env->banked_spsr[i] = env->spsr;
633

    
634
    i = bank_number(mode);
635
    env->regs[13] = env->banked_r13[i];
636
    env->regs[14] = env->banked_r14[i];
637
    env->spsr = env->banked_spsr[i];
638
}
639

    
640
static void v7m_push(CPUARMState *env, uint32_t val)
641
{
642
    env->regs[13] -= 4;
643
    stl_phys(env->regs[13], val);
644
}
645

    
646
static uint32_t v7m_pop(CPUARMState *env)
647
{
648
    uint32_t val;
649
    val = ldl_phys(env->regs[13]);
650
    env->regs[13] += 4;
651
    return val;
652
}
653

    
654
/* Switch to V7M main or process stack pointer.  */
655
static void switch_v7m_sp(CPUARMState *env, int process)
656
{
657
    uint32_t tmp;
658
    if (env->v7m.current_sp != process) {
659
        tmp = env->v7m.other_sp;
660
        env->v7m.other_sp = env->regs[13];
661
        env->regs[13] = tmp;
662
        env->v7m.current_sp = process;
663
    }
664
}
665

    
666
static void do_v7m_exception_exit(CPUARMState *env)
667
{
668
    uint32_t type;
669
    uint32_t xpsr;
670

    
671
    type = env->regs[15];
672
    if (env->v7m.exception != 0)
673
        armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);
674

    
675
    /* Switch to the target stack.  */
676
    switch_v7m_sp(env, (type & 4) != 0);
677
    /* Pop registers.  */
678
    env->regs[0] = v7m_pop(env);
679
    env->regs[1] = v7m_pop(env);
680
    env->regs[2] = v7m_pop(env);
681
    env->regs[3] = v7m_pop(env);
682
    env->regs[12] = v7m_pop(env);
683
    env->regs[14] = v7m_pop(env);
684
    env->regs[15] = v7m_pop(env);
685
    xpsr = v7m_pop(env);
686
    xpsr_write(env, xpsr, 0xfffffdff);
687
    /* Undo stack alignment.  */
688
    if (xpsr & 0x200)
689
        env->regs[13] |= 4;
690
    /* ??? The exception return type specifies Thread/Handler mode.  However
691
       this is also implied by the xPSR value. Not sure what to do
692
       if there is a mismatch.  */
693
    /* ??? Likewise for mismatches between the CONTROL register and the stack
694
       pointer.  */
695
}
696

    
697
void do_interrupt_v7m(CPUARMState *env)
698
{
699
    uint32_t xpsr = xpsr_read(env);
700
    uint32_t lr;
701
    uint32_t addr;
702

    
703
    lr = 0xfffffff1;
704
    if (env->v7m.current_sp)
705
        lr |= 4;
706
    if (env->v7m.exception == 0)
707
        lr |= 8;
708

    
709
    /* For exceptions we just mark as pending on the NVIC, and let that
710
       handle it.  */
711
    /* TODO: Need to escalate if the current priority is higher than the
712
       one we're raising.  */
713
    switch (env->exception_index) {
714
    case EXCP_UDEF:
715
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
716
        return;
717
    case EXCP_SWI:
718
        env->regs[15] += 2;
719
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
720
        return;
721
    case EXCP_PREFETCH_ABORT:
722
    case EXCP_DATA_ABORT:
723
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
724
        return;
725
    case EXCP_BKPT:
726
        if (semihosting_enabled) {
727
            int nr;
728
            nr = lduw_code(env->regs[15]) & 0xff;
729
            if (nr == 0xab) {
730
                env->regs[15] += 2;
731
                env->regs[0] = do_arm_semihosting(env);
732
                return;
733
            }
734
        }
735
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
736
        return;
737
    case EXCP_IRQ:
738
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
739
        break;
740
    case EXCP_EXCEPTION_EXIT:
741
        do_v7m_exception_exit(env);
742
        return;
743
    default:
744
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
745
        return; /* Never happens.  Keep compiler happy.  */
746
    }
747

    
748
    /* Align stack pointer.  */
749
    /* ??? Should only do this if Configuration Control Register
750
       STACKALIGN bit is set.  */
751
    if (env->regs[13] & 4) {
752
        env->regs[13] -= 4;
753
        xpsr |= 0x200;
754
    }
755
    /* Switch to the handler mode.  */
756
    v7m_push(env, xpsr);
757
    v7m_push(env, env->regs[15]);
758
    v7m_push(env, env->regs[14]);
759
    v7m_push(env, env->regs[12]);
760
    v7m_push(env, env->regs[3]);
761
    v7m_push(env, env->regs[2]);
762
    v7m_push(env, env->regs[1]);
763
    v7m_push(env, env->regs[0]);
764
    switch_v7m_sp(env, 0);
765
    env->uncached_cpsr &= ~CPSR_IT;
766
    env->regs[14] = lr;
767
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
768
    env->regs[15] = addr & 0xfffffffe;
769
    env->thumb = addr & 1;
770
}
771

    
772
/* Handle a CPU exception.  */
773
void do_interrupt(CPUARMState *env)
774
{
775
    uint32_t addr;
776
    uint32_t mask;
777
    int new_mode;
778
    uint32_t offset;
779

    
780
    if (IS_M(env)) {
781
        do_interrupt_v7m(env);
782
        return;
783
    }
784
    /* TODO: Vectored interrupt controller.  */
785
    switch (env->exception_index) {
786
    case EXCP_UDEF:
787
        new_mode = ARM_CPU_MODE_UND;
788
        addr = 0x04;
789
        mask = CPSR_I;
790
        if (env->thumb)
791
            offset = 2;
792
        else
793
            offset = 4;
794
        break;
795
    case EXCP_SWI:
796
        if (semihosting_enabled) {
797
            /* Check for semihosting interrupt.  */
798
            if (env->thumb) {
799
                mask = lduw_code(env->regs[15] - 2) & 0xff;
800
            } else {
801
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
802
            }
803
            /* Only intercept calls from privileged modes, to provide some
804
               semblance of security.  */
805
            if (((mask == 0x123456 && !env->thumb)
806
                    || (mask == 0xab && env->thumb))
807
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
808
                env->regs[0] = do_arm_semihosting(env);
809
                return;
810
            }
811
        }
812
        new_mode = ARM_CPU_MODE_SVC;
813
        addr = 0x08;
814
        mask = CPSR_I;
815
        /* The PC already points to the next instruction.  */
816
        offset = 0;
817
        break;
818
    case EXCP_BKPT:
819
        /* See if this is a semihosting syscall.  */
820
        if (env->thumb && semihosting_enabled) {
821
            mask = lduw_code(env->regs[15]) & 0xff;
822
            if (mask == 0xab
823
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
824
                env->regs[15] += 2;
825
                env->regs[0] = do_arm_semihosting(env);
826
                return;
827
            }
828
        }
829
        /* Fall through to prefetch abort.  */
830
    case EXCP_PREFETCH_ABORT:
831
        new_mode = ARM_CPU_MODE_ABT;
832
        addr = 0x0c;
833
        mask = CPSR_A | CPSR_I;
834
        offset = 4;
835
        break;
836
    case EXCP_DATA_ABORT:
837
        new_mode = ARM_CPU_MODE_ABT;
838
        addr = 0x10;
839
        mask = CPSR_A | CPSR_I;
840
        offset = 8;
841
        break;
842
    case EXCP_IRQ:
843
        new_mode = ARM_CPU_MODE_IRQ;
844
        addr = 0x18;
845
        /* Disable IRQ and imprecise data aborts.  */
846
        mask = CPSR_A | CPSR_I;
847
        offset = 4;
848
        break;
849
    case EXCP_FIQ:
850
        new_mode = ARM_CPU_MODE_FIQ;
851
        addr = 0x1c;
852
        /* Disable FIQ, IRQ and imprecise data aborts.  */
853
        mask = CPSR_A | CPSR_I | CPSR_F;
854
        offset = 4;
855
        break;
856
    default:
857
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
858
        return; /* Never happens.  Keep compiler happy.  */
859
    }
860
    /* High vectors.  */
861
    if (env->cp15.c1_sys & (1 << 13)) {
862
        addr += 0xffff0000;
863
    }
864
    switch_mode (env, new_mode);
865
    env->spsr = cpsr_read(env);
866
    /* Clear IT bits.  */
867
    env->condexec_bits = 0;
868
    /* Switch to the new mode, and switch to Arm mode.  */
869
    /* ??? Thumb interrupt handlers not implemented.  */
870
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
871
    env->uncached_cpsr |= mask;
872
    env->thumb = 0;
873
    env->regs[14] = env->regs[15] + offset;
874
    env->regs[15] = addr;
875
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
876
}
877

    
878
/* Check section/page access permissions.
879
   Returns the page protection flags, or zero if the access is not
880
   permitted.  */
881
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
882
                           int is_user)
883
{
884
  int prot_ro;
885

    
886
  if (domain == 3)
887
    return PAGE_READ | PAGE_WRITE;
888

    
889
  if (access_type == 1)
890
      prot_ro = 0;
891
  else
892
      prot_ro = PAGE_READ;
893

    
894
  switch (ap) {
895
  case 0:
896
      if (access_type == 1)
897
          return 0;
898
      switch ((env->cp15.c1_sys >> 8) & 3) {
899
      case 1:
900
          return is_user ? 0 : PAGE_READ;
901
      case 2:
902
          return PAGE_READ;
903
      default:
904
          return 0;
905
      }
906
  case 1:
907
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
908
  case 2:
909
      if (is_user)
910
          return prot_ro;
911
      else
912
          return PAGE_READ | PAGE_WRITE;
913
  case 3:
914
      return PAGE_READ | PAGE_WRITE;
915
  case 4: /* Reserved.  */
916
      return 0;
917
  case 5:
918
      return is_user ? 0 : prot_ro;
919
  case 6:
920
      return prot_ro;
921
  case 7:
922
      if (!arm_feature (env, ARM_FEATURE_V7))
923
          return 0;
924
      return prot_ro;
925
  default:
926
      abort();
927
  }
928
}
929

    
930
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
931
{
932
    uint32_t table;
933

    
934
    if (address & env->cp15.c2_mask)
935
        table = env->cp15.c2_base1 & 0xffffc000;
936
    else
937
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
938

    
939
    table |= (address >> 18) & 0x3ffc;
940
    return table;
941
}
942

    
943
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
944
                            int is_user, uint32_t *phys_ptr, int *prot)
945
{
946
    int code;
947
    uint32_t table;
948
    uint32_t desc;
949
    int type;
950
    int ap;
951
    int domain;
952
    uint32_t phys_addr;
953

    
954
    /* Pagetable walk.  */
955
    /* Lookup l1 descriptor.  */
956
    table = get_level1_table_address(env, address);
957
    desc = ldl_phys(table);
958
    type = (desc & 3);
959
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
960
    if (type == 0) {
961
        /* Section translation fault.  */
962
        code = 5;
963
        goto do_fault;
964
    }
965
    if (domain == 0 || domain == 2) {
966
        if (type == 2)
967
            code = 9; /* Section domain fault.  */
968
        else
969
            code = 11; /* Page domain fault.  */
970
        goto do_fault;
971
    }
972
    if (type == 2) {
973
        /* 1Mb section.  */
974
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
975
        ap = (desc >> 10) & 3;
976
        code = 13;
977
    } else {
978
        /* Lookup l2 entry.  */
979
        if (type == 1) {
980
            /* Coarse pagetable.  */
981
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
982
        } else {
983
            /* Fine pagetable.  */
984
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
985
        }
986
        desc = ldl_phys(table);
987
        switch (desc & 3) {
988
        case 0: /* Page translation fault.  */
989
            code = 7;
990
            goto do_fault;
991
        case 1: /* 64k page.  */
992
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
993
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
994
            break;
995
        case 2: /* 4k page.  */
996
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
997
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
998
            break;
999
        case 3: /* 1k page.  */
1000
            if (type == 1) {
1001
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1002
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1003
                } else {
1004
                    /* Page translation fault.  */
1005
                    code = 7;
1006
                    goto do_fault;
1007
                }
1008
            } else {
1009
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1010
            }
1011
            ap = (desc >> 4) & 3;
1012
            break;
1013
        default:
1014
            /* Never happens, but compiler isn't smart enough to tell.  */
1015
            abort();
1016
        }
1017
        code = 15;
1018
    }
1019
    *prot = check_ap(env, ap, domain, access_type, is_user);
1020
    if (!*prot) {
1021
        /* Access permission fault.  */
1022
        goto do_fault;
1023
    }
1024
    *phys_ptr = phys_addr;
1025
    return 0;
1026
do_fault:
1027
    return code | (domain << 4);
1028
}
1029

    
1030
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
1031
                            int is_user, uint32_t *phys_ptr, int *prot)
1032
{
1033
    int code;
1034
    uint32_t table;
1035
    uint32_t desc;
1036
    uint32_t xn;
1037
    int type;
1038
    int ap;
1039
    int domain;
1040
    uint32_t phys_addr;
1041

    
1042
    /* Pagetable walk.  */
1043
    /* Lookup l1 descriptor.  */
1044
    table = get_level1_table_address(env, address);
1045
    desc = ldl_phys(table);
1046
    type = (desc & 3);
1047
    if (type == 0) {
1048
        /* Section translation fault.  */
1049
        code = 5;
1050
        domain = 0;
1051
        goto do_fault;
1052
    } else if (type == 2 && (desc & (1 << 18))) {
1053
        /* Supersection.  */
1054
        domain = 0;
1055
    } else {
1056
        /* Section or page.  */
1057
        domain = (desc >> 4) & 0x1e;
1058
    }
1059
    domain = (env->cp15.c3 >> domain) & 3;
1060
    if (domain == 0 || domain == 2) {
1061
        if (type == 2)
1062
            code = 9; /* Section domain fault.  */
1063
        else
1064
            code = 11; /* Page domain fault.  */
1065
        goto do_fault;
1066
    }
1067
    if (type == 2) {
1068
        if (desc & (1 << 18)) {
1069
            /* Supersection.  */
1070
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1071
        } else {
1072
            /* Section.  */
1073
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1074
        }
1075
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1076
        xn = desc & (1 << 4);
1077
        code = 13;
1078
    } else {
1079
        /* Lookup l2 entry.  */
1080
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1081
        desc = ldl_phys(table);
1082
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1083
        switch (desc & 3) {
1084
        case 0: /* Page translation fault.  */
1085
            code = 7;
1086
            goto do_fault;
1087
        case 1: /* 64k page.  */
1088
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1089
            xn = desc & (1 << 15);
1090
            break;
1091
        case 2: case 3: /* 4k page.  */
1092
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1093
            xn = desc & 1;
1094
            break;
1095
        default:
1096
            /* Never happens, but compiler isn't smart enough to tell.  */
1097
            abort();
1098
        }
1099
        code = 15;
1100
    }
1101
    if (xn && access_type == 2)
1102
        goto do_fault;
1103

    
1104
    /* The simplified model uses AP[0] as an access control bit.  */
1105
    if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
1106
        /* Access flag fault.  */
1107
        code = (code == 15) ? 6 : 3;
1108
        goto do_fault;
1109
    }
1110
    *prot = check_ap(env, ap, domain, access_type, is_user);
1111
    if (!*prot) {
1112
        /* Access permission fault.  */
1113
        goto do_fault;
1114
    }
1115
    *phys_ptr = phys_addr;
1116
    return 0;
1117
do_fault:
1118
    return code | (domain << 4);
1119
}
1120

    
1121
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
1122
                             int is_user, uint32_t *phys_ptr, int *prot)
1123
{
1124
    int n;
1125
    uint32_t mask;
1126
    uint32_t base;
1127

    
1128
    *phys_ptr = address;
1129
    for (n = 7; n >= 0; n--) {
1130
        base = env->cp15.c6_region[n];
1131
        if ((base & 1) == 0)
1132
            continue;
1133
        mask = 1 << ((base >> 1) & 0x1f);
1134
        /* Keep this shift separate from the above to avoid an
1135
           (undefined) << 32.  */
1136
        mask = (mask << 1) - 1;
1137
        if (((base ^ address) & ~mask) == 0)
1138
            break;
1139
    }
1140
    if (n < 0)
1141
        return 2;
1142

    
1143
    if (access_type == 2) {
1144
        mask = env->cp15.c5_insn;
1145
    } else {
1146
        mask = env->cp15.c5_data;
1147
    }
1148
    mask = (mask >> (n * 4)) & 0xf;
1149
    switch (mask) {
1150
    case 0:
1151
        return 1;
1152
    case 1:
1153
        if (is_user)
1154
          return 1;
1155
        *prot = PAGE_READ | PAGE_WRITE;
1156
        break;
1157
    case 2:
1158
        *prot = PAGE_READ;
1159
        if (!is_user)
1160
            *prot |= PAGE_WRITE;
1161
        break;
1162
    case 3:
1163
        *prot = PAGE_READ | PAGE_WRITE;
1164
        break;
1165
    case 5:
1166
        if (is_user)
1167
            return 1;
1168
        *prot = PAGE_READ;
1169
        break;
1170
    case 6:
1171
        *prot = PAGE_READ;
1172
        break;
1173
    default:
1174
        /* Bad permission.  */
1175
        return 1;
1176
    }
1177
    return 0;
1178
}
1179

    
1180
static inline int get_phys_addr(CPUState *env, uint32_t address,
1181
                                int access_type, int is_user,
1182
                                uint32_t *phys_ptr, int *prot)
1183
{
1184
    /* Fast Context Switch Extension.  */
1185
    if (address < 0x02000000)
1186
        address += env->cp15.c13_fcse;
1187

    
1188
    if ((env->cp15.c1_sys & 1) == 0) {
1189
        /* MMU/MPU disabled.  */
1190
        *phys_ptr = address;
1191
        *prot = PAGE_READ | PAGE_WRITE;
1192
        return 0;
1193
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1194
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1195
                                 prot);
1196
    } else if (env->cp15.c1_sys & (1 << 23)) {
1197
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1198
                                prot);
1199
    } else {
1200
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1201
                                prot);
1202
    }
1203
}
1204

    
1205
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
1206
                              int access_type, int mmu_idx, int is_softmmu)
1207
{
1208
    uint32_t phys_addr;
1209
    int prot;
1210
    int ret, is_user;
1211

    
1212
    is_user = mmu_idx == MMU_USER_IDX;
1213
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
1214
    if (ret == 0) {
1215
        /* Map a single [sub]page.  */
1216
        phys_addr &= ~(uint32_t)0x3ff;
1217
        address &= ~(uint32_t)0x3ff;
1218
        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
1219
                             is_softmmu);
1220
    }
1221

    
1222
    if (access_type == 2) {
1223
        env->cp15.c5_insn = ret;
1224
        env->cp15.c6_insn = address;
1225
        env->exception_index = EXCP_PREFETCH_ABORT;
1226
    } else {
1227
        env->cp15.c5_data = ret;
1228
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1229
            env->cp15.c5_data |= (1 << 11);
1230
        env->cp15.c6_data = address;
1231
        env->exception_index = EXCP_DATA_ABORT;
1232
    }
1233
    return 1;
1234
}
1235

    
1236
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1237
{
1238
    uint32_t phys_addr;
1239
    int prot;
1240
    int ret;
1241

    
1242
    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
1243

    
1244
    if (ret != 0)
1245
        return -1;
1246

    
1247
    return phys_addr;
1248
}
1249

    
1250
/* Not really implemented.  Need to figure out a sane way of doing this.
1251
   Maybe add generic watchpoint support and use that.  */
1252

    
1253
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
1254
{
1255
    env->mmon_addr = addr;
1256
}
1257

    
1258
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
1259
{
1260
    return (env->mmon_addr != addr);
1261
}
1262

    
1263
void HELPER(clrex)(CPUState *env)
1264
{
1265
    env->mmon_addr = -1;
1266
}
1267

    
1268
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
1269
{
1270
    int cp_num = (insn >> 8) & 0xf;
1271
    int cp_info = (insn >> 5) & 7;
1272
    int src = (insn >> 16) & 0xf;
1273
    int operand = insn & 0xf;
1274

    
1275
    if (env->cp[cp_num].cp_write)
1276
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1277
                                 cp_info, src, operand, val);
1278
}
1279

    
1280
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
1281
{
1282
    int cp_num = (insn >> 8) & 0xf;
1283
    int cp_info = (insn >> 5) & 7;
1284
    int dest = (insn >> 16) & 0xf;
1285
    int operand = insn & 0xf;
1286

    
1287
    if (env->cp[cp_num].cp_read)
1288
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1289
                                       cp_info, dest, operand);
1290
    return 0;
1291
}
1292

    
1293
/* Return basic MPU access permission bits.  */
1294
static uint32_t simple_mpu_ap_bits(uint32_t val)
1295
{
1296
    uint32_t ret;
1297
    uint32_t mask;
1298
    int i;
1299
    ret = 0;
1300
    mask = 3;
1301
    for (i = 0; i < 16; i += 2) {
1302
        ret |= (val >> i) & mask;
1303
        mask <<= 2;
1304
    }
1305
    return ret;
1306
}
1307

    
1308
/* Pad basic MPU access permission bits to extended format.  */
1309
static uint32_t extended_mpu_ap_bits(uint32_t val)
1310
{
1311
    uint32_t ret;
1312
    uint32_t mask;
1313
    int i;
1314
    ret = 0;
1315
    mask = 3;
1316
    for (i = 0; i < 16; i += 2) {
1317
        ret |= (val & mask) << i;
1318
        mask <<= 2;
1319
    }
1320
    return ret;
1321
}
1322

    
1323
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
1324
{
1325
    int op1;
1326
    int op2;
1327
    int crm;
1328

    
1329
    op1 = (insn >> 21) & 7;
1330
    op2 = (insn >> 5) & 7;
1331
    crm = insn & 0xf;
1332
    switch ((insn >> 16) & 0xf) {
1333
    case 0:
1334
        /* ID codes.  */
1335
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1336
            break;
1337
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1338
            break;
1339
        if (arm_feature(env, ARM_FEATURE_V7)
1340
                && op1 == 2 && crm == 0 && op2 == 0) {
1341
            env->cp15.c0_cssel = val & 0xf;
1342
            break;
1343
        }
1344
        goto bad_reg;
1345
    case 1: /* System configuration.  */
1346
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1347
            op2 = 0;
1348
        switch (op2) {
1349
        case 0:
1350
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1351
                env->cp15.c1_sys = val;
1352
            /* ??? Lots of these bits are not implemented.  */
1353
            /* This may enable/disable the MMU, so do a TLB flush.  */
1354
            tlb_flush(env, 1);
1355
            break;
1356
        case 1: /* Auxiliary cotrol register.  */
1357
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1358
                env->cp15.c1_xscaleauxcr = val;
1359
                break;
1360
            }
1361
            /* Not implemented.  */
1362
            break;
1363
        case 2:
1364
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1365
                goto bad_reg;
1366
            if (env->cp15.c1_coproc != val) {
1367
                env->cp15.c1_coproc = val;
1368
                /* ??? Is this safe when called from within a TB?  */
1369
                tb_flush(env);
1370
            }
1371
            break;
1372
        default:
1373
            goto bad_reg;
1374
        }
1375
        break;
1376
    case 2: /* MMU Page table control / MPU cache control.  */
1377
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1378
            switch (op2) {
1379
            case 0:
1380
                env->cp15.c2_data = val;
1381
                break;
1382
            case 1:
1383
                env->cp15.c2_insn = val;
1384
                break;
1385
            default:
1386
                goto bad_reg;
1387
            }
1388
        } else {
1389
            switch (op2) {
1390
            case 0:
1391
                env->cp15.c2_base0 = val;
1392
                break;
1393
            case 1:
1394
                env->cp15.c2_base1 = val;
1395
                break;
1396
            case 2:
1397
                val &= 7;
1398
                env->cp15.c2_control = val;
1399
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1400
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1401
                break;
1402
            default:
1403
                goto bad_reg;
1404
            }
1405
        }
1406
        break;
1407
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1408
        env->cp15.c3 = val;
1409
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1410
        break;
1411
    case 4: /* Reserved.  */
1412
        goto bad_reg;
1413
    case 5: /* MMU Fault status / MPU access permission.  */
1414
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1415
            op2 = 0;
1416
        switch (op2) {
1417
        case 0:
1418
            if (arm_feature(env, ARM_FEATURE_MPU))
1419
                val = extended_mpu_ap_bits(val);
1420
            env->cp15.c5_data = val;
1421
            break;
1422
        case 1:
1423
            if (arm_feature(env, ARM_FEATURE_MPU))
1424
                val = extended_mpu_ap_bits(val);
1425
            env->cp15.c5_insn = val;
1426
            break;
1427
        case 2:
1428
            if (!arm_feature(env, ARM_FEATURE_MPU))
1429
                goto bad_reg;
1430
            env->cp15.c5_data = val;
1431
            break;
1432
        case 3:
1433
            if (!arm_feature(env, ARM_FEATURE_MPU))
1434
                goto bad_reg;
1435
            env->cp15.c5_insn = val;
1436
            break;
1437
        default:
1438
            goto bad_reg;
1439
        }
1440
        break;
1441
    case 6: /* MMU Fault address / MPU base/size.  */
1442
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1443
            if (crm >= 8)
1444
                goto bad_reg;
1445
            env->cp15.c6_region[crm] = val;
1446
        } else {
1447
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1448
                op2 = 0;
1449
            switch (op2) {
1450
            case 0:
1451
                env->cp15.c6_data = val;
1452
                break;
1453
            case 1: /* ??? This is WFAR on armv6 */
1454
            case 2:
1455
                env->cp15.c6_insn = val;
1456
                break;
1457
            default:
1458
                goto bad_reg;
1459
            }
1460
        }
1461
        break;
1462
    case 7: /* Cache control.  */
1463
        env->cp15.c15_i_max = 0x000;
1464
        env->cp15.c15_i_min = 0xff0;
1465
        /* No cache, so nothing to do.  */
1466
        /* ??? MPCore has VA to PA translation functions.  */
1467
        break;
1468
    case 8: /* MMU TLB control.  */
1469
        switch (op2) {
1470
        case 0: /* Invalidate all.  */
1471
            tlb_flush(env, 0);
1472
            break;
1473
        case 1: /* Invalidate single TLB entry.  */
1474
#if 0
1475
            /* ??? This is wrong for large pages and sections.  */
1476
            /* As an ugly hack to make linux work we always flush a 4K
1477
               pages.  */
1478
            val &= 0xfffff000;
1479
            tlb_flush_page(env, val);
1480
            tlb_flush_page(env, val + 0x400);
1481
            tlb_flush_page(env, val + 0x800);
1482
            tlb_flush_page(env, val + 0xc00);
1483
#else
1484
            tlb_flush(env, 1);
1485
#endif
1486
            break;
1487
        case 2: /* Invalidate on ASID.  */
1488
            tlb_flush(env, val == 0);
1489
            break;
1490
        case 3: /* Invalidate single entry on MVA.  */
1491
            /* ??? This is like case 1, but ignores ASID.  */
1492
            tlb_flush(env, 1);
1493
            break;
1494
        default:
1495
            goto bad_reg;
1496
        }
1497
        break;
1498
    case 9:
1499
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1500
            break;
1501
        switch (crm) {
1502
        case 0: /* Cache lockdown.  */
1503
            switch (op1) {
1504
            case 0: /* L1 cache.  */
1505
                switch (op2) {
1506
                case 0:
1507
                    env->cp15.c9_data = val;
1508
                    break;
1509
                case 1:
1510
                    env->cp15.c9_insn = val;
1511
                    break;
1512
                default:
1513
                    goto bad_reg;
1514
                }
1515
                break;
1516
            case 1: /* L2 cache.  */
1517
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1518
                break;
1519
            default:
1520
                goto bad_reg;
1521
            }
1522
            break;
1523
        case 1: /* TCM memory region registers.  */
1524
            /* Not implemented.  */
1525
            goto bad_reg;
1526
        default:
1527
            goto bad_reg;
1528
        }
1529
        break;
1530
    case 10: /* MMU TLB lockdown.  */
1531
        /* ??? TLB lockdown not implemented.  */
1532
        break;
1533
    case 12: /* Reserved.  */
1534
        goto bad_reg;
1535
    case 13: /* Process ID.  */
1536
        switch (op2) {
1537
        case 0:
1538
            /* Unlike real hardware the qemu TLB uses virtual addresses,
1539
               not modified virtual addresses, so this causes a TLB flush.
1540
             */
1541
            if (env->cp15.c13_fcse != val)
1542
              tlb_flush(env, 1);
1543
            env->cp15.c13_fcse = val;
1544
            break;
1545
        case 1:
1546
            /* This changes the ASID, so do a TLB flush.  */
1547
            if (env->cp15.c13_context != val
1548
                && !arm_feature(env, ARM_FEATURE_MPU))
1549
              tlb_flush(env, 0);
1550
            env->cp15.c13_context = val;
1551
            break;
1552
        case 2:
1553
            env->cp15.c13_tls1 = val;
1554
            break;
1555
        case 3:
1556
            env->cp15.c13_tls2 = val;
1557
            break;
1558
        case 4:
1559
            env->cp15.c13_tls3 = val;
1560
            break;
1561
        default:
1562
            goto bad_reg;
1563
        }
1564
        break;
1565
    case 14: /* Reserved.  */
1566
        goto bad_reg;
1567
    case 15: /* Implementation specific.  */
1568
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1569
            if (op2 == 0 && crm == 1) {
1570
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1571
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1572
                    tb_flush(env);
1573
                    env->cp15.c15_cpar = val & 0x3fff;
1574
                }
1575
                break;
1576
            }
1577
            goto bad_reg;
1578
        }
1579
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1580
            switch (crm) {
1581
            case 0:
1582
                break;
1583
            case 1: /* Set TI925T configuration.  */
1584
                env->cp15.c15_ticonfig = val & 0xe7;
1585
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1586
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1587
                break;
1588
            case 2: /* Set I_max.  */
1589
                env->cp15.c15_i_max = val;
1590
                break;
1591
            case 3: /* Set I_min.  */
1592
                env->cp15.c15_i_min = val;
1593
                break;
1594
            case 4: /* Set thread-ID.  */
1595
                env->cp15.c15_threadid = val & 0xffff;
1596
                break;
1597
            case 8: /* Wait-for-interrupt (deprecated).  */
1598
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1599
                break;
1600
            default:
1601
                goto bad_reg;
1602
            }
1603
        }
1604
        break;
1605
    }
1606
    return;
1607
bad_reg:
1608
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1609
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1610
              (insn >> 16) & 0xf, crm, op1, op2);
1611
}
1612

    
1613
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        switch (op1) {
        case 0:
            switch (crm) {
            case 0:
                switch (op2) {
                case 0: /* Device ID.  */
                    return env->cp15.c0_cpuid;
                case 1: /* Cache Type.  */
                    return env->cp15.c0_cachetype;
                case 2: /* TCM status.  */
                    return 0;
                case 3: /* TLB type register.  */
                    return 0; /* No lockable TLB entries.  */
                case 5: /* CPU ID */
                    return env->cpu_index;
                default:
                    goto bad_reg;
                }
            case 1:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c1[op2];
            case 2:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c2[op2];
            case 3: case 4: case 5: case 6: case 7:
                return 0;
            default:
                goto bad_reg;
            }
        case 1:
            /* These registers aren't documented on arm11 cores.  However
               Linux looks at them anyway.  */
            if (!arm_feature(env, ARM_FEATURE_V6))
                goto bad_reg;
            if (crm != 0)
                goto bad_reg;
            if (!arm_feature(env, ARM_FEATURE_V7))
                return 0;

            switch (op2) {
            case 0:
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
            case 1:
                return env->cp15.c0_clid;
            case 7:
                return 0;
            }
            goto bad_reg;
        case 2:
            if (op2 != 0 || crm != 0)
                goto bad_reg;
            return env->cp15.c0_cssel;
        default:
            goto bad_reg;
        }
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0: /* Control register.  */
            return env->cp15.c1_sys;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                return env->cp15.c1_xscaleauxcr;
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
                goto bad_reg;
            switch (ARM_CPUID(env)) {
            case ARM_CPUID_ARM1026:
                return 1;
            case ARM_CPUID_ARM1136:
            case ARM_CPUID_ARM1136_R2:
                return 7;
            case ARM_CPUID_ARM11MPCORE:
                return 1;
            case ARM_CPUID_CORTEXA8:
                return 2;
            default:
                goto bad_reg;
            }
        case 2: /* Coprocessor access register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            return env->cp15.c1_coproc;
        default:
            goto bad_reg;
        }
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                return env->cp15.c2_data;
                break;
            case 1:
                return env->cp15.c2_insn;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                return env->cp15.c2_base0;
            case 1:
                return env->cp15.c2_base1;
            case 2:
                return env->cp15.c2_control;
            default:
                goto bad_reg;
            }
        }
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        return env->cp15.c3;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_data);
            return env->cp15.c5_data;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_insn);
            return env->cp15.c5_insn;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_data;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_insn;
        default:
            goto bad_reg;
        }
    case 6: /* MMU Fault address.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            return env->cp15.c6_region[crm];
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                return env->cp15.c6_data;
            case 1:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Watchpoint Fault Address.  */
                    return 0; /* Not implemented.  */
                } else {
                    /* Instruction Fault Address.  */
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
                       shouldn't do any harm.  */
                    return env->cp15.c6_insn;
                }
            case 2:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Instruction Fault Address.  */
                    return env->cp15.c6_insn;
                } else {
                    goto bad_reg;
                }
            default:
                goto bad_reg;
            }
        }
    case 7: /* Cache control.  */
        /* FIXME: Should only clear Z flag if destination is r15.  */
        env->ZF = 0;
        return 0;
    case 8: /* MMU TLB control.  */
        goto bad_reg;
    case 9: /* Cache lockdown.  */
        switch (op1) {
        case 0: /* L1 cache.  */
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                return 0;
            switch (op2) {
            case 0:
                return env->cp15.c9_data;
            case 1:
                return env->cp15.c9_insn;
            default:
                goto bad_reg;
            }
        case 1: /* L2 cache */
            if (crm != 0)
                goto bad_reg;
            /* L2 Lockdown and Auxiliary control.  */
            return 0;
        default:
            goto bad_reg;
        }
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        return 0;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            return env->cp15.c13_fcse;
        case 1:
            return env->cp15.c13_context;
        case 2:
            return env->cp15.c13_tls1;
        case 3:
            return env->cp15.c13_tls2;
        case 4:
            return env->cp15.c13_tls3;
        default:
            goto bad_reg;
        }
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1)
                return env->cp15.c15_cpar;

            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                return 0;
            case 1: /* Read TI925T configuration.  */
                return env->cp15.c15_ticonfig;
            case 2: /* Read I_max.  */
                return env->cp15.c15_i_max;
            case 3: /* Read I_min.  */
                return env->cp15.c15_i_min;
            case 4: /* Read thread-ID.  */
                return env->cp15.c15_threadid;
            case 8: /* TI925T_status */
                return 0;
            }
            /* TODO: Peripheral port remap register:
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
             * controller base address at $rn & ~0xfff and map size of
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
            goto bad_reg;
        }
        return 0;
    }
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
    return 0;
}

void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    env->banked_r13[bank_number(mode)] = val;
}

uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    return env->banked_r13[bank_number(mode)];
}

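/* v7-M MRS/MSR special register access.  "reg" is the SYSm value from the
   instruction encoding; the masks below select the APSR/IPSR/EPSR bit
   groups of the combined xPSR, and the MSP/PSP cases depend on which stack
   pointer is currently live in regs[13] (tracked by v7m.current_sp).  */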
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 18: /* BASEPRI */
    case 19: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 18: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 19: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

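/* Register board/device specific coprocessor read/write callbacks.  Only
   cp0..cp14 can be hooked this way; cp15 is handled by the helpers above.  */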
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
                void *opaque)
{
    if (cpnum < 0 || cpnum > 14) {
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
        return;
    }

    env->cp[cpnum].cp_read = cp_read;
    env->cp[cpnum].cp_write = cp_write;
    env->cp[cpnum].opaque = opaque;
}

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

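/* The saturating helpers detect signed overflow with the usual bit trick:
   overflow occurred iff both operands have the same sign but the result's
   sign differs from operand a, i.e.
   ((res ^ a) & sign_bit) && !((a ^ b) & sign_bit) for addition.
   For example 0x7fff + 0x0001 wraps to 0x8000, the test fires and the
   result is clamped to 0x7fff; 0x8000 + 0xffff is clamped to 0x8000.  */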
/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

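/* The parallel add/subtract helpers (qadd16, uqsub8, shadd16, ...) are
   generated by re-including op_addsub.h once per variant: each block below
   defines ADD16/SUB16/ADD8/SUB8 to the per-lane operation and PFX to the
   helper name prefix (q, uq, s, u, sh, uh) before the #include, and the
   variants that update the CPSR GE bits also define ARITH_GE.  */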
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

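/* For the plain (modulo) signed SADD16/SSUB16 family the CPSR GE bits
   record, per lane, whether the signed result was >= 0: two GE bits per
   16-bit lane, one per 8-bit lane.  For the unsigned family further below,
   GE is instead set from the carry out of each addition and from the
   absence of a borrow on each subtraction.  */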
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

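/* The halving (SHADD/UHADD style) variants compute the sum or difference
   at full precision and shift right by one, so the result always fits and
   neither saturation nor a GE update is needed.  */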
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

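/* do_usad() returns |a - b| for one byte lane; usad8 below sums it over
   the four byte lanes of each word.  For example, a = 0x01020304 and
   b = 0x04030201 give 3 + 1 + 1 + 3 = 8.  */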
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

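/* "flags" holds the four GE bits: each set bit selects the corresponding
   byte of the result from operand a, each clear bit selects it from b.  */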
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}

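/* Fold a 64-bit logical result into a 32-bit value for flag setting: the
   sign bit of the returned value equals bit 63 of the input, and the
   returned value is zero only if the full 64-bit value is zero.  */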
uint32_t HELPER(logicq_cc)(uint64_t val)
{
    return (val >> 32) | (val != 0);
}

/* VFP support.  We follow the convention used for VFP instructions:
   Single-precision routines have an "s" suffix, double-precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & float_flag_underflow)
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    return target_bits;
}

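/* FPSCR layout handled here: NZCV in bits [31:28], DN/FZ and the rounding
   mode in [25:22], vector stride in [21:20], vector length in [18:16] and
   the cumulative exception flags (IOC/DZC/OFC/UFC/IXC) in bits [4:0].
   vec_len and vec_stride live in separate CPU state fields, so they are
   masked out of xregs[ARM_VFP_FPSCR] and merged back in on reads.  */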
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

/* Convert vfp exception flags to host form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24))
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
    set_float_exception_flags(i, &env->vfp.fp_status);
}

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
{ \
    return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
{ \
    return float64_ ## name (a, b, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

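/* The comparison helpers map the softfloat result onto the FPSCR NZCV
   flags: equal -> 0110 (0x6), less than -> 1000 (0x8), greater than ->
   0010 (0x2), unordered -> 0011 (0x3), written to FPSCR[31:28].  */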
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp

/* Helper routines to perform bitwise copies between float and int.  */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 vfp_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t vfp_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}

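/* Note: in the conversions below a single-precision argument or return
   value may actually carry a raw integer bit pattern (moved in and out
   with vfp_stoi/vfp_itos above), since the integer lives in a VFP single
   register.  */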
/* Integer to float conversion.  */
float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
{
    return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
{
    return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
{
    return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
{
    return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

/* Float to integer conversion.  */
float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  */
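/* VFP_CONV_FIX(name, p, ftype, itype, sign) expands to a pair of helpers;
   for example VFP_CONV_FIX(sl, s, float32, int32, ) yields vfp_sltos
   (fixed point to float: reinterpret the register bits as a signed
   integer, convert, then scale by 2^-shift) and vfp_tosls (float to fixed
   point: scale by 2^shift, then convert with round-to-zero).  */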
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
                                  &env->vfp.fp_status); \
    return ftype##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \
} \
ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
    return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
        &env->vfp.fp_status)); \
}

VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
#undef VFP_CONV_FIX

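/* recps_f32 implements the VRECPS step, 2 - a*b, used to refine a
   reciprocal estimate by Newton-Raphson iteration (x' = x * (2 - d*x));
   rsqrts_f32 provides the corresponding step for the reciprocal square
   root and, as written here, returns 3 - a*b.  */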
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    return float32_sub(two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 three = int32_to_float32(3, s);
    return float32_sub(three, float32_mul(a, b, s), s);
}

/* NEON helpers.  */

/* TODO: The architecture specifies the value that the estimate functions
   should return.  We return the exact reciprocal/root instead.  */
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, a, s);
}

float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, float32_sqrt(a, s), s);
}

uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_recpe_f32(tmp, env);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_rsqrte_f32(tmp, env);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}

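/* Translated code depends on the TEECR value, so any change must
   invalidate the cached translation blocks.  */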
void HELPER(set_teecr)(CPUState *env, uint32_t val)
{
    val &= 1;
    if (env->teecr != val) {
        env->teecr = val;
        tb_flush(env);
    }
}