/* target-arm/helper.c @ 85df3786 */
#include "cpu.h"
#include "gdbstub.h"
#include "helper.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "sysemu.h"

static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    switch (id) {
    case ARM_CPUID_ARM926:
        break;
    case ARM_CPUID_ARM946:
        break;
    case ARM_CPUID_ARM1026:
        break;
    case ARM_CPUID_ARM1136:
        /* This is the 1136 r1, which is a v6K core */
    case ARM_CPUID_ARM1136_R2:
        break;
    case ARM_CPUID_ARM1176:
        break;
    case ARM_CPUID_ARM11MPCORE:
        break;
    case ARM_CPUID_CORTEXA8:
        break;
    case ARM_CPUID_CORTEXA9:
        break;
    case ARM_CPUID_CORTEXA15:
        break;
    case ARM_CPUID_CORTEXM3:
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        break;
    case ARM_CPUID_SA1100:
    case ARM_CPUID_SA1110:
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }

}

/* TODO Move contents into arm_cpu_reset() in cpu.c,
 *      once cpu_reset_model_id() is eliminated,
 *      and then forward to cpu_reset() here.
 */
void cpu_state_reset(CPUARMState *env)
{
    uint32_t id;
    uint32_t tmp = 0;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    id = cpu->midr;
    tmp = env->cp15.c15_config_base_address;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
    env->cp15.c15_config_base_address = tmp;
    env->cp15.c0_cpuid = cpu->midr;
    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
    env->cp15.c0_cachetype = cpu->ctr;
    env->cp15.c1_sys = cpu->reset_sctlr;
    env->cp15.c0_c1[0] = cpu->id_pfr0;
    env->cp15.c0_c1[1] = cpu->id_pfr1;
    env->cp15.c0_c1[2] = cpu->id_dfr0;
    env->cp15.c0_c1[3] = cpu->id_afr0;
    env->cp15.c0_c1[4] = cpu->id_mmfr0;
    env->cp15.c0_c1[5] = cpu->id_mmfr1;
    env->cp15.c0_c1[6] = cpu->id_mmfr2;
    env->cp15.c0_c1[7] = cpu->id_mmfr3;
    env->cp15.c0_c2[0] = cpu->id_isar0;
    env->cp15.c0_c2[1] = cpu->id_isar1;
    env->cp15.c0_c2[2] = cpu->id_isar2;
    env->cp15.c0_c2[3] = cpu->id_isar3;
    env->cp15.c0_c2[4] = cpu->id_isar4;
    env->cp15.c0_c2[5] = cpu->id_isar5;
    env->cp15.c15_i_min = 0xff0;
    env->cp15.c0_clid = cpu->clidr;
    memcpy(env->cp15.c0_ccsid, cpu->ccsidr, ARRAY_SIZE(cpu->ccsidr));

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  Initial SP and PC are loaded from ROM.  */
    if (IS_M(env)) {
        uint32_t pc;
        uint8_t *rom;
        env->uncached_cpsr &= ~CPSR_I;
        rom = rom_ptr(0);
        if (rom) {
            /* We should really use ldl_phys here, in case the guest
               modified flash and reset itself.  However images
               loaded via -kernel have not been copied yet, so load the
               values directly from there.  */
            env->regs[13] = ldl_p(rom);
            pc = ldl_p(rom + 4);
            env->thumb = pc & 1;
            env->regs[15] = pc & ~1;
        }
    }
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
    /* v7 performance monitor control register: same implementor
     * field as main ID register, and we implement no event counters.
     */
    env->cp15.c9_pmcr = (id & 0xff000000);
#endif
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    tlb_flush(env, 1);
    /* Reset is a state change for some CPUARMState fields which we
     * bake assumptions about into translated code, so we need to
     * tb_flush().
     */
    tb_flush(env);
}

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
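
/* Note on the gdb register layout used by the two accessors above: the
 * packet is D0..D15 (or D0..D31 with VFP3), each 8 bytes little-endian,
 * optionally followed by 16 quad-register aliases when NEON is present,
 * and always terminated by FPSID, FPSCR and FPEXC.  That is where the
 * 19 (16+3), 35 (32+3) and 51 (32+16+3) register counts passed to
 * gdb_register_coprocessor() in cpu_arm_init() below come from.
 */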

CPUARMState *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    CPUARMState *env;
    static int inited = 0;

    if (!object_class_by_name(cpu_model)) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(cpu_model));
    env = &cpu->env;
    env->cpu_model_str = cpu_model;
    arm_cpu_realize(cpu);

    if (tcg_enabled() && !inited) {
        inited = 1;
        arm_translate_init();
    }

    cpu_state_reset(env);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}
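
/* Illustrative usage sketch only; the model name is whatever the board or
 * command line selects (e.g. "cortex-a8" or "any"):
 *
 *     CPUARMState *env = cpu_arm_init("cortex-a8");
 *     if (!env) {
 *         // the name did not resolve to an ARM CPU class
 *     }
 */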

typedef struct ARMCPUListState {
    fprintf_function cpu_fprintf;
    FILE *file;
} ARMCPUListState;

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any") == 0) {
        return 1;
    } else if (strcmp(name_b, "any") == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    ARMCPUListState *s = user_data;

    (*s->cpu_fprintf)(s->file, "  %s\n",
                      object_class_get_name(oc));
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    ARMCPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}

static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
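
/* For reference, the CPSR layout assumed by cpsr_read()/cpsr_write() above:
 *
 *   31 30 29 28 27  26-25   24   23-20  19-16  15-10   9 8 7 6 5   4-0
 *    N  Z  C  V  Q  IT[1:0]  J   resvd   GE   IT[7:2]  E A I F T    M
 *
 * N, Z, C, V, Q and GE live in their dedicated CPUARMState fields, the IT
 * bits in condexec_bits and T in env->thumb; everything else stays in
 * uncached_cpsr, with CACHED_CPSR_BITS masking off the cached bits on write.
 */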

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
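
/* Worked examples for the two helpers above (bytes 1 and 3 are ignored):
 *   sxtb16(0x00800080) == 0xff80ff80   (bytes 0 and 2 sign-extended to 16 bits)
 *   uxtb16(0x12345678) == 0x00340078   (bytes 0 and 2 zero-extended to 16 bits)
 */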

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
      return 0;
    if (num == INT_MIN && den == -1)
      return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}
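
/* The guards in the two divide helpers above pin down the cases that are
 * undefined behaviour in C: division by zero yields 0, and the one signed
 * overflow case, INT_MIN / -1, yields INT_MIN (0x80000000), matching the
 * integer divide behaviour modelled here.
 */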

uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
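
/* HELPER(rbit) reverses the bit order in three steps: a byte swap, then a
 * nibble swap within each byte, then a bit swap within each nibble.
 * For example rbit(0x00000001) == 0x80000000 and rbit(0xf0000000) == 0x0000000f.
 */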

uint32_t HELPER(abs)(uint32_t x)
{
    return ((int32_t)x < 0) ? -x : x;
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUARMState *env)
{
    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return;
}

uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;
}

void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(CPUARMState *env, int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(env, "Bad mode %x\n", mode);
    return -1;
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(env, old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(env, mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}

static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
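
/* Summary of the v7-M entry/exit sequence implemented above: on exception
 * entry an eight-word frame is pushed (r0, r1, r2, r3, r12, lr, pc, xPSR,
 * from the lowest address up) and lr is loaded with a magic return value:
 * 0xfffffff1 when a handler was preempted, 0xfffffff9 when thread mode on
 * the main stack was preempted, 0xfffffffd when thread mode on the process
 * stack was preempted.  do_v7m_exception_exit() picks that value back up
 * from env->regs[15], selects the stack with bit 2, and pops the same frame.
 */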

/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env->regs[15] - 2, env->bswap_code) & 0xff;
            } else {
                mask = arm_ldl_code(env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
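
/* Vector offsets used by do_interrupt() above: 0x04 undefined instruction,
 * 0x08 SWI/SVC, 0x0c prefetch abort, 0x10 data abort, 0x18 IRQ, 0x1c FIQ.
 * When SCTLR bit 13 (high vectors) is set the table is based at 0xffff0000
 * instead of 0x00000000.
 */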

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
  int prot_ro;

  if (domain_prot == 3) {
    return PAGE_READ | PAGE_WRITE;
  }

  if (access_type == 1)
      prot_ro = 0;
  else
      prot_ro = PAGE_READ;

  switch (ap) {
  case 0:
      if (access_type == 1)
          return 0;
      switch ((env->cp15.c1_sys >> 8) & 3) {
      case 1:
          return is_user ? 0 : PAGE_READ;
      case 2:
          return PAGE_READ;
      default:
          return 0;
      }
  case 1:
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  case 2:
      if (is_user)
          return prot_ro;
      else
          return PAGE_READ | PAGE_WRITE;
  case 3:
      return PAGE_READ | PAGE_WRITE;
  case 4: /* Reserved.  */
      return 0;
  case 5:
      return is_user ? 0 : prot_ro;
  case 6:
      return prot_ro;
  case 7:
      if (!arm_feature (env, ARM_FEATURE_V6K))
          return 0;
      return prot_ro;
  default:
      abort();
  }
}
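
/* The ap values handled above follow the short-descriptor AP[2:0] encoding:
 *   0: no access unless the SCTLR S/R bits (c1_sys bits 8/9) grant read-only
 *   1: privileged read/write, user no access
 *   2: privileged read/write, user read-only
 *   3: read/write at any privilege
 *   4: reserved
 *   5: privileged read-only
 *   6: read-only at any privilege
 *   7: read-only at any privilege (v6K and later only)
 */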

static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}

static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
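
/* On failure the walk above returns a DFSR/IFSR style value: the fault
 * status code in bits [3:0] with the domain number in bits [7:4].  The codes
 * used are 5 (section translation), 7 (page translation), 9/11 (section/page
 * domain) and 13/15 (section/page permission).
 */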

static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    int domain_prot;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
          return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}

static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0)
        return -1;

    return phys_addr;
}

void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    return 0;
}

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
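
/* The two helpers above convert between the "simple" MPU access permission
 * layout (eight 2-bit fields packed into bits [15:0]) and the "extended"
 * layout (the same fields spread out on 4-bit boundaries).  For example,
 * with regions 0..2 set to 2, 1 and 3 respectively:
 *   extended_mpu_ap_bits(0x036) == 0x312
 *   simple_mpu_ap_bits(0x312)   == 0x036
 */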

void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        /* ID codes.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 2 && crm == 0 && op2 == 0) {
            env->cp15.c0_cssel = val & 0xf;
            break;
        }
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 0 && crm == 1 && op2 == 0) {
            env->cp15.c1_scr = val;
            break;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            if (env->cp15.c1_coproc != val) {
                env->cp15.c1_coproc = val;
                /* ??? Is this safe when called from within a TB?  */
                tb_flush(env);
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        if (op1 != 0) {
            goto bad_reg;
        }
        /* No cache, so nothing to do except VA->PA translations. */
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
            switch (crm) {
            case 4:
                if (arm_feature(env, ARM_FEATURE_V7)) {
                    env->cp15.c7_par = val & 0xfffff6ff;
                } else {
                    env->cp15.c7_par = val & 0xfffff1ff;
                }
                break;
            case 8: {
                uint32_t phys_addr;
                target_ulong page_size;
                int prot;
                int ret, is_user = op2 & 2;
                int access_type = op2 & 1;

                if (op2 & 4) {
                    /* Other states are only available with TrustZone */
                    goto bad_reg;
                }
                ret = get_phys_addr(env, val, access_type, is_user,
                                    &phys_addr, &prot, &page_size);
                if (ret == 0) {
                    /* We do not set any attribute bits in the PAR */
                    if (page_size == (1 << 24)
                        && arm_feature(env, ARM_FEATURE_V7)) {
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
                    } else {
                        env->cp15.c7_par = phys_addr & 0xfffff000;
                    }
                } else {
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
                                       ((ret & (12 << 1)) >> 6) |
                                       ((ret & 0xf) << 1) | 1;
                }
                break;
            }
            }
        }
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all (TLBIALL) */
            tlb_flush(env, 1);
            break;
        case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
            break;
        case 2: /* Invalidate by ASID (TLBIASID) */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
            break; /* Ignore ReadBuffer access */
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        case 12: /* Performance monitor control */
            /* Performance monitors are implementation defined in v7,
             * but with an ARM recommended set of registers, which we
             * follow (although we don't actually implement any counters)
             */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* performance monitor control register */
                /* only the DP, X, D and E bits are writable */
                env->cp15.c9_pmcr &= ~0x39;
                env->cp15.c9_pmcr |= (val & 0x39);
                break;
            case 1: /* Count enable set register */
                val &= (1 << 31);
                env->cp15.c9_pmcnten |= val;
                break;
            case 2: /* Count enable clear */
                val &= (1 << 31);
                env->cp15.c9_pmcnten &= ~val;
                break;
            case 3: /* Overflow flag status */
                env->cp15.c9_pmovsr &= ~val;
                break;
            case 4: /* Software increment */
                /* RAZ/WI since we don't implement the software-count event */
                break;
            case 5: /* Event counter selection register */
                /* Since we don't implement any events, writing to this register
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
                 */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 13: /* Performance counters */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
                break;
            case 1: /* Event type select */
                env->cp15.c9_pmxevtyper = val & 0xff;
                break;
            case 2: /* Event count register */
                /* Unimplemented (we have no events), RAZ/WI */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 14: /* Performance monitor control */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* user enable */
                env->cp15.c9_pmuserenr = val & 1;
                /* changes access rights for cp registers, so flush tbs */
                tb_flush(env);
                break;
            case 1: /* interrupt enable set */
                /* We have no event counters so only the C bit can be changed */
                val &= (1 << 31);
                env->cp15.c9_pminten |= val;
                break;
            case 2: /* interrupt enable clear */
                val &= (1 << 31);
                env->cp15.c9_pminten &= ~val;
                break;
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
              tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
              tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Generic timer */
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
            /* Dummy implementation: RAZ/WI for all */
            break;
        }
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration.  */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max.  */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min.  */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID.  */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated).  */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
            switch (crm) {
            case 0:
                if ((op1 == 0) && (op2 == 0)) {
                    env->cp15.c15_power_control = val;
                } else if ((op1 == 0) && (op2 == 1)) {
                    env->cp15.c15_diagnostic = val;
                } else if ((op1 == 0) && (op2 == 2)) {
                    env->cp15.c15_power_diagnostic = val;
                }
            default:
                break;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}
1660

    
1661
uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
1662
{
1663
    int op1;
1664
    int op2;
1665
    int crm;
1666

    
1667
    op1 = (insn >> 21) & 7;
1668
    op2 = (insn >> 5) & 7;
1669
    crm = insn & 0xf;
1670
    switch ((insn >> 16) & 0xf) {
1671
    case 0: /* ID codes.  */
1672
        switch (op1) {
1673
        case 0:
1674
            switch (crm) {
1675
            case 0:
1676
                switch (op2) {
1677
                case 0: /* Device ID.  */
1678
                    return env->cp15.c0_cpuid;
1679
                case 1: /* Cache Type.  */
1680
                    return env->cp15.c0_cachetype;
1681
                case 2: /* TCM status.  */
1682
                    return 0;
1683
                case 3: /* TLB type register.  */
1684
                    return 0; /* No lockable TLB entries.  */
1685
                case 5: /* MPIDR */
1686
                    /* The MPIDR was standardised in v7; prior to
1687
                     * this it was implemented only in the 11MPCore.
1688
                     * For all other pre-v7 cores it does not exist.
1689
                     */
1690
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1691
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1692
                        int mpidr = env->cpu_index;
1693
                        /* We don't support setting cluster ID ([8..11])
1694
                         * so these bits always RAZ.
1695
                         */
1696
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1697
                            mpidr |= (1 << 31);
1698
                            /* Cores which are uniprocessor (non-coherent)
1699
                             * but still implement the MP extensions set
1700
                             * bit 30. (For instance, A9UP.) However we do
1701
                             * not currently model any of those cores.
1702
                             */
1703
                        }
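                        /* Example of the resulting encoding: with the MP
                         * extensions and cpu_index 2 this returns 0x80000002
                         * (bit 31 set, cluster ID bits [11:8] RAZ, Aff0 = 2).
                         */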
1704
                        return mpidr;
1705
                    }
1706
                    /* otherwise fall through to the unimplemented-reg case */
1707
                default:
1708
                    goto bad_reg;
1709
                }
1710
            case 1:
1711
                if (!arm_feature(env, ARM_FEATURE_V6))
1712
                    goto bad_reg;
1713
                return env->cp15.c0_c1[op2];
1714
            case 2:
1715
                if (!arm_feature(env, ARM_FEATURE_V6))
1716
                    goto bad_reg;
1717
                return env->cp15.c0_c2[op2];
1718
            case 3: case 4: case 5: case 6: case 7:
1719
                return 0;
1720
            default:
1721
                goto bad_reg;
1722
            }
1723
        case 1:
1724
            /* These registers aren't documented on arm11 cores.  However,
1725
               Linux looks at them anyway.  */
1726
            if (!arm_feature(env, ARM_FEATURE_V6))
1727
                goto bad_reg;
1728
            if (crm != 0)
1729
                goto bad_reg;
1730
            if (!arm_feature(env, ARM_FEATURE_V7))
1731
                return 0;
1732

    
1733
            switch (op2) {
1734
            case 0:
1735
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1736
            case 1:
1737
                return env->cp15.c0_clid;
1738
            case 7:
1739
                return 0;
1740
            }
1741
            goto bad_reg;
1742
        case 2:
1743
            if (op2 != 0 || crm != 0)
1744
                goto bad_reg;
1745
            return env->cp15.c0_cssel;
1746
        default:
1747
            goto bad_reg;
1748
        }
1749
    case 1: /* System configuration.  */
1750
        if (arm_feature(env, ARM_FEATURE_V7)
1751
            && op1 == 0 && crm == 1 && op2 == 0) {
1752
            return env->cp15.c1_scr;
1753
        }
1754
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1755
            op2 = 0;
1756
        switch (op2) {
1757
        case 0: /* Control register.  */
1758
            return env->cp15.c1_sys;
1759
        case 1: /* Auxiliary control register.  */
1760
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1761
                return env->cp15.c1_xscaleauxcr;
1762
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1763
                goto bad_reg;
1764
            switch (ARM_CPUID(env)) {
1765
            case ARM_CPUID_ARM1026:
1766
                return 1;
1767
            case ARM_CPUID_ARM1136:
1768
            case ARM_CPUID_ARM1136_R2:
1769
            case ARM_CPUID_ARM1176:
1770
                return 7;
1771
            case ARM_CPUID_ARM11MPCORE:
1772
                return 1;
1773
            case ARM_CPUID_CORTEXA8:
1774
                return 2;
1775
            case ARM_CPUID_CORTEXA9:
1776
            case ARM_CPUID_CORTEXA15:
1777
                return 0;
1778
            default:
1779
                goto bad_reg;
1780
            }
1781
        case 2: /* Coprocessor access register.  */
1782
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1783
                goto bad_reg;
1784
            return env->cp15.c1_coproc;
1785
        default:
1786
            goto bad_reg;
1787
        }
1788
    case 2: /* MMU Page table control / MPU cache control.  */
1789
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1790
            switch (op2) {
1791
            case 0:
1792
                return env->cp15.c2_data;
1793
                break;
1794
            case 1:
1795
                return env->cp15.c2_insn;
1796
                break;
1797
            default:
1798
                goto bad_reg;
1799
            }
1800
        } else {
1801
            switch (op2) {
1802
            case 0:
1803
                return env->cp15.c2_base0;
1804
            case 1:
1805
                return env->cp15.c2_base1;
1806
            case 2:
1807
                return env->cp15.c2_control;
1808
            default:
1809
                goto bad_reg;
1810
            }
1811
        }
1812
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1813
        return env->cp15.c3;
1814
    case 4: /* Reserved.  */
1815
        goto bad_reg;
1816
    case 5: /* MMU Fault status / MPU access permission.  */
1817
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1818
            op2 = 0;
1819
        switch (op2) {
1820
        case 0:
1821
            if (arm_feature(env, ARM_FEATURE_MPU))
1822
                return simple_mpu_ap_bits(env->cp15.c5_data);
1823
            return env->cp15.c5_data;
1824
        case 1:
1825
            if (arm_feature(env, ARM_FEATURE_MPU))
1826
                return simple_mpu_ap_bits(env->cp15.c5_insn);
1827
            return env->cp15.c5_insn;
1828
        case 2:
1829
            if (!arm_feature(env, ARM_FEATURE_MPU))
1830
                goto bad_reg;
1831
            return env->cp15.c5_data;
1832
        case 3:
1833
            if (!arm_feature(env, ARM_FEATURE_MPU))
1834
                goto bad_reg;
1835
            return env->cp15.c5_insn;
1836
        default:
1837
            goto bad_reg;
1838
        }
1839
    case 6: /* MMU Fault address.  */
1840
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1841
            if (crm >= 8)
1842
                goto bad_reg;
1843
            return env->cp15.c6_region[crm];
1844
        } else {
1845
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1846
                op2 = 0;
1847
            switch (op2) {
1848
            case 0:
1849
                return env->cp15.c6_data;
1850
            case 1:
1851
                if (arm_feature(env, ARM_FEATURE_V6)) {
1852
                    /* Watchpoint Fault Address.  */
1853
                    return 0; /* Not implemented.  */
1854
                } else {
1855
                    /* Instruction Fault Address.  */
1856
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1857
                       shouldn't do any harm.  */
1858
                    return env->cp15.c6_insn;
1859
                }
1860
            case 2:
1861
                if (arm_feature(env, ARM_FEATURE_V6)) {
1862
                    /* Instruction Fault Address.  */
1863
                    return env->cp15.c6_insn;
1864
                } else {
1865
                    goto bad_reg;
1866
                }
1867
            default:
1868
                goto bad_reg;
1869
            }
1870
        }
1871
    case 7: /* Cache control.  */
1872
        if (crm == 4 && op1 == 0 && op2 == 0) {
1873
            return env->cp15.c7_par;
1874
        }
1875
        /* FIXME: Should only clear Z flag if destination is r15.  */
1876
        env->ZF = 0;
1877
        return 0;
1878
    case 8: /* MMU TLB control.  */
1879
        goto bad_reg;
1880
    case 9:
1881
        switch (crm) {
1882
        case 0: /* Cache lockdown */
1883
            switch (op1) {
1884
            case 0: /* L1 cache.  */
1885
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1886
                    return 0;
1887
                }
1888
                switch (op2) {
1889
                case 0:
1890
                    return env->cp15.c9_data;
1891
                case 1:
1892
                    return env->cp15.c9_insn;
1893
                default:
1894
                    goto bad_reg;
1895
                }
1896
            case 1: /* L2 cache */
1897
                /* L2 Lockdown and Auxiliary control.  */
1898
                switch (op2) {
1899
                case 0:
1900
                    /* L2 cache lockdown (A8 only) */
1901
                    return 0;
1902
                case 2:
1903
                    /* L2 cache auxiliary control (A8) or control (A15) */
1904
                    if (ARM_CPUID(env) == ARM_CPUID_CORTEXA15) {
1905
                        /* Linux wants the number of processors from here.
1906
                         * Might as well set the interrupt-controller bit too.
1907
                         */
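                        /* For example, with smp_cpus == 4 this reads back as
                         * 0x03800000: (smp_cpus - 1) in bits [25:24] plus the
                         * interrupt-controller-present bit (bit 23).
                         */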
1908
                        return ((smp_cpus - 1) << 24) | (1 << 23);
1909
                    }
1910
                    return 0;
1911
                case 3:
1912
                    /* L2 cache extended control (A15) */
1913
                    return 0;
1914
                default:
1915
                    goto bad_reg;
1916
                }
1917
            default:
1918
                goto bad_reg;
1919
            }
1920
            break;
1921
        case 12: /* Performance monitor control */
1922
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1923
                goto bad_reg;
1924
            }
1925
            switch (op2) {
1926
            case 0: /* performance monitor control register */
1927
                return env->cp15.c9_pmcr;
1928
            case 1: /* count enable set */
1929
            case 2: /* count enable clear */
1930
                return env->cp15.c9_pmcnten;
1931
            case 3: /* overflow flag status */
1932
                return env->cp15.c9_pmovsr;
1933
            case 4: /* software increment */
1934
            case 5: /* event counter selection register */
1935
                return 0; /* Unimplemented, RAZ/WI */
1936
            default:
1937
                goto bad_reg;
1938
            }
1939
        case 13: /* Performance counters */
1940
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1941
                goto bad_reg;
1942
            }
1943
            switch (op2) {
1944
            case 1: /* Event type select */
1945
                return env->cp15.c9_pmxevtyper;
1946
            case 0: /* Cycle count register */
1947
            case 2: /* Event count register */
1948
                /* Unimplemented, so RAZ/WI */
1949
                return 0;
1950
            default:
1951
                goto bad_reg;
1952
            }
1953
        case 14: /* Performance monitor control */
1954
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1955
                goto bad_reg;
1956
            }
1957
            switch (op2) {
1958
            case 0: /* user enable */
1959
                return env->cp15.c9_pmuserenr;
1960
            case 1: /* interrupt enable set */
1961
            case 2: /* interrupt enable clear */
1962
                return env->cp15.c9_pminten;
1963
            default:
1964
                goto bad_reg;
1965
            }
1966
        default:
1967
            goto bad_reg;
1968
        }
1969
        break;
1970
    case 10: /* MMU TLB lockdown.  */
1971
        /* ??? TLB lockdown not implemented.  */
1972
        return 0;
1973
    case 11: /* TCM DMA control.  */
1974
    case 12: /* Reserved.  */
1975
        goto bad_reg;
1976
    case 13: /* Process ID.  */
1977
        switch (op2) {
1978
        case 0:
1979
            return env->cp15.c13_fcse;
1980
        case 1:
1981
            return env->cp15.c13_context;
1982
        default:
1983
            goto bad_reg;
1984
        }
1985
    case 14: /* Generic timer */
1986
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1987
            /* Dummy implementation: RAZ/WI for all */
1988
            return 0;
1989
        }
1990
        goto bad_reg;
1991
    case 15: /* Implementation specific.  */
1992
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1993
            if (op2 == 0 && crm == 1)
1994
                return env->cp15.c15_cpar;
1995

    
1996
            goto bad_reg;
1997
        }
1998
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1999
            switch (crm) {
2000
            case 0:
2001
                return 0;
2002
            case 1: /* Read TI925T configuration.  */
2003
                return env->cp15.c15_ticonfig;
2004
            case 2: /* Read I_max.  */
2005
                return env->cp15.c15_i_max;
2006
            case 3: /* Read I_min.  */
2007
                return env->cp15.c15_i_min;
2008
            case 4: /* Read thread-ID.  */
2009
                return env->cp15.c15_threadid;
2010
            case 8: /* TI925T_status */
2011
                return 0;
2012
            }
2013
            /* TODO: Peripheral port remap register:
2014
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2015
             * controller base address at $rn & ~0xfff and map size of
2016
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2017
            goto bad_reg;
2018
        }
2019
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
2020
            switch (crm) {
2021
            case 0:
2022
                if ((op1 == 4) && (op2 == 0)) {
2023
                    /* The config_base_address should hold the value of
2024
                     * the peripheral base. ARM should get this from a CPU
2025
                     * object property, but that support isn't available in
2026
                     * December 2011. Default to 0 for now and board models
2027
                     * that care can set it by a private hook */
2028
                    return env->cp15.c15_config_base_address;
2029
                } else if ((op1 == 0) && (op2 == 0)) {
2030
                    /* power_control should be set to maximum latency. Again,
2031
                       default to 0 and set by private hook */
2032
                    return env->cp15.c15_power_control;
2033
                } else if ((op1 == 0) && (op2 == 1)) {
2034
                    return env->cp15.c15_diagnostic;
2035
                } else if ((op1 == 0) && (op2 == 2)) {
2036
                    return env->cp15.c15_power_diagnostic;
2037
                }
2038
                break;
2039
            case 1: /* NEON Busy */
2040
                return 0;
2041
            case 5: /* tlb lockdown */
2042
            case 6:
2043
            case 7:
2044
                if ((op1 == 5) && (op2 == 2)) {
2045
                    return 0;
2046
                }
2047
                break;
2048
            default:
2049
                break;
2050
            }
2051
            goto bad_reg;
2052
        }
2053
        return 0;
2054
    }
2055
bad_reg:
2056
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2057
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2058
              (insn >> 16) & 0xf, crm, op1, op2);
2059
    return 0;
2060
}
2061

    
2062
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
2063
{
2064
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2065
        env->regs[13] = val;
2066
    } else {
2067
        env->banked_r13[bank_number(env, mode)] = val;
2068
    }
2069
}
2070

    
2071
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
2072
{
2073
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2074
        return env->regs[13];
2075
    } else {
2076
        return env->banked_r13[bank_number(env, mode)];
2077
    }
2078
}
2079

    
2080
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2081
{
2082
    switch (reg) {
2083
    case 0: /* APSR */
2084
        return xpsr_read(env) & 0xf8000000;
2085
    case 1: /* IAPSR */
2086
        return xpsr_read(env) & 0xf80001ff;
2087
    case 2: /* EAPSR */
2088
        return xpsr_read(env) & 0xff00fc00;
2089
    case 3: /* xPSR */
2090
        return xpsr_read(env) & 0xff00fdff;
2091
    case 5: /* IPSR */
2092
        return xpsr_read(env) & 0x000001ff;
2093
    case 6: /* EPSR */
2094
        return xpsr_read(env) & 0x0700fc00;
2095
    case 7: /* IEPSR */
2096
        return xpsr_read(env) & 0x0700edff;
2097
    case 8: /* MSP */
2098
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2099
    case 9: /* PSP */
2100
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2101
    case 16: /* PRIMASK */
2102
        return (env->uncached_cpsr & CPSR_I) != 0;
2103
    case 17: /* BASEPRI */
2104
    case 18: /* BASEPRI_MAX */
2105
        return env->v7m.basepri;
2106
    case 19: /* FAULTMASK */
2107
        return (env->uncached_cpsr & CPSR_F) != 0;
2108
    case 20: /* CONTROL */
2109
        return env->v7m.control;
2110
    default:
2111
        /* ??? For debugging only.  */
2112
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2113
        return 0;
2114
    }
2115
}
2116

    
2117
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
2118
{
2119
    switch (reg) {
2120
    case 0: /* APSR */
2121
        xpsr_write(env, val, 0xf8000000);
2122
        break;
2123
    case 1: /* IAPSR */
2124
        xpsr_write(env, val, 0xf8000000);
2125
        break;
2126
    case 2: /* EAPSR */
2127
        xpsr_write(env, val, 0xfe00fc00);
2128
        break;
2129
    case 3: /* xPSR */
2130
        xpsr_write(env, val, 0xfe00fc00);
2131
        break;
2132
    case 5: /* IPSR */
2133
        /* IPSR bits are readonly.  */
2134
        break;
2135
    case 6: /* EPSR */
2136
        xpsr_write(env, val, 0x0600fc00);
2137
        break;
2138
    case 7: /* IEPSR */
2139
        xpsr_write(env, val, 0x0600fc00);
2140
        break;
2141
    case 8: /* MSP */
2142
        if (env->v7m.current_sp)
2143
            env->v7m.other_sp = val;
2144
        else
2145
            env->regs[13] = val;
2146
        break;
2147
    case 9: /* PSP */
2148
        if (env->v7m.current_sp)
2149
            env->regs[13] = val;
2150
        else
2151
            env->v7m.other_sp = val;
2152
        break;
2153
    case 16: /* PRIMASK */
2154
        if (val & 1)
2155
            env->uncached_cpsr |= CPSR_I;
2156
        else
2157
            env->uncached_cpsr &= ~CPSR_I;
2158
        break;
2159
    case 17: /* BASEPRI */
2160
        env->v7m.basepri = val & 0xff;
2161
        break;
2162
    case 18: /* BASEPRI_MAX */
2163
        val &= 0xff;
2164
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2165
            env->v7m.basepri = val;
2166
        break;
2167
    case 19: /* FAULTMASK */
2168
        if (val & 1)
2169
            env->uncached_cpsr |= CPSR_F;
2170
        else
2171
            env->uncached_cpsr &= ~CPSR_F;
2172
        break;
2173
    case 20: /* CONTROL */
2174
        env->v7m.control = val & 3;
2175
        switch_v7m_sp(env, (val & 2) != 0);
2176
        break;
2177
    default:
2178
        /* ??? For debugging only.  */
2179
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2180
        return;
2181
    }
2182
}
2183

    
2184
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2185
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2186
                void *opaque)
2187
{
2188
    if (cpnum < 0 || cpnum > 14) {
2189
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2190
        return;
2191
    }
2192

    
2193
    env->cp[cpnum].cp_read = cp_read;
2194
    env->cp[cpnum].cp_write = cp_write;
2195
    env->cp[cpnum].opaque = opaque;
2196
}
2197

    
2198
#endif
2199

    
2200
/* Note that signed overflow is undefined in C.  The following routines are
2201
   careful to use unsigned types where modulo arithmetic is required.
2202
   Failure to do so _will_ break on newer gcc.  */
2203

    
2204
/* Signed saturating arithmetic.  */
2205

    
2206
/* Perform 16-bit signed saturating addition.  */
2207
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2208
{
2209
    uint16_t res;
2210

    
2211
    res = a + b;
2212
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2213
        if (a & 0x8000)
2214
            res = 0x8000;
2215
        else
2216
            res = 0x7fff;
2217
    }
2218
    return res;
2219
}
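/* For example, add16_sat(0x7ffe, 0x0003) overflows positively (the raw sum
 * 0x8001 changes sign while the operands agree in sign) and saturates to
 * 0x7fff; add16_sat(0x8001, 0xfffe), i.e. -32767 + -2, saturates to 0x8000.
 */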
2220

    
2221
/* Perform 8-bit signed saturating addition.  */
2222
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2223
{
2224
    uint8_t res;
2225

    
2226
    res = a + b;
2227
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2228
        if (a & 0x80)
2229
            res = 0x80;
2230
        else
2231
            res = 0x7f;
2232
    }
2233
    return res;
2234
}
2235

    
2236
/* Perform 16-bit signed saturating subtraction.  */
2237
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2238
{
2239
    uint16_t res;
2240

    
2241
    res = a - b;
2242
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2243
        if (a & 0x8000)
2244
            res = 0x8000;
2245
        else
2246
            res = 0x7fff;
2247
    }
2248
    return res;
2249
}
2250

    
2251
/* Perform 8-bit signed saturating subtraction.  */
2252
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2253
{
2254
    uint8_t res;
2255

    
2256
    res = a - b;
2257
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2258
        if (a & 0x80)
2259
            res = 0x80;
2260
        else
2261
            res = 0x7f;
2262
    }
2263
    return res;
2264
}
2265

    
2266
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2267
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2268
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2269
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2270
#define PFX q
2271

    
2272
#include "op_addsub.h"
2273

    
2274
/* Unsigned saturating arithmetic.  */
2275
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2276
{
2277
    uint16_t res;
2278
    res = a + b;
2279
    if (res < a)
2280
        res = 0xffff;
2281
    return res;
2282
}
2283

    
2284
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2285
{
2286
    if (a > b)
2287
        return a - b;
2288
    else
2289
        return 0;
2290
}
2291

    
2292
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2293
{
2294
    uint8_t res;
2295
    res = a + b;
2296
    if (res < a)
2297
        res = 0xff;
2298
    return res;
2299
}
2300

    
2301
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2302
{
2303
    if (a > b)
2304
        return a - b;
2305
    else
2306
        return 0;
2307
}
2308

    
2309
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2310
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2311
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2312
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2313
#define PFX uq
2314

    
2315
#include "op_addsub.h"
2316

    
2317
/* Signed modulo arithmetic.  */
2318
#define SARITH16(a, b, n, op) do { \
2319
    int32_t sum; \
2320
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2321
    RESULT(sum, n, 16); \
2322
    if (sum >= 0) \
2323
        ge |= 3 << (n * 2); \
2324
    } while(0)
2325

    
2326
#define SARITH8(a, b, n, op) do { \
2327
    int32_t sum; \
2328
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2329
    RESULT(sum, n, 8); \
2330
    if (sum >= 0) \
2331
        ge |= 1 << n; \
2332
    } while(0)
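/* The helpers generated from these macros (op_addsub.h included with ARITH_GE
 * defined) also compute per-lane GE flags in the local "ge" variable: for the
 * signed ops a lane's GE bits are set when its result is >= 0, which is what
 * the SEL instruction (sel_flags below) consumes.
 */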
2333

    
2334

    
2335
#define ADD16(a, b, n) SARITH16(a, b, n, +)
2336
#define SUB16(a, b, n) SARITH16(a, b, n, -)
2337
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2338
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2339
#define PFX s
2340
#define ARITH_GE
2341

    
2342
#include "op_addsub.h"
2343

    
2344
/* Unsigned modulo arithmetic.  */
2345
#define ADD16(a, b, n) do { \
2346
    uint32_t sum; \
2347
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2348
    RESULT(sum, n, 16); \
2349
    if ((sum >> 16) == 1) \
2350
        ge |= 3 << (n * 2); \
2351
    } while(0)
2352

    
2353
#define ADD8(a, b, n) do { \
2354
    uint32_t sum; \
2355
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2356
    RESULT(sum, n, 8); \
2357
    if ((sum >> 8) == 1) \
2358
        ge |= 1 << n; \
2359
    } while(0)
2360

    
2361
#define SUB16(a, b, n) do { \
2362
    uint32_t sum; \
2363
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2364
    RESULT(sum, n, 16); \
2365
    if ((sum >> 16) == 0) \
2366
        ge |= 3 << (n * 2); \
2367
    } while(0)
2368

    
2369
#define SUB8(a, b, n) do { \
2370
    uint32_t sum; \
2371
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2372
    RESULT(sum, n, 8); \
2373
    if ((sum >> 8) == 0) \
2374
        ge |= 1 << n; \
2375
    } while(0)
2376

    
2377
#define PFX u
2378
#define ARITH_GE
2379

    
2380
#include "op_addsub.h"
2381

    
2382
/* Halved signed arithmetic.  */
2383
#define ADD16(a, b, n) \
2384
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2385
#define SUB16(a, b, n) \
2386
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2387
#define ADD8(a, b, n) \
2388
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2389
#define SUB8(a, b, n) \
2390
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2391
#define PFX sh
2392

    
2393
#include "op_addsub.h"
2394

    
2395
/* Halved unsigned arithmetic.  */
2396
#define ADD16(a, b, n) \
2397
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2398
#define SUB16(a, b, n) \
2399
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2400
#define ADD8(a, b, n) \
2401
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2402
#define SUB8(a, b, n) \
2403
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2404
#define PFX uh
2405

    
2406
#include "op_addsub.h"
2407

    
2408
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2409
{
2410
    if (a > b)
2411
        return a - b;
2412
    else
2413
        return b - a;
2414
}
2415

    
2416
/* Unsigned sum of absolute byte differences.  */
2417
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2418
{
2419
    uint32_t sum;
2420
    sum = do_usad(a, b);
2421
    sum += do_usad(a >> 8, b >> 8);
2422
    sum += do_usad(a >> 16, b >> 16);
2423
    sum += do_usad(a >> 24, b >> 24);
2424
    return sum;
2425
}
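/* For example, usad8(0x01020304, 0x04030201) sums |4-1| + |3-2| + |2-3| + |1-4|
 * over the four byte lanes, starting from the least significant byte, and
 * returns 8.
 */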
2426

    
2427
/* For ARMv6 SEL instruction.  */
2428
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2429
{
2430
    uint32_t mask;
2431

    
2432
    mask = 0;
2433
    if (flags & 1)
2434
        mask |= 0xff;
2435
    if (flags & 2)
2436
        mask |= 0xff00;
2437
    if (flags & 4)
2438
        mask |= 0xff0000;
2439
    if (flags & 8)
2440
        mask |= 0xff000000;
2441
    return (a & mask) | (b & ~mask);
2442
}
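/* The flags argument holds the four GE bits: with flags == 0x5, for instance,
 * byte lanes 0 and 2 are taken from a and lanes 1 and 3 from b.
 */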
2443

    
2444
uint32_t HELPER(logicq_cc)(uint64_t val)
2445
{
2446
    return (val >> 32) | (val != 0);
2447
}
2448

    
2449
/* VFP support.  We follow the convention used for VFP instructions:
2450
   Single precision routines have an "s" suffix, double precision a
2451
   "d" suffix.  */
2452

    
2453
/* Convert host exception flags to vfp form.  */
2454
static inline int vfp_exceptbits_from_host(int host_bits)
2455
{
2456
    int target_bits = 0;
2457

    
2458
    if (host_bits & float_flag_invalid)
2459
        target_bits |= 1;
2460
    if (host_bits & float_flag_divbyzero)
2461
        target_bits |= 2;
2462
    if (host_bits & float_flag_overflow)
2463
        target_bits |= 4;
2464
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2465
        target_bits |= 8;
2466
    if (host_bits & float_flag_inexact)
2467
        target_bits |= 0x10;
2468
    if (host_bits & float_flag_input_denormal)
2469
        target_bits |= 0x80;
2470
    return target_bits;
2471
}
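/* The target bit positions above match the FPSCR cumulative exception flags:
 * IOC (bit 0), DZC (bit 1), OFC (bit 2), UFC (bit 3), IXC (bit 4) and
 * IDC (bit 7).
 */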
2472

    
2473
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
2474
{
2475
    int i;
2476
    uint32_t fpscr;
2477

    
2478
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2479
            | (env->vfp.vec_len << 16)
2480
            | (env->vfp.vec_stride << 20);
2481
    i = get_float_exception_flags(&env->vfp.fp_status);
2482
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2483
    fpscr |= vfp_exceptbits_from_host(i);
2484
    return fpscr;
2485
}
2486

    
2487
uint32_t vfp_get_fpscr(CPUARMState *env)
2488
{
2489
    return HELPER(vfp_get_fpscr)(env);
2490
}
2491

    
2492
/* Convert vfp exception flags to target form.  */
2493
static inline int vfp_exceptbits_to_host(int target_bits)
2494
{
2495
    int host_bits = 0;
2496

    
2497
    if (target_bits & 1)
2498
        host_bits |= float_flag_invalid;
2499
    if (target_bits & 2)
2500
        host_bits |= float_flag_divbyzero;
2501
    if (target_bits & 4)
2502
        host_bits |= float_flag_overflow;
2503
    if (target_bits & 8)
2504
        host_bits |= float_flag_underflow;
2505
    if (target_bits & 0x10)
2506
        host_bits |= float_flag_inexact;
2507
    if (target_bits & 0x80)
2508
        host_bits |= float_flag_input_denormal;
2509
    return host_bits;
2510
}
2511

    
2512
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
2513
{
2514
    int i;
2515
    uint32_t changed;
2516

    
2517
    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2518
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2519
    env->vfp.vec_len = (val >> 16) & 7;
2520
    env->vfp.vec_stride = (val >> 20) & 3;
2521

    
2522
    changed ^= val;
2523
    if (changed & (3 << 22)) {
2524
        i = (val >> 22) & 3;
2525
        switch (i) {
2526
        case 0:
2527
            i = float_round_nearest_even;
2528
            break;
2529
        case 1:
2530
            i = float_round_up;
2531
            break;
2532
        case 2:
2533
            i = float_round_down;
2534
            break;
2535
        case 3:
2536
            i = float_round_to_zero;
2537
            break;
2538
        }
2539
        set_float_rounding_mode(i, &env->vfp.fp_status);
2540
    }
2541
    if (changed & (1 << 24)) {
2542
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2543
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2544
    }
2545
    if (changed & (1 << 25))
2546
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2547

    
2548
    i = vfp_exceptbits_to_host(val);
2549
    set_float_exception_flags(i, &env->vfp.fp_status);
2550
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2551
}
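/* The RMode field written here is FPSCR bits [23:22]: writing 0x00c00000, for
 * example, selects round-towards-zero, while bit 24 (FZ) and bit 25 (DN)
 * control the flush-to-zero and default-NaN modes handled above.
 */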
2552

    
2553
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
2554
{
2555
    HELPER(vfp_set_fpscr)(env, val);
2556
}
2557

    
2558
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2559

    
2560
#define VFP_BINOP(name) \
2561
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2562
{ \
2563
    float_status *fpst = fpstp; \
2564
    return float32_ ## name(a, b, fpst); \
2565
} \
2566
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2567
{ \
2568
    float_status *fpst = fpstp; \
2569
    return float64_ ## name(a, b, fpst); \
2570
}
2571
VFP_BINOP(add)
2572
VFP_BINOP(sub)
2573
VFP_BINOP(mul)
2574
VFP_BINOP(div)
2575
#undef VFP_BINOP
2576

    
2577
float32 VFP_HELPER(neg, s)(float32 a)
2578
{
2579
    return float32_chs(a);
2580
}
2581

    
2582
float64 VFP_HELPER(neg, d)(float64 a)
2583
{
2584
    return float64_chs(a);
2585
}
2586

    
2587
float32 VFP_HELPER(abs, s)(float32 a)
2588
{
2589
    return float32_abs(a);
2590
}
2591

    
2592
float64 VFP_HELPER(abs, d)(float64 a)
2593
{
2594
    return float64_abs(a);
2595
}
2596

    
2597
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
2598
{
2599
    return float32_sqrt(a, &env->vfp.fp_status);
2600
}
2601

    
2602
float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
2603
{
2604
    return float64_sqrt(a, &env->vfp.fp_status);
2605
}
2606

    
2607
/* XXX: check quiet/signaling case */
2608
#define DO_VFP_cmp(p, type) \
2609
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
2610
{ \
2611
    uint32_t flags; \
2612
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2613
    case 0: flags = 0x6; break; \
2614
    case -1: flags = 0x8; break; \
2615
    case 1: flags = 0x2; break; \
2616
    default: case 2: flags = 0x3; break; \
2617
    } \
2618
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2619
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2620
} \
2621
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
2622
{ \
2623
    uint32_t flags; \
2624
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2625
    case 0: flags = 0x6; break; \
2626
    case -1: flags = 0x8; break; \
2627
    case 1: flags = 0x2; break; \
2628
    default: case 2: flags = 0x3; break; \
2629
    } \
2630
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2631
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2632
}
2633
DO_VFP_cmp(s, float32)
2634
DO_VFP_cmp(d, float64)
2635
#undef DO_VFP_cmp
2636

    
2637
/* Integer to float and float to integer conversions */
2638

    
2639
#define CONV_ITOF(name, fsz, sign) \
2640
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2641
{ \
2642
    float_status *fpst = fpstp; \
2643
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
2644
}
2645

    
2646
#define CONV_FTOI(name, fsz, sign, round) \
2647
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2648
{ \
2649
    float_status *fpst = fpstp; \
2650
    if (float##fsz##_is_any_nan(x)) { \
2651
        float_raise(float_flag_invalid, fpst); \
2652
        return 0; \
2653
    } \
2654
    return float##fsz##_to_##sign##int32##round(x, fpst); \
2655
}
2656

    
2657
#define FLOAT_CONVS(name, p, fsz, sign) \
2658
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2659
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2660
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2661

    
2662
FLOAT_CONVS(si, s, 32, )
2663
FLOAT_CONVS(si, d, 64, )
2664
FLOAT_CONVS(ui, s, 32, u)
2665
FLOAT_CONVS(ui, d, 64, u)
2666

    
2667
#undef CONV_ITOF
2668
#undef CONV_FTOI
2669
#undef FLOAT_CONVS
2670

    
2671
/* floating point conversion */
2672
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
2673
{
2674
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2675
    /* ARM requires that S<->D conversion of any kind of NaN generates
2676
     * a quiet NaN by forcing the most significant frac bit to 1.
2677
     */
2678
    return float64_maybe_silence_nan(r);
2679
}
2680

    
2681
float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
2682
{
2683
    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2684
    /* ARM requires that S<->D conversion of any kind of NaN generates
2685
     * a quiet NaN by forcing the most significant frac bit to 1.
2686
     */
2687
    return float32_maybe_silence_nan(r);
2688
}
2689

    
2690
/* VFP3 fixed point conversion.  */
2691
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2692
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2693
                                    void *fpstp) \
2694
{ \
2695
    float_status *fpst = fpstp; \
2696
    float##fsz tmp; \
2697
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2698
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2699
} \
2700
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2701
                                       void *fpstp) \
2702
{ \
2703
    float_status *fpst = fpstp; \
2704
    float##fsz tmp; \
2705
    if (float##fsz##_is_any_nan(x)) { \
2706
        float_raise(float_flag_invalid, fpst); \
2707
        return 0; \
2708
    } \
2709
    tmp = float##fsz##_scalbn(x, shift, fpst); \
2710
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2711
}
2712

    
2713
VFP_CONV_FIX(sh, d, 64, int16, )
2714
VFP_CONV_FIX(sl, d, 64, int32, )
2715
VFP_CONV_FIX(uh, d, 64, uint16, u)
2716
VFP_CONV_FIX(ul, d, 64, uint32, u)
2717
VFP_CONV_FIX(sh, s, 32, int16, )
2718
VFP_CONV_FIX(sl, s, 32, int32, )
2719
VFP_CONV_FIX(uh, s, 32, uint16, u)
2720
VFP_CONV_FIX(ul, s, 32, uint32, u)
2721
#undef VFP_CONV_FIX
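/* Each fixed-point conversion is an integer conversion followed by a scale by
 * 2^-shift (or 2^shift on the way back): for instance the "sl" to double case
 * turns x = 0x180 with shift = 4 into 384.0 * 2^-4 = 24.0.
 */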
2722

    
2723
/* Half precision conversions.  */
2724
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
2725
{
2726
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2727
    float32 r = float16_to_float32(make_float16(a), ieee, s);
2728
    if (ieee) {
2729
        return float32_maybe_silence_nan(r);
2730
    }
2731
    return r;
2732
}
2733

    
2734
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
2735
{
2736
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2737
    float16 r = float32_to_float16(a, ieee, s);
2738
    if (ieee) {
2739
        r = float16_maybe_silence_nan(r);
2740
    }
2741
    return float16_val(r);
2742
}
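/* Bit 26 of the FPSCR is the AHP bit: when it is set the alternative
 * half-precision format (no infinities or NaNs) is in use, which is why
 * "ieee" above is its inverse.
 */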
2743

    
2744
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2745
{
2746
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2747
}
2748

    
2749
uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2750
{
2751
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2752
}
2753

    
2754
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2755
{
2756
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2757
}
2758

    
2759
uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2760
{
2761
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2762
}
2763

    
2764
#define float32_two make_float32(0x40000000)
2765
#define float32_three make_float32(0x40400000)
2766
#define float32_one_point_five make_float32(0x3fc00000)
2767

    
2768
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
2769
{
2770
    float_status *s = &env->vfp.standard_fp_status;
2771
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2772
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2773
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2774
            float_raise(float_flag_input_denormal, s);
2775
        }
2776
        return float32_two;
2777
    }
2778
    return float32_sub(float32_two, float32_mul(a, b, s), s);
2779
}
2780

    
2781
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
2782
{
2783
    float_status *s = &env->vfp.standard_fp_status;
2784
    float32 product;
2785
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2786
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2787
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2788
            float_raise(float_flag_input_denormal, s);
2789
        }
2790
        return float32_one_point_five;
2791
    }
2792
    product = float32_mul(a, b, s);
2793
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2794
}
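/* recps_f32 and rsqrts_f32 return the Newton-Raphson correction factors
 * 2 - a*b and (3 - a*b) / 2; guest code multiplies them back into its current
 * estimate, i.e. x' = x * (2 - d*x) for 1/d and x' = x * (3 - d*x*x) / 2 for
 * 1/sqrt(d).
 */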
2795

    
2796
/* NEON helpers.  */
2797

    
2798
/* Constants 256 and 512 are used in some helpers; we avoid relying on
2799
 * int->float conversions at run-time.  */
2800
#define float64_256 make_float64(0x4070000000000000LL)
2801
#define float64_512 make_float64(0x4080000000000000LL)
2802

    
2803
/* The algorithm that must be used to calculate the estimate
2804
 * is specified by the ARM ARM.
2805
 */
2806
static float64 recip_estimate(float64 a, CPUARMState *env)
2807
{
2808
    /* These calculations mustn't set any fp exception flags,
2809
     * so we use a local copy of the fp_status.
2810
     */
2811
    float_status dummy_status = env->vfp.standard_fp_status;
2812
    float_status *s = &dummy_status;
2813
    /* q = (int)(a * 512.0) */
2814
    float64 q = float64_mul(float64_512, a, s);
2815
    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2816

    
2817
    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2818
    q = int64_to_float64(q_int, s);
2819
    q = float64_add(q, float64_half, s);
2820
    q = float64_div(q, float64_512, s);
2821
    q = float64_div(float64_one, q, s);
2822

    
2823
    /* s = (int)(256.0 * r + 0.5) */
2824
    q = float64_mul(q, float64_256, s);
2825
    q = float64_add(q, float64_half, s);
2826
    q_int = float64_to_int64_round_to_zero(q, s);
2827

    
2828
    /* return (double)s / 256.0 */
2829
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2830
}
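/* Worked example: for a = 0.5, q = 256, r = 1 / (256.5 / 512) = 1.9961...,
 * so the scaled integer is 511 and the estimate returned is 511 / 256
 * = 1.99609375.
 */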
2831

    
2832
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
2833
{
2834
    float_status *s = &env->vfp.standard_fp_status;
2835
    float64 f64;
2836
    uint32_t val32 = float32_val(a);
2837

    
2838
    int result_exp;
2839
    int a_exp = (val32  & 0x7f800000) >> 23;
2840
    int sign = val32 & 0x80000000;
2841

    
2842
    if (float32_is_any_nan(a)) {
2843
        if (float32_is_signaling_nan(a)) {
2844
            float_raise(float_flag_invalid, s);
2845
        }
2846
        return float32_default_nan;
2847
    } else if (float32_is_infinity(a)) {
2848
        return float32_set_sign(float32_zero, float32_is_neg(a));
2849
    } else if (float32_is_zero_or_denormal(a)) {
2850
        if (!float32_is_zero(a)) {
2851
            float_raise(float_flag_input_denormal, s);
2852
        }
2853
        float_raise(float_flag_divbyzero, s);
2854
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2855
    } else if (a_exp >= 253) {
2856
        float_raise(float_flag_underflow, s);
2857
        return float32_set_sign(float32_zero, float32_is_neg(a));
2858
    }
2859

    
2860
    f64 = make_float64((0x3feULL << 52)
2861
                       | ((int64_t)(val32 & 0x7fffff) << 29));
2862

    
2863
    result_exp = 253 - a_exp;
2864

    
2865
    f64 = recip_estimate(f64, env);
2866

    
2867
    val32 = sign
2868
        | ((result_exp & 0xff) << 23)
2869
        | ((float64_val(f64) >> 29) & 0x7fffff);
2870
    return make_float32(val32);
2871
}
2872

    
2873
/* The algorithm that must be used to calculate the estimate
2874
 * is specified by the ARM ARM.
2875
 */
2876
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
2877
{
2878
    /* These calculations mustn't set any fp exception flags,
2879
     * so we use a local copy of the fp_status.
2880
     */
2881
    float_status dummy_status = env->vfp.standard_fp_status;
2882
    float_status *s = &dummy_status;
2883
    float64 q;
2884
    int64_t q_int;
2885

    
2886
    if (float64_lt(a, float64_half, s)) {
2887
        /* range 0.25 <= a < 0.5 */
2888

    
2889
        /* a in units of 1/512 rounded down */
2890
        /* q0 = (int)(a * 512.0);  */
2891
        q = float64_mul(float64_512, a, s);
2892
        q_int = float64_to_int64_round_to_zero(q, s);
2893

    
2894
        /* reciprocal root r */
2895
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2896
        q = int64_to_float64(q_int, s);
2897
        q = float64_add(q, float64_half, s);
2898
        q = float64_div(q, float64_512, s);
2899
        q = float64_sqrt(q, s);
2900
        q = float64_div(float64_one, q, s);
2901
    } else {
2902
        /* range 0.5 <= a < 1.0 */
2903

    
2904
        /* a in units of 1/256 rounded down */
2905
        /* q1 = (int)(a * 256.0); */
2906
        q = float64_mul(float64_256, a, s);
2907
        int64_t q_int = float64_to_int64_round_to_zero(q, s);
2908

    
2909
        /* reciprocal root r */
2910
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2911
        q = int64_to_float64(q_int, s);
2912
        q = float64_add(q, float64_half, s);
2913
        q = float64_div(q, float64_256, s);
2914
        q = float64_sqrt(q, s);
2915
        q = float64_div(float64_one, q, s);
2916
    }
2917
    /* r in units of 1/256 rounded to nearest */
2918
    /* s = (int)(256.0 * r + 0.5); */
2919

    
2920
    q = float64_mul(q, float64_256, s);
2921
    q = float64_add(q, float64_half, s);
2922
    q_int = float64_to_int64_round_to_zero(q, s);
2923

    
2924
    /* return (double)s / 256.0; */
2925
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2926
}
2927

    
2928
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
2929
{
2930
    float_status *s = &env->vfp.standard_fp_status;
2931
    int result_exp;
2932
    float64 f64;
2933
    uint32_t val;
2934
    uint64_t val64;
2935

    
2936
    val = float32_val(a);
2937

    
2938
    if (float32_is_any_nan(a)) {
2939
        if (float32_is_signaling_nan(a)) {
2940
            float_raise(float_flag_invalid, s);
2941
        }
2942
        return float32_default_nan;
2943
    } else if (float32_is_zero_or_denormal(a)) {
2944
        if (!float32_is_zero(a)) {
2945
            float_raise(float_flag_input_denormal, s);
2946
        }
2947
        float_raise(float_flag_divbyzero, s);
2948
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2949
    } else if (float32_is_neg(a)) {
2950
        float_raise(float_flag_invalid, s);
2951
        return float32_default_nan;
2952
    } else if (float32_is_infinity(a)) {
2953
        return float32_zero;
2954
    }
2955

    
2956
    /* Normalize to a double-precision value between 0.25 and 1.0,
2957
     * preserving the parity of the exponent.  */
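    /* The low bit of the exponent chooses between the [0.5, 1.0) and
     * [0.25, 0.5) ranges below, so that the integer halving in
     * result_exp = (380 - exponent) / 2 stays consistent with the
     * significand passed to the estimate.
     */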
2958
    if ((val & 0x800000) == 0) {
2959
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
2960
                           | (0x3feULL << 52)
2961
                           | ((uint64_t)(val & 0x7fffff) << 29));
2962
    } else {
2963
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
2964
                           | (0x3fdULL << 52)
2965
                           | ((uint64_t)(val & 0x7fffff) << 29));
2966
    }
2967

    
2968
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
2969

    
2970
    f64 = recip_sqrt_estimate(f64, env);
2971

    
2972
    val64 = float64_val(f64);
2973

    
2974
    val = ((result_exp & 0xff) << 23)
2975
        | ((val64 >> 29)  & 0x7fffff);
2976
    return make_float32(val);
2977
}
2978

    
2979
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
2980
{
2981
    float64 f64;
2982

    
2983
    if ((a & 0x80000000) == 0) {
2984
        return 0xffffffff;
2985
    }
2986

    
2987
    f64 = make_float64((0x3feULL << 52)
2988
                       | ((int64_t)(a & 0x7fffffff) << 21));
2989

    
2990
    f64 = recip_estimate(f64, env);
2991

    
2992
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
2993
}
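/* Here the operand is treated as an unsigned fraction in [0.5, 1.0) (bit 31 is
 * the 1/2 bit; anything below 0.5 returns the saturated value 0xffffffff) and
 * the result as a value in [1.0, 2.0) with bit 31 as the integer bit.
 */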
2994

    
2995
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
2996
{
2997
    float64 f64;
2998

    
2999
    if ((a & 0xc0000000) == 0) {
3000
        return 0xffffffff;
3001
    }
3002

    
3003
    if (a & 0x80000000) {
3004
        f64 = make_float64((0x3feULL << 52)
3005
                           | ((uint64_t)(a & 0x7fffffff) << 21));
3006
    } else { /* bits 31-30 == '01' */
3007
        f64 = make_float64((0x3fdULL << 52)
3008
                           | ((uint64_t)(a & 0x3fffffff) << 22));
3009
    }
3010

    
3011
    f64 = recip_sqrt_estimate(f64, env);
3012

    
3013
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3014
}
3015

    
3016
/* VFPv4 fused multiply-accumulate */
3017
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
3018
{
3019
    float_status *fpst = fpstp;
3020
    return float32_muladd(a, b, c, 0, fpst);
3021
}
3022

    
3023
float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
3024
{
3025
    float_status *fpst = fpstp;
3026
    return float64_muladd(a, b, c, 0, fpst);
3027
}
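/* These use the softfloat fused multiply-add, i.e. a*b + c with a single
 * rounding and no intermediate rounding of the product, as the VFPv4/Neon
 * fused operations require.
 */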
3028

    
3029
void HELPER(set_teecr)(CPUARMState *env, uint32_t val)
3030
{
3031
    val &= 1;
3032
    if (env->teecr != val) {
3033
        env->teecr = val;
3034
        tb_flush(env);
3035
    }
3036
}