/* target-arm/helper.c @ revision 4b6a83fb */

1
#include "cpu.h"
2
#include "gdbstub.h"
3
#include "helper.h"
4
#include "host-utils.h"
5
#include "sysemu.h"
6

    
7
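/* Read VFP/Neon register 'reg' for the gdb stub: store its value
 * little-endian into 'buf' and return the number of bytes written,
 * or 0 if the index is outside the register set described by the
 * arm-vfp/arm-vfp3/arm-neon XML files registered in cpu_arm_init().
 */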
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
8
{
9
    int nregs;
10

    
11
    /* VFP data registers are always little-endian.  */
12
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
13
    if (reg < nregs) {
14
        stfq_le_p(buf, env->vfp.regs[reg]);
15
        return 8;
16
    }
17
    if (arm_feature(env, ARM_FEATURE_NEON)) {
18
        /* Aliases for Q regs.  */
19
        nregs += 16;
20
        if (reg < nregs) {
21
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
22
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
23
            return 16;
24
        }
25
    }
26
    switch (reg - nregs) {
27
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
28
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
29
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
30
    }
31
    return 0;
32
}
33

    
34
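/* Counterpart of vfp_gdb_get_reg: update VFP/Neon register 'reg' from
 * the little-endian value in 'buf' and return the number of bytes
 * consumed, or 0 if the register is not recognised.
 */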
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
35
{
36
    int nregs;
37

    
38
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
39
    if (reg < nregs) {
40
        env->vfp.regs[reg] = ldfq_le_p(buf);
41
        return 8;
42
    }
43
    if (arm_feature(env, ARM_FEATURE_NEON)) {
44
        nregs += 16;
45
        if (reg < nregs) {
46
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
47
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
48
            return 16;
49
        }
50
    }
51
    switch (reg - nregs) {
52
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
53
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
54
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
55
    }
56
    return 0;
57
}
58

    
59
ARMCPU *cpu_arm_init(const char *cpu_model)
60
{
61
    ARMCPU *cpu;
62
    CPUARMState *env;
63
    static int inited = 0;
64

    
65
    if (!object_class_by_name(cpu_model)) {
66
        return NULL;
67
    }
68
    cpu = ARM_CPU(object_new(cpu_model));
69
    env = &cpu->env;
70
    env->cpu_model_str = cpu_model;
71
    arm_cpu_realize(cpu);
72

    
73
    if (tcg_enabled() && !inited) {
74
        inited = 1;
75
        arm_translate_init();
76
    }
77

    
78
    cpu_reset(CPU(cpu));
79
    if (arm_feature(env, ARM_FEATURE_NEON)) {
80
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
81
                                 51, "arm-neon.xml", 0);
82
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
83
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
84
                                 35, "arm-vfp3.xml", 0);
85
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
86
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
87
                                 19, "arm-vfp.xml", 0);
88
    }
89
    qemu_init_vcpu(env);
90
    return cpu;
91
}
92

    
93
typedef struct ARMCPUListState {
94
    fprintf_function cpu_fprintf;
95
    FILE *file;
96
} ARMCPUListState;
97

    
98
/* Sort alphabetically by type name, except for "any". */
99
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
100
{
101
    ObjectClass *class_a = (ObjectClass *)a;
102
    ObjectClass *class_b = (ObjectClass *)b;
103
    const char *name_a, *name_b;
104

    
105
    name_a = object_class_get_name(class_a);
106
    name_b = object_class_get_name(class_b);
107
    if (strcmp(name_a, "any") == 0) {
108
        return 1;
109
    } else if (strcmp(name_b, "any") == 0) {
110
        return -1;
111
    } else {
112
        return strcmp(name_a, name_b);
113
    }
114
}
115

    
116
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
117
{
118
    ObjectClass *oc = data;
119
    ARMCPUListState *s = user_data;
120

    
121
    (*s->cpu_fprintf)(s->file, "  %s\n",
122
                      object_class_get_name(oc));
123
}
124

    
125
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
126
{
127
    ARMCPUListState s = {
128
        .file = f,
129
        .cpu_fprintf = cpu_fprintf,
130
    };
131
    GSList *list;
132

    
133
    list = object_class_get_list(TYPE_ARM_CPU, false);
134
    list = g_slist_sort(list, arm_cpu_list_compare);
135
    (*cpu_fprintf)(f, "Available CPUs:\n");
136
    g_slist_foreach(list, arm_cpu_list_entry, &s);
137
    g_slist_free(list);
138
}
139

    
140
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
141
                                       const ARMCPRegInfo *r, void *opaque)
142
{
143
    /* Define implementations of coprocessor registers.
144
     * We store these in a hashtable because typically
145
     * there are fewer than 150 registers in a space which
146
     * is 16*16*16*8*8 = 262144 in size.
147
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
148
     * If a register is defined twice then the second definition is
149
     * used, so this can be used to define some generic registers and
150
     * then override them with implementation specific variations.
151
     * At least one of the original and the second definition should
152
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
153
     * against accidental use.
154
     */
155
    int crm, opc1, opc2;
156
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
157
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
158
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
159
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
160
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
161
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
162
    /* 64 bit registers have only CRm and Opc1 fields */
163
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
164
    /* Check that the register definition has enough info to handle
165
     * reads and writes if they are permitted.
166
     */
167
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
168
        if (r->access & PL3_R) {
169
            assert(r->fieldoffset || r->readfn);
170
        }
171
        if (r->access & PL3_W) {
172
            assert(r->fieldoffset || r->writefn);
173
        }
174
    }
175
    /* Bad type field probably means missing sentinel at end of reg list */
176
    assert(cptype_valid(r->type));
177
    for (crm = crmmin; crm <= crmmax; crm++) {
178
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
179
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
180
                uint32_t *key = g_new(uint32_t, 1);
181
                ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
182
                int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
183
                *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
184
                r2->opaque = opaque;
185
                /* Make sure reginfo passed to helpers for wildcarded regs
186
                 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
187
                 */
188
                r2->crm = crm;
189
                r2->opc1 = opc1;
190
                r2->opc2 = opc2;
191
                /* Overriding of an existing definition must be explicitly
192
                 * requested.
193
                 */
194
                if (!(r->type & ARM_CP_OVERRIDE)) {
195
                    ARMCPRegInfo *oldreg;
196
                    oldreg = g_hash_table_lookup(cpu->cp_regs, key);
197
                    if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
198
                        fprintf(stderr, "Register redefined: cp=%d %d bit "
199
                                "crn=%d crm=%d opc1=%d opc2=%d, "
200
                                "was %s, now %s\n", r2->cp, 32 + 32 * is64,
201
                                r2->crn, r2->crm, r2->opc1, r2->opc2,
202
                                oldreg->name, r2->name);
203
                        assert(0);
204
                    }
205
                }
206
                g_hash_table_insert(cpu->cp_regs, key, r2);
207
            }
208
        }
209
    }
210
}
211

    
212
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
213
                                    const ARMCPRegInfo *regs, void *opaque)
214
{
215
    /* Define a whole list of registers */
216
    const ARMCPRegInfo *r;
217
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
218
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
219
    }
220
}
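/* Usage sketch (illustrative only; field and macro names such as
 * PL1_RW, REGINFO_SENTINEL and define_arm_cp_regs are assumed to match
 * the declarations in cpu.h):
 *
 *   static const ARMCPRegInfo foo_cp_reginfo[] = {
 *       { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0,
 *         .opc1 = 0, .opc2 = 0, .access = PL1_RW,
 *         .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse) },
 *       REGINFO_SENTINEL
 *   };
 *   define_arm_cp_regs(cpu, foo_cp_reginfo);
 */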
221

    
222
const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
223
{
224
    return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
225
}
226

    
227
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
228
                        uint64_t value)
229
{
230
    /* Helper coprocessor write function for write-ignore registers */
231
    return 0;
232
}
233

    
234
int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
235
{
236
    /* Helper coprocessor read function for read-as-zero registers */
237
    *value = 0;
238
    return 0;
239
}
240

    
241
static int bad_mode_switch(CPUARMState *env, int mode)
242
{
243
    /* Return true if it is not valid for us to switch to
244
     * this CPU mode (ie all the UNPREDICTABLE cases in
245
     * the ARM ARM CPSRWriteByInstr pseudocode).
246
     */
247
    switch (mode) {
248
    case ARM_CPU_MODE_USR:
249
    case ARM_CPU_MODE_SYS:
250
    case ARM_CPU_MODE_SVC:
251
    case ARM_CPU_MODE_ABT:
252
    case ARM_CPU_MODE_UND:
253
    case ARM_CPU_MODE_IRQ:
254
    case ARM_CPU_MODE_FIQ:
255
        return 0;
256
    default:
257
        return 1;
258
    }
259
}
260

    
261
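/* Reassemble the guest CPSR from the condition-code, Q, IT and GE
 * fields that are kept in separate CPUARMState members, plus the
 * remaining bits held in uncached_cpsr.
 */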
uint32_t cpsr_read(CPUARMState *env)
262
{
263
    int ZF;
264
    ZF = (env->ZF == 0);
265
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
266
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
267
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
268
        | ((env->condexec_bits & 0xfc) << 8)
269
        | (env->GE << 16);
270
}
271

    
272
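/* Write the guest CPSR: only the bits selected by 'mask' are updated.
 * Mode changes go through switch_mode() so the banked registers stay
 * consistent, and attempts to switch to an invalid mode are ignored.
 */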
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
273
{
274
    if (mask & CPSR_NZCV) {
275
        env->ZF = (~val) & CPSR_Z;
276
        env->NF = val;
277
        env->CF = (val >> 29) & 1;
278
        env->VF = (val << 3) & 0x80000000;
279
    }
280
    if (mask & CPSR_Q)
281
        env->QF = ((val & CPSR_Q) != 0);
282
    if (mask & CPSR_T)
283
        env->thumb = ((val & CPSR_T) != 0);
284
    if (mask & CPSR_IT_0_1) {
285
        env->condexec_bits &= ~3;
286
        env->condexec_bits |= (val >> 25) & 3;
287
    }
288
    if (mask & CPSR_IT_2_7) {
289
        env->condexec_bits &= 3;
290
        env->condexec_bits |= (val >> 8) & 0xfc;
291
    }
292
    if (mask & CPSR_GE) {
293
        env->GE = (val >> 16) & 0xf;
294
    }
295

    
296
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
297
        if (bad_mode_switch(env, val & CPSR_M)) {
298
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
299
             * We choose to ignore the attempt and leave the CPSR M field
300
             * untouched.
301
             */
302
            mask &= ~CPSR_M;
303
        } else {
304
            switch_mode(env, val & CPSR_M);
305
        }
306
    }
307
    mask &= ~CACHED_CPSR_BITS;
308
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
309
}
310

    
311
/* Sign/zero extend */
312
uint32_t HELPER(sxtb16)(uint32_t x)
313
{
314
    uint32_t res;
315
    res = (uint16_t)(int8_t)x;
316
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
317
    return res;
318
}
319

    
320
uint32_t HELPER(uxtb16)(uint32_t x)
321
{
322
    uint32_t res;
323
    res = (uint16_t)(uint8_t)x;
324
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
325
    return res;
326
}
327

    
328
uint32_t HELPER(clz)(uint32_t x)
329
{
330
    return clz32(x);
331
}
332

    
333
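/* Integer division helpers for the ARM SDIV/UDIV instructions:
 * division by zero returns 0 and the overflowing case INT_MIN / -1
 * returns INT_MIN, matching the architected (non-trapping) behaviour.
 */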
int32_t HELPER(sdiv)(int32_t num, int32_t den)
334
{
335
    if (den == 0)
336
      return 0;
337
    if (num == INT_MIN && den == -1)
338
      return INT_MIN;
339
    return num / den;
340
}
341

    
342
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
343
{
344
    if (den == 0)
345
      return 0;
346
    return num / den;
347
}
348

    
349
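/* Reverse the bit order of a 32-bit value: swap the bytes, then the
 * nibbles within each byte, then the bits within each nibble.
 */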
uint32_t HELPER(rbit)(uint32_t x)
350
{
351
    x =  ((x & 0xff000000) >> 24)
352
       | ((x & 0x00ff0000) >> 8)
353
       | ((x & 0x0000ff00) << 8)
354
       | ((x & 0x000000ff) << 24);
355
    x =  ((x & 0xf0f0f0f0) >> 4)
356
       | ((x & 0x0f0f0f0f) << 4);
357
    x =  ((x & 0x88888888) >> 3)
358
       | ((x & 0x44444444) >> 1)
359
       | ((x & 0x22222222) << 1)
360
       | ((x & 0x11111111) << 3);
361
    return x;
362
}
363

    
364
uint32_t HELPER(abs)(uint32_t x)
365
{
366
    return ((int32_t)x < 0) ? -x : x;
367
}
368

    
369
#if defined(CONFIG_USER_ONLY)
370

    
371
void do_interrupt (CPUARMState *env)
372
{
373
    env->exception_index = -1;
374
}
375

    
376
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
377
                              int mmu_idx)
378
{
379
    if (rw == 2) {
380
        env->exception_index = EXCP_PREFETCH_ABORT;
381
        env->cp15.c6_insn = address;
382
    } else {
383
        env->exception_index = EXCP_DATA_ABORT;
384
        env->cp15.c6_data = address;
385
    }
386
    return 1;
387
}
388

    
389
/* These should probably raise undefined insn exceptions.  */
390
void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
391
{
392
    int op1 = (insn >> 8) & 0xf;
393
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
394
    return;
395
}
396

    
397
uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
398
{
399
    int op1 = (insn >> 8) & 0xf;
400
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
401
    return 0;
402
}
403

    
404
void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
405
{
406
    cpu_abort(env, "cp15 insn %08x\n", insn);
407
}
408

    
409
uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
410
{
411
    cpu_abort(env, "cp15 insn %08x\n", insn);
412
}
413

    
414
/* These should probably raise undefined insn exceptions.  */
415
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
416
{
417
    cpu_abort(env, "v7m_msr %d\n", reg);
418
}
419

    
420
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
421
{
422
    cpu_abort(env, "v7m_mrs %d\n", reg);
423
    return 0;
424
}
425

    
426
void switch_mode(CPUARMState *env, int mode)
427
{
428
    if (mode != ARM_CPU_MODE_USR)
429
        cpu_abort(env, "Tried to switch out of user mode\n");
430
}
431

    
432
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
433
{
434
    cpu_abort(env, "banked r13 write\n");
435
}
436

    
437
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
438
{
439
    cpu_abort(env, "banked r13 read\n");
440
    return 0;
441
}
442

    
443
#else
444

    
445
/* Map CPU modes onto saved register banks.  */
446
static inline int bank_number(CPUARMState *env, int mode)
447
{
448
    switch (mode) {
449
    case ARM_CPU_MODE_USR:
450
    case ARM_CPU_MODE_SYS:
451
        return 0;
452
    case ARM_CPU_MODE_SVC:
453
        return 1;
454
    case ARM_CPU_MODE_ABT:
455
        return 2;
456
    case ARM_CPU_MODE_UND:
457
        return 3;
458
    case ARM_CPU_MODE_IRQ:
459
        return 4;
460
    case ARM_CPU_MODE_FIQ:
461
        return 5;
462
    }
463
    cpu_abort(env, "Bad mode %x\n", mode);
464
    return -1;
465
}
466

    
467
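/* Switch the CPU to a new mode, spilling and reloading the banked
 * copies of r13, r14 and SPSR (and r8-r12 when entering or leaving
 * FIQ mode).
 */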
void switch_mode(CPUARMState *env, int mode)
468
{
469
    int old_mode;
470
    int i;
471

    
472
    old_mode = env->uncached_cpsr & CPSR_M;
473
    if (mode == old_mode)
474
        return;
475

    
476
    if (old_mode == ARM_CPU_MODE_FIQ) {
477
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
478
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
479
    } else if (mode == ARM_CPU_MODE_FIQ) {
480
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
481
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
482
    }
483

    
484
    i = bank_number(env, old_mode);
485
    env->banked_r13[i] = env->regs[13];
486
    env->banked_r14[i] = env->regs[14];
487
    env->banked_spsr[i] = env->spsr;
488

    
489
    i = bank_number(env, mode);
490
    env->regs[13] = env->banked_r13[i];
491
    env->regs[14] = env->banked_r14[i];
492
    env->spsr = env->banked_spsr[i];
493
}
494

    
495
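/* Push/pop a word on the current v7M stack.  The stack pointer is
 * used directly as a physical address here, since this M-profile
 * model does no address translation.
 */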
static void v7m_push(CPUARMState *env, uint32_t val)
496
{
497
    env->regs[13] -= 4;
498
    stl_phys(env->regs[13], val);
499
}
500

    
501
static uint32_t v7m_pop(CPUARMState *env)
502
{
503
    uint32_t val;
504
    val = ldl_phys(env->regs[13]);
505
    env->regs[13] += 4;
506
    return val;
507
}
508

    
509
/* Switch to V7M main or process stack pointer.  */
510
static void switch_v7m_sp(CPUARMState *env, int process)
511
{
512
    uint32_t tmp;
513
    if (env->v7m.current_sp != process) {
514
        tmp = env->v7m.other_sp;
515
        env->v7m.other_sp = env->regs[13];
516
        env->regs[13] = tmp;
517
        env->v7m.current_sp = process;
518
    }
519
}
520

    
521
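/* Handle a v7M exception return: the magic EXC_RETURN value has been
 * loaded into the PC, so select the target stack from its bits, pop
 * the hardware-saved frame (r0-r3, r12, lr, pc, xPSR) and restore
 * xPSR.
 */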
static void do_v7m_exception_exit(CPUARMState *env)
522
{
523
    uint32_t type;
524
    uint32_t xpsr;
525

    
526
    type = env->regs[15];
527
    if (env->v7m.exception != 0)
528
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
529

    
530
    /* Switch to the target stack.  */
531
    switch_v7m_sp(env, (type & 4) != 0);
532
    /* Pop registers.  */
533
    env->regs[0] = v7m_pop(env);
534
    env->regs[1] = v7m_pop(env);
535
    env->regs[2] = v7m_pop(env);
536
    env->regs[3] = v7m_pop(env);
537
    env->regs[12] = v7m_pop(env);
538
    env->regs[14] = v7m_pop(env);
539
    env->regs[15] = v7m_pop(env);
540
    xpsr = v7m_pop(env);
541
    xpsr_write(env, xpsr, 0xfffffdff);
542
    /* Undo stack alignment.  */
543
    if (xpsr & 0x200)
544
        env->regs[13] |= 4;
545
    /* ??? The exception return type specifies Thread/Handler mode.  However
546
       this is also implied by the xPSR value. Not sure what to do
547
       if there is a mismatch.  */
548
    /* ??? Likewise for mismatches between the CONTROL register and the stack
549
       pointer.  */
550
}
551

    
552
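/* v7M exception entry: most exceptions are simply marked pending in
 * the NVIC; for real entry we push the hardware stack frame, switch
 * to the main stack and handler mode, and vector via the table at
 * v7m.vecbase.
 */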
static void do_interrupt_v7m(CPUARMState *env)
553
{
554
    uint32_t xpsr = xpsr_read(env);
555
    uint32_t lr;
556
    uint32_t addr;
557

    
558
    lr = 0xfffffff1;
559
    if (env->v7m.current_sp)
560
        lr |= 4;
561
    if (env->v7m.exception == 0)
562
        lr |= 8;
563

    
564
    /* For exceptions we just mark as pending on the NVIC, and let that
565
       handle it.  */
566
    /* TODO: Need to escalate if the current priority is higher than the
567
       one we're raising.  */
568
    switch (env->exception_index) {
569
    case EXCP_UDEF:
570
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
571
        return;
572
    case EXCP_SWI:
573
        env->regs[15] += 2;
574
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
575
        return;
576
    case EXCP_PREFETCH_ABORT:
577
    case EXCP_DATA_ABORT:
578
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
579
        return;
580
    case EXCP_BKPT:
581
        if (semihosting_enabled) {
582
            int nr;
583
            nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
584
            if (nr == 0xab) {
585
                env->regs[15] += 2;
586
                env->regs[0] = do_arm_semihosting(env);
587
                return;
588
            }
589
        }
590
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
591
        return;
592
    case EXCP_IRQ:
593
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
594
        break;
595
    case EXCP_EXCEPTION_EXIT:
596
        do_v7m_exception_exit(env);
597
        return;
598
    default:
599
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
600
        return; /* Never happens.  Keep compiler happy.  */
601
    }
602

    
603
    /* Align stack pointer.  */
604
    /* ??? Should only do this if Configuration Control Register
605
       STACKALIGN bit is set.  */
606
    if (env->regs[13] & 4) {
607
        env->regs[13] -= 4;
608
        xpsr |= 0x200;
609
    }
610
    /* Switch to the handler mode.  */
611
    v7m_push(env, xpsr);
612
    v7m_push(env, env->regs[15]);
613
    v7m_push(env, env->regs[14]);
614
    v7m_push(env, env->regs[12]);
615
    v7m_push(env, env->regs[3]);
616
    v7m_push(env, env->regs[2]);
617
    v7m_push(env, env->regs[1]);
618
    v7m_push(env, env->regs[0]);
619
    switch_v7m_sp(env, 0);
620
    /* Clear IT bits */
621
    env->condexec_bits = 0;
622
    env->regs[14] = lr;
623
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
624
    env->regs[15] = addr & 0xfffffffe;
625
    env->thumb = addr & 1;
626
}
627

    
628
/* Handle a CPU exception.  */
629
void do_interrupt(CPUARMState *env)
630
{
631
    uint32_t addr;
632
    uint32_t mask;
633
    int new_mode;
634
    uint32_t offset;
635

    
636
    if (IS_M(env)) {
637
        do_interrupt_v7m(env);
638
        return;
639
    }
640
    /* TODO: Vectored interrupt controller.  */
641
    switch (env->exception_index) {
642
    case EXCP_UDEF:
643
        new_mode = ARM_CPU_MODE_UND;
644
        addr = 0x04;
645
        mask = CPSR_I;
646
        if (env->thumb)
647
            offset = 2;
648
        else
649
            offset = 4;
650
        break;
651
    case EXCP_SWI:
652
        if (semihosting_enabled) {
653
            /* Check for semihosting interrupt.  */
654
            if (env->thumb) {
655
                mask = arm_lduw_code(env->regs[15] - 2, env->bswap_code) & 0xff;
656
            } else {
657
                mask = arm_ldl_code(env->regs[15] - 4, env->bswap_code)
658
                    & 0xffffff;
659
            }
660
            /* Only intercept calls from privileged modes, to provide some
661
               semblance of security.  */
662
            if (((mask == 0x123456 && !env->thumb)
663
                    || (mask == 0xab && env->thumb))
664
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
665
                env->regs[0] = do_arm_semihosting(env);
666
                return;
667
            }
668
        }
669
        new_mode = ARM_CPU_MODE_SVC;
670
        addr = 0x08;
671
        mask = CPSR_I;
672
        /* The PC already points to the next instruction.  */
673
        offset = 0;
674
        break;
675
    case EXCP_BKPT:
676
        /* See if this is a semihosting syscall.  */
677
        if (env->thumb && semihosting_enabled) {
678
            mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
679
            if (mask == 0xab
680
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
681
                env->regs[15] += 2;
682
                env->regs[0] = do_arm_semihosting(env);
683
                return;
684
            }
685
        }
686
        env->cp15.c5_insn = 2;
687
        /* Fall through to prefetch abort.  */
688
    case EXCP_PREFETCH_ABORT:
689
        new_mode = ARM_CPU_MODE_ABT;
690
        addr = 0x0c;
691
        mask = CPSR_A | CPSR_I;
692
        offset = 4;
693
        break;
694
    case EXCP_DATA_ABORT:
695
        new_mode = ARM_CPU_MODE_ABT;
696
        addr = 0x10;
697
        mask = CPSR_A | CPSR_I;
698
        offset = 8;
699
        break;
700
    case EXCP_IRQ:
701
        new_mode = ARM_CPU_MODE_IRQ;
702
        addr = 0x18;
703
        /* Disable IRQ and imprecise data aborts.  */
704
        mask = CPSR_A | CPSR_I;
705
        offset = 4;
706
        break;
707
    case EXCP_FIQ:
708
        new_mode = ARM_CPU_MODE_FIQ;
709
        addr = 0x1c;
710
        /* Disable FIQ, IRQ and imprecise data aborts.  */
711
        mask = CPSR_A | CPSR_I | CPSR_F;
712
        offset = 4;
713
        break;
714
    default:
715
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
716
        return; /* Never happens.  Keep compiler happy.  */
717
    }
718
    /* High vectors.  */
719
    if (env->cp15.c1_sys & (1 << 13)) {
720
        addr += 0xffff0000;
721
    }
722
    switch_mode (env, new_mode);
723
    env->spsr = cpsr_read(env);
724
    /* Clear IT bits.  */
725
    env->condexec_bits = 0;
726
    /* Switch to the new mode, and to the correct instruction set.  */
727
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
728
    env->uncached_cpsr |= mask;
729
    /* This is a lie, as there was no c1_sys on V4T/V5, but it doesn't
730
     * matter; we just use it to guard the Thumb bit on V4 */
731
    if (arm_feature(env, ARM_FEATURE_V4T)) {
732
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
733
    }
734
    env->regs[14] = env->regs[15] + offset;
735
    env->regs[15] = addr;
736
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
737
}
738

    
739
/* Check section/page access permissions.
740
   Returns the page protection flags, or zero if the access is not
741
   permitted.  */
742
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
743
                           int access_type, int is_user)
744
{
745
  int prot_ro;
746

    
747
  if (domain_prot == 3) {
748
    return PAGE_READ | PAGE_WRITE;
749
  }
750

    
751
  if (access_type == 1)
752
      prot_ro = 0;
753
  else
754
      prot_ro = PAGE_READ;
755

    
756
  switch (ap) {
757
  case 0:
758
      if (access_type == 1)
759
          return 0;
760
      switch ((env->cp15.c1_sys >> 8) & 3) {
761
      case 1:
762
          return is_user ? 0 : PAGE_READ;
763
      case 2:
764
          return PAGE_READ;
765
      default:
766
          return 0;
767
      }
768
  case 1:
769
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
770
  case 2:
771
      if (is_user)
772
          return prot_ro;
773
      else
774
          return PAGE_READ | PAGE_WRITE;
775
  case 3:
776
      return PAGE_READ | PAGE_WRITE;
777
  case 4: /* Reserved.  */
778
      return 0;
779
  case 5:
780
      return is_user ? 0 : prot_ro;
781
  case 6:
782
      return prot_ro;
783
  case 7:
784
      if (!arm_feature (env, ARM_FEATURE_V6K))
785
          return 0;
786
      return prot_ro;
787
  default:
788
      abort();
789
  }
790
}
791

    
792
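/* Return the physical address of the level 1 descriptor for 'address',
 * choosing TTBR0 or TTBR1 according to the TTBCR.N boundary encoded in
 * cp15.c2_mask.
 */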
static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
793
{
794
    uint32_t table;
795

    
796
    if (address & env->cp15.c2_mask)
797
        table = env->cp15.c2_base1 & 0xffffc000;
798
    else
799
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
800

    
801
    table |= (address >> 18) & 0x3ffc;
802
    return table;
803
}
804

    
805
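/* ARMv5-style page table walk (no XN, fine page tables allowed).
 * Returns 0 on success with *phys_ptr, *prot and *page_size filled in,
 * or a fault status value in FSR format (code | domain << 4).
 */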
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
806
                            int is_user, uint32_t *phys_ptr, int *prot,
807
                            target_ulong *page_size)
808
{
809
    int code;
810
    uint32_t table;
811
    uint32_t desc;
812
    int type;
813
    int ap;
814
    int domain;
815
    int domain_prot;
816
    uint32_t phys_addr;
817

    
818
    /* Pagetable walk.  */
819
    /* Lookup l1 descriptor.  */
820
    table = get_level1_table_address(env, address);
821
    desc = ldl_phys(table);
822
    type = (desc & 3);
823
    domain = (desc >> 5) & 0x0f;
824
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
825
    if (type == 0) {
826
        /* Section translation fault.  */
827
        code = 5;
828
        goto do_fault;
829
    }
830
    if (domain_prot == 0 || domain_prot == 2) {
831
        if (type == 2)
832
            code = 9; /* Section domain fault.  */
833
        else
834
            code = 11; /* Page domain fault.  */
835
        goto do_fault;
836
    }
837
    if (type == 2) {
838
        /* 1MB section.  */
839
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
840
        ap = (desc >> 10) & 3;
841
        code = 13;
842
        *page_size = 1024 * 1024;
843
    } else {
844
        /* Lookup l2 entry.  */
845
        if (type == 1) {
846
            /* Coarse pagetable.  */
847
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
848
        } else {
849
            /* Fine pagetable.  */
850
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
851
        }
852
        desc = ldl_phys(table);
853
        switch (desc & 3) {
854
        case 0: /* Page translation fault.  */
855
            code = 7;
856
            goto do_fault;
857
        case 1: /* 64k page.  */
858
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
859
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
860
            *page_size = 0x10000;
861
            break;
862
        case 2: /* 4k page.  */
863
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
864
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
865
            *page_size = 0x1000;
866
            break;
867
        case 3: /* 1k page.  */
868
            if (type == 1) {
869
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
870
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
871
                } else {
872
                    /* Page translation fault.  */
873
                    code = 7;
874
                    goto do_fault;
875
                }
876
            } else {
877
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
878
            }
879
            ap = (desc >> 4) & 3;
880
            *page_size = 0x400;
881
            break;
882
        default:
883
            /* Never happens, but compiler isn't smart enough to tell.  */
884
            abort();
885
        }
886
        code = 15;
887
    }
888
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
889
    if (!*prot) {
890
        /* Access permission fault.  */
891
        goto do_fault;
892
    }
893
    *prot |= PAGE_EXEC;
894
    *phys_ptr = phys_addr;
895
    return 0;
896
do_fault:
897
    return code | (domain << 4);
898
}
899

    
900
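/* ARMv6/v7 page table walk, adding supersections, extended access
 * permissions, the XN bit and the simplified access model.  Same
 * return convention as get_phys_addr_v5().
 */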
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
901
                            int is_user, uint32_t *phys_ptr, int *prot,
902
                            target_ulong *page_size)
903
{
904
    int code;
905
    uint32_t table;
906
    uint32_t desc;
907
    uint32_t xn;
908
    int type;
909
    int ap;
910
    int domain;
911
    int domain_prot;
912
    uint32_t phys_addr;
913

    
914
    /* Pagetable walk.  */
915
    /* Lookup l1 descriptor.  */
916
    table = get_level1_table_address(env, address);
917
    desc = ldl_phys(table);
918
    type = (desc & 3);
919
    if (type == 0) {
920
        /* Section translation fault.  */
921
        code = 5;
922
        domain = 0;
923
        goto do_fault;
924
    } else if (type == 2 && (desc & (1 << 18))) {
925
        /* Supersection.  */
926
        domain = 0;
927
    } else {
928
        /* Section or page.  */
929
        domain = (desc >> 5) & 0x0f;
930
    }
931
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
932
    if (domain_prot == 0 || domain_prot == 2) {
933
        if (type == 2)
934
            code = 9; /* Section domain fault.  */
935
        else
936
            code = 11; /* Page domain fault.  */
937
        goto do_fault;
938
    }
939
    if (type == 2) {
940
        if (desc & (1 << 18)) {
941
            /* Supersection.  */
942
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
943
            *page_size = 0x1000000;
944
        } else {
945
            /* Section.  */
946
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
947
            *page_size = 0x100000;
948
        }
949
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
950
        xn = desc & (1 << 4);
951
        code = 13;
952
    } else {
953
        /* Lookup l2 entry.  */
954
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
955
        desc = ldl_phys(table);
956
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
957
        switch (desc & 3) {
958
        case 0: /* Page translation fault.  */
959
            code = 7;
960
            goto do_fault;
961
        case 1: /* 64k page.  */
962
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
963
            xn = desc & (1 << 15);
964
            *page_size = 0x10000;
965
            break;
966
        case 2: case 3: /* 4k page.  */
967
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
968
            xn = desc & 1;
969
            *page_size = 0x1000;
970
            break;
971
        default:
972
            /* Never happens, but compiler isn't smart enough to tell.  */
973
            abort();
974
        }
975
        code = 15;
976
    }
977
    if (domain_prot == 3) {
978
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
979
    } else {
980
        if (xn && access_type == 2)
981
            goto do_fault;
982

    
983
        /* The simplified model uses AP[0] as an access control bit.  */
984
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
985
            /* Access flag fault.  */
986
            code = (code == 15) ? 6 : 3;
987
            goto do_fault;
988
        }
989
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
990
        if (!*prot) {
991
            /* Access permission fault.  */
992
            goto do_fault;
993
        }
994
        if (!xn) {
995
            *prot |= PAGE_EXEC;
996
        }
997
    }
998
    *phys_ptr = phys_addr;
999
    return 0;
1000
do_fault:
1001
    return code | (domain << 4);
1002
}
1003

    
1004
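/* MPU lookup: find the highest-numbered region matching 'address' and
 * derive the access permissions from the cp15.c5 permission registers.
 * Returns 0 on success or a nonzero fault code.
 */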
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
1005
                             int is_user, uint32_t *phys_ptr, int *prot)
1006
{
1007
    int n;
1008
    uint32_t mask;
1009
    uint32_t base;
1010

    
1011
    *phys_ptr = address;
1012
    for (n = 7; n >= 0; n--) {
1013
        base = env->cp15.c6_region[n];
1014
        if ((base & 1) == 0)
1015
            continue;
1016
        mask = 1 << ((base >> 1) & 0x1f);
1017
        /* Keep this shift separate from the above to avoid an
1018
           (undefined) << 32.  */
1019
        mask = (mask << 1) - 1;
1020
        if (((base ^ address) & ~mask) == 0)
1021
            break;
1022
    }
1023
    if (n < 0)
1024
        return 2;
1025

    
1026
    if (access_type == 2) {
1027
        mask = env->cp15.c5_insn;
1028
    } else {
1029
        mask = env->cp15.c5_data;
1030
    }
1031
    mask = (mask >> (n * 4)) & 0xf;
1032
    switch (mask) {
1033
    case 0:
1034
        return 1;
1035
    case 1:
1036
        if (is_user)
1037
          return 1;
1038
        *prot = PAGE_READ | PAGE_WRITE;
1039
        break;
1040
    case 2:
1041
        *prot = PAGE_READ;
1042
        if (!is_user)
1043
            *prot |= PAGE_WRITE;
1044
        break;
1045
    case 3:
1046
        *prot = PAGE_READ | PAGE_WRITE;
1047
        break;
1048
    case 5:
1049
        if (is_user)
1050
            return 1;
1051
        *prot = PAGE_READ;
1052
        break;
1053
    case 6:
1054
        *prot = PAGE_READ;
1055
        break;
1056
    default:
1057
        /* Bad permission.  */
1058
        return 1;
1059
    }
1060
    *prot |= PAGE_EXEC;
1061
    return 0;
1062
}
1063

    
1064
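/* Top-level translation: apply the FCSE PID remapping, then dispatch
 * to the MPU, v6 or v5 walker depending on whether translation is
 * enabled (SCTLR.M), the MPU feature and the SCTLR XP bit.
 */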
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
1065
                                int access_type, int is_user,
1066
                                uint32_t *phys_ptr, int *prot,
1067
                                target_ulong *page_size)
1068
{
1069
    /* Fast Context Switch Extension.  */
1070
    if (address < 0x02000000)
1071
        address += env->cp15.c13_fcse;
1072

    
1073
    if ((env->cp15.c1_sys & 1) == 0) {
1074
        /* MMU/MPU disabled.  */
1075
        *phys_ptr = address;
1076
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1077
        *page_size = TARGET_PAGE_SIZE;
1078
        return 0;
1079
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1080
        *page_size = TARGET_PAGE_SIZE;
1081
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1082
                                 prot);
1083
    } else if (env->cp15.c1_sys & (1 << 23)) {
1084
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1085
                                prot, page_size);
1086
    } else {
1087
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1088
                                prot, page_size);
1089
    }
1090
}
1091

    
1092
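/* Translate 'address' and install a TLB entry on success (returning
 * 0).  On a fault, record the fault status and address in the cp15
 * FSR/FAR fields, set exception_index and return 1 so the caller can
 * raise the exception.
 */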
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
1093
                              int access_type, int mmu_idx)
1094
{
1095
    uint32_t phys_addr;
1096
    target_ulong page_size;
1097
    int prot;
1098
    int ret, is_user;
1099

    
1100
    is_user = mmu_idx == MMU_USER_IDX;
1101
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
1102
                        &page_size);
1103
    if (ret == 0) {
1104
        /* Map a single [sub]page.  */
1105
        phys_addr &= ~(uint32_t)0x3ff;
1106
        address &= ~(uint32_t)0x3ff;
1107
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
1108
        return 0;
1109
    }
1110

    
1111
    if (access_type == 2) {
1112
        env->cp15.c5_insn = ret;
1113
        env->cp15.c6_insn = address;
1114
        env->exception_index = EXCP_PREFETCH_ABORT;
1115
    } else {
1116
        env->cp15.c5_data = ret;
1117
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1118
            env->cp15.c5_data |= (1 << 11);
1119
        env->cp15.c6_data = address;
1120
        env->exception_index = EXCP_DATA_ABORT;
1121
    }
1122
    return 1;
1123
}
1124

    
1125
target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
1126
{
1127
    uint32_t phys_addr;
1128
    target_ulong page_size;
1129
    int prot;
1130
    int ret;
1131

    
1132
    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1133

    
1134
    if (ret != 0)
1135
        return -1;
1136

    
1137
    return phys_addr;
1138
}
1139

    
1140
void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
1141
{
1142
    int cp_num = (insn >> 8) & 0xf;
1143
    int cp_info = (insn >> 5) & 7;
1144
    int src = (insn >> 16) & 0xf;
1145
    int operand = insn & 0xf;
1146

    
1147
    if (env->cp[cp_num].cp_write)
1148
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1149
                                 cp_info, src, operand, val);
1150
}
1151

    
1152
uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
1153
{
1154
    int cp_num = (insn >> 8) & 0xf;
1155
    int cp_info = (insn >> 5) & 7;
1156
    int dest = (insn >> 16) & 0xf;
1157
    int operand = insn & 0xf;
1158

    
1159
    if (env->cp[cp_num].cp_read)
1160
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1161
                                       cp_info, dest, operand);
1162
    return 0;
1163
}
1164

    
1165
/* Return basic MPU access permission bits.  */
1166
static uint32_t simple_mpu_ap_bits(uint32_t val)
1167
{
1168
    uint32_t ret;
1169
    uint32_t mask;
1170
    int i;
1171
    ret = 0;
1172
    mask = 3;
1173
    for (i = 0; i < 16; i += 2) {
1174
        ret |= (val >> i) & mask;
1175
        mask <<= 2;
1176
    }
1177
    return ret;
1178
}
1179

    
1180
/* Pad basic MPU access permission bits to extended format.  */
1181
static uint32_t extended_mpu_ap_bits(uint32_t val)
1182
{
1183
    uint32_t ret;
1184
    uint32_t mask;
1185
    int i;
1186
    ret = 0;
1187
    mask = 3;
1188
    for (i = 0; i < 16; i += 2) {
1189
        ret |= (val & mask) << i;
1190
        mask <<= 2;
1191
    }
1192
    return ret;
1193
}
1194

    
1195
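/* Legacy cp15 write handler: decodes MCR accesses to the cp15
 * registers that are still handled by this hand-written switch rather
 * than via the ARMCPRegInfo table machinery above.
 */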
void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
1196
{
1197
    int op1;
1198
    int op2;
1199
    int crm;
1200

    
1201
    op1 = (insn >> 21) & 7;
1202
    op2 = (insn >> 5) & 7;
1203
    crm = insn & 0xf;
1204
    switch ((insn >> 16) & 0xf) {
1205
    case 0:
1206
        /* ID codes.  */
1207
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1208
            break;
1209
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1210
            break;
1211
        if (arm_feature(env, ARM_FEATURE_V7)
1212
                && op1 == 2 && crm == 0 && op2 == 0) {
1213
            env->cp15.c0_cssel = val & 0xf;
1214
            break;
1215
        }
1216
        goto bad_reg;
1217
    case 1: /* System configuration.  */
1218
        if (arm_feature(env, ARM_FEATURE_V7)
1219
                && op1 == 0 && crm == 1 && op2 == 0) {
1220
            env->cp15.c1_scr = val;
1221
            break;
1222
        }
1223
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1224
            op2 = 0;
1225
        switch (op2) {
1226
        case 0:
1227
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1228
                env->cp15.c1_sys = val;
1229
            /* ??? Lots of these bits are not implemented.  */
1230
            /* This may enable/disable the MMU, so do a TLB flush.  */
1231
            tlb_flush(env, 1);
1232
            break;
1233
        case 1: /* Auxiliary control register.  */
1234
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1235
                env->cp15.c1_xscaleauxcr = val;
1236
                break;
1237
            }
1238
            /* Not implemented.  */
1239
            break;
1240
        case 2:
1241
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1242
                goto bad_reg;
1243
            if (env->cp15.c1_coproc != val) {
1244
                env->cp15.c1_coproc = val;
1245
                /* ??? Is this safe when called from within a TB?  */
1246
                tb_flush(env);
1247
            }
1248
            break;
1249
        default:
1250
            goto bad_reg;
1251
        }
1252
        break;
1253
    case 2: /* MMU Page table control / MPU cache control.  */
1254
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1255
            switch (op2) {
1256
            case 0:
1257
                env->cp15.c2_data = val;
1258
                break;
1259
            case 1:
1260
                env->cp15.c2_insn = val;
1261
                break;
1262
            default:
1263
                goto bad_reg;
1264
            }
1265
        } else {
1266
            switch (op2) {
1267
            case 0:
1268
                env->cp15.c2_base0 = val;
1269
                break;
1270
            case 1:
1271
                env->cp15.c2_base1 = val;
1272
                break;
1273
            case 2:
1274
                val &= 7;
1275
                env->cp15.c2_control = val;
1276
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1277
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1278
                break;
1279
            default:
1280
                goto bad_reg;
1281
            }
1282
        }
1283
        break;
1284
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1285
        env->cp15.c3 = val;
1286
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1287
        break;
1288
    case 4: /* Reserved.  */
1289
        goto bad_reg;
1290
    case 5: /* MMU Fault status / MPU access permission.  */
1291
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1292
            op2 = 0;
1293
        switch (op2) {
1294
        case 0:
1295
            if (arm_feature(env, ARM_FEATURE_MPU))
1296
                val = extended_mpu_ap_bits(val);
1297
            env->cp15.c5_data = val;
1298
            break;
1299
        case 1:
1300
            if (arm_feature(env, ARM_FEATURE_MPU))
1301
                val = extended_mpu_ap_bits(val);
1302
            env->cp15.c5_insn = val;
1303
            break;
1304
        case 2:
1305
            if (!arm_feature(env, ARM_FEATURE_MPU))
1306
                goto bad_reg;
1307
            env->cp15.c5_data = val;
1308
            break;
1309
        case 3:
1310
            if (!arm_feature(env, ARM_FEATURE_MPU))
1311
                goto bad_reg;
1312
            env->cp15.c5_insn = val;
1313
            break;
1314
        default:
1315
            goto bad_reg;
1316
        }
1317
        break;
1318
    case 6: /* MMU Fault address / MPU base/size.  */
1319
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1320
            if (crm >= 8)
1321
                goto bad_reg;
1322
            env->cp15.c6_region[crm] = val;
1323
        } else {
1324
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1325
                op2 = 0;
1326
            switch (op2) {
1327
            case 0:
1328
                env->cp15.c6_data = val;
1329
                break;
1330
            case 1: /* ??? This is WFAR on armv6 */
1331
            case 2:
1332
                env->cp15.c6_insn = val;
1333
                break;
1334
            default:
1335
                goto bad_reg;
1336
            }
1337
        }
1338
        break;
1339
    case 7: /* Cache control.  */
1340
        env->cp15.c15_i_max = 0x000;
1341
        env->cp15.c15_i_min = 0xff0;
1342
        if (op1 != 0) {
1343
            goto bad_reg;
1344
        }
1345
        /* No cache, so nothing to do except VA->PA translations. */
1346
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
1347
            switch (crm) {
1348
            case 4:
1349
                if (arm_feature(env, ARM_FEATURE_V7)) {
1350
                    env->cp15.c7_par = val & 0xfffff6ff;
1351
                } else {
1352
                    env->cp15.c7_par = val & 0xfffff1ff;
1353
                }
1354
                break;
1355
            case 8: {
1356
                uint32_t phys_addr;
1357
                target_ulong page_size;
1358
                int prot;
1359
                int ret, is_user = op2 & 2;
1360
                int access_type = op2 & 1;
1361

    
1362
                if (op2 & 4) {
1363
                    /* Other states are only available with TrustZone */
1364
                    goto bad_reg;
1365
                }
1366
                ret = get_phys_addr(env, val, access_type, is_user,
1367
                                    &phys_addr, &prot, &page_size);
1368
                if (ret == 0) {
1369
                    /* We do not set any attribute bits in the PAR */
1370
                    if (page_size == (1 << 24)
1371
                        && arm_feature(env, ARM_FEATURE_V7)) {
1372
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1373
                    } else {
1374
                        env->cp15.c7_par = phys_addr & 0xfffff000;
1375
                    }
1376
                } else {
1377
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1378
                                       ((ret & (12 << 1)) >> 6) |
1379
                                       ((ret & 0xf) << 1) | 1;
1380
                }
1381
                break;
1382
            }
1383
            }
1384
        }
1385
        break;
1386
    case 8: /* MMU TLB control.  */
1387
        switch (op2) {
1388
        case 0: /* Invalidate all (TLBIALL) */
1389
            tlb_flush(env, 1);
1390
            break;
1391
        case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
1392
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1393
            break;
1394
        case 2: /* Invalidate by ASID (TLBIASID) */
1395
            tlb_flush(env, val == 0);
1396
            break;
1397
        case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
1398
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1399
            break;
1400
        default:
1401
            goto bad_reg;
1402
        }
1403
        break;
1404
    case 9:
1405
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1406
            break;
1407
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1408
            break; /* Ignore ReadBuffer access */
1409
        switch (crm) {
1410
        case 0: /* Cache lockdown.  */
1411
            switch (op1) {
1412
            case 0: /* L1 cache.  */
1413
                switch (op2) {
1414
                case 0:
1415
                    env->cp15.c9_data = val;
1416
                    break;
1417
                case 1:
1418
                    env->cp15.c9_insn = val;
1419
                    break;
1420
                default:
1421
                    goto bad_reg;
1422
                }
1423
                break;
1424
            case 1: /* L2 cache.  */
1425
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1426
                break;
1427
            default:
1428
                goto bad_reg;
1429
            }
1430
            break;
1431
        case 1: /* TCM memory region registers.  */
1432
            /* Not implemented.  */
1433
            goto bad_reg;
1434
        case 12: /* Performance monitor control */
1435
            /* Performance monitors are implementation defined in v7,
1436
             * but with an ARM recommended set of registers, which we
1437
             * follow (although we don't actually implement any counters)
1438
             */
1439
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1440
                goto bad_reg;
1441
            }
1442
            switch (op2) {
1443
            case 0: /* performance monitor control register */
1444
                /* only the DP, X, D and E bits are writable */
1445
                env->cp15.c9_pmcr &= ~0x39;
1446
                env->cp15.c9_pmcr |= (val & 0x39);
1447
                break;
1448
            case 1: /* Count enable set register */
1449
                val &= (1 << 31);
1450
                env->cp15.c9_pmcnten |= val;
1451
                break;
1452
            case 2: /* Count enable clear */
1453
                val &= (1 << 31);
1454
                env->cp15.c9_pmcnten &= ~val;
1455
                break;
1456
            case 3: /* Overflow flag status */
1457
                env->cp15.c9_pmovsr &= ~val;
1458
                break;
1459
            case 4: /* Software increment */
1460
                /* RAZ/WI since we don't implement the software-count event */
1461
                break;
1462
            case 5: /* Event counter selection register */
1463
                /* Since we don't implement any events, writing to this register
1464
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1465
                 */
1466
                break;
1467
            default:
1468
                goto bad_reg;
1469
            }
1470
            break;
1471
        case 13: /* Performance counters */
1472
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1473
                goto bad_reg;
1474
            }
1475
            switch (op2) {
1476
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
1477
                break;
1478
            case 1: /* Event type select */
1479
                env->cp15.c9_pmxevtyper = val & 0xff;
1480
                break;
1481
            case 2: /* Event count register */
1482
                /* Unimplemented (we have no events), RAZ/WI */
1483
                break;
1484
            default:
1485
                goto bad_reg;
1486
            }
1487
            break;
1488
        case 14: /* Performance monitor control */
1489
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1490
                goto bad_reg;
1491
            }
1492
            switch (op2) {
1493
            case 0: /* user enable */
1494
                env->cp15.c9_pmuserenr = val & 1;
1495
                /* changes access rights for cp registers, so flush tbs */
1496
                tb_flush(env);
1497
                break;
1498
            case 1: /* interrupt enable set */
1499
                /* We have no event counters so only the C bit can be changed */
1500
                val &= (1 << 31);
1501
                env->cp15.c9_pminten |= val;
1502
                break;
1503
            case 2: /* interrupt enable clear */
1504
                val &= (1 << 31);
1505
                env->cp15.c9_pminten &= ~val;
1506
                break;
1507
            }
1508
            break;
1509
        default:
1510
            goto bad_reg;
1511
        }
1512
        break;
1513
    case 10: /* MMU TLB lockdown.  */
1514
        /* ??? TLB lockdown not implemented.  */
1515
        break;
1516
    case 12: /* Reserved.  */
1517
        goto bad_reg;
1518
    case 13: /* Process ID.  */
1519
        switch (op2) {
1520
        case 0:
1521
            /* Unlike real hardware the qemu TLB uses virtual addresses,
1522
               not modified virtual addresses, so this causes a TLB flush.
1523
             */
1524
            if (env->cp15.c13_fcse != val)
1525
              tlb_flush(env, 1);
1526
            env->cp15.c13_fcse = val;
1527
            break;
1528
        case 1:
1529
            /* This changes the ASID, so do a TLB flush.  */
1530
            if (env->cp15.c13_context != val
1531
                && !arm_feature(env, ARM_FEATURE_MPU))
1532
              tlb_flush(env, 0);
1533
            env->cp15.c13_context = val;
1534
            break;
1535
        default:
1536
            goto bad_reg;
1537
        }
1538
        break;
1539
    case 14: /* Generic timer */
1540
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1541
            /* Dummy implementation: RAZ/WI for all */
1542
            break;
1543
        }
1544
        goto bad_reg;
1545
    case 15: /* Implementation specific.  */
1546
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1547
            if (op2 == 0 && crm == 1) {
1548
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1549
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1550
                    tb_flush(env);
1551
                    env->cp15.c15_cpar = val & 0x3fff;
1552
                }
1553
                break;
1554
            }
1555
            goto bad_reg;
1556
        }
1557
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1558
            switch (crm) {
1559
            case 0:
1560
                break;
1561
            case 1: /* Set TI925T configuration.  */
1562
                env->cp15.c15_ticonfig = val & 0xe7;
1563
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1564
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1565
                break;
1566
            case 2: /* Set I_max.  */
1567
                env->cp15.c15_i_max = val;
1568
                break;
1569
            case 3: /* Set I_min.  */
1570
                env->cp15.c15_i_min = val;
1571
                break;
1572
            case 4: /* Set thread-ID.  */
1573
                env->cp15.c15_threadid = val & 0xffff;
1574
                break;
1575
            case 8: /* Wait-for-interrupt (deprecated).  */
1576
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1577
                break;
1578
            default:
1579
                goto bad_reg;
1580
            }
1581
        }
1582
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
1583
            switch (crm) {
1584
            case 0:
1585
                if ((op1 == 0) && (op2 == 0)) {
1586
                    env->cp15.c15_power_control = val;
1587
                } else if ((op1 == 0) && (op2 == 1)) {
1588
                    env->cp15.c15_diagnostic = val;
1589
                } else if ((op1 == 0) && (op2 == 2)) {
1590
                    env->cp15.c15_power_diagnostic = val;
1591
                }
1592
            default:
1593
                break;
1594
            }
1595
        }
1596
        break;
1597
    }
1598
    return;
1599
bad_reg:
1600
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1601
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1602
              (insn >> 16) & 0xf, crm, op1, op2);
1603
}
1604

    
1605
uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
1606
{
1607
    int op1;
1608
    int op2;
1609
    int crm;
1610

    
1611
    op1 = (insn >> 21) & 7;
1612
    op2 = (insn >> 5) & 7;
1613
    crm = insn & 0xf;
1614
    switch ((insn >> 16) & 0xf) {
1615
    case 0: /* ID codes.  */
1616
        switch (op1) {
1617
        case 0:
1618
            switch (crm) {
1619
            case 0:
1620
                switch (op2) {
1621
                case 0: /* Device ID.  */
1622
                    return env->cp15.c0_cpuid;
1623
                case 1: /* Cache Type.  */
1624
                    return env->cp15.c0_cachetype;
1625
                case 2: /* TCM status.  */
1626
                    return 0;
1627
                case 3: /* TLB type register.  */
1628
                    return 0; /* No lockable TLB entries.  */
1629
                case 5: /* MPIDR */
1630
                    /* The MPIDR was standardised in v7; prior to
1631
                     * this it was implemented only in the 11MPCore.
1632
                     * For all other pre-v7 cores it does not exist.
1633
                     */
1634
                    if (arm_feature(env, ARM_FEATURE_V7) ||
1635
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1636
                        int mpidr = env->cpu_index;
1637
                        /* We don't support setting cluster ID ([8..11])
1638
                         * so these bits always RAZ.
1639
                         */
1640
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1641
                            mpidr |= (1 << 31);
1642
                            /* Cores which are uniprocessor (non-coherent)
1643
                             * but still implement the MP extensions set
1644
                             * bit 30. (For instance, A9UP.) However we do
1645
                             * not currently model any of those cores.
1646
                             */
1647
                        }
1648
                        return mpidr;
1649
                    }
1650
                    /* otherwise fall through to the unimplemented-reg case */
1651
                default:
1652
                    goto bad_reg;
1653
                }
1654
            case 1:
1655
                if (!arm_feature(env, ARM_FEATURE_V6))
1656
                    goto bad_reg;
1657
                return env->cp15.c0_c1[op2];
1658
            case 2:
1659
                if (!arm_feature(env, ARM_FEATURE_V6))
1660
                    goto bad_reg;
1661
                return env->cp15.c0_c2[op2];
1662
            case 3: case 4: case 5: case 6: case 7:
1663
                return 0;
1664
            default:
1665
                goto bad_reg;
1666
            }
1667
        case 1:
1668
            /* These registers aren't documented on arm11 cores.  However
1669
               Linux looks at them anyway.  */
1670
            if (!arm_feature(env, ARM_FEATURE_V6))
1671
                goto bad_reg;
1672
            if (crm != 0)
1673
                goto bad_reg;
1674
            if (!arm_feature(env, ARM_FEATURE_V7))
1675
                return 0;
1676

    
1677
            switch (op2) {
1678
            case 0:
1679
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1680
            case 1:
1681
                return env->cp15.c0_clid;
1682
            case 7:
1683
                return 0;
1684
            }
1685
            goto bad_reg;
1686
        case 2:
1687
            if (op2 != 0 || crm != 0)
1688
                goto bad_reg;
1689
            return env->cp15.c0_cssel;
1690
        default:
1691
            goto bad_reg;
1692
        }
1693
    case 1: /* System configuration.  */
1694
        if (arm_feature(env, ARM_FEATURE_V7)
1695
            && op1 == 0 && crm == 1 && op2 == 0) {
1696
            return env->cp15.c1_scr;
1697
        }
1698
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1699
            op2 = 0;
1700
        switch (op2) {
1701
        case 0: /* Control register.  */
1702
            return env->cp15.c1_sys;
1703
        case 1: /* Auxiliary control register.  */
1704
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1705
                return env->cp15.c1_xscaleauxcr;
1706
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1707
                goto bad_reg;
1708
            switch (ARM_CPUID(env)) {
1709
            case ARM_CPUID_ARM1026:
1710
                return 1;
1711
            case ARM_CPUID_ARM1136:
1712
            case ARM_CPUID_ARM1136_R2:
1713
            case ARM_CPUID_ARM1176:
1714
                return 7;
1715
            case ARM_CPUID_ARM11MPCORE:
1716
                return 1;
1717
            case ARM_CPUID_CORTEXA8:
1718
                return 2;
1719
            case ARM_CPUID_CORTEXA9:
1720
            case ARM_CPUID_CORTEXA15:
1721
                return 0;
1722
            default:
1723
                goto bad_reg;
1724
            }
1725
        case 2: /* Coprocessor access register.  */
1726
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1727
                goto bad_reg;
1728
            return env->cp15.c1_coproc;
1729
        default:
1730
            goto bad_reg;
1731
        }
1732
    case 2: /* MMU Page table control / MPU cache control.  */
1733
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1734
            switch (op2) {
1735
            case 0:
1736
                return env->cp15.c2_data;
1737
                break;
1738
            case 1:
1739
                return env->cp15.c2_insn;
1740
                break;
1741
            default:
1742
                goto bad_reg;
1743
            }
1744
        } else {
1745
            switch (op2) {
1746
            case 0:
1747
                return env->cp15.c2_base0;
1748
            case 1:
1749
                return env->cp15.c2_base1;
1750
            case 2:
1751
                return env->cp15.c2_control;
1752
            default:
1753
                goto bad_reg;
1754
            }
1755
        }
1756
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1757
        return env->cp15.c3;
1758
    case 4: /* Reserved.  */
1759
        goto bad_reg;
1760
    case 5: /* MMU Fault status / MPU access permission.  */
1761
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1762
            op2 = 0;
1763
        switch (op2) {
1764
        case 0:
1765
            if (arm_feature(env, ARM_FEATURE_MPU))
1766
                return simple_mpu_ap_bits(env->cp15.c5_data);
1767
            return env->cp15.c5_data;
1768
        case 1:
1769
            if (arm_feature(env, ARM_FEATURE_MPU))
1770
                return simple_mpu_ap_bits(env->cp15.c5_insn);
1771
            return env->cp15.c5_insn;
1772
        case 2:
1773
            if (!arm_feature(env, ARM_FEATURE_MPU))
1774
                goto bad_reg;
1775
            return env->cp15.c5_data;
1776
        case 3:
1777
            if (!arm_feature(env, ARM_FEATURE_MPU))
1778
                goto bad_reg;
1779
            return env->cp15.c5_insn;
1780
        default:
1781
            goto bad_reg;
1782
        }
1783
    case 6: /* MMU Fault address.  */
1784
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1785
            if (crm >= 8)
1786
                goto bad_reg;
1787
            return env->cp15.c6_region[crm];
1788
        } else {
1789
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1790
                op2 = 0;
1791
            switch (op2) {
1792
            case 0:
1793
                return env->cp15.c6_data;
1794
            case 1:
1795
                if (arm_feature(env, ARM_FEATURE_V6)) {
1796
                    /* Watchpoint Fault Address.  */
1797
                    return 0; /* Not implemented.  */
1798
                } else {
1799
                    /* Instruction Fault Address.  */
1800
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
1801
                       shouldn't do any harm.  */
1802
                    return env->cp15.c6_insn;
1803
                }
1804
            case 2:
1805
                if (arm_feature(env, ARM_FEATURE_V6)) {
1806
                    /* Instruction Fault Address.  */
1807
                    return env->cp15.c6_insn;
1808
                } else {
1809
                    goto bad_reg;
1810
                }
1811
            default:
1812
                goto bad_reg;
1813
            }
1814
        }
1815
    case 7: /* Cache control.  */
1816
        if (crm == 4 && op1 == 0 && op2 == 0) {
1817
            return env->cp15.c7_par;
1818
        }
1819
        /* FIXME: Should only clear Z flag if destination is r15.  */
1820
        env->ZF = 0;
1821
        return 0;
1822
    case 8: /* MMU TLB control.  */
1823
        goto bad_reg;
1824
    case 9:
1825
        switch (crm) {
1826
        case 0: /* Cache lockdown */
1827
            switch (op1) {
1828
            case 0: /* L1 cache.  */
1829
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1830
                    return 0;
1831
                }
1832
                switch (op2) {
1833
                case 0:
1834
                    return env->cp15.c9_data;
1835
                case 1:
1836
                    return env->cp15.c9_insn;
1837
                default:
1838
                    goto bad_reg;
1839
                }
1840
            case 1: /* L2 cache */
1841
                /* L2 Lockdown and Auxiliary control.  */
1842
                switch (op2) {
1843
                case 0:
1844
                    /* L2 cache lockdown (A8 only) */
1845
                    return 0;
1846
                case 2:
1847
                    /* L2 cache auxiliary control (A8) or control (A15) */
1848
                    if (ARM_CPUID(env) == ARM_CPUID_CORTEXA15) {
1849
                        /* Linux wants the number of processors from here.
1850
                         * Might as well set the interrupt-controller bit too.
1851
                         */
1852
                        return ((smp_cpus - 1) << 24) | (1 << 23);
1853
                    }
1854
                    return 0;
1855
                case 3:
1856
                    /* L2 cache extended control (A15) */
1857
                    return 0;
1858
                default:
1859
                    goto bad_reg;
1860
                }
1861
            default:
1862
                goto bad_reg;
1863
            }
1864
            break;
1865
        case 12: /* Performance monitor control */
1866
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1867
                goto bad_reg;
1868
            }
1869
            switch (op2) {
1870
            case 0: /* performance monitor control register */
1871
                return env->cp15.c9_pmcr;
1872
            case 1: /* count enable set */
1873
            case 2: /* count enable clear */
1874
                return env->cp15.c9_pmcnten;
1875
            case 3: /* overflow flag status */
1876
                return env->cp15.c9_pmovsr;
1877
            case 4: /* software increment */
1878
            case 5: /* event counter selection register */
1879
                return 0; /* Unimplemented, RAZ/WI */
1880
            default:
1881
                goto bad_reg;
1882
            }
1883
        case 13: /* Performance counters */
1884
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1885
                goto bad_reg;
1886
            }
1887
            switch (op2) {
1888
            case 1: /* Event type select */
1889
                return env->cp15.c9_pmxevtyper;
1890
            case 0: /* Cycle count register */
1891
            case 2: /* Event count register */
1892
                /* Unimplemented, so RAZ/WI */
1893
                return 0;
1894
            default:
1895
                goto bad_reg;
1896
            }
1897
        case 14: /* Performance monitor control */
1898
            if (!arm_feature(env, ARM_FEATURE_V7)) {
1899
                goto bad_reg;
1900
            }
1901
            switch (op2) {
1902
            case 0: /* user enable */
1903
                return env->cp15.c9_pmuserenr;
1904
            case 1: /* interrupt enable set */
1905
            case 2: /* interrupt enable clear */
1906
                return env->cp15.c9_pminten;
1907
            default:
1908
                goto bad_reg;
1909
            }
1910
        default:
1911
            goto bad_reg;
1912
        }
1913
        break;
1914
    case 10: /* MMU TLB lockdown.  */
1915
        /* ??? TLB lockdown not implemented.  */
1916
        return 0;
1917
    case 11: /* TCM DMA control.  */
1918
    case 12: /* Reserved.  */
1919
        goto bad_reg;
1920
    case 13: /* Process ID.  */
1921
        switch (op2) {
1922
        case 0:
1923
            return env->cp15.c13_fcse;
1924
        case 1:
1925
            return env->cp15.c13_context;
1926
        default:
1927
            goto bad_reg;
1928
        }
1929
    case 14: /* Generic timer */
1930
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1931
            /* Dummy implementation: RAZ/WI for all */
1932
            return 0;
1933
        }
1934
        goto bad_reg;
1935
    case 15: /* Implementation specific.  */
1936
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1937
            if (op2 == 0 && crm == 1)
1938
                return env->cp15.c15_cpar;
1939

    
1940
            goto bad_reg;
1941
        }
1942
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1943
            switch (crm) {
1944
            case 0:
1945
                return 0;
1946
            case 1: /* Read TI925T configuration.  */
1947
                return env->cp15.c15_ticonfig;
1948
            case 2: /* Read I_max.  */
1949
                return env->cp15.c15_i_max;
1950
            case 3: /* Read I_min.  */
1951
                return env->cp15.c15_i_min;
1952
            case 4: /* Read thread-ID.  */
1953
                return env->cp15.c15_threadid;
1954
            case 8: /* TI925T_status */
1955
                return 0;
1956
            }
1957
            /* TODO: Peripheral port remap register:
1958
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
1959
             * controller base address at $rn & ~0xfff and map size of
1960
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
1961
            goto bad_reg;
1962
        }
1963
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
1964
            switch (crm) {
1965
            case 0:
1966
                if ((op1 == 4) && (op2 == 0)) {
1967
                    /* The config_base_address should hold the value of
1968
                     * the peripheral base. ARM should get this from a CPU
1969
                     * object property, but that support isn't available in
1970
                     * December 2011. Default to 0 for now and board models
1971
                     * that care can set it by a private hook */
1972
                    return env->cp15.c15_config_base_address;
1973
                } else if ((op1 == 0) && (op2 == 0)) {
1974
                    /* power_control should be set to maximum latency. Again,
1975
                       default to 0 and set by private hook */
1976
                    return env->cp15.c15_power_control;
1977
                } else if ((op1 == 0) && (op2 == 1)) {
1978
                    return env->cp15.c15_diagnostic;
1979
                } else if ((op1 == 0) && (op2 == 2)) {
1980
                    return env->cp15.c15_power_diagnostic;
1981
                }
1982
                break;
1983
            case 1: /* NEON Busy */
1984
                return 0;
1985
            case 5: /* tlb lockdown */
1986
            case 6:
1987
            case 7:
1988
                if ((op1 == 5) && (op2 == 2)) {
1989
                    return 0;
1990
                }
1991
                break;
1992
            default:
1993
                break;
1994
            }
1995
            goto bad_reg;
1996
        }
1997
        return 0;
1998
    }
1999
bad_reg:
2000
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2001
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2002
              (insn >> 16) & 0xf, crm, op1, op2);
2003
    return 0;
2004
}
2005

    
2006
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
2007
{
2008
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2009
        env->regs[13] = val;
2010
    } else {
2011
        env->banked_r13[bank_number(env, mode)] = val;
2012
    }
2013
}
2014

    
2015
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
2016
{
2017
    if ((env->uncached_cpsr & CPSR_M) == mode) {
2018
        return env->regs[13];
2019
    } else {
2020
        return env->banked_r13[bank_number(env, mode)];
2021
    }
2022
}
2023

    
2024
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2025
{
2026
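    /* 'reg' is the SYSm field of the MRS encoding; the case values below
     * follow the v7-M system register numbering (0..7 xPSR views, 8/9 the
     * stack pointers, 16..20 the mask registers and CONTROL).
     */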
    switch (reg) {
2027
    case 0: /* APSR */
2028
        return xpsr_read(env) & 0xf8000000;
2029
    case 1: /* IAPSR */
2030
        return xpsr_read(env) & 0xf80001ff;
2031
    case 2: /* EAPSR */
2032
        return xpsr_read(env) & 0xff00fc00;
2033
    case 3: /* xPSR */
2034
        return xpsr_read(env) & 0xff00fdff;
2035
    case 5: /* IPSR */
2036
        return xpsr_read(env) & 0x000001ff;
2037
    case 6: /* EPSR */
2038
        return xpsr_read(env) & 0x0700fc00;
2039
    case 7: /* IEPSR */
2040
        return xpsr_read(env) & 0x0700edff;
2041
    case 8: /* MSP */
2042
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2043
    case 9: /* PSP */
2044
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2045
    case 16: /* PRIMASK */
2046
        return (env->uncached_cpsr & CPSR_I) != 0;
2047
    case 17: /* BASEPRI */
2048
    case 18: /* BASEPRI_MAX */
2049
        return env->v7m.basepri;
2050
    case 19: /* FAULTMASK */
2051
        return (env->uncached_cpsr & CPSR_F) != 0;
2052
    case 20: /* CONTROL */
2053
        return env->v7m.control;
2054
    default:
2055
        /* ??? For debugging only.  */
2056
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2057
        return 0;
2058
    }
2059
}
2060

    
2061
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
2062
{
2063
    switch (reg) {
2064
    case 0: /* APSR */
2065
        xpsr_write(env, val, 0xf8000000);
2066
        break;
2067
    case 1: /* IAPSR */
2068
        xpsr_write(env, val, 0xf8000000);
2069
        break;
2070
    case 2: /* EAPSR */
2071
        xpsr_write(env, val, 0xfe00fc00);
2072
        break;
2073
    case 3: /* xPSR */
2074
        xpsr_write(env, val, 0xfe00fc00);
2075
        break;
2076
    case 5: /* IPSR */
2077
        /* IPSR bits are readonly.  */
2078
        break;
2079
    case 6: /* EPSR */
2080
        xpsr_write(env, val, 0x0600fc00);
2081
        break;
2082
    case 7: /* IEPSR */
2083
        xpsr_write(env, val, 0x0600fc00);
2084
        break;
2085
    case 8: /* MSP */
2086
        if (env->v7m.current_sp)
2087
            env->v7m.other_sp = val;
2088
        else
2089
            env->regs[13] = val;
2090
        break;
2091
    case 9: /* PSP */
2092
        if (env->v7m.current_sp)
2093
            env->regs[13] = val;
2094
        else
2095
            env->v7m.other_sp = val;
2096
        break;
2097
    case 16: /* PRIMASK */
2098
        if (val & 1)
2099
            env->uncached_cpsr |= CPSR_I;
2100
        else
2101
            env->uncached_cpsr &= ~CPSR_I;
2102
        break;
2103
    case 17: /* BASEPRI */
2104
        env->v7m.basepri = val & 0xff;
2105
        break;
2106
    case 18: /* BASEPRI_MAX */
2107
        val &= 0xff;
2108
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2109
            env->v7m.basepri = val;
2110
        break;
2111
    case 19: /* FAULTMASK */
2112
        if (val & 1)
2113
            env->uncached_cpsr |= CPSR_F;
2114
        else
2115
            env->uncached_cpsr &= ~CPSR_F;
2116
        break;
2117
    case 20: /* CONTROL */
2118
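        /* CONTROL bit 0 is nPRIV, bit 1 selects the process stack (PSP).  */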
        env->v7m.control = val & 3;
2119
        switch_v7m_sp(env, (val & 2) != 0);
2120
        break;
2121
    default:
2122
        /* ??? For debugging only.  */
2123
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2124
        return;
2125
    }
2126
}
2127

    
2128
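/* Install board- or device-specific handlers for coprocessors 0..14;
   cp15 accesses are handled by the helpers above.  */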
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2129
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2130
                void *opaque)
2131
{
2132
    if (cpnum < 0 || cpnum > 14) {
2133
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2134
        return;
2135
    }
2136

    
2137
    env->cp[cpnum].cp_read = cp_read;
2138
    env->cp[cpnum].cp_write = cp_write;
2139
    env->cp[cpnum].opaque = opaque;
2140
}
2141

    
2142
#endif
2143

    
2144
/* Note that signed overflow is undefined in C.  The following routines are
2145
   careful to use unsigned types where modulo arithmetic is required.
2146
   Failure to do so _will_ break on newer gcc.  */
2147

    
2148
/* Signed saturating arithmetic.  */
2149

    
2150
/* Perform 16-bit signed saturating addition.  */
2151
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2152
{
2153
    uint16_t res;
2154

    
2155
    res = a + b;
2156
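    /* Signed overflow occurred if the operands have the same sign but the
       result's sign differs; saturate towards the sign of the operands.  */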
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2157
        if (a & 0x8000)
2158
            res = 0x8000;
2159
        else
2160
            res = 0x7fff;
2161
    }
2162
    return res;
2163
}
2164

    
2165
/* Perform 8-bit signed saturating addition.  */
2166
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2167
{
2168
    uint8_t res;
2169

    
2170
    res = a + b;
2171
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2172
        if (a & 0x80)
2173
            res = 0x80;
2174
        else
2175
            res = 0x7f;
2176
    }
2177
    return res;
2178
}
2179

    
2180
/* Perform 16-bit signed saturating subtraction.  */
2181
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2182
{
2183
    uint16_t res;
2184

    
2185
    res = a - b;
2186
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2187
        if (a & 0x8000)
2188
            res = 0x8000;
2189
        else
2190
            res = 0x7fff;
2191
    }
2192
    return res;
2193
}
2194

    
2195
/* Perform 8-bit signed saturating subtraction.  */
2196
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2197
{
2198
    uint8_t res;
2199

    
2200
    res = a - b;
2201
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2202
        if (a & 0x80)
2203
            res = 0x80;
2204
        else
2205
            res = 0x7f;
2206
    }
2207
    return res;
2208
}
2209

    
2210
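/* op_addsub.h is a template: it is included several times below with
   different ADD16/SUB16/ADD8/SUB8 and PFX definitions to generate the
   saturating, modulo and halving parallel add/subtract helpers.  */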
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2211
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2212
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2213
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2214
#define PFX q
2215

    
2216
#include "op_addsub.h"
2217

    
2218
/* Unsigned saturating arithmetic.  */
2219
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2220
{
2221
    uint16_t res;
2222
    res = a + b;
2223
    if (res < a)
2224
        res = 0xffff;
2225
    return res;
2226
}
2227

    
2228
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2229
{
2230
    if (a > b)
2231
        return a - b;
2232
    else
2233
        return 0;
2234
}
2235

    
2236
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2237
{
2238
    uint8_t res;
2239
    res = a + b;
2240
    if (res < a)
2241
        res = 0xff;
2242
    return res;
2243
}
2244

    
2245
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2246
{
2247
    if (a > b)
2248
        return a - b;
2249
    else
2250
        return 0;
2251
}
2252

    
2253
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2254
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2255
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2256
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2257
#define PFX uq
2258

    
2259
#include "op_addsub.h"
2260

    
2261
/* Signed modulo arithmetic.  */
2262
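/* The ARITH_GE variants also accumulate the CPSR.GE bits in the 'ge'
   accumulator provided by the op_addsub.h template: two bits per 16-bit
   lane and one per 8-bit lane, set here when the signed lane result is
   non-negative (the unsigned forms below use carry/borrow instead).  */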
#define SARITH16(a, b, n, op) do { \
2263
    int32_t sum; \
2264
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2265
    RESULT(sum, n, 16); \
2266
    if (sum >= 0) \
2267
        ge |= 3 << (n * 2); \
2268
    } while(0)
2269

    
2270
#define SARITH8(a, b, n, op) do { \
2271
    int32_t sum; \
2272
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2273
    RESULT(sum, n, 8); \
2274
    if (sum >= 0) \
2275
        ge |= 1 << n; \
2276
    } while(0)
2277

    
2278

    
2279
#define ADD16(a, b, n) SARITH16(a, b, n, +)
2280
#define SUB16(a, b, n) SARITH16(a, b, n, -)
2281
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2282
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2283
#define PFX s
2284
#define ARITH_GE
2285

    
2286
#include "op_addsub.h"
2287

    
2288
/* Unsigned modulo arithmetic.  */
2289
#define ADD16(a, b, n) do { \
2290
    uint32_t sum; \
2291
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2292
    RESULT(sum, n, 16); \
2293
    if ((sum >> 16) == 1) \
2294
        ge |= 3 << (n * 2); \
2295
    } while(0)
2296

    
2297
#define ADD8(a, b, n) do { \
2298
    uint32_t sum; \
2299
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2300
    RESULT(sum, n, 8); \
2301
    if ((sum >> 8) == 1) \
2302
        ge |= 1 << n; \
2303
    } while(0)
2304

    
2305
#define SUB16(a, b, n) do { \
2306
    uint32_t sum; \
2307
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2308
    RESULT(sum, n, 16); \
2309
    if ((sum >> 16) == 0) \
2310
        ge |= 3 << (n * 2); \
2311
    } while(0)
2312

    
2313
#define SUB8(a, b, n) do { \
2314
    uint32_t sum; \
2315
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2316
    RESULT(sum, n, 8); \
2317
    if ((sum >> 8) == 0) \
2318
        ge |= 1 << n; \
2319
    } while(0)
2320

    
2321
#define PFX u
2322
#define ARITH_GE
2323

    
2324
#include "op_addsub.h"
2325

    
2326
/* Halved signed arithmetic.  */
2327
#define ADD16(a, b, n) \
2328
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2329
#define SUB16(a, b, n) \
2330
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2331
#define ADD8(a, b, n) \
2332
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2333
#define SUB8(a, b, n) \
2334
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2335
#define PFX sh
2336

    
2337
#include "op_addsub.h"
2338

    
2339
/* Halved unsigned arithmetic.  */
2340
#define ADD16(a, b, n) \
2341
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2342
#define SUB16(a, b, n) \
2343
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2344
#define ADD8(a, b, n) \
2345
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2346
#define SUB8(a, b, n) \
2347
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2348
#define PFX uh
2349

    
2350
#include "op_addsub.h"
2351

    
2352
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2353
{
2354
    if (a > b)
2355
        return a - b;
2356
    else
2357
        return b - a;
2358
}
2359

    
2360
/* Unsigned sum of absolute byte differences.  */
2361
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2362
{
2363
    uint32_t sum;
2364
    sum = do_usad(a, b);
2365
    sum += do_usad(a >> 8, b >> 8);
2366
    sum += do_usad(a >> 16, b >> 16);
2367
    sum += do_usad(a >> 24, b >> 24);
2368
    return sum;
2369
}
2370

    
2371
/* For ARMv6 SEL instruction.  */
2372
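/* 'flags' holds the CPSR.GE bits: each set bit selects the corresponding
   byte from a, otherwise the byte is taken from b.  */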
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2373
{
2374
    uint32_t mask;
2375

    
2376
    mask = 0;
2377
    if (flags & 1)
2378
        mask |= 0xff;
2379
    if (flags & 2)
2380
        mask |= 0xff00;
2381
    if (flags & 4)
2382
        mask |= 0xff0000;
2383
    if (flags & 8)
2384
        mask |= 0xff000000;
2385
    return (a & mask) | (b & ~mask);
2386
}
2387

    
2388
uint32_t HELPER(logicq_cc)(uint64_t val)
2389
{
2390
    return (val >> 32) | (val != 0);
2391
}
2392

    
2393
/* VFP support.  We follow the convention used for VFP instructions:
2394
   Single precision routines have an "s" suffix, double precision a
2395
   "d" suffix.  */
2396

    
2397
/* Convert host exception flags to vfp form.  */
2398
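/* The returned bits are the FPSCR cumulative exception flags: IOC (bit 0),
   DZC (bit 1), OFC (bit 2), UFC (bit 3), IXC (bit 4) and IDC (bit 7).  */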
static inline int vfp_exceptbits_from_host(int host_bits)
2399
{
2400
    int target_bits = 0;
2401

    
2402
    if (host_bits & float_flag_invalid)
2403
        target_bits |= 1;
2404
    if (host_bits & float_flag_divbyzero)
2405
        target_bits |= 2;
2406
    if (host_bits & float_flag_overflow)
2407
        target_bits |= 4;
2408
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2409
        target_bits |= 8;
2410
    if (host_bits & float_flag_inexact)
2411
        target_bits |= 0x10;
2412
    if (host_bits & float_flag_input_denormal)
2413
        target_bits |= 0x80;
2414
    return target_bits;
2415
}
2416

    
2417
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
2418
{
2419
    int i;
2420
    uint32_t fpscr;
2421

    
2422
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2423
            | (env->vfp.vec_len << 16)
2424
            | (env->vfp.vec_stride << 20);
2425
    i = get_float_exception_flags(&env->vfp.fp_status);
2426
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2427
    fpscr |= vfp_exceptbits_from_host(i);
2428
    return fpscr;
2429
}
2430

    
2431
uint32_t vfp_get_fpscr(CPUARMState *env)
2432
{
2433
    return HELPER(vfp_get_fpscr)(env);
2434
}
2435

    
2436
/* Convert vfp exception flags to target form.  */
2437
static inline int vfp_exceptbits_to_host(int target_bits)
2438
{
2439
    int host_bits = 0;
2440

    
2441
    if (target_bits & 1)
2442
        host_bits |= float_flag_invalid;
2443
    if (target_bits & 2)
2444
        host_bits |= float_flag_divbyzero;
2445
    if (target_bits & 4)
2446
        host_bits |= float_flag_overflow;
2447
    if (target_bits & 8)
2448
        host_bits |= float_flag_underflow;
2449
    if (target_bits & 0x10)
2450
        host_bits |= float_flag_inexact;
2451
    if (target_bits & 0x80)
2452
        host_bits |= float_flag_input_denormal;
2453
    return host_bits;
2454
}
2455

    
2456
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
2457
{
2458
    int i;
2459
    uint32_t changed;
2460

    
2461
    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2462
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2463
    env->vfp.vec_len = (val >> 16) & 7;
2464
    env->vfp.vec_stride = (val >> 20) & 3;
2465

    
2466
    changed ^= val;
2467
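    /* FPSCR bits [23:22] select the rounding mode: 0 = nearest even,
       1 = towards plus infinity, 2 = towards minus infinity,
       3 = towards zero.  */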
    if (changed & (3 << 22)) {
2468
        i = (val >> 22) & 3;
2469
        switch (i) {
2470
        case 0:
2471
            i = float_round_nearest_even;
2472
            break;
2473
        case 1:
2474
            i = float_round_up;
2475
            break;
2476
        case 2:
2477
            i = float_round_down;
2478
            break;
2479
        case 3:
2480
            i = float_round_to_zero;
2481
            break;
2482
        }
2483
        set_float_rounding_mode(i, &env->vfp.fp_status);
2484
    }
2485
    if (changed & (1 << 24)) {
2486
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2487
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2488
    }
2489
    if (changed & (1 << 25))
2490
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2491

    
2492
    i = vfp_exceptbits_to_host(val);
2493
    set_float_exception_flags(i, &env->vfp.fp_status);
2494
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2495
}
2496

    
2497
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
2498
{
2499
    HELPER(vfp_set_fpscr)(env, val);
2500
}
2501

    
2502
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2503

    
2504
#define VFP_BINOP(name) \
2505
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2506
{ \
2507
    float_status *fpst = fpstp; \
2508
    return float32_ ## name(a, b, fpst); \
2509
} \
2510
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2511
{ \
2512
    float_status *fpst = fpstp; \
2513
    return float64_ ## name(a, b, fpst); \
2514
}
2515
VFP_BINOP(add)
2516
VFP_BINOP(sub)
2517
VFP_BINOP(mul)
2518
VFP_BINOP(div)
2519
#undef VFP_BINOP
2520

    
2521
float32 VFP_HELPER(neg, s)(float32 a)
2522
{
2523
    return float32_chs(a);
2524
}
2525

    
2526
float64 VFP_HELPER(neg, d)(float64 a)
2527
{
2528
    return float64_chs(a);
2529
}
2530

    
2531
float32 VFP_HELPER(abs, s)(float32 a)
2532
{
2533
    return float32_abs(a);
2534
}
2535

    
2536
float64 VFP_HELPER(abs, d)(float64 a)
2537
{
2538
    return float64_abs(a);
2539
}
2540

    
2541
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
2542
{
2543
    return float32_sqrt(a, &env->vfp.fp_status);
2544
}
2545

    
2546
float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
2547
{
2548
    return float64_sqrt(a, &env->vfp.fp_status);
2549
}
2550

    
2551
/* XXX: check quiet/signaling case */
2552
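/* The comparison result is written to FPSCR[31:28] (NZCV): equal -> 0x6,
   less than -> 0x8, greater -> 0x2, unordered -> 0x3.  */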
#define DO_VFP_cmp(p, type) \
2553
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
2554
{ \
2555
    uint32_t flags; \
2556
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2557
    case 0: flags = 0x6; break; \
2558
    case -1: flags = 0x8; break; \
2559
    case 1: flags = 0x2; break; \
2560
    default: case 2: flags = 0x3; break; \
2561
    } \
2562
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2563
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2564
} \
2565
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
2566
{ \
2567
    uint32_t flags; \
2568
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2569
    case 0: flags = 0x6; break; \
2570
    case -1: flags = 0x8; break; \
2571
    case 1: flags = 0x2; break; \
2572
    default: case 2: flags = 0x3; break; \
2573
    } \
2574
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2575
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2576
}
2577
DO_VFP_cmp(s, float32)
2578
DO_VFP_cmp(d, float64)
2579
#undef DO_VFP_cmp
2580

    
2581
/* Integer to float and float to integer conversions */
2582

    
2583
#define CONV_ITOF(name, fsz, sign) \
2584
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2585
{ \
2586
    float_status *fpst = fpstp; \
2587
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
2588
}
2589

    
2590
#define CONV_FTOI(name, fsz, sign, round) \
2591
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2592
{ \
2593
    float_status *fpst = fpstp; \
2594
    if (float##fsz##_is_any_nan(x)) { \
2595
        float_raise(float_flag_invalid, fpst); \
2596
        return 0; \
2597
    } \
2598
    return float##fsz##_to_##sign##int32##round(x, fpst); \
2599
}
2600

    
2601
#define FLOAT_CONVS(name, p, fsz, sign) \
2602
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2603
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2604
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2605

    
2606
FLOAT_CONVS(si, s, 32, )
2607
FLOAT_CONVS(si, d, 64, )
2608
FLOAT_CONVS(ui, s, 32, u)
2609
FLOAT_CONVS(ui, d, 64, u)
2610

    
2611
#undef CONV_ITOF
2612
#undef CONV_FTOI
2613
#undef FLOAT_CONVS
2614

    
2615
/* floating point conversion */
2616
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
2617
{
2618
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2619
    /* ARM requires that S<->D conversion of any kind of NaN generates
2620
     * a quiet NaN by forcing the most significant frac bit to 1.
2621
     */
2622
    return float64_maybe_silence_nan(r);
2623
}
2624

    
2625
float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
2626
{
2627
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
2628
    /* ARM requires that S<->D conversion of any kind of NaN generates
2629
     * a quiet NaN by forcing the most significant frac bit to 1.
2630
     */
2631
    return float32_maybe_silence_nan(r);
2632
}
2633

    
2634
/* VFP3 fixed point conversion.  */
2635
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2636
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2637
                                    void *fpstp) \
2638
{ \
2639
    float_status *fpst = fpstp; \
2640
    float##fsz tmp; \
2641
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2642
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2643
} \
2644
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2645
                                       void *fpstp) \
2646
{ \
2647
    float_status *fpst = fpstp; \
2648
    float##fsz tmp; \
2649
    if (float##fsz##_is_any_nan(x)) { \
2650
        float_raise(float_flag_invalid, fpst); \
2651
        return 0; \
2652
    } \
2653
    tmp = float##fsz##_scalbn(x, shift, fpst); \
2654
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2655
}
2656

    
2657
VFP_CONV_FIX(sh, d, 64, int16, )
2658
VFP_CONV_FIX(sl, d, 64, int32, )
2659
VFP_CONV_FIX(uh, d, 64, uint16, u)
2660
VFP_CONV_FIX(ul, d, 64, uint32, u)
2661
VFP_CONV_FIX(sh, s, 32, int16, )
2662
VFP_CONV_FIX(sl, s, 32, int32, )
2663
VFP_CONV_FIX(uh, s, 32, uint16, u)
2664
VFP_CONV_FIX(ul, s, 32, uint32, u)
2665
#undef VFP_CONV_FIX
2666

    
2667
/* Half precision conversions.  */
2668
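/* FPSCR bit 26 is the AHP bit: when it is clear the IEEE half-precision
   format is used, otherwise the ARM alternative format (which has no
   infinities or NaNs), hence the 'ieee' flag below.  */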
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
2669
{
2670
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2671
    float32 r = float16_to_float32(make_float16(a), ieee, s);
2672
    if (ieee) {
2673
        return float32_maybe_silence_nan(r);
2674
    }
2675
    return r;
2676
}
2677

    
2678
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
2679
{
2680
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2681
    float16 r = float32_to_float16(a, ieee, s);
2682
    if (ieee) {
2683
        r = float16_maybe_silence_nan(r);
2684
    }
2685
    return float16_val(r);
2686
}
2687

    
2688
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2689
{
2690
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2691
}
2692

    
2693
uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2694
{
2695
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2696
}
2697

    
2698
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2699
{
2700
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2701
}
2702

    
2703
uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2704
{
2705
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2706
}
2707

    
2708
#define float32_two make_float32(0x40000000)
2709
#define float32_three make_float32(0x40400000)
2710
#define float32_one_point_five make_float32(0x3fc00000)
2711

    
2712
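/* VRECPS: returns 2.0 - a * b, the Newton-Raphson refinement step for a
   reciprocal estimate; when one operand is infinity and the other is zero
   or a denormal, 2.0 is returned directly as the architecture requires.  */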
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
2713
{
2714
    float_status *s = &env->vfp.standard_fp_status;
2715
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2716
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2717
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2718
            float_raise(float_flag_input_denormal, s);
2719
        }
2720
        return float32_two;
2721
    }
2722
    return float32_sub(float32_two, float32_mul(a, b, s), s);
2723
}
2724

    
2725
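/* VRSQRTS: returns (3.0 - a * b) / 2.0, the Newton-Raphson step for a
   reciprocal square root estimate, with the same infinity-times-zero
   special case yielding 1.5.  */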
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
2726
{
2727
    float_status *s = &env->vfp.standard_fp_status;
2728
    float32 product;
2729
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2730
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2731
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2732
            float_raise(float_flag_input_denormal, s);
2733
        }
2734
        return float32_one_point_five;
2735
    }
2736
    product = float32_mul(a, b, s);
2737
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2738
}
2739

    
2740
/* NEON helpers.  */
2741

    
2742
/* Constants 256 and 512 are used in some helpers; we avoid relying on
2743
 * int->float conversions at run-time.  */
2744
#define float64_256 make_float64(0x4070000000000000LL)
2745
#define float64_512 make_float64(0x4080000000000000LL)
2746

    
2747
/* The algorithm that must be used to calculate the estimate
2748
 * is specified by the ARM ARM.
2749
 */
2750
static float64 recip_estimate(float64 a, CPUARMState *env)
2751
{
2752
    /* These calculations mustn't set any fp exception flags,
2753
     * so we use a local copy of the fp_status.
2754
     */
2755
    float_status dummy_status = env->vfp.standard_fp_status;
2756
    float_status *s = &dummy_status;
2757
    /* q = (int)(a * 512.0) */
2758
    float64 q = float64_mul(float64_512, a, s);
2759
    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2760

    
2761
    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2762
    q = int64_to_float64(q_int, s);
2763
    q = float64_add(q, float64_half, s);
2764
    q = float64_div(q, float64_512, s);
2765
    q = float64_div(float64_one, q, s);
2766

    
2767
    /* s = (int)(256.0 * r + 0.5) */
2768
    q = float64_mul(q, float64_256, s);
2769
    q = float64_add(q, float64_half, s);
2770
    q_int = float64_to_int64_round_to_zero(q, s);
2771

    
2772
    /* return (double)s / 256.0 */
2773
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2774
}
2775

    
2776
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
2777
{
2778
    float_status *s = &env->vfp.standard_fp_status;
2779
    float64 f64;
2780
    uint32_t val32 = float32_val(a);
2781

    
2782
    int result_exp;
2783
    int a_exp = (val32  & 0x7f800000) >> 23;
2784
    int sign = val32 & 0x80000000;
2785

    
2786
    if (float32_is_any_nan(a)) {
2787
        if (float32_is_signaling_nan(a)) {
2788
            float_raise(float_flag_invalid, s);
2789
        }
2790
        return float32_default_nan;
2791
    } else if (float32_is_infinity(a)) {
2792
        return float32_set_sign(float32_zero, float32_is_neg(a));
2793
    } else if (float32_is_zero_or_denormal(a)) {
2794
        if (!float32_is_zero(a)) {
2795
            float_raise(float_flag_input_denormal, s);
2796
        }
2797
        float_raise(float_flag_divbyzero, s);
2798
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2799
    } else if (a_exp >= 253) {
2800
        float_raise(float_flag_underflow, s);
2801
        return float32_set_sign(float32_zero, float32_is_neg(a));
2802
    }
2803

    
2804
    f64 = make_float64((0x3feULL << 52)
2805
                       | ((int64_t)(val32 & 0x7fffff) << 29));
2806

    
2807
    result_exp = 253 - a_exp;
2808

    
2809
    f64 = recip_estimate(f64, env);
2810

    
2811
    val32 = sign
2812
        | ((result_exp & 0xff) << 23)
2813
        | ((float64_val(f64) >> 29) & 0x7fffff);
2814
    return make_float32(val32);
2815
}
2816

    
2817
/* The algorithm that must be used to calculate the estimate
2818
 * is specified by the ARM ARM.
2819
 */
2820
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
2821
{
2822
    /* These calculations mustn't set any fp exception flags,
2823
     * so we use a local copy of the fp_status.
2824
     */
2825
    float_status dummy_status = env->vfp.standard_fp_status;
2826
    float_status *s = &dummy_status;
2827
    float64 q;
2828
    int64_t q_int;
2829

    
2830
    if (float64_lt(a, float64_half, s)) {
2831
        /* range 0.25 <= a < 0.5 */
2832

    
2833
        /* a in units of 1/512 rounded down */
2834
        /* q0 = (int)(a * 512.0);  */
2835
        q = float64_mul(float64_512, a, s);
2836
        q_int = float64_to_int64_round_to_zero(q, s);
2837

    
2838
        /* reciprocal root r */
2839
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
2840
        q = int64_to_float64(q_int, s);
2841
        q = float64_add(q, float64_half, s);
2842
        q = float64_div(q, float64_512, s);
2843
        q = float64_sqrt(q, s);
2844
        q = float64_div(float64_one, q, s);
2845
    } else {
2846
        /* range 0.5 <= a < 1.0 */
2847

    
2848
        /* a in units of 1/256 rounded down */
2849
        /* q1 = (int)(a * 256.0); */
2850
        q = float64_mul(float64_256, a, s);
2851
        int64_t q_int = float64_to_int64_round_to_zero(q, s);
2852

    
2853
        /* reciprocal root r */
2854
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2855
        q = int64_to_float64(q_int, s);
2856
        q = float64_add(q, float64_half, s);
2857
        q = float64_div(q, float64_256, s);
2858
        q = float64_sqrt(q, s);
2859
        q = float64_div(float64_one, q, s);
2860
    }
2861
    /* r in units of 1/256 rounded to nearest */
2862
    /* s = (int)(256.0 * r + 0.5); */
2863

    
2864
    q = float64_mul(q, float64_256, s);
2865
    q = float64_add(q, float64_half, s);
2866
    q_int = float64_to_int64_round_to_zero(q, s);
2867

    
2868
    /* return (double)s / 256.0;*/
2869
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2870
}
2871

    
2872
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
2873
{
2874
    float_status *s = &env->vfp.standard_fp_status;
2875
    int result_exp;
2876
    float64 f64;
2877
    uint32_t val;
2878
    uint64_t val64;
2879

    
2880
    val = float32_val(a);
2881

    
2882
    if (float32_is_any_nan(a)) {
2883
        if (float32_is_signaling_nan(a)) {
2884
            float_raise(float_flag_invalid, s);
2885
        }
2886
        return float32_default_nan;
2887
    } else if (float32_is_zero_or_denormal(a)) {
2888
        if (!float32_is_zero(a)) {
2889
            float_raise(float_flag_input_denormal, s);
2890
        }
2891
        float_raise(float_flag_divbyzero, s);
2892
        return float32_set_sign(float32_infinity, float32_is_neg(a));
2893
    } else if (float32_is_neg(a)) {
2894
        float_raise(float_flag_invalid, s);
2895
        return float32_default_nan;
2896
    } else if (float32_is_infinity(a)) {
2897
        return float32_zero;
2898
    }
2899

    
2900
    /* Normalize to a double-precision value between 0.25 and 1.0,
2901
     * preserving the parity of the exponent.  */
2902
    if ((val & 0x800000) == 0) {
2903
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
2904
                           | (0x3feULL << 52)
2905
                           | ((uint64_t)(val & 0x7fffff) << 29));
2906
    } else {
2907
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
2908
                           | (0x3fdULL << 52)
2909
                           | ((uint64_t)(val & 0x7fffff) << 29));
2910
    }
2911

    
2912
    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
2913

    
2914
    f64 = recip_sqrt_estimate(f64, env);
2915

    
2916
    val64 = float64_val(f64);
2917

    
2918
    val = ((result_exp & 0xff) << 23)
2919
        | ((val64 >> 29)  & 0x7fffff);
2920
    return make_float32(val);
2921
}
2922

    
2923
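/* Unsigned VRECPE: the operand is treated as a fixed-point fraction in
   [0.5, 1.0) with bit 31 as the 0.5 bit; operands below 0.5 simply
   return 0xffffffff.  */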
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
2924
{
2925
    float64 f64;
2926

    
2927
    if ((a & 0x80000000) == 0) {
2928
        return 0xffffffff;
2929
    }
2930

    
2931
    f64 = make_float64((0x3feULL << 52)
2932
                       | ((int64_t)(a & 0x7fffffff) << 21));
2933

    
2934
    f64 = recip_estimate (f64, env);
2935

    
2936
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
2937
}
2938

    
2939
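/* Unsigned VRSQRTE: the operand is a fixed-point fraction in [0.25, 1.0);
   if bits [31:30] are both clear the result is 0xffffffff.  */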
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
2940
{
2941
    float64 f64;
2942

    
2943
    if ((a & 0xc0000000) == 0) {
2944
        return 0xffffffff;
2945
    }
2946

    
2947
    if (a & 0x80000000) {
2948
        f64 = make_float64((0x3feULL << 52)
2949
                           | ((uint64_t)(a & 0x7fffffff) << 21));
2950
    } else { /* bits 31-30 == '01' */
2951
        f64 = make_float64((0x3fdULL << 52)
2952
                           | ((uint64_t)(a & 0x3fffffff) << 22));
2953
    }
2954

    
2955
    f64 = recip_sqrt_estimate(f64, env);
2956

    
2957
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
2958
}
2959

    
2960
/* VFPv4 fused multiply-accumulate */
2961
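/* The softfloat muladd functions compute a * b + c with only a single
   final rounding, as VFMA/VFMS require.  */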
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
2962
{
2963
    float_status *fpst = fpstp;
2964
    return float32_muladd(a, b, c, 0, fpst);
2965
}
2966

    
2967
float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
2968
{
2969
    float_status *fpst = fpstp;
2970
    return float64_muladd(a, b, c, 0, fpst);
2971
}
2972

    
2973
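/* TEECR is the ThumbEE configuration register; its value affects how code
   is translated, so any change must flush the translation cache.  */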
void HELPER(set_teecr)(CPUARMState *env, uint32_t val)
2974
{
2975
    val &= 1;
2976
    if (env->teecr != val) {
2977
        env->teecr = val;
2978
        tb_flush(env);
2979
    }
2980
}