Statistics
| Branch: | Revision:

root / target-arm / helper.c @ 601d70b9

History | View | Annotate | Download (67.7 kB)

1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "exec-all.h"
7
#include "gdbstub.h"
8
#include "helpers.h"
9
#include "qemu-common.h"
10

    
11
/* Per-model CP15 c0 identification register banks, eight words each.
   The c0_c1 tables hold the CRm==1 ID values (processor/memory-model
   feature registers) and the c0_c2 tables the CRm==2 values (ISA
   feature registers); cpu_reset_model_id() copies them into the CPU
   state at reset for the v6/v7 cores below.  Values are the cores'
   documented reset values -- see the respective TRMs.  */
static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

/* Forward declaration; defined after the model-name table below.  */
static uint32_t cpu_arm_find_by_name(const char *name);
30

    
31
/* Record that the CPU model implements the given ARM_FEATURE_* flag
   by setting the corresponding bit in env->features.  */
static inline void set_feature(CPUARMState *env, int feature)
{
    env->features = env->features | (1u << feature);
}
35

    
36
/* Initialize the model-dependent parts of the CPU state for the given
   CPUID: feature flags, VFP ID registers, CP15 cache-type / control
   register reset values and (for v6/v7 cores) the CP15 c0 ID tables.
   Aborts on an unknown id.
   Fix: the c0_c2 ID tables were previously memcpy'd into c0_c1,
   clobbering the c0_c1 values and leaving c0_c2 zeroed.  */
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136_R2:
    case ARM_CPUID_ARM1136:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        /* Was copied into c0_c1 by mistake.  */
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        /* Was copied into c0_c1 by mistake.  */
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        /* Was copied into c0_c1 by mistake.  */
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}
152

    
153
/* Reset the CPU to its architectural power-on state.  Everything up to
   the `breakpoints` field is zeroed; state after it (debug state, the
   model id captured below) survives reset.  */
void cpu_reset(CPUARMState *env)
{
    uint32_t id;
    /* Preserve the CPUID across the memset so the model-specific
       state can be re-applied afterwards.  */
    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    /* User-mode emulation starts in USR mode with VFP enabled
       (FPEXC.EN is bit 30).  */
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  */
    if (IS_M(env))
        env->uncached_cpsr &= ~CPSR_I;
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    /* Start execution at the reset vector with a clean TLB.  */
    env->regs[15] = 0;
    tlb_flush(env, 1);
}
175

    
176
CPUARMState *cpu_arm_init(const char *cpu_model)
177
{
178
    CPUARMState *env;
179
    uint32_t id;
180
    static int inited = 0;
181

    
182
    id = cpu_arm_find_by_name(cpu_model);
183
    if (id == 0)
184
        return NULL;
185
    env = qemu_mallocz(sizeof(CPUARMState));
186
    if (!env)
187
        return NULL;
188
    cpu_exec_init(env);
189
    if (!inited) {
190
        inited = 1;
191
        arm_translate_init();
192
    }
193

    
194
    env->cpu_model_str = cpu_model;
195
    env->cp15.c0_cpuid = id;
196
    cpu_reset(env);
197
    return env;
198
}
199

    
200
/* Mapping of user-visible CPU model names to CPUID values.  */
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};

/* Table of supported models; terminated by a {0, NULL} sentinel.
   Scanned by cpu_arm_find_by_name() and listed by arm_cpu_list().  */
static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { ARM_CPUID_ANY, "any"},
    { 0, NULL}
};
230

    
231
void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
232
{
233
    int i;
234

    
235
    (*cpu_fprintf)(f, "Available CPUs:\n");
236
    for (i = 0; arm_cpu_names[i].name; i++) {
237
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
238
    }
239
}
240

    
241
/* return 0 if not found */
242
static uint32_t cpu_arm_find_by_name(const char *name)
243
{
244
    int i;
245
    uint32_t id;
246

    
247
    id = 0;
248
    for (i = 0; arm_cpu_names[i].name; i++) {
249
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
250
            id = arm_cpu_names[i].id;
251
            break;
252
        }
253
    }
254
    return id;
255
}
256

    
257
/* Release a CPU state created by cpu_arm_init().
   NOTE(review): the state is allocated with qemu_mallocz() but released
   with plain free(); this assumes qemu_mallocz wraps malloc -- confirm,
   or switch to the matching qemu_free() if one exists.  */
void cpu_arm_close(CPUARMState *env)
{
    free(env);
}
261

    
262
/* Assemble the architectural CPSR value from the uncached bits plus the
   separately-cached flag fields (NF/ZF/CF/VF/QF, Thumb, IT, GE).  */
uint32_t cpsr_read(CPUARMState *env)
{
    uint32_t ret = env->uncached_cpsr;

    ret |= env->NF & 0x80000000;             /* N: sign bit of NF.  */
    if (env->ZF == 0)                        /* Z: set when ZF == 0.  */
        ret |= 1u << 30;
    ret |= env->CF << 29;                    /* C */
    ret |= (env->VF & 0x80000000) >> 3;      /* V: sign bit of VF.  */
    ret |= env->QF << 27;                    /* Q */
    ret |= env->thumb << 5;                  /* T */
    ret |= (env->condexec_bits & 3) << 25;   /* IT[1:0] */
    ret |= (env->condexec_bits & 0xfc) << 8; /* IT[7:2] */
    ret |= env->GE << 16;                    /* GE[3:0] */
    return ret;
}
272

    
273
/* Write `val` to the CPSR under `mask`.  Masked-in cached fields
   (NZCV, Q, T, IT, GE) are split out into their dedicated cache
   variables; a masked mode change goes through switch_mode() so the
   banked registers are swapped; whatever remains is stored in
   uncached_cpsr.  */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        /* ZF caches the *inverse* sense: zero means the Z flag is set.  */
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        /* VF caches V in its sign bit.  */
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* Switch register banks only if the mode actually changes.  */
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        switch_mode(env, val & CPSR_M);
    }
    /* The cached fields above must not be duplicated in uncached_cpsr.  */
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
303

    
304
/* Sign/zero extend */
305
uint32_t HELPER(sxtb16)(uint32_t x)
306
{
307
    uint32_t res;
308
    res = (uint16_t)(int8_t)x;
309
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
310
    return res;
311
}
312

    
313
uint32_t HELPER(uxtb16)(uint32_t x)
314
{
315
    uint32_t res;
316
    res = (uint16_t)(uint8_t)x;
317
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
318
    return res;
319
}
320

    
321
uint32_t HELPER(clz)(uint32_t x)
322
{
323
    int count;
324
    for (count = 32; x; count--)
325
        x >>= 1;
326
    return count;
327
}
328

    
329
int32_t HELPER(sdiv)(int32_t num, int32_t den)
330
{
331
    if (den == 0)
332
      return 0;
333
    return num / den;
334
}
335

    
336
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
337
{
338
    if (den == 0)
339
      return 0;
340
    return num / den;
341
}
342

    
343
uint32_t HELPER(rbit)(uint32_t x)
344
{
345
    x =  ((x & 0xff000000) >> 24)
346
       | ((x & 0x00ff0000) >> 8)
347
       | ((x & 0x0000ff00) << 8)
348
       | ((x & 0x000000ff) << 24);
349
    x =  ((x & 0xf0f0f0f0) >> 4)
350
       | ((x & 0x0f0f0f0f) << 4);
351
    x =  ((x & 0x88888888) >> 3)
352
       | ((x & 0x44444444) >> 1)
353
       | ((x & 0x22222222) << 1)
354
       | ((x & 0x11111111) << 3);
355
    return x;
356
}
357

    
358
uint32_t HELPER(abs)(uint32_t x)
359
{
360
    return ((int32_t)x < 0) ? -x : x;
361
}
362

    
363
#if defined(CONFIG_USER_ONLY)
364

    
365
/* User-mode emulation: exceptions are handled entirely by the caller
   (cpu_exec loop), so just clear the pending exception index.  */
void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}
369

    
370
/* Structure used to record exclusive memory locations (the per-CPU
   LDREX/STREX monitor).  `addr` == 0 means no lock is currently held.  */
typedef struct mmon_state {
    struct mmon_state *next;   /* next monitor in the global chain */
    CPUARMState *cpu_env;      /* owning CPU */
    uint32_t addr;             /* marked exclusive address, 0 if none */
} mmon_state;

/* Chain of current locks.  */
static mmon_state* mmon_head = NULL;
379

    
380
/* User-mode emulation MMU fault handler: there is no MMU, so every
   fault is turned directly into a guest abort exception.  rw == 2
   denotes an instruction fetch.  Always returns 1 (fault).  */
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx, int is_softmmu)
{
    if (rw == 2) {
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
392

    
393
/* Allocate and zero this CPU's exclusive-monitor record and add it to
   the global chain.  Fix: the record is now linked in front of the
   existing head instead of overwriting it, so monitors belonging to
   previously-initialized CPUs stay reachable by flush_mmon().  */
static void allocate_mmon_state(CPUState *env)
{
    env->mmon_entry = malloc(sizeof (mmon_state));
    if (!env->mmon_entry)
        abort();
    memset (env->mmon_entry, 0, sizeof (mmon_state));
    env->mmon_entry->cpu_env = env;
    env->mmon_entry->next = mmon_head;
    mmon_head = env->mmon_entry;
}
402

    
403
/* Flush any monitor locks for the specified address.  */
404
static void flush_mmon(uint32_t addr)
405
{
406
    mmon_state *mon;
407

    
408
    for (mon = mmon_head; mon; mon = mon->next)
409
      {
410
        if (mon->addr != addr)
411
          continue;
412

    
413
        mon->addr = 0;
414
        break;
415
      }
416
}
417

    
418
/* Mark an address for exclusive access.  */
419
/* Mark `addr` for exclusive access (LDREX).  The per-CPU monitor
   record is created lazily on first use; any previous lock on the
   address is cleared first.  */
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
{
    if (env->mmon_entry == NULL)
        allocate_mmon_state(env);
    flush_mmon(addr);
    env->mmon_entry->addr = addr;
}
427

    
428
/* Test if an exclusive address is still exclusive.  Returns zero
429
   if the address is still exclusive.   */
430
/* Test if an exclusive address is still exclusive (STREX).  Returns 0
   if this CPU still holds the lock on `addr`, 1 otherwise.  The check
   always consumes the monitor.  */
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
{
    int failed = 1;

    if (!env->mmon_entry)
        return 1;
    if (env->mmon_entry->addr == addr)
        failed = 0;
    flush_mmon(addr);
    return failed;
}
443

    
444
/* CLREX: drop this CPU's exclusive lock, if it holds one.  */
void HELPER(clrex)(CPUState *env)
{
    mmon_state *mon = env->mmon_entry;

    if (mon && mon->addr)
        flush_mmon(mon->addr);
}
450

    
451
/* User-mode emulation has no MMU, so virtual and physical addresses
   are identical.  */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
455

    
456
/* These should probably raise undefined insn exceptions.  */
457
/* Coprocessor writes are not supported in user-mode emulation.
   These should probably raise undefined insn exceptions instead.  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    /* Coprocessor number is encoded in insn bits [11:8].  */
    int cp_num = (insn >> 8) & 0xf;

    cpu_abort(env, "cp%i insn %08x\n", cp_num, insn);
}
463

    
464
/* Coprocessor reads are not supported in user-mode emulation.
   These should probably raise undefined insn exceptions instead.  */
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    /* Coprocessor number is encoded in insn bits [11:8].  */
    int cp_num = (insn >> 8) & 0xf;

    cpu_abort(env, "cp%i insn %08x\n", cp_num, insn);
    return 0;
}
470

    
471
/* CP15 writes never reach here in user-mode emulation; abort if one
   slips through the translator.  */
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}
475

    
476
/* CP15 reads never reach here in user-mode emulation; abort if one
   slips through the translator.  */
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
    return 0;  /* Not reached; keeps the compiler happy.  */
}
481

    
482
/* These should probably raise undefined insn exceptions.  */
483
/* v7-M MSR to a system register is unsupported in user-mode emulation.
   Fix: the abort message previously said "v7m_mrs", mislabeling the
   failing helper and pointing debugging at the wrong function.
   These should probably raise undefined insn exceptions instead.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}
487

    
488
/* v7-M MRS from a system register is unsupported in user-mode
   emulation; abort.  */
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;  /* Not reached; keeps the compiler happy.  */
}
493

    
494
/* User-mode emulation runs only in USR mode; any attempt to switch to
   a privileged mode is a bug in the guest translation, so abort.  */
void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}
499

    
500
/* Banked r13 is only meaningful in privileged modes, which user-mode
   emulation does not have; abort.  */
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}
504

    
505
/* Banked r13 is only meaningful in privileged modes, which user-mode
   emulation does not have; abort.  */
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;  /* Not reached; keeps the compiler happy.  */
}
510

    
511
#else
512

    
513
extern int semihosting_enabled;
514

    
515
/* Map CPU modes onto saved register banks.  */
516
static inline int bank_number (int mode)
517
{
518
    switch (mode) {
519
    case ARM_CPU_MODE_USR:
520
    case ARM_CPU_MODE_SYS:
521
        return 0;
522
    case ARM_CPU_MODE_SVC:
523
        return 1;
524
    case ARM_CPU_MODE_ABT:
525
        return 2;
526
    case ARM_CPU_MODE_UND:
527
        return 3;
528
    case ARM_CPU_MODE_IRQ:
529
        return 4;
530
    case ARM_CPU_MODE_FIQ:
531
        return 5;
532
    }
533
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
534
    return -1;
535
}
536

    
537
/* Switch the CPU to a new mode: save the old mode's banked registers
   (r13, r14, SPSR, and r8-r12 for FIQ) and load the new mode's.  The
   order matters: the old bank must be saved before the new one is
   loaded.  */
void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    /* r8-r12 are additionally banked for FIQ only; swap them via the
       fiq_regs/usr_regs save areas when entering or leaving FIQ.  */
    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Stash the outgoing mode's r13/r14/SPSR ... */
    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    /* ... then load the incoming mode's.  */
    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
564

    
565
/* Push a word onto the current v7-M stack (full-descending, via
   physical memory access).  */
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}
570

    
571
/* Pop a word from the current v7-M stack (full-descending, via
   physical memory access).  */
static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}
578

    
579
/* Switch to V7M main or process stack pointer.  */
580
/* Switch to V7M main (process == 0) or process (process != 0) stack
   pointer.  r13 always holds the active SP; the inactive one is parked
   in v7m.other_sp, so a switch is just a swap.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t saved;

    if (env->v7m.current_sp == process)
        return;
    saved = env->v7m.other_sp;
    env->v7m.other_sp = env->regs[13];
    env->regs[13] = saved;
    env->v7m.current_sp = process;
}
590

    
591
/* Perform a v7-M exception return: r15 holds the magic EXC_RETURN
   value at this point.  Completes the IRQ at the NVIC, selects the
   return stack, and unstacks the exception frame pushed on entry.  */
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    /* EXC_RETURN value; bit 2 selects the process stack, bit 3
       thread mode.  */
    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    /* Bit 9 (stack-align) is consumed below, not restored.  */
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
621

    
622
/* v7-M exception entry.  Most exceptions are just marked pending at
   the NVIC; a taken IRQ stacks the eight-word exception frame, loads
   the EXC_RETURN value into lr, and vectors through the NVIC table.  */
void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    /* Build the EXC_RETURN value: bit 2 = came from process stack,
       bit 3 = came from thread mode (no active exception).  */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* Step past the SVC instruction (2 bytes, Thumb).  */
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        /* BKPT 0xab is the semihosting call convention.  */
        if (semihosting_enabled) {
            int nr;
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] += 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode: stack the exception frame
       (xPSR, PC, LR, r12, r3-r0).  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    /* Handlers always run on the main stack.  */
    switch_v7m_sp(env, 0);
    env->uncached_cpsr &= ~CPSR_IT;
    env->regs[14] = lr;
    /* Fetch the handler address from the vector table; bit 0 of the
       vector selects Thumb state.  */
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
696

    
697
/* Handle a CPU exception.  */
698
/* Handle a CPU exception: compute the target mode, vector address,
   CPSR interrupt mask and return-address offset for the pending
   exception, then perform the mode switch and branch to the vector.
   v7-M cores are delegated to do_interrupt_v7m().  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        /* Return address is the undefined instruction itself.  */
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = lduw_code(env->regs[15]) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    /* switch_mode() must run before the SPSR/LR writes so they land
       in the new mode's banked registers.  */
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and switch to Arm mode.  */
    /* ??? Thumb interrupt handlers not implemented.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    env->thumb = 0;
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
802

    
803
/* Check section/page access permissions.
804
   Returns the page protection flags, or zero if the access is not
805
   permitted.  */
806
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  `ap` is the (possibly extended) access-permission field
   from the descriptor, `access_type` == 1 means a write.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
  int prot_ro;

  /* Domain 3 is "manager": permissions are not checked at all.  */
  if (domain == 3)
    return PAGE_READ | PAGE_WRITE;

  /* Read-only outcome for this access: nothing if it's a write.  */
  if (access_type == 1)
      prot_ro = 0;
  else
      prot_ro = PAGE_READ;

  switch (ap) {
  case 0:
      if (access_type == 1)
          return 0;
      /* AP == 0: behavior depends on the S/R bits of the control reg.  */
      switch ((env->cp15.c1_sys >> 8) & 3) {
      case 1:
          return is_user ? 0 : PAGE_READ;
      case 2:
          return PAGE_READ;
      default:
          return 0;
      }
  case 1:
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  case 2:
      if (is_user)
          return prot_ro;
      else
          return PAGE_READ | PAGE_WRITE;
  case 3:
      return PAGE_READ | PAGE_WRITE;
  case 4: case 7: /* Reserved.  */
      return 0;
  case 5:
      /* Privileged read-only.  */
      return is_user ? 0 : prot_ro;
  case 6:
      /* Read-only for everyone.  */
      return prot_ro;
  default:
      abort();
  }
}
850

    
851
/* ARMv5-format page table walk: translate `address` to a physical
   address and protection flags.  Returns 0 on success, or a fault
   status code (with the domain in bits [7:4]) on failure.  */
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    /* TTBR selection: high addresses (per c2_mask) use TTBR1.  */
    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1;
    else
        table = env->cp15.c2_base0;
    table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
    desc = ldl_phys(table);
    type = (desc & 3);
    /* Domain access rights come from the DACR (c3), two bits each.  */
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            /* AP bits are per-16k subpage; pick the one for this address.  */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            /* AP bits are per-1k subpage; pick the one for this address.  */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    /* XScale treats type 3 in a coarse table as 4k.  */
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
941

    
942
/* Translate ADDRESS using the ARMv6 (VMSAv6, subpages-disabled) pagetable
   format.  On success returns 0 and fills in *PHYS_PTR and *PROT; on
   failure returns an FSR-style fault code with the domain in bits [7:4].
   ACCESS_TYPE: 0 = data read, 1 = data write, 2 = instruction fetch.  */
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    /* Addresses above the TTBCR.N split (c2_mask) walk TTBR1;
       everything else walks TTBR0.  */
    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1;
    else
        table = env->cp15.c2_base0;
    table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  Supersections always use domain 0.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 4) & 0x1e;
    }
    /* Look up the 2-bit access field for this domain in DACR (c3).  */
    domain = (env->cp15.c3 >> domain) & 3;
    if (domain == 0 || domain == 2) {
        /* Domain "no access" (0) or reserved (2).  */
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection: 16MB granule.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
        } else {
            /* Section: 1MB granule.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        }
        /* AP[1:0] from bits [11:10], APX (AP[2]) from bit 15.  */
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        /* AP[1:0] from bits [5:4], APX (AP[2]) from bit 9.  */
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    /* Execute-never pages fault on instruction fetches.  */
    if (xn && access_type == 2)
        goto do_fault;

    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
1030

    
1031
/* Translate ADDRESS through the ARM MPU (e.g. ARM946) protection regions.
   The MPU never remaps, so *PHYS_PTR is always ADDRESS.  Returns 0 on
   success (with *PROT filled in), 2 if no region matches, or 1 on a
   permission fault.  ACCESS_TYPE 2 means instruction fetch.  */
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    /* Higher-numbered regions take priority: search from 7 down.  */
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        /* Bit 0 is the region-enable flag.  */
        if ((base & 1) == 0)
            continue;
        /* Bits [5:1] encode log2(region size) - 1.  */
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    /* Extract the 4-bit access-permission field for region n from the
       instruction or data permission register.  */
    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0: /* No access for anyone.  */
        return 1;
    case 1: /* Privileged read/write; user no access.  */
        if (is_user)
          return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2: /* Privileged read/write; user read-only.  */
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3: /* Full access.  */
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5: /* Privileged read-only; user no access.  */
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6: /* Read-only for everyone.  */
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    return 0;
}
1089

    
1090
/* Central translation dispatcher: apply FCSE remapping, then hand the
   address to the flat map, the MPU, or the v5/v6 pagetable walker
   depending on the configured translation scheme.  */
static inline int get_phys_addr(CPUState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot)
{
    /* Fast Context Switch Extension: remap the bottom 32MB through the
       FCSE PID register before translation.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    /* Translation disabled (control register M bit clear): identity
       mapping with full access.  */
    if (!(env->cp15.c1_sys & 1)) {
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    /* MPU-based cores have no pagetables at all.  */
    if (arm_feature(env, ARM_FEATURE_MPU))
        return get_phys_addr_mpu(env, address, access_type, is_user,
                                 phys_ptr, prot);

    /* The XP bit (bit 23) selects the ARMv6 subpage-free format.  */
    if (env->cp15.c1_sys & (1 << 23))
        return get_phys_addr_v6(env, address, access_type, is_user,
                                phys_ptr, prot);

    return get_phys_addr_v5(env, address, access_type, is_user,
                            phys_ptr, prot);
}
1114

    
1115
/* Handle a softmmu TLB miss: translate ADDRESS and either install the
   mapping or latch fault status/address into cp15 and queue the
   appropriate abort exception.  Returns tlb_set_page()'s result on
   success, 1 on fault.  ACCESS_TYPE: 0 = read, 1 = write, 2 = ifetch.  */
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int mmu_idx, int is_softmmu)
{
    uint32_t phys_addr;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
    if (ret == 0) {
        /* Map a single [sub]page.  1K alignment covers ARMv5 subpages
           and tiny pages.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
                             is_softmmu);
    }

    /* Translation failed: record FSR (c5) and FAR (c6), then abort.  */
    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        /* NOTE(review): bit 11 looks like the ARMv6 DFSR read/write
           flag for write accesses -- confirm against the ARM ARM.  */
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
1145

    
1146
/* Debugger physical-address lookup: translate ADDR as a privileged data
   read and return the physical address, or -1 if unmapped.  */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    int prot;

    /* Permissions are computed but deliberately ignored here.  */
    if (get_phys_addr(env, addr, 0, 0, &phys_addr, &prot) != 0)
        return -1;

    return phys_addr;
}
1159

    
1160
/* Not really implemented.  Need to figure out a sane way of doing this.
1161
   Maybe add generic watchpoint support and use that.  */
1162

    
1163
/* Record ADDR as the address covered by the exclusive-access monitor
   (LDREX).  */
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
{
    env->mmon_addr = addr;
}
1167

    
1168
/* STREX check: return 0 (success) if the monitor still covers ADDR,
   nonzero otherwise.  */
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
{
    if (env->mmon_addr == addr)
        return 0;
    return 1;
}
1172

    
1173
/* Clear the exclusive monitor.  -1 is used as the "no address"
   sentinel; presumably no real LDREX address ever matches it.  */
void HELPER(clrex)(CPUState *env)
{
    env->mmon_addr = -1;
}
1177

    
1178
/* Route a generic coprocessor register write to the handler registered
   for that coprocessor; writes to unhandled coprocessors are ignored.  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;

    if (!env->cp[cp_num].cp_write)
        return;

    env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                             (insn >> 5) & 7,    /* cp_info */
                             (insn >> 16) & 0xf, /* source register */
                             insn & 0xf,         /* operand */
                             val);
}
1189

    
1190
/* Route a generic coprocessor register read to the handler registered
   for that coprocessor; unhandled coprocessors read as 0.  */
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;

    if (!env->cp[cp_num].cp_read)
        return 0;

    return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                   (insn >> 5) & 7,     /* cp_info */
                                   (insn >> 16) & 0xf,  /* dest register */
                                   insn & 0xf);         /* operand */
}
1202

    
1203
/* Collapse the extended MPU access-permission format (one 4-bit field
   per region; only the low 2 bits of each are kept) into the simple
   format of eight packed 2-bit fields.  Inverse of
   extended_mpu_ap_bits().  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 4)) & 3) << (region * 2);
    }
    return ret;
}
1217

    
1218
/* Expand the simple MPU access-permission format (eight packed 2-bit
   fields) into the extended format of one 4-bit field per region.
   Inverse of simple_mpu_ap_bits().  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 2)) & 3) << (region * 4);
    }
    return ret;
}
1232

    
1233
/* Write a cp15 register.  INSN is the MCR instruction word: the register
   is selected by CRn (bits [19:16]), op1 (bits [23:21]), CRm (bits [3:0])
   and op2 (bits [7:5]).  Unimplemented registers abort via bad_reg.  */
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        if (((insn >> 21) & 7) == 2) {
            /* ??? Select cache level.  Ignore.  */
            return;
        }
        /* ID codes.  Writes are ignored on XScale/OMAP, invalid
           elsewhere.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            env->cp15.c1_coproc = val;
            /* ??? Is this safe when called from within a TB?  */
            tb_flush(env);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0: /* TTBR0 */
                env->cp15.c2_base0 = val;
                break;
            case 1: /* TTBR1 */
                env->cp15.c2_base1 = val;
                break;
            case 2:
                /* TTBCR: store TTBCR.N as a precomputed address mask of
                   the top VAL bits.  */
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            /* MPU cores store permissions in the extended format; a
               write in the simple format is widened first.  */
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            /* Extended-format access permission registers (MPU only).  */
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        /* No cache, so nothing to do.  */
        /* ??? MPCore has VA to PA translation functions.  */
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
#if 0
            /* ??? This is wrong for large pages and sections.  */
            /* As an ugly hack to make linux work we always flush
               4K pages.  */
            val &= 0xfffff000;
            tlb_flush_page(env, val);
            tlb_flush_page(env, val + 0x400);
            tlb_flush_page(env, val + 0x800);
            tlb_flush_page(env, val + 0xc00);
#else
            tlb_flush(env, 1);
#endif
            break;
        case 2: /* Invalidate on ASID.  */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry on MVA.  */
            /* ??? This is like case 1, but ignores ASID.  */
            tlb_flush(env, 1);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    /* Note: no case 11 -- writes to c11 fall through and are ignored.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0: /* FCSE PID.  */
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
              tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1: /* Context ID.  */
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
              tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        case 2: /* Thread ID registers.  */
            env->cp15.c13_tls1 = val;
            break;
        case 3:
            env->cp15.c13_tls2 = val;
            break;
        case 4:
            env->cp15.c13_tls3 = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                /* Coprocessor Access Register.  */
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration.  */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max.  */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min.  */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID.  */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated).  */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}
1516

    
1517
/* Read a cp15 register.  INSN is the MRC instruction word: the register
   is selected by CRn (bits [19:16]), op1 (bits [23:21]), CRm (bits [3:0])
   and op2 (bits [7:5]).  Unimplemented registers abort via bad_reg.  */
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        switch (op1) {
        case 0:
            switch (crm) {
            case 0:
                switch (op2) {
                case 0: /* Device ID.  */
                    return env->cp15.c0_cpuid;
                case 1: /* Cache Type.  */
                    return env->cp15.c0_cachetype;
                case 2: /* TCM status.  */
                    return 0;
                case 3: /* TLB type register.  */
                    return 0; /* No lockable TLB entries.  */
                case 5: /* CPU ID */
                    return env->cpu_index;
                default:
                    goto bad_reg;
                }
            case 1: /* Processor feature registers.  */
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c1[op2];
            case 2: /* ISA feature registers.  */
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c2[op2];
            case 3: case 4: case 5: case 6: case 7:
                return 0;
            default:
                goto bad_reg;
            }
        case 1:
            /* These registers aren't documented on arm11 cores.  However
               Linux looks at them anyway.  */
            if (!arm_feature(env, ARM_FEATURE_V6))
                goto bad_reg;
            if (crm != 0)
                goto bad_reg;
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            return 0;
        default:
            goto bad_reg;
        }
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0: /* Control register.  */
            return env->cp15.c1_sys;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                return env->cp15.c1_xscaleauxcr;
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
                goto bad_reg;
            /* Per-core reset values.  */
            switch (ARM_CPUID(env)) {
            case ARM_CPUID_ARM1026:
                return 1;
            case ARM_CPUID_ARM1136:
            case ARM_CPUID_ARM1136_R2:
                return 7;
            case ARM_CPUID_ARM11MPCORE:
                return 1;
            case ARM_CPUID_CORTEXA8:
                return 0;
            default:
                goto bad_reg;
            }
        case 2: /* Coprocessor access register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            return env->cp15.c1_coproc;
        default:
            goto bad_reg;
        }
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                return env->cp15.c2_data;
            case 1:
                return env->cp15.c2_insn;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0: /* TTBR0 */
                return env->cp15.c2_base0;
            case 1: /* TTBR1 */
                return env->cp15.c2_base1;
            case 2:
                {
                    /* Recover TTBCR.N by counting the set bits at the
                       top of the precomputed c2_mask.  */
                    int n;
                    uint32_t mask;
                    n = 0;
                    mask = env->cp15.c2_mask;
                    while (mask) {
                        n++;
                        mask <<= 1;
                    }
                    return n;
                }
            default:
                goto bad_reg;
            }
        }
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        return env->cp15.c3;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_data);
            return env->cp15.c5_data;
        case 1:
            /* Bug fix: this previously read back the *data* permissions
               (c5_data) for the instruction access permission register.  */
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_insn);
            return env->cp15.c5_insn;
        case 2: /* Extended-format access permissions (MPU only).  */
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_data;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_insn;
        default:
            goto bad_reg;
        }
    case 6: /* MMU Fault address.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            return env->cp15.c6_region[crm];
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                return env->cp15.c6_data;
            case 1:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Watchpoint Fault Address.  */
                    return 0; /* Not implemented.  */
                } else {
                    /* Instruction Fault Address.  */
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
                       shouldn't do any harm.  */
                    return env->cp15.c6_insn;
                }
            case 2:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Instruction Fault Address.  */
                    return env->cp15.c6_insn;
                } else {
                    goto bad_reg;
                }
            default:
                goto bad_reg;
            }
        }
    case 7: /* Cache control.  */
        /* FIXME: Should only clear Z flag if destination is r15.  */
        env->ZF = 0;
        return 0;
    case 8: /* MMU TLB control.  */
        goto bad_reg;
    case 9: /* Cache lockdown.  */
        switch (op1) {
        case 0: /* L1 cache.  */
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                return 0;
            switch (op2) {
            case 0:
                return env->cp15.c9_data;
            case 1:
                return env->cp15.c9_insn;
            default:
                goto bad_reg;
            }
        case 1: /* L2 cache */
            if (crm != 0)
                goto bad_reg;
            /* L2 Lockdown and Auxiliary control.  */
            return 0;
        default:
            goto bad_reg;
        }
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        return 0;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0: /* FCSE PID.  */
            return env->cp15.c13_fcse;
        case 1: /* Context ID.  */
            return env->cp15.c13_context;
        case 2: /* Thread ID registers.  */
            return env->cp15.c13_tls1;
        case 3:
            return env->cp15.c13_tls2;
        case 4:
            return env->cp15.c13_tls3;
        default:
            goto bad_reg;
        }
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1)
                return env->cp15.c15_cpar;

            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                return 0;
            case 1: /* Read TI925T configuration.  */
                return env->cp15.c15_ticonfig;
            case 2: /* Read I_max.  */
                return env->cp15.c15_i_max;
            case 3: /* Read I_min.  */
                return env->cp15.c15_i_min;
            case 4: /* Read thread-ID.  */
                return env->cp15.c15_threadid;
            case 8: /* TI925T_status */
                return 0;
            }
            /* TODO: Peripheral port remap register:
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
             * controller base address at $rn & ~0xfff and map size of
             * 0x200 << ($rn & 0xfff), when MMU is off.  */
            goto bad_reg;
        }
        return 0;
    }
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
    return 0;
}
1782

    
1783
/* Set the banked SP (r13) belonging to the CPU mode given by MODE.  */
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    env->banked_r13[bank_number(mode)] = val;
}
1787

    
1788
/* Return the banked SP (r13) belonging to the CPU mode given by MODE.  */
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    return env->banked_r13[bank_number(mode)];
}
1792

    
1793
/* Read an M-profile special register (MRS instruction).  REG is the
   SYSm field from the instruction encoding; the masks select which
   xPSR bits each view exposes.  */
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        /* regs[13] always holds the currently-selected SP; the inactive
           one lives in v7m.other_sp.  */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 18: /* BASEPRI */
    case 19: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
1829

    
1830
/* Write a v7M special-purpose register (MSR encoding).  PSR views that
   use the same write mask are folded together.  IPSR writes are ignored
   (read-only); unimplemented register numbers abort.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        /* Bit 0 of the value drives the I mask bit.  */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* FAULTMASK */
        /* Bit 0 of the value drives the F mask bit.  */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 18: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 19: /* BASEPRI_MAX */
        /* Only accept the write if it tightens the mask; 0 never sticks.  */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        /* Bit 1 selects the active stack pointer.  */
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
1896

    
1897
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
1898
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
1899
                void *opaque)
1900
{
1901
    if (cpnum < 0 || cpnum > 14) {
1902
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
1903
        return;
1904
    }
1905

    
1906
    env->cp[cpnum].cp_read = cp_read;
1907
    env->cp[cpnum].cp_write = cp_write;
1908
    env->cp[cpnum].opaque = opaque;
1909
}
1910

    
1911
#endif
1912

    
1913
/* Note that signed overflow is undefined in C.  The following routines are
1914
   careful to use unsigned types where modulo arithmetic is required.
1915
   Failure to do so _will_ break on newer gcc.  */
1916

    
1917
/* Signed saturating arithmetic.  */
1918

    
1919
/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    /* Do the arithmetic at full precision and clamp to the int16 range.  */
    int32_t sum = (int32_t)(int16_t)a + (int32_t)(int16_t)b;

    if (sum > 0x7fff)
        sum = 0x7fff;
    else if (sum < -0x8000)
        sum = -0x8000;
    return (uint16_t)sum;
}
1933

    
1934
/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    /* Do the arithmetic at full precision and clamp to the int8 range.  */
    int32_t sum = (int32_t)(int8_t)a + (int32_t)(int8_t)b;

    if (sum > 0x7f)
        sum = 0x7f;
    else if (sum < -0x80)
        sum = -0x80;
    return (uint8_t)sum;
}
1948

    
1949
/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    /* Do the arithmetic at full precision and clamp to the int16 range.  */
    int32_t diff = (int32_t)(int16_t)a - (int32_t)(int16_t)b;

    if (diff > 0x7fff)
        diff = 0x7fff;
    else if (diff < -0x8000)
        diff = -0x8000;
    return (uint16_t)diff;
}
1963

    
1964
/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    /* Do the arithmetic at full precision and clamp to the int8 range.  */
    int32_t diff = (int32_t)(int8_t)a - (int32_t)(int8_t)b;

    if (diff > 0x7f)
        diff = 0x7f;
    else if (diff < -0x80)
        diff = -0x80;
    return (uint8_t)diff;
}
1978

    
1979
/* Instantiate the signed saturating parallel add/sub helpers ("q"
   prefix, e.g. qadd16) from the shared template in op_addsub.h, using
   the saturating primitives defined above.  RESULT() is supplied by
   the template.  */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
1986

    
1987
/* Unsigned saturating arithmetic.  */

/* Perform 16-bit unsigned saturating addition.
   Fix: the second operand was declared uint8_t, which silently
   truncated the high byte of the 16-bit lane value passed by the
   op_addsub.h-generated helpers.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    /* Unsigned wrap-around means overflow: clamp to the maximum.  */
    if (res < a)
        res = 0xffff;
    return res;
}
1996

    
1997
/* Perform 16-bit unsigned saturating subtraction.
   Fixes: the comparison was inverted (it returned the wrapped
   difference when a < b and 0 otherwise), and the second operand was
   declared uint8_t, truncating 16-bit lane values.  The result clamps
   at zero: a - b when a > b, else 0.  */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
2004

    
2005
/* Perform 8-bit unsigned saturating addition.  */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    /* Compute at full precision; anything above 0xff saturates.  */
    uint32_t sum = (uint32_t)a + (uint32_t)b;

    return (sum > 0xff) ? 0xff : (uint8_t)sum;
}
2013

    
2014
/* Perform 8-bit unsigned saturating subtraction.
   Fix: the comparison was inverted (it returned the wrapped difference
   when a < b and 0 otherwise).  The result clamps at zero: a - b when
   a > b, else 0.  */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
2021

    
2022
/* Instantiate the unsigned saturating parallel add/sub helpers ("uq"
   prefix, e.g. uqadd16) from the op_addsub.h template, using the
   unsigned saturating primitives defined above.  */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
2029

    
2030
/* Signed modulo arithmetic.  SARITH16/SARITH8 perform wrapping signed
   lane arithmetic and set the lane's GE bits when the wrapped result
   is non-negative.  RESULT() and the "ge" accumulator come from the
   op_addsub.h template included below (ARITH_GE enables GE updates).  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
2056

    
2057
/* Unsigned modulo arithmetic.  These generate the uadd/usub parallel
   helpers ("u" prefix).  GE flag semantics (ARM ARM, parallel
   add/subtract): for additions a lane's GE bits are set when the add
   carries out of the lane; for subtractions when the subtract does NOT
   borrow.  RESULT() and "ge" come from the op_addsub.h template.
   Fixes: ADD16/ADD8 tested for the absence of a carry ((sum >> N) == 0)
   instead of its presence, and ADD8/SUB8 set two GE bits per byte lane
   (3 << (n * 2)) instead of one (1 << n).  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
2094

    
2095
/* Halved signed arithmetic ("sh" prefix, shadd/shsub helpers): the
   full-precision signed sum/difference shifted right by one, so the
   result can never overflow a lane.
   NOTE(review): relies on >> of a negative int32 being an arithmetic
   shift (implementation-defined in C) -- confirm this assumption is
   made elsewhere in the codebase.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
2107

    
2108
/* Halved unsigned arithmetic ("uh" prefix, uhadd/uhsub helpers): the
   full-precision unsigned sum/difference logically shifted right by
   one, so the result always fits the lane.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
2120

    
2121
static inline uint8_t do_usad(uint8_t a, uint8_t b)
2122
{
2123
    if (a > b)
2124
        return a - b;
2125
    else
2126
        return b - a;
2127
}
2128

    
2129
/* Unsigned sum of absolute byte differences.  */
2130
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2131
{
2132
    uint32_t sum;
2133
    sum = do_usad(a, b);
2134
    sum += do_usad(a >> 8, b >> 8);
2135
    sum += do_usad(a >> 16, b >>16);
2136
    sum += do_usad(a >> 24, b >> 24);
2137
    return sum;
2138
}
2139

    
2140
/* For ARMv6 SEL instruction.  */
2141
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2142
{
2143
    uint32_t mask;
2144

    
2145
    mask = 0;
2146
    if (flags & 1)
2147
        mask |= 0xff;
2148
    if (flags & 2)
2149
        mask |= 0xff00;
2150
    if (flags & 4)
2151
        mask |= 0xff0000;
2152
    if (flags & 8)
2153
        mask |= 0xff000000;
2154
    return (a & mask) | (b & ~mask);
2155
}
2156

    
2157
uint32_t HELPER(logicq_cc)(uint64_t val)
2158
{
2159
    return (val >> 32) | (val != 0);
2160
}
2161

    
2162
/* VFP support.  We follow the convention used for VFP instrunctions:
2163
   Single precition routines have a "s" suffix, double precision a
2164
   "d" suffix.  */
2165

    
2166
/* Convert host exception flags to vfp form.  */
2167
static inline int vfp_exceptbits_from_host(int host_bits)
2168
{
2169
    int target_bits = 0;
2170

    
2171
    if (host_bits & float_flag_invalid)
2172
        target_bits |= 1;
2173
    if (host_bits & float_flag_divbyzero)
2174
        target_bits |= 2;
2175
    if (host_bits & float_flag_overflow)
2176
        target_bits |= 4;
2177
    if (host_bits & float_flag_underflow)
2178
        target_bits |= 8;
2179
    if (host_bits & float_flag_inexact)
2180
        target_bits |= 0x10;
2181
    return target_bits;
2182
}
2183

    
2184
/* Assemble the FPSCR value from the cached register, the separately
   stored vector length/stride fields, and the live softfloat
   exception flags.  */
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
{
    uint32_t fpscr = env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff;

    fpscr |= env->vfp.vec_len << 16;
    fpscr |= env->vfp.vec_stride << 20;
    /* Merge in the accumulated exception status bits.  */
    fpscr |= vfp_exceptbits_from_host(
                 get_float_exception_flags(&env->vfp.fp_status));
    return fpscr;
}
2196

    
2197
/* Convert vfp exception flags to target form.  */
2198
static inline int vfp_exceptbits_to_host(int target_bits)
2199
{
2200
    int host_bits = 0;
2201

    
2202
    if (target_bits & 1)
2203
        host_bits |= float_flag_invalid;
2204
    if (target_bits & 2)
2205
        host_bits |= float_flag_divbyzero;
2206
    if (target_bits & 4)
2207
        host_bits |= float_flag_overflow;
2208
    if (target_bits & 8)
2209
        host_bits |= float_flag_underflow;
2210
    if (target_bits & 0x10)
2211
        host_bits |= float_flag_inexact;
2212
    return host_bits;
2213
}
2214

    
2215
/* Write VAL to the FPSCR: update the cached register and the vector
   length/stride fields, and propagate the rounding mode and exception
   bits into the softfloat status.  */
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
{
    uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR] ^ val;

    env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xffc8ffff;
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /* Only touch the host rounding mode when the RMode field changed.  */
    if (changed & (3 << 22)) {
        static const int round_mode[4] = {
            float_round_nearest_even,
            float_round_up,
            float_round_down,
            float_round_to_zero,
        };
        set_float_rounding_mode(round_mode[(val >> 22) & 3],
                                &env->vfp.fp_status);
    }

    /* Install the cumulative exception bits (FPSCR[12:8]) as the live
       softfloat flags.  */
    set_float_exception_flags(vfp_exceptbits_to_host((val >> 8) & 0x1f),
                              &env->vfp.fp_status);
    /* XXX: FZ and DN are not implemented.  */
}
2249

    
2250
/* Expands to the helper function name for a VFP op: vfp_<name><p>.  */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate single- and double-precision binary-op helpers that route
   through env->vfp.fp_status for rounding mode and exception
   accumulation.  */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
{ \
    return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
{ \
    return float64_ ## name (a, b, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
2266

    
2267
float32 VFP_HELPER(neg, s)(float32 a)
2268
{
2269
    return float32_chs(a);
2270
}
2271

    
2272
float64 VFP_HELPER(neg, d)(float64 a)
2273
{
2274
    return float64_chs(a);
2275
}
2276

    
2277
float32 VFP_HELPER(abs, s)(float32 a)
2278
{
2279
    return float32_abs(a);
2280
}
2281

    
2282
float64 VFP_HELPER(abs, d)(float64 a)
2283
{
2284
    return float64_abs(a);
2285
}
2286

    
2287
float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2288
{
2289
    return float32_sqrt(a, &env->vfp.fp_status);
2290
}
2291

    
2292
float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2293
{
2294
    return float64_sqrt(a, &env->vfp.fp_status);
2295
}
2296

    
2297
/* XXX: check quiet/signaling case */
/* Generate vfp_cmp{s,d} (quiet compare) and vfp_cmpe{s,d} (signaling
   compare) helpers.  The compare outcome is written to the NZCV bits
   in FPSCR[31:28]: equal -> 0x6 (Z|C), less than -> 0x8 (N),
   greater than -> 0x2 (C), unordered -> 0x3 (C|V).  */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
2326

    
2327
/* Helper routines to perform bitwise copies between float and int.  */
2328
static inline float32 vfp_itos(uint32_t i)
2329
{
2330
    union {
2331
        uint32_t i;
2332
        float32 s;
2333
    } v;
2334

    
2335
    v.i = i;
2336
    return v.s;
2337
}
2338

    
2339
static inline uint32_t vfp_stoi(float32 s)
2340
{
2341
    union {
2342
        uint32_t i;
2343
        float32 s;
2344
    } v;
2345

    
2346
    v.s = s;
2347
    return v.i;
2348
}
2349

    
2350
static inline float64 vfp_itod(uint64_t i)
2351
{
2352
    union {
2353
        uint64_t i;
2354
        float64 d;
2355
    } v;
2356

    
2357
    v.i = i;
2358
    return v.d;
2359
}
2360

    
2361
static inline uint64_t vfp_dtoi(float64 d)
2362
{
2363
    union {
2364
        uint64_t i;
2365
        float64 d;
2366
    } v;
2367

    
2368
    v.d = d;
2369
    return v.i;
2370
}
2371

    
2372
/* Integer to float conversion.  */
2373
float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
2374
{
2375
    return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
2376
}
2377

    
2378
float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
2379
{
2380
    return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
2381
}
2382

    
2383
float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
2384
{
2385
    return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
2386
}
2387

    
2388
float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
2389
{
2390
    return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
2391
}
2392

    
2393
/* Float to integer conversion.  */
2394
float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
2395
{
2396
    return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
2397
}
2398

    
2399
float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
2400
{
2401
    return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
2402
}
2403

    
2404
float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
2405
{
2406
    return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
2407
}
2408

    
2409
float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
2410
{
2411
    return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
2412
}
2413

    
2414
float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
2415
{
2416
    return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
2417
}
2418

    
2419
float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
2420
{
2421
    return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
2422
}
2423

    
2424
float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
2425
{
2426
    return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
2427
}
2428

    
2429
float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
2430
{
2431
    return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
2432
}
2433

    
2434
/* floating point conversion */
2435
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2436
{
2437
    return float32_to_float64(x, &env->vfp.fp_status);
2438
}
2439

    
2440
float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2441
{
2442
    return float64_to_float32(x, &env->vfp.fp_status);
2443
}
2444

    
2445
/* VFP3 fixed point conversion.  For each (name, precision, float type,
   int type, signedness) tuple this expands to two helpers:
     vfp_<name>to<p>: fixed-point -> float: reinterpret the register
         bits as an integer, convert, then scale by 2^shift via scalbn;
     vfp_to<name><p>: float -> fixed-point: scale by 2^shift, convert
         with round-to-zero, and return the integer as register bits.
   NOTE(review): both directions pass "shift" to scalbn with the same
   sign -- confirm against the translator's shift operand encoding.  */
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
                                  &env->vfp.fp_status); \
    return ftype##_scalbn(tmp, shift, &env->vfp.fp_status); \
} \
ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
    return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
        &env->vfp.fp_status)); \
}

VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
#undef VFP_CONV_FIX
2471

    
2472
/* VRECPS: compute 2.0 - a * b, one step of a Newton-Raphson
   reciprocal iteration.  */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 product = float32_mul(a, b, s);

    return float32_sub(int32_to_float32(2, s), product, s);
}
2478

    
2479
/* VRSQRTS: compute (3.0 - a * b) / 2.0, one step of a Newton-Raphson
   reciprocal-square-root iteration (ARM ARM, VRSQRTS).
   Fix: the previous code returned 3 - a*b, omitting the required
   division by two.  */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    float32 three = int32_to_float32(3, s);
    return float32_div(float32_sub(three, float32_mul(a, b, s), s),
                       two, s);
}
2485

    
2486
/* NEON helpers.  */
2487

    
2488
/* TODO: The architecture specifies the value that the estimate functions
2489
   should return.  We return the exact reciprocal/root instead.  */
2490
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2491
{
2492
    float_status *s = &env->vfp.fp_status;
2493
    float32 one = int32_to_float32(1, s);
2494
    return float32_div(one, a, s);
2495
}
2496

    
2497
/* Reciprocal square root estimate: exact 1 / sqrt(a) in place of the
   architectural estimate (see TODO above).  */
float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 root = float32_sqrt(a, s);

    return float32_div(int32_to_float32(1, s), root, s);
}
2503

    
2504
/* Integer reciprocal estimate: scale A down by 2^32, take the float
   reciprocal estimate, and scale the result back up by 2^31.
   NOTE(review): the input goes through int32_to_float32, so values
   with bit 31 set are treated as negative -- behavior preserved from
   the original.  */
uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 f;

    f = float32_scalbn(int32_to_float32(a, s), -32, s);
    f = float32_scalbn(helper_recpe_f32(f, env), 31, s);
    return float32_to_int32(f, s);
}
2514

    
2515
/* Integer reciprocal-square-root estimate: scale A down by 2^32, take
   the float estimate, and scale the result back up by 2^31.
   NOTE(review): as with recpe_u32, the input is converted via
   int32_to_float32 -- behavior preserved from the original.  */
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 f;

    f = float32_scalbn(int32_to_float32(a, s), -32, s);
    f = float32_scalbn(helper_rsqrte_f32(f, env), 31, s);
    return float32_to_int32(f, s);
}