target-arm/helper.c @ 4373f3ce
1
#include <stdio.h>
2
#include <stdlib.h>
3
#include <string.h>
4

    
5
#include "cpu.h"
6
#include "exec-all.h"
7
#include "gdbstub.h"
8
#include "helpers.h"
9

    
10
static uint32_t cortexa8_cp15_c0_c1[8] =
11
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
12

    
13
static uint32_t cortexa8_cp15_c0_c2[8] =
14
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
15

    
16
static uint32_t mpcore_cp15_c0_c1[8] =
17
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
18

    
19
static uint32_t mpcore_cp15_c0_c2[8] =
20
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
21

    
22
static uint32_t arm1136_cp15_c0_c1[8] =
23
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
24

    
25
static uint32_t arm1136_cp15_c0_c2[8] =
26
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
27

    
28
static uint32_t cpu_arm_find_by_name(const char *name);
29

    
30
static inline void set_feature(CPUARMState *env, int feature)
31
{
32
    env->features |= 1u << feature;
33
}
34

    
35
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
36
{
37
    env->cp15.c0_cpuid = id;
38
    switch (id) {
39
    case ARM_CPUID_ARM926:
40
        set_feature(env, ARM_FEATURE_VFP);
41
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
42
        env->cp15.c0_cachetype = 0x1dd20d2;
43
        env->cp15.c1_sys = 0x00090078;
44
        break;
45
    case ARM_CPUID_ARM946:
46
        set_feature(env, ARM_FEATURE_MPU);
47
        env->cp15.c0_cachetype = 0x0f004006;
48
        env->cp15.c1_sys = 0x00000078;
49
        break;
50
    case ARM_CPUID_ARM1026:
51
        set_feature(env, ARM_FEATURE_VFP);
52
        set_feature(env, ARM_FEATURE_AUXCR);
53
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
54
        env->cp15.c0_cachetype = 0x1dd20d2;
55
        env->cp15.c1_sys = 0x00090078;
56
        break;
57
    case ARM_CPUID_ARM1136:
58
        set_feature(env, ARM_FEATURE_V6);
59
        set_feature(env, ARM_FEATURE_VFP);
60
        set_feature(env, ARM_FEATURE_AUXCR);
61
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
62
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
63
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
64
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
65
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
66
        env->cp15.c0_cachetype = 0x1dd20d2;
67
        break;
68
    case ARM_CPUID_ARM11MPCORE:
69
        set_feature(env, ARM_FEATURE_V6);
70
        set_feature(env, ARM_FEATURE_V6K);
71
        set_feature(env, ARM_FEATURE_VFP);
72
        set_feature(env, ARM_FEATURE_AUXCR);
73
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
74
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
75
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
76
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
77
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
78
        env->cp15.c0_cachetype = 0x1dd20d2;
79
        break;
80
    case ARM_CPUID_CORTEXA8:
81
        set_feature(env, ARM_FEATURE_V6);
82
        set_feature(env, ARM_FEATURE_V6K);
83
        set_feature(env, ARM_FEATURE_V7);
84
        set_feature(env, ARM_FEATURE_AUXCR);
85
        set_feature(env, ARM_FEATURE_THUMB2);
86
        set_feature(env, ARM_FEATURE_VFP);
87
        set_feature(env, ARM_FEATURE_VFP3);
88
        set_feature(env, ARM_FEATURE_NEON);
89
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
90
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
91
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
92
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
93
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
94
        env->cp15.c0_cachetype = 0x1dd20d2;
95
        break;
96
    case ARM_CPUID_CORTEXM3:
97
        set_feature(env, ARM_FEATURE_V6);
98
        set_feature(env, ARM_FEATURE_THUMB2);
99
        set_feature(env, ARM_FEATURE_V7);
100
        set_feature(env, ARM_FEATURE_M);
101
        set_feature(env, ARM_FEATURE_DIV);
102
        break;
103
    case ARM_CPUID_ANY: /* For userspace emulation.  */
104
        set_feature(env, ARM_FEATURE_V6);
105
        set_feature(env, ARM_FEATURE_V6K);
106
        set_feature(env, ARM_FEATURE_V7);
107
        set_feature(env, ARM_FEATURE_THUMB2);
108
        set_feature(env, ARM_FEATURE_VFP);
109
        set_feature(env, ARM_FEATURE_VFP3);
110
        set_feature(env, ARM_FEATURE_NEON);
111
        set_feature(env, ARM_FEATURE_DIV);
112
        break;
113
    case ARM_CPUID_TI915T:
114
    case ARM_CPUID_TI925T:
115
        set_feature(env, ARM_FEATURE_OMAPCP);
116
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
117
        env->cp15.c0_cachetype = 0x5109149;
118
        env->cp15.c1_sys = 0x00000070;
119
        env->cp15.c15_i_max = 0x000;
120
        env->cp15.c15_i_min = 0xff0;
121
        break;
122
    case ARM_CPUID_PXA250:
123
    case ARM_CPUID_PXA255:
124
    case ARM_CPUID_PXA260:
125
    case ARM_CPUID_PXA261:
126
    case ARM_CPUID_PXA262:
127
        set_feature(env, ARM_FEATURE_XSCALE);
128
        /* JTAG_ID is ((id << 28) | 0x09265013) */
129
        env->cp15.c0_cachetype = 0xd172172;
130
        env->cp15.c1_sys = 0x00000078;
131
        break;
132
    case ARM_CPUID_PXA270_A0:
133
    case ARM_CPUID_PXA270_A1:
134
    case ARM_CPUID_PXA270_B0:
135
    case ARM_CPUID_PXA270_B1:
136
    case ARM_CPUID_PXA270_C0:
137
    case ARM_CPUID_PXA270_C5:
138
        set_feature(env, ARM_FEATURE_XSCALE);
139
        /* JTAG_ID is ((id << 28) | 0x09265013) */
140
        set_feature(env, ARM_FEATURE_IWMMXT);
141
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
142
        env->cp15.c0_cachetype = 0xd172172;
143
        env->cp15.c1_sys = 0x00000078;
144
        break;
145
    default:
146
        cpu_abort(env, "Bad CPU ID: %x\n", id);
147
        break;
148
    }
149
}
150

    
151
void cpu_reset(CPUARMState *env)
152
{
153
    uint32_t id;
154
    id = env->cp15.c0_cpuid;
155
    memset(env, 0, offsetof(CPUARMState, breakpoints));
156
    if (id)
157
        cpu_reset_model_id(env, id);
158
#if defined (CONFIG_USER_ONLY)
159
    env->uncached_cpsr = ARM_CPU_MODE_USR;
160
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
161
#else
162
    /* SVC mode with interrupts disabled.  */
163
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
164
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
165
       clear at reset.  */
166
    if (IS_M(env))
167
        env->uncached_cpsr &= ~CPSR_I;
168
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
169
#endif
170
    env->regs[15] = 0;
171
    tlb_flush(env, 1);
172
}
173

    
174
CPUARMState *cpu_arm_init(const char *cpu_model)
175
{
176
    CPUARMState *env;
177
    uint32_t id;
178
    static int inited = 0;
179

    
180
    id = cpu_arm_find_by_name(cpu_model);
181
    if (id == 0)
182
        return NULL;
183
    env = qemu_mallocz(sizeof(CPUARMState));
184
    if (!env)
185
        return NULL;
186
    cpu_exec_init(env);
187
    if (!inited) {
188
        inited = 1;
189
        arm_translate_init();
190
    }
191

    
192
    env->cpu_model_str = cpu_model;
193
    env->cp15.c0_cpuid = id;
194
    cpu_reset(env);
195
    return env;
196
}
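
/* Illustrative usage sketch: how a board model might obtain a CPU by
   name.  The string must match one of the entries in arm_cpu_names
   below; "any" selects the permissive userspace-emulation model.  */
#if 0
    CPUARMState *env = cpu_arm_init("cortex-a8");
    if (!env) {
        fprintf(stderr, "Unable to find ARM CPU definition\n");
        exit(1);
    }
#endif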
197

    
198
struct arm_cpu_t {
199
    uint32_t id;
200
    const char *name;
201
};
202

    
203
static const struct arm_cpu_t arm_cpu_names[] = {
204
    { ARM_CPUID_ARM926, "arm926"},
205
    { ARM_CPUID_ARM946, "arm946"},
206
    { ARM_CPUID_ARM1026, "arm1026"},
207
    { ARM_CPUID_ARM1136, "arm1136"},
208
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
209
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
210
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
211
    { ARM_CPUID_TI925T, "ti925t" },
212
    { ARM_CPUID_PXA250, "pxa250" },
213
    { ARM_CPUID_PXA255, "pxa255" },
214
    { ARM_CPUID_PXA260, "pxa260" },
215
    { ARM_CPUID_PXA261, "pxa261" },
216
    { ARM_CPUID_PXA262, "pxa262" },
217
    { ARM_CPUID_PXA270, "pxa270" },
218
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
219
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
220
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
221
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
222
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
223
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
224
    { ARM_CPUID_ANY, "any"},
225
    { 0, NULL}
226
};
227

    
228
void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
229
{
230
    int i;
231

    
232
    (*cpu_fprintf)(f, "Available CPUs:\n");
233
    for (i = 0; arm_cpu_names[i].name; i++) {
234
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
235
    }
236
}
237

    
238
/* return 0 if not found */
239
static uint32_t cpu_arm_find_by_name(const char *name)
240
{
241
    int i;
242
    uint32_t id;
243

    
244
    id = 0;
245
    for (i = 0; arm_cpu_names[i].name; i++) {
246
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
247
            id = arm_cpu_names[i].id;
248
            break;
249
        }
250
    }
251
    return id;
252
}
253

    
254
void cpu_arm_close(CPUARMState *env)
255
{
256
    free(env);
257
}
258

    
259
/* Polynomial multiplication is like integer multiplication except the
260
   partial products are XORed, not added.  */
261
uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2)
262
{
263
    uint32_t mask;
264
    uint32_t result;
265
    result = 0;
266
    while (op1) {
267
        mask = 0;
268
        if (op1 & 1)
269
            mask |= 0xff;
270
        if (op1 & (1 << 8))
271
            mask |= (0xff << 8);
272
        if (op1 & (1 << 16))
273
            mask |= (0xff << 16);
274
        if (op1 & (1 << 24))
275
            mask |= (0xff << 24);
276
        result ^= op2 & mask;
277
        op1 = (op1 >> 1) & 0x7f7f7f7f;
278
        op2 = (op2 << 1) & 0xfefefefe;
279
    }
280
    return result;
281
}
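
/* Worked example: 0x03 * 0x06 as polynomials over GF(2) is
   (x + 1)(x^2 + x) = x^3 + x, i.e. the partial products 0x06 and 0x0c
   XOR to 0x0a, whereas an integer multiply would give 0x12.  Each of
   the four byte lanes of op1/op2 is multiplied independently.  */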
282

    
283
uint32_t cpsr_read(CPUARMState *env)
284
{
285
    int ZF;
286
    ZF = (env->NZF == 0);
287
    return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
288
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
289
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
290
        | ((env->condexec_bits & 0xfc) << 8)
291
        | (env->GE << 16);
292
}
293

    
294
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
295
{
296
    /* NOTE: N = 1 and Z = 1 cannot be stored currently */
297
    if (mask & CPSR_NZCV) {
298
        env->NZF = (val & 0xc0000000) ^ 0x40000000;
299
        env->CF = (val >> 29) & 1;
300
        env->VF = (val << 3) & 0x80000000;
301
    }
302
    if (mask & CPSR_Q)
303
        env->QF = ((val & CPSR_Q) != 0);
304
    if (mask & CPSR_T)
305
        env->thumb = ((val & CPSR_T) != 0);
306
    if (mask & CPSR_IT_0_1) {
307
        env->condexec_bits &= ~3;
308
        env->condexec_bits |= (val >> 25) & 3;
309
    }
310
    if (mask & CPSR_IT_2_7) {
311
        env->condexec_bits &= 3;
312
        env->condexec_bits |= (val >> 8) & 0xfc;
313
    }
314
    if (mask & CPSR_GE) {
315
        env->GE = (val >> 16) & 0xf;
316
    }
317

    
318
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
319
        switch_mode(env, val & CPSR_M);
320
    }
321
    mask &= ~CACHED_CPSR_BITS;
322
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
323
}
324

    
325
/* Sign/zero extend */
326
uint32_t HELPER(sxtb16)(uint32_t x)
327
{
328
    uint32_t res;
329
    res = (uint16_t)(int8_t)x;
330
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
331
    return res;
332
}
333

    
334
uint32_t HELPER(uxtb16)(uint32_t x)
335
{
336
    uint32_t res;
337
    res = (uint16_t)(uint8_t)x;
338
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
339
    return res;
340
}
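
/* Examples: uxtb16(0x12345678) extracts bytes 0 and 2, giving 0x00340078;
   sxtb16(0x00800080) sign-extends the same bytes, giving 0xff80ff80.  */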
341

    
342
uint32_t HELPER(clz)(uint32_t x)
343
{
344
    int count;
345
    for (count = 32; x; count--)
346
        x >>= 1;
347
    return count;
348
}
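
/* Examples: clz(0x80000000) = 0, clz(1) = 31 and clz(0) = 32, matching
   the ARM CLZ instruction's definition for an all-zeroes operand.  */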
349

    
350
int32_t HELPER(sdiv)(int32_t num, int32_t den)
351
{
352
    if (den == 0)
353
      return 0;
354
    return num / den;
355
}
356

    
357
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
358
{
359
    if (den == 0)
360
      return 0;
361
    return num / den;
362
}
363

    
364
uint32_t HELPER(rbit)(uint32_t x)
365
{
366
    x =  ((x & 0xff000000) >> 24)
367
       | ((x & 0x00ff0000) >> 8)
368
       | ((x & 0x0000ff00) << 8)
369
       | ((x & 0x000000ff) << 24);
370
    x =  ((x & 0xf0f0f0f0) >> 4)
371
       | ((x & 0x0f0f0f0f) << 4);
372
    x =  ((x & 0x88888888) >> 3)
373
       | ((x & 0x44444444) >> 1)
374
       | ((x & 0x22222222) << 1)
375
       | ((x & 0x11111111) << 3);
376
    return x;
377
}
378

    
379
#if defined(CONFIG_USER_ONLY)
380

    
381
void do_interrupt (CPUState *env)
382
{
383
    env->exception_index = -1;
384
}
385

    
386
/* Structure used to record exclusive memory locations.  */
387
typedef struct mmon_state {
388
    struct mmon_state *next;
389
    CPUARMState *cpu_env;
390
    uint32_t addr;
391
} mmon_state;
392

    
393
/* Chain of current locks.  */
394
static mmon_state* mmon_head = NULL;
395

    
396
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
397
                              int mmu_idx, int is_softmmu)
398
{
399
    if (rw == 2) {
400
        env->exception_index = EXCP_PREFETCH_ABORT;
401
        env->cp15.c6_insn = address;
402
    } else {
403
        env->exception_index = EXCP_DATA_ABORT;
404
        env->cp15.c6_data = address;
405
    }
406
    return 1;
407
}
408

    
409
static void allocate_mmon_state(CPUState *env)
410
{
411
    env->mmon_entry = malloc(sizeof (mmon_state));
412
    if (!env->mmon_entry)
413
        abort();
414
    memset (env->mmon_entry, 0, sizeof (mmon_state));
415
    env->mmon_entry->cpu_env = env;
416
    mmon_head = env->mmon_entry;
417
}
418

    
419
/* Flush any monitor locks for the specified address.  */
420
static void flush_mmon(uint32_t addr)
421
{
422
    mmon_state *mon;
423

    
424
    for (mon = mmon_head; mon; mon = mon->next)
425
      {
426
        if (mon->addr != addr)
427
          continue;
428

    
429
        mon->addr = 0;
430
        break;
431
      }
432
}
433

    
434
/* Mark an address for exclusive access.  */
435
void helper_mark_exclusive(CPUState *env, uint32_t addr)
436
{
437
    if (!env->mmon_entry)
438
        allocate_mmon_state(env);
439
    /* Clear any previous locks.  */
440
    flush_mmon(addr);
441
    env->mmon_entry->addr = addr;
442
}
443

    
444
/* Test if an exclusive address is still exclusive.  Returns zero
445
   if the address is still exclusive.   */
446
int helper_test_exclusive(CPUState *env, uint32_t addr)
447
{
448
    int res;
449

    
450
    if (!env->mmon_entry)
451
        return 1;
452
    if (env->mmon_entry->addr == addr)
453
        res = 0;
454
    else
455
        res = 1;
456
    flush_mmon(addr);
457
    return res;
458
}
459

    
460
void helper_clrex(CPUState *env)
461
{
462
    if (!(env->mmon_entry && env->mmon_entry->addr))
463
        return;
464
    flush_mmon(env->mmon_entry->addr);
465
}
466

    
467
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
468
{
469
    return addr;
470
}
471

    
472
/* These should probably raise undefined insn exceptions.  */
473
void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
474
{
475
    int op1 = (insn >> 8) & 0xf;
476
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
477
    return;
478
}
479

    
480
uint32_t helper_get_cp(CPUState *env, uint32_t insn)
481
{
482
    int op1 = (insn >> 8) & 0xf;
483
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
484
    return 0;
485
}
486

    
487
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
488
{
489
    cpu_abort(env, "cp15 insn %08x\n", insn);
490
}
491

    
492
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
493
{
494
    cpu_abort(env, "cp15 insn %08x\n", insn);
495
    return 0;
496
}
497

    
498
/* These should probably raise undefined insn exceptions.  */
499
void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
500
{
501
    cpu_abort(env, "v7m_mrs %d\n", reg);
502
}
503

    
504
uint32_t helper_v7m_mrs(CPUState *env, int reg)
505
{
506
    cpu_abort(env, "v7m_mrs %d\n", reg);
507
    return 0;
508
}
509

    
510
void switch_mode(CPUState *env, int mode)
511
{
512
    if (mode != ARM_CPU_MODE_USR)
513
        cpu_abort(env, "Tried to switch out of user mode\n");
514
}
515

    
516
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
517
{
518
    cpu_abort(env, "banked r13 write\n");
519
}
520

    
521
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
522
{
523
    cpu_abort(env, "banked r13 read\n");
524
    return 0;
525
}
526

    
527
#else
528

    
529
extern int semihosting_enabled;
530

    
531
/* Map CPU modes onto saved register banks.  */
532
static inline int bank_number (int mode)
533
{
534
    switch (mode) {
535
    case ARM_CPU_MODE_USR:
536
    case ARM_CPU_MODE_SYS:
537
        return 0;
538
    case ARM_CPU_MODE_SVC:
539
        return 1;
540
    case ARM_CPU_MODE_ABT:
541
        return 2;
542
    case ARM_CPU_MODE_UND:
543
        return 3;
544
    case ARM_CPU_MODE_IRQ:
545
        return 4;
546
    case ARM_CPU_MODE_FIQ:
547
        return 5;
548
    }
549
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
550
    return -1;
551
}
552

    
553
void switch_mode(CPUState *env, int mode)
554
{
555
    int old_mode;
556
    int i;
557

    
558
    old_mode = env->uncached_cpsr & CPSR_M;
559
    if (mode == old_mode)
560
        return;
561

    
562
    if (old_mode == ARM_CPU_MODE_FIQ) {
563
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
564
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
565
    } else if (mode == ARM_CPU_MODE_FIQ) {
566
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
567
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
568
    }
569

    
570
    i = bank_number(old_mode);
571
    env->banked_r13[i] = env->regs[13];
572
    env->banked_r14[i] = env->regs[14];
573
    env->banked_spsr[i] = env->spsr;
574

    
575
    i = bank_number(mode);
576
    env->regs[13] = env->banked_r13[i];
577
    env->regs[14] = env->banked_r14[i];
578
    env->spsr = env->banked_spsr[i];
579
}
580

    
581
static void v7m_push(CPUARMState *env, uint32_t val)
582
{
583
    env->regs[13] -= 4;
584
    stl_phys(env->regs[13], val);
585
}
586

    
587
static uint32_t v7m_pop(CPUARMState *env)
588
{
589
    uint32_t val;
590
    val = ldl_phys(env->regs[13]);
591
    env->regs[13] += 4;
592
    return val;
593
}
594

    
595
/* Switch to V7M main or process stack pointer.  */
596
static void switch_v7m_sp(CPUARMState *env, int process)
597
{
598
    uint32_t tmp;
599
    if (env->v7m.current_sp != process) {
600
        tmp = env->v7m.other_sp;
601
        env->v7m.other_sp = env->regs[13];
602
        env->regs[13] = tmp;
603
        env->v7m.current_sp = process;
604
    }
605
}
606

    
607
static void do_v7m_exception_exit(CPUARMState *env)
608
{
609
    uint32_t type;
610
    uint32_t xpsr;
611

    
612
    type = env->regs[15];
613
    if (env->v7m.exception != 0)
614
        armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);
615

    
616
    /* Switch to the target stack.  */
617
    switch_v7m_sp(env, (type & 4) != 0);
618
    /* Pop registers.  */
619
    env->regs[0] = v7m_pop(env);
620
    env->regs[1] = v7m_pop(env);
621
    env->regs[2] = v7m_pop(env);
622
    env->regs[3] = v7m_pop(env);
623
    env->regs[12] = v7m_pop(env);
624
    env->regs[14] = v7m_pop(env);
625
    env->regs[15] = v7m_pop(env);
626
    xpsr = v7m_pop(env);
627
    xpsr_write(env, xpsr, 0xfffffdff);
628
    /* Undo stack alignment.  */
629
    if (xpsr & 0x200)
630
        env->regs[13] |= 4;
631
    /* ??? The exception return type specifies Thread/Handler mode.  However
632
       this is also implied by the xPSR value. Not sure what to do
633
       if there is a mismatch.  */
634
    /* ??? Likewise for mismatches between the CONTROL register and the stack
635
       pointer.  */
636
}
637

    
638
void do_interrupt_v7m(CPUARMState *env)
639
{
640
    uint32_t xpsr = xpsr_read(env);
641
    uint32_t lr;
642
    uint32_t addr;
643

    
644
    lr = 0xfffffff1;
645
    if (env->v7m.current_sp)
646
        lr |= 4;
647
    if (env->v7m.exception == 0)
648
        lr |= 8;
649

    
650
    /* For exceptions we just mark as pending on the NVIC, and let that
651
       handle it.  */
652
    /* TODO: Need to escalate if the current priority is higher than the
653
       one we're raising.  */
654
    switch (env->exception_index) {
655
    case EXCP_UDEF:
656
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
657
        return;
658
    case EXCP_SWI:
659
        env->regs[15] += 2;
660
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
661
        return;
662
    case EXCP_PREFETCH_ABORT:
663
    case EXCP_DATA_ABORT:
664
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
665
        return;
666
    case EXCP_BKPT:
667
        if (semihosting_enabled) {
668
            int nr;
669
            nr = lduw_code(env->regs[15]) & 0xff;
670
            if (nr == 0xab) {
671
                env->regs[15] += 2;
672
                env->regs[0] = do_arm_semihosting(env);
673
                return;
674
            }
675
        }
676
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
677
        return;
678
    case EXCP_IRQ:
679
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
680
        break;
681
    case EXCP_EXCEPTION_EXIT:
682
        do_v7m_exception_exit(env);
683
        return;
684
    default:
685
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
686
        return; /* Never happens.  Keep compiler happy.  */
687
    }
688

    
689
    /* Align stack pointer.  */
690
    /* ??? Should only do this if Configuration Control Register
691
       STACKALIGN bit is set.  */
692
    if (env->regs[13] & 4) {
693
        env->regs[13] += 4;
694
        xpsr |= 0x200;
695
    }
696
    /* Switch to Handler mode.  */
697
    v7m_push(env, xpsr);
698
    v7m_push(env, env->regs[15]);
699
    v7m_push(env, env->regs[14]);
700
    v7m_push(env, env->regs[12]);
701
    v7m_push(env, env->regs[3]);
702
    v7m_push(env, env->regs[2]);
703
    v7m_push(env, env->regs[1]);
704
    v7m_push(env, env->regs[0]);
705
    switch_v7m_sp(env, 0);
706
    env->uncached_cpsr &= ~CPSR_IT;
707
    env->regs[14] = lr;
708
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
709
    env->regs[15] = addr & 0xfffffffe;
710
    env->thumb = addr & 1;
711
}
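
/* The resulting v7M exception frame, from the final (lowest) stack
   address upwards, is: r0, r1, r2, r3, r12, lr, return address, xPSR.
   do_v7m_exception_exit() pops it back in the same order.  */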
712

    
713
/* Handle a CPU exception.  */
714
void do_interrupt(CPUARMState *env)
715
{
716
    uint32_t addr;
717
    uint32_t mask;
718
    int new_mode;
719
    uint32_t offset;
720

    
721
    if (IS_M(env)) {
722
        do_interrupt_v7m(env);
723
        return;
724
    }
725
    /* TODO: Vectored interrupt controller.  */
726
    switch (env->exception_index) {
727
    case EXCP_UDEF:
728
        new_mode = ARM_CPU_MODE_UND;
729
        addr = 0x04;
730
        mask = CPSR_I;
731
        if (env->thumb)
732
            offset = 2;
733
        else
734
            offset = 4;
735
        break;
736
    case EXCP_SWI:
737
        if (semihosting_enabled) {
738
            /* Check for semihosting interrupt.  */
739
            if (env->thumb) {
740
                mask = lduw_code(env->regs[15] - 2) & 0xff;
741
            } else {
742
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
743
            }
744
            /* Only intercept calls from privileged modes, to provide some
745
               semblance of security.  */
746
            if (((mask == 0x123456 && !env->thumb)
747
                    || (mask == 0xab && env->thumb))
748
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
749
                env->regs[0] = do_arm_semihosting(env);
750
                return;
751
            }
752
        }
753
        new_mode = ARM_CPU_MODE_SVC;
754
        addr = 0x08;
755
        mask = CPSR_I;
756
        /* The PC already points to the next instruction.  */
757
        offset = 0;
758
        break;
759
    case EXCP_BKPT:
760
        /* See if this is a semihosting syscall.  */
761
        if (env->thumb && semihosting_enabled) {
762
            mask = lduw_code(env->regs[15]) & 0xff;
763
            if (mask == 0xab
764
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
765
                env->regs[15] += 2;
766
                env->regs[0] = do_arm_semihosting(env);
767
                return;
768
            }
769
        }
770
        /* Fall through to prefetch abort.  */
771
    case EXCP_PREFETCH_ABORT:
772
        new_mode = ARM_CPU_MODE_ABT;
773
        addr = 0x0c;
774
        mask = CPSR_A | CPSR_I;
775
        offset = 4;
776
        break;
777
    case EXCP_DATA_ABORT:
778
        new_mode = ARM_CPU_MODE_ABT;
779
        addr = 0x10;
780
        mask = CPSR_A | CPSR_I;
781
        offset = 8;
782
        break;
783
    case EXCP_IRQ:
784
        new_mode = ARM_CPU_MODE_IRQ;
785
        addr = 0x18;
786
        /* Disable IRQ and imprecise data aborts.  */
787
        mask = CPSR_A | CPSR_I;
788
        offset = 4;
789
        break;
790
    case EXCP_FIQ:
791
        new_mode = ARM_CPU_MODE_FIQ;
792
        addr = 0x1c;
793
        /* Disable FIQ, IRQ and imprecise data aborts.  */
794
        mask = CPSR_A | CPSR_I | CPSR_F;
795
        offset = 4;
796
        break;
797
    default:
798
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
799
        return; /* Never happens.  Keep compiler happy.  */
800
    }
801
    /* High vectors.  */
802
    if (env->cp15.c1_sys & (1 << 13)) {
803
        addr += 0xffff0000;
804
    }
805
    switch_mode (env, new_mode);
806
    env->spsr = cpsr_read(env);
807
    /* Clear IT bits.  */
808
    env->condexec_bits = 0;
809
    /* Switch to the new mode, and switch to Arm mode.  */
810
    /* ??? Thumb interrupt handlers not implemented.  */
811
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
812
    env->uncached_cpsr |= mask;
813
    env->thumb = 0;
814
    env->regs[14] = env->regs[15] + offset;
815
    env->regs[15] = addr;
816
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
817
}
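
/* Summary of the vector offsets used above: undefined instruction 0x04,
   SWI 0x08, prefetch abort 0x0c, data abort 0x10, IRQ 0x18, FIQ 0x1c.
   When bit 13 (the V bit) of c1_sys is set the table is relocated to
   0xffff0000 ("high vectors").  */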
818

    
819
/* Check section/page access permissions.
820
   Returns the page protection flags, or zero if the access is not
821
   permitted.  */
822
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
823
                           int is_user)
824
{
825
  int prot_ro;
826

    
827
  if (domain == 3)
828
    return PAGE_READ | PAGE_WRITE;
829

    
830
  if (access_type == 1)
831
      prot_ro = 0;
832
  else
833
      prot_ro = PAGE_READ;
834

    
835
  switch (ap) {
836
  case 0:
837
      if (access_type == 1)
838
          return 0;
839
      switch ((env->cp15.c1_sys >> 8) & 3) {
840
      case 1:
841
          return is_user ? 0 : PAGE_READ;
842
      case 2:
843
          return PAGE_READ;
844
      default:
845
          return 0;
846
      }
847
  case 1:
848
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
849
  case 2:
850
      if (is_user)
851
          return prot_ro;
852
      else
853
          return PAGE_READ | PAGE_WRITE;
854
  case 3:
855
      return PAGE_READ | PAGE_WRITE;
856
  case 4: case 7: /* Reserved.  */
857
      return 0;
858
  case 5:
859
      return is_user ? 0 : prot_ro;
860
  case 6:
861
      return prot_ro;
862
  default:
863
      abort();
864
  }
865
}
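
/* Summary of the AP encodings handled above (S and R are bits 8 and 9
   of c1_sys): 0 = no access unless S or R grants read-only, 1 =
   privileged read/write, 2 = privileged read/write with user read-only,
   3 = full read/write, 5 = privileged read-only, 6 = read-only; 4 and 7
   are reserved.  A manager domain (value 3) bypasses these checks.  */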
866

    
867
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
868
                            int is_user, uint32_t *phys_ptr, int *prot)
869
{
870
    int code;
871
    uint32_t table;
872
    uint32_t desc;
873
    int type;
874
    int ap;
875
    int domain;
876
    uint32_t phys_addr;
877

    
878
    /* Pagetable walk.  */
879
    /* Lookup l1 descriptor.  */
880
    if (address & env->cp15.c2_mask)
881
        table = env->cp15.c2_base1;
882
    else
883
        table = env->cp15.c2_base0;
884
    table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
885
    desc = ldl_phys(table);
886
    type = (desc & 3);
887
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
888
    if (type == 0) {
889
        /* Section translation fault.  */
890
        code = 5;
891
        goto do_fault;
892
    }
893
    if (domain == 0 || domain == 2) {
894
        if (type == 2)
895
            code = 9; /* Section domain fault.  */
896
        else
897
            code = 11; /* Page domain fault.  */
898
        goto do_fault;
899
    }
900
    if (type == 2) {
901
        /* 1Mb section.  */
902
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
903
        ap = (desc >> 10) & 3;
904
        code = 13;
905
    } else {
906
        /* Lookup l2 entry.  */
907
        if (type == 1) {
908
            /* Coarse pagetable.  */
909
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
910
        } else {
911
            /* Fine pagetable.  */
912
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
913
        }
914
        desc = ldl_phys(table);
915
        switch (desc & 3) {
916
        case 0: /* Page translation fault.  */
917
            code = 7;
918
            goto do_fault;
919
        case 1: /* 64k page.  */
920
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
921
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
922
            break;
923
        case 2: /* 4k page.  */
924
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
925
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
926
            break;
927
        case 3: /* 1k page.  */
928
            if (type == 1) {
929
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
930
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
931
                } else {
932
                    /* Page translation fault.  */
933
                    code = 7;
934
                    goto do_fault;
935
                }
936
            } else {
937
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
938
            }
939
            ap = (desc >> 4) & 3;
940
            break;
941
        default:
942
            /* Never happens, but compiler isn't smart enough to tell.  */
943
            abort();
944
        }
945
        code = 15;
946
    }
947
    *prot = check_ap(env, ap, domain, access_type, is_user);
948
    if (!*prot) {
949
        /* Access permission fault.  */
950
        goto do_fault;
951
    }
952
    *phys_ptr = phys_addr;
953
    return 0;
954
do_fault:
955
    return code | (domain << 4);
956
}
957

    
958
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
959
                            int is_user, uint32_t *phys_ptr, int *prot)
960
{
961
    int code;
962
    uint32_t table;
963
    uint32_t desc;
964
    uint32_t xn;
965
    int type;
966
    int ap;
967
    int domain;
968
    uint32_t phys_addr;
969

    
970
    /* Pagetable walk.  */
971
    /* Lookup l1 descriptor.  */
972
    if (address & env->cp15.c2_mask)
973
        table = env->cp15.c2_base1;
974
    else
975
        table = env->cp15.c2_base0;
976
    table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
977
    desc = ldl_phys(table);
978
    type = (desc & 3);
979
    if (type == 0) {
980
        /* Section translation fault.  */
981
        code = 5;
982
        domain = 0;
983
        goto do_fault;
984
    } else if (type == 2 && (desc & (1 << 18))) {
985
        /* Supersection.  */
986
        domain = 0;
987
    } else {
988
        /* Section or page.  */
989
        domain = (desc >> 4) & 0x1e;
990
    }
991
    domain = (env->cp15.c3 >> domain) & 3;
992
    if (domain == 0 || domain == 2) {
993
        if (type == 2)
994
            code = 9; /* Section domain fault.  */
995
        else
996
            code = 11; /* Page domain fault.  */
997
        goto do_fault;
998
    }
999
    if (type == 2) {
1000
        if (desc & (1 << 18)) {
1001
            /* Supersection.  */
1002
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1003
        } else {
1004
            /* Section.  */
1005
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1006
        }
1007
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1008
        xn = desc & (1 << 4);
1009
        code = 13;
1010
    } else {
1011
        /* Lookup l2 entry.  */
1012
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1013
        desc = ldl_phys(table);
1014
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1015
        switch (desc & 3) {
1016
        case 0: /* Page translation fault.  */
1017
            code = 7;
1018
            goto do_fault;
1019
        case 1: /* 64k page.  */
1020
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1021
            xn = desc & (1 << 15);
1022
            break;
1023
        case 2: case 3: /* 4k page.  */
1024
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1025
            xn = desc & 1;
1026
            break;
1027
        default:
1028
            /* Never happens, but compiler isn't smart enough to tell.  */
1029
            abort();
1030
        }
1031
        code = 15;
1032
    }
1033
    if (xn && access_type == 2)
1034
        goto do_fault;
1035

    
1036
    *prot = check_ap(env, ap, domain, access_type, is_user);
1037
    if (!*prot) {
1038
        /* Access permission fault.  */
1039
        goto do_fault;
1040
    }
1041
    *phys_ptr = phys_addr;
1042
    return 0;
1043
do_fault:
1044
    return code | (domain << 4);
1045
}
1046

    
1047
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
1048
                             int is_user, uint32_t *phys_ptr, int *prot)
1049
{
1050
    int n;
1051
    uint32_t mask;
1052
    uint32_t base;
1053

    
1054
    *phys_ptr = address;
1055
    for (n = 7; n >= 0; n--) {
1056
        base = env->cp15.c6_region[n];
1057
        if ((base & 1) == 0)
1058
            continue;
1059
        mask = 1 << ((base >> 1) & 0x1f);
1060
        /* Keep this shift separate from the above to avoid an
1061
           (undefined) << 32.  */
1062
        mask = (mask << 1) - 1;
1063
        if (((base ^ address) & ~mask) == 0)
1064
            break;
1065
    }
1066
    if (n < 0)
1067
        return 2;
1068

    
1069
    if (access_type == 2) {
1070
        mask = env->cp15.c5_insn;
1071
    } else {
1072
        mask = env->cp15.c5_data;
1073
    }
1074
    mask = (mask >> (n * 4)) & 0xf;
1075
    switch (mask) {
1076
    case 0:
1077
        return 1;
1078
    case 1:
1079
        if (is_user)
1080
          return 1;
1081
        *prot = PAGE_READ | PAGE_WRITE;
1082
        break;
1083
    case 2:
1084
        *prot = PAGE_READ;
1085
        if (!is_user)
1086
            *prot |= PAGE_WRITE;
1087
        break;
1088
    case 3:
1089
        *prot = PAGE_READ | PAGE_WRITE;
1090
        break;
1091
    case 5:
1092
        if (is_user)
1093
            return 1;
1094
        *prot = PAGE_READ;
1095
        break;
1096
    case 6:
1097
        *prot = PAGE_READ;
1098
        break;
1099
    default:
1100
        /* Bad permission.  */
1101
        return 1;
1102
    }
1103
    return 0;
1104
}
1105

    
1106
static inline int get_phys_addr(CPUState *env, uint32_t address,
1107
                                int access_type, int is_user,
1108
                                uint32_t *phys_ptr, int *prot)
1109
{
1110
    /* Fast Context Switch Extension.  */
1111
    if (address < 0x02000000)
1112
        address += env->cp15.c13_fcse;
1113

    
1114
    if ((env->cp15.c1_sys & 1) == 0) {
1115
        /* MMU/MPU disabled.  */
1116
        *phys_ptr = address;
1117
        *prot = PAGE_READ | PAGE_WRITE;
1118
        return 0;
1119
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1120
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1121
                                 prot);
1122
    } else if (env->cp15.c1_sys & (1 << 23)) {
1123
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1124
                                prot);
1125
    } else {
1126
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1127
                                prot);
1128
    }
1129
}
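
/* get_phys_addr() returns 0 on success.  For the v5/v6 table walks a
   nonzero return is the fault status code in bits [3:0] with the
   faulting domain in bits [7:4]; the caller below stores it directly
   into c5_data or c5_insn.  */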
1130

    
1131
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
1132
                              int access_type, int mmu_idx, int is_softmmu)
1133
{
1134
    uint32_t phys_addr;
1135
    int prot;
1136
    int ret, is_user;
1137

    
1138
    is_user = mmu_idx == MMU_USER_IDX;
1139
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
1140
    if (ret == 0) {
1141
        /* Map a single [sub]page.  */
1142
        phys_addr &= ~(uint32_t)0x3ff;
1143
        address &= ~(uint32_t)0x3ff;
1144
        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
1145
                             is_softmmu);
1146
    }
1147

    
1148
    if (access_type == 2) {
1149
        env->cp15.c5_insn = ret;
1150
        env->cp15.c6_insn = address;
1151
        env->exception_index = EXCP_PREFETCH_ABORT;
1152
    } else {
1153
        env->cp15.c5_data = ret;
1154
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1155
            env->cp15.c5_data |= (1 << 11);
1156
        env->cp15.c6_data = address;
1157
        env->exception_index = EXCP_DATA_ABORT;
1158
    }
1159
    return 1;
1160
}
1161

    
1162
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1163
{
1164
    uint32_t phys_addr;
1165
    int prot;
1166
    int ret;
1167

    
1168
    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
1169

    
1170
    if (ret != 0)
1171
        return -1;
1172

    
1173
    return phys_addr;
1174
}
1175

    
1176
/* Not really implemented.  Need to figure out a sane way of doing this.
1177
   Maybe add generic watchpoint support and use that.  */
1178

    
1179
void helper_mark_exclusive(CPUState *env, uint32_t addr)
1180
{
1181
    env->mmon_addr = addr;
1182
}
1183

    
1184
int helper_test_exclusive(CPUState *env, uint32_t addr)
1185
{
1186
    return (env->mmon_addr != addr);
1187
}
1188

    
1189
void helper_clrex(CPUState *env)
1190
{
1191
    env->mmon_addr = -1;
1192
}
1193

    
1194
void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
1195
{
1196
    int cp_num = (insn >> 8) & 0xf;
1197
    int cp_info = (insn >> 5) & 7;
1198
    int src = (insn >> 16) & 0xf;
1199
    int operand = insn & 0xf;
1200

    
1201
    if (env->cp[cp_num].cp_write)
1202
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1203
                                 cp_info, src, operand, val);
1204
}
1205

    
1206
uint32_t helper_get_cp(CPUState *env, uint32_t insn)
1207
{
1208
    int cp_num = (insn >> 8) & 0xf;
1209
    int cp_info = (insn >> 5) & 7;
1210
    int dest = (insn >> 16) & 0xf;
1211
    int operand = insn & 0xf;
1212

    
1213
    if (env->cp[cp_num].cp_read)
1214
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1215
                                       cp_info, dest, operand);
1216
    return 0;
1217
}
1218

    
1219
/* Return basic MPU access permission bits.  */
1220
static uint32_t simple_mpu_ap_bits(uint32_t val)
1221
{
1222
    uint32_t ret;
1223
    uint32_t mask;
1224
    int i;
1225
    ret = 0;
1226
    mask = 3;
1227
    for (i = 0; i < 16; i += 2) {
1228
        ret |= (val >> i) & mask;
1229
        mask <<= 2;
1230
    }
1231
    return ret;
1232
}
1233

    
1234
/* Pad basic MPU access permission bits to extended format.  */
1235
static uint32_t extended_mpu_ap_bits(uint32_t val)
1236
{
1237
    uint32_t ret;
1238
    uint32_t mask;
1239
    int i;
1240
    ret = 0;
1241
    mask = 3;
1242
    for (i = 0; i < 16; i += 2) {
1243
        ret |= (val & mask) << i;
1244
        mask <<= 2;
1245
    }
1246
    return ret;
1247
}
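
/* Example: the simple-format value 0x000d (region 0 = AP 1, region 1 =
   AP 3) expands to 0x0031 in the extended format, and
   simple_mpu_ap_bits() maps it back; the two helpers are inverses for
   the low two AP bits of each of the eight regions.  */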
1248

    
1249
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
1250
{
1251
    int op1;
1252
    int op2;
1253
    int crm;
1254

    
1255
    op1 = (insn >> 21) & 7;
1256
    op2 = (insn >> 5) & 7;
1257
    crm = insn & 0xf;
1258
    switch ((insn >> 16) & 0xf) {
1259
    case 0:
1260
        if (((insn >> 21) & 7) == 2) {
1261
            /* ??? Select cache level.  Ignore.  */
1262
            return;
1263
        }
1264
        /* ID codes.  */
1265
        if (arm_feature(env, ARM_FEATURE_XSCALE))
1266
            break;
1267
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1268
            break;
1269
        goto bad_reg;
1270
    case 1: /* System configuration.  */
1271
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1272
            op2 = 0;
1273
        switch (op2) {
1274
        case 0:
1275
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
1276
                env->cp15.c1_sys = val;
1277
            /* ??? Lots of these bits are not implemented.  */
1278
            /* This may enable/disable the MMU, so do a TLB flush.  */
1279
            tlb_flush(env, 1);
1280
            break;
1281
        case 1: /* Auxiliary control register.  */
1282
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1283
                env->cp15.c1_xscaleauxcr = val;
1284
                break;
1285
            }
1286
            /* Not implemented.  */
1287
            break;
1288
        case 2:
1289
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1290
                goto bad_reg;
1291
            env->cp15.c1_coproc = val;
1292
            /* ??? Is this safe when called from within a TB?  */
1293
            tb_flush(env);
1294
            break;
1295
        default:
1296
            goto bad_reg;
1297
        }
1298
        break;
1299
    case 2: /* MMU Page table control / MPU cache control.  */
1300
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1301
            switch (op2) {
1302
            case 0:
1303
                env->cp15.c2_data = val;
1304
                break;
1305
            case 1:
1306
                env->cp15.c2_insn = val;
1307
                break;
1308
            default:
1309
                goto bad_reg;
1310
            }
1311
        } else {
1312
            switch (op2) {
1313
            case 0:
1314
                env->cp15.c2_base0 = val;
1315
                break;
1316
            case 1:
1317
                env->cp15.c2_base1 = val;
1318
                break;
1319
            case 2:
1320
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1321
                break;
1322
            default:
1323
                goto bad_reg;
1324
            }
1325
        }
1326
        break;
1327
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1328
        env->cp15.c3 = val;
1329
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1330
        break;
1331
    case 4: /* Reserved.  */
1332
        goto bad_reg;
1333
    case 5: /* MMU Fault status / MPU access permission.  */
1334
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1335
            op2 = 0;
1336
        switch (op2) {
1337
        case 0:
1338
            if (arm_feature(env, ARM_FEATURE_MPU))
1339
                val = extended_mpu_ap_bits(val);
1340
            env->cp15.c5_data = val;
1341
            break;
1342
        case 1:
1343
            if (arm_feature(env, ARM_FEATURE_MPU))
1344
                val = extended_mpu_ap_bits(val);
1345
            env->cp15.c5_insn = val;
1346
            break;
1347
        case 2:
1348
            if (!arm_feature(env, ARM_FEATURE_MPU))
1349
                goto bad_reg;
1350
            env->cp15.c5_data = val;
1351
            break;
1352
        case 3:
1353
            if (!arm_feature(env, ARM_FEATURE_MPU))
1354
                goto bad_reg;
1355
            env->cp15.c5_insn = val;
1356
            break;
1357
        default:
1358
            goto bad_reg;
1359
        }
1360
        break;
1361
    case 6: /* MMU Fault address / MPU base/size.  */
1362
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1363
            if (crm >= 8)
1364
                goto bad_reg;
1365
            env->cp15.c6_region[crm] = val;
1366
        } else {
1367
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1368
                op2 = 0;
1369
            switch (op2) {
1370
            case 0:
1371
                env->cp15.c6_data = val;
1372
                break;
1373
            case 1: /* ??? This is WFAR on armv6 */
1374
            case 2:
1375
                env->cp15.c6_insn = val;
1376
                break;
1377
            default:
1378
                goto bad_reg;
1379
            }
1380
        }
1381
        break;
1382
    case 7: /* Cache control.  */
1383
        env->cp15.c15_i_max = 0x000;
1384
        env->cp15.c15_i_min = 0xff0;
1385
        /* No cache, so nothing to do.  */
1386
        /* ??? MPCore has VA to PA translation functions.  */
1387
        break;
1388
    case 8: /* MMU TLB control.  */
1389
        switch (op2) {
1390
        case 0: /* Invalidate all.  */
1391
            tlb_flush(env, 0);
1392
            break;
1393
        case 1: /* Invalidate single TLB entry.  */
1394
#if 0
1395
            /* ??? This is wrong for large pages and sections.  */
1396
            /* As an ugly hack to make linux work we always flush a whole 4K
1397
               page.  */
1398
            val &= 0xfffff000;
1399
            tlb_flush_page(env, val);
1400
            tlb_flush_page(env, val + 0x400);
1401
            tlb_flush_page(env, val + 0x800);
1402
            tlb_flush_page(env, val + 0xc00);
1403
#else
1404
            tlb_flush(env, 1);
1405
#endif
1406
            break;
1407
        case 2: /* Invalidate on ASID.  */
1408
            tlb_flush(env, val == 0);
1409
            break;
1410
        case 3: /* Invalidate single entry on MVA.  */
1411
            /* ??? This is like case 1, but ignores ASID.  */
1412
            tlb_flush(env, 1);
1413
            break;
1414
        default:
1415
            goto bad_reg;
1416
        }
1417
        break;
1418
    case 9:
1419
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1420
            break;
1421
        switch (crm) {
1422
        case 0: /* Cache lockdown.  */
1423
            switch (op1) {
1424
            case 0: /* L1 cache.  */
1425
                switch (op2) {
1426
                case 0:
1427
                    env->cp15.c9_data = val;
1428
                    break;
1429
                case 1:
1430
                    env->cp15.c9_insn = val;
1431
                    break;
1432
                default:
1433
                    goto bad_reg;
1434
                }
1435
                break;
1436
            case 1: /* L2 cache.  */
1437
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
1438
                break;
1439
            default:
1440
                goto bad_reg;
1441
            }
1442
            break;
1443
        case 1: /* TCM memory region registers.  */
1444
            /* Not implemented.  */
1445
            goto bad_reg;
1446
        default:
1447
            goto bad_reg;
1448
        }
1449
        break;
1450
    case 10: /* MMU TLB lockdown.  */
1451
        /* ??? TLB lockdown not implemented.  */
1452
        break;
1453
    case 12: /* Reserved.  */
1454
        goto bad_reg;
1455
    case 13: /* Process ID.  */
1456
        switch (op2) {
1457
        case 0:
1458
            /* Unlike real hardware the qemu TLB uses virtual addresses,
1459
               not modified virtual addresses, so this causes a TLB flush.
1460
             */
1461
            if (env->cp15.c13_fcse != val)
1462
              tlb_flush(env, 1);
1463
            env->cp15.c13_fcse = val;
1464
            break;
1465
        case 1:
1466
            /* This changes the ASID, so do a TLB flush.  */
1467
            if (env->cp15.c13_context != val
1468
                && !arm_feature(env, ARM_FEATURE_MPU))
1469
              tlb_flush(env, 0);
1470
            env->cp15.c13_context = val;
1471
            break;
1472
        case 2:
1473
            env->cp15.c13_tls1 = val;
1474
            break;
1475
        case 3:
1476
            env->cp15.c13_tls2 = val;
1477
            break;
1478
        case 4:
1479
            env->cp15.c13_tls3 = val;
1480
            break;
1481
        default:
1482
            goto bad_reg;
1483
        }
1484
        break;
1485
    case 14: /* Reserved.  */
1486
        goto bad_reg;
1487
    case 15: /* Implementation specific.  */
1488
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1489
            if (op2 == 0 && crm == 1) {
1490
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1491
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1492
                    tb_flush(env);
1493
                    env->cp15.c15_cpar = val & 0x3fff;
1494
                }
1495
                break;
1496
            }
1497
            goto bad_reg;
1498
        }
1499
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1500
            switch (crm) {
1501
            case 0:
1502
                break;
1503
            case 1: /* Set TI925T configuration.  */
1504
                env->cp15.c15_ticonfig = val & 0xe7;
1505
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1506
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1507
                break;
1508
            case 2: /* Set I_max.  */
1509
                env->cp15.c15_i_max = val;
1510
                break;
1511
            case 3: /* Set I_min.  */
1512
                env->cp15.c15_i_min = val;
1513
                break;
1514
            case 4: /* Set thread-ID.  */
1515
                env->cp15.c15_threadid = val & 0xffff;
1516
                break;
1517
            case 8: /* Wait-for-interrupt (deprecated).  */
1518
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1519
                break;
1520
            default:
1521
                goto bad_reg;
1522
            }
1523
        }
1524
        break;
1525
    }
1526
    return;
1527
bad_reg:
1528
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1529
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1530
              (insn >> 16) & 0xf, crm, op1, op2);
1531
}
1532

    
1533
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
1534
{
1535
    int op1;
1536
    int op2;
1537
    int crm;
1538

    
1539
    op1 = (insn >> 21) & 7;
1540
    op2 = (insn >> 5) & 7;
1541
    crm = insn & 0xf;
1542
    switch ((insn >> 16) & 0xf) {
1543
    case 0: /* ID codes.  */
1544
        switch (op1) {
1545
        case 0:
1546
            switch (crm) {
1547
            case 0:
1548
                switch (op2) {
1549
                case 0: /* Device ID.  */
1550
                    return env->cp15.c0_cpuid;
1551
                case 1: /* Cache Type.  */
1552
                    return env->cp15.c0_cachetype;
1553
                case 2: /* TCM status.  */
1554
                    return 0;
1555
                case 3: /* TLB type register.  */
1556
                    return 0; /* No lockable TLB entries.  */
1557
                case 5: /* CPU ID */
1558
                    return env->cpu_index;
1559
                default:
1560
                    goto bad_reg;
1561
                }
1562
            case 1:
1563
                if (!arm_feature(env, ARM_FEATURE_V6))
1564
                    goto bad_reg;
1565
                return env->cp15.c0_c1[op2];
1566
            case 2:
1567
                if (!arm_feature(env, ARM_FEATURE_V6))
1568
                    goto bad_reg;
1569
                return env->cp15.c0_c2[op2];
1570
            case 3: case 4: case 5: case 6: case 7:
1571
                return 0;
1572
            default:
1573
                goto bad_reg;
1574
            }
1575
        case 1:
1576
            /* These registers aren't documented on arm11 cores.  However
1577
               Linux looks at them anyway.  */
1578
            if (!arm_feature(env, ARM_FEATURE_V6))
1579
                goto bad_reg;
1580
            if (crm != 0)
1581
                goto bad_reg;
1582
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1583
                goto bad_reg;
1584
            return 0;
1585
        default:
1586
            goto bad_reg;
1587
        }
1588
    case 1: /* System configuration.  */
1589
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1590
            op2 = 0;
1591
        switch (op2) {
1592
        case 0: /* Control register.  */
1593
            return env->cp15.c1_sys;
1594
        case 1: /* Auxiliary control register.  */
1595
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1596
                return env->cp15.c1_xscaleauxcr;
1597
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1598
                goto bad_reg;
1599
            switch (ARM_CPUID(env)) {
1600
            case ARM_CPUID_ARM1026:
1601
                return 1;
1602
            case ARM_CPUID_ARM1136:
1603
                return 7;
1604
            case ARM_CPUID_ARM11MPCORE:
1605
                return 1;
1606
            case ARM_CPUID_CORTEXA8:
1607
                return 0;
1608
            default:
1609
                goto bad_reg;
1610
            }
1611
        case 2: /* Coprocessor access register.  */
1612
            if (arm_feature(env, ARM_FEATURE_XSCALE))
1613
                goto bad_reg;
1614
            return env->cp15.c1_coproc;
1615
        default:
1616
            goto bad_reg;
1617
        }
1618
    case 2: /* MMU Page table control / MPU cache control.  */
1619
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1620
            switch (op2) {
1621
            case 0:
1622
                return env->cp15.c2_data;
1623
                break;
1624
            case 1:
1625
                return env->cp15.c2_insn;
1626
                break;
1627
            default:
1628
                goto bad_reg;
1629
            }
1630
        } else {
1631
            switch (op2) {
1632
            case 0:
1633
                return env->cp15.c2_base0;
1634
            case 1:
1635
                return env->cp15.c2_base1;
1636
            case 2:
1637
                {
1638
                    int n;
1639
                    uint32_t mask;
1640
                    n = 0;
1641
                    mask = env->cp15.c2_mask;
1642
                    while (mask) {
1643
                        n++;
1644
                        mask <<= 1;
1645
                    }
1646
                    return n;
1647
                }
1648
            default:
1649
                goto bad_reg;
1650
            }
1651
        }
1652
    case 3: /* MMU Domain access control / MPU write buffer control.  */
1653
        return env->cp15.c3;
1654
    case 4: /* Reserved.  */
1655
        goto bad_reg;
1656
    case 5: /* MMU Fault status / MPU access permission.  */
1657
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1658
            op2 = 0;
1659
        switch (op2) {
1660
        case 0:
1661
            if (arm_feature(env, ARM_FEATURE_MPU))
1662
                return simple_mpu_ap_bits(env->cp15.c5_data);
1663
            return env->cp15.c5_data;
1664
        case 1:
1665
            if (arm_feature(env, ARM_FEATURE_MPU))
1666
                return simple_mpu_ap_bits(env->cp15.c5_data);
1667
            return env->cp15.c5_insn;
1668
        case 2:
1669
            if (!arm_feature(env, ARM_FEATURE_MPU))
1670
                goto bad_reg;
1671
            return env->cp15.c5_data;
1672
        case 3:
1673
            if (!arm_feature(env, ARM_FEATURE_MPU))
1674
                goto bad_reg;
1675
            return env->cp15.c5_insn;
1676
        default:
1677
            goto bad_reg;
1678
        }
1679
    case 6: /* MMU Fault address.  */
1680
        if (arm_feature(env, ARM_FEATURE_MPU)) {
1681
            if (crm >= 8)
1682
                goto bad_reg;
1683
            return env->cp15.c6_region[crm];
1684
        } else {
1685
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1686
                op2 = 0;
1687
            switch (op2) {
1688
            case 0:
1689
                return env->cp15.c6_data;
1690
            case 1:
1691
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Watchpoint Fault Address.  */
                    return 0; /* Not implemented.  */
                } else {
                    /* Instruction Fault Address.  */
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
                       shouldn't do any harm.  */
                    return env->cp15.c6_insn;
                }
            case 2:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Instruction Fault Address.  */
                    return env->cp15.c6_insn;
                } else {
                    goto bad_reg;
                }
            default:
                goto bad_reg;
            }
        }
    case 7: /* Cache control.  */
        /* ??? This is for test, clean and invalidate operations that set the
           Z flag.  We can't represent N = Z = 1, so it also clears
           the N flag.  Oh well.  */
        env->NZF = 0;
        return 0;
    case 8: /* MMU TLB control.  */
        goto bad_reg;
    case 9: /* Cache lockdown.  */
        switch (op1) {
        case 0: /* L1 cache.  */
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                return 0;
            switch (op2) {
            case 0:
                return env->cp15.c9_data;
            case 1:
                return env->cp15.c9_insn;
            default:
                goto bad_reg;
            }
        case 1: /* L2 cache */
            if (crm != 0)
                goto bad_reg;
            /* L2 Lockdown and Auxiliary control.  */
            return 0;
        default:
            goto bad_reg;
        }
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        return 0;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            return env->cp15.c13_fcse;
        case 1:
            return env->cp15.c13_context;
        case 2:
            return env->cp15.c13_tls1;
        case 3:
            return env->cp15.c13_tls2;
        case 4:
            return env->cp15.c13_tls3;
        default:
            goto bad_reg;
        }
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1)
                return env->cp15.c15_cpar;

            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                return 0;
            case 1: /* Read TI925T configuration.  */
                return env->cp15.c15_ticonfig;
            case 2: /* Read I_max.  */
                return env->cp15.c15_i_max;
            case 3: /* Read I_min.  */
                return env->cp15.c15_i_min;
            case 4: /* Read thread-ID.  */
                return env->cp15.c15_threadid;
            case 8: /* TI925T_status */
                return 0;
            }
            goto bad_reg;
        }
        return 0;
    }
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
    return 0;
}

void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    env->banked_r13[bank_number(mode)] = val;
}

uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    return env->banked_r13[bank_number(mode)];
}

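/* v7M MRS: read one of the M-profile special registers.  "reg" is the
   register number encoded in the MRS instruction: 0-7 select the xPSR
   views, 8 and 9 the main and process stack pointers, and 16-20 PRIMASK,
   FAULTMASK, BASEPRI, BASEPRI_MAX and CONTROL.  */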
uint32_t helper_v7m_mrs(CPUState *env, int reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 18: /* BASEPRI */
    case 19: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 18: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 19: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

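/* Register board-specific read/write handlers for a coprocessor.  Accesses
   to that coprocessor from translated code are then routed to cp_read and
   cp_write together with the given opaque pointer.  cp15 is always handled
   by the core itself, so only coprocessors 0-14 may be overridden here.  */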
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
                void *opaque)
{
    if (cpnum < 0 || cpnum > 14) {
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
        return;
    }

    env->cp[cpnum].cp_read = cp_read;
    env->cp[cpnum].cp_write = cp_write;
    env->cp[cpnum].opaque = opaque;
}

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

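/* The helpers below detect signed overflow without relying on signed
   arithmetic in C: an addition overflows only when both operands have the
   same sign and the (wrapped) result has the opposite sign, while a
   subtraction overflows only when the operands have different signs.  On
   overflow the result saturates towards the sign of the first operand,
   e.g. 0x7fff + 1 wraps to 0x8000, the test fires, and the result is
   clamped back to 0x7fff.  */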
/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

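/* op_addsub.h is a template: it is included several times below, each time
   with fresh definitions of ADD16/SUB16/ADD8/SUB8 (the per-lane operation),
   PFX (the helper name prefix) and optionally ARITH_GE.  Each inclusion
   expands into the packed add/subtract helpers for one flavour of the
   parallel arithmetic instructions (QADD16, UQSUB8, SADD16, UHADD8, ...).  */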
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
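/* When ARITH_GE is defined, op_addsub.h also updates the CPSR GE bits:
   each 16-bit lane owns two GE bits and each 8-bit lane owns one.  For the
   signed ("s") forms a lane's bits are set when its full-precision result
   is >= 0; for the unsigned ("u") forms they record the carry out of an
   addition or the absence of a borrow from a subtraction.  */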
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
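/* "flags" holds the four GE bits from the CPSR: each set bit selects the
   corresponding byte of the result from a, each clear bit selects it from
   b.  */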
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}

/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & float_flag_underflow)
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    return target_bits;
}

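/* Reassemble the guest-visible FPSCR.  The stored copy holds everything
   except the vector LEN field (bits [18:16]) and STRIDE field (bits
   [21:20]), which are kept in env->vfp.vec_len and env->vfp.vec_stride;
   the cumulative exception flags are ORed in from the softfloat status.  */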
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

/* Convert vfp exception flags to host form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }

    i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
    set_float_exception_flags(i, &env->vfp.fp_status);
    /* XXX: FZ and DN are not implemented.  */
}

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
{ \
    return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
{ \
    return float64_ ## name (a, b, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
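/* Set the FPSCR condition flags from a compare result: NZCV = 0110 for
   equal, 1000 for less than, 0010 for greater than and 0011 for unordered,
   ready to be copied to the CPSR by FMSTAT.  */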
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp

/* Helper routines to perform bitwise copies between float and int.  */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 vfp_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t vfp_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}

/* Integer to float conversion.  */
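/* The integer operand arrives as a raw 32-bit pattern inside a float32
   (and results travel back the same way); vfp_stoi and vfp_itos only
   reinterpret the bits, they do not convert values.  */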
float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
{
    return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
{
    return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
{
    return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
{
    return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

/* Float to integer conversion.  */
float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  */
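/* VFP3 fixed point conversions go via the raw integer bit pattern and use
   scalbn to apply the binary point shift, so no explicit multiply by a
   power of two is needed.  */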
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
                                  &env->vfp.fp_status); \
    return ftype##_scalbn(tmp, shift, &env->vfp.fp_status); \
} \
ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
    return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
        &env->vfp.fp_status)); \
}

VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
#undef VFP_CONV_FIX

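/* NEON reciprocal and reciprocal square root step helpers: VRECPS returns
   2 - a * b and VRSQRTS returns (3 - a * b) / 2, the correction factors
   used by one Newton-Raphson iteration for 1/x and 1/sqrt(x).  */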
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    return float32_sub(two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    float32 three = int32_to_float32(3, s);
    /* The architectural result is (3 - a * b) / 2.  */
    return float32_div(float32_sub(three, float32_mul(a, b, s), s), two, s);
}

/* TODO: The architecture specifies the value that the estimate functions
   should return.  We return the exact reciprocal/root instead.  */
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, a, s);
}

float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, float32_sqrt(a, s), s);
}

uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_recpe_f32(tmp, env);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_rsqrte_f32(tmp, env);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}