Statistics
| Branch: | Revision:

root / target-unicore32 / translate.c @ 8cfd0495

History | View | Annotate | Download (58.8 kB)

1
/*
2
 *  UniCore32 translation
3
 *
4
 * Copyright (C) 2010-2012 Guan Xuetao
5
 *
6
 * This program is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU General Public License version 2 as
8
 * published by the Free Software Foundation, or (at your option) any
9
 * later version. See the COPYING file in the top-level directory.
10
 */
11
#include <stdarg.h>
12
#include <stdlib.h>
13
#include <stdio.h>
14
#include <string.h>
15
#include <inttypes.h>
16

    
17
#include "cpu.h"
18
#include "disas/disas.h"
19
#include "tcg-op.h"
20
#include "qemu/log.h"
21

    
22
#include "helper.h"
23
#define GEN_HELPER 1
24
#include "helper.h"
25

    
26
/* internal defines */
/* Per-instruction translation state threaded through the decoder. */
typedef struct DisasContext {
    target_ulong pc;        /* address of the instruction being translated */
    int is_jmp;             /* DISAS_* code describing how the TB ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    struct TranslationBlock *tb;    /* TB currently being generated */
    int singlestep_enabled;         /* nonzero when gdb single-step is on */
#ifndef CONFIG_USER_ONLY
    int user;               /* nonzero when translating user-mode code */
#endif
} DisasContext;
40

    
41
#ifndef CONFIG_USER_ONLY
/* Memory-access index: user or privileged, from the translation context. */
#define IS_USER(s)      (s->user)
#else
/* User-mode emulation: every access is a user access. */
#define IS_USER(s)      1
#endif

/* These instructions trap after executing, so defer them until after the
   conditional executions state has been updated.  */
#define DISAS_SYSCALL 5
50

    
51
static TCGv_ptr cpu_env;        /* TCG global pointing at CPUUniCore32State */
static TCGv_i32 cpu_R[32];      /* the 32 core registers; cpu_R[31] is the PC */

/* FIXME:  These should be removed.  */
/* Scratch UniCore-F64 values shared by the FP translation routines. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
57

    
58
#include "exec/gen-icount.h"
59

    
60
/* Debug names used when registering the core registers as TCG globals;
   index 31 is the program counter. */
static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
65

    
66
/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Expose each core register (incl. the PC, regs[31]) as a TCG global
       backed by the corresponding CPUUniCore32State field. */
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }

    /* Second expansion of helper.h emits the helper registration code. */
#define GEN_HELPER 2
#include "helper.h"
}
81

    
82
static int num_temps;
83

    
84
/* Allocate a temporary variable.  */
85
static TCGv_i32 new_tmp(void)
86
{
87
    num_temps++;
88
    return tcg_temp_new_i32();
89
}
90

    
91
/* Release a temporary variable.  */
92
static void dead_tmp(TCGv tmp)
93
{
94
    tcg_temp_free(tmp);
95
    num_temps--;
96
}
97

    
98
static inline TCGv load_cpu_offset(int offset)
99
{
100
    TCGv tmp = new_tmp();
101
    tcg_gen_ld_i32(tmp, cpu_env, offset);
102
    return tmp;
103
}
104

    
105
#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
106

    
107
/* Write a temp into a 32-bit field of the CPU state; the temp is freed. */
static inline void store_cpu_offset(TCGv src, int offset)
{
    tcg_gen_st_i32(src, cpu_env, offset);
    dead_tmp(src);
}

/* Same, addressed by CPUUniCore32State member name. */
#define store_cpu_field(src, name) \
    store_cpu_offset(src, offsetof(CPUUniCore32State, name))
115

    
116
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we updated PC: reading the PC register yields
           the already-advanced translation-time pc, not cpu_R[31] */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
128

    
129
/* Create a new temporary and set it to the value of a CPU register.  */
130
static inline TCGv load_reg(DisasContext *s, int reg)
131
{
132
    TCGv tmp = new_tmp();
133
    load_reg_var(s, tmp, reg);
134
    return tmp;
135
}
136

    
137
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        /* Writes to the PC are forced word-aligned and terminate the TB. */
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
148

    
149
/* Value extensions.  */
/* In-place zero/sign extension of the low 8/16 bits of a temp. */
#define gen_uxtb(var)           tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var)           tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var)           tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var)           tcg_gen_ext16s_i32(var, var)
154

    
155
/* Instruction field extraction.  'insn' is a local at every use site.
   UCOP_REG_*: 5-bit register numbers; UCOP_IMM*: immediates;
   UCOP_SH_*: shift opcode/amount; UCOP_UCF64_*: FP format/function/cond. */
#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

/* Single-bit field tests (P/U/B/W/L/S addressing-mode flags). */
#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
181

    
182
/* Abort emulation on an undecodable instruction.  Relies on 'env' and
   'insn' locals being in scope at every use site. */
#define ILLEGAL         cpu_abort(env,                                  \
                        "Illegal UniCore32 instruction %x at line %d!", \
                        insn, __LINE__)
185

    
186
#ifndef CONFIG_USER_ONLY
187
/* Decode a coprocessor-0 (system control) access.  Only the movc
   encoding (top bits 0xe0/0xe1) is accepted; L selects read vs write. */
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        /* Pass the cp0 register number and immediate to the helper. */
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            /* read cp0 register into rd */
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* write rd into cp0 register */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}
211

    
212
/* Decode the OCD (on-chip debug) coprocessor accesses: a read that
   always yields 0, and a console-output write via the cp1_putc helper. */
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            /* Reads as constant zero. */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 1
         */
        if (UCOP_REG_N == 1) {
            /* Write rn to the debug console. */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
251
#endif
252

    
253
/* Write the bits of 'var' selected by 'mask' into the ASR via helper. */
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv msk = tcg_const_i32(mask);

    gen_helper_asr_write(cpu_env, var, msk);
    tcg_temp_free_i32(msk);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
261

    
262
static void gen_exception(int excp)
263
{
264
    TCGv tmp = new_tmp();
265
    tcg_gen_movi_i32(tmp, excp);
266
    gen_helper_exception(cpu_env, tmp);
267
    dead_tmp(tmp);
268
}
269

    
270
/* Store 'var' (0 or 1) directly into the CF field of the CPU state. */
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
280

    
281
/* Set N and Z flags from var.  */
/* NF/ZF cache the raw result: N is its sign bit, Z is "value == 0". */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}
287

    
288
/* dest = T0 + T1 + CF. */
289
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
290
{
291
    TCGv tmp;
292
    tcg_gen_add_i32(dest, t0, t1);
293
    tmp = load_cpu_field(CF);
294
    tcg_gen_add_i32(dest, dest, tmp);
295
    dead_tmp(tmp);
296
}
297

    
298
/* dest = T0 - T1 + CF - 1.  */
299
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
300
{
301
    TCGv tmp;
302
    tcg_gen_sub_i32(dest, t0, t1);
303
    tmp = load_cpu_field(CF);
304
    tcg_gen_add_i32(dest, dest, tmp);
305
    tcg_gen_subi_i32(dest, dest, 1);
306
    dead_tmp(tmp);
307
}
308

    
309
/* Set CF to bit 'shift' of var, i.e. the last bit shifted out by an
   immediate shift of (shift + 1). */
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            /* shift == 31 leaves only one bit; no masking needed. */
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
323

    
324
/* Shift by immediate.  Includes special handling for shift == 0.  */
325
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
326
        int flags)
327
{
328
    switch (shiftop) {
329
    case 0: /* LSL */
330
        if (shift != 0) {
331
            if (flags) {
332
                shifter_out_im(var, 32 - shift);
333
            }
334
            tcg_gen_shli_i32(var, var, shift);
335
        }
336
        break;
337
    case 1: /* LSR */
338
        if (shift == 0) {
339
            if (flags) {
340
                tcg_gen_shri_i32(var, var, 31);
341
                gen_set_CF(var);
342
            }
343
            tcg_gen_movi_i32(var, 0);
344
        } else {
345
            if (flags) {
346
                shifter_out_im(var, shift - 1);
347
            }
348
            tcg_gen_shri_i32(var, var, shift);
349
        }
350
        break;
351
    case 2: /* ASR */
352
        if (shift == 0) {
353
            shift = 32;
354
        }
355
        if (flags) {
356
            shifter_out_im(var, shift - 1);
357
        }
358
        if (shift == 32) {
359
            shift = 31;
360
        }
361
        tcg_gen_sari_i32(var, var, shift);
362
        break;
363
    case 3: /* ROR/RRX */
364
        if (shift != 0) {
365
            if (flags) {
366
                shifter_out_im(var, shift - 1);
367
            }
368
            tcg_gen_rotri_i32(var, var, shift); break;
369
        } else {
370
            TCGv tmp = load_cpu_field(CF);
371
            if (flags) {
372
                shifter_out_im(var, 0);
373
            }
374
            tcg_gen_shri_i32(var, var, 1);
375
            tcg_gen_shli_i32(tmp, tmp, 31);
376
            tcg_gen_or_i32(var, var, tmp);
377
            dead_tmp(tmp);
378
        }
379
    }
380
};
381

    
382
/* Shift 'var' by a register-held amount.  With 'flags' set, the *_cc
   helpers are used so CF is updated as well.  'shift' is consumed. */
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            /* Rotate amount is mod 32; no helper needed. */
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}
419

    
420
/* Emit a branch to 'label' taken when condition code 'cc' holds.
   Flags are read from the cached fields: ZF is zero iff Z is set,
   NF/VF carry N/V in their sign bits, CF is 0 or 1. */
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;    /* label used to skip over the second test of compound conds */

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
517

    
518
/* Per-opcode flag: 1 if the data-processing op sets flags via gen_logic_CC
   (logical ops), 0 if it uses the arithmetic carry/overflow helpers. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */    1, /* xor */    0, /* sub */    0, /* rsb */
    0, /* add */    0, /* adc */    0, /* sbc */    0, /* rsc */
    1, /* andl */   1, /* xorl */   0, /* cmp */    0, /* cmn */
    1, /* orr */    1, /* mov */    1, /* bic */    1, /* mvn */
};
524

    
525
/* Set PC state from an immediate address.  */
526
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
527
{
528
    s->is_jmp = DISAS_UPDATE;
529
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
530
}
531

    
532
/* Set PC state from var.  var is marked as dead.  */
533
static inline void gen_bx(DisasContext *s, TCGv var)
534
{
535
    s->is_jmp = DISAS_UPDATE;
536
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
537
    dead_tmp(var);
538
}
539

    
540
/* Store to a register with branch-exchange semantics; UniCore32 has no
   interworking state, so this is simply store_reg. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}
544

    
545
/* Guest memory access helpers.  Loads return a fresh temp; stores consume
   'val'.  'index' is the mmu index (see IS_USER). */
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

/* 64-bit temps use the i64 allocator directly, not new_tmp(). */
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
610

    
611
/* Write an immediate value into the PC register (cpu_R[31]). */
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
622

    
623
/* Apply the data-processing load/store addressing offset to 'var':
   either a 14-bit immediate or a shifted register, added or subtracted
   according to the U bit. */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        /* flags == 0: the addressing shift never updates CF. */
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
650

    
651
/* Apply the halfword/signed load/store addressing offset to 'var':
   a 10-bit split immediate or a plain (unshifted) register, added or
   subtracted according to the U bit. */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate: low 5 bits plus 5 bits starting at bit 9 */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
677

    
678
static inline long ucf64_reg_offset(int reg)
679
{
680
    if (reg & 1) {
681
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
682
          + offsetof(CPU_DoubleU, l.upper);
683
    } else {
684
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
685
          + offsetof(CPU_DoubleU, l.lower);
686
    }
687
}
688

    
689
#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
690
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
691

    
692
/* UniCore-F64 single load/store I_offset */
/* P selects pre-indexing (offset applied before the access), otherwise
   post-indexing; U selects add vs subtract; W requests write-back of the
   updated base into REG_N. */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        /* post-indexed without write-back is not a valid encoding */
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        /* pre-index: adjust the address before the transfer */
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        /* post-index: adjust the address after the transfer */
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
737

    
738
/* UniCore-F64 load/store multiple words */
/* The low 8 insn bits are a register mask within the bank selected by
   bits [9:8]; P/U choose the increment/decrement and pre/post variants,
   W requests base write-back. */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        /* empty register mask (low 8 bits all zero) */
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    /* n = number of registers to transfer */
    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    /* Position 'addr' at the first word to transfer. */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        /* Undo/extend the positioning above so addr holds the final base. */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
822

    
823
/* UniCore-F64 mrc/mcr */
/* Transfers between core registers and the FP unit: FPSCR access (CFF/CTF),
   single-register moves (MFF/MTF) and compare-to-flags (MFFC). */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            /* FPSCR changes may affect translation; end the TB. */
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        /* Compare two FP registers and deposit the condition result via
           the cmps/cmpd helpers; bit 26 selects double precision. */
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
886

    
887
/* UniCore-F64 convert instructions */
/* Conversions between single (fmt 0), double (fmt 1) and 32-bit integer
   (fmt 2); FUNC selects the destination kind, FMT the source kind, and
   converting a format to itself is illegal. */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
952

    
953
/* UniCore-F64 compare instructions */
/* NOTE: unconditionally aborts via ILLEGAL — the compare helpers are not
   wired up yet, so everything after the ILLEGAL below is dead code kept
   as a template (see the TODO). */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}
974

    
975
/* mov has no helper; expand to nothing so UCF64_OP1 can still name it. */
#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)
977

    
978
/* Emit a UCF64 unary operation (used for abs/mov/neg): requires the
 * REG_N field to be zero, then loads REG_M into the scratch FP temp,
 * applies the per-format helper and stores the result to REG_D.
 * Format 2 (word) is invalid for these operations. */
#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
1002

    
1003
/* Emit a UCF64 binary operation (add/sub/mul/div): loads REG_N and
 * REG_M into the scratch FP temps, calls the per-format helper (which
 * also takes cpu_env for FP status/rounding) and stores the result to
 * REG_D.  Format 2 (word) is invalid for these operations. */
#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
1030

    
1031
/* UniCore-F64 data processing: dispatch on the function field to the
 * unary (UCF64_OP1) or binary (UCF64_OP2) emission macros. */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    /* format 3 is not a valid operand format */
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default: /* function 3 and anything above 7 */
        ILLEGAL;
    }
}
1063

    
1064
/* Disassemble an F64 instruction: top-level decode for coprocessor 2.
 * Bit 29 separates load/store forms from data-processing/transfer forms. */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (!UCOP_SET(29)) {
        /* load/store: bit 26 selects multiple-register vs. immediate form */
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            /* bits [27:26] select the operation class */
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            /* register transfer forms; see do_ucf64_trans() */
            do_ucf64_trans(env, s, insn);
        }
    }
}
1094

    
1095
/* Emit a jump to guest address 'dest'.  If the destination lies on the
 * same guest page as this TB, emit a patchable goto_tb so the two TBs
 * can be chained directly; otherwise set the PC and return to the main
 * loop with exit_tb(0). */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *cur_tb = s->tb;

    if ((cur_tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        /* Cross-page target: no direct chaining. */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
        return;
    }

    /* Same page: the low bits of the exit value identify which of the
     * TB's two jump slots (n) to patch. */
    tcg_gen_goto_tb(n);
    gen_set_pc_im(dest);
    tcg_gen_exit_tb((uintptr_t)cur_tb + n);
}
1109

    
1110
/* Emit an unconditional jump to 'dest', using a chainable TB exit in
 * the normal case.  Under single-stepping an indirect jump is used
 * instead so the debug exception still fires. */
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* Indirect jump keeps the single-step debug exception working. */
        gen_bx_im(s, dest);
        return;
    }

    gen_goto_tb(s, 0, dest);
    s->is_jmp = DISAS_TB_JUMP;
}
1120

    
1121
/* 16 x 16 -> 32 signed multiply of selected register halves: x and y
 * choose the top (nonzero) or bottom (zero) 16 bits of t0 and t1
 * respectively; the product is left in t0. */
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (!x) {
        gen_sxth(t0);                  /* bottom half, sign-extended */
    } else {
        tcg_gen_sari_i32(t0, t0, 16);  /* top half */
    }
    if (!y) {
        gen_sxth(t1);
    } else {
        tcg_gen_sari_i32(t1, t1, 16);
    }
    tcg_gen_mul_i32(t0, t0, t1);
}
1135

    
1136
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* Writing BSR is privileged. */
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            /* NOTE(review): t0 is not freed on this failure path;
             * callers raise ILLEGAL on a nonzero return. */
            return 1;
        }

        /* Merge the masked new bits into the saved status register:
         * BSR = (BSR & ~mask) | (t0 & mask). */
        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    /* An ASR/BSR write can change execution state, so force a new TB
     * lookup after this instruction. */
    gen_lookup_tb(s);
    return 0;
}
1158

    
1159
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);         /* r31 is the PC register */
    /* Restore the full ASR from the banked BSR. */
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;     /* CPU state changed: end this TB */
}
1169

    
1170
/* Dispatch a coprocessor instruction on its coprocessor number.
 * cp0/cp1 are system-level and only exist in softmmu builds; cp2 is the
 * UniCore-F64 FPU. */
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0: /* cp0 */
        disas_cp0_insn(env, s, insn);
        break;
    case 1: /* OCD */
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2: /* UniCore-F64 */
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor. */
        cpu_abort(env, "Unknown coprocessor!");
    }
}
1190

    
1191
/* data processing instructions: decode the operand-2 form (rotated
 * immediate or shifted register), fetch the first operand where the
 * opcode needs one, then emit the ALU operation selected by
 * UCOP_OPCODES.  Opcodes 0x08-0x0b only update flags; mov (0x0d) and
 * not (0x0f) ignore the first operand. */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    /* nonzero when this opcode updates flags via gen_logic_CC (logic op
     * with the S bit set) */
    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand, rotated right by UCOP_SH_IM bits */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            /* a rotated immediate updates carry from bit 31 */
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register operand, shifted by a register or an immediate */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    /* mov (0x0d) and not (0x0f) have no first operand */
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00: /* and */
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01: /* xor */
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02: /* sub */
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03: /* reversed sub: tmp2 - tmp */
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04: /* add */
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05: /* add with carry */
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06: /* sub with carry */
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07: /* reversed sub with carry: tmp2 - tmp */
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08: /* and, flags only */
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09: /* xor, flags only */
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a: /* sub, flags only (compare) */
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b: /* add, flags only */
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c: /* or */
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d: /* mov */
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e: /* and-not (bit clear) */
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f: /* not */
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    /* for mov/not, tmp2 was consumed as the stored result above */
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
1381

    
1382
/* multiply: bit 27 selects 64-bit (split LO/HI result) vs. 32-bit,
 * bit 26 signed vs. unsigned for the 64-bit form, bit 25 accumulate. */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            /* signed 32x32 -> 64 */
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            /* unsigned 32x32 -> 64 */
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            /* 64-bit add of the LO/HI accumulator pair into the product */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);   /* low word of the result */
        store_reg(s, UCOP_REG_HI, tmp2);  /* high word of the result */
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            /* S bit set: update flags from the result */
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
1423

    
1424
/* miscellaneous instructions: matched by masked bit patterns, most
 * specific first; anything that matches nothing is illegal. */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            /* rotate the immediate right by UCOP_SH_IM bits */
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg: only the NZCV flag bits are written */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR: B bit selects the banked BSR (privileged) */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz: bit 26 selects count-leading-ones vs. count-leading-zeros */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
1499

    
1500
/* load/store I_offset and R_offset: word or unsigned-byte transfer with
 * pre/post indexing and optional base writeback. */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    /* User MMU index when translating user code, or for the !P && W
     * encoding.  NOTE(review): presumably the latter is a forced
     * user-mode access form -- confirm against the MMU index usage. */
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        /* pre-indexed: apply the offset before the access */
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }
    if (!UCOP_SET_P) {
        /* post-indexed: apply the offset and always write back the base */
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        /* pre-indexed with writeback */
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            /* loading the PC register is a jump */
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
1548

    
1549
/* SWP instruction: atomically (for a single-CPU world) exchanges a
 * register with memory; B bit selects byte vs. word. */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        /* byte swap: load the old value, then store the new one */
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        /* word swap */
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    /* the previously-held memory value goes to the destination register */
    store_reg(s, UCOP_REG_D, tmp2);
}
1575

    
1576
/* load/store halfword / signed byte; SH_OP == 0 is really SWP and is
 * delegated to do_swap(). */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        /* pre-indexed: apply the offset before the access */
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1: /* unsigned halfword */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2: /* signed byte */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3: /* signed halfword */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        /* only halfword stores exist in this class */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
1627

    
1628
/* load/store multiple words.  The register list is encoded in bits
 * [5:0] and [18:9]; bit 6 shifts the whole list to the high register
 * bank (r16..).  Supports pre/post increment/decrement addressing with
 * optional writeback, user-bank transfers, and ASR restore. */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            /* without the PC in the list, S means user-bank transfer */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    /* count list bits [5:0] ... */
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* ... and list bits [18:9] */
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    /* bit 6 selects the high register bank (r16 upward) */
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            /* skip the unused bits [8:6] of the register list */
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    /* loading the PC is a jump */
                    gen_bx(s, tmp);
                } else if (user) {
                    /* write the user-bank copy of the register */
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    /* defer storing the base until after writeback */
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    /* read the user-bank copy of the register */
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        /* undo/complete the addressing adjustment so the final base
         * value matches the chosen addressing mode */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        /* a loaded base register overrides any writeback value */
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR.  */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
1768

    
1769
/* branch (and link): PC-relative branch with a sign-extended 24-bit
 * word offset, optional condition and optional link register update. */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    /* condition 0xf is not valid */
    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        /* branch with link: save the return address in r30 */
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    /* sign-extend the low 24 bits of the insn as the word offset */
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
1798

    
1799
/* Fetch and translate one UniCore32 instruction at s->pc, dispatching
 * on the top three opcode bits. */
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    unsigned int insn;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(s->pc);
    }

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fall through: remaining class-0 insns are data processing */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fall through: handled like class-3 load/store */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            /* trap after the insn: PC is committed, TB ends in a syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}
1875

    
1876
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
        TranslationBlock *tb, bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUUniCore32State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Initialise per-TB disassembly state.  */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    /* Scratch TCG temporaries shared by the FP translation helpers
       (see the file-scope cpu_F0s/cpu_F1s/cpu_F0d/cpu_F1d, marked
       FIXME for removal at the top of this file).  */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* Translation never crosses a guest page boundary: the loop below
       stops once dc->pc reaches next_page_start.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    /* CF_COUNT_MASK == 0 means "no icount limit requested"; translate
       up to the maximum then.  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

#ifndef CONFIG_USER_ONLY
    /* Record the privilege level so loads/stores can pick the right
       MMU index (IS_USER macro above).  */
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start();
    do {
        /* If a breakpoint is set at this PC, emit a debug exception
           instead of translating the guest instruction.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2; /* FIXME */
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            /* Record the guest PC / icount for every generated op so a
               host PC can later be mapped back to a guest PC.  Fill any
               gap since the last recorded op with zeros.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* The last instruction of an icount TB may do I/O; bracket it
           with gen_io_start()/gen_io_end().  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* Translate one guest instruction; advances dc->pc and may set
           dc->is_jmp / dc->condjmp.  */
        disas_uc32_insn(env, dc);

        /* num_temps is a translation-time temp counter; a non-zero
           value here means an instruction forgot to free a temp.  */
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        /* If the instruction was conditionally skipped, bind the label
           the condition branches to (unless we are ending the TB).  */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            /* Fell off the end of the TB: chain to the next one.  */
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            /* Not-taken path of a trailing conditional branch: resume
               at the next sequential instruction.  */
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Zero-fill the PC table up to the last generated op.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
2067

    
2068
/* Translate one TB without recording guest-PC mapping information. */
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    gen_intermediate_code_internal(cpu, tb, false);
}
2072

    
2073
/* Translate one TB, additionally recording per-op guest PC info
   (used when restoring CPU state from a host PC). */
void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    gen_intermediate_code_internal(cpu, tb, true);
}
2077

    
2078
/* Printable names for the 16 processor modes encoded in ASR[3:0];
   indexed with (psr & 0xf) by uc32_cpu_dump_state() below.  The "UMxx"
   entries presumably correspond to unused/reserved encodings. */
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};
2082

    
2083
/* Optional dump of the UniCore-F64 floating point register file.
   Disabled by default (the #undef below); define UCF64_DUMP_STATE to
   include it in the register dump. */
#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    int i;
    /* Unions used to reinterpret the raw 32-bit halves as floats for
       human-readable output.  */
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    /* Print each 64-bit register both as two single-precision halves
       and as one double-precision value.  */
    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s);
        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
/* Stub: compiles away to nothing when the FP dump is disabled. */
#define cpu_dump_state_ucf64(env, file, pr, flags)      do { } while (0)
#endif
2117

    
2118
void uc32_cpu_dump_state(CPUState *cs, FILE *f,
2119
                         fprintf_function cpu_fprintf, int flags)
2120
{
2121
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
2122
    CPUUniCore32State *env = &cpu->env;
2123
    int i;
2124
    uint32_t psr;
2125

    
2126
    for (i = 0; i < 32; i++) {
2127
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2128
        if ((i % 4) == 3) {
2129
            cpu_fprintf(f, "\n");
2130
        } else {
2131
            cpu_fprintf(f, " ");
2132
        }
2133
    }
2134
    psr = cpu_asr_read(env);
2135
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
2136
                psr,
2137
                psr & (1 << 31) ? 'N' : '-',
2138
                psr & (1 << 30) ? 'Z' : '-',
2139
                psr & (1 << 29) ? 'C' : '-',
2140
                psr & (1 << 28) ? 'V' : '-',
2141
                cpu_mode_names[psr & 0xf]);
2142

    
2143
    cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
2144
}
2145

    
2146
/* Restore guest CPU state after a translated-code fault: set the
 * guest program counter (held in regs[31] here — presumably R31 is
 * the PC on UniCore32; confirm against cpu.h) from the per-op PC
 * table recorded when the TB was translated with search_pc.
 */
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[31] = tcg_ctx.gen_opc_pc[pc_pos];
}