Statistics
| Branch: | Revision:

root / target-microblaze / translate.c @ 9a78eead

History | View | Annotate | Download (47.4 kB)

1
/*
2
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3
 *
4
 *  Copyright (c) 2009 Edgar E. Iglesias.
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include <stdarg.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23
#include <string.h>
24
#include <inttypes.h>
25
#include <assert.h>
26

    
27
#include "cpu.h"
28
#include "exec-all.h"
29
#include "disas.h"
30
#include "tcg-op.h"
31
#include "helper.h"
32
#include "microblaze-decode.h"
33
#include "qemu-common.h"
34

    
35
#define GEN_HELPER 1
36
#include "helper.h"
37

    
38
#define SIM_COMPAT 0
39
#define DISAS_GNU 1
40
#define DISAS_MB 1
41
#if DISAS_MB && !SIM_COMPAT
42
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
43
#else
44
#  define LOG_DIS(...) do { } while (0)
45
#endif
46

    
47
#define D(x)
48

    
49
#define EXTRACT_FIELD(src, start, end) \
50
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
51

    
52
static TCGv env_debug;
53
static TCGv_ptr cpu_env;
54
static TCGv cpu_R[32];
55
static TCGv cpu_SR[18];
56
static TCGv env_imm;
57
static TCGv env_btaken;
58
static TCGv env_btarget;
59
static TCGv env_iflags;
60

    
61
#include "gen-icount.h"
62

    
63
/* This is the state at translation time.  */
64
typedef struct DisasContext {
65
    CPUState *env;
66
    target_ulong pc;
67

    
68
    /* Decoder.  */
69
    int type_b;
70
    uint32_t ir;
71
    uint8_t opcode;
72
    uint8_t rd, ra, rb;
73
    uint16_t imm;
74

    
75
    unsigned int cpustate_changed;
76
    unsigned int delayed_branch;
77
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
78
    unsigned int clear_imm;
79
    int is_jmp;
80

    
81
#define JMP_NOJMP    0
82
#define JMP_DIRECT   1
83
#define JMP_INDIRECT 2
84
    unsigned int jmp;
85
    uint32_t jmp_pc;
86

    
87
    int abort_at_next_insn;
88
    int nr_nops;
89
    struct TranslationBlock *tb;
90
    int singlestep_enabled;
91
} DisasContext;
92

    
93
static const char *regnames[] =
94
{
95
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
96
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
97
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
98
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
99
};
100

    
101
static const char *special_regnames[] =
102
{
103
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
104
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
105
    "sr16", "sr17", "sr18"
106
};
107

    
108
/* Sign extend at translation time.  */
109
/* Sign extend at translation time.  'width' is the bit index of the
   sign bit (e.g. 15 for a 16-bit immediate).  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
        unsigned int shift = 31 - width;
        int result;

        /* Move the sign bit up to bit 31, then arithmetic-shift it
           back down to replicate it through the upper bits.  */
        result = (int)(val << shift);
        result >>= shift;
        return result;
}
120

    
121
/* Sync the tb dependent flags between translator and runtime: if the
   translation-time copy (dc->tb_flags) has diverged from what was last
   written to env_iflags, emit a store and remember the new value.  */
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependant flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
129

    
130
/* Emit code that raises exception 'index' at the current insn.
   Flags and SR_PC are synced first so the handler sees consistent
   state; translation state is marked DISAS_UPDATE so the caller
   stops chaining this tb.  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
140

    
141
/* Emit a jump to 'dest'.  If the destination lies on the same guest
   page as this tb we can chain directly (goto_tb + exit_tb with the
   tb pointer tagged by slot n); otherwise fall back to a plain
   exit_tb(0) after updating SR_PC.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
154

    
155
/* True if ALU operand b is a small immediate that may deserve
156
   faster treatment.  */
157
/* True if ALU operand b is a small immediate that may deserve
   faster treatment: an immediate-form insn that is NOT extended
   by a preceding imm prefix.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    if (!dc->type_b) {
        return 0;
    }
    return (dc->tb_flags & IMM_FLAG) == 0;
}
162

    
163
/* Return a pointer to the TCGv holding ALU operand b.
   For register-form insns this is just rb.  For immediate-form insns
   the value is materialized in env_imm: either OR-ed into the high
   half set up by a preceding imm prefix (IMM_FLAG), or the
   sign-extended 16-bit immediate on its own.  */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
174

    
175
/* add/addi and carry variants.  k = keep carry (don't update MSR[C]),
   c = use carry-in.  The plain keep-carry case is a straight TCG add;
   anything that reads or writes carry goes through the addkc helper.
   Writes to r0 are discarded, but the helper is still called so the
   carry flag side effect happens.  */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Fast path: keep carry, no carry-in, real destination.  */
    if (k && !c && dc->rd)
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    else if (dc->rd)
        gen_helper_addkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                         tcg_const_tl(k), tcg_const_tl(c));
    else {
        /* rd == r0: compute into a scratch so only the carry update
           is visible.  */
        TCGv d = tcg_temp_new();
        gen_helper_addkc(d, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                         tcg_const_tl(k), tcg_const_tl(c));
        tcg_temp_free(d);
    }
}
198

    
199
/* rsub and cmp/cmpu.  MicroBlaze subtract is reversed: rd = b - a.
   cmp/cmpu are encoded as the register-form keep-carry subtract with
   imm bit 0 set.  */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
    } else {
        LOG_DIS("sub%s%s r%d, r%d r%d\n",
                 k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

        /* Carry in/out goes through the subkc helper; when rd == r0
           a scratch receives the result so only the flag update is
           visible.  */
        if (!k || c) {
            TCGv t;
            t = tcg_temp_new();
            if (dc->rd)
                gen_helper_subkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                                 tcg_const_tl(k), tcg_const_tl(c));
            else
                gen_helper_subkc(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                                 tcg_const_tl(k), tcg_const_tl(c));
            tcg_temp_free(t);
        }
        else if (dc->rd)
            /* Keep-carry subtract: note the reversed operands (b - a).  */
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    }
}
235

    
236
/* Pattern compare unit: pcmpbf, pcmpeq, pcmpne.  */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    /* Raise an illegal-opcode exception if the core is configured
       without the pattern-compare instructions.  Return so we don't
       emit code for the unavailable insn (matches dec_mul and
       dec_barrel; the original fell through here).  */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            /* pcmpeq: rd = (ra == rb).  */
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            /* pcmpne: rd = (ra != rb).  */
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                /* Allocate the label only when it is actually used; the
                   original created it before the rd check, leaving an
                   unset label behind when rd == 0 (pcmpeq above does it
                   correctly).  */
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(dc->env,
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
290

    
291
/* Logical and / andn.  Bit 10 of a register-form immediate field
   redirects to the pattern-compare group.  Writes to r0 are dropped.  */
static void dec_and(DisasContext *dc)
{
    unsigned int invert;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    invert = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", invert ? "n" : "");

    if (dc->rd == 0) {
        return;
    }

    if (invert) {
        /* andn: complement operand b first.  */
        TCGv inv_b = tcg_temp_new();
        tcg_gen_not_tl(inv_b, *(dec_alu_op_b(dc)));
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], inv_b);
        tcg_temp_free(inv_b);
    } else {
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
314

    
315
/* Logical or.  Bit 10 of a register-form immediate field redirects
   to the pattern-compare group.  Writes to r0 are dropped.  */
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd == 0) {
        return;
    }
    tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
326

    
327
/* Logical xor.  Bit 10 of a register-form immediate field redirects
   to the pattern-compare group.  Writes to r0 are dropped.  */
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd == 0) {
        return;
    }
    tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
338

    
339
/* Extract the carry flag into d (0 or 1).  The carry copy lives in
   the top bit (MSR_CC, bit 31) of SR_MSR.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
343

    
344
/* Set both carry bits (MSR_C and its bit-31 copy MSR_CC) from the low
   bit of v, leaving the rest of MSR untouched.  The shl/sar pair
   smears bit 0 of v across the whole word, which is then masked down
   to the two carry positions.  */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    /* NOTE(review): copying the smeared mask into env_debug looks like
       leftover debug instrumentation — confirm it is still wanted.  */
    tcg_gen_mov_tl(env_debug, t0);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
356

    
357

    
358
/* Copy the current MSR into d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
362

    
363
/* Write v to MSR.  The PVR-present bit (bit 10) is forced on since
   this model always provides processor version registers; any MSR
   write invalidates cached cpu state for this tb.  */
static inline void msr_write(DisasContext *dc, TCGv v)
{
    dc->cpustate_changed = 1;
    tcg_gen_mov_tl(cpu_SR[SR_MSR], v);
    /* PVR, we have a processor version register.  */
    tcg_gen_ori_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], (1 << 10));
}
370

    
371
/* Decode the msrclr/msrset and mts/mfs special-register instructions.
   Privileged forms raise ESR_EC_PRIVINSN in user mode when MSR[EE] is
   set.  mts/mfs to the MMU register block (0x1000..0x10ff) is handled
   by helpers; PVR registers (0x2000..0x200c) are read-only loads from
   the env structure.  */
static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    /* These insns always take the immediate operand path.  */
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        /* User mode may only touch MSR[C] (imm 0 or 4).  */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the pre-modification MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* MSR changed: stop translating and resume at the next insn.  */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    /* mts is fully privileged.  */
    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        /* mts: move rA into the chosen special register.  */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                /* mts to PC is a nop.  */
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                /* Only the low 5 FSR bits are writable.  */
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            default:
                cpu_abort(dc->env, "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        /* mfs: move the chosen special register into rD.  */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
             case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                /* PVR0 ... PVR12: direct load from env.  */
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* r0 is hardwired to zero; undo any write the paths above made.  */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
515

    
516
/* 64-bit signed mul, lower result in d and upper in d2.  */
517
/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 prod, ext_b;

    prod = tcg_temp_new_i64();
    ext_b = tcg_temp_new_i64();

    /* Sign-extend both operands to 64 bits, multiply in place.  */
    tcg_gen_ext_i32_i64(prod, a);
    tcg_gen_ext_i32_i64(ext_b, b);
    tcg_gen_mul_i64(prod, prod, ext_b);

    /* Low half to d, then shift and take the high half for d2.  */
    tcg_gen_trunc_i64_i32(d, prod);
    tcg_gen_shri_i64(prod, prod, 32);
    tcg_gen_trunc_i64_i32(d2, prod);

    tcg_temp_free_i64(prod);
    tcg_temp_free_i64(ext_b);
}
535

    
536
/* 64-bit unsigned muls, lower result in d and upper in d2.  */
537
/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 prod, ext_b;

    prod = tcg_temp_new_i64();
    ext_b = tcg_temp_new_i64();

    /* Zero-extend both operands to 64 bits, multiply in place.  */
    tcg_gen_extu_i32_i64(prod, a);
    tcg_gen_extu_i32_i64(ext_b, b);
    tcg_gen_mul_i64(prod, prod, ext_b);

    /* Low half to d, then shift and take the high half for d2.  */
    tcg_gen_trunc_i64_i32(d, prod);
    tcg_gen_shri_i64(prod, prod, 32);
    tcg_gen_trunc_i64_i32(d2, prod);

    tcg_temp_free_i64(prod);
    tcg_temp_free_i64(ext_b);
}
555

    
556
/* Multiplier unit.  */
557
/* Multiplier unit.  Raises an illegal-opcode exception when the core
   is configured without a hardware multiplier.  The unused half of the
   64-bit product always lands in a scratch temp (d[0]/d[1]).  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            /* mul: low 32 bits of the product.  */
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            /* mulh: high 32 bits, signed x signed.  */
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            /* FIXME(review): mulhsu should be signed x unsigned per the
               ISA naming, but this emits the signed x signed helper —
               confirm against the MicroBlaze reference guide.  */
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            /* mulhu: high 32 bits, unsigned x unsigned.  */
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}
611

    
612
/* Div unit.  */
613
/* Div unit.  Emits rd = b / a (note the reversed operand order, as
   with rsub).  u (imm bit 1) selects unsigned division.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        /* Match dec_mul/dec_barrel: don't emit code for the
           unavailable insn after raising the exception (the original
           fell through here).  */
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    /* r0 is hardwired to zero; undo the helper's write.  */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
633

    
634
/* Barrel shifter: bsll/bsrl/bsra and immediate forms.  s selects a
   left shift, t selects arithmetic (vs logical) for right shifts.
   Raises an illegal-opcode exception when the core has no barrel
   shifter.  The shift amount is masked to 5 bits.  */
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    /* Only the low 5 bits of the shift amount are significant.  */
    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}
667

    
668
/* Bit-field group: src/srl/sra shifts-through-carry, sign extension
   (sext8/sext16) and the cache ops wdc/wic.  The sub-op is the low
   8 bits of the instruction word.  Ordering note: for src the old
   carry must be read before write_carry clobbers it.  */
static void dec_bit(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 8) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            /* Carry-out is the bit shifted off the bottom of ra.  */
            tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
            if (dc->rd) {
                t1 = tcg_temp_new();
                /* Old carry rotates in at the top.  */
                read_carry(dc, t1);
                tcg_gen_shli_tl(t1, t1, 31);

                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1);
                tcg_temp_free(t1);
            }

            /* Update carry.  */
            write_carry(dc, t0);
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            t0 = tcg_temp_new();
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry.  */
            tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
            write_carry(dc, t0);
            tcg_temp_free(t0);
            if (dc->rd) {
                /* 0x41 is the logical shift; 0x1 the arithmetic one.  */
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc: data-cache line op; privileged, otherwise a nop here.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic: insn-cache line op; privileged, otherwise a nop here.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        default:
            cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                     dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
751

    
752
/* Materialize a translation-time direct branch (JMP_DIRECT) into the
   runtime branch state (env_btaken/env_btarget), demoting it to
   JMP_INDIRECT.  Needed before anything that may fault mid-dslot so
   the exception path sees a consistent branch state.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT) {
            dc->jmp = JMP_INDIRECT;
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
760

    
761
/* imm prefix: latch the upper 16 bits of the next insn's immediate in
   env_imm and set IMM_FLAG so dec_alu_op_b ORs in the low half.
   clear_imm = 0 keeps the flag alive across this insn only.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
768

    
769
/* Emit a zero-extending load of 'size' bytes from addr into dst,
   using the mmu index of the current privilege level.  */
static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                            unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    switch (size) {
    case 1:
        tcg_gen_qemu_ld8u(dst, addr, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld16u(dst, addr, mem_index);
        break;
    case 4:
        tcg_gen_qemu_ld32u(dst, addr, mem_index);
        break;
    default:
        cpu_abort(dc->env, "Incorrect load size %d\n", size);
        break;
    }
}
783

    
784
/* Compute the effective address for a load/store and return a pointer
   to the TCGv holding it.  When an existing register already holds the
   address (one operand is r0, or the immediate is 0), a pointer to
   that register is returned and *t is left untouched; otherwise *t is
   allocated and the sum is built there.  Callers free *t only when the
   returned pointer equals t.  */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;

    /* Treat the fast cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        /* Sign-extended 16-bit displacement.  */
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        /* imm-prefixed: full 32-bit displacement via dec_alu_op_b.  */
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    return t;
}
816

    
817
/* Decode lbu/lhu/lw (and immediate forms).  Loads are zero-extending.
   When the core checks alignment, the load is performed speculatively
   into a scratch so MMU faults take priority over unaligned-address
   faults, as the hardware does.  */
static void dec_load(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size;

    size = 1 << (dc->opcode & 3);
    /* opcode bits encoding size 8 would be an illegal insn.  */
    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l %x %d\n", dc->opcode, size);
    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        TCGv v = tcg_temp_new();

        /*
         * Microblaze gives MMU faults priority over faults due to
         * unaligned addresses. That's why we speculatively do the load
         * into v. If the load succeeds, we verify alignment of the
         * address and if that succeeds we write into the destination reg.
         */
        gen_load(dc, v, *addr, size);

        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
        if (dc->rd)
            tcg_gen_mov_tl(cpu_R[dc->rd], v);
        tcg_temp_free(v);
    } else {
        if (dc->rd) {
            gen_load(dc, cpu_R[dc->rd], *addr, size);
        } else {
            /* rd == r0: still perform the access (it may fault), but
               dump the result into env_imm.  */
            gen_load(dc, env_imm, *addr, size);
        }
    }

    /* Free the scratch only if compute_ldst_addr allocated it.  */
    if (addr == &t)
        tcg_temp_free(t);
}
866

    
867
/* Emit a store of the low 'size' bytes of val to addr, using the mmu
   index of the current privilege level.  */
static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
                      unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    switch (size) {
    case 1:
        tcg_gen_qemu_st8(val, addr, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st16(val, addr, mem_index);
        break;
    case 4:
        tcg_gen_qemu_st32(val, addr, mem_index);
        break;
    default:
        cpu_abort(dc->env, "Incorrect store size %d\n", size);
        break;
    }
}
881

    
882
/* Decode sb/sh/sw (and immediate forms).  The store is emitted first;
   if the core checks alignment, the check follows it so MMU faults
   win over unaligned faults.  */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size;

    size = 1 << (dc->opcode & 3);

    /* opcode bits encoding size 8 would be an illegal insn.  */
    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s\n", size, dc->type_b ? "i" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    gen_store(dc, *addr, cpu_R[dc->rd], size);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory.
         */
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    /* Free the scratch only if compute_ldst_addr allocated it.  */
    if (addr == &t)
        tcg_temp_free(t);
}
917

    
918
/* Emit d = (a <cc> b) for a branch condition code, as a 0/1 value.  */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    TCGCond cond;

    /* Map the MicroBlaze condition code onto a TCG condition, then
       emit a single setcond.  */
    switch (cc) {
        case CC_EQ:
            cond = TCG_COND_EQ;
            break;
        case CC_NE:
            cond = TCG_COND_NE;
            break;
        case CC_LT:
            cond = TCG_COND_LT;
            break;
        case CC_LE:
            cond = TCG_COND_LE;
            break;
        case CC_GE:
            cond = TCG_COND_GE;
            break;
        case CC_GT:
            cond = TCG_COND_GT;
            break;
        default:
            cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
            return;
    }
    tcg_gen_setcond_tl(cond, d, a, b);
}
945

    
946
/* Emit the runtime resolution of a conditional branch: SR_PC gets
   pc_true when env_btaken is non-zero, pc_false otherwise.  */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
957

    
958
/* Decode conditional branches (beq/bne/blt/ble/bgt/bge, with optional
   delay slot).  Computes env_btarget and evaluates the condition on
   r[ra] into env_btaken; the branch is resolved later by
   eval_cond_jmp() once the delay slot (if any) has been translated.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;
    TCGv zero;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Record whether an imm insn preceded the branch so the delay
           slot can be re-executed correctly after an exception.  */
        TCGv bimm = tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG));

        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(bimm, cpu_env, offsetof(CPUState, bimm));
        /* Free the constant temp; the original leaked it per insn.  */
        tcg_temp_free(bimm);
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
    } else {
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    dc->jmp = JMP_INDIRECT;
    /* Compare r[ra] against zero; free the zero temp afterwards
       (previously leaked).  */
    zero = tcg_const_tl(0);
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], zero);
    tcg_temp_free(zero);
}
986

    
987
/* Decode unconditional branches (br/bra/brl/bri/... with optional
   delay slot, absolute and link variants).  Also implements the
   brki-based software traps: brki to 8/0x18 raises EXCP_BREAK and
   brki rD, 0 acts as a debug trap (privileged).  */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs;
    int mem_index = cpu_mmu_index(dc->env);

    /* Bits 20/19/18 select delay slot, absolute and link behavior.  */
    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);
    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Branch with delay slot: translate one more insn before
           resolving, and record whether an imm insn preceded us.  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki to vectors 8/0x18 is the software break trap
               (only when not extended by a pending imm).  */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki rD, 0: privileged debug trap.  */
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* PC-relative target known at translation time: allow the
               main loop to chain this TB directly.  */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1038

    
1039
/* Finish a return-from-interrupt: set MSR_IE and restore VM/UM from
   their saved copies (held one bit to the left in the MSR).  */
static inline void do_rti(DisasContext *dc)
{
    TCGv saved_modes = tcg_temp_new();
    TCGv new_msr = tcg_temp_new();

    tcg_gen_shri_tl(saved_modes, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(new_msr, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(saved_modes, saved_modes, (MSR_VM | MSR_UM));
    tcg_gen_andi_tl(new_msr, new_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(new_msr, new_msr, saved_modes);
    msr_write(dc, new_msr);
    tcg_temp_free(new_msr);
    tcg_temp_free(saved_modes);
    dc->tb_flags &= ~DRTI_FLAG;
}
1055

    
1056
/* Finish a return-from-break: clear MSR_BIP and restore VM/UM from
   their saved copies (held one bit to the left in the MSR).  */
static inline void do_rtb(DisasContext *dc)
{
    TCGv saved_modes = tcg_temp_new();
    TCGv new_msr = tcg_temp_new();

    tcg_gen_andi_tl(new_msr, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(saved_modes, new_msr, 1);
    tcg_gen_andi_tl(saved_modes, saved_modes, (MSR_VM | MSR_UM));
    tcg_gen_andi_tl(new_msr, new_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(new_msr, new_msr, saved_modes);
    msr_write(dc, new_msr);
    tcg_temp_free(new_msr);
    tcg_temp_free(saved_modes);
    dc->tb_flags &= ~DRTB_FLAG;
}
1072

    
1073
/* Finish a return-from-exception: set MSR_EE, clear MSR_EIP and
   restore VM/UM from their saved copies (held one bit to the left
   in the MSR).  */
static inline void do_rte(DisasContext *dc)
{
    TCGv saved_modes = tcg_temp_new();
    TCGv new_msr = tcg_temp_new();

    tcg_gen_ori_tl(new_msr, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(new_msr, new_msr, ~MSR_EIP);
    tcg_gen_shri_tl(saved_modes, new_msr, 1);
    tcg_gen_andi_tl(saved_modes, saved_modes, (MSR_VM | MSR_UM));
    tcg_gen_andi_tl(new_msr, new_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(new_msr, new_msr, saved_modes);
    msr_write(dc, new_msr);
    tcg_temp_free(new_msr);
    tcg_temp_free(saved_modes);
    dc->tb_flags &= ~DRTE_FLAG;
}
1091

    
1092
/* Decode rts/rtid/rtbd/rted.  All variants have a mandatory delay
   slot; the interrupt/break/exception variants are privileged and
   defer their MSR restore (do_rti/do_rtb/do_rte) to the end of the
   delay slot via the DRT*_FLAG tb flags.  */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);
    TCGv bimm;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* rts always executes its delay slot.  */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    /* Record whether an imm insn preceded us, for delay-slot restart;
       free the constant temp (the original leaked it per insn).  */
    bimm = tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG));
    tcg_gen_st_tl(bimm, cpu_env, offsetof(CPUState, bimm));
    tcg_temp_free(bimm);

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        /* Privileged: trap if executed from user mode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Return target is r[ra] + operand b.  */
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1136

    
1137
/* Check PVR for FPU v2 availability.  Raises an FPU hardware
   exception when absent (and MSR_EE is set); returns nonzero when
   the v2 FPU is configured.  */
static int dec_check_fpuv2(DisasContext *dc)
{
    int has_fpu2 = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!has_fpu2 && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return has_fpu2;
}
1149

    
1150
/* Decode floating point insns.  Bits 7..9 select the operation
   (fadd/frsub/fmul/fdiv/fcmp/flt/fint/fsqrt); fcmp's sub-op lives in
   bits 4..6.  flt/fint/fsqrt additionally require the v2 FPU.  */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured at all: raise illegal-opcode if the core can.  */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 4:
            /* fcmp; bits 4..6 select the comparison.  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log ("unimplemented fcmp fpu_insn=%x pc=%x opc=%x\n",
                              fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            /* flt: requires the v2 FPU.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        case 6:
            /* fint: requires the v2 FPU.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        case 7:
            /* fsqrt: requires the v2 FPU.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        default:
            qemu_log ("unimplemented FPU insn fpu_insn=%x pc=%x opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1247

    
1248
/* Catch-all decoder for unrecognized opcodes: raise illegal-opcode
   if the core supports it, otherwise log and flag the translator to
   abort at the next insn.  */
static void dec_null(DisasContext *dc)
{
    int can_trap = (dc->tb_flags & MSR_EE_FLAG)
                   && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK);

    if (can_trap) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
1259

    
1260
/* Decoder dispatch table.  decode() walks it in order and calls the
   first entry whose (opcode & mask) == bits; the {0, 0} dec_null
   entry matches everything and must therefore stay last.  */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {{0, 0}, dec_null}
};
1286

    
1287
/* Fetch and decode one instruction at dc->pc, then dispatch to the
   matching handler in decinfo[].  Also tracks runs of nops (zero
   words) to catch runaway fetches of empty memory.  */
static inline void decode(DisasContext *dc)
{
    uint32_t ir;
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(dc->pc);

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Opcode 0x0 may itself be configured as illegal.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        /* More than four consecutive zero words looks like a fetch
           off into empty memory; give up loudly.  */
        if (dc->nr_nops > 4)
            cpu_abort(dc->env, "fetching nop sequence\n");
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    /* Crack the common field layout once for all handlers.  */
    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1331

    
1332
/* If a guest breakpoint is registered at dc->pc, emit a debug
   exception and stop translation of this TB.  */
static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (likely(QTAILQ_EMPTY(&env->breakpoints))) {
        return;
    }

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == dc->pc) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
        }
    }
}
1345

    
1346
/* generate intermediate code for basic block 'tb'.  */
1347
/* Translate one basic block starting at tb->pc into TCG ops.
   With search_pc set, additionally fill the gen_opc_* side tables so
   a host PC can later be mapped back to a guest PC.  Translation
   stops at a branch resolution, a cpu-state change, the op buffer
   limit, a page boundary, or the icount limit.  */
static void
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                               int search_pc)
{
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    /* Remember the entry flags so we can detect per-tb state changes.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB may start inside a delay slot (D_FLAG carried over).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(env, 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            /* Record the guest PC / icount for each generated op.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
                        gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        /* A dec_imm handler clears this to keep IMM_FLAG alive for
           the following insn.  */
        dc->clear_imm = 1;
        decode(dc);
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot done: apply any deferred MSR restore.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                 if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp != JMP_DIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                }
                break;
            }
        }
        if (env->singlestep_enabled)
            break;
    } while (!dc->is_jmp && !dc->cpustate_changed
         && gen_opc_ptr < gen_opc_end
                 && !singlestep
         && (dc->pc < next_page_start)
                 && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended inside the branch's delay slot; resolve it in
               the next TB instead of chaining.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(env->singlestep_enabled)) {
        t_gen_raise_exception(dc, EXCP_DEBUG);
        if (dc->is_jmp == DISAS_NEXT)
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
                tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
            dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1526

    
1527
/* Translate a TB without building the PC search side tables.  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
1531

    
1532
/* Translate a TB while recording the gen_opc_* tables so a host PC
   can be mapped back to a guest PC (see gen_pc_load).  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
1536

    
1537
/* Dump the CPU state (PC, special registers, flags and the 32 GPRs,
   four per line) to stream f.  No-op if env or f is NULL.  */
void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
{
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
1564

    
1565
/* Allocate and initialize a MicroBlaze CPUState.  On first call this
   also registers all TCG globals (env pointer, GPRs, special regs and
   the branch/imm helper state); subsequent calls skip that step.
   cpu_model is currently unused.  */
CPUState *cpu_mb_init (const char *cpu_model)
{
    CPUState *env;
    static int tcg_initialized = 0;
    int i;

    env = qemu_mallocz(sizeof(CPUState));

    cpu_exec_init(env);
    cpu_reset(env);
    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);

    /* TCG globals are process-wide; register them only once.  */
    if (tcg_initialized)
        return env;

    tcg_initialized = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0, 
                    offsetof(CPUState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0, 
                    offsetof(CPUState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0, 
                    offsetof(CPUState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUState, btaken),
                     "btaken");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, sregs[i]),
                          special_regnames[i]);
    }
    /* Re-include helper.h to register the helper functions with TCG
       (GEN_HELPER == 2 selects the registration expansion).  */
#define GEN_HELPER 2
#include "helper.h"

    return env;
}
1614

    
1615
/* Reset the CPU to its power-on state: clear all state up to the
   breakpoint list, flush the TLB, and populate the PVR registers with
   this model's fixed feature set.  */
void cpu_reset (CPUState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* Fields after 'breakpoints' (the breakpoint/watchpoint lists)
       survive reset.  */
    memset(env, 0, offsetof(CPUMBState, breakpoints));
    tlb_flush(env, 1);

    /* Advertise the emulated feature set: barrel shifter, divider,
       hw multiplier, exceptions, caches and MMU.  The (0xb << 8)
       presumably encodes a version field -- not verifiable here.  */
    env->pvr.regs[0] = PVR0_PVR_FULL_MASK \
                       | PVR0_USE_BARREL_MASK \
                       | PVR0_USE_DIV_MASK \
                       | PVR0_USE_HW_MUL_MASK \
                       | PVR0_USE_EXC_MASK \
                       | PVR0_USE_ICACHE_MASK \
                       | PVR0_USE_DCACHE_MASK \
                       | PVR0_USE_MMU \
                       | (0xb << 8);
    env->pvr.regs[2] = PVR2_D_OPB_MASK \
                        | PVR2_D_LMB_MASK \
                        | PVR2_I_OPB_MASK \
                        | PVR2_I_LMB_MASK \
                        | PVR2_USE_MSR_INSTR \
                        | PVR2_USE_PCMP_INSTR \
                        | PVR2_USE_BARREL_MASK \
                        | PVR2_USE_DIV_MASK \
                        | PVR2_USE_HW_MUL_MASK \
                        | PVR2_USE_MUL64_MASK \
                        | PVR2_USE_FPU_MASK \
                        | PVR2_USE_FPU2_MASK \
                        | PVR2_FPU_EXC_MASK \
                        | 0;
    env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family.  */
    env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17);

#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled.  */
    env->sregs[SR_MSR] = MSR_EE | MSR_IE | MSR_VM | MSR_UM;
    env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp.  */
#else
    /* System emulation starts in kernel mode with a full MMU.  */
    env->sregs[SR_MSR] = 0;
    mmu_init(&env->mmu);
    env->mmu.c_mmu = 3;
    env->mmu.c_mmu_tlb_access = 3;
    env->mmu.c_mmu_zones = 16;
#endif
}
1663

    
1664
/* Restore the guest PC from the gen_opc_pc side table at op position
   pc_pos (filled by the search_pc translation pass); presumably used
   when restarting a TB after a fault mid-block -- confirm against
   the generic translator core.  */
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->sregs[SR_PC] = gen_opc_pc[pc_pos];
}