/*
 *  m68k translation
 *
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

//#define DEBUG_DISPATCH 1

/* Fake floating point.  */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

static TCGv_ptr cpu_env;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];

#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]

static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "gen-icount.h"

void m68k_tcg_init(void)
{
    char *p;
    int i;

#define DEFO32(name,  offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
#define DEFO64(name,  offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
#define DEFF64(name,  offset) DEFO64(name, offset)
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUM68KState, fregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
    store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");

#define GEN_HELPER 2
#include "helpers.h"
}

static inline void qemu_assert(int cond, const char *msg)
{
    if (!cond) {
        fprintf (stderr, "badness: %s\n", msg);
        abort();
    }
}

/* internal defines */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction.  */
    target_ulong pc;
    int is_jmp;
    int cc_op;
    int user;
    uint32_t fpcr;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int is_mem;
    TCGv_i64 mactmp;
    int done_mac;
} DisasContext;

#define DISAS_JUMP_NEXT 4

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#endif

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL

#define OS_BYTE 0
#define OS_WORD 1
#define OS_LONG 2
#define OS_SINGLE 4
#define OS_DOUBLE 5

typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif

/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
{
    TCGv tmp;
    int index = IS_USER(s);
    s->is_mem = 1;
    tmp = tcg_temp_new_i32();
    switch(opsize) {
    case OS_BYTE:
        if (sign)
            tcg_gen_qemu_ld8s(tmp, addr, index);
        else
            tcg_gen_qemu_ld8u(tmp, addr, index);
        break;
    case OS_WORD:
        if (sign)
            tcg_gen_qemu_ld16s(tmp, addr, index);
        else
            tcg_gen_qemu_ld16u(tmp, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        break;
    default:
        qemu_assert(0, "bad load size");
    }
    gen_throws_exception = gen_last_qop;
    return tmp;
}

static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
{
    TCGv_i64 tmp;
    int index = IS_USER(s);
    s->is_mem = 1;
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(tmp, addr, index);
    gen_throws_exception = gen_last_qop;
    return tmp;
}

/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
{
    int index = IS_USER(s);
    s->is_mem = 1;
    switch(opsize) {
    case OS_BYTE:
        tcg_gen_qemu_st8(val, addr, index);
        break;
    case OS_WORD:
        tcg_gen_qemu_st16(val, addr, index);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_qemu_st32(val, addr, index);
        break;
    default:
        qemu_assert(0, "bad store size");
    }
    gen_throws_exception = gen_last_qop;
}

static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
{
    int index = IS_USER(s);
    s->is_mem = 1;
    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
}

typedef enum {
    EA_STORE,
    EA_LOADU,
    EA_LOADS
} ea_what;

/* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
   EA_LOADS, otherwise generate a store.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what)
{
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
        return store_dummy;
    } else {
        return gen_load(s, opsize, addr, what == EA_LOADS);
    }
}

/* Read a 32-bit immediate constant.  */
static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
{
    uint32_t im;
    im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
    s->pc += 2;
    im |= cpu_lduw_code(env, s->pc);
    s->pc += 2;
    return im;
}

/* Calculate an address index.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}

/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize,
                            TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    offset = s->pc;
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)cpu_lduw_code(env, s->pc);
                s->pc += 2;
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)cpu_lduw_code(env, s->pc);
                    s->pc += 2;
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}

/* Update the CPU env CC_OP state.  */
static inline void gen_flush_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
}

/* Evaluate all the CC flags.  */
static inline void gen_flush_flags(DisasContext *s)
{
    if (s->cc_op == CC_OP_FLAGS)
        return;
    gen_flush_cc_op(s);
    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
    s->cc_op = CC_OP_FLAGS;
}

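/* Set the condition codes from the result of a logical operation.  */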
static void gen_logic_cc(DisasContext *s, TCGv val)
{
    tcg_gen_mov_i32(QREG_CC_DEST, val);
    s->cc_op = CC_OP_LOGIC;
}

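/* Record the result and source operand of an add/subtract so the flags
   can be computed lazily from CC_DEST and CC_SRC.  */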
static void gen_update_cc_add(TCGv dest, TCGv src)
{
    tcg_gen_mov_i32(QREG_CC_DEST, dest);
    tcg_gen_mov_i32(QREG_CC_SRC, src);
}

static inline int opsize_bytes(int opsize)
{
    switch (opsize) {
    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
    default:
        qemu_assert(0, "bad operand size");
        return 0;
    }
}

/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_WORD:
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        qemu_assert(0, "Bad operand size");
        break;
    }
}

/* Sign or zero extend a value.  */
static inline TCGv gen_extend(TCGv val, int opsize, int sign)
{
    TCGv tmp;

    switch (opsize) {
    case OS_BYTE:
        tmp = tcg_temp_new();
        if (sign)
            tcg_gen_ext8s_i32(tmp, val);
        else
            tcg_gen_ext8u_i32(tmp, val);
        break;
    case OS_WORD:
        tmp = tcg_temp_new();
        if (sign)
            tcg_gen_ext16s_i32(tmp, val);
        else
            tcg_gen_ext16u_i32(tmp, val);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tmp = val;
        break;
    default:
        qemu_assert(0, "Bad operand size");
    }
    return tmp;
}

/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        ext = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = AREG(insn, 0);
        return gen_lea_indexed(env, s, opsize, reg);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
            offset = cpu_ldsw_code(env, s->pc);
            s->pc += 2;
            return tcg_const_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += cpu_ldsw_code(env, s->pc);
            s->pc += 2;
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, opsize, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}

/* Helper function for gen_ea.  Reuse the computed address between the read
   and write halves of read/write operands.  */
static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
                               uint16_t insn, int opsize, TCGv val,
                               TCGv *addrp, ea_what what)
{
    TCGv tmp;

    if (addrp && what == EA_STORE) {
        tmp = *addrp;
    } else {
        tmp = gen_lea(env, s, insn, opsize);
        if (IS_NULL_QREG(tmp))
            return tmp;
        if (addrp)
            *addrp = tmp;
    }
    return gen_ldst(s, opsize, tmp, val, what);
}

/* Generate code to load/store a value into/from an EA.  If WHAT is EA_STORE
   this is a write, otherwise it is a read (EA_LOADS sign extends, EA_LOADU
   zero extends).  ADDRP is non-null for readwrite operands.  */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg;
    TCGv result;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        reg = DREG(insn, 0);
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = AREG(insn, 0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = AREG(insn, 0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        reg = AREG(insn, 0);
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
        return result;
    case 4: /* Indirect predecrement.  */
        {
            TCGv tmp;
            if (addrp && what == EA_STORE) {
                tmp = *addrp;
            } else {
                tmp = gen_lea(env, s, insn, opsize);
                if (IS_NULL_QREG(tmp))
                    return tmp;
                if (addrp)
                    *addrp = tmp;
            }
            result = gen_ldst(s, opsize, tmp, val, what);
            /* ??? This is not exception safe.  The instruction may still
               fault after this point.  */
            if (what == EA_STORE || !addrp) {
                reg = AREG(insn, 0);
                tcg_gen_mov_i32(reg, tmp);
            }
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = cpu_ldsb_code(env, s->pc + 1);
                } else {
                    offset = cpu_ldub_code(env, s->pc + 1);
                }
                s->pc += 2;
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = cpu_ldsw_code(env, s->pc);
                } else {
                    offset = cpu_lduw_code(env, s->pc);
                }
                s->pc += 2;
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                qemu_assert(0, "Bad immediate operand");
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}

/* This generates a conditional branch, clobbering all temporaries.  */
static void gen_jmpcc(DisasContext *s, int cond, int l1)
{
    TCGv tmp;

    /* TODO: Optimize compare/branch pairs rather than always flushing
       flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);
    switch (cond) {
    case 0: /* T */
        tcg_gen_br(l1);
        break;
    case 1: /* F */
        break;
    case 2: /* HI (!C && !Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 3: /* LS (C || Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 4: /* CC (!C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 5: /* CS (C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 6: /* NE (!Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 7: /* EQ (Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 8: /* VC (!V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 9: /* VS (V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 10: /* PL (!N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 11: /* MI (N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 12: /* GE (!(N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 13: /* LT (N ^ V) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 15: /* LE (Z || (N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    default:
        /* Should never happen.  */
        abort();
    }
}

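/* Scc: set the low byte of a data register to all ones if the condition
   holds, otherwise clear it.  */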
DISAS_INSN(scc)
{
    int l1;
    int cond;
    TCGv reg;

    l1 = gen_new_label();
    cond = (insn >> 8) & 0xf;
    reg = DREG(insn, 0);
    tcg_gen_andi_i32(reg, reg, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
       live.  */
    gen_jmpcc(s, cond ^ 1, l1);
    tcg_gen_ori_i32(reg, reg, 0xff);
    gen_set_label(l1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    gen_flush_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    gen_flush_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    gen_flush_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
    gen_flush_cc_op(s);
    gen_jmp_im(s, where);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
}

static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}

#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU);                 \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}

DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}

DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}

DISAS_INSN(undef)
{
    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}

DISAS_INSN(mulw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    tmp = tcg_temp_new();
    if (sign)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, tmp);
}

DISAS_INSN(divw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    if (sign) {
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
    } else {
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    }
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
    if (sign) {
        gen_helper_divs(cpu_env, tcg_const_i32(1));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(1));
    }

    tmp = tcg_temp_new();
    src = tcg_temp_new();
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
    tcg_gen_shli_i32(src, QREG_DIV2, 16);
    tcg_gen_or_i32(reg, tmp, src);
    s->cc_op = CC_OP_FLAGS;
}

DISAS_INSN(divl)
{
    TCGv num;
    TCGv den;
    TCGv reg;
    uint16_t ext;

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext & 0x87f8) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    num = DREG(ext, 12);
    reg = DREG(ext, 0);
    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(env, den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
    if (ext & 0x0800) {
        gen_helper_divs(cpu_env, tcg_const_i32(0));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    }
    if ((ext & 7) == ((ext >> 12) & 7)) {
        /* div */
        tcg_gen_mov_i32 (reg, QREG_DIV1);
    } else {
        /* rem */
        tcg_gen_mov_i32 (reg, QREG_DIV2);
    }
    s->cc_op = CC_OP_FLAGS;
}

DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;

    add = (insn & 0x4000) != 0;
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, tmp, OS_LONG, 0, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, OS_LONG, 0, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        gen_helper_xflag_lt(QREG_CC_X, dest, src);
        s->cc_op = CC_OP_ADD;
    } else {
        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        s->cc_op = CC_OP_SUB;
    }
    gen_update_cc_add(dest, src);
    if (insn & 0x100) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        tcg_gen_mov_i32(reg, dest);
    }
}


/* Reverse the order of the bits in REG.  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}

DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    gen_flush_flags(s);
    tmp = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
    else
        tcg_gen_andi_i32(tmp, src2, 31);
    src2 = tmp;
    tmp = tcg_temp_new();
    tcg_gen_shr_i32(tmp, src1, src2);
    tcg_gen_andi_i32(tmp, tmp, 1);
    tcg_gen_shli_i32(tmp, tmp, 2);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);

    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_not_i32(tmp, tmp);
        tcg_gen_and_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    if (op)
        DEST_EA(env, insn, opsize, dest, &addr);
}

DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_DEST);
    gen_logic_cc(s, reg);
}

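/* Push a value onto the stack: predecrement SP by 4 and store VAL there.  */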
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val);
    tcg_gen_mov_i32(QREG_SP, tmp);
}

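/* movem: load or store the set of registers selected by the mask word that
   follows the instruction.  */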
DISAS_INSN(movem)
{
    TCGv addr;
    int i;
    uint16_t mask;
    TCGv reg;
    TCGv tmp;
    int is_load;

    mask = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
        if (mask & 1) {
            if (i < 8)
                reg = DREG(i, 0);
            else
                reg = AREG(i, 0);
            if (is_load) {
                tmp = gen_load(s, OS_LONG, addr, 0);
                tcg_gen_mov_i32(reg, tmp);
            } else {
                gen_store(s, OS_LONG, addr, reg);
            }
            if (mask != 1)
                tcg_gen_addi_i32(addr, addr, 4);
        }
    }
}

DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (bitnum & 0xff00) {
        disas_undef(env, s, insn);
        return;
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    tmp = tcg_temp_new();
    assert (CCF_Z == (1 << 2));
    if (bitnum > 2)
        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
    else if (bitnum < 2)
        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
    else
        tcg_gen_mov_i32(tmp, src1);
    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
    if (op) {
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
    }
}

DISAS_INSN(arith_im)
{
    int op;
    uint32_t im;
    TCGv src1;
    TCGv dest;
    TCGv addr;

    op = (insn >> 9) & 7;
    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    im = read_im32(env, s);
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 1: /* andi */
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 2: /* subi */
        tcg_gen_mov_i32(dest, src1);
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        break;
    case 3: /* addi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        s->cc_op = CC_OP_ADD;
        break;
    case 5: /* eori */
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 6: /* cmpi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        break;
    default:
        abort();
    }
    if (op != 6) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    }
}

DISAS_INSN(byterev)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
}

DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src);
    }
}

DISAS_INSN(negx)
{
    TCGv reg;

    gen_flush_flags(s);
    reg = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
}

DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}

DISAS_INSN(clr)
{
    int opsize;

    switch ((insn >> 6) & 3) {
    case 0: /* clr.b */
        opsize = OS_BYTE;
        break;
    case 1: /* clr.w */
        opsize = OS_WORD;
        break;
    case 2: /* clr.l */
        opsize = OS_LONG;
        break;
    default:
        abort();
    }
    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
    gen_logic_cc(s, tcg_const_i32(0));
}

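/* Assemble the CCR value from the lazily tracked flag state: the X flag in
   bit 4, the NZVC flags in the low bits.  */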
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    gen_flush_flags(s);
    dest = tcg_temp_new();
    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
    return dest;
}

DISAS_INSN(move_from_ccr)
{
    TCGv reg;
    TCGv ccr;

    ccr = gen_get_ccr(s);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, ccr);
}

DISAS_INSN(neg)
{
    TCGv reg;
    TCGv src1;

    reg = DREG(insn, 0);
    src1 = tcg_temp_new();
    tcg_gen_mov_i32(src1, reg);
    tcg_gen_neg_i32(reg, src1);
    s->cc_op = CC_OP_SUB;
    gen_update_cc_add(reg, src1);
    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
    s->cc_op = CC_OP_SUB;
}

static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
    if (!ccr_only) {
        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
    }
}

static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                       int ccr_only)
{
    TCGv tmp;
    TCGv reg;

    s->cc_op = CC_OP_FLAGS;
    if ((insn & 0x38) == 0)
      {
        tmp = tcg_temp_new();
        reg = DREG(insn, 0);
        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
        tcg_gen_shri_i32(tmp, reg, 4);
        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
        if (!ccr_only) {
            gen_helper_set_sr(cpu_env, reg);
        }
      }
    else if ((insn & 0x3f) == 0x3c)
      {
        uint16_t val;
        val = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        gen_set_sr_im(s, val, ccr_only);
      }
    else
        disas_undef(env, s, insn);
}

DISAS_INSN(move_to_ccr)
{
    gen_set_sr(env, s, insn, 1);
}

DISAS_INSN(not)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_not_i32(reg, reg);
    gen_logic_cc(s, reg);
}

DISAS_INSN(swap)
{
    TCGv src1;
    TCGv src2;
    TCGv reg;

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
    tcg_gen_shri_i32(src2, reg, 16);
    tcg_gen_or_i32(reg, src1, src2);
    gen_logic_cc(s, reg);
}

DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}

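/* ext: sign extend the low byte or word of a data register.  */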
DISAS_INSN(ext)
{
    int op;
    TCGv reg;
    TCGv tmp;

    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = tcg_temp_new();
    if (op == 3)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext8s_i32(tmp, reg);
    if (op == 2)
        gen_partset_reg(OS_WORD, reg, tmp);
    else
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp);
}

DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    switch ((insn >> 6) & 3) {
    case 0: /* tst.b */
        opsize = OS_BYTE;
        break;
    case 1: /* tst.w */
        opsize = OS_WORD;
        break;
    case 2: /* tst.l */
        opsize = OS_LONG;
        break;
    default:
        abort();
    }
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp);
}

DISAS_INSN(pulse)
{
  /* Implemented as a NOP.  */
}

DISAS_INSN(illegal)
{
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}

/* ??? This should be atomic.  */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
}

DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv reg;
    TCGv src1;
    TCGv dest;

    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent.  */
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext & 0x87ff) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    reg = DREG(ext, 12);
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    dest = tcg_temp_new();
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, dest);
}

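/* link: push An, load An with the updated stack pointer, then add the
   displacement to SP to allocate the stack frame.  */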
DISAS_INSN(link)
{
    int16_t offset;
    TCGv reg;
    TCGv tmp;

    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7)
        tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}

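/* unlk: reload An from the frame address it points to and deallocate the
   frame (SP becomes the old An plus 4).  */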
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}

DISAS_INSN(nop)
{
}

DISAS_INSN(rts)
{
    TCGv tmp;

    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}

DISAS_INSN(jump)
{
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}

DISAS_INSN(addsubq)
{
    TCGv src1;
    TCGv src2;
    TCGv dest;
    int val;
    TCGv addr;

    SRC_EA(env, src1, OS_LONG, 0, &addr);
    val = (insn >> 9) & 7;
    if (val == 0)
        val = 8;
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src1);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_subi_i32(dest, dest, val);
        } else {
            tcg_gen_addi_i32(dest, dest, val);
        }
    } else {
        src2 = tcg_const_i32(val);
        if (insn & 0x0100) {
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            tcg_gen_subi_i32(dest, dest, val);
            s->cc_op = CC_OP_SUB;
        } else {
            tcg_gen_addi_i32(dest, dest, val);
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            s->cc_op = CC_OP_ADD;
        }
        gen_update_cc_add(dest, src2);
    }
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}

DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word.  */
        s->pc += 2;
        break;
    case 3: /* Two extension words.  */
        s->pc += 4;
        break;
    case 4: /* No extension words.  */
        break;
    default:
        disas_undef(env, s, insn);
    }
}

DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;
    int l1;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_flush_cc_op(s);
    if (op > 1) {
        /* Bcc */
        l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch.  */
        gen_jmp_tb(s, 0, base + offset);
    }
}

DISAS_INSN(moveq)
{
    uint32_t val;

    val = (int8_t)insn;
    tcg_gen_movi_i32(DREG(insn, 9), val);
    gen_logic_cc(s, tcg_const_i32(val));
}

DISAS_INSN(mvzs)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x40)
        opsize = OS_WORD;
    else
        opsize = OS_BYTE;
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src);
}

DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest);
}

DISAS_INSN(suba)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_sub_i32(reg, reg, src);
}

DISAS_INSN(subx)
{
    TCGv reg;
    TCGv src;

    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, reg, src);
}

DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    val = (insn >> 9) & 7;
    if (val == 0)
        val = -1;
    src = tcg_const_i32(val);
    gen_logic_cc(s, src);
    DEST_EA(env, insn, OS_LONG, src, NULL);
}

DISAS_INSN(cmp)
{
    int op;
    TCGv src;
    TCGv reg;
    TCGv dest;
    int opsize;

    op = (insn >> 6) & 3;
    switch (op) {
    case 0: /* cmp.b */
        opsize = OS_BYTE;
        s->cc_op = CC_OP_CMPB;
        break;
    case 1: /* cmp.w */
        opsize = OS_WORD;
        s->cc_op = CC_OP_CMPW;
        break;
    case 2: /* cmp.l */
        opsize = OS_LONG;
        s->cc_op = CC_OP_SUB;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
}

DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;
    TCGv dest;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
    s->cc_op = CC_OP_SUB;
}

DISAS_INSN(eor)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    SRC_EA(env, src, OS_LONG, 0, &addr);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, reg);
    gen_logic_cc(s, dest);
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}

DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest);
}

DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}

DISAS_INSN(addx)
{
    TCGv reg;
    TCGv src;

    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
    s->cc_op = CC_OP_FLAGS;
}

/* TODO: This could be implemented without helper functions.  */
DISAS_INSN(shift_im)
{
    TCGv reg;
    int tmp;
    TCGv shift;

    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0)
        tmp = 8;
    shift = tcg_const_i32(tmp);
    /* No need to flush flags because we know we will set C flag.  */
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    s->cc_op = CC_OP_SHIFT;
}

DISAS_INSN(shift_reg)
{
    TCGv reg;
    TCGv shift;

    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    /* Shift by zero leaves C flag unmodified.   */
    gen_flush_flags(s);
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    s->cc_op = CC_OP_SHIFT;
}

DISAS_INSN(ff1)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg);
    gen_helper_ff1(reg, reg);
}

static TCGv gen_get_sr(DisasContext *s)
1932
{
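    /* Build the complete SR image: keep the upper bits of QREG_SR and merge
       in the lazily evaluated condition codes from gen_get_ccr().  */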
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}

DISAS_INSN(strldsr)
{
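    /* strldsr: push the current SR, then load SR from an immediate.  The
       0x40e7 opcode must be followed by a move-to-SR word (0x46fc) and the
       new SR value, which must have the supervisor bit set.  */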
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}

DISAS_INSN(move_from_sr)
{
    TCGv reg;
    TCGv sr;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, sr);
}

DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_set_sr(env, s, insn, 0);
    gen_lookup_tb(s);
}

DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}

DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}

DISAS_INSN(halt)
{
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}

DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(QREG_HALTED, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}

DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->pc - 2, EXCP_RTE);
}

DISAS_INSN(movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    gen_lookup_tb(s);
}

DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}

DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}

DISAS_INSN(wddata)
{
    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
}

DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    qemu_assert(0, "WDEBUG not implemented");
}

DISAS_INSN(trap)
{
    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
}

/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int32_t offset;
    int opmode;
    TCGv_i64 src;
    TCGv_i64 dest;
    TCGv_i64 res;
    TCGv tmp32;
    int round;
    int set_dest;
    int opsize;

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0: case 2:
        break;
    case 1:
        goto undef;
    case 3: /* fmove out */
        src = FREG(ext, 7);
        tmp32 = tcg_temp_new_i32();
        /* fmove */
        /* ??? TODO: Proper behavior on overflow.  */
        switch ((ext >> 10) & 7) {
        case 0:
            opsize = OS_LONG;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 1:
            opsize = OS_SINGLE;
            gen_helper_f64_to_f32(tmp32, cpu_env, src);
            break;
        case 4:
            opsize = OS_WORD;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 5: /* OS_DOUBLE */
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            gen_store64(s, tmp32, src);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
            return;
        case 6:
            opsize = OS_BYTE;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        default:
            goto undef;
        }
        DEST_EA(env, insn, opsize, tmp32, NULL);
        tcg_temp_free_i32(tmp32);
        return;
    case 4: /* fmove to control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Ignore writes.  */
            break;
        case 1: /* FPIAR */
        case 2: /* FPSR */
        default:
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
                      (ext >> 10) & 7);
        }
        break;
    case 5: /* fmove from control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Always return zero.  */
            tmp32 = tcg_const_i32(0);
            break;
        case 1: /* FPIAR */
        case 2: /* FPSR */
        default:
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
                      (ext >> 10) & 7);
            goto undef;
        }
        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
        break;
    case 6: /* fmovem */
    case 7:
        {
            TCGv addr;
            uint16_t mask;
            int i;
            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
                goto undef;
            tmp32 = gen_lea(env, s, insn, OS_LONG);
            if (IS_NULL_QREG(tmp32)) {
                gen_addr_fault(s);
                return;
            }
            addr = tcg_temp_new_i32();
            tcg_gen_mov_i32(addr, tmp32);
            mask = 0x80;
            for (i = 0; i < 8; i++) {
                if (ext & mask) {
                    s->is_mem = 1;
                    dest = FREG(i, 0);
                    if (ext & (1 << 13)) {
                        /* store */
                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                    } else {
                        /* load */
                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
                    }
                    if (ext & (mask - 1))
                        tcg_gen_addi_i32(addr, addr, 8);
                }
                mask >>= 1;
            }
            tcg_temp_free_i32(addr);
        }
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        default:
            goto undef;
        }
        if (opsize == OS_DOUBLE) {
            tmp32 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            case 7:
                offset = cpu_ldsw_code(env, s->pc);
                offset += s->pc - 2;
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            src = gen_load64(s, tmp32);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
        } else {
            SRC_EA(env, tmp32, opsize, 1, NULL);
            src = tcg_temp_new_i64();
            switch (opsize) {
            case OS_LONG:
            case OS_WORD:
            case OS_BYTE:
                gen_helper_i32_to_f64(src, cpu_env, tmp32);
                break;
            case OS_SINGLE:
                gen_helper_f32_to_f64(src, cpu_env, tmp32);
                break;
            }
        }
    } else {
        /* Source register.  */
        src = FREG(ext, 10);
    }
    dest = FREG(ext, 7);
    res = tcg_temp_new_i64();
    if (opmode != 0x3a)
        tcg_gen_mov_f64(res, dest);
    round = 1;
    set_dest = 1;
    switch (opmode) {
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        break;
    case 1: /* fint */
        gen_helper_iround_f64(res, cpu_env, src);
        round = 0;
        break;
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
        round = 0;
        break;
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
        break;
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
        break;
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
        break;
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
        break;
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
        break;
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
        break;
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
        break;
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
        set_dest = 0;
        round = 0;
        break;
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
        set_dest = 0;
        round = 0;
        break;
    default:
        goto undef;
    }
    if (ext & (1 << 14)) {
        tcg_temp_free_i64(src);
    }
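    /* Decide whether the result must be squeezed through single precision:
       the 0x4x opmode variants request it unless their double-precision bit
       (0x04) is set, and the plain opmodes obey the FPCR precision setting.  */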
    if (round) {
        if (opmode & 0x40) {
            if ((opmode & 0x4) != 0)
                round = 0;
        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
            round = 0;
        }
    }
    if (round) {
        TCGv tmp = tcg_temp_new_i32();
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    if (set_dest) {
        tcg_gen_mov_f64(dest, res);
    }
    tcg_temp_free_i64(res);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}

DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t addr;
    TCGv flag;
    int l1;

    addr = s->pc;
    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    if (insn & (1 << 6)) {
        offset = (offset << 16) | cpu_lduw_code(env, s->pc);
        s->pc += 2;
    }

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception.  */
    flag = tcg_temp_new();
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
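    /* The helper leaves the comparison class in 'flag': -1 less than,
       0 equal, 1 greater than, 2 unordered (see the per-condition
       comments below).  */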
    /* Jump to l1 if condition is true.  */
    switch (insn & 0xf) {
    case 0: /* f */
        break;
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
        break;
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
        break;
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
        break;
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
        break;
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
        break;
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
        break;
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
        break;
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
        break;
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
        break;
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
        break;
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 15: /* t */
        tcg_gen_br(l1);
        break;
    }
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    gen_jmp_tb(s, 1, addr + offset);
}

DISAS_INSN(frestore)
{
    /* TODO: Implement frestore.  */
    qemu_assert(0, "FRESTORE not implemented");
}

DISAS_INSN(fsave)
{
    /* TODO: Implement fsave.  */
    qemu_assert(0, "FSAVE not implemented");
}

static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
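    /* Extract one 16-bit half of a MAC operand: fractional mode keeps the
       value aligned in the upper half, signed mode sign-extends it and
       unsigned mode zero-extends it.  */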
    TCGv tmp = tcg_temp_new();
    if (s->env->macsr & MACSR_FI) {
        if (upper)
            tcg_gen_andi_i32(tmp, val, 0xffff0000);
        else
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
        if (upper)
            tcg_gen_sari_i32(tmp, val, 16);
        else
            tcg_gen_ext16s_i32(tmp, val);
    } else {
        if (upper)
            tcg_gen_shri_i32(tmp, val, 16);
        else
            tcg_gen_ext16u_i32(tmp, val);
    }
    return tmp;
}

static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}

DISAS_INSN(mac)
{
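    /* EMAC multiply-accumulate: optionally paired with a memory load
       (insn bits 4-5) and, on EMAC_B parts, with a second accumulate into
       another accumulator selected by the extension word.  */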
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}

DISAS_INSN(from_mac)
{
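    /* Read an accumulator into a register, using the MACSR mode to pick the
       extraction (fractional, saturating signed/unsigned or plain truncation);
       bit 6 of the insn additionally clears the accumulator.  */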
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        tcg_gen_trunc_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}

DISAS_INSN(move_mac)
{
    /* FIXME: This can be done without a helper.  */
    int src;
    TCGv dest;
    src = insn & 3;
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
}

DISAS_INSN(from_macsr)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);
}

DISAS_INSN(from_mask)
{
    TCGv reg;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
}

DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
    else
        gen_helper_get_mac_exti(reg, cpu_env, acc);
}

DISAS_INSN(macsr_to_ccr)
{
    tcg_gen_movi_i32(QREG_CC_X, 0);
    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
    s->cc_op = CC_OP_FLAGS;
}

DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
}

DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_lookup_tb(s);
}

DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}

DISAS_INSN(to_mext)
{
    TCGv val;
    TCGv acc;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
    else
        gen_helper_set_mac_extu(cpu_env, val, acc);
}

static disas_proc opcode_table[65536];

static void
register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
{
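  /* For example, registering trap (opcode 0x4e40, mask 0xfff0) fills the 16
     table entries 0x4e40..0x4e4f with the same handler.  */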
  int i;
  int from;
  int to;

  /* Sanity check.  All set bits must be included in the mask.  */
  if (opcode & ~mask) {
      fprintf(stderr,
              "qemu internal error: bogus opcode definition %04x/%04x\n",
              opcode, mask);
      abort();
  }
  /* This could probably be cleverer.  For now just optimize the case where
     the top bits are known.  */
  /* Find the first zero bit in the mask.  */
  i = 0x8000;
  while ((i & mask) != 0)
      i >>= 1;
  /* Iterate over all combinations of this and lower bits.  */
  if (i == 0)
      i = 1;
  else
      i <<= 1;
  from = opcode & ~(i - 1);
  to = from + i;
  for (i = from; i < to; i++) {
      if ((i & mask) == opcode)
          opcode_table[i] = proc;
  }
}

/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
    } while(0)
    INSN(undef,     0000, 0000, CF_ISA_A);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(move,      1000, f000, CF_ISA_A);
    INSN(move,      2000, f000, CF_ISA_A);
    INSN(move,      3000, f000, CF_ISA_A);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(lea,       41c0, f1c0, CF_ISA_A);
    INSN(clr,       4200, ff00, CF_ISA_A);
    INSN(undef,     42c0, ffc0, CF_ISA_A);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(pea,       4840, ffc0, CF_ISA_A);
    INSN(swap,      4840, fff8, CF_ISA_A);
    INSN(movem,     48c0, fbc0, CF_ISA_A);
    INSN(ext,       4880, fff8, CF_ISA_A);
    INSN(ext,       48c0, fff8, CF_ISA_A);
    INSN(ext,       49c0, fff8, CF_ISA_A);
    INSN(tst,       4a00, ff00, CF_ISA_A);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    INSN(illegal,   4afc, ffff, CF_ISA_A);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    INSN(trap,      4e40, fff0, CF_ISA_A);
    INSN(link,      4e50, fff8, CF_ISA_A);
    INSN(unlk,      4e58, fff8, CF_ISA_A);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(nop,       4e71, ffff, CF_ISA_A);
    INSN(stop,      4e72, ffff, CF_ISA_A);
    INSN(rte,       4e73, ffff, CF_ISA_A);
    INSN(rts,       4e75, ffff, CF_ISA_A);
    INSN(movec,     4e7b, ffff, CF_ISA_A);
    INSN(jump,      4e80, ffc0, CF_ISA_A);
    INSN(jump,      4ec0, ffc0, CF_ISA_A);
    INSN(addsubq,   5180, f1c0, CF_ISA_A);
    INSN(scc,       50c0, f0f8, CF_ISA_A);
    INSN(addsubq,   5080, f1c0, CF_ISA_A);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    INSN(branch,    6000, f000, CF_ISA_A);
    /* Disable long branch instructions, then add back the ones we want.  */
    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);

    INSN(moveq,     7000, f100, CF_ISA_A);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    INSN(or,        8000, f000, CF_ISA_A);
    INSN(divw,      80c0, f0c0, CF_ISA_A);
    INSN(addsub,    9000, f000, CF_ISA_A);
    INSN(subx,      9180, f1f8, CF_ISA_A);
    INSN(suba,      91c0, f1c0, CF_ISA_A);

    INSN(undef_mac, a000, f000, CF_ISA_A);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    INSN(and,       c000, f000, CF_ISA_A);
    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
    INSN(addsub,    d000, f000, CF_ISA_A);
    INSN(addx,      d180, f1f8, CF_ISA_A);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f340, ffc0, CF_FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#undef INSN
}

/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
    uint16_t insn;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(s->pc);
    }

    insn = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    opcode_table[insn](env, s, insn);
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(CPUM68KState *env, TranslationBlock *tb,
                               int search_pc)
{
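    /* When search_pc is set, also record the guest PC for each generated op
       so that restore_state_to_opc() can map a fault back to a guest
       instruction boundary.  */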
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    dc->is_mem = 0;
    dc->done_mac = 0;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    break;
                }
            }
            if (dc->is_jmp)
                break;
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            gen_flush_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    //optimize_flags();
    //expand_target_qops();
}

void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void cpu_dump_state(CPUM68KState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
    uint16_t sr;
    CPU_DoubleU u;
    for (i = 0; i < 8; i++)
      {
        u.d = env->fregs[i];
        cpu_fprintf (f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, u.l.upper, u.l.lower, *(double *)&u.d);
      }
    cpu_fprintf (f, "PC = %08x   ", env->pc);
    sr = env->sr;
    cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
}

void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}