Statistics
| Branch: | Revision:

root / target-m68k / translate.c @ 8cfd0495

History | View | Annotate | Download (81.4 kB)

1
/*
2
 *  m68k translation
3
 *
4
 *  Copyright (c) 2005-2007 CodeSourcery
5
 *  Written by Paul Brook
6
 *
7
 * This library is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2 of the License, or (at your option) any later version.
11
 *
12
 * This library is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19
 */
20

    
21
#include "cpu.h"
22
#include "disas/disas.h"
23
#include "tcg-op.h"
24
#include "qemu/log.h"
25

    
26
#include "helpers.h"
27
#define GEN_HELPER 1
28
#include "helpers.h"
29

    
30
//#define DEBUG_DISPATCH 1
31

    
32
/* Fake floating point.  */
33
#define tcg_gen_mov_f64 tcg_gen_mov_i64
34
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
35
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
36

    
37
#define DEFO32(name, offset) static TCGv QREG_##name;
38
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
39
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
40
#include "qregs.def"
41
#undef DEFO32
42
#undef DEFO64
43
#undef DEFF64
44

    
45
static TCGv_i32 cpu_halted;
46

    
47
static TCGv_ptr cpu_env;
48

    
49
static char cpu_reg_names[3*8*3 + 5*4];
50
static TCGv cpu_dregs[8];
51
static TCGv cpu_aregs[8];
52
static TCGv_i64 cpu_fregs[8];
53
static TCGv_i64 cpu_macc[4];
54

    
55
#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
56
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
57
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
58
#define MACREG(acc) cpu_macc[acc]
59
#define QREG_SP cpu_aregs[7]
60

    
61
static TCGv NULL_QREG;
62
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
63
/* Used to distinguish stores from bad addressing modes.  */
64
static TCGv store_dummy;
65

    
66
#include "exec/gen-icount.h"
67

    
68
/* One-time allocation of all TCG globals the m68k translator uses.
   Must run before any translation; the qregs.def expansion and the
   register loops below register names that live for the process
   lifetime.  */
void m68k_tcg_init(void)
{
    char *p;
    int i;

#define DEFO32(name,  offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
#define DEFO64(name,  offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
#define DEFF64(name,  offset) DEFO64(name, offset)
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64

    /* CPUState.halted sits before env within M68kCPU, hence the
       negative offset relative to TCG_AREG0 (which addresses env).  */
    cpu_halted = tcg_global_mem_new_i32(TCG_AREG0,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Carve names out of cpu_reg_names: each D/A/F name takes 3 bytes
       ("D0\0"), each ACC name 5 ("ACC0\0"), matching the array's
       3*8*3 + 5*4 sizing.  */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUM68KState, fregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinels at impossible (negative) env offsets; never actually
       loaded or stored, only compared via IS_NULL_QREG.
       NOTE(review): store_dummy is also registered under the name
       "NULL" — looks like a copy/paste slip; harmless, but confirm.  */
    NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
    store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");

#define GEN_HELPER 2
#include "helpers.h"
}
115

    
116
/* Abort with a diagnostic on stderr when COND is false; no-op otherwise.  */
static inline void qemu_assert(int cond, const char *msg)
{
    if (cond) {
        return;
    }
    fprintf(stderr, "badness: %s\n", msg);
    abort();
}
123

    
124
/* internal defines */
125
/* Per-translation-block decoder state, threaded through every disas_*
   routine.  */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction.  */
    target_ulong pc;      /* Fetch address; advances as operands are read.  */
    int is_jmp;           /* DISAS_* code; nonzero ends the TB.  */
    int cc_op;            /* CC_OP_* lazy-flags state at translation time.  */
    int user;             /* Nonzero when translating user-mode code
                             (see IS_USER).  */
    uint32_t fpcr;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int is_mem;           /* Set once the TB has emitted a memory access.  */
    TCGv_i64 mactmp;      /* Scratch i64 for MAC ops — presumably lazily
                             created; confirm against the MAC decoders.  */
    int done_mac;
} DisasContext;
139

    
140
#define DISAS_JUMP_NEXT 4
141

    
142
#if defined(CONFIG_USER_ONLY)
143
#define IS_USER(s) 1
144
#else
145
#define IS_USER(s) s->user
146
#endif
147

    
148
/* XXX: move that elsewhere */
149
/* ??? Fix exceptions.  */
150
static void *gen_throws_exception;
151
#define gen_last_qop NULL
152

    
153
#define OS_BYTE 0
154
#define OS_WORD 1
155
#define OS_LONG 2
156
#define OS_SINGLE 4
157
#define OS_DOUBLE 5
158

    
159
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
160

    
161
/* Declare an instruction translator.  Under DEBUG_DISPATCH each decoder
   is wrapped so the dispatch is logged before the real handler runs.
   Fix: the wrapper must forward its arguments in (env, s, insn) order —
   the original passed (s, env, insn), which does not match the
   real_disas_##name prototype and breaks the DEBUG_DISPATCH build.  */
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
178

    
179
/* Generate a load from the specified address.  Narrow values are
180
   sign extended to full register width.  */
181
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
182
{
183
    TCGv tmp;
184
    int index = IS_USER(s);
185
    s->is_mem = 1;
186
    tmp = tcg_temp_new_i32();
187
    switch(opsize) {
188
    case OS_BYTE:
189
        if (sign)
190
            tcg_gen_qemu_ld8s(tmp, addr, index);
191
        else
192
            tcg_gen_qemu_ld8u(tmp, addr, index);
193
        break;
194
    case OS_WORD:
195
        if (sign)
196
            tcg_gen_qemu_ld16s(tmp, addr, index);
197
        else
198
            tcg_gen_qemu_ld16u(tmp, addr, index);
199
        break;
200
    case OS_LONG:
201
    case OS_SINGLE:
202
        tcg_gen_qemu_ld32u(tmp, addr, index);
203
        break;
204
    default:
205
        qemu_assert(0, "bad load size");
206
    }
207
    gen_throws_exception = gen_last_qop;
208
    return tmp;
209
}
210

    
211
/* Emit a 64-bit (double) load from ADDR into a fresh i64 temporary.  */
static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
{
    TCGv_i64 dest;
    int index = IS_USER(s);

    s->is_mem = 1;
    dest = tcg_temp_new_i64();
    tcg_gen_qemu_ldf64(dest, addr, index);
    gen_throws_exception = gen_last_qop;
    return dest;
}
221

    
222
/* Generate a store.  */
223
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
224
{
225
    int index = IS_USER(s);
226
    s->is_mem = 1;
227
    switch(opsize) {
228
    case OS_BYTE:
229
        tcg_gen_qemu_st8(val, addr, index);
230
        break;
231
    case OS_WORD:
232
        tcg_gen_qemu_st16(val, addr, index);
233
        break;
234
    case OS_LONG:
235
    case OS_SINGLE:
236
        tcg_gen_qemu_st32(val, addr, index);
237
        break;
238
    default:
239
        qemu_assert(0, "bad store size");
240
    }
241
    gen_throws_exception = gen_last_qop;
242
}
243

    
244
/* Emit a 64-bit (double) store of VAL to ADDR; mirrors gen_store for
   the i64 case.  */
static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
{
    int index = IS_USER(s);
    s->is_mem = 1;
    tcg_gen_qemu_stf64(val, addr, index);
    gen_throws_exception = gen_last_qop;
}
251

    
252
/* Requested operation for gen_ea/gen_ldst: store a value to the EA, or
   load from it with zero (EA_LOADU) or sign (EA_LOADS) extension.  */
typedef enum {
    EA_STORE,
    EA_LOADU,
    EA_LOADS
} ea_what;
257

    
258
/* Generate an unsigned load if VAL is 0 a signed load if val is -1,
259
   otherwise generate a store.  */
260
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
261
                     ea_what what)
262
{
263
    if (what == EA_STORE) {
264
        gen_store(s, opsize, addr, val);
265
        return store_dummy;
266
    } else {
267
        return gen_load(s, opsize, addr, what == EA_LOADS);
268
    }
269
}
270

    
271
/* Read a 32-bit immediate constant.  */
272
static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
273
{
274
    uint32_t im;
275
    im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
276
    s->pc += 2;
277
    im |= cpu_lduw_code(env, s->pc);
278
    s->pc += 2;
279
    return im;
280
}
281

    
282
/* Calculate and address index.  */
283
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
284
{
285
    TCGv add;
286
    int scale;
287

    
288
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
289
    if ((ext & 0x800) == 0) {
290
        tcg_gen_ext16s_i32(tmp, add);
291
        add = tmp;
292
    }
293
    scale = (ext >> 9) & 3;
294
    if (scale != 0) {
295
        tcg_gen_shli_i32(tmp, add, scale);
296
        add = tmp;
297
    }
298
    return add;
299
}
300

    
301
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  Returns NULL_QREG when the
   extension-word format is not supported by the current CPU features.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize,
                            TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the extension word's own address for pc-relative modes.  */
    offset = s->pc;
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    /* Word-sized index requires the WORD_INDEX feature.  */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement: word (0x20) or long (0x30) */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)cpu_lduw_code(env, s->pc);
                s->pc += 2;
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(ext, tmp);
        } else {
            /* Index suppressed (or post-indexed, handled below).  */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold the displacement into the constant.  */
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* Everything suppressed: address is just the displacement.  */
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                /* post-index: index is added after the indirection */
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement: word (2) or long (3) */
                if ((ext & 3) == 2) {
                    od = (int16_t)cpu_lduw_code(env, s->pc);
                    s->pc += 2;
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: base + index + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative base */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
404

    
405
/* Update the CPU env CC_OP state.  Nothing to spill when the flag
   state is already tracked dynamically.  */
static inline void gen_flush_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
}
411

    
412
/* Evaluate all the CC flags via the flush_flags helper and switch the
   lazy flag state to CC_OP_FLAGS.  No-op if already flushed.  */
static inline void gen_flush_flags(DisasContext *s)
{
    if (s->cc_op == CC_OP_FLAGS)
        return;
    gen_flush_cc_op(s);
    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
    s->cc_op = CC_OP_FLAGS;
}
421

    
422
/* Record VAL as the flag source of a logic op and mark the lazy flag
   state as CC_OP_LOGIC.  */
static void gen_logic_cc(DisasContext *s, TCGv val)
{
    tcg_gen_mov_i32(QREG_CC_DEST, val);
    s->cc_op = CC_OP_LOGIC;
}
427

    
428
/* Record DEST and SRC as the lazy flag inputs of an add-style op.  */
static void gen_update_cc_add(TCGv dest, TCGv src)
{
    tcg_gen_mov_i32(QREG_CC_DEST, dest);
    tcg_gen_mov_i32(QREG_CC_SRC, src);
}
433

    
434
static inline int opsize_bytes(int opsize)
435
{
436
    switch (opsize) {
437
    case OS_BYTE: return 1;
438
    case OS_WORD: return 2;
439
    case OS_LONG: return 4;
440
    case OS_SINGLE: return 4;
441
    case OS_DOUBLE: return 8;
442
    default:
443
        qemu_assert(0, "bad operand size");
444
        return 0;
445
    }
446
}
447

    
448
/* Assign value to a register.  If the width is less than the register width
449
   only the low part of the register is set.  */
450
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
451
{
452
    TCGv tmp;
453
    switch (opsize) {
454
    case OS_BYTE:
455
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
456
        tmp = tcg_temp_new();
457
        tcg_gen_ext8u_i32(tmp, val);
458
        tcg_gen_or_i32(reg, reg, tmp);
459
        break;
460
    case OS_WORD:
461
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
462
        tmp = tcg_temp_new();
463
        tcg_gen_ext16u_i32(tmp, val);
464
        tcg_gen_or_i32(reg, reg, tmp);
465
        break;
466
    case OS_LONG:
467
    case OS_SINGLE:
468
        tcg_gen_mov_i32(reg, val);
469
        break;
470
    default:
471
        qemu_assert(0, "Bad operand size");
472
        break;
473
    }
474
}
475

    
476
/* Sign or zero extend a value.  */
477
static inline TCGv gen_extend(TCGv val, int opsize, int sign)
478
{
479
    TCGv tmp;
480

    
481
    switch (opsize) {
482
    case OS_BYTE:
483
        tmp = tcg_temp_new();
484
        if (sign)
485
            tcg_gen_ext8s_i32(tmp, val);
486
        else
487
            tcg_gen_ext8u_i32(tmp, val);
488
        break;
489
    case OS_WORD:
490
        tmp = tcg_temp_new();
491
        if (sign)
492
            tcg_gen_ext16s_i32(tmp, val);
493
        else
494
            tcg_gen_ext16u_i32(tmp, val);
495
        break;
496
    case OS_LONG:
497
    case OS_SINGLE:
498
        tmp = val;
499
        break;
500
    default:
501
        qemu_assert(0, "Bad operand size");
502
    }
503
    return tmp;
504
}
505

    
506
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  Returns NULL_QREG for
   modes that have no memory address (register direct, immediate).  */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        /* Address is the register itself; the increment is applied by
           gen_ea after the access.  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        ext = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = AREG(insn, 0);
        return gen_lea_indexed(env, s, opsize, reg);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
            offset = cpu_ldsw_code(env, s->pc);
            s->pc += 2;
            return tcg_const_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += cpu_ldsw_code(env, s->pc);
            s->pc += 2;
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            /* NULL_QREG base selects pc-relative in gen_lea_indexed.  */
            return gen_lea_indexed(env, s, opsize, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
562

    
563
/* Helper function for gen_ea. Reuse the computed address between the
564
   for read/write operands.  */
565
static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
566
                               uint16_t insn, int opsize, TCGv val,
567
                               TCGv *addrp, ea_what what)
568
{
569
    TCGv tmp;
570

    
571
    if (addrp && what == EA_STORE) {
572
        tmp = *addrp;
573
    } else {
574
        tmp = gen_lea(env, s, insn, opsize);
575
        if (IS_NULL_QREG(tmp))
576
            return tmp;
577
        if (addrp)
578
            *addrp = tmp;
579
    }
580
    return gen_ldst(s, opsize, tmp, val, what);
581
}
582

    
583
/* Generate code to load/store a value from/into the EA selected by the
   mode/register bits of INSN.  WHAT selects a store (EA_STORE) or a
   zero/sign-extending load (EA_LOADU/EA_LOADS).  ADDRP is non-null for
   read/write operands so the address is only computed once.  Returns
   NULL_QREG for an invalid addressing mode, store_dummy for stores.  */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg;
    TCGv result;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        reg = DREG(insn, 0);
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = AREG(insn, 0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = AREG(insn, 0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        reg = AREG(insn, 0);
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        /* Only bump the register once: on the store half, or when the
           operand is read-only (no addrp).  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
        return result;
    case 4: /* Indirect predecrement.  */
        {
            TCGv tmp;
            if (addrp && what == EA_STORE) {
                tmp = *addrp;
            } else {
                tmp = gen_lea(env, s, insn, opsize);
                if (IS_NULL_QREG(tmp))
                    return tmp;
                if (addrp)
                    *addrp = tmp;
            }
            result = gen_ldst(s, opsize, tmp, val, what);
            /* ??? This is not exception safe.  The instruction may still
               fault after this point.  */
            if (what == EA_STORE || !addrp) {
                /* Commit the decremented address back to the register.  */
                reg = AREG(insn, 0);
                tcg_gen_mov_i32(reg, tmp);
            }
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                /* Byte immediates occupy the low byte of a word.  */
                if (what == EA_LOADS) {
                    offset = cpu_ldsb_code(env, s->pc + 1);
                } else {
                    offset = cpu_ldub_code(env, s->pc + 1);
                }
                s->pc += 2;
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = cpu_ldsw_code(env, s->pc);
                } else {
                    offset = cpu_lduw_code(env, s->pc);
                }
                s->pc += 2;
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                qemu_assert(0, "Bad immediate operand");
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
685

    
686
/* This generates a conditional branch to label L1 for m68k condition
   code COND (0-15), clobbering all temporaries.  The GE/LT/GT/LE cases
   rely on CCF_V == CCF_N >> 2 so N ^ V can be formed with one shift.  */
static void gen_jmpcc(DisasContext *s, int cond, int l1)
{
    TCGv tmp;

    /* TODO: Optimize compare/branch pairs rather than always flushing
       flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);
    switch (cond) {
    case 0: /* T */
        tcg_gen_br(l1);
        break;
    case 1: /* F */
        break;
    case 2: /* HI (!C && !Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 3: /* LS (C || Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 4: /* CC (!C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 5: /* CS (C) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 6: /* NE (!Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 7: /* EQ (Z) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 8: /* VC (!V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 9: /* VS (V) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 10: /* PL (!N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 11: /* MI (N) */
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 12: /* GE (!(N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 13: /* LT (N ^ V) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        break;
    case 15: /* LE (Z || (N ^ V)) */
        tmp = tcg_temp_new();
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
        break;
    default:
        /* Should never happen.  */
        abort();
    }
}
789

    
790
/* Scc: set the low byte of Dx to 0xff when the condition holds, else
   to 0x00; the upper 24 bits are preserved.  */
DISAS_INSN(scc)
{
    int l1;
    int cond;
    TCGv reg;

    l1 = gen_new_label();
    cond = (insn >> 8) & 0xf;
    reg = DREG(insn, 0);
    tcg_gen_andi_i32(reg, reg, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
       live.  Branch over the "set" when the inverted condition holds.  */
    gen_jmpcc(s, cond ^ 1, l1);
    tcg_gen_ori_i32(reg, reg, 0xff);
    gen_set_label(l1);
}
806

    
807
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    gen_flush_cc_op(s);
    /* Resume at the instruction following the current one.  */
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}
814

    
815
/* Generate a jump to an immediate address; spills the lazy cc_op first
   since the jump ends the TB.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    gen_flush_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
822

    
823
/* Generate a jump to the address in qreg DEST; spills the lazy cc_op
   first since the jump ends the TB.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    gen_flush_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
830

    
831
/* Raise exception NR with the PC set to WHERE (the faulting
   instruction's address).  */
static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
    gen_flush_cc_op(s);
    gen_jmp_im(s, where);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
}
837

    
838
/* Raise an address error pointing at the instruction being decoded.  */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
842

    
843
/* Load the source operand selected by INSN into RESULT, sign- or
   zero-extended per OP_SIGN.  Bails out of the calling decoder with an
   address fault when the EA is invalid.  */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU);                 \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
851

    
852
/* Store VAL to the destination operand selected by INSN.  Bails out of
   the calling decoder with an address fault when the EA is invalid.  */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
859

    
860
/* Generate a jump to an immediate address, chaining to the next TB
   when possible (slot N of the goto_tb pair).  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (unlikely(s->singlestep_enabled)) {
        /* Single-stepping: trap to the debugger instead of chaining.  */
        gen_exception(s, dest, EXCP_DEBUG);
    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Destination on the same page: safe to chain TBs directly.  */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        /* Cross-page jump: return to the main loop for a fresh lookup.  */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
879

    
880
/* Unimplemented MAC instruction: raise a line-A exception.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}
884

    
885
/* Unimplemented FPU instruction: raise a line-F exception.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}
889

    
890
/* Completely undefined opcode: raise an exception and abort
   translation loudly so the bad encoding is reported.  */
DISAS_INSN(undef)
{
    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
895

    
896
/* mulu.w / muls.w: 16x16 -> 32 multiply into Dn.  */
DISAS_INSN(mulw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    /* Bit 8 selects the signed variant (muls.w).  */
    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    tmp = tcg_temp_new();
    if (sign)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, tmp);
}
916

    
917
/* divu.w / divs.w: 32/16 divide; the destination register receives
   the remainder in the high word and the quotient in the low word.  */
DISAS_INSN(divw)
{
    TCGv reg;
    TCGv tmp;
    TCGv src;
    int sign;

    /* Bit 8 selects the signed variant (divs.w).  */
    sign = (insn & 0x100) != 0;
    reg = DREG(insn, 9);
    if (sign) {
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
    } else {
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    }
    SRC_EA(env, src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
    /* The helper divides DIV1 by DIV2; the argument 1 requests the
       word-sized form.  Presumably quotient ends up in DIV1 and
       remainder in DIV2 -- see the repacking below.  */
    if (sign) {
        gen_helper_divs(cpu_env, tcg_const_i32(1));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(1));
    }

    /* Repack: low word = quotient (DIV1), high word = remainder
       (DIV2).  */
    tmp = tcg_temp_new();
    src = tcg_temp_new();
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
    tcg_gen_shli_i32(src, QREG_DIV2, 16);
    tcg_gen_or_i32(reg, tmp, src);
    /* The helper computed the condition codes directly.  */
    s->cc_op = CC_OP_FLAGS;
}
946

    
947
/* divu.l / divs.l / remu.l / rems.l (extension word selects the
   registers and signedness).  */
DISAS_INSN(divl)
{
    TCGv num;
    TCGv den;
    TCGv reg;
    uint16_t ext;

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    /* Reject encodings with any reserved extension bits set.  */
    if (ext & 0x87f8) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    num = DREG(ext, 12);
    reg = DREG(ext, 0);
    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(env, den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
    /* Argument 0 requests the long-sized form of the divide.  */
    if (ext & 0x0800) {
        gen_helper_divs(cpu_env, tcg_const_i32(0));
    } else {
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    }
    /* Same register for quotient and remainder encodes a plain
       divide; different registers select the remainder.  */
    if ((ext & 7) == ((ext >> 12) & 7)) {
        /* div */
        tcg_gen_mov_i32 (reg, QREG_DIV1);
    } else {
        /* rem */
        tcg_gen_mov_i32 (reg, QREG_DIV2);
    }
    s->cc_op = CC_OP_FLAGS;
}
979

    
980
/* add.l / sub.l, in both directions.  Bit 14 selects add vs sub;
   bit 8 selects <ea> as the destination (vs Dn).  */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;

    add = (insn & 0x4000) != 0;
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination; remember its address for the
           write-back below.  */
        SRC_EA(env, tmp, OS_LONG, 0, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, OS_LONG, 0, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out iff dest < src (unsigned) after the add.  */
        gen_helper_xflag_lt(QREG_CC_X, dest, src);
        s->cc_op = CC_OP_ADD;
    } else {
        /* Borrow iff tmp < src (unsigned); must be computed before
           the subtract overwrites nothing but order matters when
           dest aliases an input.  */
        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        s->cc_op = CC_OP_SUB;
    }
    gen_update_cc_add(dest, src);
    if (insn & 0x100) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        tcg_gen_mov_i32(reg, dest);
    }
}
1015

    
1016

    
1017
/* Reverse the order of the bits in REG.  */
1018
DISAS_INSN(bitrev)
1019
{
1020
    TCGv reg;
1021
    reg = DREG(insn, 0);
1022
    gen_helper_bitrev(reg, reg);
1023
}
1024

    
1025
/* btst/bchg/bclr/bset with the bit number in a data register.
   Memory operands are byte-sized, register operands long-sized.  */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    /* A non-zero mode field means a memory EA -> byte operation.  */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    /* btst (op == 0) does not write back, so no address needed.  */
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    gen_flush_flags(s);
    tmp = tcg_temp_new();
    /* The bit number is taken modulo the operand width.  */
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
    else
        tcg_gen_andi_i32(tmp, src2, 31);
    src2 = tmp;
    tmp = tcg_temp_new();
    /* Extract the tested bit and move it into the CCF_Z position
       (bit 2) so it can be xor'ed into the flags.  */
    tcg_gen_shr_i32(tmp, src1, src2);
    tcg_gen_andi_i32(tmp, tmp, 1);
    tcg_gen_shli_i32(tmp, tmp, 2);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);

    /* Build the single-bit mask for the modify variants.  */
    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_not_i32(tmp, tmp);
        tcg_gen_and_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    if (op)
        DEST_EA(env, insn, opsize, dest, &addr);
}
1077

    
1078
DISAS_INSN(sats)
1079
{
1080
    TCGv reg;
1081
    reg = DREG(insn, 0);
1082
    gen_flush_flags(s);
1083
    gen_helper_sats(reg, reg, QREG_CC_DEST);
1084
    gen_logic_cc(s, reg);
1085
}
1086

    
1087
/* Push VAL onto the stack: predecrement %sp by 4, then store.  */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv new_sp = tcg_temp_new();

    tcg_gen_subi_i32(new_sp, QREG_SP, 4);
    gen_store(s, OS_LONG, new_sp, val);
    tcg_gen_mov_i32(QREG_SP, new_sp);
}
1096

    
1097
/* movem: load or store multiple registers.  The mask word follows
   the opcode; bit I selects D0-D7 for I < 8, else A0-A7.  */
DISAS_INSN(movem)
{
    TCGv addr;
    int i;
    uint16_t mask;
    TCGv reg;
    TCGv tmp;
    int is_load;

    mask = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    /* Work on a copy so the EA register itself is not advanced.  */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
        if (mask & 1) {
            if (i < 8)
                reg = DREG(i, 0);
            else
                reg = AREG(i, 0);
            if (is_load) {
                tmp = gen_load(s, OS_LONG, addr, 0);
                tcg_gen_mov_i32(reg, tmp);
            } else {
                gen_store(s, OS_LONG, addr, reg);
            }
            /* No need to advance past the last selected register.  */
            if (mask != 1)
                tcg_gen_addi_i32(addr, addr, 4);
        }
    }
}
1133

    
1134
/* btst/bchg/bclr/bset with an immediate bit number (in the following
   extension word).  */
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    /* A non-zero mode field means a memory EA -> byte operation.  */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    /* The high byte of the extension word must be zero.  */
    if (bitnum & 0xff00) {
        disas_undef(env, s, insn);
        return;
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    /* The bit number is taken modulo the operand width.  */
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    /* Shift the tested bit into the CCF_Z position (bit 2).  */
    tmp = tcg_temp_new();
    assert (CCF_Z == (1 << 2));
    if (bitnum > 2)
        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
    else if (bitnum < 2)
        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
    else
        tcg_gen_mov_i32(tmp, src1);
    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
    if (op) {
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
    }
}
1195

    
1196
/* Long-sized immediate arithmetic/logic on an EA: ori/andi/subi/
   addi/eori/cmpi, selected by bits 9-11.  */
DISAS_INSN(arith_im)
{
    int op;
    uint32_t im;
    TCGv src1;
    TCGv dest;
    TCGv addr;

    op = (insn >> 9) & 7;
    /* cmpi (op 6) does not write back, so it needs no address.  */
    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    im = read_im32(env, s);
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 1: /* andi */
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 2: /* subi */
        tcg_gen_mov_i32(dest, src1);
        /* X = borrow: dest < im (unsigned) before the subtract.  */
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        break;
    case 3: /* addi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        /* X = carry: dest < im (unsigned) after the add.  */
        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
        s->cc_op = CC_OP_ADD;
        break;
    case 5: /* eori */
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        break;
    case 6: /* cmpi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        s->cc_op = CC_OP_SUB;
        break;
    default:
        abort();
    }
    if (op != 6) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    }
}
1248

    
1249
DISAS_INSN(byterev)
1250
{
1251
    TCGv reg;
1252

    
1253
    reg = DREG(insn, 0);
1254
    tcg_gen_bswap32_i32(reg, reg);
1255
}
1256

    
1257
/* move.b/.w/.l and movea: size encoded in the top opcode nibble.  */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* The destination EA has its mode/register fields swapped
           relative to the source encoding; rebuild a standard EA.  */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src);
    }
}
1293

    
1294
DISAS_INSN(negx)
1295
{
1296
    TCGv reg;
1297

    
1298
    gen_flush_flags(s);
1299
    reg = DREG(insn, 0);
1300
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
1301
}
1302

    
1303
DISAS_INSN(lea)
1304
{
1305
    TCGv reg;
1306
    TCGv tmp;
1307

    
1308
    reg = AREG(insn, 9);
1309
    tmp = gen_lea(env, s, insn, OS_LONG);
1310
    if (IS_NULL_QREG(tmp)) {
1311
        gen_addr_fault(s);
1312
        return;
1313
    }
1314
    tcg_gen_mov_i32(reg, tmp);
1315
}
1316

    
1317
/* clr.b/.w/.l: store zero to the EA and set the flags for zero.  */
DISAS_INSN(clr)
{
    int opsize;

    switch ((insn >> 6) & 3) {
    case 0: /* clr.b */
        opsize = OS_BYTE;
        break;
    case 1: /* clr.w */
        opsize = OS_WORD;
        break;
    case 2: /* clr.l */
        opsize = OS_LONG;
        break;
    default:
        abort();
    }
    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
    gen_logic_cc(s, tcg_const_i32(0));
}
1337

    
1338
/* Assemble the current CCR value into a fresh temporary:
   X in bit 4, NZVC from CC_DEST in the low nibble.  */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    gen_flush_flags(s);
    dest = tcg_temp_new();
    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
    return dest;
}
1348

    
1349
/* move.w %ccr,Dn: write the CCR into the low word of Dn.  */
DISAS_INSN(move_from_ccr)
{
    TCGv reg;
    TCGv ccr;

    ccr = gen_get_ccr(s);
    reg = DREG(insn, 0);
    /* Only the low 16 bits of the destination are modified.  */
    gen_partset_reg(OS_WORD, reg, ccr);
}
1358

    
1359
DISAS_INSN(neg)
1360
{
1361
    TCGv reg;
1362
    TCGv src1;
1363

    
1364
    reg = DREG(insn, 0);
1365
    src1 = tcg_temp_new();
1366
    tcg_gen_mov_i32(src1, reg);
1367
    tcg_gen_neg_i32(reg, src1);
1368
    s->cc_op = CC_OP_SUB;
1369
    gen_update_cc_add(reg, src1);
1370
    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
1371
    s->cc_op = CC_OP_SUB;
1372
}
1373

    
1374
/* Load the flags (and, unless CCR_ONLY, the SR system byte) from an
   immediate value.  */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    /* Low nibble -> NZVC, bit 4 -> X.  */
    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
    if (!ccr_only) {
        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
    }
}
1382

    
1383
/* Set SR (or just CCR) from either a data register or an immediate,
   depending on the EA encoding of INSN.  */
static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                       int ccr_only)
{
    TCGv tmp;
    TCGv reg;

    s->cc_op = CC_OP_FLAGS;
    if ((insn & 0x38) == 0)
      {
        /* Source is a data register.  */
        tmp = tcg_temp_new();
        reg = DREG(insn, 0);
        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
        tcg_gen_shri_i32(tmp, reg, 4);
        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
        if (!ccr_only) {
            gen_helper_set_sr(cpu_env, reg);
        }
      }
    else if ((insn & 0x3f) == 0x3c)
      {
        /* Immediate form.  */
        uint16_t val;
        val = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        gen_set_sr_im(s, val, ccr_only);
      }
    else
        disas_undef(env, s, insn);
}
1411

    
1412
/* move <ea>,%ccr: only the condition-code byte is affected.  */
DISAS_INSN(move_to_ccr)
{
    gen_set_sr(env, s, insn, 1);
}
1416

    
1417
DISAS_INSN(not)
1418
{
1419
    TCGv reg;
1420

    
1421
    reg = DREG(insn, 0);
1422
    tcg_gen_not_i32(reg, reg);
1423
    gen_logic_cc(s, reg);
1424
}
1425

    
1426
DISAS_INSN(swap)
1427
{
1428
    TCGv src1;
1429
    TCGv src2;
1430
    TCGv reg;
1431

    
1432
    src1 = tcg_temp_new();
1433
    src2 = tcg_temp_new();
1434
    reg = DREG(insn, 0);
1435
    tcg_gen_shli_i32(src1, reg, 16);
1436
    tcg_gen_shri_i32(src2, reg, 16);
1437
    tcg_gen_or_i32(reg, src1, src2);
1438
    gen_logic_cc(s, reg);
1439
}
1440

    
1441
/* pea: push the effective address onto the stack.  */
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
1452

    
1453
/* ext/extb: sign extend byte->word (op 2), word->long (op 3) or
   byte->long (otherwise).  */
DISAS_INSN(ext)
{
    int op;
    TCGv reg;
    TCGv tmp;

    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = tcg_temp_new();
    if (op == 3)
        tcg_gen_ext16s_i32(tmp, reg);
    else
        tcg_gen_ext8s_i32(tmp, reg);
    if (op == 2)
        /* Only the low word of the register is written.  */
        gen_partset_reg(OS_WORD, reg, tmp);
    else
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp);
}
1472

    
1473
/* tst.b/.w/.l: set the flags from the (sign-extended) operand.  */
DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    switch ((insn >> 6) & 3) {
    case 0: /* tst.b */
        opsize = OS_BYTE;
        break;
    case 1: /* tst.w */
        opsize = OS_WORD;
        break;
    case 2: /* tst.l */
        opsize = OS_LONG;
        break;
    default:
        abort();
    }
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp);
}
1494

    
1495
/* pulse: debug pulse instruction.  */
DISAS_INSN(pulse)
{
  /* Implemented as a NOP.  */
}
1499

    
1500
/* The architected ILLEGAL opcode.  */
DISAS_INSN(illegal)
{
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
1504

    
1505
/* ??? This should be atomic.  */
/* tas: test the byte operand, then set its top bit.  */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
}
1518

    
1519
/* mulu.l / muls.l: 32x32 -> 32 multiply.  */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv reg;
    TCGv src1;
    TCGv dest;

    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent.  */
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    /* Reject encodings with reserved extension bits set.  */
    if (ext & 0x87ff) {
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    reg = DREG(ext, 12);
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    dest = tcg_temp_new();
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, coldfire always clears the overflow bit.  */
    gen_logic_cc(s, dest);
}
1542

    
1543
/* link: push An, make An the frame pointer, then adjust %sp by the
   signed 16-bit displacement.  */
DISAS_INSN(link)
{
    int16_t offset;
    TCGv reg;
    TCGv tmp;

    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    /* When An is %sp itself (register 7), skip the frame-pointer
       update so the final %sp assignment below wins.  */
    if ((insn & 7) != 7)
        tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
1559

    
1560
/* unlk: restore An from the saved frame and pop the frame.  */
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    /* Copy An first: the load below may clobber it when An == %sp.  */
    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
1573

    
1574
/* nop: no operation.  */
DISAS_INSN(nop)
{
}
1577

    
1578
DISAS_INSN(rts)
1579
{
1580
    TCGv tmp;
1581

    
1582
    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1583
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1584
    gen_jmp(s, tmp);
1585
}
1586

    
1587
/* jmp / jsr: bit 6 clear means jsr (push the return address).  */
DISAS_INSN(jump)
{
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
1604

    
1605
/* addq / subq: add or subtract a small immediate (1-8; the encoding
   0 means 8) to/from an EA.  */
DISAS_INSN(addsubq)
{
    TCGv src1;
    TCGv src2;
    TCGv dest;
    int val;
    TCGv addr;

    SRC_EA(env, src1, OS_LONG, 0, &addr);
    val = (insn >> 9) & 7;
    if (val == 0)
        val = 8;
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src1);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_subi_i32(dest, dest, val);
        } else {
            tcg_gen_addi_i32(dest, dest, val);
        }
    } else {
        src2 = tcg_const_i32(val);
        /* Bit 8 set selects subq.  */
        if (insn & 0x0100) {
            /* X = borrow: computed before the subtract.  */
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            tcg_gen_subi_i32(dest, dest, val);
            s->cc_op = CC_OP_SUB;
        } else {
            tcg_gen_addi_i32(dest, dest, val);
            /* X = carry: computed after the add.  */
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            s->cc_op = CC_OP_ADD;
        }
        gen_update_cc_add(dest, src2);
    }
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}
1642

    
1643
/* tpf: trapf -- a NOP that just skips its extension words.  */
DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word.  */
        s->pc += 2;
        break;
    case 3: /* Two extension words.  */
        s->pc += 4;
        break;
    case 4: /* No extension words.  */
        break;
    default:
        disas_undef(env, s, insn);
    }
}
1658

    
1659
/* bra / bsr / Bcc with 8-, 16- or 32-bit displacement.  */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;
    int l1;

    /* Displacements are relative to the address after the opcode.  */
    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        /* 8-bit displacement of 0 selects a 16-bit displacement.  */
        offset = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
    } else if (offset == -1) {
        /* 8-bit displacement of -1 (0xff) selects a 32-bit one.  */
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_flush_cc_op(s);
    if (op > 1) {
        /* Bcc */
        l1 = gen_new_label();
        /* Branch over the taken-jump when the inverted condition
           holds.  */
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch.  */
        gen_jmp_tb(s, 0, base + offset);
    }
}
1692

    
1693
DISAS_INSN(moveq)
1694
{
1695
    uint32_t val;
1696

    
1697
    val = (int8_t)insn;
1698
    tcg_gen_movi_i32(DREG(insn, 9), val);
1699
    gen_logic_cc(s, tcg_const_i32(val));
1700
}
1701

    
1702
DISAS_INSN(mvzs)
1703
{
1704
    int opsize;
1705
    TCGv src;
1706
    TCGv reg;
1707

    
1708
    if (insn & 0x40)
1709
        opsize = OS_WORD;
1710
    else
1711
        opsize = OS_BYTE;
1712
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
1713
    reg = DREG(insn, 9);
1714
    tcg_gen_mov_i32(reg, src);
1715
    gen_logic_cc(s, src);
1716
}
1717

    
1718
DISAS_INSN(or)
1719
{
1720
    TCGv reg;
1721
    TCGv dest;
1722
    TCGv src;
1723
    TCGv addr;
1724

    
1725
    reg = DREG(insn, 9);
1726
    dest = tcg_temp_new();
1727
    if (insn & 0x100) {
1728
        SRC_EA(env, src, OS_LONG, 0, &addr);
1729
        tcg_gen_or_i32(dest, src, reg);
1730
        DEST_EA(env, insn, OS_LONG, dest, &addr);
1731
    } else {
1732
        SRC_EA(env, src, OS_LONG, 0, NULL);
1733
        tcg_gen_or_i32(dest, src, reg);
1734
        tcg_gen_mov_i32(reg, dest);
1735
    }
1736
    gen_logic_cc(s, dest);
1737
}
1738

    
1739
DISAS_INSN(suba)
1740
{
1741
    TCGv src;
1742
    TCGv reg;
1743

    
1744
    SRC_EA(env, src, OS_LONG, 0, NULL);
1745
    reg = AREG(insn, 9);
1746
    tcg_gen_sub_i32(reg, reg, src);
1747
}
1748

    
1749
DISAS_INSN(subx)
1750
{
1751
    TCGv reg;
1752
    TCGv src;
1753

    
1754
    gen_flush_flags(s);
1755
    reg = DREG(insn, 9);
1756
    src = DREG(insn, 0);
1757
    gen_helper_subx_cc(reg, cpu_env, reg, src);
1758
}
1759

    
1760
DISAS_INSN(mov3q)
1761
{
1762
    TCGv src;
1763
    int val;
1764

    
1765
    val = (insn >> 9) & 7;
1766
    if (val == 0)
1767
        val = -1;
1768
    src = tcg_const_i32(val);
1769
    gen_logic_cc(s, src);
1770
    DEST_EA(env, insn, OS_LONG, src, NULL);
1771
}
1772

    
1773
/* cmp.b/.w/.l: compare <ea> with Dn (flags only, no write-back).  */
DISAS_INSN(cmp)
{
    int op;
    TCGv src;
    TCGv reg;
    TCGv dest;
    int opsize;

    op = (insn >> 6) & 3;
    switch (op) {
    case 0: /* cmp.b */
        opsize = OS_BYTE;
        s->cc_op = CC_OP_CMPB;
        break;
    case 1: /* cmp.w */
        opsize = OS_WORD;
        s->cc_op = CC_OP_CMPW;
        break;
    case 2: /* cmp.l */
        opsize = OS_LONG;
        s->cc_op = CC_OP_SUB;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
}
1804

    
1805
DISAS_INSN(cmpa)
1806
{
1807
    int opsize;
1808
    TCGv src;
1809
    TCGv reg;
1810
    TCGv dest;
1811

    
1812
    if (insn & 0x100) {
1813
        opsize = OS_LONG;
1814
    } else {
1815
        opsize = OS_WORD;
1816
    }
1817
    SRC_EA(env, src, opsize, 1, NULL);
1818
    reg = AREG(insn, 9);
1819
    dest = tcg_temp_new();
1820
    tcg_gen_sub_i32(dest, reg, src);
1821
    gen_update_cc_add(dest, src);
1822
    s->cc_op = CC_OP_SUB;
1823
}
1824

    
1825
DISAS_INSN(eor)
1826
{
1827
    TCGv src;
1828
    TCGv reg;
1829
    TCGv dest;
1830
    TCGv addr;
1831

    
1832
    SRC_EA(env, src, OS_LONG, 0, &addr);
1833
    reg = DREG(insn, 9);
1834
    dest = tcg_temp_new();
1835
    tcg_gen_xor_i32(dest, src, reg);
1836
    gen_logic_cc(s, dest);
1837
    DEST_EA(env, insn, OS_LONG, dest, &addr);
1838
}
1839

    
1840
DISAS_INSN(and)
1841
{
1842
    TCGv src;
1843
    TCGv reg;
1844
    TCGv dest;
1845
    TCGv addr;
1846

    
1847
    reg = DREG(insn, 9);
1848
    dest = tcg_temp_new();
1849
    if (insn & 0x100) {
1850
        SRC_EA(env, src, OS_LONG, 0, &addr);
1851
        tcg_gen_and_i32(dest, src, reg);
1852
        DEST_EA(env, insn, OS_LONG, dest, &addr);
1853
    } else {
1854
        SRC_EA(env, src, OS_LONG, 0, NULL);
1855
        tcg_gen_and_i32(dest, src, reg);
1856
        tcg_gen_mov_i32(reg, dest);
1857
    }
1858
    gen_logic_cc(s, dest);
1859
}
1860

    
1861
DISAS_INSN(adda)
1862
{
1863
    TCGv src;
1864
    TCGv reg;
1865

    
1866
    SRC_EA(env, src, OS_LONG, 0, NULL);
1867
    reg = AREG(insn, 9);
1868
    tcg_gen_add_i32(reg, reg, src);
1869
}
1870

    
1871
/* addx: add with extend between two data registers.  */
DISAS_INSN(addx)
{
    TCGv reg;
    TCGv src;

    /* The helper consumes the live X flag, so flush first.  */
    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
    /* NOTE(review): subx omits this store; presumably
       gen_flush_flags already leaves cc_op == CC_OP_FLAGS, which
       would make this redundant -- confirm before removing.  */
    s->cc_op = CC_OP_FLAGS;
}
1882

    
1883
/* TODO: This could be implemented without helper functions.  */
/* asl/asr/lsl/lsr by an immediate count (1-8; encoding 0 means 8).  */
DISAS_INSN(shift_im)
{
    TCGv reg;
    int tmp;
    TCGv shift;

    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0)
        tmp = 8;
    shift = tcg_const_i32(tmp);
    /* No need to flush flags because we know we will set C flag.  */
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        /* Bit 3 distinguishes logical (lsr) from arithmetic (asr).  */
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    s->cc_op = CC_OP_SHIFT;
}
1907

    
1908
/* asl/asr/lsl/lsr by a count held in a data register.  */
DISAS_INSN(shift_reg)
{
    TCGv reg;
    TCGv shift;

    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    /* Shift by zero leaves C flag unmodified.   */
    gen_flush_flags(s);
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        /* Bit 3 distinguishes logical (lsr) from arithmetic (asr).  */
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    s->cc_op = CC_OP_SHIFT;
}
1928

    
1929
DISAS_INSN(ff1)
1930
{
1931
    TCGv reg;
1932
    reg = DREG(insn, 0);
1933
    gen_logic_cc(s, reg);
1934
    gen_helper_ff1(reg, reg);
1935
}
1936

    
1937
/* Build the full status register value: the supervisor byte kept in
   QREG_SR combined with the dynamically computed condition codes.
   Returns a new temporary owned by the caller.  */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv flags;
    TCGv sr;

    flags = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, flags);
    return sr;
}
1948

    
1949
/* strldsr: ColdFire "store/load status register" pseudo-op.
   Only the exact two-word form (followed by a move-to-SR immediate,
   0x46FC) is supported; anything else raises EXCP_UNSUPPORTED.  */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;  /* PC of the faulting instruction, for exceptions.  */
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    /* Privileged, and the new SR value must keep the S bit set.  */
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    /* Push the old SR, then load the immediate into SR.  */
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
1970

    
1971
DISAS_INSN(move_from_sr)
1972
{
1973
    TCGv reg;
1974
    TCGv sr;
1975

    
1976
    if (IS_USER(s)) {
1977
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1978
        return;
1979
    }
1980
    sr = gen_get_sr(s);
1981
    reg = DREG(insn, 0);
1982
    gen_partset_reg(OS_WORD, reg, sr);
1983
}
1984

    
1985
/* move to SR: privileged.  Changing SR may change privilege level or
   interrupt state, so the TB chain is abandoned afterwards.  */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_set_sr(env, s, insn, 0);
    gen_lookup_tb(s);
}
1994

    
1995
/* move from USP: privileged; the user stack pointer itself is not
   modelled yet, so a supervisor-mode use raises EXCP_ILLEGAL.  */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
2004

    
2005
/* move to USP: privileged; the user stack pointer itself is not
   modelled yet, so a supervisor-mode use raises EXCP_ILLEGAL.  */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
2014

    
2015
/* halt: stop execution by raising the halt pseudo-exception.  Note the
   exception PC is s->pc (the next insn), so execution resumes after it.  */
DISAS_INSN(halt)
{
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
2019

    
2020
/* stop #imm: privileged.  Loads the immediate into SR, marks the CPU
   halted, and exits with EXCP_HLT so the main loop waits for an
   interrupt.  The exception PC is the following instruction.  */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
2036

    
2037
/* rte: privileged return from exception; the actual frame unwinding is
   done by the EXCP_RTE handler outside translated code.  */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->pc - 2, EXCP_RTE);
}
2045

    
2046
DISAS_INSN(movec)
2047
{
2048
    uint16_t ext;
2049
    TCGv reg;
2050

    
2051
    if (IS_USER(s)) {
2052
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2053
        return;
2054
    }
2055

    
2056
    ext = cpu_lduw_code(env, s->pc);
2057
    s->pc += 2;
2058

    
2059
    if (ext & 0x8000) {
2060
        reg = AREG(ext, 12);
2061
    } else {
2062
        reg = DREG(ext, 12);
2063
    }
2064
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2065
    gen_lookup_tb(s);
2066
}
2067

    
2068
/* intouch: privileged instruction-cache touch.  Caches are not
   modelled, so only the privilege check is performed.  */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
2076

    
2077
/* cpushl: privileged cache push/invalidate.  Caches are not modelled,
   so only the privilege check is performed.  */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
2085

    
2086
/* wddata: debug-data write; always raises a privilege exception
   (unconditionally, unlike the other privileged insns here).  */
DISAS_INSN(wddata)
{
    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
}
2090

    
2091
/* wdebug: privileged debug-module write.  Not implemented; aborts at
   translation time if reached in supervisor mode.  */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    qemu_assert(0, "WDEBUG not implemented");
}
2100

    
2101
/* trap #n: raise one of the 16 trap exceptions, selected by the low
   nibble of the opcode.  */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
}
2105

    
2106
/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
/* Main FPU instruction decoder (coprocessor opcodes f200-f23f).
   Handles fmove in/out, fmovem, control-register moves and the
   register-to-register arithmetic forms.  All FP values are modelled
   internally as double precision; single-precision results are
   obtained by rounding through a f64->f32->f64 round trip.  */
DISAS_INSN(fpu)
{
    uint16_t ext;       /* coprocessor extension word */
    int32_t offset;     /* displacement for d16(An)/d16(PC) modes */
    int opmode;         /* low 7 bits of ext: operation selector */
    TCGv_i64 src;
    TCGv_i64 dest;
    TCGv_i64 res;
    TCGv tmp32;
    int round;          /* nonzero: round result to single precision */
    int set_dest;       /* nonzero: write result back to dest FP reg */
    int opsize;

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0: case 2:
        /* Register/EA arithmetic; handled below the switch.  */
        break;
    case 1:
        goto undef;
    case 3: /* fmove out */
        src = FREG(ext, 7);
        tmp32 = tcg_temp_new_i32();
        /* fmove */
        /* ??? TODO: Proper behavior on overflow.  */
        switch ((ext >> 10) & 7) {
        case 0:
            opsize = OS_LONG;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 1:
            opsize = OS_SINGLE;
            gen_helper_f64_to_f32(tmp32, cpu_env, src);
            break;
        case 4:
            opsize = OS_WORD;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 5: /* OS_DOUBLE */
            /* 64-bit stores need a hand-decoded EA: only (An), (An)+,
               -(An) and d16(An) are supported.  */
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            gen_store64(s, tmp32, src);
            /* Writeback for post-increment / pre-decrement modes.  */
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
            return;
        case 6:
            opsize = OS_BYTE;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        default:
            goto undef;
        }
        DEST_EA(env, insn, opsize, tmp32, NULL);
        tcg_temp_free_i32(tmp32);
        return;
    case 4: /* fmove to control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Ignore writes.  */
            break;
        case 1: /* FPIAR */
        case 2: /* FPSR */
        default:
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
                      (ext >> 10) & 7);
        }
        break;
    case 5: /* fmove from control register.  */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented.  Always return zero.  */
            tmp32 = tcg_const_i32(0);
            break;
        case 1: /* FPIAR */
        case 2: /* FPSR */
        default:
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
                      (ext >> 10) & 7);
            goto undef;
        }
        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
        break;
    case 6: /* fmovem */
    case 7:
        {
            TCGv addr;
            uint16_t mask;
            int i;
            /* Only the static, predecrement-free register-list form is
               supported.  */
            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
                goto undef;
            tmp32 = gen_lea(env, s, insn, OS_LONG);
            if (IS_NULL_QREG(tmp32)) {
                gen_addr_fault(s);
                return;
            }
            addr = tcg_temp_new_i32();
            tcg_gen_mov_i32(addr, tmp32);
            mask = 0x80;
            /* Walk FP0..FP7; bit 13 selects store vs load.  */
            for (i = 0; i < 8; i++) {
                if (ext & mask) {
                    s->is_mem = 1;
                    dest = FREG(i, 0);
                    if (ext & (1 << 13)) {
                        /* store */
                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                    } else {
                        /* load */
                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
                    }
                    /* Advance only if more registers follow.  */
                    if (ext & (mask - 1))
                        tcg_gen_addi_i32(addr, addr, 8);
                }
                mask >>= 1;
            }
            tcg_temp_free_i32(addr);
        }
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        default:
            goto undef;
        }
        if (opsize == OS_DOUBLE) {
            /* 64-bit loads need a hand-decoded EA: (An), (An)+, -(An),
               d16(An) and d16(PC) are supported.  */
            tmp32 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            case 7:
                /* PC-relative: displacement is relative to the
                   extension word's address.  */
                offset = cpu_ldsw_code(env, s->pc);
                offset += s->pc - 2;
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            src = gen_load64(s, tmp32);
            /* Writeback for post-increment / pre-decrement modes.  */
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
        } else {
            SRC_EA(env, tmp32, opsize, 1, NULL);
            src = tcg_temp_new_i64();
            switch (opsize) {
            case OS_LONG:
            case OS_WORD:
            case OS_BYTE:
                gen_helper_i32_to_f64(src, cpu_env, tmp32);
                break;
            case OS_SINGLE:
                gen_helper_f32_to_f64(src, cpu_env, tmp32);
                break;
            }
        }
    } else {
        /* Source register.  */
        src = FREG(ext, 10);
    }
    dest = FREG(ext, 7);
    res = tcg_temp_new_i64();
    /* Seed res with dest for the two-operand ops; ftst (0x3a) never
       reads dest.  */
    if (opmode != 0x3a)
        tcg_gen_mov_f64(res, dest);
    round = 1;
    set_dest = 1;
    switch (opmode) {
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        break;
    case 1: /* fint */
        gen_helper_iround_f64(res, cpu_env, src);
        round = 0;
        break;
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
        round = 0;
        break;
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
        break;
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
        break;
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
        break;
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
        break;
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
        break;
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
        break;
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
        break;
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
        set_dest = 0;
        round = 0;
        break;
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
        set_dest = 0;
        round = 0;
        break;
    default:
        goto undef;
    }
    /* src is a temporary only when it came from memory.  */
    if (ext & (1 << 14)) {
        tcg_temp_free_i64(src);
    }
    if (round) {
        /* Opmode bit 6 requests explicit precision: bit 2 then means
           double (no rounding).  Otherwise round only when FPCR asks
           for single precision.  */
        if (opmode & 0x40) {
            if ((opmode & 0x4) != 0)
                round = 0;
        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
            round = 0;
        }
    }
    if (round) {
        /* Round to single precision via a f64->f32->f64 round trip.  */
        TCGv tmp = tcg_temp_new_i32();
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    }
    /* FP condition codes are evaluated lazily from QREG_FP_RESULT.  */
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    if (set_dest) {
        tcg_gen_mov_f64(dest, res);
    }
    tcg_temp_free_i64(res);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
2393

    
2394
/* fbcc: FP conditional branch.  The branch condition is evaluated from
   QREG_FP_RESULT by a helper that returns -1/0/1/2 (less/equal/greater/
   unordered); the switch below maps each of the 16 condition codes to a
   comparison on that value.  */
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t addr;
    TCGv flag;
    int l1;

    addr = s->pc;  /* Branch displacement is relative to this point.  */
    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    if (insn & (1 << 6)) {
        /* 32-bit displacement: combine with a second extension word.  */
        offset = (offset << 16) | cpu_lduw_code(env, s->pc);
        s->pc += 2;
    }

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception.  */
    flag = tcg_temp_new();
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
    /* Jump to l1 if condition is true.  */
    switch (insn & 0xf) {
    case 0: /* f */
        break;
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
        break;
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
        break;
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
        break;
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
        break;
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
        break;
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
        break;
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
        break;
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
        break;
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
        break;
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
        break;
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 15: /* t */
        tcg_gen_br(l1);
        break;
    }
    /* Fallthrough path: continue after the branch insn.  */
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    /* Taken path.  */
    gen_jmp_tb(s, 1, addr + offset);
}
2469

    
2470
/* frestore: restore FPU state frame.  Not implemented; aborts at
   translation time if reached.  */
DISAS_INSN(frestore)
{
    /* TODO: Implement frestore.  */
    qemu_assert(0, "FRESTORE not implemented");
}
2475

    
2476
/* fsave: save FPU state frame.  Not implemented; aborts at translation
   time if reached.  */
DISAS_INSN(fsave)
{
    /* TODO: Implement fsave.  */
    qemu_assert(0, "FSAVE not implemented");
}
2481

    
2482
/* Extract one 16-bit MAC operand half from VAL into a new temporary.
   UPPER selects the high half.  The placement and extension of the
   half-word depends on the current MACSR operating mode.  */
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
    TCGv half = tcg_temp_new();

    if (s->env->macsr & MACSR_FI) {
        /* Fractional mode: operands are left-aligned.  */
        if (upper) {
            tcg_gen_andi_i32(half, val, 0xffff0000);
        } else {
            tcg_gen_shli_i32(half, val, 16);
        }
    } else if (s->env->macsr & MACSR_SU) {
        /* Signed integer mode: sign-extend the selected half.  */
        if (upper) {
            tcg_gen_sari_i32(half, val, 16);
        } else {
            tcg_gen_ext16s_i32(half, val);
        }
    } else {
        /* Unsigned integer mode: zero-extend the selected half.  */
        if (upper) {
            tcg_gen_shri_i32(half, val, 16);
        } else {
            tcg_gen_ext16u_i32(half, val);
        }
    }
    return half;
}
2503

    
2504
/* Clear the per-operation MACSR status flags (V, Z, N, EV) before a
   new MAC operation computes them.  */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
2509

    
2510
/* ColdFire EMAC multiply-accumulate instruction, including the
   "MAC with load" and dual-accumulate variants.  The multiply result is
   staged in the per-TB s->mactmp temporary, then added to or subtracted
   from one (or two) of the four accumulators with saturation handled by
   helpers.  */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;            /* accumulator index 0-3 */
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;           /* nonzero for the dual-accumulate form */
    TCGv saved_flags;

    /* Lazily allocate the 64-bit scratch used by all MAC insns in
       this TB.  */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    /* Dual accumulate requires the EMAC_B feature.  */
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Apply the scale factor from ext bits 9-10.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Bit 8 selects msac (subtract) vs mac (add).  */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate according to the current operating mode.  */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Writeback for the MAC-with-load form: move the loaded value
           into its destination register and update the address
           register for (An)+ / -(An) modes.  */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
2674

    
2675
/* movclr/move from ACC: read an accumulator into a general register,
   with mode-dependent extraction; bit 6 selects the "clear" variant
   which also zeroes the accumulator and its PAV flag.  */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        /* No saturation: just truncate to 32 bits.  */
        tcg_gen_trunc_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        /* movclr: clear the accumulator and its overflow flag.  */
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
2698

    
2699
/* move ACCx,ACCy: copy one accumulator to another via helper, then
   recompute the MACSR flags for the destination.  */
DISAS_INSN(move_mac)
{
    /* FIXME: This can be done without a helper.  */
    int src;
    TCGv dest;
    src = insn & 3;
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
}
2710

    
2711
/* move from MACSR: copy the MAC status register into a data or address
   register.  */
DISAS_INSN(from_macsr)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);
}
}
2718

    
2719
/* move from MASK: copy the MAC address mask register into a data or
   address register.  */
DISAS_INSN(from_mask)
{
    TCGv reg;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
}
2725

    
2726
/* move from ACCext: read the accumulator extension words.  Bit 10
   selects the upper pair (ACC2/ACC3); the helper depends on whether
   fractional mode is active.  */
DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
    else
        gen_helper_get_mac_exti(reg, cpu_env, acc);
}
2737

    
2738
/* mov.l %macsr,%ccr: copy the low four MACSR flag bits into the CCR
   and clear X.  */
DISAS_INSN(macsr_to_ccr)
{
    tcg_gen_movi_i32(QREG_CC_X, 0);
    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
    s->cc_op = CC_OP_FLAGS;
}
2744

    
2745
/* move to ACC: load an accumulator from a long source operand, with
   mode-dependent extension, then recompute the MACSR flags.  */
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        /* Fractional mode: value is left-shifted into position.  */
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        /* Signed mode: sign-extend.  */
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        /* Unsigned mode: zero-extend.  */
        tcg_gen_extu_i32_i64(acc, val);
    }
    /* Writing the accumulator clears its sticky overflow flag.  */
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
}
2765

    
2766
/* move to MACSR: set the MAC status register via helper.  MACSR mode
   bits affect translation, so the TB chain is abandoned.  */
DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_lookup_tb(s);
}
2773

    
2774
/* move to MASK: set the MAC address mask; the upper 16 bits are always
   forced to ones.  */
DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
2780

    
2781
/* move to ACCext: write the accumulator extension words.  Bit 10
   selects the upper pair (ACC2/ACC3); the helper used depends on the
   MACSR operating mode.  */
DISAS_INSN(to_mext)
{
    TCGv val;
    TCGv acc;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
    else
        gen_helper_set_mac_extu(cpu_env, val, acc);
}
2794

    
2795
static disas_proc opcode_table[65536];
2796

    
2797
static void
2798
register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
2799
{
2800
  int i;
2801
  int from;
2802
  int to;
2803

    
2804
  /* Sanity check.  All set bits must be included in the mask.  */
2805
  if (opcode & ~mask) {
2806
      fprintf(stderr,
2807
              "qemu internal error: bogus opcode definition %04x/%04x\n",
2808
              opcode, mask);
2809
      abort();
2810
  }
2811
  /* This could probably be cleverer.  For now just optimize the case where
2812
     the top bits are known.  */
2813
  /* Find the first zero bit in the mask.  */
2814
  i = 0x8000;
2815
  while ((i & mask) != 0)
2816
      i >>= 1;
2817
  /* Iterate over all combinations of this and lower bits.  */
2818
  if (i == 0)
2819
      i = 1;
2820
  else
2821
      i <<= 1;
2822
  from = opcode & ~(i - 1);
2823
  to = from + i;
2824
  for (i = from; i < to; i++) {
2825
      if ((i & mask) == opcode)
2826
          opcode_table[i] = proc;
2827
  }
2828
}
2829

    
2830
/* Register m68k opcode handlers.  Order is important.
   Later insn override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
/* Each entry registers a handler only when the CPU model has the named
   feature; opcode and mask are hexadecimal.  */
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
    } while(0)
    INSN(undef,     0000, 0000, CF_ISA_A);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(move,      1000, f000, CF_ISA_A);
    INSN(move,      2000, f000, CF_ISA_A);
    INSN(move,      3000, f000, CF_ISA_A);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(lea,       41c0, f1c0, CF_ISA_A);
    INSN(clr,       4200, ff00, CF_ISA_A);
    INSN(undef,     42c0, ffc0, CF_ISA_A);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(pea,       4840, ffc0, CF_ISA_A);
    INSN(swap,      4840, fff8, CF_ISA_A);
    INSN(movem,     48c0, fbc0, CF_ISA_A);
    INSN(ext,       4880, fff8, CF_ISA_A);
    INSN(ext,       48c0, fff8, CF_ISA_A);
    INSN(ext,       49c0, fff8, CF_ISA_A);
    INSN(tst,       4a00, ff00, CF_ISA_A);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    INSN(illegal,   4afc, ffff, CF_ISA_A);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    INSN(trap,      4e40, fff0, CF_ISA_A);
    INSN(link,      4e50, fff8, CF_ISA_A);
    INSN(unlk,      4e58, fff8, CF_ISA_A);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(nop,       4e71, ffff, CF_ISA_A);
    INSN(stop,      4e72, ffff, CF_ISA_A);
    INSN(rte,       4e73, ffff, CF_ISA_A);
    INSN(rts,       4e75, ffff, CF_ISA_A);
    INSN(movec,     4e7b, ffff, CF_ISA_A);
    INSN(jump,      4e80, ffc0, CF_ISA_A);
    INSN(jump,      4ec0, ffc0, CF_ISA_A);
    INSN(addsubq,   5180, f1c0, CF_ISA_A);
    INSN(scc,       50c0, f0f8, CF_ISA_A);
    INSN(addsubq,   5080, f1c0, CF_ISA_A);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    INSN(branch,    6000, f000, CF_ISA_A);
    /* Disable long branch instructions, then add back the ones we want.  */
    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);

    INSN(moveq,     7000, f100, CF_ISA_A);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    INSN(or,        8000, f000, CF_ISA_A);
    INSN(divw,      80c0, f0c0, CF_ISA_A);
    INSN(addsub,    9000, f000, CF_ISA_A);
    INSN(subx,      9180, f1f8, CF_ISA_A);
    INSN(suba,      91c0, f1c0, CF_ISA_A);

    /* MAC/EMAC instructions (a-line); undef_mac first so the EMAC
       entries override it only when the feature is present.  */
    INSN(undef_mac, a000, f000, CF_ISA_A);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    INSN(and,       c000, f000, CF_ISA_A);
    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
    INSN(addsub,    d000, f000, CF_ISA_A);
    INSN(addx,      d180, f1f8, CF_ISA_A);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f340, ffc0, CF_FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#undef INSN
}
2955

    
2956
/* ??? Some of this implementation is not exception safe.  We should always
2957
   write back the result to memory before setting the condition codes.  */
2958
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
2959
{
2960
    uint16_t insn;
2961

    
2962
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2963
        tcg_gen_debug_insn_start(s->pc);
2964
    }
2965

    
2966
    insn = cpu_lduw_code(env, s->pc);
2967
    s->pc += 2;
2968

    
2969
    opcode_table[insn](env, s, insn);
2970
}
2971

    
2972
/* generate intermediate code for basic block 'tb'.  */
/* Core translation loop: decodes guest instructions starting at tb->pc and
   emits TCG ops until a jump, the op buffer fills, a page boundary nears,
   or the instruction budget is exhausted.  When search_pc is true it also
   records the op-index -> guest-PC map used by restore_state_to_opc().  */
static inline void
gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUM68KState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;    /* per-TB decoder state, on the stack */
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;       /* condition codes not yet known statically */
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;  /* supervisor bit clear => user mode */
    dc->is_mem = 0;
    dc->done_mac = 0;
    lj = -1;                         /* index of last op-map slot written */
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;   /* no icount limit requested */

    gen_tb_start();
    do {
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        /* If a breakpoint sits on the current PC, emit a debug exception
           instead of translating the instruction.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    break;
                }
            }
            if (dc->is_jmp)
                break;
        }
        /* Record op-index -> (pc, icount) so CPU state can be rebuilt
           from the middle of this TB after a fault.  */
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                /* zero-fill slots for ops emitted since the last insn */
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* The last insn of an icount-limited TB may do I/O.  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            gen_flush_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        /* Close the TB according to how the decode loop ended.  */
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            /* Fell off the end of the budget/page: chain to the next PC.  */
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Pad the tail of the op map out to the last op emitted.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    //optimize_flags();
    //expand_target_qops();
}
3098

    
3099
/* Translate one TB without building the PC search map.  */
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    gen_intermediate_code_internal(cpu, tb, false);
}
3103

    
3104
/* Translate one TB, also recording the op-index -> PC map.  */
void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    gen_intermediate_code_internal(cpu, tb, true);
}
3108

    
3109
/* Print the architectural register state (D/A/F registers, PC, SR flags
   and the FP result latch) to 'f' for debugging.  'flags' is unused.  */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    uint16_t sr = env->sr;
    CPU_DoubleU u;
    int i;

    for (i = 0; i < 8; i++) {
        /* View each FP register as raw 32-bit halves and as a double.  */
        u.d = env->fregs[i];
        cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, u.l.upper, u.l.lower, *(double *)&u.d);
    }
    cpu_fprintf(f, "PC = %08x   ", env->pc);
    /* 0x10 is the X (extend) flag bit of SR.  */
    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf(f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
}
3131

    
3132
/* Rebuild env->pc from the op-index -> PC map recorded during a
   search_pc translation pass.  'tb' is unused on m68k.  */
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong pc = tcg_ctx.gen_opc_pc[pc_pos];

    env->pc = pc;
}