/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(...)  __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non-zero if either DS/ES/SS has a non-zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
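
/* A DisasContext is filled in from tb->flags when a translation block is
   started and is then threaded through every gen_* helper below.  Most
   fields stay fixed for the whole block; only cc_op and is_jmp evolve as
   instructions are decoded.  A rough, illustrative sketch of the consuming
   loop (the real one lives in gen_intermediate_code, further down in this
   file, and differs in detail):

       dc->cc_op = CC_OP_DYNAMIC;
       for (;;) {
           pc_ptr = disas_insn(dc, pc_ptr);   // decode one guest insn
           if (dc->is_jmp)                    // jump or CPU state change
               break;
       }
*/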

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case OT_WORD:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of the register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}
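
/* Illustration of the OT_BYTE mapping above: without a REX prefix,
   register indexes 4..7 select the legacy high-byte registers AH/CH/DH/BH,
   i.e. bits 8..15 of regs 0..3, hence the deposit into cpu_regs[reg - 4]
   at offset 8.  With any REX prefix present (x86_64_hregs set), 4..7
   instead select SPL/BPL/SIL/DIL, the low bytes of their own registers.
   For example, a hypothetical call

       gen_op_mov_reg_v(OT_BYTE, 4, t0);

   writes AH in 32-bit code but SPL in 64-bit code under a REX prefix. */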

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case 1:
        /* For x86_64, this sets the higher half of the register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
            tcg_gen_ext8u_tl(t0, t0);
        }
        break;
    default:
    std_case:
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}

static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of the register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of the register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of the register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}
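
/* The 'idx' argument packs the operand size in bits 0..1 (OT_BYTE..OT_QUAD)
   and the MMU index, biased by one, in the remaining bits; callers build it
   as "ot + s->mem_index".  E.g. a 32-bit load through the current MMU index
   would look like

       gen_op_ld_v(OT_LONG + s->mem_index, cpu_T[0], cpu_A0);

   which selects tcg_gen_qemu_ld32u with mem_index = (idx >> 2) - 1. */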

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always add the segment base */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
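
/* env->df holds 1 or -1 depending on the direction flag, so shifting it
   left by 'ot' yields +/-(1 << ot): the signed element size in bytes.
   E.g. a dword string op with DF clear gives df = 1, ot = OT_LONG, and
   T0 = 1 << 2 = 4; the string helpers below then add T0 to ESI/EDI. */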

static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}

static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
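
/* Two independent checks may be emitted above: in protected mode with
   CPL > IOPL (or in vm86 mode), the check_iob/w/l helpers consult the TSS
   I/O permission bitmap and raise #GP for a denied port; under SVM, the
   intercept check reports the port and instruction length back to the
   hypervisor.  Both paths save cc_op and EIP first so that a fault can
   unwind to the current instruction. */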

static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to reg */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
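
/* Lazy flag evaluation: rather than computing EFLAGS after every insn,
   the translator records the last flag-setting operation in cpu_cc_op and
   its operands in cpu_cc_dst/cpu_cc_src; the cc_compute_* helpers rebuild
   the flags only when they are actually needed.  E.g. after a 32-bit SUB
   the recorded state is roughly

       cc_op  = CC_OP_SUBL
       cc_dst = result
       cc_src = subtrahend

   from which ZF is (cc_dst == 0) and CF is (cc_dst + cc_src) < cc_src,
   comparing unsigned. */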

static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}

/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_ADCB:
    case CC_OP_ADCW:
    case CC_OP_ADCL:
    case CC_OP_ADCQ:

    case CC_OP_SBBB:
    case CC_OP_SBBW:
    case CC_OP_SBBL:
    case CC_OP_SBBQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:

    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}

/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;\
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;\
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
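
/* Each expansion above turns a REP-prefixed string insn into a one-element
   loop: exit to next_eip if ECX is zero, execute one element, decrement
   ECX, then jump back to cur_eip so the same instruction is re-entered.
   The GEN_REPZ2 variants additionally test ZF against 'nz' for REPZ/REPNZ.
   Conceptually, e.g. gen_repz_movs behaves like

       while (ECX != 0) { movs_one_element(); ECX--; }

   except that every iteration goes back through the translated-block
   dispatch loop. */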

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
    }
}
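
/* The "exception" noted above: for the forms that write ST(i) rather than
   ST(0), the x86 encoding swaps the plain and reversed variants, so op 4
   selects fsubr and op 5 selects fsub (and likewise fdivr/fdiv at 6 and 7),
   the mirror image of the ST0_FT0 table. */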

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}
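
/* For ADC and SBB the resulting cc_op depends on a run-time value (the
   incoming carry), so it cannot be recorded statically: the code above
   computes cc_op = CC_OP_ADDB/CC_OP_SUBB + ot + 4 * carry at run time.
   The "+ 4" steps from the ADD/SUB group to the ADC/SBB group in the
   CC_OP enumeration, and s1->cc_op is left as CC_OP_DYNAMIC. */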

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non-zero shift */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* XXX: inefficient */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();

    tcg_gen_mov_tl(t0, cpu_T[0]);
    tcg_gen_mov_tl(t1, cpu_T3);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right)
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    else
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non-zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
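
/* tcg_gen_lshift is a sign-directed shift: a negative count shifts right.
   The rotate code below uses it to move the "overflow changed" bit
   (old top bit XOR new top bit) into the CC_O position, bit 11: the count
   11 - (data_bits - 1) is +4 for a byte rotate (bit 7 to bit 11) and -20
   for a 32-bit rotate (bit 31 to bit 11). */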

static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}

static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask;
    int data_bits;
    TCGv t0, t1, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t1, t0);

    op2 &= mask;
    data_bits = 8 << ot;
    if (op2 != 0) {
        int shift = op2 & ((1 << (3 + ot)) - 1);
        if (is_right) {
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
        } else {
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        }
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    if (op2 != 0) {
        /* update eflags */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);

        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        if (is_right) {
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
        }
        tcg_gen_andi_tl(t0, t0, CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

        tcg_gen_discard_tl(cpu_cc_dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
        s->cc_op = CC_OP_EFLAGS;
    }

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(a0);
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch (ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
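
/* cpu_cc_tmp == -1 acts as a sentinel meaning "the helper left the flags
   untouched" (a zero rotate count), so the branch above skips the EFLAGS
   update in that case.  Because the outcome is only known at run time,
   cc_op must be left as CC_OP_DYNAMIC here. */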
1759

    
1760
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}

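/* generate a shift or rotate: 'd' is the destination operand and 's'
   the count register (OR_TMP1 if the count is already in T1) */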
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

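/* same as gen_shift, but with an immediate shift count 'c' */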
static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

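/* decode the modrm addressing bytes and compute the effective address
   into A0, adding the segment base when required */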
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* index == 4 means no index */
        if (havesib && (index != 4)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}

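/* parse the modrm addressing bytes of an instruction that performs no
   memory access, only advancing s->pc past them */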
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}

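/* fetch an immediate operand of size 'ot' from the instruction stream */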
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}

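/* jump to 'eip', chaining translation blocks directly when the target
   stays within the same page as the current TB */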
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

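/* generate a conditional jump to 'val'; execution continues at
   next_eip when condition 'b' is false */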
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    gen_update_cc_op(s);
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

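/* generate SETcc: set T0 to 0 or 1 according to condition code 'b' */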
static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new();
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
        tcg_temp_free(t0);
    } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
           worthwhile */
        inv = b & 1;
        jcc_op = (b >> 1) & 7;
        gen_setcc_slow_T0(s, jcc_op);
        if (inv) {
            tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
        }
    }
}

static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}

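/* add a constant to ESP, using the address size of the current stack
   segment */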
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(2, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(1, R_ESP, addend);
    } else {
        gen_op_add_reg_im(0, R_ESP, addend);
    }
}

/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T0_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                tcg_gen_mov_tl(cpu_T[1], cpu_A0);
                gen_op_addl_A0_seg(R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            tcg_gen_mov_tl(cpu_T[1], cpu_A0);
            gen_op_addl_A0_seg(R_SS);
        }
        gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
    }
}

/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        if (s->dflag) {
            gen_op_addq_A0_im(-8);
            gen_op_st_T1_A0(OT_QUAD + s->mem_index);
        } else {
            gen_op_addq_A0_im(-2);
            gen_op_st_T1_A0(OT_WORD + s->mem_index);
        }
        gen_op_mov_reg_A0(2, R_ESP);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (!s->dflag)
            gen_op_addl_A0_im(-2);
        else
            gen_op_addl_A0_im(-4);
        if (s->ss32) {
            if (s->addseg) {
                gen_op_addl_A0_seg(R_SS);
            }
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(R_SS);
        }
        gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);

        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
        else
            gen_stack_update(s, (-2) << s->dflag);
    }
}

/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movq_A0_reg(R_ESP);
        gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        if (s->ss32) {
            if (s->addseg)
                gen_op_addl_A0_seg(R_SS);
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(R_SS);
        }
        gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
    }
}

static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    } else
#endif
    {
        gen_stack_update(s, 2 << s->dflag);
    }
}

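/* compute the stack top address into A0; the unsegmented ESP value is
   kept in T1 */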
static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(R_SS);
}

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-16 << s->dflag);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(R_SS);
    for(i = 0;i < 8; i++) {
        gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
        gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(R_SS);
    for(i = 0;i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
            gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
        }
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
}

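/* generate ENTER: push EBP, copy 'level' nested frame pointers and
   reserve 'esp_addend' bytes for locals */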
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    int ot, opsize;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        ot = s->dflag ? OT_QUAD : OT_WORD;
        opsize = 1 << ot;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(tcg_const_i32(level),
                                     tcg_const_i32((ot == OT_QUAD)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_QUAD, R_ESP);
    } else
#endif
    {
        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            gen_op_andl_A0_ffff();
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(R_SS);
        /* push bp */
        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(tcg_const_i32(level),
                                   tcg_const_i32(s->dflag),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
    }
}

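/* raise exception 'trapno' at cur_eip and stop translation */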
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_debug();
    s->is_jmp = DISAS_TB_JUMP;
}

/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq();
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf();
    }
    if (s->singlestep_enabled) {
        gen_helper_debug();
    } else if (s->tf) {
        gen_helper_single_step();
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    if (s->jmp_opt) {
        gen_update_cc_op(s);
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}

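/* helpers moving 64 bit and 128 bit values between guest memory
   (address in A0) and CPU state fields */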
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }

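/* operation table indexed by the opcode byte; the second index is the
   mandatory prefix: 0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2 */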
static void *sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { gen_helper_shufps, gen_helper_shufpd },

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0x70] = { gen_helper_pshufw_mmx,
               gen_helper_pshufd_xmm,
               gen_helper_pshufhw_xmm,
               gen_helper_pshuflw_xmm },
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = MMX_OP2(maskmov),
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};

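/* shift by immediate group (0x0f 0x71-0x73): the first index combines
   the operand width and the modrm reg field, the second selects the
   MMX or SSE helper */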
static void *sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};

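/* scalar int/float conversions; the 64 bit integer forms are only
   available on x86_64 */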
static void *sse_op_table3[4 * 3] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd,
    X86_64_ONLY(gen_helper_cvtsq2ss),
    X86_64_ONLY(gen_helper_cvtsq2sd),

    gen_helper_cvttss2si,
    gen_helper_cvttsd2si,
    X86_64_ONLY(gen_helper_cvttss2sq),
    X86_64_ONLY(gen_helper_cvttsd2sq),

    gen_helper_cvtss2si,
    gen_helper_cvtsd2si,
    X86_64_ONLY(gen_helper_cvtss2sq),
    X86_64_ONLY(gen_helper_cvtsd2sq),
};

static void *sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};

static void *sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};

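/* three-byte opcodes 0x0f 0x38 (table6) and 0x0f 0x3a (table7):
   op[0] is the MMX helper, op[1] the SSE helper, and ext_mask is the
   CPUID feature bit required by the instruction */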
struct sse_op_helper_s {
    void *op[2]; uint32_t ext_mask;
};
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
static struct sse_op_helper_s sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};

static struct sse_op_helper_s sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};

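/* translate an MMX/SSE/3DNow! instruction: 'b' is the opcode byte
   following 0x0f, prefixes having already been decoded into s->prefix */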
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3064
{
3065
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
3066
    int modrm, mod, rm, reg, reg_addr, offset_addr;
3067
    void *sse_op2;
3068

    
3069
    b &= 0xff;
3070
    if (s->prefix & PREFIX_DATA)
3071
        b1 = 1;
3072
    else if (s->prefix & PREFIX_REPZ)
3073
        b1 = 2;
3074
    else if (s->prefix & PREFIX_REPNZ)
3075
        b1 = 3;
3076
    else
3077
        b1 = 0;
3078
    sse_op2 = sse_op_table1[b][b1];
3079
    if (!sse_op2)
3080
        goto illegal_op;
3081
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3082
        is_xmm = 1;
3083
    } else {
3084
        if (b1 == 0) {
3085
            /* MMX case */
3086
            is_xmm = 0;
3087
        } else {
3088
            is_xmm = 1;
3089
        }
3090
    }
3091
    /* simple MMX/SSE operation */
3092
    if (s->flags & HF_TS_MASK) {
3093
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3094
        return;
3095
    }
3096
    if (s->flags & HF_EM_MASK) {
3097
    illegal_op:
3098
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3099
        return;
3100
    }
3101
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3102
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3103
            goto illegal_op;
3104
    if (b == 0x0e) {
3105
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3106
            goto illegal_op;
3107
        /* femms */
3108
        gen_helper_emms();
3109
        return;
3110
    }
3111
    if (b == 0x77) {
3112
        /* emms */
3113
        gen_helper_emms();
3114
        return;
3115
    }
3116
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3117
       the static cpu state) */
3118
    if (!is_xmm) {
3119
        gen_helper_enter_mmx();
3120
    }
3121

    
3122
    modrm = ldub_code(s->pc++);
3123
    reg = ((modrm >> 3) & 7);
3124
    if (is_xmm)
3125
        reg |= rex_r;
3126
    mod = (modrm >> 6) & 3;
3127
    if (sse_op2 == SSE_SPECIAL) {
3128
        b |= (b1 << 8);
3129
        switch(b) {
3130
        case 0x0e7: /* movntq */
3131
            if (mod == 3)
3132
                goto illegal_op;
3133
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3134
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3135
            break;
3136
        case 0x1e7: /* movntdq */
3137
        case 0x02b: /* movntps */
3138
        case 0x12b: /* movntps */
3139
            if (mod == 3)
3140
                goto illegal_op;
3141
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3142
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3143
            break;
3144
        case 0x3f0: /* lddqu */
3145
            if (mod == 3)
3146
                goto illegal_op;
3147
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3148
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3149
            break;
3150
        case 0x22b: /* movntss */
3151
        case 0x32b: /* movntsd */
3152
            if (mod == 3)
3153
                goto illegal_op;
3154
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3155
            if (b1 & 1) {
3156
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3157
                    xmm_regs[reg]));
3158
            } else {
3159
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3160
                    xmm_regs[reg].XMM_L(0)));
3161
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
3162
            }
3163
            break;
3164
        case 0x6e: /* movd mm, ea */
3165
#ifdef TARGET_X86_64
3166
            if (s->dflag == 2) {
3167
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3168
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3169
            } else
3170
#endif
3171
            {
3172
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3173
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
3174
                                 offsetof(CPUX86State,fpregs[reg].mmx));
3175
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3176
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3177
            }
3178
            break;
3179
        case 0x16e: /* movd xmm, ea */
3180
#ifdef TARGET_X86_64
3181
            if (s->dflag == 2) {
3182
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3183
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
3184
                                 offsetof(CPUX86State,xmm_regs[reg]));
3185
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3186
            } else
3187
#endif
3188
            {
3189
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3190
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
3191
                                 offsetof(CPUX86State,xmm_regs[reg]));
3192
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3193
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3194
            }
3195
            break;
3196
        case 0x6f: /* movq mm, ea */
3197
            if (mod != 3) {
3198
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3199
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3200
            } else {
3201
                rm = (modrm & 7);
3202
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3203
                               offsetof(CPUX86State,fpregs[rm].mmx));
3204
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3205
                               offsetof(CPUX86State,fpregs[reg].mmx));
3206
            }
3207
            break;
3208
        case 0x010: /* movups */
3209
        case 0x110: /* movupd */
3210
        case 0x028: /* movaps */
3211
        case 0x128: /* movapd */
3212
        case 0x16f: /* movdqa xmm, ea */
3213
        case 0x26f: /* movdqu xmm, ea */
3214
            if (mod != 3) {
3215
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3216
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3217
            } else {
3218
                rm = (modrm & 7) | REX_B(s);
3219
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3220
                            offsetof(CPUX86State,xmm_regs[rm]));
3221
            }
3222
            break;
3223
        case 0x210: /* movss xmm, ea */
3224
            if (mod != 3) {
3225
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3226
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3227
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3228
                gen_op_movl_T0_0();
3229
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3230
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3231
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3232
            } else {
3233
                rm = (modrm & 7) | REX_B(s);
3234
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3235
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3236
            }
3237
            break;
3238
        case 0x310: /* movsd xmm, ea */
3239
            if (mod != 3) {
3240
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3241
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3242
                gen_op_movl_T0_0();
3243
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3244
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3245
            } else {
3246
                rm = (modrm & 7) | REX_B(s);
3247
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3248
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3249
            }
3250
            break;
3251
        case 0x012: /* movlps */
3252
        case 0x112: /* movlpd */
3253
            if (mod != 3) {
3254
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3255
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3256
            } else {
3257
                /* movhlps */
3258
                rm = (modrm & 7) | REX_B(s);
3259
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3260
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3261
            }
3262
            break;
3263
        case 0x212: /* movsldup */
3264
            if (mod != 3) {
3265
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3266
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3267
            } else {
3268
                rm = (modrm & 7) | REX_B(s);
3269
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3270
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3271
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3272
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3273
            }
3274
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3275
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3276
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3277
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3278
            break;
3279
        case 0x312: /* movddup */
3280
            if (mod != 3) {
3281
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3282
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3283
            } else {
3284
                rm = (modrm & 7) | REX_B(s);
3285
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3286
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3287
            }
3288
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3289
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3290
            break;
3291
        case 0x016: /* movhps */
3292
        case 0x116: /* movhpd */
3293
            if (mod != 3) {
3294
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3295
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3296
            } else {
3297
                /* movlhps */
3298
                rm = (modrm & 7) | REX_B(s);
3299
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3300
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3301
            }
3302
            break;
3303
        case 0x216: /* movshdup */
3304
            if (mod != 3) {
3305
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3306
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3307
            } else {
3308
                rm = (modrm & 7) | REX_B(s);
3309
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3310
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3311
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3312
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3313
            }
3314
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3315
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3316
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3317
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3318
            break;
3319
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = ldub_code(s->pc++) & 0x3F;
                bit_index = ldub_code(s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                    offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
                        tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
                        tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto illegal_op;
            }
            val = ldub_code(s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
            if (!sse_op2)
                goto illegal_op;
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
            if (ot == OT_LONG) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
            } else {
                ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
                                    (b & 1) * 4];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
                ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx();
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx();
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x138:
            if (s->prefix & PREFIX_REPNZ)
                goto crc32;
        case 0x038:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_op2 = sse_op_table6[b].op[b1];
            if (!sse_op2)
                goto illegal_op;
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                        offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a:            /* movntqda */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_op2 == SSE_SPECIAL)
                goto illegal_op;

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);

            if (b == 0x17)
                s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x338: /* crc32 */
        crc32:
            b = modrm;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            if (b != 0xf0 && b != 0xf1)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
                goto illegal_op;

            if (b == 0xf0)
                ot = OT_BYTE;
            else if (b == 0xf1 && s->dflag != 2)
                if (s->prefix & PREFIX_DATA)
                    ot = OT_WORD;
                else
                    ot = OT_LONG;
            else
                ot = OT_QUAD;

            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));

            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_op2 = sse_op_table7[b].op[b1];
            if (!sse_op2)
                goto illegal_op;
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_op2 == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = ldub_code(s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                            (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                    offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = ldub_code(s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                s->cc_op = CC_OP_EFLAGS;

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = ldub_code(s->pc++);
            sse_op2 = sse_op_table5[val];
            if (!sse_op2)
                goto illegal_op;
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = ldub_code(s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = ldub_code(s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_op2 = sse_op_table4[val][b1];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            s->cc_op = CC_OP_EFLAGS;
        }
    }
}

/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(pc_start);
    s->pc = pc_start;
    prefixes = 0;
    aflag = s->code32;
    dflag = s->code32;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
 next_byte:
    b = ldub_code(s->pc);
    s->pc++;
    /* check prefixes */
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
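        /* The REX R, X and B bits are shifted up to bit 3 here so that
           they can later be OR'ed directly into the modrm reg, SIB index
           and rm/base register numbers. */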
        case 0x40 ... 0x4f:
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        if (rex_w == 1) {
            /* 0x66 is ignored if rex.w is set */
            dflag = 2;
        } else {
            if (prefixes & PREFIX_DATA)
                dflag ^= 1;
        }
        if (!(prefixes & PREFIX_ADR))
            aflag = 2;
    } else
#endif
    {
        switch (b) {
        case 0xf3:
            prefixes |= PREFIX_REPZ;
            goto next_byte;
        case 0xf2:
            prefixes |= PREFIX_REPNZ;
            goto next_byte;
        case 0xf0:
            prefixes |= PREFIX_LOCK;
            goto next_byte;
        case 0x2e:
            s->override = R_CS;
            goto next_byte;
        case 0x36:
            s->override = R_SS;
            goto next_byte;
        case 0x3e:
            s->override = R_DS;
            goto next_byte;
        case 0x26:
            s->override = R_ES;
            goto next_byte;
        case 0x64:
            s->override = R_FS;
            goto next_byte;
        case 0x65:
            s->override = R_GS;
            goto next_byte;
        case 0x66:
            prefixes |= PREFIX_DATA;
            goto next_byte;
        case 0x67:
            prefixes |= PREFIX_ADR;
            goto next_byte;
        }
        if (prefixes & PREFIX_DATA)
            dflag ^= 1;
        if (prefixes & PREFIX_ADR)
            aflag ^= 1;
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = ldub_code(s->pc++) | 0x100;
        goto reswitch;

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    gen_op_movl_T0_0();
                    s->cc_op = CC_OP_LOGICB + ot;
                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = ldub_code(s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;

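    /* 0x82 is an alias of 0x80 (group 1, Eb/Ib) which is only valid
       outside 64 bit mode. */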
    case 0x82:
        if (CODE64(s))
            goto illegal_op;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x83:
        {
            int val;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            s->cc_op = CC_OP_LOGICB + ot;
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
            s->cc_op = CC_OP_SUBB + ot;
            break;
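        /* For mul, cpu_cc_dst receives the low part of the product and
           cpu_cc_src the high part, so that CC_OP_MULx can compute CF/OF
           (set iff the high part is non zero). */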
        case 4: /* mul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
#else
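                /* without TARGET_X86_64, target_ulong is only 32 bits wide,
                   so the full product is computed in 64 bit temporaries and
                   then split between EAX and EDX */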
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_extu_i32_i64(t0, cpu_T[0]);
                    tcg_gen_extu_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_mulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_imulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
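        /* div and idiv are done in helpers since they can raise #DE; the
           current eip is recorded first so that the exception points at
           the faulting instruction. */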
        case 6: /* div */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;

    case 0xfe: /* GRP4 */
4561
    case 0xff: /* GRP5 */
4562
        if ((b & 1) == 0)
4563
            ot = OT_BYTE;
4564
        else
4565
            ot = dflag + OT_WORD;
4566

    
4567
        modrm = ldub_code(s->pc++);
4568
        mod = (modrm >> 6) & 3;
4569
        rm = (modrm & 7) | REX_B(s);
4570
        op = (modrm >> 3) & 7;
4571
        if (op >= 2 && b == 0xfe) {
4572
            goto illegal_op;
4573
        }
4574
        if (CODE64(s)) {
4575
            if (op == 2 || op == 4) {
4576
                /* operand size for jumps is 64 bit */
4577
                ot = OT_QUAD;
4578
            } else if (op == 3 || op == 5) {
4579
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4580
            } else if (op == 6) {
4581
                /* default push size is 64 bit */
4582
                ot = dflag ? OT_QUAD : OT_WORD;
4583
            }
4584
        }
4585
        if (mod != 3) {
4586
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4587
            if (op >= 2 && op != 3 && op != 5)
4588
                gen_op_ld_T0_A0(ot + s->mem_index);
4589
        } else {
4590
            gen_op_mov_TN_reg(ot, 0, rm);
4591
        }
4592

    
4593
        switch(op) {
4594
        case 0: /* inc Ev */
4595
            if (mod != 3)
4596
                opreg = OR_TMP0;
4597
            else
4598
                opreg = rm;
4599
            gen_inc(s, ot, opreg, 1);
4600
            break;
4601
        case 1: /* dec Ev */
4602
            if (mod != 3)
4603
                opreg = OR_TMP0;
4604
            else
4605
                opreg = rm;
4606
            gen_inc(s, ot, opreg, -1);
4607
            break;
4608
        case 2: /* call Ev */
4609
            /* XXX: optimize if memory (no 'and' is necessary) */
4610
            if (s->dflag == 0)
4611
                gen_op_andl_T0_ffff();
4612
            next_eip = s->pc - s->cs_base;
4613
            gen_movtl_T1_im(next_eip);
4614
            gen_push_T1(s);
4615
            gen_op_jmp_T0();
4616
            gen_eob(s);
4617
            break;
4618
        case 3: /* lcall Ev */
4619
            gen_op_ld_T1_A0(ot + s->mem_index);
4620
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4621
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4622
        do_lcall:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;

    case 0x84: /* test Ev, Gv */
    case 0x85:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        val = insn_get(s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

    case 0x98: /* CWDE/CBW */
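        /* sign-extend the low half of the accumulator into itself:
           CBW (AL->AX), CWDE (AX->EAX) or CDQE (EAX->RAX with REX.W) */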
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
        } else {
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
    case 0x99: /* CDQ/CWD */
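        /* replicate the accumulator's sign bit into rDX:
           CWD (AX->DX:AX), CDQ (EAX->EDX:EAX) or CQO (RAX->RDX:RAX) */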
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }
        }
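        /* perform the signed multiply; CC_SRC is set to the difference
           between the full product and its sign-extended low half, so
           the flags code can derive OF/CF from whether the result fit */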

#ifdef TARGET_X86_64
        if (ot == OT_QUAD) {
            gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else
#endif
        if (ot == OT_LONG) {
#ifdef TARGET_X86_64
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[1], t0);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
                }
#endif
        } else {
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
        }
        gen_op_mov_reg_T0(ot, reg);
        s->cc_op = CC_OP_MULB + ot;
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
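        /* XADD: the previous destination value ends up in the source
           register and the sum in the destination */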
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        s->cc_op = CC_OP_ADDB + ot;
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
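        /* CMPXCHG: if rAX equals the destination, store the source
           there, otherwise load the destination into rAX; the memory
           form writes back unconditionally, as the hardware does */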
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
            gen_extu(ot, t2);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
            if (mod == 3) {
                label2 = gen_new_label();
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
                gen_set_label(label2);
            } else {
                tcg_gen_mov_tl(t1, t0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                /* always store */
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_dst, t2);
            s->cc_op = CC_OP_SUBB + ot;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
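        /* CMPXCHG8B (and CMPXCHG16B with REX.W) only accept a memory
           operand and require the CX8/CX16 CPUID bits; the helpers
           compare and exchange the EDX:EAX (RDX:RAX) pair */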
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_A0);
        }
        s->cc_op = CC_OP_EFLAGS;
        break;

        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
        gen_push_T0(s);
        break;
    case 0x58 ... 0x5f: /* pop */
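        /* in 64-bit mode pops default to 64 bit; a 66h prefix still
           selects 16 bit, but 32 bit pops are not encodable */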
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s);
        gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);